hexsha stringlengths 40-40 | size int64 6-14.9M | ext stringclasses 1 value | lang stringclasses 1 value | max_stars_repo_path stringlengths 6-260 | max_stars_repo_name stringlengths 6-119 | max_stars_repo_head_hexsha stringlengths 40-41 | max_stars_repo_licenses sequence | max_stars_count int64 1-191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24-24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24-24 ⌀ | max_issues_repo_path stringlengths 6-260 | max_issues_repo_name stringlengths 6-119 | max_issues_repo_head_hexsha stringlengths 40-41 | max_issues_repo_licenses sequence | max_issues_count int64 1-67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24-24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24-24 ⌀ | max_forks_repo_path stringlengths 6-260 | max_forks_repo_name stringlengths 6-119 | max_forks_repo_head_hexsha stringlengths 40-41 | max_forks_repo_licenses sequence | max_forks_count int64 1-105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24-24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24-24 ⌀ | avg_line_length float64 2-1.04M | max_line_length int64 2-11.2M | alphanum_fraction float64 0-1 | cells sequence | cell_types sequence | cell_type_groups sequence |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e749e40ac6f903b80541d1d51e56d5ed157c08cc | 9,455 | ipynb | Jupyter Notebook | EvaluateEntireDataset.ipynb | kevindoyle93/fyp-ml-notebooks | 7df6110986e21708598e43befb527850599f5354 | [
"Apache-2.0"
] | null | null | null | EvaluateEntireDataset.ipynb | kevindoyle93/fyp-ml-notebooks | 7df6110986e21708598e43befb527850599f5354 | [
"Apache-2.0"
] | null | null | null | EvaluateEntireDataset.ipynb | kevindoyle93/fyp-ml-notebooks | 7df6110986e21708598e43befb527850599f5354 | [
"Apache-2.0"
] | null | null | null | 32.944251 | 120 | 0.466843 | [
[
[
"## Import Pandas and the classifiers to experiment with",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import SGDClassifier, LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import preprocessing",
"_____no_output_____"
]
],
[
[
"## Evaluate models on test data",
"_____no_output_____"
]
],
[
[
"from sklearn import metrics\n\ndef evaluate_model(model, row_name):\n training_df = pd.read_csv('data/individual_teams.csv', index_col=0)\n test_df = pd.read_csv('data/test_data.csv', index_col=0)\n\n target_feature = 'won_match'\n training_columns = [col for col in training_df.columns if col != target_feature]\n\n model.fit(training_df[training_columns], training_df[target_feature])\n predictions = model.predict(test_df[training_columns])\n \n misclassification = 1 - metrics.accuracy_score(predictions, test_df[target_feature])\n recall = metrics.recall_score(predictions, test_df[target_feature])\n precision = metrics.precision_score(predictions, test_df[target_feature])\n f1 = metrics.f1_score(predictions, test_df[target_feature])\n \n confusion_matrix = metrics.confusion_matrix(predictions, test_df[target_feature])\n true_positives = confusion_matrix[1][1]\n true_negatives = confusion_matrix[0][0]\n false_positives = confusion_matrix[0][1]\n false_negatives = confusion_matrix[1][0]\n true_positive_rate = true_positives / (true_positives + false_negatives)\n true_negative_rate = true_negatives / (true_negatives + false_positives)\n false_positive_rate = false_positives / (true_negatives + false_positives)\n false_negative_rate = false_negatives / (true_positives + false_negatives)\n \n return [\n row_name,\n misclassification,\n recall,\n precision,\n f1,\n true_positive_rate,\n false_positive_rate,\n true_negative_rate,\n false_negative_rate,\n ]",
"_____no_output_____"
],
[
"data = [\n evaluate_model(GradientBoostingClassifier(n_estimators=55, learning_rate=0.1), 'Gradient Boost'),\n evaluate_model(RandomForestClassifier(), 'Random Forest'),\n evaluate_model(DecisionTreeClassifier(), 'Decision Tree'),\n evaluate_model(SVC(kernel='linear'), 'Linear SVM'),\n evaluate_model(SGDClassifier(loss='log', n_iter=60), 'SGD'),\n evaluate_model(LogisticRegression(solver='lbfgs'), 'Regression'),\n evaluate_model(MLPClassifier(hidden_layer_sizes=(90,), activation='logistic', max_iter=200), 'Neural Net'),\n]\n\nresults = pd.DataFrame(data, columns=[\n 'Classifier', \n 'Misclassification', \n 'Recall', \n 'Precision', \n 'F1', \n 'TPR', \n 'FPR', \n 'TNR', \n 'FNR'\n ])\n\nresults",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e749f6dde40359dd8c03317ae3e17353cb87aea6 | 80,390 | ipynb | Jupyter Notebook | MNIST_NN_Activation_Function.ipynb | priyamkgp/NN-Activation-Function | 28ac9576a2027b8e282baba7d862ebbc9419691e | [
"MIT"
] | null | null | null | MNIST_NN_Activation_Function.ipynb | priyamkgp/NN-Activation-Function | 28ac9576a2027b8e282baba7d862ebbc9419691e | [
"MIT"
] | null | null | null | MNIST_NN_Activation_Function.ipynb | priyamkgp/NN-Activation-Function | 28ac9576a2027b8e282baba7d862ebbc9419691e | [
"MIT"
] | null | null | null | 58.08526 | 29,342 | 0.547854 | [
[
[
"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n# data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') \n# data = pd.read_csv('C:\\\\Users\\\\Priyam\\\\Documents\\\\ML Assignments\\\\MNIST\\\\train\\\\train.csv')\n\ndata = pd.read_csv('train.csv') # /kaggle/input/digit-recognizer/train.csv",
"_____no_output_____"
],
[
"data.head(8)",
"_____no_output_____"
],
[
"data = np.array(data)\nm, n = data.shape\nnp.random.shuffle(data) # shuffle before splitting into training and test sets\n\ndata_train = data[0:1000].T\nY_train = data_train[0].astype(int)\nX_train = data_train[1:n]\nX_train = X_train / 255.\n_,m_train = X_train.shape\n\ndata_test = data[1000:m].T\nY_test = data_test[0].astype(int)\nX_test = data_test[1:n]\nX_test = X_test / 255.",
"_____no_output_____"
],
[
"def init_params():\n W1 = np.random.rand(10, 784) - 0.5\n b1 = np.random.rand(10, 1) - 0.5\n W2 = np.random.rand(10, 10) - 0.5\n b2 = np.random.rand(10, 1) - 0.5\n \n k0 = np.random.rand(1,1)\n k1 = np.random.rand(1,1)\n return W1, b1, W2, b2, k0, k1\n\n# Activation function for hidden layer\ndef activation_fn(Z, k0, k1): \n return k0 + k1*Z\n\n# Derivative of activation function for hidden layer\ndef activation_fn_deriv(Z, k0, k1):\n return k1\n\n# Activation function for output layer\ndef softmax(Z): \n A = np.exp(Z) / sum(np.exp(Z))\n return A\n\ndef one_hot(Y):\n one_hot_Y = np.zeros((Y.size, int(Y.max()) + 1))\n one_hot_Y[np.arange(Y.size), Y] = 1\n one_hot_Y = one_hot_Y.T\n return one_hot_Y\n \ndef forward_prop(W1, b1, W2, b2, k0, k1, X):\n Z1 = W1.dot(X) + b1\n A1 = activation_fn(Z1, k0, k1)\n Z2 = W2.dot(A1) + b2\n A2 = softmax(Z2)\n return Z1, A1, Z2, A2\n\ndef backward_prop(Z1, A1, Z2, A2, W1, W2, k0, k1, X, Y):\n one_hot_Y = one_hot(Y)\n dZ2 = A2 - one_hot_Y\n dW2 = 1 / m * dZ2.dot(A1.T)\n db2 = 1 / m * np.sum(dZ2)\n dZ1 = W2.T.dot(dZ2) * activation_fn_deriv(Z1, k0, k1)\n dW1 = 1 / m * dZ1.dot(X.T)\n db1 = 1 / m * np.sum(dZ1)\n \n da1 = W2.T.dot(dZ2)\n dk0 = 1 / m * np.sum(da1)\n temp = np.multiply(da1,Z1)\n dk1 = np.mean(temp)\n return dW1, db1, dW2, db2, dk0, dk1\n\ndef update_params(W1, b1, W2, b2, dW1, db1, dW2, db2, k0, k1, dk0, dk1, alpha):\n W1 = W1 - alpha * dW1\n b1 = b1 - alpha * db1 \n W2 = W2 - alpha * dW2 \n b2 = b2 - alpha * db2\n k0 = k0 - alpha * dk0\n k1 = k1 - alpha * dk1\n return W1, b1, W2, b2, k0, k1",
"_____no_output_____"
],
[
"def get_predictions(A2):\n return np.argmax(A2, 0)\n\ndef get_accuracy(predictions, Y):\n #print(predictions, Y)\n return np.sum(predictions == Y) / Y.size",
"_____no_output_____"
],
[
"def cross_entropy(Y, A2, p):\n row, col = np.shape(A2)\n loss_CE = list()\n for i in range(col):\n pos = np.where(p[:,col-1]==1)\n loss = - p[pos,col-1]*np.log(softmax(A2[:,col-1])[pos])\n loss_CE.append(loss)\n return np.mean(loss_CE)\n\ndef gradient_descent(X, Y, X_test, Y_test, alpha, iterations):\n W1, b1, W2, b2, k0, k1 = init_params()\n k0_list = list()\n k1_list = list()\n loss_entropy_train = list()\n loss_entropy_test = list()\n epochs = list()\n \n for i in range(iterations):\n Z1, A1, Z2, A2 = forward_prop(W1, b1, W2, b2, k0, k1, X)\n dW1, db1, dW2, db2, dk0, dk1 = backward_prop(Z1, A1, Z2, A2, W1, W2, k0, k1, X, Y)\n W1, b1, W2, b2, k0, k1 = update_params(W1, b1, W2, b2, dW1, db1, dW2, db2, k0, k1, dk0, dk1, alpha)\n \n epochs.append(i)\n k0_list.append(k0)\n k1_list.append(k1)\n \n true_label = one_hot(Y)\n loss_entropy_train.append(cross_entropy(Y, A2, true_label))\n \n Z1_test, A1_test, Z2_test, A2_test = forward_prop(W1, b1, W2, b2, k0, k1, X_test)\n true_label_test = one_hot(Y_test)\n loss_entropy_test.append(cross_entropy(Y_test, A2_test, true_label_test))\n \n if i % 1000 == 0: \n print(\"Iteration: \", i)\n predictions = get_predictions(A2)\n print(get_accuracy(predictions, Y))\n print(\"Loss (train): {:.3g} nats\".format(loss_entropy_train[i-1]))\n print(\"Loss (test): {:.3g} nats\".format(loss_entropy_test[i-1]))\n return W1, b1, W2, b2, k0, k1, k0_list, k1_list, epochs, loss_entropy_train, loss_entropy_test",
"_____no_output_____"
],
[
"W1, b1, W2, b2, k0, k1, k0_list, k1_list, epochs, loss_entropy_train, loss_entropy_test = gradient_descent(X_train, Y_train, X_test, Y_test, 0.10, 2000) # Learning rate alpha = 0.1, total epochs = 2000",
"Iteration: 0\n0.081\nLoss (train): 2.41 nats\nLoss (test): 2.4 nats\nIteration: 1000\n0.836\nLoss (train): 1.47 nats\nLoss (test): 1.49 nats\n"
],
[
"print(f'The final values of parameters: k_0 = {k0}, k_1 = {k1}')",
"The final values of parameters: k_0 = [[0.53145857]], k_1 = [[1.63123416]]\n"
],
[
"def make_predictions(X, W1, b1, W2, b2, k0, k1):\n _, _, _, A2 = forward_prop(W1, b1, W2, b2, k0, k1, X)\n predictions = get_predictions(A2)\n return predictions",
"_____no_output_____"
],
[
"# Train data accuracy\ntrain_data_predictions = make_predictions(X_train, W1, b1, W2, b2, k0, k1)\nget_accuracy(train_data_predictions, Y_train)",
"_____no_output_____"
],
[
"# Test data accuracy\ntest_data_predictions = make_predictions(X_test, W1, b1, W2, b2, k0, k1)\nget_accuracy(test_data_predictions, Y_test)",
"_____no_output_____"
],
[
"fig, axs = plt.subplots(2,2,constrained_layout=True)\naxs[0,0].plot(epochs, np.squeeze(k0_list))\naxs[0,0].set(xlabel = 'Epochs', ylabel = 'k_0')\naxs[0,1].plot(epochs, np.squeeze(k1_list))\naxs[0,1].set(xlabel = 'Epochs', ylabel = 'k_1')\naxs[1,0].plot(epochs,loss_entropy_train,'-b',label = 'Train')\naxs[1,0].plot(epochs,loss_entropy_test,'-r',label = 'Test')\naxs[1,0].legend()\naxs[1,0].set(xlabel = 'Epochs', ylabel = 'Cross entropy loss')\naxs[1,1].plot(loss_entropy_train,loss_entropy_test)\naxs[1,1].set(xlabel = 'Cross entropy loss (train)', ylabel = 'Cross entropy loss (test)')\n\nfrom google.colab import files\nplt.savefig('mnist_fig.jpg')\nfiles.download('mnist_fig.jpg')",
"_____no_output_____"
],
[
"from sklearn.metrics import f1_score\nf1 = f1_score(Y_test, test_data_predictions,average = 'weighted')\nprint(f'F1-Score is: {f1}')",
"F1-Score is: 0.8140924474669023\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e749fef1667bf8c9ce0c85fbe444957c28afc2c1 | 123,607 | ipynb | Jupyter Notebook | .ipynb_checkpoints/NeuralNetworks5-checkpoint.ipynb | shubhamchouksey/IRIS_Dataset | 664e0dd42130b152b191170da8d0cb0fc3d7ff9b | [
"MIT"
] | 2 | 2020-04-03T10:12:47.000Z | 2020-04-04T06:58:14.000Z | .ipynb_checkpoints/NeuralNetworks5-checkpoint.ipynb | shubhamchouksey/IRIS_Dataset | 664e0dd42130b152b191170da8d0cb0fc3d7ff9b | [
"MIT"
] | null | null | null | .ipynb_checkpoints/NeuralNetworks5-checkpoint.ipynb | shubhamchouksey/IRIS_Dataset | 664e0dd42130b152b191170da8d0cb0fc3d7ff9b | [
"MIT"
] | 1 | 2020-05-07T06:30:59.000Z | 2020-05-07T06:30:59.000Z | 256.446058 | 42,828 | 0.908403 | [
[
[
"# Classification with Neural Networks\n\n**Neural networks** are a powerful set of machine learning algorithms. Neural network use one or more **hidden layers** of multiple **hidden units** to perform **function approximation**. The use of multiple hidden units in one or more layers, allows neural networks to approximate complex functions. Neural network models capable of approximating complex functions are said to have high **model capacity**. This property allows neural networks to solve complex machine learning problems. \n\nHowever, because of the large number of hidden units, neural networks have many **weights** or **parameters**. This situation often leads to **over-fitting** of neural network models, which limits generalization. Thus, finding optimal hyperparameters when fitting neural network models is essential for good performance. \n\nAn additional issue with neural networks is **computational complexity**. Many optimization iterations are required. Each optimization iteration requires the update of a large number of parameters. ",
"_____no_output_____"
],
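As a hedged illustration of the capacity/over-fitting trade-off described above (a sketch only: the layer sizes and regularization strength are arbitrary, and `X`, `y` stand for any feature matrix and label vector rather than objects defined in this notebook):

```python
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import cross_val_score

# A small network (low capacity) versus a larger one with an L2 penalty (alpha) to curb over-fitting
low_capacity = MLPClassifier(hidden_layer_sizes=(10,), max_iter=1000)
high_capacity = MLPClassifier(hidden_layer_sizes=(100, 100), alpha=0.01, max_iter=1000)

for name, model in [('small net', low_capacity), ('large net', high_capacity)]:
    scores = cross_val_score(model, X, y, cv=5)  # X, y: placeholder data
    print(name, scores.mean())
```

Comparing the cross-validated scores of the two models gives a rough feel for whether the extra capacity helps or merely over-fits.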
[
"## Example: Iris dataset\n\nAs a first example you will use neutral network models to classify the species of iris flowers using the famous iris dataset. \n\nAs a first step, execute the code in the cell below to load the required packages to run the rest of this notebook. ",
"_____no_output_____"
]
],
[
[
"from sklearn.neural_network import MLPClassifier\nfrom sklearn import preprocessing\n#from statsmodels.api import datasets\nfrom sklearn import datasets ## Get dataset from sklearn\nimport sklearn.model_selection as ms\nimport sklearn.metrics as sklm\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport numpy.random as nr\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"To get a feel for these data, you will now load and plot them. The code in the cell below does the following:\n\n1. Loads the iris data as a Pandas data frame. \n2. Adds column names to the data frame.\n3. Displays all 4 possible scatter plot views of the data. \n\nExecute this code and examine the results. ",
"_____no_output_____"
]
],
[
[
"def plot_iris(iris):\n '''Function to plot iris data by type'''\n setosa = iris[iris['Species'] == 'setosa']\n versicolor = iris[iris['Species'] == 'versicolor']\n virginica = iris[iris['Species'] == 'virginica']\n fig, ax = plt.subplots(2, 2, figsize=(12,12))\n x_ax = ['Sepal_Length', 'Sepal_Width']\n y_ax = ['Petal_Length', 'Petal_Width']\n for i in range(2):\n for j in range(2):\n ax[i,j].scatter(setosa[x_ax[i]], setosa[y_ax[j]], marker = 'x')\n ax[i,j].scatter(versicolor[x_ax[i]], versicolor[y_ax[j]], marker = 'o')\n ax[i,j].scatter(virginica[x_ax[i]], virginica[y_ax[j]], marker = '+')\n ax[i,j].set_xlabel(x_ax[i])\n ax[i,j].set_ylabel(y_ax[j])\n \n## Import the dataset from sklearn.datasets\niris = datasets.load_iris()\n\n## Create a data frame from the dictionary\nspecies = [iris.target_names[x] for x in iris.target]\niris = pd.DataFrame(iris['data'], columns = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'])\niris['Species'] = species\n\n## Plot views of the iris data \nplot_iris(iris) ",
"_____no_output_____"
]
],
[
[
"You can see that Setosa (in blue) is well separated from the other two categories. The Versicolor (in orange) and the Virginica (in green) show considerable overlap. The question is how well our classifier will seperate these categories. \n\nScikit Learn classifiers require numerically coded numpy arrays for the features and as a label. The code in the cell below does the following processing:\n1. Creates a numpy array of the features.\n2. Numerically codes the label using a dictionary lookup, and converts it to a numpy array. \n\nExecute this code.",
"_____no_output_____"
]
],
[
[
"Features = np.array(iris[['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']])\n\nlevels = {'setosa':0, 'versicolor':1, 'virginica':2}\nLabels = np.array([levels[x] for x in iris['Species']])",
"_____no_output_____"
]
],
[
[
"Next, execute the code in the cell below to split the dataset into test and training set. Notice that unusually, 100 of the 150 cases are being used as the test dataset. ",
"_____no_output_____"
]
],
[
[
"## Randomly sample cases to create independent training and test data\nnr.seed(1115)\nindx = range(Features.shape[0])\nindx = ms.train_test_split(indx, test_size = 100)\nX_train = Features[indx[0],:]\ny_train = np.ravel(Labels[indx[0]])\nX_test = Features[indx[1],:]\ny_test = np.ravel(Labels[indx[1]])",
"_____no_output_____"
]
],
[
[
"As is always the case with machine learning, numeric features must be scaled. The code in the cell below performs the following processing:\n\n1. A Zscore scale object is defined using the `StandarScaler` function from the Scikit Learn preprocessing package. \n2. The scaler is fit to the training features. Subsequently, this scaler is used to apply the same scaling to the test data and in production. \n3. The training features are scaled using the `transform` method. \n\nExecute this code.",
"_____no_output_____"
]
],
[
[
"scale = preprocessing.StandardScaler()\nscale.fit(X_train)\nX_train = scale.transform(X_train)",
"_____no_output_____"
]
],
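A small aside on the 'in production' point above: the fitted scaler can be persisted together with the model so that scoring code applies exactly the same transformation later. This is only a sketch; joblib is an assumption here and is not used elsewhere in this notebook.

```python
import joblib

# Save the scaler fitted on the training features
joblib.dump(scale, 'iris_scaler.joblib')

# Later, in scoring/production code, re-load it and transform incoming rows
scale_loaded = joblib.load('iris_scaler.joblib')
# X_new below is a placeholder for raw feature rows arriving at scoring time
# X_new_scaled = scale_loaded.transform(X_new)
```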
[
[
"Now you will define and fit a neural network model. The code in the cell below defines a single hidden layer neural network model with 50 units. The code uses the MLPClassifer function from the Scikit Lean neural_network package. The model is then fit. Execute this code.",
"_____no_output_____"
]
],
[
[
"nr.seed(1115)\nnn_mod = MLPClassifier(hidden_layer_sizes = (50,))\nnn_mod.fit(X_train, y_train)",
"/home/ins/anaconda3/lib/python3.7/site-packages/sklearn/neural_network/multilayer_perceptron.py:566: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n"
]
],
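The ConvergenceWarning above just means the optimizer stopped at the default `max_iter=200` before converging. One hedged way to deal with this, and with the hyperparameter choices discussed below, is to raise `max_iter` and run a small grid search; the grid values here are arbitrary examples, not tuned settings.

```python
from sklearn.model_selection import GridSearchCV

param_grid = {
    'hidden_layer_sizes': [(25,), (50,), (100,)],
    'alpha': [0.0001, 0.001, 0.01],  # L2 regularization strength
}
search = GridSearchCV(MLPClassifier(max_iter=1000), param_grid, cv=5)
search.fit(X_train, y_train)
print(search.best_params_, search.best_score_)
```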
[
[
"Notice that the many neural network model object hyperparameters are displayed. Optimizing these parameters for a given situation can be quite time consuming. \n\nNext, the code in the cell below performs the following processing to score the test data subset:\n1. The test features are scaled using the scaler computed for the training features. \n2. The `predict` method is used to compute the scores from the scaled features. \n\nExecute this code. ",
"_____no_output_____"
]
],
[
[
"X_test = scale.transform(X_test)\nscores = nn_mod.predict(X_test)",
"_____no_output_____"
]
],
[
[
"It is time to evaluate the model results. Keep in mind that the problem has been made difficult deliberately, by having more test cases than training cases. \n\nThe iris data has three species categories. Therefore it is necessary to use evaluation code for a three category problem. The function in the cell below extends code from pervious labs to deal with a three category problem. Execute this code and examine the results.",
"_____no_output_____"
]
],
[
[
"def print_metrics_3(labels, scores):\n \n conf = sklm.confusion_matrix(labels, scores)\n print(' Confusion matrix')\n print(' Score Setosa Score Versicolor Score Virginica')\n print('Actual Setosa %6d' % conf[0,0] + ' %5d' % conf[0,1] + ' %5d' % conf[0,2])\n print('Actual Versicolor %6d' % conf[1,0] + ' %5d' % conf[1,1] + ' %5d' % conf[1,2])\n print('Actual Vriginica %6d' % conf[2,0] + ' %5d' % conf[2,1] + ' %5d' % conf[2,2])\n ## Now compute and display the accuracy and metrics\n print('')\n print('Accuracy %0.2f' % sklm.accuracy_score(labels, scores))\n metrics = sklm.precision_recall_fscore_support(labels, scores)\n print(' ')\n print(' Setosa Versicolor Virginica')\n print('Num case %0.2f' % metrics[3][0] + ' %0.2f' % metrics[3][1] + ' %0.2f' % metrics[3][2])\n print('Precision %0.2f' % metrics[0][0] + ' %0.2f' % metrics[0][1] + ' %0.2f' % metrics[0][2])\n print('Recall %0.2f' % metrics[1][0] + ' %0.2f' % metrics[1][1] + ' %0.2f' % metrics[1][2])\n print('F1 %0.2f' % metrics[2][0] + ' %0.2f' % metrics[2][1] + ' %0.2f' % metrics[2][2])\n \nprint_metrics_3(y_test, scores) ",
" Confusion matrix\n Score Setosa Score Versicolor Score Virginica\nActual Setosa 34 1 0\nActual Versicolor 0 25 9\nActual Vriginica 0 2 29\n\nAccuracy 0.88\n \n Setosa Versicolor Virginica\nNum case 35.00 34.00 31.00\nPrecision 1.00 0.89 0.76\nRecall 0.97 0.74 0.94\nF1 0.99 0.81 0.84\n"
]
],
[
[
"Examine these results. Notice the following:\n1. The confusion matrix has dimension 3X3. You can see that most cases are correctly classified. \n2. The overall accuracy is 0.88. Since the classes are roughly balanced, this metric indicates relatively good performance of the classifier, particularly since it was only trained on 50 cases. \n3. The precision, recall and F1 for each of the classes is relatively good. Versicolor has the worst metrics since it has the largest number of misclassified cases. \n\nTo get a better feel for what the classifier is doing, the code in the cell below displays a set of plots showing correctly (as '+') and incorrectly (as 'o') cases, with the species color-coded. Execute this code and examine the results. ",
"_____no_output_____"
]
],
[
[
"def plot_iris_score(iris, y_test, scores):\n '''Function to plot iris data by type'''\n ## Find correctly and incorrectly classified cases\n true = np.equal(scores, y_test).astype(int)\n \n ## Create data frame from the test data\n iris = pd.DataFrame(iris)\n levels = {0:'setosa', 1:'versicolor', 2:'virginica'}\n iris['Species'] = [levels[x] for x in y_test]\n iris.columns = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width', 'Species']\n \n ## Set up for the plot\n fig, ax = plt.subplots(2, 2, figsize=(12,12))\n markers = ['o', '+']\n x_ax = ['Sepal_Length', 'Sepal_Width']\n y_ax = ['Petal_Length', 'Petal_Width']\n \n for t in range(2): # loop over correct and incorect classifications\n setosa = iris[(iris['Species'] == 'setosa') & (true == t)]\n versicolor = iris[(iris['Species'] == 'versicolor') & (true == t)]\n virginica = iris[(iris['Species'] == 'virginica') & (true == t)]\n # loop over all the dimensions\n for i in range(2):\n for j in range(2):\n ax[i,j].scatter(setosa[x_ax[i]], setosa[y_ax[j]], marker = markers[t], color = 'blue')\n ax[i,j].scatter(versicolor[x_ax[i]], versicolor[y_ax[j]], marker = markers[t], color = 'orange')\n ax[i,j].scatter(virginica[x_ax[i]], virginica[y_ax[j]], marker = markers[t], color = 'green')\n ax[i,j].set_xlabel(x_ax[i])\n ax[i,j].set_ylabel(y_ax[j])\n\nplot_iris_score(X_test, y_test, scores)",
"_____no_output_____"
]
],
[
[
"Examine these plots. You can see how the classifier has divided the feature space between the classes. Notice that most of the errors occur in the overlap region between Virginica and Versicolor. This behavior is to be expected. There is an error in classifying Setosa which is a bit surprising, and which probably arises from the projection of the division between classes. ",
"_____no_output_____"
],
[
"Is it possible that a more complex neural network would separate these cases better? The more complex model should have greater model capacity, but will be more susceptible to over-fitting. The code in the cell below uses a neural network with 2 hidden layers and 100 units per layer, coded as (100,100). This model is fit with the training data and displays the evaluation of the model. \n\nExecute this code, and answer **Question 1** on the course page.",
"_____no_output_____"
]
],
[
[
"nr.seed(1115)\nnn_mod = MLPClassifier(hidden_layer_sizes = (100,100),\n max_iter=300)\nnn_mod.fit(X_train, y_train)\nscores = nn_mod.predict(X_test)\nprint_metrics_3(y_test, scores) \nplot_iris_score(X_test, y_test, scores)",
" Confusion matrix\n Score Setosa Score Versicolor Score Virginica\nActual Setosa 35 0 0\nActual Versicolor 0 29 5\nActual Vriginica 0 2 29\n\nAccuracy 0.93\n \n Setosa Versicolor Virginica\nNum case 35.00 34.00 31.00\nPrecision 1.00 0.94 0.85\nRecall 1.00 0.85 0.94\nF1 1.00 0.89 0.89\n"
]
],
[
[
"These are remarkably good results. Apparently, adding additional model capacity allowed the neural network model to perform exceptionally well. There are only 7 misclassified cases, giving an overall accuracy of 0.93. ",
"_____no_output_____"
],
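Because only 50 cases were used for training, a quick hedged check that the larger network is not simply memorizing them is to compare training and test accuracy using the objects already defined above; a large gap would indicate over-fitting.

```python
# Training-set vs. test-set accuracy of the (100, 100) model
print('train accuracy %0.2f' % sklm.accuracy_score(y_train, nn_mod.predict(X_train)))
print('test accuracy  %0.2f' % sklm.accuracy_score(y_test, nn_mod.predict(X_test)))
```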
[
"## Summary\n\nIn this lab you have accomplished the following:\n1. Used neural models to classify the cases of the iris data. The model with greater capacity achieved significantly better results.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e74a10bcea499bdcecd63c88f2287ac75a1ee0ea | 36,839 | ipynb | Jupyter Notebook | src/notebooks/252-baseline-options-for-stacked-area-chart.ipynb | s-lasch/The-Python-Graph-Gallery | 1df060780e5e9cf763815581aad15da20f5a4213 | [
"0BSD"
] | 1 | 2022-01-28T09:36:36.000Z | 2022-01-28T09:36:36.000Z | src/notebooks/252-baseline-options-for-stacked-area-chart.ipynb | preguza/The-Python-Graph-Gallery | 4645ec59eaa6b8c8e2ff4eee86516ee3a7933b4d | [
"0BSD"
] | null | null | null | src/notebooks/252-baseline-options-for-stacked-area-chart.ipynb | preguza/The-Python-Graph-Gallery | 4645ec59eaa6b8c8e2ff4eee86516ee3a7933b4d | [
"0BSD"
] | null | null | null | 454.802469 | 34,224 | 0.943049 | [
[
[
"The `stackplot()` function of [matplotlib](http://python-graph-gallery.com/matplotlib/) allows to make [stacked area chart](http://python-graph-gallery.com/stacked-area-plot/). It provides a **baseline** argument that allows to custom the position of the areas around the baseline. Four possibilities are exist, and they are represented here. This chart is strongly inspired from the [Hooked](http://thoppe.github.io/) answer on this [stack overflow question](https://stackoverflow.com/questions/2225995/how-can-i-create-stacked-line-graph-with-matplotlib), thanks to him!",
"_____no_output_____"
]
],
[
[
"# libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n \n# Create data\nX = np.arange(0, 10, 1)\nY = X + 5 * np.random.random((5, X.size))\n \n# There are 4 types of baseline we can use:\nbaseline = [\"zero\", \"sym\", \"wiggle\", \"weighted_wiggle\"]\n \n# Let's make 4 plots, 1 for each baseline\nfor n, v in enumerate(baseline):\n if n<3 :\n plt.tick_params(labelbottom='off')\n plt.subplot(2 ,2, n + 1)\n plt.stackplot(X, *Y, baseline=v)\n plt.title(v)\n plt.tight_layout()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
e74a11aadfd453ad669f6f63bd5baabe62e9ff33 | 17,340 | ipynb | Jupyter Notebook | python/multimodal.ipynb | erick2307/multimodal-evac | 1ebc001668a6fdf80f9ba26adc1e21b13c58f1f6 | [
"MIT"
] | null | null | null | python/multimodal.ipynb | erick2307/multimodal-evac | 1ebc001668a6fdf80f9ba26adc1e21b13c58f1f6 | [
"MIT"
] | null | null | null | python/multimodal.ipynb | erick2307/multimodal-evac | 1ebc001668a6fdf80f9ba26adc1e21b13c58f1f6 | [
"MIT"
] | null | null | null | 17,340 | 17,340 | 0.662341 | [
[
[
"# BASIC MODEL",
"_____no_output_____"
],
[
"## Classes",
"_____no_output_____"
]
],
[
[
"#import libraries\nimport numpy as np\nimport pandas as pd\nimport pylab as plt\nimport time\nimport glob, os, os.path\nimport osmnx as ox\nimport networkx as nx\nimport cv2",
"_____no_output_____"
],
[
"class Vehicle:\n \"\"\"A vehicle class\"\"\"\n count = 0 \n vehicles = []\n def __init__(self,verb=False):\n self.v_uid = self.count #unique id\n self.v_home = (0,0) #initial location\n self.v_dead = False #to check if it is out of the model\n self.v_state = 0 #code for state=> e.g 0:wait; 1:moving\n self.v_pos = (0,0)\n self.v_origin = (0,0) #start location\n self.v_destination = (0,0) #end location\n self.v_velocity = (0,0) #needs to be adjusted based on unit of model\n self.v_passengers = []\n #to control uid and an aggregated list\n self.__class__.count += 1\n Vehicle.vehicles.append(self)\n",
"_____no_output_____"
],
[
"class Pedestrian:\n \"\"\"A pedestrian class\"\"\"\n count = 0\n peds = []\n def __init__(self,verb=False):\n self.p_uid = self.count\n self.p_age = 0\n self.p_gender = 0 #0:female;1:male\n self.p_home = (0,0)\n self.p_state = 0\n self.p_pos = (0,0)\n self.p_origin = (0,0)\n self.p_destination = (0,0)\n self.p_velocity = (0,0)\n self.p_show = 1\n #to control uid and an aggregated list\n self.__class__.count +=1\n Pedestrian.peds.append(self)",
"_____no_output_____"
],
[
"class Environment:\n \"\"\"A road network environment class\"\"\"\n def __init__(self,bbox,ntype='drive',verb=False):\n #bbox is a dict of north,south,east,west lat,lon edges of target area\n self.e_bbox = {'north': bbox['north'], 'south': bbox['south'],\n 'east': bbox['east'], 'west': bbox['west']}\n self.e_network(ntype=ntype,verb=verb)\n\n def e_network(self,ntype='drive',verb=False):\n # Obtain the roadmap data from OpenStreetMap by using OSMNX \n self.e_G = ox.graph_from_bbox(self.e_bbox['north'], self.e_bbox['south'], \n self.e_bbox['east'], self.e_bbox['west'], \n network_type=ntype)\n \n def e_project_network(self,verb=False):\n #Project network\n self.e_Gp = ox.project_graph(self.e_G)\n \n def e_get_nodes(self,verb=False):\n self.e_nodes, self.e_edges = ox.graph_to_gdfs(self.e_G)\n return self.e_nodes\n \n def e_get_edges(self,verb=False):\n self.e_nodes, self.e_edges = ox.graph_to_gdfs(self.e_G)\n return self.e_edges\n\n def e_plot(self,verb=False):\n #returning a fig and ax\n return ox.plot_graph(self.e_G,bgcolor='w',node_color='k',figsize=(16,8),\n node_alpha=0.1,edge_color=(0,0,0,0.5),show=False,\n close=False)",
"_____no_output_____"
],
[
"class Model:\n \"A model class\"\n def __init__(self,bbox=None,ntype='drive',vehicles=10,\n population=50,verb=False):\n self.Env = Environment(bbox,ntype=ntype,verb=verb)\n self.nodes = self.Env.e_get_nodes()\n self.edges = self.Env.e_edges\n for i in range(vehicles):\n v = Vehicle()\n s = self.nodes.sample()\n v.v_home = (float(s['x']),\n float(s['y']))\n for i in range(population):\n p = Pedestrian()\n s = self.nodes.sample()\n p.p_home = (float(s['x']),\n float(s['y']))\n\n def go(self,sim_time=5,plot=False,video=False,verb=False):\n for t in range(sim_time):\n for i,v in enumerate(Vehicle.vehicles):\n s = self.nodes.sample()\n v.v_home = (float(s['x']),\n float(s['y']))\n if plot:\n self.plot(id=t,save=True,show=False)\n if video:\n self.video()\n\n def plot(self,id=0,save=False,show=False,verb=False):\n #plot network\n fig,ax = self.Env.e_plot()\n fig.tight_layout()\n #plot vehicles\n x = [v.v_home[0] for v in Vehicle.vehicles]\n y = [v.v_home[1] for v in Vehicle.vehicles]\n ax.scatter(x,y,c='b',marker='s')\n #plot pedestrians\n x = [p.p_home[0] for p in Pedestrian.peds]\n y = [p.p_home[1] for p in Pedestrian.peds]\n ax.scatter(x,y,c='m',marker='.')\n if save:\n plt.savefig(f'./img/fig{id:04d}.png')\n plt.close()\n print(f'figure {id:04d} plotted')\n if show:\n plt.show()\n \n def video(self,image_folder='./img',\n video_name = './anim.mp4',\n fps=1,verbose=False):\n images = [img for img in sorted(os.listdir(image_folder)) if img.endswith(\".png\")]\n frame = cv2.imread(os.path.join(image_folder, images[0]))\n height, width, layers = frame.shape\n video = cv2.VideoWriter(video_name,cv2.VideoWriter_fourcc(*'MP4V'),#('M','J','P','G'), \n fps, (width,height))\n \n for image in images:\n if verbose:\n print(image)\n video.write(cv2.imread(os.path.join(image_folder, image)))\n cv2.destroyAllWindows()\n video.release()",
"_____no_output_____"
]
],
[
[
"## Parameters",
"_____no_output_____"
],
[
"For bounding box (bbox) of other areas:\n[OSM](https://www.openstreetmap.org/export#map=5/33.907/138.460)",
"_____no_output_____"
]
],
[
[
"#BBOX\n#small area is faster (by default this is in the class)\narahama = {'north': 38.2271, 'south': 38.2077,\n 'east': 140.9894, 'west': 140.9695}\n\n#this is the same extension from Abe san's simulation\nkochi = {'north': 33.5978428707312631, 'south': 33.3844862625877710,\n 'east': 133.7029719124942346, 'west': 133.3254475395832799}",
"_____no_output_____"
]
],
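If corner coordinates for a new area are not at hand, OSMnx can also build the network directly from a place name instead of a bounding box. This is only an illustrative alternative (the place string is an example); the Model and Environment classes above expect a bbox dict.

```python
# Query OpenStreetMap by place name instead of copying a bbox from the export page
G_place = ox.graph_from_place('Kochi, Japan', network_type='walk')
nodes_place, edges_place = ox.graph_to_gdfs(G_place)
```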
[
[
"## Running the model",
"_____no_output_____"
]
],
[
[
"%%time\n#create a model class of an area with number of vehicles and population\nM = Model(bbox=kochi,ntype='walk',vehicles=10,population=50,verb=False)",
"CPU times: user 57.5 s, sys: 1.31 s, total: 58.8 s\nWall time: 1min 16s\n"
],
[
"%%time\n#plot current situation ('id' is an integer)\nM.plot(id=1,save=True,show=True)",
"figure 0001 plotted\nCPU times: user 20.4 s, sys: 432 ms, total: 20.9 s\nWall time: 23.4 s\n"
],
[
"%%time\n#number of steps for simulation, can plot and make video same time\nM.go(sim_time=100,plot=True,video=True)",
"figure 0000 plotted\nfigure 0001 plotted\nfigure 0002 plotted\nfigure 0003 plotted\nfigure 0004 plotted\nfigure 0005 plotted\nfigure 0006 plotted\nfigure 0007 plotted\nfigure 0008 plotted\nfigure 0009 plotted\nfigure 0010 plotted\nfigure 0011 plotted\nfigure 0012 plotted\nfigure 0013 plotted\nfigure 0014 plotted\nfigure 0015 plotted\nfigure 0016 plotted\nfigure 0017 plotted\nfigure 0018 plotted\nfigure 0019 plotted\nfigure 0020 plotted\nfigure 0021 plotted\nfigure 0022 plotted\nfigure 0023 plotted\nfigure 0024 plotted\nfigure 0025 plotted\nfigure 0026 plotted\nfigure 0027 plotted\nfigure 0028 plotted\nfigure 0029 plotted\nfigure 0030 plotted\nfigure 0031 plotted\nfigure 0032 plotted\nfigure 0033 plotted\nfigure 0034 plotted\nfigure 0035 plotted\nfigure 0036 plotted\nfigure 0037 plotted\nfigure 0038 plotted\nfigure 0039 plotted\nfigure 0040 plotted\nfigure 0041 plotted\nfigure 0042 plotted\nfigure 0043 plotted\nfigure 0044 plotted\nfigure 0045 plotted\nfigure 0046 plotted\nfigure 0047 plotted\nfigure 0048 plotted\nfigure 0049 plotted\nfigure 0050 plotted\nfigure 0051 plotted\nfigure 0052 plotted\nfigure 0053 plotted\nfigure 0054 plotted\nfigure 0055 plotted\nfigure 0056 plotted\nfigure 0057 plotted\nfigure 0058 plotted\nfigure 0059 plotted\nfigure 0060 plotted\nfigure 0061 plotted\nfigure 0062 plotted\nfigure 0063 plotted\nfigure 0064 plotted\nfigure 0065 plotted\nfigure 0066 plotted\nfigure 0067 plotted\nfigure 0068 plotted\nfigure 0069 plotted\nfigure 0070 plotted\nfigure 0071 plotted\nfigure 0072 plotted\nfigure 0073 plotted\nfigure 0074 plotted\nfigure 0075 plotted\nfigure 0076 plotted\nfigure 0077 plotted\nfigure 0078 plotted\nfigure 0079 plotted\nfigure 0080 plotted\nfigure 0081 plotted\nfigure 0082 plotted\nfigure 0083 plotted\nfigure 0084 plotted\nfigure 0085 plotted\nfigure 0086 plotted\nfigure 0087 plotted\nfigure 0088 plotted\nfigure 0089 plotted\nfigure 0090 plotted\nfigure 0091 plotted\nfigure 0092 plotted\nfigure 0093 plotted\nfigure 0094 plotted\nfigure 0095 plotted\nfigure 0096 plotted\nfigure 0097 plotted\nfigure 0098 plotted\nfigure 0099 plotted\nCPU times: user 21.2 s, sys: 757 ms, total: 21.9 s\nWall time: 22.1 s\n"
],
[
"%%time\n#only to make video\nM.video()",
"CPU times: user 841 ms, sys: 42.7 ms, total: 884 ms\nWall time: 920 ms\n"
]
],
[
[
"# Overlaying the inundation data",
"_____no_output_____"
]
],
[
[
"#this is the same extension from Abe san's simulation\nkochi = {'north': 33.5978428707312631, 'south': 33.3844862625877710,\n 'east': 133.7029719124942346, 'west': 133.3254475395832799}\n\ne = Environment(kochi)",
"_____no_output_____"
],
[
"fig = e.e_plot()",
"_____no_output_____"
],
[
"nodes = e.e_get_nodes()\nedges = e.e_get_edges()",
"_____no_output_____"
],
[
"import rasterio\nfrom rasterio.plot import show",
"_____no_output_____"
],
[
"raster = rasterio.open('./max5_wgs84_kochishi.tif')",
"_____no_output_____"
],
[
"tsu = []\nfor row in nodes.iterrows():\n x = row[1].x\n y = row[1].y\n row, col = raster.index(x,y)\n try:\n tsu.append(raster.read(1)[row,col])\n except:\n tsu.append(-99)",
"_____no_output_____"
],
[
"for i,v in enumerate(tsu):\n if v == -99.:\n tsu[i]=0",
"_____no_output_____"
],
[
"max(tsu)",
"_____no_output_____"
],
[
"nodes['tsu']=tsu",
"_____no_output_____"
],
[
"nx.set_node_attributes(e.e_G, values=nodes['tsu'], name=\"depth\")\nnc = ox.plot.get_node_colors_by_attr(e.e_G, 'depth', cmap='Blues', start=0, stop=1, na_color='none', equal_size=True)\nncr = ox.plot.get_node_colors_by_attr(e.e_G, 'depth', cmap='Reds', start=0, stop=1, na_color='none', equal_size=False)",
"_____no_output_____"
],
[
"import matplotlib as mpl\nimport matplotlib.cm as cm\ncmap = plt.cm.get_cmap('Blues')\nnorm=plt.Normalize(vmin=nodes['tsu'].min(), vmax=nodes['tsu'].max())\nsm = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)\nsm.set_array([])\n\nfig,ax = ox.plot_graph(e.e_G,figsize=(16,8),bgcolor='w',node_color=nc,\n node_alpha=0.8,node_edgecolor='grey',edge_color='grey',show=False,\n close=False)\nfig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax, orientation='vertical')\nfig.savefig('inundation.png')\n ",
"_____no_output_____"
],
[
"nodes",
"_____no_output_____"
],
[
"edges",
"_____no_output_____"
],
[
"# edges['risk']=0.5*(nodes.loc[edges['u']]['tsu']+nodes.loc[edges['v']]['tsu'])\nrisk=[]\nfor row in edges.iterrows():\n risk.append(0.5*(nodes.loc[row[1].u]['tsu']+nodes.loc[row[1].v]['tsu']))",
"_____no_output_____"
],
[
"edges['risk']=risk",
"_____no_output_____"
],
[
"p = ox.shortest_path(e.e_G, 253768914, 8721376111, weight='risk')\nox.plot.plot_graph_route(e.e_G, p, route_color='r', route_linewidth=2, route_alpha=0.5, orig_dest_size=100, ax=ax)\nfig.savefig('route.png')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74a142c729fa6d7d5bcd6719b753f018594a53e | 834,813 | ipynb | Jupyter Notebook | project/tgkim/newton/notebooks/NewtonRNN (PyTorch - SHOF).ipynb | SYTEARK/ML2022 | 36d6250034cfd4b8de97314a3e2e8dddd31a25be | [
"MIT"
] | 1 | 2022-02-05T04:12:27.000Z | 2022-02-05T04:12:27.000Z | project/tgkim/newton/notebooks/NewtonRNN (PyTorch - SHOF).ipynb | SYTEARK/ML2022 | 36d6250034cfd4b8de97314a3e2e8dddd31a25be | [
"MIT"
] | null | null | null | project/tgkim/newton/notebooks/NewtonRNN (PyTorch - SHOF).ipynb | SYTEARK/ML2022 | 36d6250034cfd4b8de97314a3e2e8dddd31a25be | [
"MIT"
] | null | null | null | 907.405435 | 447,572 | 0.956746 | [
[
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\n\n# PyTorch Lightning\nimport pytorch_lightning as pl\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.loggers import WandbLogger\n# from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor\n\nimport wandb\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport netCDF4",
"_____no_output_____"
],
[
"AVAIL_GPUS = min(1, torch.cuda.device_count())\nBATCH_SIZE = 128 if AVAIL_GPUS else 64",
"_____no_output_____"
],
[
"pl.seed_everything(125)",
"Global seed set to 125\n"
]
],
[
[
"## Load Data",
"_____no_output_____"
]
],
[
[
"sho = netCDF4.Dataset('../data/sho_friction2.nc').variables\nt_sho = np.array(sho['t'][:], dtype=np.float32)\ns_sho = np.array(sho['s'][:], dtype=np.float32)\nv_sho = np.array(sho['v'][:], dtype=np.float32)",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 6), dpi=150)\nplt.plot(t_sho, s_sho)\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 6), dpi=150)\nplt.plot(t_sho, v_sho)\nplt.show()",
"_____no_output_____"
],
[
"# X_total = np.column_stack([s_sho, v_sho])\nX_total = s_sho.reshape(-1, 1)\nsc = MinMaxScaler()\nX_normalized = sc.fit_transform(X_total)",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 6), dpi=150)\nplt.plot(t_sho, X_normalized[:,0])\nplt.show()",
"_____no_output_____"
],
[
"def sliding_window(data, seq_length):\n x = []\n y = []\n\n for i in range(len(data)-seq_length-1):\n _x = data[i:(i+seq_length)]\n _y = data[i+seq_length]\n x.append(_x)\n y.append(_y)\n\n return np.array(x),np.array(y)",
"_____no_output_____"
],
[
"def train_test(df, test_periods):\n train = df[:-test_periods]\n test = df[-test_periods:]\n return train, test",
"_____no_output_____"
],
[
"X, y = sliding_window(X_normalized, 10)",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
],
[
"y.shape",
"_____no_output_____"
],
[
"N = X.shape[0]\nN_train = 100\nN_test = N - N_train\n\nX_train = X[:N_train]\ny_train = y[:N_train]\nX_test = X[N_train:150]\ny_test = y[N_train:150]",
"_____no_output_____"
],
[
"class NewtonData(Dataset):\n def __init__(self, X, y):\n self.X = X\n self.y = y\n \n def __len__(self):\n return self.X.shape[0]\n \n def __getitem__(self, idx):\n return self.X[idx], self.y[idx]",
"_____no_output_____"
],
[
"ds_train = NewtonData(X_train, y_train)\nds_val = NewtonData(X_test, y_test)",
"_____no_output_____"
],
[
"len(ds_train)",
"_____no_output_____"
],
[
"len(ds_val)",
"_____no_output_____"
],
[
"ds_train[0][0].shape",
"_____no_output_____"
],
[
"dl_train = DataLoader(ds_train, batch_size=10, shuffle=True)",
"_____no_output_____"
],
[
"ds_train[0]",
"_____no_output_____"
],
[
"class SingleRNN(nn.Module):\n\n def __init__(self, input_size, hidden_size, dropout=0, bidirectional=False):\n super(SingleRNN, self).__init__()\n \n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_direction = int(bidirectional) + 1\n self.rnn = nn.LSTM(input_size, hidden_size, 1, dropout=dropout, batch_first=True, bidirectional=bidirectional)\n self.fc = nn.Linear(hidden_size, 1)\n \n def forward(self, x):\n # input shape: batch, seq, dim\n rnn_output, _ = self.rnn(x)\n output = self.fc(rnn_output)\n \n return output[:,-1,:]",
"_____no_output_____"
],
[
"model = SingleRNN(input_size=1, hidden_size=5)\ncriterion = nn.MSELoss()\noptimizer = optim.Adam(model.parameters(), lr=0.001)",
"_____no_output_____"
],
[
"model.eval()\ndl_iter = iter(dl_train)\nx_one, y_one = next(dl_iter)",
"_____no_output_____"
],
[
"x_one.shape",
"_____no_output_____"
],
[
"y_one.shape",
"_____no_output_____"
],
[
"model(x_one).shape",
"_____no_output_____"
],
[
"epochs = 500\nmodel.train()\nfor epoch in range(epochs+1):\n for x, y in dl_train:\n y_hat = model(x)\n optimizer.zero_grad()\n loss = criterion(y_hat, y)\n loss.backward()\n optimizer.step()\n \n if epoch%100==0:\n print(f'epoch: {epoch:4} loss:{loss.item():10.8f}')",
"epoch: 0 loss:0.18823907\nepoch: 100 loss:0.00012492\nepoch: 200 loss:0.00003303\nepoch: 300 loss:0.00006098\nepoch: 400 loss:0.00001842\nepoch: 500 loss:0.00000927\n"
],
[
"model.eval()",
"_____no_output_____"
],
[
"X, y = sliding_window(X_normalized, 10)",
"_____no_output_____"
],
[
"total_data = NewtonData(X, y)",
"_____no_output_____"
],
[
"dl = DataLoader(total_data, batch_size=len(total_data))\ndl_iter = iter(dl)\nX, y = next(dl_iter)",
"_____no_output_____"
],
[
"y.shape",
"_____no_output_____"
],
[
"t = t_sho[10:-1]\nt.shape",
"_____no_output_____"
],
[
"plt.plot(t, y.cpu().numpy())",
"_____no_output_____"
],
[
"y_pred = model(X)",
"_____no_output_____"
],
[
"plt.plot(t, y_pred.detach().numpy())",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 6), dpi=150)\nplt.plot(t, y.cpu().numpy())\nplt.plot(t, y_pred.detach().numpy())\nplt.show()",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
],
[
"t.shape",
"_____no_output_____"
],
[
"N_extrap = 190\nX_new = X[0]\nX_new.shape",
"_____no_output_____"
],
[
"X_new.view(1,-1,1)",
"_____no_output_____"
],
[
"y_ex = []\nt_ex = []\ndt = 1e-1\nfor i in range(N_extrap):\n y_new = model(X_new.view(1, -1, 1))\n X_new = torch.concat([X_new[1:], y_new])\n t_ex.append(t[0] + i * dt)\n y_ex.append(y_new.view(-1).detach().numpy())",
"_____no_output_____"
],
[
"# t_total = np.concatenate([t[0], t_ex])\nt_total = t_ex\n# y_pred_total = np.concatenate([y_pred.detach().numpy()[0:100], y_ex])\ny_pred_total = y_ex",
"_____no_output_____"
],
[
"shoo = netCDF4.Dataset('../data/sho_friction3.nc').variables\nt_shoo = np.array(shoo['t'][:], dtype=np.float32)\ns_shoo = np.array(shoo['s'][:], dtype=np.float32)\nv_shoo = np.array(shoo['v'][:], dtype=np.float32)",
"_____no_output_____"
],
[
"sc = MinMaxScaler()\ns_shoo_new = sc.fit_transform(s_shoo.reshape(-1, 1))",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 6), dpi=300)\nplt.plot(t, y.cpu().numpy(), '--', alpha=0.8)\nplt.plot(t_total, y_pred_total, '--', alpha=0.8)\nplt.plot(t_shoo[10:], s_shoo_new[10:], '--', alpha=0.8)\n\n# plt.axvline(t[100], linestyle='--', color='r')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74a2a09fac07fc971e554a77d78aa5e95681bf3 | 1,688 | ipynb | Jupyter Notebook | GerarModelo.ipynb | santiagosilas/simple-api-ml | 5ecf09cf49d19e3ded2996cf090002dbad880df1 | [
"MIT"
] | null | null | null | GerarModelo.ipynb | santiagosilas/simple-api-ml | 5ecf09cf49d19e3ded2996cf090002dbad880df1 | [
"MIT"
] | null | null | null | GerarModelo.ipynb | santiagosilas/simple-api-ml | 5ecf09cf49d19e3ded2996cf090002dbad880df1 | [
"MIT"
] | null | null | null | 17.583333 | 59 | 0.494076 | [
[
[
"from sklearn import datasets\niris = datasets.load_iris()\nX, y = iris.data, iris.target",
"_____no_output_____"
],
[
"from sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier()\nmodelo = knn.fit(X, y)",
"_____no_output_____"
],
[
"import joblib\njoblib.dump(modelo, 'model.joblib')",
"_____no_output_____"
],
[
"!ls",
"LICENSE\nREADME.md\nUntitled.ipynb\nmodel.joblib\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e74a2f29b2805ec8ccfd8854271d12830f95c6be | 6,516 | ipynb | Jupyter Notebook | BIS_package_information.ipynb | bgotthold-usgs/bis | b07ff483a764a564f26d670eb2d1a297c6611585 | [
"Unlicense"
] | null | null | null | BIS_package_information.ipynb | bgotthold-usgs/bis | b07ff483a764a564f26d670eb2d1a297c6611585 | [
"Unlicense"
] | null | null | null | BIS_package_information.ipynb | bgotthold-usgs/bis | b07ff483a764a564f26d670eb2d1a297c6611585 | [
"Unlicense"
] | null | null | null | 28.207792 | 619 | 0.53407 | [
[
[
"# Notes on accessing package information access using recent updates:\n\n# created __init__.py files (follows default package organization), \n# added a help report,\n# added a metadata read function, \n# added a version read function, \n# reorganized the package setup structure. \n\n# Examples of accessing package information are shown below.\n",
"_____no_output_____"
],
[
"# Notes install CSASL BIS package\n\n# For local install of BIS package (BIS folder stored on computer, pip installed):\n\n# 1) pip install your_path_to_bis_folder/bis\n\n# e.g. pip install /Users/twellman/Downloads/bis\n\n# remote install w/ git and pip installed on local computer:\n\n# 1) pip install git+https://github.com/usgs-bcb/bis.git\n",
"_____no_output_____"
],
[
"import bis",
"_____no_output_____"
],
[
"****************************************************\n* 'BIS PACKAGE INFORMATION'\n****************************************************",
"_____no_output_____"
],
[
"help(bis)",
"Help on package bis:\n\nNAME\n bis\n\nFILE\n /Users/twellman/anaconda/lib/python2.7/site-packages/bis/__init__.py\n\nDESCRIPTION\n ~~~~~~~~~~~~~~~~~~~~~\n BIS PYTHON PACKAGE\n ~~~~~~~~~~~~~~~~~~~~~\n \n A set of helper code for Biogeographic Information System projects\n \n url : https://maps.usgs.gov/\n Email : [email protected]\n \n Author: Core Science Analytics, Synthesis and Libraries\n Core Science Systems Division, U.S. Geological Survey\n \n Software metadata: retrieve using \"bis.get_package_metadata()\"\n\nPACKAGE CONTENTS\n bis\n itis\n natureserve\n sgcn\n tess\n tir\n worms\n\nFUNCTIONS\n get_package_metadata()\n # metadata retrieval\n\nDATA\n __version__ = '0.0.1'\n\nVERSION\n 0.0.1\n\n\n"
],
[
"dir(bis)",
"_____no_output_____"
],
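For the 'version read function' mentioned in the notes above, the version string can also be read straight from the package attribute (illustrative snippet, not part of the original notebook):

```python
print(bis.__version__)  # '0.0.1', matching the VERSION shown by help(bis)
```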
[
"bis.get_package_metadata()",
"Metadata-Version: 1.1\nName: bis\nVersion: 0.0.1\nSummary: A set of helper code for Biogeographic Information System projects\nHome-page: https://maps.usgs.gov/\nAuthor: USGS CSASL BCB\nAuthor-email: [email protected]\nLicense: None\nDescription: Biogeographic Information System (BIS) Helper Code\n=======================\nThis module contains scripts and configuration details needed for Biogeographic Information System projects.\nUnder USGS Software Release Policy, the software codes here are considered preliminary, not released officially, and posted to this repo for informal sharing among colleagues.\nThis software is preliminary or provisional and is subject to revision. It is being provided to meet the need for timely best science. The software has not received final approval by the U.S. Geological Survey (USGS). No warranty, expressed or implied, is made by the USGS or the U.S. Government as to the functionality of the software and related material nor shall the fact of release constitute any such warranty. The software is provided on the condition that neither the USGS nor the U.S. Government shall be held liable for any damages resulting from the authorized or unauthorized use of the software.\nKeywords: biogeography\nPlatform: UNKNOWN\nClassifier: Development Status :: 3 - Alpha\nClassifier: Intended Audience :: Developers\nClassifier: Topic :: Software Development :: Build Tools\nClassifier: License :: OSI Approved :: CC0\nClassifier: Programming Language :: Python :: 2\nClassifier: Programming Language :: Python :: 2.7\nClassifier: Programming Language :: Python :: 3\nClassifier: Programming Language :: Python :: 3.3\nClassifier: Programming Language :: Python :: 3.4\nClassifier: Programming Language :: Python :: 3.5\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74a35bffb4650541ee602bb1832b1c7e5012415 | 134,655 | ipynb | Jupyter Notebook | promotions/channel-attribution-lstm.ipynb | fengwangjiang/algorithmic-examples | 552d0805c2f6d914fc824fe646c91e498c6c4b4b | [
"Apache-2.0"
] | 1 | 2021-01-21T19:23:30.000Z | 2021-01-21T19:23:30.000Z | promotions/channel-attribution-lstm.ipynb | fengwangjiang/algorithmic-examples | 552d0805c2f6d914fc824fe646c91e498c6c4b4b | [
"Apache-2.0"
] | null | null | null | promotions/channel-attribution-lstm.ipynb | fengwangjiang/algorithmic-examples | 552d0805c2f6d914fc824fe646c91e498c6c4b4b | [
"Apache-2.0"
] | 1 | 2022-02-07T05:56:32.000Z | 2022-02-07T05:56:32.000Z | 168.31875 | 24,540 | 0.867521 | [
[
[
"# Implementation of multi-touch multi-channel attribution model using LSTM with attention\n#\n# Ning li, Sai Kumar Arava, Chen Dong, Zhenyu Yan, Abhishek Pani, Deep Neural Net with Attention for Multi-channel Multi-touch Attribution\n# Kan Ren, et al, Learning Multi-touch Conversion Attribution with Dual-attention Mechanisms for Online Advertising\n#\n# Input dataset:\n# http://ailab.criteo.com/criteo-attribution-modeling-bidding-dataset/\n# or\n# https://drive.google.com/file/d/1vvngMlMomaPODdKCOdL-3scj2Vv_JhTm/view",
"_____no_output_____"
]
],
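The papers cited above assign credit by learning attention weights over the touchpoints of each user journey. The snippet below is a rough sketch of that idea in Keras; it is not the exact architecture from either paper and not necessarily what the rest of this notebook builds. `max_len`, the per-touchpoint feature size and the layer widths are placeholders.

```python
from keras.models import Model
from keras.layers import Input, LSTM, Dense, Flatten, Activation, Dot

def build_attention_lstm(max_len, n_touchpoint_features, hidden_units=64):
    # Each journey is a padded sequence of touchpoint feature vectors
    touchpoints = Input(shape=(max_len, n_touchpoint_features))
    h = LSTM(hidden_units, return_sequences=True)(touchpoints)   # one hidden state per touchpoint
    scores = Flatten()(Dense(1)(h))                              # unnormalized relevance score per touchpoint
    weights = Activation('softmax', name='attention')(scores)    # credit distribution over touchpoints
    context = Dot(axes=(1, 1))([weights, h])                     # attention-weighted journey summary
    conversion = Dense(1, activation='sigmoid')(context)         # conversion probability
    return Model(inputs=touchpoints, outputs=conversion)

attn_model = build_attention_lstm(max_len=20, n_touchpoint_features=64)
attn_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
```

After training, the per-journey weights of the 'attention' layer can be read out with a second Model that outputs that layer, and then aggregated per campaign to obtain attribution scores.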
[
[
"### Data description\nThis dataset represents a sample of 30 days of Criteo live traffic data. Each line corresponds to one impression (a banner) that was displayed to a user. For each banner we have detailed information about the context, if it was clicked, if it led to a conversion and if it led to a conversion that was attributed to Criteo or not. Data has been sub-sampled and anonymized so as not to disclose proprietary elements.\n\nHere is a detailed description of the fields (they are tab-separated in the file):\n\n* timestamp: timestamp of the impression (starting from 0 for the first impression). The dataset is sorted according to timestamp.\n* uid: a unique user identifier\n* campaign: a unique identifier for the campaign\n* conversion: 1 if there was a conversion in the 30 days after the impression (independently of whether this impression was last click or not)\n* conversion_timestamp: the timestamp of the conversion or -1 if no conversion was observed\n* conversion_id: a unique identifier for each conversion (so that timelines can be reconstructed if needed). -1 if there was no conversion\n* attribution: 1 if the conversion was attributed to Criteo, 0 otherwise\n* click: 1 if the impression was clicked, 0 otherwise\n* click_pos: the position of the click before a conversion (0 for first-click)\n* click_nb: number of clicks. More than 1 if there was several clicks before a conversion\n* cost: the price paid by Criteo for this display (disclaimer: not the real price, only a transformed version of it)\n* cpo: the cost-per-order in case of attributed conversion (disclaimer: not the real price, only a transformed version of it)\n* time_since_last_click: the time since the last click (in s) for the given impression\n* cat(1-9): contextual features associated to the display. Can be used to learn the click/conversion models. We do not disclose the meaning of these features but it is not relevant for this study. Each column is a categorical variable. In the experiments, they are mapped to a fixed dimensionality space using the Hashing Trick (see paper for reference).\n\n### Key figures\n* 2.4Gb uncompressed\n* 16.5M impressions\n* 45K conversions\n* 700 campaigns",
"_____no_output_____"
]
],
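The description above mentions that the categorical cat1-cat9 columns were mapped to a fixed-dimensional space with the hashing trick. A minimal sketch of that idea is shown below (the bucket count is arbitrary); this notebook itself uses explicit one-hot encoding via map_one_hot instead.

```python
import hashlib

def hashed_index(column, value, n_buckets=2**18):
    # Deterministically fold "column=value" into a fixed number of buckets
    key = '{}={}'.format(column, value).encode('utf-8')
    return int(hashlib.md5(key).hexdigest(), 16) % n_buckets

# e.g. the sparse feature index for one categorical value
idx = hashed_index('cat1', 1234)
```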
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt \nimport numpy as np\n\nfrom sklearn.utils import resample\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\n\nimport keras\n\nplt.style.use('ggplot')",
"_____no_output_____"
],
[
"# Initial data preparation\n\ndef add_derived_columns(df):\n df_ext = df.copy()\n df_ext['jid'] = df_ext['uid'].map(str) + '_' + df_ext['conversion_id'].map(str)\n \n min_max_scaler = MinMaxScaler()\n for cname in ('timestamp', 'time_since_last_click'):\n x = df_ext[cname].values.reshape(-1, 1) \n df_ext[cname + '_norm'] = min_max_scaler.fit_transform(x)\n \n return df_ext\n\ndef filter_journeys_by_length(df, min_touchpoints):\n if min_touchpoints <= 1:\n return df\n else:\n grouped = df.groupby(['jid'])['uid'].count().reset_index(name=\"count\")\n return df[df['jid'].isin( grouped[grouped['count'] >= min_touchpoints]['jid'].values )]\n\ndef sample_campaigns(df, n_campaigns): \n campaigns = np.random.choice( df['campaign'].unique(), n_campaigns, replace = False )\n return df[ df['campaign'].isin(campaigns) ]\n\ndef balance_conversions(df):\n df_minority = df[df.conversion == 1]\n df_majority = df[df.conversion == 0]\n \n df_majority_jids = np.array_split(df_majority['jid'].unique(), 100 * df_majority.shape[0]/df_minority.shape[0] )\n \n df_majority_sampled = pd.DataFrame(data=None, columns=df.columns)\n for jid_chunk in df_majority_jids:\n df_majority_sampled = pd.concat([df_majority_sampled, df_majority[df_majority.jid.isin(jid_chunk)]])\n if df_majority_sampled.shape[0] > df_minority.shape[0]:\n break\n \n return pd.concat([df_majority_sampled, df_minority]).sample(frac=1).reset_index(drop=True)\n\ndef map_one_hot(df, column_names, result_column_name):\n mapper = {} \n for i, col_name in enumerate(column_names):\n for val in df[col_name].unique():\n mapper[str(val) + str(i)] = len(mapper)\n \n df_ext = df.copy()\n \n def one_hot(values):\n v = np.zeros( len(mapper) )\n for i, val in enumerate(values): \n v[ mapper[str(val) + str(i)] ] = 1\n return v \n \n df_ext[result_column_name] = df_ext[column_names].values.tolist()\n df_ext[result_column_name] = df_ext[result_column_name].map(one_hot)\n \n return df_ext\n \ndata_file = 'data/criteo_attribution_dataset.tsv.gz'\ndf0 = pd.read_csv(data_file, sep='\\t', compression='gzip')\n\nn_campaigns = 400\n\ndf1 = add_derived_columns(df0)\ndf2 = sample_campaigns(df1, n_campaigns)\ndf3 = filter_journeys_by_length(df2, 2)\ndf4 = balance_conversions(df3)\ndf5 = map_one_hot(df4, ['cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat8'], 'cats')\ndf6 = map_one_hot(df5, ['campaign'], 'campaigns').sort_values(by=['timestamp_norm'])\n\nprint(df6.shape[0])\nprint([df6[df6.conversion == 0].shape[0], df6[df6.conversion == 1].shape[0]])",
"_____no_output_____"
],
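A quick sanity check of the `map_one_hot` helper defined above on a made-up two-column frame (not part of the pipeline; column names and values here are arbitrary). Each row becomes a fixed-length 0/1 vector with one slot per (column, value) pair, in order of first appearance.

```python
# Tiny illustration of map_one_hot; 'campaign' and 'cat1' values are made up.
toy = pd.DataFrame({'campaign': ['a', 'b', 'a'], 'cat1': [1, 2, 1]})
toy_encoded = map_one_hot(toy, ['campaign', 'cat1'], 'features')

# Expect [1, 0, 1, 0] for ('a', 1), [0, 1, 0, 1] for ('b', 2), [1, 0, 1, 0] again.
print([list(v) for v in toy_encoded['features']])
```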
[
"# Data exploration\n\ndef journey_lenght_histogram(df):\n counts = df.groupby(['jid'])['uid'].count().reset_index(name=\"count\").groupby(['count']).count()\n return counts.index, counts.values / df.shape[0]\n\nhist_x, hist_y = journey_lenght_histogram(df4)\n\nplt.plot(range(len(hist_all)), hist_all, label='all journeys')\nplt.yscale('log')\nplt.xlim(0, 120)\nplt.xlabel('Journey length (number of touchpoints)')\nplt.ylabel('Fraction of journeys')\nplt.show()",
"[145440]\n jid\ncount \n2 58572\n3 27400\n4 15811\n5 9922\n6 6998\n7 4915\n8 3739\n9 3020\n10 2319\n11 1907\n"
]
],
[
[
"## Last Touch Attribution",
"_____no_output_____"
]
],
[
[
"def last_touch_attribution(df):\n \n def count_by_campaign(df):\n counters = np.zeros(n_campaigns)\n for campaign_one_hot in df['campaigns'].values:\n campaign_id = np.argmax(campaign_one_hot)\n counters[campaign_id] = counters[campaign_id] + 1\n return counters\n \n campaign_impressions = count_by_campaign(df)\n \n df_converted = df[df['conversion'] == 1]\n idx = df_converted.groupby(['jid'])['timestamp_norm'].transform(max) == df_converted['timestamp_norm']\n campaign_conversions = count_by_campaign(df_converted[idx])\n \n return campaign_conversions / campaign_impressions\n \nlta = last_touch_attribution(df6)",
"_____no_output_____"
],
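A self-contained, toy-sized illustration of the last-touch rule implemented by `last_touch_attribution` above: within each converting journey only the latest touchpoint's campaign receives credit, and credit is then normalized by impressions. The journeys, campaigns and timestamps below are made up.

```python
import pandas as pd

# Toy journeys: jid, campaign of each touchpoint, timestamp, conversion flag.
toy = pd.DataFrame({
    'jid':        ['j1', 'j1', 'j2', 'j2', 'j2'],
    'campaign':   ['A',  'B',  'A',  'C',  'A'],
    'timestamp':  [ 1,    2,    1,    2,    3 ],
    'conversion': [ 1,    1,    1,    1,    1 ],
})

# Impressions per campaign (denominator of "return per impression").
impressions = toy.groupby('campaign').size()

# Last touch of each converting journey gets all of the credit.
last_touch = toy[toy.conversion == 1].sort_values('timestamp').groupby('jid').tail(1)
credit = last_touch.groupby('campaign').size().reindex(impressions.index, fill_value=0)

print(credit / impressions)
# A gets credit for j2 (its last touch), B for j1, C gets none.
```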
[
"# Visualization of the attribution scores\n\ncampaign_idx = range(150, 200)\n\nfig = plt.figure(figsize=(15,4))\nax = fig.add_subplot(111)\nplt.bar( range(len(lta[campaign_idx])), lta[campaign_idx], label='LTA' )\nplt.xlabel('Campaign ID')\nplt.ylabel('Return per impression')\nplt.legend(loc='upper left')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Logistic Regression",
"_____no_output_____"
]
],
[
[
"def features_for_logistic_regression(df):\n\n def pairwise_max(series):\n return np.max(series.tolist(), axis = 0).tolist()\n \n aggregation = {\n 'campaigns': pairwise_max,\n 'cats': pairwise_max,\n 'click': 'sum',\n 'cost': 'sum',\n 'conversion': 'max'\n }\n \n df_agg = df.groupby(['jid']).agg(aggregation)\n \n df_agg['features'] = df_agg[['campaigns', 'cats', 'click', 'cost']].values.tolist()\n \n return (\n np.stack(df_agg['features'].map(lambda x: np.hstack(x)).values),\n df_agg['conversion'].values\n )",
"_____no_output_____"
],
[
"x, y = features_for_logistic_regression(df6)\nprint(np.shape(x))",
"(145440, 1561)\n"
],
[
"from sklearn.model_selection import train_test_split\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 1)\nx_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.20, random_state = 1)",
"_____no_output_____"
],
[
"# Quick sanity check\nfrom sklearn.linear_model import LogisticRegression\n\nlogisticRegr = LogisticRegression()\nlogisticRegr.fit(x_train, y_train)\nscore = logisticRegr.score(x_test, y_test)\nprint(score)",
"0.8653396589658966\n"
],
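Accuracy alone can hide class-level behaviour even after rebalancing, so an optional follow-up is to look at AUC and the confusion matrix for the same fitted model. This uses only standard scikit-learn metrics and the `logisticRegr` object from the sanity check above; it is an extra diagnostic, not part of the original analysis.

```python
from sklearn.metrics import roc_auc_score, confusion_matrix, classification_report

# Predicted probability of conversion for the held-out journeys.
proba = logisticRegr.predict_proba(x_test)[:, 1]
pred = logisticRegr.predict(x_test)

print('ROC AUC:', roc_auc_score(y_test, proba))
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
```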
[
"from keras.models import Sequential \nfrom keras.layers import Dense, Dropout\nfrom keras.constraints import NonNeg\n\nm = np.shape(x)[1]\n \nmodel = Sequential() \nmodel.add(Dense(1, input_dim=m, activation='sigmoid', name = 'contributions')) \n\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) \nhistory = model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=1, validation_data=(x_val, y_val)) \nscore = model.evaluate(x_test, y_test, verbose=0) \nprint('Test score:', score[0]) \nprint('Test accuracy:', score[1])",
"Train on 93081 samples, validate on 23271 samples\nEpoch 1/10\n93081/93081 [==============================] - 3s 30us/step - loss: 0.5277 - acc: 0.7693 - val_loss: 0.4710 - val_acc: 0.8031\nEpoch 2/10\n93081/93081 [==============================] - 1s 12us/step - loss: 0.4442 - acc: 0.8172 - val_loss: 0.4363 - val_acc: 0.8172\nEpoch 3/10\n93081/93081 [==============================] - 1s 12us/step - loss: 0.4183 - acc: 0.8293 - val_loss: 0.4180 - val_acc: 0.8290\nEpoch 4/10\n93081/93081 [==============================] - 1s 13us/step - loss: 0.4032 - acc: 0.8367 - val_loss: 0.4066 - val_acc: 0.8343\nEpoch 5/10\n93081/93081 [==============================] - 1s 14us/step - loss: 0.3931 - acc: 0.8416 - val_loss: 0.3984 - val_acc: 0.8383\nEpoch 6/10\n93081/93081 [==============================] - 1s 13us/step - loss: 0.3853 - acc: 0.8443 - val_loss: 0.3931 - val_acc: 0.8410\nEpoch 7/10\n93081/93081 [==============================] - 1s 13us/step - loss: 0.3795 - acc: 0.8471 - val_loss: 0.3875 - val_acc: 0.8432\nEpoch 8/10\n93081/93081 [==============================] - 1s 13us/step - loss: 0.3747 - acc: 0.8489 - val_loss: 0.3830 - val_acc: 0.8444\nEpoch 9/10\n93081/93081 [==============================] - 1s 13us/step - loss: 0.3706 - acc: 0.8500 - val_loss: 0.3799 - val_acc: 0.8448\nEpoch 10/10\n93081/93081 [==============================] - 1s 14us/step - loss: 0.3674 - acc: 0.8511 - val_loss: 0.3772 - val_acc: 0.8474\nTest score: 0.3732171668471283\nTest accuracy: 0.8457783278327833\n"
],
[
"# Visualization of the attribution scores\nfrom sklearn.utils.extmath import softmax\n\nkeras_logreg = model.get_layer('contributions').get_weights()[0].flatten()[0:n_campaigns]\nkeras_logreg = softmax([keras_logreg]).flatten()\n\nfig = plt.figure(figsize=(15,4))\nax = fig.add_subplot(111)\nplt.bar(range(len(keras_logreg[campaign_idx])), keras_logreg[campaign_idx] )\nplt.xlabel('Campaign ID')\nplt.ylabel('Return per impression')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Basic LSTM",
"_____no_output_____"
]
],
[
[
"def features_for_lstm(df, max_touchpoints):\n \n df_proj = df[['jid', 'campaigns', 'cats', 'click', 'cost', 'time_since_last_click_norm', 'timestamp_norm', 'conversion']]\n \n x2d = df_proj.values\n \n x3d_list = np.split(x2d[:, 1:], np.cumsum(np.unique(x2d[:, 0], return_counts=True)[1])[:-1])\n \n x3d = []\n y = []\n for xi in x3d_list:\n journey_matrix = np.apply_along_axis(np.hstack, 1, xi)\n journey_matrix = journey_matrix[ journey_matrix[:, 5].argsort() ] # sort impressions by timestamp\n n_touchpoints = len(journey_matrix)\n padded_journey = []\n if(n_touchpoints >= max_touchpoints):\n padded_journey = journey_matrix[0:max_touchpoints]\n else:\n padded_journey = np.pad(journey_matrix, ((0, max_touchpoints - n_touchpoints), (0, 0)), 'constant', constant_values=(0))\n \n x3d.append(padded_journey[:, 0:-1])\n y.append(np.max(padded_journey[:, -1]))\n \n return np.stack(x3d), y\n\nx, y = features_for_lstm(df6, max_touchpoints = 15)\nprint(np.shape(x))\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 1)\nx_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.20, random_state = 1)",
"(145440, 15, 1563)\n"
],
[
"from keras.models import Sequential \nfrom keras.layers import Dense, LSTM\n\nn_steps, n_features = np.shape(x)[1:3]\n \nmodel = Sequential() \nmodel.add(LSTM(64, dropout=0.2, recurrent_dropout=0.2, input_shape=(n_steps, n_features)))\nmodel.add(Dense(1, activation='sigmoid')) \n\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) \nhistory = model.fit(x_train, y_train, batch_size=64, epochs=5, verbose=1, validation_data=(x_val, y_val)) \nscore = model.evaluate(x_test, y_test, verbose=0) \nprint('Test score:', score[0]) \nprint('Test accuracy:', score[1])",
"Train on 93081 samples, validate on 23271 samples\nEpoch 1/5\n93081/93081 [==============================] - 135s 1ms/step - loss: 0.3033 - acc: 0.8677 - val_loss: 0.2547 - val_acc: 0.8899\nEpoch 2/5\n93081/93081 [==============================] - 152s 2ms/step - loss: 0.2603 - acc: 0.8879 - val_loss: 0.2322 - val_acc: 0.9013\nEpoch 3/5\n93081/93081 [==============================] - 160s 2ms/step - loss: 0.2415 - acc: 0.8979 - val_loss: 0.2274 - val_acc: 0.9069\nEpoch 4/5\n93081/93081 [==============================] - 156s 2ms/step - loss: 0.2293 - acc: 0.9047 - val_loss: 0.2091 - val_acc: 0.9129\nEpoch 5/5\n93081/93081 [==============================] - 155s 2ms/step - loss: 0.2248 - acc: 0.9067 - val_loss: 0.2136 - val_acc: 0.9112\nTest score: 0.21668663059081575\nTest accuracy: 0.9088627612761276\n"
]
],
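Journeys shorter than `max_touchpoints` are zero-padded above, so the LSTM also consumes the all-zero steps. One optional variant (a sketch, not what was run above) adds a `Masking` layer so the padded timesteps are skipped; `n_steps` and `n_features` are the shapes computed in the previous cell.

```python
from keras.models import Sequential
from keras.layers import Masking, LSTM, Dense

model_masked = Sequential()
# Skip timesteps whose feature vector is entirely 0 (the padding value used above).
model_masked.add(Masking(mask_value=0.0, input_shape=(n_steps, n_features)))
model_masked.add(LSTM(64, dropout=0.2, recurrent_dropout=0.2))
model_masked.add(Dense(1, activation='sigmoid'))
model_masked.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
```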
[
[
"## LSTM with Attention",
"_____no_output_____"
]
],
[
[
"from keras.models import Sequential \nfrom keras.layers import Dense, LSTM, Input, Lambda, RepeatVector, Permute, Flatten, Activation, Multiply\nfrom keras.constraints import NonNeg\nfrom keras import backend as K\nfrom keras.models import Model\n\nn_steps, n_features = np.shape(x)[1:3]\n\nhidden_units = 64\n\nmain_input = Input(shape=(n_steps, n_features))\n \nembeddings = Dense(128, activation='linear', input_shape=(n_steps, n_features))(main_input)\n\nactivations = LSTM(hidden_units, dropout=0.2, recurrent_dropout=0.2, return_sequences=True)(embeddings)\n\nattention = Dense(1, activation='tanh')(activations)\nattention = Flatten()(attention)\nattention = Activation('softmax', name = 'attention_weigths')(attention)\nattention = RepeatVector(hidden_units * 1)(attention)\nattention = Permute([2, 1])(attention)\n\nweighted_activations = Multiply()([activations, attention])\nweighted_activations = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(hidden_units,))(weighted_activations)\n\nmain_output = Dense(1, activation='sigmoid')(weighted_activations)\n\nmodel = Model(inputs=main_input, outputs=main_output)\n\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) \nhistory = model.fit(x_train, y_train, batch_size=64, epochs=5, verbose=1, validation_data=(x_val, y_val)) \nscore = model.evaluate(x_test, y_test, verbose=0) \nprint('Test score:', score[0]) \nprint('Test accuracy:', score[1])",
"Train on 93081 samples, validate on 23271 samples\nEpoch 1/5\n93081/93081 [==============================] - 110s 1ms/step - loss: 0.2293 - acc: 0.9018 - val_loss: 0.2162 - val_acc: 0.9110\nEpoch 2/5\n93081/93081 [==============================] - 109s 1ms/step - loss: 0.2001 - acc: 0.9152 - val_loss: 0.2038 - val_acc: 0.9123\nEpoch 3/5\n93081/93081 [==============================] - 96s 1ms/step - loss: 0.1906 - acc: 0.9199 - val_loss: 0.2002 - val_acc: 0.9166\nEpoch 4/5\n93081/93081 [==============================] - 85s 910us/step - loss: 0.1876 - acc: 0.9213 - val_loss: 0.1949 - val_acc: 0.9168\nEpoch 5/5\n93081/93081 [==============================] - 98s 1ms/step - loss: 0.1846 - acc: 0.9222 - val_loss: 0.1924 - val_acc: 0.9190\nTest score: 0.19408383931476947\nTest accuracy: 0.9186262376237624\n"
]
],
[
[
"## Analysis of LSTM-A Model",
"_____no_output_____"
]
],
[
[
"def get_campaign_id(x_journey_step):\n return np.argmax(x_journey_step[0:n_campaigns])\n\nattention_model = Model(inputs=model.input, outputs=model.get_layer('attention_weigths').output)\n\na = attention_model.predict(x_train)\n\nattributions = np.zeros(n_campaigns)\ncampaign_freq = np.ones(n_campaigns)\nfor i, journey in enumerate(a):\n for step, step_contribution in enumerate(journey):\n if(np.sum(x_train[i][step]) > 0):\n campaign_id = get_campaign_id(x_train[i][step])\n attributions[campaign_id] = attributions[campaign_id] + step_contribution\n campaign_freq[campaign_id] = campaign_freq[campaign_id] + 1",
"_____no_output_____"
],
[
"lstm_a = (attributions/campaign_freq)\n\nfig = plt.figure(figsize=(15, 4))\nax = fig.add_subplot(111)\nplt.bar( range(len(lstm_a[campaign_idx])), lstm_a[campaign_idx], label='LSTM-A' )\nplt.xlabel('Campaign ID')\nplt.ylabel('Contribution')\nplt.legend(loc='upper left')\nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(15, 4))\nax = fig.add_subplot(111)\n\nratio = max(lta[idx]) / max(keras_logreg[idx])\nplt.bar(np.linspace(0, len(campaign_idx), len(campaign_idx)), lta[campaign_idx], width=0.4, alpha=0.7, label='LTA' )\nplt.bar(np.linspace(0, len(campaign_idx), len(campaign_idx)) - 0.3, keras_logreg[campaign_idx], width=0.4, alpha=0.7, label='Keras Log Reg' )\nplt.xlabel('Campaign ID')\nplt.ylabel('Contribution')\nplt.legend(loc='upper left')\nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(15, 4))\nax = fig.add_subplot(111)\n\nratio = max(lta[campaign_idx]) / max(lstm_a[campaign_idx])\nplt.bar(np.linspace(0, len(campaign_idx), len(campaign_idx)), lta[campaign_idx], width=0.4, alpha=0.7, label='LTA' )\nplt.bar(np.linspace(0, len(campaign_idx), len(campaign_idx)) - 0.3, lstm_a[campaign_idx], width=0.4, alpha=0.7, label='LSTM-A' )\nplt.xlabel('Campaign ID')\nplt.ylabel('Contribution')\nplt.legend(loc='upper left')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Simulation",
"_____no_output_____"
]
],
[
[
"# Key assumption: If one of the campaigns in a journey runs out of budget, \n# then the conversion reward is fully lost for the entire journey\n# including both past and future campaigns\n\ndef simulate_budget_roi(df, budget_total, attribution, verbose=False):\n budgets = np.ceil(attribution * (budget_total / np.sum(attribution)))\n \n if(verbose):\n print(budgets)\n \n blacklist = set()\n conversions = set()\n for i in range(df.shape[0]):\n campaign_id = get_campaign_id(df.loc[i]['campaigns']) \n jid = df.loc[i]['jid']\n if jid not in blacklist:\n if budgets[campaign_id] >= 1:\n budgets[campaign_id] = budgets[campaign_id] - 1\n if(df.loc[i]['conversion'] == 1):\n conversions.add(jid)\n else:\n blacklist.add(jid)\n \n if(verbose):\n if(i % 10000 == 0):\n print('{:.2%} : {:.2%} budget spent'.format(i/df.shape[0], 1.0 - np.sum(budgets)/budget_total ))\n \n if(np.sum(budgets) < budget_total * 0.02):\n break\n \n return len(conversions.difference(blacklist))",
"_____no_output_____"
],
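Before sweeping the `pitch` exponent in the next cell, it can help to have a trivial reference point, for example spreading the budget uniformly across all campaigns. The uniform baseline is an illustrative assumption added here, not part of the original study; it reuses `simulate_budget_roi`, `df6` and `n_campaigns` defined above.

```python
import numpy as np

# Uniform allocation: every campaign gets the same share of the 10,000-impression budget.
uniform = np.ones(n_campaigns)
baseline_reward = simulate_budget_roi(df6, 10000, uniform)
print('uniform budget baseline:', baseline_reward)
```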
[
"pitches = [0.1, 0.25, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]\nattributions = [lta, keras_logreg, lstm_a]\n\nfor i, pitch in enumerate(pitches):\n for j, attribution in enumerate(attributions):\n reward = simulate_budget_roi(df6, 10000, attribution**pitch)\n print('{} {} : {}'.format(p, j, reward))",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74a37251f5650f3abd6249adfb85779bc0a76a6 | 15,211 | ipynb | Jupyter Notebook | Sesion1/2pandas.ipynb | gdesirena/Taller_UNSIS | a2a3a1b9ce0ef03bffbb93e2ff6c3ca79d992b82 | [
"MIT"
] | 1 | 2022-03-14T22:38:27.000Z | 2022-03-14T22:38:27.000Z | Sesion1/2pandas.ipynb | gdesirena/Taller_UNSIS | a2a3a1b9ce0ef03bffbb93e2ff6c3ca79d992b82 | [
"MIT"
] | null | null | null | Sesion1/2pandas.ipynb | gdesirena/Taller_UNSIS | a2a3a1b9ce0ef03bffbb93e2ff6c3ca79d992b82 | [
"MIT"
] | null | null | null | 25.781356 | 394 | 0.58931 | [
[
[
"# Introducción a la manipulación de datos con pandas\n\n<img style=\"float: right; margin: 0px 0px 15px 15px;\" src=\"https://numfocus.org/wp-content/uploads/2016/07/pandas-logo-300.png\" width=\"400px\" height=\"400px\" />\n\n> Una de las mejores cosas de Python (especialmente si eres un analista de datos) es la gran cantidad de librerías de alto nivel que se encuentran disponibles.\n\n> Algunas de estas librerías se encuentran en la librería estándar, es decir, se pueden encontrar donde sea que esté Python. Otras librerías se pueden añadir fácilmente.\n\n> Hoy nos dedicaremos a estudiar la librería de Python para análisis de datos: **Pandas**. Como analistas de datos, esta librería será pan de cada día y les aseguro que cada día que la utilicen aprenderán cosas nuevas... entonces, más vale comenzar pronto.\n\nReferencias:\n- https://pandas.pydata.org/\n- https://towardsdatascience.com/data-science-with-python-intro-to-loading-and-subsetting-data-with-pandas-9f26895ddd7f\n___",
"_____no_output_____"
],
[
"# 0. Motivación\n\nComo analistas de datos, normalmente trabajamos con grandes cantidades de datos. \n\nLos datos que debemos cargar pueden guardarse de muchas maneras distintas: archivos CSV, archivos de Excel, etcétera. Incluso, los datos pueden estar disponibles a través de servicios web. \n\nPara trabajar con datos, se hace necesario representarlos en una estructura tabular (cualquier cosa con forma de tabla con filas y columnas).\n\nEn algunos casos, los datos ya están en forma tabular y es más fácil cargarlos. En otros, debemos trabajar con datos no estructurados o que no están organizados de una manera determinada (texto plano, imágenes, audio, etcétera).\n\nEn esta clase nos vamos a concentrar en cargar datos desde archivos CSV (valores separados por coma).",
"_____no_output_____"
],
[
"## Pandas\n\nPandas es una librería de código abierto para el lenguaje de programación Python, desarrollada por Wes McKinney. Es una librería muy eficiente y proporciona estructuras de datos y herramientas de análisis muy fáciles de usar.\n\nComo las librerías que hemos visto antes, Pandas viene instalado por defecto con Anaconda, así que lo único que tenemos que hacer para empezar a trabajar con ella es importarla. La comunidad utiliza normalmente la abreviación pd para referirse a pandas:",
"_____no_output_____"
]
],
[
[
"# Importar pandas\n",
"_____no_output_____"
]
],
[
[
"Los **pd.DataFrames** son los objetos por excelencia de pandas para manipular datos. Son eficientes y rápidos. Son la estructura de datos donde pandas carga los diferentes formatos de datos: cuando nuestros datos están limpios y estructurados, cada fila representa una observación, y cada columna una variable o característica. Tanto las filas como las columnas pueden tener etiquetas.\n\nEn esta clase vamos a trabajar con datos de características y precios de casas de la ciudad de Portland, OR (datos tomados del curso [Machine Learning](https://www.coursera.org/learn/machine-learning) de Andrew Ng). Los datos se encuentran en el archivo `house_pricing_short.csv`.\n\nEn esta y la siguiente clase, además de importar datos, aprenderemos a:\n- seleccionar subconjuntos de datos;\n- filtrar variables por categorías;\n- relacionar tablas con datos complementarios;\n- entre otros.\n\nComenzamos:",
"_____no_output_____"
],
[
"___\n# 1. Importando datos\n\nEl archivo `house_pricing_short.csv` contiene información acerca de el número de cuartos, el tamaño y los precios de casas de la ciudad de Portland, OR.\n\nEl primer paso para comenzar a trabajar con datos es importarlos. Lo podemos hacer con la función `pd.read_csv()`:",
"_____no_output_____"
]
],
[
[
"# Ayuda en la función pd.read_csv()\n",
"_____no_output_____"
]
],
[
[
"Importemos los datos:",
"_____no_output_____"
]
],
[
[
"# Importar house_pricing.csv\n",
"_____no_output_____"
],
[
"# Observar los datos\n",
"_____no_output_____"
],
[
"# Tipo de lo que importamos\n",
"_____no_output_____"
]
],
[
[
"Hagamos que el índice represente el identificador de cada casa:",
"_____no_output_____"
],
[
"___\n# 2. Indización y selección de datos\n\nHay muchas formas de las cuales podemos seleccionar datos de DataFrames. Veremos, de acuerdo al artículo al final de este documento, la forma basada en corchetes ([]) y en los métodos `loc()` y `iloc()`.\n\nCon los corchetes, podemos seleccionar ciertas filas, o bien, ciertas columnas. \n\nPara una selección de filas, podemos usar el indizado como en las listas: [start_index:end_index:step], recordando que el `end_index` no es inclusivo.\n\nPor ejemplo, seleccionar las casas en las primeras dos filas:",
"_____no_output_____"
]
],
[
[
"# data[0:2:1]\n# data[0:2]\n",
"_____no_output_____"
]
],
[
[
"Ahora, seleccionar de la casa 7 en adelante:",
"_____no_output_____"
],
[
"Finalmente, seleccionar las casas en las filas impares:",
"_____no_output_____"
],
[
"Similarmente, para una selección de columnas, podemos usar una lista con los nombres de las columnas requeridas. ",
"_____no_output_____"
]
],
[
[
"# Seleccionar la columna n_bedrooms\n",
"_____no_output_____"
]
],
[
[
"Finalmente, seleccionamos dos columnas:",
"_____no_output_____"
]
],
[
[
"# Seleccionar las columnas n_bedrooms y size\n",
"_____no_output_____"
]
],
[
[
"Muy bien, ya vimos que los corchetes son útiles. También existen los poderosos métodos `loc` y `iloc`, que nos dan el poder de seleccionar ambos a la vez: columnas y filas.\n\n¿En qué se diferencian?\n\n- El método `loc` nos permite seleccionar filas y columnas de nuestros datos basados en etoquetas. Primero, se deben especificar las etiquetas de las filas, y luego las de las columnas.\n\n- El método `lioc` nos permite hacer lo mismo pero basado en índices enteros de nuestro DataFrame (como si fueran matrices).\n\nComo antes, si queremos seleccionar todas las filas, o columnas, simplemente escribimos `:` en el lugar adecuado.\n\nMejor con ejemplos:",
"_____no_output_____"
],
[
"Para un mejor entendimiento de esta parte, reetiquetaremos la numeración de clientes con una \"numeración\" alfabética. Es decir: 1-A, 2-B, ..., 10-J.",
"_____no_output_____"
]
],
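For reference, a tiny self-contained example of label-based `loc` versus position-based `iloc`. It uses its own toy DataFrame (made-up values, not the exercise data) so the blank cells below remain an exercise.

```python
import pandas as pd

toy = pd.DataFrame(
    {'n_bedrooms': [3, 4, 2], 'size': [120, 200, 80]},
    index=['A', 'B', 'C']          # row labels
)

print(toy.loc['B', 'size'])        # by labels -> 200
print(toy.iloc[1, 1])              # by integer positions -> 200
print(toy.loc[['A', 'C'], :])      # rows A and C, all columns
print(toy.iloc[0:2, 0])            # first two rows, first column
```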
[
[
"# Resetear índice en el lugar\n",
"_____no_output_____"
],
[
"# Reasignar índice alfabético\n",
"_____no_output_____"
]
],
[
[
"Ahora sí.\n\nSeleccionemos la primer casa con ambos métodos:",
"_____no_output_____"
]
],
[
[
"# Primer casa con loc\n",
"_____no_output_____"
],
[
"# Primer casa con iloc\n",
"_____no_output_____"
]
],
[
[
"Ahora, seleccionemos las casas A y C con ambos métodos:",
"_____no_output_____"
]
],
[
[
"# Casas A y C con loc\n",
"_____no_output_____"
],
[
"# Casas A y C con iloc\n",
"_____no_output_____"
]
],
[
[
"Ahora, de las casas B y E, queremos sus tamaños y sus números de recámaras:",
"_____no_output_____"
]
],
[
[
"# loc\n",
"_____no_output_____"
],
[
"# iloc\n",
"_____no_output_____"
]
],
[
[
"Ahora, queremos solo los tamaños y los precios, pero de todas las casas:",
"_____no_output_____"
]
],
[
[
"# loc\n",
"_____no_output_____"
],
[
"# iloc\n",
"_____no_output_____"
]
],
[
[
"¿Qué tal? Ya tenemos varias formas de seleccionar e indexar ciertos datos.\n\nEsto es, sin duda, muy útil. Por otra parte, muchas veces queremos obtener cierta información (clientes, en nuestro ejemplo) que cumplan algunos requisitos. Por ejemplo:\n- que sean mayores de 18 años, o\n- que su antiguedad en la plataforma sea menor a seis meses, o\n- que residan en cierta zona,\n- entre otros.\n\nPara ello utilizamos los operadores de comparación (==, >, <, >=, <=, !=).",
"_____no_output_____"
],
[
"___\n# 3. Filtrado de datos\n\nLos operadores de comparación pueden ser utilizados con pandas. \n\nEsto resulta ser súper útil para filtrar datos con ciertas condiciones específicas (esto lo veremos enseguida). ",
"_____no_output_____"
],
[
"**Ejemplos**\n\n¿Cuáles casas tienen más de 3 recámaras?",
"_____no_output_____"
],
[
"¿Cuáles casas valen menos de $\\$300,000$?",
"_____no_output_____"
],
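A small generic illustration of boolean filtering with the comparison operators mentioned above. It uses a toy frame with made-up prices so the questions for the exercise dataset are not answered directly.

```python
import pandas as pd

toy = pd.DataFrame({'n_bedrooms': [3, 4, 2, 5],
                    'price': [250000, 320000, 180000, 410000]})

# Boolean masks built with comparison operators select the matching rows.
print(toy[toy['n_bedrooms'] > 3])
print(toy[toy['price'] < 300000])

# Conditions combine with & (and) / | (or); wrap each condition in parentheses.
print(toy[(toy['n_bedrooms'] > 3) & (toy['price'] < 400000)])
```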
[
"___\n# 4. Propiedades estadísticas de las variables",
"_____no_output_____"
],
[
"En este caso tenemos nada más 10 registros:",
"_____no_output_____"
],
[
"y es fácil explorar las variables simplemente viéndolas una por una, registro por registro:",
"_____no_output_____"
],
[
"Sin embargo, será común tener cantidades de registros en los órdenes de cientos de miles, millones y muchísimos más. En estos casos, deja de ser viable llevar a cabo un entendimiento de estas variables por medio de inspección visual directa. \n\nEs mucho más conveniente resumir las variables por medio de medidas estadísticas como la media, mediana, desviación estándar y cuartiles:",
"_____no_output_____"
]
],
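For reference, the summary statistics described above map to one-line pandas calls. The snippet below uses a toy Series with made-up prices; the exercise cells that follow are intentionally left blank.

```python
import pandas as pd

prices = pd.Series([250000, 320000, 180000, 410000, 290000])

print(prices.mean())      # media
print(prices.median())    # mediana
print(prices.std())       # desviación estándar
print(prices.describe())  # resumen: count, mean, std, min, cuartiles, max
```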
[
[
"# Media\n",
"_____no_output_____"
],
[
"# Mediana\n",
"_____no_output_____"
],
[
"# Desviación estándar\n",
"_____no_output_____"
],
[
"# Resumen general\n",
"_____no_output_____"
]
],
[
[
"Incluso, `pandas` nos facilita la visualización de estas propiedades mediante histogramas:",
"_____no_output_____"
],
[
"# Hasta acá la sesión de hoy, la siguiente clase seguimos.\n\n## [Tutorial de pandas](https://pandas.pydata.org/pandas-docs/stable/getting_started/intro_tutorials/06_calculate_statistics.html)\n\n## [Artículo recomendado](https://medium.com/dunder-data/minimally-sufficient-pandas-a8e67f2a2428)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e74a5b941710c6d42823e78c10aab7787a5d7555 | 134,589 | ipynb | Jupyter Notebook | notebooks/1k3f_pc-saft_params.ipynb | andylitalo/g-adsa | b6c3b903796ff7751301ab793f93af14ddaff8b1 | [
"MIT"
] | null | null | null | notebooks/1k3f_pc-saft_params.ipynb | andylitalo/g-adsa | b6c3b903796ff7751301ab793f93af14ddaff8b1 | [
"MIT"
] | null | null | null | notebooks/1k3f_pc-saft_params.ipynb | andylitalo/g-adsa | b6c3b903796ff7751301ab793f93af14ddaff8b1 | [
"MIT"
] | null | null | null | 419.280374 | 46,952 | 0.937031 | [
[
[
"# Guessing the Parameters for a PC-SAFT Model of 1k3f (VORATEC SD 301) Polyol\n\nBegun July 24, 2021 to produce plots for ICTAM 2020+1.\n\n**NOTE: ALL CALCULATIONS SHOWN HERE USE N = 41, BUT TO BE CONSISTENT WITH N = 123 FOR 3K2F (2700 G/MOL), SHOULD USE N = 45 (~123/2.7)**",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport dataproc\nimport plot\n\nfrom importlib import reload\n\n# System parameters\n# molecular weight of CO2\nmw_co2 = 44.01\n# conversion of m3 per mL\nm3_per_mL = 1E-6\n# Save plots?\nsave_plots = True\n# file path to saved data\ndata_folder = '../g-adsa_results/'\n# csv data files\ncsv_file_list = ['1k3f_30c', '1k3f_60c']",
"_____no_output_____"
]
],
[
[
"Loads data into dictionary.",
"_____no_output_____"
]
],
[
[
"d = dataproc.load_proc_data(csv_file_list, data_folder)",
"_____no_output_____"
]
],
[
[
"Compare results of prediction with guessed PC-SAFT parameters to actual data.",
"_____no_output_____"
],
[
"### Solubility",
"_____no_output_____"
]
],
[
[
"tk_fs = 18\nax_fs = 20\n\n\n# folder of csv data files showing sensitivity of DFT predictions to PC-SAFT parameters\ndft_sensitivity_folder = 'dft_pred//1k3f_30c_sensitivity//'\n\n# loads dft predictions into a similarly structured dictionary\nd_dft = dataproc.load_dft(dft_sensitivity_folder)\n\nfig = plt.figure(figsize=(12, 4))\nax = fig.add_subplot(111)\n# plots interfacial tension for 30 C\nax = plot.sensitivity_manual(d, d_dft, '1k3f_30c', 'solub', 'sigma', 3.17, data_folder, '', ['epsn_233-0~sigma_3-01'],\n color='#1181B3', ms=10, m_ads='o', m_des='o', lw=4, ax=ax)\n# folder of csv data files showing sensitivity of DFT predictions to PC-SAFT parameters\ndft_sensitivity_folder = 'dft_pred//1k3f_60c_sensitivity//'\n\n# loads dft predictions into a similarly structured dictionary\nd_dft = dataproc.load_dft(dft_sensitivity_folder)\n\nax = plot.sensitivity_manual(d, d_dft, '1k3f_60c', 'solub', 'sigma', 3.17, data_folder, '', ['epsn_233-0~sigma_3-01'],\n color='#B74A0D', ms=10, m_ads='o', m_des='o', lw=4, ax=ax)\nax.tick_params(labelsize=tk_fs)\nax.set_title('')\nax.set_xlabel(ax.xaxis.get_label().get_text(), fontsize=ax_fs)\nax.set_ylabel(ax.yaxis.get_label().get_text(), fontsize=ax_fs)",
"Analyzing dft_pred//1k3f_30c_sensitivity\\1k3f_30c.csv\nAnalyzing dft_pred//1k3f_30c_sensitivity\\epsn_229-3~sigma_3-01.csv\nAnalyzing dft_pred//1k3f_30c_sensitivity\\epsn_233-0~sigma_3-01.csv\nAnalyzing dft_pred//1k3f_30c_sensitivity\\epsn_263-0~sigma_3-17.csv\nAnalyzing dft_pred//1k3f_60c_sensitivity\\1k3f_60c.csv\nAnalyzing dft_pred//1k3f_60c_sensitivity\\epsn_229-3~sigma_3-01.csv\nAnalyzing dft_pred//1k3f_60c_sensitivity\\epsn_233-0~sigma_3-01.csv\nAnalyzing dft_pred//1k3f_60c_sensitivity\\epsn_263-0~sigma_3-17.csv\n"
]
],
[
[
"### Interfacial Tension",
"_____no_output_____"
]
],
[
[
"reload(dataproc)\nreload(plot)\n\nax_fs = 20\ntk_fs = 18\n\n# folder of csv data files showing sensitivity of DFT predictions to PC-SAFT parameters\ndft_sensitivity_folder = 'dft_pred//1k3f_30c_sensitivity//'\n\n# loads dft predictions into a similarly structured dictionary\nd_dft = dataproc.load_dft(dft_sensitivity_folder)\n\nfig = plt.figure(figsize=(12, 4))\nax = fig.add_subplot(111)\n# plots interfacial tension for 30 C\nax = plot.sensitivity_manual(d, d_dft, '1k3f_30c', 'if_tension', 'sigma', 3.17, data_folder, '', [],\n color='#1181B3', ms=10, m_ads='o', m_des='o', lw=4, ax=ax)\n\n\n# folder of csv data files showing sensitivity of DFT predictions to PC-SAFT parameters\ndft_sensitivity_folder = 'dft_pred//1k3f_60c_sensitivity//'\n\n# loads dft predictions into a similarly structured dictionary\nd_dft = dataproc.load_dft(dft_sensitivity_folder)\n\nax = plot.sensitivity_manual(d, d_dft, '1k3f_60c', 'if_tension', 'sigma', 3.17, data_folder, '', [],\n color='#B74A0D', ms=10, m_ads='o', m_des='o', lw=4, ax=ax)\nax.tick_params(labelsize=tk_fs)\nax.set_title('')\nax.set_xlabel(ax.xaxis.get_label().get_text(), fontsize=ax_fs)\nax.set_ylabel(ax.yaxis.get_label().get_text(), fontsize=ax_fs)",
"_____no_output_____"
]
],
[
[
"### Specific Volume",
"_____no_output_____"
]
],
[
[
"# folder of csv data files showing sensitivity of DFT predictions to PC-SAFT parameters\ndft_sensitivity_folder = 'dft_pred//1k3f_30c_sensitivity//'\n\n# loads dft predictions into a similarly structured dictionary\nd_dft = dataproc.load_dft(dft_sensitivity_folder)\n\nfig = plt.figure(figsize=(12, 4))\nax = fig.add_subplot(111)\n# plots interfacial tension for 30 C\nax = plot.sensitivity_manual(d, d_dft, '1k3f_30c', 'spec_vol', 'sigma', 3.17, data_folder, '', ['epsn_233-0~sigma_3-01'],\n color='#1181B3', ms=10, m_ads='o', m_des='o', lw=4, ax=ax)\n# folder of csv data files showing sensitivity of DFT predictions to PC-SAFT parameters\ndft_sensitivity_folder = 'dft_pred//1k3f_60c_sensitivity//'\n\n# loads dft predictions into a similarly structured dictionary\nd_dft = dataproc.load_dft(dft_sensitivity_folder)\n\nax = plot.sensitivity_manual(d, d_dft, '1k3f_60c', 'spec_vol', 'sigma', 3.17, data_folder, '', [],\n color='#B74A0D', ms=10, m_ads='o', m_des='o', lw=4, ax=ax)\nax.tick_params(labelsize=tk_fs)\nax.set_title('')\nax.set_xlabel(ax.xaxis.get_label().get_text(), fontsize=ax_fs)\nax.set_ylabel(ax.yaxis.get_label().get_text(), fontsize=ax_fs)",
"Analyzing dft_pred//1k3f_30c_sensitivity\\1k3f_30c.csv\nAnalyzing dft_pred//1k3f_30c_sensitivity\\epsn_229-3~sigma_3-01.csv\nAnalyzing dft_pred//1k3f_30c_sensitivity\\epsn_233-0~sigma_3-01.csv\nAnalyzing dft_pred//1k3f_30c_sensitivity\\epsn_263-0~sigma_3-17.csv\nAnalyzing dft_pred//1k3f_60c_sensitivity\\1k3f_60c.csv\nAnalyzing dft_pred//1k3f_60c_sensitivity\\epsn_229-3~sigma_3-01.csv\nAnalyzing dft_pred//1k3f_60c_sensitivity\\epsn_233-0~sigma_3-01.csv\nAnalyzing dft_pred//1k3f_60c_sensitivity\\epsn_263-0~sigma_3-17.csv\n"
]
],
[
[
"Repeat plots for 60 C.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74a60b59d14ad1f9aed3530446fed6c8616cc71 | 2,598 | ipynb | Jupyter Notebook | Assignments/Applied Machine Learning for Analytics/HW4/Untitled.ipynb | oliviapy960825/oliviapy960825.github.io | 7a07fd0887e5854b0b92e4cc8e20ff1fd2219fde | [
"CC-BY-3.0"
] | null | null | null | Assignments/Applied Machine Learning for Analytics/HW4/Untitled.ipynb | oliviapy960825/oliviapy960825.github.io | 7a07fd0887e5854b0b92e4cc8e20ff1fd2219fde | [
"CC-BY-3.0"
] | null | null | null | Assignments/Applied Machine Learning for Analytics/HW4/Untitled.ipynb | oliviapy960825/oliviapy960825.github.io | 7a07fd0887e5854b0b92e4cc8e20ff1fd2219fde | [
"CC-BY-3.0"
] | null | null | null | 29.191011 | 628 | 0.553118 | [
[
[
"import pandas as pd\nfrom sklearn.utils import shuffle\ndf=pd.read_csv(\"002_duplicate.csv\")\n#print(df['text'].iloc[5])\nprint(df['text'].iloc[77])\n\"\"\"df=shuffle(df)\nprint(df)\"\"\"",
"RT @nurse_writer: @lifebiomedguru @Stutzy6 In one study 55% of children had upper respiratory infections following the vaccine and 40% had…\n"
],
[
"import emoji\nfor i in df['text']:\n print(i)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e74a74d2e7a496c17d069d21bd6e8d9a5a3ab32e | 14,678 | ipynb | Jupyter Notebook | aulas/salvando-img-previsao-do-tempo/codigos/parte_2.ipynb | eddyrodrigues/cursou.github.io | ec98e254a172939844783a1fcabeea57cdba6b8e | [
"MIT"
] | null | null | null | aulas/salvando-img-previsao-do-tempo/codigos/parte_2.ipynb | eddyrodrigues/cursou.github.io | ec98e254a172939844783a1fcabeea57cdba6b8e | [
"MIT"
] | null | null | null | aulas/salvando-img-previsao-do-tempo/codigos/parte_2.ipynb | eddyrodrigues/cursou.github.io | ec98e254a172939844783a1fcabeea57cdba6b8e | [
"MIT"
] | null | null | null | 25.350604 | 727 | 0.545919 | [
[
[
"## Importações básicas",
"_____no_output_____"
]
],
[
[
"import requests\nfrom bs4 import BeautifulSoup\nfrom PIL import Image\nfrom io import BytesIO\n",
"_____no_output_____"
]
],
[
[
"## Início código",
"_____no_output_____"
]
],
[
[
"wblink='https://www.weather.gov/okx/winter#tab-2'\nwblink2='https://www.weather.gov'",
"_____no_output_____"
],
[
"req=requests.get(wblink)",
"_____no_output_____"
],
[
"html_code = BeautifulSoup(req.text, 'html.parser')",
"_____no_output_____"
],
[
"#help(html_code)\n",
"_____no_output_____"
],
[
"html_code.find_all(id=\"stsImg\", limit=1)",
"_____no_output_____"
],
[
"html_code.find_all(id=\"stsImg\", limit=1)[0].attrs['src']",
"_____no_output_____"
],
[
"link_img = html_code.find_all(id=\"stsImg\", limit=1)[0].attrs['src']\nwb_final_img = wblink2+link_img",
"_____no_output_____"
],
[
"wb_final_img",
"_____no_output_____"
],
[
"img_req = requests.get(wb_final_img)",
"_____no_output_____"
],
[
"img_req.status_code",
"_____no_output_____"
],
[
"img_bytes = img_req.content",
"_____no_output_____"
],
[
"tipos = ['jpg', 'png', 'PNG', 'JPG', 'JPEG', 'jpeg']\ntipo_final = ''\nfor tipo in tipos:\n if(tipo in str(img_bytes)):\n tipo_final = tipo\ntipo_final",
"_____no_output_____"
],
[
"arquivo = 'previsao_neve.'+tipo_final\narquivo_previsao = open(arquivo, 'wb+')",
"_____no_output_____"
],
[
"arquivo_previsao.write((img_bytes))",
"_____no_output_____"
],
[
"arquivo_previsao.close()",
"_____no_output_____"
]
],
[
[
"## Comparando imagens",
"_____no_output_____"
]
],
[
[
"arquivo_temp = open(\"old_forecast1.PNG\", \"rb\")\nD1 = arquivo_temp.read()",
"_____no_output_____"
],
[
"arquivo2 = open('previsao_neve.png', 'rb')\nD2 = arquivo2.read()",
"_____no_output_____"
],
[
"\nif D1 == img_bytes:\n print(\"True\")\n \nelse:\n print(\"False\")\n",
"True\n"
],
[
"if (D1 == img_bytes):\n print(\"Iguais.\")\nelse:\n print(\"Não são iguais.\")",
"Iguais.\n"
],
[
"arquivo2.close()\narquivo_temp.close()",
"_____no_output_____"
]
],
[
[
"## Codigo Novo com intervalo",
"_____no_output_____"
]
],
[
[
"import time\n\nwhile(True):\n img_req = requests.get(wb_final_img)\n img_bytes = img_req.content\n tipos = ['jpg', 'png', 'PNG', 'JPG', 'JPEG', 'jpeg']\n tipo_final = ''\n for tipo in tipos:\n if(tipo in str(img_bytes)):\n tipo_final = tipo\n tipo_final\n print(\"tipo do arquivo_baixado\", tipo_final)\n arq_pc = open(\"old_forecast1.PNG\", \"rb\")\n arq_pc_bytes = arq_pc.read()\n arq_pc.close()\n if(img_bytes != arq_pc_bytes):\n arq_pc = open(\"old_forecast1.PNG\", \"wb\")\n arq_pc.write(img_bytes)\n arq_pc.close()\n print(\"ImgSalva\")\n print(\"Reiniciando o código\")\n time.sleep(5)",
"tipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado 
PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\ntipo do arquivo_baixado PNG\nReiniciando o código\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74a74fb8e4bf5437ee545eb6eda7bf4ffce3ae1 | 64,812 | ipynb | Jupyter Notebook | Notebooks/Week-2/CS6134_Exercise_2_3.ipynb | EnriqueNaredoGarcia/UL-CS6134 | 8187a592cdad2d21c24a029cddac755299d0d041 | [
"BSD-3-Clause"
] | null | null | null | Notebooks/Week-2/CS6134_Exercise_2_3.ipynb | EnriqueNaredoGarcia/UL-CS6134 | 8187a592cdad2d21c24a029cddac755299d0d041 | [
"BSD-3-Clause"
] | null | null | null | Notebooks/Week-2/CS6134_Exercise_2_3.ipynb | EnriqueNaredoGarcia/UL-CS6134 | 8187a592cdad2d21c24a029cddac755299d0d041 | [
"BSD-3-Clause"
] | null | null | null | 88.783562 | 14,306 | 0.772187 | [
[
[
"<div>\n<img src=\"https://drive.google.com/uc?export=view&id=1vK33e_EqaHgBHcbRV_m38hx6IkG0blK_\" width=\"350\"/>\n</div> \n\n#**Artificial Intelligence - MSc**\nCS6134 - MACHINE LEARNING APPLICATIONS \n\n###Instructor: Enrique Naredo\n###CS6134_Exercise_2.3",
"_____no_output_____"
],
[
"## Imports",
"_____no_output_____"
]
],
[
[
"# import libraries\nfrom sklearn.linear_model import LogisticRegression\nfrom pandas import DataFrame\nfrom mlxtend.plotting import plot_decision_regions\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"## Real Dataset",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\n\n# Loading some example data\niris = datasets.load_iris()\nX2 = iris.data[:, [0, 2]]\ny2 = iris.target",
"_____no_output_____"
],
[
"# create a data frame\ndf = DataFrame(dict(x=X2[:,0], y=X2[:,1], label=y2))\n\n# three classes: 'cyan', 'brown', 2:'yellow'\ncolors = \n\n# figure\nfig, ax = \ngrouped = .groupby('label')\n\n# scatter plot\n\n\n# show the plot\nplt.show()",
"_____no_output_____"
],
[
"# show first 15 rows\ndf.head()",
"_____no_output_____"
],
[
"# count the data elements for each class\ndf['label'].",
"_____no_output_____"
]
],
[
[
"## Training & Test Data",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\n\n# training: 70%-30% \nX_train,X_test,y_train,y_test =\n",
"_____no_output_____"
],
[
"X_train[0:10]",
"_____no_output_____"
]
],
[
[
"## Logistic Regresion",
"_____no_output_____"
],
[
"* [Logistic regression](https://en.wikipedia.org/wiki/Logistic_regression) is a statistical model that in its basic form uses a logistic function to model a binary dependent variable, although many more complex extensions exist. \n* In regression analysis, logistic regression (or logit regression) is estimating the parameters of a logistic model (a form of binary regression). \n\n\n",
"_____no_output_____"
]
],
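As a reference for the description above, a minimal end-to-end sketch of fitting scikit-learn's `LogisticRegression` on two iris features. Variable names here (`X_tr`, `clf`, the 70/30 split, `random_state=0`) are illustrative assumptions and independent of the notebook's intentionally blank exercise cells below.

```python
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

iris = datasets.load_iris()
X = iris.data[:, [0, 2]]          # sepal length, petal length
y = iris.target

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)

clf = LogisticRegression()
clf.fit(X_tr, y_tr)
print(clf.score(X_te, y_te))      # mean accuracy on the held-out 30%
```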
[
[
"# Logistic Regression model\nLR_model = LogisticRegression()\n\n# fit to data a Logistic Regresion model\nLR_model.fit()",
"_____no_output_____"
]
],
[
[
"Decision boundary from [Wikipedia](https://en.wikipedia.org/wiki/Decision_boundary): \n\n* A decision boundary is the region of a problem space in \nwhich the output label of a classifier is ambiguous.\n\n* If the decision surface is a hyperplane, then the classification problem is linear, and the classes are linearly separable.\n\n* In a statistical-classification problem with two classes, a decision boundary or decision surface is a hypersurface that partitions the underlying vector space into two sets, one for each class.\n\n* You can find a related information to this topic in this [publication](https://towardsdatascience.com/classification-problems-and-exploring-decision-boundaries-3317e03afcdb)\n\n\n",
"_____no_output_____"
]
],
[
[
"# Plotting the decision boundary \n# from the LogisticRegression model\nplot_decision_regions()\n",
"/usr/local/lib/python3.7/dist-packages/mlxtend/plotting/decision_regions.py:244: MatplotlibDeprecationWarning: Passing unsupported keyword arguments to axis() will raise a TypeError in 3.3.\n ax.axis(xmin=xx.min(), xmax=xx.max(), y_min=yy.min(), y_max=yy.max())\n"
]
],
[
[
"## Predictions",
"_____no_output_____"
]
],
[
[
"# make predictions (assign class labels)\ny_pred = \n\n# show the inputs and predicted outputs\nfor i in range(len()):\n ",
"X0 = [5.8 5.1], Class Predicted = 2\nX1 = [6.8 5.9], Class Predicted = 2\nX2 = [7.7 6.7], Class Predicted = 2\nX3 = [4.6 1.4], Class Predicted = 0\nX4 = [7.9 6.4], Class Predicted = 2\nX5 = [5.1 1.4], Class Predicted = 0\nX6 = [6. 4.], Class Predicted = 1\nX7 = [5.4 1.7], Class Predicted = 0\nX8 = [5.7 4.5], Class Predicted = 1\nX9 = [5.7 5. ], Class Predicted = 2\nX10 = [7.2 5.8], Class Predicted = 2\nX11 = [6.7 5. ], Class Predicted = 2\nX12 = [4.9 1.5], Class Predicted = 0\nX13 = [6.4 5.3], Class Predicted = 2\nX14 = [5.7 1.7], Class Predicted = 0\nX15 = [4.9 4.5], Class Predicted = 1\nX16 = [6.1 5.6], Class Predicted = 2\nX17 = [6.1 4. ], Class Predicted = 1\nX18 = [5.6 4.1], Class Predicted = 1\nX19 = [5.1 1.9], Class Predicted = 0\nX20 = [5.6 4.5], Class Predicted = 1\nX21 = [6.4 5.6], Class Predicted = 2\nX22 = [5. 3.3], Class Predicted = 1\nX23 = [5.9 4.8], Class Predicted = 1\nX24 = [5.6 3.6], Class Predicted = 1\nX25 = [6.1 4.6], Class Predicted = 1\nX26 = [5.1 1.7], Class Predicted = 0\nX27 = [6. 4.8], Class Predicted = 1\nX28 = [6.5 5.2], Class Predicted = 2\nX29 = [7.2 6. ], Class Predicted = 2\n"
],
[
"# create a data frame\ndf_new = DataFrame()",
"_____no_output_____"
],
[
"# show 12 rows \ndf_new.head(_)",
"_____no_output_____"
],
[
"# three classes: 'red', 'blue', 'green'\n\n# figure\nfig2, ax2 \n\n# new data\ngrouped = df_new.groupby()\n\n# scatter plot\nfor key2, group2 in :\n\n\n# show the plot\nplt.show()",
"_____no_output_____"
],
[
"# Plotting the new decision boundary \n# from the LogisticRegression model\nplot_decision_regions(X_test, y_pred, clf= , legend= )",
"/usr/local/lib/python3.7/dist-packages/mlxtend/plotting/decision_regions.py:244: MatplotlibDeprecationWarning: Passing unsupported keyword arguments to axis() will raise a TypeError in 3.3.\n ax.axis(xmin=xx.min(), xmax=xx.max(), y_min=yy.min(), y_max=yy.max())\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e74a77eff19785dcb8c583e4f0f51dcf1c6834c5 | 30,553 | ipynb | Jupyter Notebook | 06_Stats/US_Baby_Names/Exercises.ipynb | Gioparra91/Pandas-exercise | 85cc6f34055fbd36959f1799c748dcddf722c5da | [
"BSD-3-Clause"
] | null | null | null | 06_Stats/US_Baby_Names/Exercises.ipynb | Gioparra91/Pandas-exercise | 85cc6f34055fbd36959f1799c748dcddf722c5da | [
"BSD-3-Clause"
] | null | null | null | 06_Stats/US_Baby_Names/Exercises.ipynb | Gioparra91/Pandas-exercise | 85cc6f34055fbd36959f1799c748dcddf722c5da | [
"BSD-3-Clause"
] | null | null | null | 26.521701 | 175 | 0.320885 | [
[
[
"# US - Baby Names",
"_____no_output_____"
],
[
"### Introduction:\n\nWe are going to use a subset of [US Baby Names](https://www.kaggle.com/kaggle/us-baby-names) from Kaggle. \nIn the file it will be names from 2004 until 2014\n\n\n### Step 1. Import the necessary libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv). ",
"_____no_output_____"
],
[
"### Step 3. Assign it to a variable called baby_names.",
"_____no_output_____"
]
],
[
[
"baby_names = pd.read_csv(\"https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv\")",
"_____no_output_____"
],
[
"baby_names.tail()",
"_____no_output_____"
]
],
[
[
"### Step 4. See the first 10 entries",
"_____no_output_____"
]
],
[
[
"baby_names.head(10)",
"_____no_output_____"
]
],
[
[
"### Step 5. Delete the column 'Unnamed: 0' and 'Id'",
"_____no_output_____"
]
],
[
[
"#del baby_names['Unnamed: 0']\ndel baby_names['Id']",
"_____no_output_____"
]
],
[
[
"### Step 6. Is there more male or female names in the dataset?",
"_____no_output_____"
]
],
[
[
"baby_names[\"Gender_n\"] = baby_names.Gender.map({\"F\":0,\"M\":1})\nmale = baby_names[\"Gender_n\"].sum()\nperc_male = male/len(baby_names.Gender)\nperc_male # more female",
"_____no_output_____"
]
],
[
[
"### Step 7. Group the dataset by name and assign to names",
"_____no_output_____"
]
],
[
[
"del baby_names[\"Year\"]\nnames = baby_names.groupby(by=\"Name\").sum()\nnames.sort_values(\"Count\", ascending = 0).head()",
"_____no_output_____"
]
],
[
[
"### Step 8. How many different names exist in the dataset?",
"_____no_output_____"
]
],
[
[
"len(names)",
"_____no_output_____"
]
],
[
[
"### Step 9. What is the name with most occurrences?",
"_____no_output_____"
]
],
[
[
"names.Count.idxmax()",
"_____no_output_____"
]
],
[
[
"### Step 10. How many different names have the least occurrences?",
"_____no_output_____"
]
],
[
[
"len(names[names.Count == names.Count.min()])",
"_____no_output_____"
]
],
[
[
"### Step 11. What is the median name occurrence?",
"_____no_output_____"
]
],
[
[
"names[names.Count == names.Count.median()]",
"_____no_output_____"
]
],
[
[
"### Step 12. What is the standard deviation of names?",
"_____no_output_____"
]
],
[
[
"names.Count.std()",
"_____no_output_____"
]
],
[
[
"### Step 13. Get a summary with the mean, min, max, std and quartiles.",
"_____no_output_____"
]
],
[
[
"names.describe()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74a8915651af2ad3dcb740372c322cb82b34aa5 | 2,534 | ipynb | Jupyter Notebook | pkgs/bokeh-0.11.1-py27_0/Examples/bokeh/plotting/notebook/color_scatterplot.ipynb | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/bokeh-0.11.1-py27_0/Examples/bokeh/plotting/notebook/color_scatterplot.ipynb | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/bokeh-0.11.1-py27_0/Examples/bokeh/plotting/notebook/color_scatterplot.ipynb | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | 19.492308 | 117 | 0.529597 | [
[
[
"This IPython Notebook contains simple examples of the line function. \n\nTo clear all previously rendered cell outputs, select from the menu:\n\n Cell -> All Output -> Clear",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom six.moves import zip",
"_____no_output_____"
],
[
"from bokeh.plotting import figure, show, output_notebook",
"_____no_output_____"
],
[
"N = 4000",
"_____no_output_____"
],
[
"x = np.random.random(size=N) * 100\ny = np.random.random(size=N) * 100\nradii = np.random.random(size=N) * 1.5\ncolors = [\"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)]",
"_____no_output_____"
],
[
"output_notebook()",
"_____no_output_____"
],
[
"TOOLS=\"resize,crosshair,pan,wheel_zoom,box_zoom,reset,tap,previewsave,box_select,poly_select,lasso_select\"\n\np = figure(tools=TOOLS)\np.scatter(x,y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)",
"_____no_output_____"
],
[
"show(p)",
"_____no_output_____"
]
]
] | [
"raw",
"code"
] | [
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74a936a34a437ef029b041398a44e793fe223cd | 25,506 | ipynb | Jupyter Notebook | velocity_control_PID.ipynb | dchatterjee/control-systems-playbook | 98ba63da2a382adfdd54f76ece1ecebd08025ee6 | [
"MIT"
] | 3 | 2022-01-15T19:43:31.000Z | 2022-01-17T14:55:41.000Z | velocity_control_PID.ipynb | dchatterjee/control-systems-workspace | 98ba63da2a382adfdd54f76ece1ecebd08025ee6 | [
"MIT"
] | null | null | null | velocity_control_PID.ipynb | dchatterjee/control-systems-workspace | 98ba63da2a382adfdd54f76ece1ecebd08025ee6 | [
"MIT"
] | null | null | null | 108.53617 | 18,810 | 0.817023 | [
[
[
"<a href=\"https://colab.research.google.com/github/dchatterjee/control.vehicle-control/blob/main/velocity_control_PID.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\nfrom IPython import display",
"_____no_output_____"
],
[
"animate=True",
"_____no_output_____"
],
[
"def vehicle(v,t,u,load):\n # inputs\n # v = vehicle velocity (m/s)\n # t = time (sec)\n # u = gas pedal position (-50% to 100%)\n # load = passenger load + cargo (kg)\n Cd = 0.24 # drag coefficient\n rho = 1.225 # air density (kg/m^3)\n A = 5.0 # cross-sectional area (m^2)\n Fp = 30 # thrust parameter (N/%pedal)\n m = 500 # vehicle mass (kg)\n # calculate derivative of the velocity\n dv_dt = (1.0/(m+load)) * (Fp*u - 0.5*rho*Cd*A*v**2)\n return dv_dt",
"_____no_output_____"
],
[
"tf = 60.0 # final time for simulation\nnsteps = 61 # number of time steps\ndelta_t = tf/(nsteps-1) # how long is each time step?\nts = np.linspace(0,tf,nsteps) # linearly spaced time vector",
"_____no_output_____"
],
[
"# simulate step test operation\nstep = np.zeros(nsteps) # u = valve % open\nstep[11:] = 50.0 # step up pedal position\n# passenger(s) + cargo load\nload = 200.0 # kg\n# velocity initial condition\nv0 = 0.0\n# set point\nsp = 25.0\n# for storing the results\nvs = np.zeros(nsteps)\nsps = np.zeros(nsteps)",
"_____no_output_____"
],
[
"plt.figure(1,figsize=(5,4))\nif animate:\n plt.ion()\n plt.show()",
"_____no_output_____"
],
[
"# simulate with ODEINT\nfor i in range(nsteps-1):\n u = step[i]\n # clip inputs to -50% to 100%\n if u >= 100.0:\n u = 100.0\n if u <= -50.0:\n u = -50.0\n v = odeint(vehicle,v0,[0,delta_t],args=(u,load))\n v0 = v[-1] # take the last value\n vs[i+1] = v0 # store the velocity for plotting\n sps[i+1] = sp\n\n # plot results\n if animate:\n display.clear_output(wait=True)\n plt.clf()\n plt.subplot(2,1,1)\n plt.plot(ts[0:i+1],vs[0:i+1],'b-',linewidth=3)\n plt.plot(ts[0:i+1],sps[0:i+1],'k--',linewidth=2)\n plt.ylabel('Velocity (m/s)')\n plt.legend(['Velocity','Set Point'],loc=2)\n plt.subplot(2,1,2)\n plt.plot(ts[0:i+1],step[0:i+1],'r--',linewidth=3)\n plt.ylabel('Gas Pedal') \n plt.legend(['Gas Pedal (%)'])\n plt.xlabel('Time (sec)')\n plt.pause(0.1) ",
"_____no_output_____"
],
[
"if not animate:\n # plot results\n plt.subplot(2,1,1)\n plt.plot(ts,vs,'b-',linewidth=3)\n plt.plot(ts,sps,'k--',linewidth=2)\n plt.ylabel('Velocity (m/s)')\n plt.legend(['Velocity','Set Point'],loc=2)\n plt.subplot(2,1,2)\n plt.plot(ts,step,'r--',linewidth=3)\n plt.ylabel('Gas Pedal') \n plt.legend(['Gas Pedal (%)'])\n plt.xlabel('Time (sec)')\n plt.show()",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74a95b65f292259ad49ef56d2dae7932f9c1e81 | 9,857 | ipynb | Jupyter Notebook | pc_to_tex.ipynb | Kuwamai/pc_to_tex | 707a8c9872e744dd938d3d40d02b67143015aade | [
"MIT"
] | 1 | 2022-03-08T06:47:50.000Z | 2022-03-08T06:47:50.000Z | pc_to_tex.ipynb | Kuwamai/pc_to_tex | 707a8c9872e744dd938d3d40d02b67143015aade | [
"MIT"
] | null | null | null | pc_to_tex.ipynb | Kuwamai/pc_to_tex | 707a8c9872e744dd938d3d40d02b67143015aade | [
"MIT"
] | 1 | 2022-03-28T01:43:58.000Z | 2022-03-28T01:43:58.000Z | 29.600601 | 168 | 0.457746 | [
[
[
"# pc_to_tex\nPointCloudShaderに必要な、点群データをテクスチャに書き込むスクリプト\n\n[](https://colab.research.google.com/github/Kuwamai/pc_to_tex/blob/main/pc_to_tex.ipynb)",
"_____no_output_____"
],
[
"## 使い方\n### 準備\n1. こちらの記事を参考に点群データを用意する\n * [ソーシャルVRに点群を持ち込みたい - クワマイでもできる](https://kuwamai.hatenablog.com/entry/2020/12/17/013711)\n1. ↑の`Open in colab`をクリックしてGoogle colaboratolyで開く\n * 場合によってはページが開かないので右クリックして`新しいタブで開く`を選択する必要があるかも\n1. 画面上の`ファイル`タブをクリックし、`ドライブにコピーを保存`をクリック\n1. 用意した点群データもGoogle driveにアップ",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport struct\nfrom google.colab import drive\ndrive.mount(\"/content/drive\")",
"_____no_output_____"
]
],
[
[
"### 設定\n1. 下記変数の`file_path`に、アップしたGoogle drive上のパスを記入する\n * 例えば`マイドライブ`に`pc_to_tex`フォルダを作った場合は`drive/My Drive/pc_to_tex/`になる\n1. `pc_name`にアップした点群のファイル名を記入する\n1. `column_names`にそれぞれの列の名称$(x, y, z, r, g, b)$を記入する\n * 記事と同じ手順なら変更の必要なし\n * 座標系が異なる、法線など他のデータが含まれる際は適宜編集する\n1. `tex_width`に生成するテクスチャの幅を記入\n * 記入した値の2乗の数の点が1枚のテクスチャに保存される\n * 表示に使うメッシュのポリゴン数と合わせる必要があるので、わからなければ1024のままで大丈夫\n1. 点群ファイルの最初に点群取得位置などが含まれる場合は`skip_rows`に飛ばす行数を指定\n1. `center_pos`に点群の中心にしたい位置を記入\n * $(x, y, z)$の要素の順番は`column_names`で指定したものと対応させる\n1. 画面上の`ランタイム/すべてのセルを実行`をクリック",
"_____no_output_____"
]
],
[
[
"file_path = \"drive/My Drive/pc_to_tex/\"\npc_name = \"ShibuyaUnderground.asc\"\ncolumn_names = (\"x\", \"z\", \"y\", \"ignore\", \"r\", \"g\", \"b\")\ntex_width = 1024\nskiprows = 1\ncenter_pos = pd.DataFrame([(-11830.2, -37856, 3.82242)], columns=[\"x\", \"z\", \"y\"])\npc = pd.read_table(file_path + pc_name, sep=\" \", header=None, names=column_names, skiprows=skiprows)\n\nprint(\"↓データとヘッダー名が合ってるか確認\")\npc.head()",
"↓データとヘッダー名が合ってるか確認\n"
]
],
[
[
"## 点位置をテクスチャに書き込む",
"_____no_output_____"
]
],
[
[
"def save_tex(r, c, tex_width, tex_num):\n pos_tex = np.pad(c, ((0, tex_width * 2 - c.shape[0]), (0,0), (0,0)), \"constant\")\n pos_tex = Image.fromarray(np.uint8(np.round(pos_tex)))\n pos_tex.save(file_path + \"pos\" + str(tex_num) + \".png\")",
"_____no_output_____"
],
[
"tex_num = 0\npc[[\"x\", \"y\", \"z\"]] = pc[[\"x\", \"y\", \"z\"]] - center_pos[[\"x\", \"y\", \"z\"]].values\n\nfor i, pos in enumerate(zip(pc[\"x\"], pc[\"y\"], pc[\"z\"])):\n if i % tex_width ** 2 == 0:\n if not i == 0:\n save_tex(r, c, tex_width, tex_num)\n tex_num += 1\n \n r = np.empty((2, 0, 3))\n c = np.empty((0, tex_width * 2, 3))\n \n if i % tex_width * 2 == 0:\n if not i % tex_width ** 2== 0:\n c = np.append(c, r, axis=0)\n r = np.empty((2, 0, 3))\n \n a = np.empty((2, 2, 0))\n \n for xyz in pos:\n xs = struct.pack('>f', xyz)\n xn = struct.unpack('>L', xs)[0]\n b = np.array([[[xn >> 0 & 0xff],[xn >> 8 & 0xff]],\n [[xn >> 16 & 0xff],[xn >> 24 & 0xff]]])\n a = np.append(a, b, axis=2)\n \n r = np.append(r, a, axis=1)\n\nif r.shape[1] > 0:\n r = np.pad(r, ((0,0),(0,tex_width * 2 - r.shape[1]),(0,0)), \"constant\")\n c = np.append(c, r, axis=0)\n\nsave_tex(r, c, tex_width, tex_num)",
"_____no_output_____"
]
],
[
[
"## 色をテクスチャに書き込む",
"_____no_output_____"
]
],
[
[
"cols = pc[[\"r\", \"g\", \"b\"]].values.reshape([-1, 1, 3])\ntex_num = np.ceil((len(cols)) / tex_width ** 2)\ntex_length = int(tex_num * (tex_width ** 2) - len(cols))\ncol_texs = np.pad(cols, ((0,tex_length),(0,0),(0,0)), \"constant\")\ncol_texs = np.array_split(col_texs, tex_num)\n\nfor i, tex in enumerate(col_texs):\n col_tex = np.reshape(tex, (tex_width, tex_width, 3))\n col_tex = Image.fromarray(np.uint8(np.round(col_tex)))\n col_tex.save(file_path + \"col\" + str(i) + \".png\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
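The position texture above stores each float32 coordinate as four 8-bit channel values via struct packing. A minimal round-trip sketch of that encoding (standard library only) makes the bit layout easier to follow; the helper names here are illustrative and not part of the notebook.

```python
import struct

def float_to_bytes(x):
    # reinterpret the float32 bit pattern as an unsigned 32-bit int (big-endian),
    # then split it into 4 bytes, least-significant byte first (matches the shifts above)
    n = struct.unpack('>L', struct.pack('>f', x))[0]
    return [(n >> shift) & 0xff for shift in (0, 8, 16, 24)]

def bytes_to_float(b):
    # inverse operation: reassemble the 32-bit pattern and reinterpret it as float32
    n = sum(byte << shift for byte, shift in zip(b, (0, 8, 16, 24)))
    return struct.unpack('>f', struct.pack('>L', n))[0]

print(bytes_to_float(float_to_bytes(12.34)))  # ~12.34, up to float32 precision
```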
e74aa6ffea7077ffb6ffda8f500fe9f77d05e62a | 133,813 | ipynb | Jupyter Notebook | examples/example_srtm_severalTacs_simulatedData.ipynb | bilgelm/kineticmodel | 1060f7bb3a140d3dac3679d88ab997084e5de8c7 | [
"MIT"
] | 4 | 2017-10-23T08:00:50.000Z | 2018-08-07T23:15:01.000Z | examples/example_srtm_severalTacs_simulatedData.ipynb | bilgelm/kineticmodel | 1060f7bb3a140d3dac3679d88ab997084e5de8c7 | [
"MIT"
] | null | null | null | examples/example_srtm_severalTacs_simulatedData.ipynb | bilgelm/kineticmodel | 1060f7bb3a140d3dac3679d88ab997084e5de8c7 | [
"MIT"
] | 3 | 2017-11-09T13:45:36.000Z | 2021-11-05T12:29:22.000Z | 239.808244 | 42,496 | 0.908223 | [
[
[
"from kineticmodel import SRTM_Zhou2003, SRTM_Lammertsma1996",
"_____no_output_____"
],
[
"import sys, os\nsys.path.insert(0,os.pardir)\nfrom tests.generate_test_data import generate_fakeTAC_SRTM",
"_____no_output_____"
],
[
"import numpy as np\nnp.random.seed(0)\n\nfrom scipy.ndimage import gaussian_filter\nimport matplotlib.pyplot as plt\n% matplotlib inline",
"_____no_output_____"
],
[
"# generate noiseless fake data based on SRTM\nBP = 0.5\nR1 = 1.2\nt, dt, TAC, refTAC = generate_fakeTAC_SRTM(BP, R1)\n\nnumFrames = len(t)\n\nfig, ax = plt.subplots();\nax.plot(t, TAC, label='TAC');\nax.plot(t, refTAC, label='Reference TAC');\nax.set_xlabel('t');\nax.set_ylabel('Activity');\nax.set_title('Simulated data');\nax.legend();",
"_____no_output_____"
],
[
"# Generate \"image\" data\n# Assume that entire \"image\" corresponds to a region with uniform BP and R1 values\n\nimdim = (5,5,5)\nnumVoxels = np.prod(imdim)\n\n# Generate noisy simulations by adding normal noise -- I don't think this is a good way\npct_noise = 1\n\nTAC_matrix = TAC + np.random.normal(0,np.outer(TAC,np.repeat(pct_noise, numVoxels)/100).T)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots();\nax.plot(t, TAC_matrix.T, label='');\nax.plot(t, TAC, 'k-', label='TAC');\nax.plot(t, refTAC, 'k--', label='Reference TAC');\nax.set_xlabel('t');\nax.set_ylabel('Activity');\nax.set_title('Simulated data');\nax.legend();",
"_____no_output_____"
],
[
"# Initialize SRTM Lammerstma 1996 model\nmdl_lammertsma = SRTM_Lammertsma1996(t, dt, TAC_matrix, refTAC, time_unit='min')\n\n# fit model\nmdl_lammertsma.fit();",
"_____no_output_____"
],
[
"# Initialize SRTM Zhou 2003 model\nmdl_zhou = SRTM_Zhou2003(t, dt, TAC_matrix, refTAC, time_unit='min')\n\nmdl_zhou.fit();",
"_____no_output_____"
],
[
"# we now take advantage of the spatial constraint capabilities of Zhou model\n\n# Initialize SRTM Zhou 2003 model\nmdl_zhou_spatial_constraint = SRTM_Zhou2003(t, dt, TAC_matrix, refTAC, time_unit='min')\n\n# we first reorganize the TAC data in a 4-D matrix and apply Gaussian smoothing to \n# each time frame\n\nTAC_img = np.reshape(TAC_matrix, (*imdim,numFrames))\n\nsigma = 5\n\nsmoothTAC_img = np.zeros_like(TAC_img)\nfor k in range(TAC_matrix.shape[-1]):\n smoothTAC_img[:,:,:,k] = gaussian_filter(TAC_img[:,:,:,k],sigma=sigma)\n\nsmoothTAC_matrix = np.reshape(smoothTAC_img, TAC_matrix.shape)\n\nmdl_zhou_spatial_constraint.fit(smoothTAC=smoothTAC_matrix);\n\n# Refine R1\nimg_R1 = np.reshape(mdl_zhou_spatial_constraint.results['R1'], imdim)\nimg_k2 = np.reshape(mdl_zhou_spatial_constraint.results['k2'], imdim)\nimg_k2a = np.reshape(mdl_zhou_spatial_constraint.results['k2a'], imdim)\nimg_noiseVar_eqR1 = np.reshape(mdl_zhou_spatial_constraint.results['noiseVar_eqR1'], imdim)\n\nsmooth_img_R1 = gaussian_filter(img_R1,sigma=sigma)\nsmooth_img_k2 = gaussian_filter(img_k2,sigma=sigma)\nsmooth_img_k2a = gaussian_filter(img_k2a,sigma=sigma)\n\nsmooth_R1 = smooth_img_R1.flatten()\nsmooth_k2 = smooth_img_k2.flatten()\nsmooth_k2a = smooth_img_k2a.flatten()\n\n# get h\nm = 3\nh = np.zeros((numVoxels, m))\nh[:,0] = gaussian_filter(m * img_noiseVar_eqR1 / np.square(img_R1 - smooth_img_R1),\n sigma=sigma).flatten()\nh[:,1] = gaussian_filter(m * img_noiseVar_eqR1 / np.square(img_k2 - smooth_img_k2),\n sigma=sigma).flatten()\nh[:,2] = gaussian_filter(m * img_noiseVar_eqR1 / np.square(img_k2a - smooth_img_k2a),\n sigma=sigma).flatten()\n\nmdl_zhou_spatial_constraint.refine_R1(smooth_R1,smooth_k2,smooth_k2a,h)",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(1,2, figsize=(10,4));\n\naxes[0].plot(mdl_lammertsma.results['BP'], '.', label='Lammertsma 1996');\naxes[0].plot(mdl_zhou.results['BP'], '.', label='Zhou 2003 w/o spatial constraint');\naxes[0].plot(mdl_zhou_spatial_constraint.results['BP'], '.', label='Zhou 2003');\naxes[0].axhline(y=BP, color='k', linestyle='--');\naxes[0].set_xlabel('voxel');\naxes[0].set_ylabel('BP');\n#axes[0].legend();\n\naxes[1].plot(mdl_lammertsma.results['R1'], '.', label='Lammertsma 1996');\naxes[1].plot(mdl_zhou.results['R1'], '.', label='Zhou 2003 w/o spatial constraint');\naxes[1].plot(mdl_zhou_spatial_constraint.results['R1_lrsc'], '.', label='Zhou 2003');\naxes[1].axhline(y=R1, color='k', linestyle='--');\naxes[1].set_xlabel('voxel');\naxes[1].set_ylabel('R1');\naxes[1].legend();",
"_____no_output_____"
],
[
"import temporalimage\nti = temporalimage.TemporalImage(TAC_img, np.eye(4), t-dt/2, t+dt/2, time_unit='min')",
"_____no_output_____"
],
[
"results_img = SRTM_Zhou2003.volume_wrapper(ti=ti, refTAC=refTAC, fwhm=(2*np.sqrt(2*np.log(2))) * 5)",
"_____no_output_____"
],
[
"results_img.keys()",
"_____no_output_____"
],
[
"results_img['BP']",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(1,2, figsize=(10,4));\n\naxes[0].plot(results_img['BP'].flatten(), '.', label='Zhou 2003');\naxes[0].axhline(y=BP, color='k', linestyle='--');\naxes[0].set_xlabel('voxel');\naxes[0].set_ylabel('BP');\n\naxes[1].plot(results_img['R1'].flatten(), '.', label='Zhou 2003');\naxes[1].axhline(y=R1, color='k', linestyle='--');\naxes[1].set_xlabel('voxel');\naxes[1].set_ylabel('R1');",
"_____no_output_____"
],
[
"np.allclose(mdl_zhou_spatial_constraint.results['BP'], results_img['BP'].flatten(),rtol=1e-18,atol=1e-18)",
"_____no_output_____"
],
[
"np.allclose(mdl_zhou_spatial_constraint.results['R1'], results_img['R1'].flatten(),rtol=1e-18,atol=1e-18)",
"_____no_output_____"
],
[
"np.allclose(mdl_zhou_spatial_constraint.results['R1_lrsc'], results_img['R1_lrsc'].flatten(),rtol=1e-18,atol=1e-18)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74aa899a3fb4676ede864908e53ec2130621391 | 2,566 | ipynb | Jupyter Notebook | Data/processed/.ipynb_checkpoints/Cleaneddata-checkpoint.ipynb | data301-2020-winter2/course-project-group_1015 | 0718ccbc3910313d6e4f4e492dcf40f5b9ef5928 | [
"MIT"
] | 1 | 2021-02-17T08:30:02.000Z | 2021-02-17T08:30:02.000Z | Data/processed/.ipynb_checkpoints/Cleaneddata-checkpoint.ipynb | data301-2020-winter2/course-project-group_1015 | 0718ccbc3910313d6e4f4e492dcf40f5b9ef5928 | [
"MIT"
] | 1 | 2021-03-24T00:52:25.000Z | 2021-03-24T18:53:53.000Z | Data/processed/.ipynb_checkpoints/Cleaneddata-checkpoint.ipynb | data301-2020-winter2/course-project-group_1015 | 0718ccbc3910313d6e4f4e492dcf40f5b9ef5928 | [
"MIT"
] | null | null | null | 27.010526 | 109 | 0.450896 | [
[
[
"import pandas as pd\nimport numpy as np\n\ndef load_and_process(url_or_path_to_csv_file):\n\n # Method Chain 1 (Load data and rename the column)\n\n df1 = (\n pd.read_csv('Medical_Cost.csv')\n .rename(columns={\"charges\": \"Medical Costs per region\"})\n .rename(columns={\"sex\": \"Gender\"})\n .rename(columns={\"smoker\": \"Tobacco User\"})\n .rename(columns={\"age\": \"Age\"})\n .rename(columns={\"region\": \"Region\"})\n .rename(columns={\"children\": \"Children\"})\n .rename(columns={\"bmi\": \"BMI\"})\n \n )\n \n # Method Chain 2 ( Cleaning the costs and BMI by riunding them into 2 decimal places)\n\n\n def format(x):\n return \"${:0.2f}\".format(x)\n \n df1['Medical Costs per region'] = (df1['Medical Costs per region'].apply(format)\n )\n \n\n df1['BMI'] = ( round(df1['BMI'])\n \n ) \n # Method Chain 3(Sorting the data )\n df4 = (\n df1.sort_values(by=['Age'], ascending = True)\n )\n # Method Chain 3(Organizing the order of the columns )\n df5 = (\n df4[['Age','Gender','Children','BMI','Tobacco User','Medical Costs per region','Region']]\n ) \n \n return df5\n\n \n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
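The spatially constrained Zhou 2003 fit above smooths every time frame of the 4-D TAC image with a Gaussian before fitting, and the volume wrapper is later called with fwhm = 2*sqrt(2*ln 2)*sigma. A small standalone sketch of just that preprocessing step; the helper name and toy array are illustrative only.

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def smooth_frames(tac_img, sigma):
    # smooth each time frame of a 4-D (x, y, z, t) array independently
    out = np.empty_like(tac_img)
    for k in range(tac_img.shape[-1]):
        out[..., k] = gaussian_filter(tac_img[..., k], sigma=sigma)
    return out

sigma = 5.0
fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) * sigma   # the value passed as fwhm above (~11.77)
print(fwhm)

demo = np.random.rand(5, 5, 5, 7)                 # toy 4-D TAC image for illustration
print(smooth_frames(demo, sigma).shape)
```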
e74ab1f4129f2263ddf84688b59be4273966e584 | 4,071 | ipynb | Jupyter Notebook | util/stitch_image_tiles_with_overlap.ipynb | kukionfr/imagenoob | f0163607e436cd7812699aa2dfde7d57ba90d414 | [
"MIT"
] | null | null | null | util/stitch_image_tiles_with_overlap.ipynb | kukionfr/imagenoob | f0163607e436cd7812699aa2dfde7d57ba90d414 | [
"MIT"
] | null | null | null | util/stitch_image_tiles_with_overlap.ipynb | kukionfr/imagenoob | f0163607e436cd7812699aa2dfde7d57ba90d414 | [
"MIT"
] | null | null | null | 27.693878 | 142 | 0.519774 | [
[
[
"import os\nimport glob\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport cv2\nfrom time import time",
"_____no_output_____"
],
[
"def _time(f):\n def wrapper(*args,**kwargs):\n start=time()\n r=f(*args,**kwargs)\n end=time()\n print(\"%s timed %f\" %(f.__name__,end-start))\n return r\n return wrapper\n\n@_time\ndef stitchtiles(impths):\n # pre-allocate stitched image\n overlap = 0.1\n overlap = overlap/2 # cut this much from all sides of the image\n tilesz = Image.open(impths[0]).size\n pixel2adjust = round(tilesz[0]*overlap) # cut this much from all sides of the image\n cuttile = np.zeros(tilesz)[pixel2adjust:-pixel2adjust,pixel2adjust:-pixel2adjust]\n column = round(np.sqrt(tiles_per_well))\n row = round(np.sqrt(tiles_per_well))\n imstack = np.repeat(cuttile[np.newaxis,:, :], column, axis=0)\n imstack = np.repeat(imstack[np.newaxis, :, :, :], row, axis=0)\n # read tile into each grid\n for idx,im in enumerate(impths):\n rowidx = idx//4\n colidx = idx%4\n imstack[rowidx,colidx,:,:]=np.array(Image.open(im))[pixel2adjust:-pixel2adjust,pixel2adjust:-pixel2adjust]\n # re-order tiles for snake style\n imstack[1::2,:,:,:]=imstack[1::2,::-1,:,:]\n # stitch\n imstack = imstack.swapaxes(1,2)\n imstack = imstack.reshape(cuttile.shape[0]*row,cuttile.shape[1]*column)\n # normalize\n imstack_norm = cv2.normalize(imstack, None, alpha = 0, beta = 65535, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_16U)\n return imstack_norm",
"_____no_output_____"
],
[
"src=r'\\\\fatherserverdw\\Bart\\Stiffness\\^Bleomycin Growth Curve\\Plastic - Low Dose\\Bio 3 - P6\\20220309_GT22_Day0'\ndst=os.path.join(src,'output')\nims = glob.glob(os.path.join(src,'*tif'))\ntiles_per_well = 16\nnum_well = 24\nfor wellID in range(num_well):\n print(wellID+1,'/',num_well)\n ims2 = ims[wellID*tiles_per_well:wellID*tiles_per_well+tiles_per_well] # build for-loop in this line to process the entire folder\n imstack = stitchtiles(ims2)\n png = Image.fromarray(imstack)\n png.save(os.path.join(dst,'{}.png'.format(wellID)))",
"0 16\n16 32\n32 48\n48 64\n64 80\n80 96\n96 112\n112 128\n128 144\n144 160\n160 176\n176 192\n192 208\n208 224\n224 240\n240 256\n256 272\n272 288\n288 304\n304 320\n320 336\n336 352\n352 368\n368 384\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
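A toy, self-contained illustration of the same cleaning steps on a two-row DataFrame, so the pipeline above can be exercised without Medical_Cost.csv; the sample values are made up purely for demonstration.

```python
import pandas as pd

raw = pd.DataFrame({
    "age": [19, 33], "sex": ["female", "male"], "bmi": [27.9, 22.705],
    "children": [0, 0], "smoker": ["yes", "no"], "region": ["southwest", "northwest"],
    "charges": [16884.924, 21984.47061],
})

clean = (
    raw.rename(columns={"charges": "Medical Costs per region", "sex": "Gender",
                        "smoker": "Tobacco User", "age": "Age",
                        "region": "Region", "children": "Children", "bmi": "BMI"})
       .sort_values(by="Age")
       .reset_index(drop=True)
)
clean["Medical Costs per region"] = clean["Medical Costs per region"].map("${:0.2f}".format)
clean["BMI"] = clean["BMI"].round()
clean = clean[["Age", "Gender", "Children", "BMI", "Tobacco User",
               "Medical Costs per region", "Region"]]
print(clean)
```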
e74abc8c1be9e6c42a37ef7ad45e06d9569c85e7 | 7,189 | ipynb | Jupyter Notebook | DP/Policy Iteration.ipynb | MChaus/reinforcement-learning | d7cbd15adb35111a105dfab0ffd0b799332dd582 | [
"MIT"
] | null | null | null | DP/Policy Iteration.ipynb | MChaus/reinforcement-learning | d7cbd15adb35111a105dfab0ffd0b799332dd582 | [
"MIT"
] | null | null | null | DP/Policy Iteration.ipynb | MChaus/reinforcement-learning | d7cbd15adb35111a105dfab0ffd0b799332dd582 | [
"MIT"
] | null | null | null | 31.809735 | 108 | 0.480039 | [
[
[
"import numpy as np\nimport pprint\nimport sys\nif \"../\" not in sys.path:\n sys.path.append(\"../\") \nfrom lib.envs.gridworld import GridworldEnv",
"_____no_output_____"
],
[
"pp = pprint.PrettyPrinter(indent=2)\nenv = GridworldEnv()",
"_____no_output_____"
],
[
"# Taken from Policy Evaluation Exercise!\n\ndef policy_eval(policy, env, discount_factor=1.0, theta=0.00001):\n \"\"\"\n Evaluate a policy given an environment and a full description of the environment's dynamics.\n \n Args:\n policy: [S, A] shaped matrix representing the policy.\n env: OpenAI env. env.P represents the transition probabilities of the environment.\n env.P[s][a] is a list of transition tuples (prob, next_state, reward, done).\n env.nS is a number of states in the environment. \n env.nA is a number of actions in the environment.\n theta: We stop evaluation once our value function change is less than theta for all states.\n discount_factor: Gamma discount factor.\n \n Returns:\n Vector of length env.nS representing the value function.\n \"\"\"\n # Start with a random (all 0) value function\n V = np.zeros(env.nS)\n while True:\n delta = 0\n # For each state, perform a \"full backup\"\n for s in range(env.nS):\n v = 0\n # Look at the possible next actions\n for a, action_prob in enumerate(policy[s]):\n # For each action, look at the possible next states...\n for prob, next_state, reward, done in env.P[s][a]:\n # Calculate the expected value\n v += action_prob * prob * (reward + discount_factor * V[next_state])\n # How much our value function changed (across any states)\n delta = max(delta, np.abs(v - V[s]))\n V[s] = v\n # Stop evaluating once our value function change is below a threshold\n if delta < theta:\n break\n return np.array(V)",
"_____no_output_____"
],
[
"def policy_improvement(env, policy_eval_fn=policy_eval, discount_factor=1.0):\n \"\"\"\n Policy Improvement Algorithm. Iteratively evaluates and improves a policy\n until an optimal policy is found.\n \n Args:\n env: The OpenAI envrionment.\n policy_eval_fn: Policy Evaluation function that takes 3 arguments:\n policy, env, discount_factor.\n discount_factor: gamma discount factor.\n \n Returns:\n A tuple (policy, V). \n policy is the optimal policy, a matrix of shape [S, A] where each state s\n contains a valid probability distribution over actions.\n V is the value function for the optimal policy.\n \n \"\"\"\n # Start with a random policy\n policy = np.ones([env.nS, env.nA]) / env.nA\n \n while True:\n # Implement this!\n prev_policy = np.copy(policy)\n V = policy_eval_fn(policy, env, discount_factor, theta=0.01)\n for state in range(env.nS):\n action_values = []\n for action, action_prob in enumerate(policy[state]):\n action_value = 0\n policy[state][action] = 0\n for prob, next_state, reward, done in env.P[state][action]:\n action_value += prob * (reward + discount_factor * V[next_state])\n action_values.append(action_value)\n best_action = np.argmax(action_values)\n policy[state][best_action] = 1\n if (prev_policy == policy).all():\n return policy, V",
"_____no_output_____"
],
[
"policy, v = policy_improvement(env)\nprint(\"Policy Probability Distribution:\")\nprint(policy)\nprint(\"\")\n\nprint(\"Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):\")\nprint(np.reshape(np.argmax(policy, axis=1), env.shape))\nprint(\"\")\n\nprint(\"Value Function:\")\nprint(v)\nprint(\"\")\n\nprint(\"Reshaped Grid Value Function:\")\nprint(v.reshape(env.shape))\nprint(\"\")\n\n",
"Policy Probability Distribution:\n[[1. 0. 0. 0.]\n [0. 0. 0. 1.]\n [0. 0. 0. 1.]\n [0. 0. 1. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [0. 0. 1. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [0. 1. 0. 0.]\n [0. 0. 1. 0.]\n [1. 0. 0. 0.]\n [0. 1. 0. 0.]\n [0. 1. 0. 0.]\n [1. 0. 0. 0.]]\n\nReshaped Grid Policy (0=up, 1=right, 2=down, 3=left):\n[[0 3 3 2]\n [0 0 0 2]\n [0 0 1 2]\n [0 1 1 0]]\n\nValue Function:\n[ 0. -1. -2. -3. -1. -2. -3. -2. -2. -3. -2. -1. -3. -2. -1. 0.]\n\nReshaped Grid Value Function:\n[[ 0. -1. -2. -3.]\n [-1. -2. -3. -2.]\n [-2. -3. -2. -1.]\n [-3. -2. -1. 0.]]\n\n"
],
[
"# Test the value function\nexpected_v = np.array([ 0, -1, -2, -3, -1, -2, -3, -2, -2, -3, -2, -1, -3, -2, -1, 0])\nnp.testing.assert_array_almost_equal(v, expected_v, decimal=2)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
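The core of policy_improvement above is the greedy step: compute one-step action values from the current value function and put all probability mass on the best action. A minimal stand-alone illustration on a hand-built two-state MDP in the same env.P[s][a] format (prob, next_state, reward, done); the toy MDP is invented purely for the example.

```python
import numpy as np

P = {
    0: {0: [(1.0, 0, 0.0, False)], 1: [(1.0, 1, 1.0, True)]},
    1: {0: [(1.0, 1, 0.0, True)], 1: [(1.0, 1, 0.0, True)]},
}
nS, nA, gamma = 2, 2, 1.0
V = np.zeros(nS)

policy = np.ones((nS, nA)) / nA
for s in range(nS):
    q = np.zeros(nA)
    for a in range(nA):
        for prob, s2, r, done in P[s][a]:
            q[a] += prob * (r + gamma * V[s2])
    policy[s] = np.eye(nA)[np.argmax(q)]   # make the policy greedy w.r.t. the action values

print(policy)   # state 0 should put all probability on action 1, the rewarding transition
```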
e74ad63a653d9482fbbf1a37d5da8a0bc601d003 | 35,334 | ipynb | Jupyter Notebook | notebooks/my first notebook.ipynb | maguas01/titanic | f16e13e3a88e9ef4ead1c8b47a7b4cd65811dc07 | [
"MIT"
] | null | null | null | notebooks/my first notebook.ipynb | maguas01/titanic | f16e13e3a88e9ef4ead1c8b47a7b4cd65811dc07 | [
"MIT"
] | null | null | null | notebooks/my first notebook.ipynb | maguas01/titanic | f16e13e3a88e9ef4ead1c8b47a7b4cd65811dc07 | [
"MIT"
] | null | null | null | 44.057357 | 11,660 | 0.653959 | [
[
[
"print \"hello world\"",
"hello world\n"
]
],
[
[
"## large cells ",
"_____no_output_____"
]
],
[
[
"# simple code \n2 + 2 ",
"_____no_output_____"
],
[
"# simple variable \nx = 3\n\n# print it \nprint x ",
"3\n"
],
[
"# a function \ndef print_hello(name) : \n print 'hello {0}'.format(name)",
"_____no_output_____"
],
[
"print_hello(\"bro\")",
"hello bro\n"
]
],
[
[
"# help key board short cuts to find all the keyboard short cut, esc + l print line numbers\n",
"_____no_output_____"
]
],
[
[
"!python --version \n",
"Python 2.7.15 :: Anaconda, Inc.\n"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nplt.plot(range(100))",
"_____no_output_____"
]
],
[
[
"matplotlib helps you plot stuff on the jupyther notebook",
"_____no_output_____"
]
],
[
[
"%time x = range(10000)",
"Wall time: 1 ms\n"
]
],
[
[
"## time your functions with time\n",
"_____no_output_____"
]
],
[
[
"%%timeit x = range(10000)\nmax(x)",
"1000 loops, best of 3: 221 µs per loop\n"
]
],
[
[
"# writes to files ",
"_____no_output_____"
]
],
[
[
"%%writefile test.txt\nthis is some stuff that i wrote form jupiter notebook ",
"Writing test.txt\n"
]
],
[
[
"# ls list directory ",
"_____no_output_____"
]
],
[
[
"%ls\n",
" Volume in drive F has no label.\n Volume Serial Number is C816-9EA8\n\n Directory of F:\\anacondaWD\n\n23-Jul-18 00:19 <DIR> .\n23-Jul-18 00:19 <DIR> ..\n23-Jul-18 00:00 <DIR> .ipynb_checkpoints\n23-Jul-18 00:19 16,013 My First Notebook.ipynb\n23-Jul-18 00:18 54 test.txt\n 2 File(s) 16,067 bytes\n 3 Dir(s) 528,140,967,936 bytes free\n"
]
],
[
[
"# we can use latex in jupyter with %latex",
"_____no_output_____"
]
],
[
[
"%%latex\n\\begin{align}\nGradient: \\nabla J = -2H^T (Y -HW)\n\\end{align}",
"_____no_output_____"
]
],
[
[
"## load_ext loads extensions\n",
"_____no_output_____"
]
],
[
[
"%%!\npip install ipython-sql\n",
"_____no_output_____"
],
[
"%load_ext sql\n",
"_____no_output_____"
],
[
"%sql sqlite://",
"_____no_output_____"
],
[
"%%sql\ncreate table classT(name, age, marks);\ninsert into classT values(\"bob\", 22, 99);\ninsert into classT values(\"tom\", 21, 88);",
" * sqlite://\nDone.\n1 rows affected.\n1 rows affected.\n"
],
[
"%sql select * from classT;",
" * sqlite://\nDone.\n"
]
],
[
[
"# List all of the magic functions %lsmagic",
"_____no_output_____"
]
],
[
[
"%lsmagic ",
"_____no_output_____"
],
[
"import sklearn.tree \n\nhelp(sklearn.tree._tree.Tree)",
"Help on class Tree in module sklearn.tree._tree:\n\nclass Tree(__builtin__.object)\n | Array-based representation of a binary decision tree.\n | \n | The binary tree is represented as a number of parallel arrays. The i-th\n | element of each array holds information about the node `i`. Node 0 is the\n | tree's root. You can find a detailed description of all arrays in\n | `_tree.pxd`. NOTE: Some of the arrays only apply to either leaves or split\n | nodes, resp. In this case the values of nodes of the other type are\n | arbitrary!\n | \n | Attributes\n | ----------\n | node_count : int\n | The number of nodes (internal nodes + leaves) in the tree.\n | \n | capacity : int\n | The current capacity (i.e., size) of the arrays, which is at least as\n | great as `node_count`.\n | \n | max_depth : int\n | The maximal depth of the tree.\n | \n | children_left : array of int, shape [node_count]\n | children_left[i] holds the node id of the left child of node i.\n | For leaves, children_left[i] == TREE_LEAF. Otherwise,\n | children_left[i] > i. This child handles the case where\n | X[:, feature[i]] <= threshold[i].\n | \n | children_right : array of int, shape [node_count]\n | children_right[i] holds the node id of the right child of node i.\n | For leaves, children_right[i] == TREE_LEAF. Otherwise,\n | children_right[i] > i. This child handles the case where\n | X[:, feature[i]] > threshold[i].\n | \n | feature : array of int, shape [node_count]\n | feature[i] holds the feature to split on, for the internal node i.\n | \n | threshold : array of double, shape [node_count]\n | threshold[i] holds the threshold for the internal node i.\n | \n | value : array of double, shape [node_count, n_outputs, max_n_classes]\n | Contains the constant prediction value of each node.\n | \n | impurity : array of double, shape [node_count]\n | impurity[i] holds the impurity (i.e., the value of the splitting\n | criterion) at node i.\n | \n | n_node_samples : array of int, shape [node_count]\n | n_node_samples[i] holds the number of training samples reaching node i.\n | \n | weighted_n_node_samples : array of int, shape [node_count]\n | weighted_n_node_samples[i] holds the weighted number of training samples\n | reaching node i.\n | \n | Methods defined here:\n | \n | __getstate__(...)\n | Getstate re-implementation, for pickling.\n | \n | __reduce__(...)\n | Reduce re-implementation, for pickling.\n | \n | __setstate__(...)\n | Setstate re-implementation, for unpickling.\n | \n | apply(...)\n | Finds the terminal region (=leaf node) for each sample in X.\n | \n | compute_feature_importances(...)\n | Computes the importance of each feature (aka variable).\n | \n | decision_path(...)\n | Finds the decision path (=node) for each sample in X.\n | \n | predict(...)\n | Predict target for X.\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | capacity\n | \n | children_left\n | \n | children_right\n | \n | feature\n | \n | impurity\n | \n | max_depth\n | \n | max_n_classes\n | \n | n_classes\n | \n | n_features\n | \n | n_node_samples\n | \n | n_outputs\n | \n | node_count\n | \n | threshold\n | \n | value\n | \n | weighted_n_node_samples\n | \n | ----------------------------------------------------------------------\n | Data and other attributes defined here:\n | \n | __new__ = <built-in method __new__ of type object>\n | T.__new__(S, ...) -> a new object with type S, a subtype of T\n | \n | __pyx_vtable__ = <capsule object NULL>\n\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
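The %%timeit cell above has a rough standard-library equivalent for plain Python scripts, using the timeit module; the numbers printed will of course differ from the notebook output shown.

```python
import timeit

setup = "x = range(10000)"
# 3 repeats of 1000 loops each, report the best per-loop time, like %%timeit does
best = min(timeit.repeat("max(x)", setup=setup, repeat=3, number=1000)) / 1000
print("best of 3: {:.1f} microseconds per loop".format(best * 1e6))
```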
e74adf337f9e3da7eebec72ba44c5af54e9e8967 | 4,455 | ipynb | Jupyter Notebook | notebooks/Toy_problem/Tp2_Generate_Skydome.ipynb | Maxketelaar/thesis | d1bab7dffa414c335b452476733c8b9d8ec24579 | [
"MIT"
] | null | null | null | notebooks/Toy_problem/Tp2_Generate_Skydome.ipynb | Maxketelaar/thesis | d1bab7dffa414c335b452476733c8b9d8ec24579 | [
"MIT"
] | null | null | null | notebooks/Toy_problem/Tp2_Generate_Skydome.ipynb | Maxketelaar/thesis | d1bab7dffa414c335b452476733c8b9d8ec24579 | [
"MIT"
] | 1 | 2021-12-21T15:24:57.000Z | 2021-12-21T15:24:57.000Z | 25.457143 | 148 | 0.550168 | [
[
[
"import trimesh as tm\nimport numpy as np\nimport pyvista as pv\nimport os\nimport topogenesis as tg\nimport pickle as pk",
"_____no_output_____"
],
[
"context_path = os.path.relpath('../../data/movedcontext.obj')\ncontext_mesh = tm.load(context_path)\n\nenv_lat_path = os.path.relpath('../../data/macrovoxels.csv')\nenvelope_lattice = tg.lattice_from_csv(env_lat_path)",
"_____no_output_____"
],
[
"# create a sphere \nsphere = tm.creation.icosphere(subdivisions= 3, radius= 400.0)\nspheremove = sphere.apply_translation([60, -60, 0]) # skydome is moved to correct position\n# extract vertices from sphere\nsphere_vertices = spheremove.vertices\n\n# keep only positive vertices\nskydome = sphere_vertices[sphere_vertices[:,2] > 0.0]\n\n# convert to array\nskydome = np.array(skydome)\n\n# shift skydome down to ground height --> shift down with minimum height of skydome points +10 and height offset of lattice and environment\nskydome[:,2] += -min(skydome[:,2]-10) + min(envelope_lattice.centroids[:,2])\n",
"_____no_output_____"
],
[
"a = skydome\nb = np.array((30,-30,0))\nvec = b - a\nc = tm.creation.icosphere(subdivisions= 1, radius= 400.0)\nvec.shape, skydome.shape, sphere_vertices.shape, c.vertices.shape",
"_____no_output_____"
],
[
"p = pv.Plotter(notebook=True)\n\ndef tri_to_pv(tri_mesh):\n faces = np.pad(tri_mesh.faces, ((0, 0),(1,0)), 'constant', constant_values=3)\n pv_mesh = pv.PolyData(tri_mesh.vertices, faces)\n return pv_mesh\n\n# fast visualization of the lattice\n#envelope_lattice.fast_vis(p)\n\n# add the sky vectors\n#p.add_points(skydome, color='#0013ff')\np.add_arrows(skydome, vec, mag=0.1, show_scalar_bar=False)\n\n\n# add context\n#p.add_mesh(tri_to_pv(context_mesh), opacity=0.1, color='lightgrey')\n\n\n# plotting\np.show(use_ipyvtk=True, screenshot='skydome.png')",
"_____no_output_____"
],
[
"# save skydome to pk\npk.dump(skydome, open(\"../../data/skydome.pk\", \"wb\"))",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
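A stand-alone sketch of the skydome construction above: sample an icosphere with trimesh, keep only the upper hemisphere, and build direction vectors toward the target point used for the arrows. Normalising the vectors is an optional extra step shown here for illustration, not something the notebook itself does.

```python
import numpy as np
import trimesh as tm

sphere = tm.creation.icosphere(subdivisions=3, radius=400.0)
pts = np.array(sphere.vertices)
dome = pts[pts[:, 2] > 0.0]                     # keep vertices above the ground plane

target = np.array([30.0, -30.0, 0.0])           # same target point used for the arrows above
vec = target - dome
unit_vec = vec / np.linalg.norm(vec, axis=1, keepdims=True)   # optional: unit-length sky vectors
print(dome.shape, unit_vec.shape)
```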
e74ae0cb6c189d8fb7213104f4ea6b340ea8e83a | 341,396 | ipynb | Jupyter Notebook | m03_v01_store_sales_predict.ipynb | artavale/Rossman-Forcast-Sales | 558342a714982b3762c947fb8c1eaa15be5f2aac | [
"MIT"
] | null | null | null | m03_v01_store_sales_predict.ipynb | artavale/Rossman-Forcast-Sales | 558342a714982b3762c947fb8c1eaa15be5f2aac | [
"MIT"
] | null | null | null | m03_v01_store_sales_predict.ipynb | artavale/Rossman-Forcast-Sales | 558342a714982b3762c947fb8c1eaa15be5f2aac | [
"MIT"
] | null | null | null | 130.752968 | 218,580 | 0.820015 | [
[
[
"# 0.0. IMPORTS",
"_____no_output_____"
]
],
[
[
"import math\nimport inflection\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom IPython.core.display import HTML\nfrom IPython.display import Image\nfrom datetime import datetime, timedelta",
"_____no_output_____"
]
],
[
[
"## 0.1. Helper Functions",
"_____no_output_____"
],
[
"## 0.2. Loading data",
"_____no_output_____"
]
],
[
[
"df_sale_raw = pd.read_csv( 'base de dados/train.csv', low_memory=False)\ndf_store_raw = pd.read_csv( 'base de dados/store.csv', low_memory=False)",
"_____no_output_____"
],
[
"df_sale_raw.sample()",
"_____no_output_____"
],
[
"df_store_raw.sample()",
"_____no_output_____"
],
[
"df_raw = pd.merge( df_sale_raw, df_store_raw, how='left', on='Store')\ndf_raw.sample()",
"_____no_output_____"
]
],
[
[
"# 1.0. PASSO 01 - DESCRICAO DOS DADOS",
"_____no_output_____"
]
],
[
[
"df1 = df_raw.copy()",
"_____no_output_____"
]
],
[
[
"## 1.1. Rename Columns",
"_____no_output_____"
]
],
[
[
"df1.columns",
"_____no_output_____"
],
[
"cols_old = ['Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo',\n 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment',\n 'CompetitionDistance', 'CompetitionOpenSinceMonth',\n 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',\n 'Promo2SinceYear', 'PromoInterval']\n\n\nsnakecase = lambda x: inflection.underscore( x )\n\ncols_new = list( map( snakecase, cols_old ) )\n\n#rename \n\ndf1.columns = cols_new",
"_____no_output_____"
],
[
"df1.columns",
"_____no_output_____"
]
],
[
[
"## 1.2. Data Dimensions",
"_____no_output_____"
]
],
[
[
"print( f'Number of Rows: {df1.shape[0]}')\nprint( f'Number of Columns: {df1.shape[1]}')",
"Number of Rows: 1017209\nNumber of Columns: 18\n"
]
],
[
[
"## 1.3. Data Types",
"_____no_output_____"
]
],
[
[
"df1['date'] = pd.to_datetime( df1['date'] )\ndf1.dtypes",
"_____no_output_____"
]
],
[
[
"## 1.4. Check NA",
"_____no_output_____"
]
],
[
[
"df1.isna().sum()",
"_____no_output_____"
]
],
[
[
"## 1.5. Fillout NA",
"_____no_output_____"
]
],
[
[
"df1.sample()",
"_____no_output_____"
],
[
"# competition_distance \n\ndf1['competition_distance'] = df1['competition_distance'].apply( lambda x: 200000.0 if math.isnan( x ) else x )\n\n# competition_open_since_month \n\ndf1['competition_open_since_month'] = df1.apply( lambda x: x['date'].month if math.isnan( x['competition_open_since_month'] ) else x['competition_open_since_month'], axis = 1)\n\n# competition_open_since_year \n\ndf1['competition_open_since_year'] = df1.apply( lambda x: x['date'].year if math.isnan( x['competition_open_since_year'] ) else x['competition_open_since_year'], axis = 1)\n \n# promo2_since_week \n\ndf1['promo2_since_week'] = df1.apply( lambda x: x['date'].week if math.isnan( x['promo2_since_week'] ) else x['promo2_since_week'], axis = 1)\n\n# promo2_since_year \n\ndf1['promo2_since_year'] = df1.apply( lambda x: x['date'].year if math.isnan( x['promo2_since_year'] ) else x['promo2_since_year'], axis = 1)\n\n#promo_interval\n\nmonth_map = {1: 'Jan', 2: 'Fev', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}\n\ndf1['promo_interval'].fillna(0, inplace=True )\n\ndf1['month_map'] = df1['date'].dt.month.map( month_map )\n \ndf1['is_promo'] = df1[['promo_interval', 'month_map']].apply( lambda x: 0 if x['promo_interval'] == 0 else 1 if x['month_map'] in x['promo_interval'].split( ',' ) else 0, axis= 1 )",
"_____no_output_____"
],
[
"df1.isna().sum()",
"_____no_output_____"
]
],
[
[
"## 1.6. Change Types",
"_____no_output_____"
]
],
[
[
"df1.dtypes",
"_____no_output_____"
],
[
"df1['competition_open_since_month'] = df1['competition_open_since_month'].astype( int )\ndf1['competition_open_since_year'] = df1['competition_open_since_year'].astype( int )\n\ndf1['promo2_since_week'] = df1['promo2_since_week'].astype( int )\ndf1['promo2_since_year'] = df1['promo2_since_year'].astype( int )",
"_____no_output_____"
]
],
[
[
"## 1.7. Descriptive Statistical",
"_____no_output_____"
]
],
[
[
"num_attributes = df1.select_dtypes( include= [ 'int64', 'float64'] )\ncat_attributes = df1.select_dtypes( exclude= [ 'int64', 'float64', 'datetime64[ns]'] )",
"_____no_output_____"
],
[
"cat_attributes.sample()",
"_____no_output_____"
]
],
[
[
"### 1.7.1. Numerical Attributes",
"_____no_output_____"
]
],
[
[
"# Central Tendency - mean, median\n\nct1 = pd.DataFrame( num_attributes.apply( np.mean ) ).T\nct2 = pd.DataFrame( num_attributes.apply( np.median ) ).T\n\n# Dispersion - std, min, max, range, skew, kurtois \n\nd1 = pd.DataFrame( num_attributes.apply( np.std) ).T\nd2 = pd.DataFrame( num_attributes.apply( min ) ).T\nd3 = pd.DataFrame( num_attributes.apply( max ) ).T\nd4 = pd.DataFrame( num_attributes.apply( lambda x: x.max() - x.min() ) ).T\nd5 = pd.DataFrame( num_attributes.apply( lambda x: x.skew() ) ).T\nd6 = pd.DataFrame( num_attributes.apply( lambda x: x.kurtosis() ) ).T\n\n# Concatenate\n\nm = pd.concat( [d2, d3, d4, ct1, ct2, d1, d5, d6] ).T.reset_index()\nm.columns = ['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis']",
"_____no_output_____"
],
[
"m",
"_____no_output_____"
],
[
"sns.distplot( df1['competition_distance'] )",
"/opt/anaconda3/envs/store_sales_predict/lib/python3.8/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n"
]
],
[
[
"### 1.7.2. Categorical Attributes",
"_____no_output_____"
]
],
[
[
"cat_attributes.apply( lambda x: x.unique().shape[0])",
"_____no_output_____"
],
[
"aux1 = df1[(df1['state_holiday'] != '0' ) & (df1['sales'] > 0 )]\n\nplt.subplot(1, 3, 1)\nsns.boxplot( x='state_holiday' ,y='sales' , data=aux1 )\n\nplt.subplot(1, 3, 2)\nsns.boxplot( x='store_type' ,y='sales' , data=aux1 )\n\nplt.subplot(1, 3, 3)\nsns.boxplot( x='assortment' ,y='sales' , data=aux1 )",
"_____no_output_____"
]
],
[
[
"# 2.0. PASSO 02 - FEATURE ENGINEERING",
"_____no_output_____"
]
],
[
[
"df2 = df1.copy()",
"_____no_output_____"
],
[
"Image( 'images/MIndMapHypothesis.png')",
"_____no_output_____"
]
],
[
[
"## 2.1. Criacao de Hipótesis ",
"_____no_output_____"
],
[
"### 2.1.1 Hipóteses Loja",
"_____no_output_____"
],
[
"**1.** Lojas com maior quadro de funcionarios deveriam vender mais\n\n**2.** Lojas com maior capacidade de estoque deveriam vender mais\n\n**3.** Lojas com maior porte deveriam vender mais \n\n**4.** Lojas com maior sortimento deveriam vender mais\n\n**5.** Lojas com competidores mais próximos deveriam vender menos\n\n**6.** Lojas com competidores a mais tempo deveriam vender mais",
"_____no_output_____"
],
[
"### 2.1.2 Hipóteses do produto",
"_____no_output_____"
],
[
"**1.** Lojas que investem mais em Marketing deveriam vender mais\n\n**2.** Lojas com maior exposicao do produtos deveriam vender mais\n\n**3.** Lojas com produtos com menor preço deveriam vender mais\n\n**4.** Lojas com promoções mais agressivas (descontos maiores) deveriam vender mais\n\n**5.** Lojas com promoções ativas por mais tempo deveriam vender mais\n\n**6.** Lojas com mais dias de promoções deveriam vender mais\n\n**7** Lojas com mais promoçoes consecultivas deveriam vender mais",
"_____no_output_____"
],
[
"### 2.1.3. Hipóteses Tempo",
"_____no_output_____"
],
[
"**1.** Lojas abertas durante o feriado de Natal deveriam vender mais\n\n**2.** Lojas deveriam vnder mais ao longo dos anos\n\n**3.** Lojas deveriam vender mais no segundo semestre do ano\n\n**4.** Lojas deveriam vender mais depois do dia 10 de cada mês\n\n**5.** Lojas deveriam vender mais nos finais de semana\n\n**6.** Lojas deveriam deveriam vender menos durante os feriados escolares",
"_____no_output_____"
],
[
"## 2.2 Lista de Hipóteses",
"_____no_output_____"
],
[
"**1.** Lojas com maior sortimento deveriam vender mais\n\n**2.** Lojas com competidores mais próximos deveriam vender menos\n\n**3.** Lojas com competidores a mais tempo deveriam vender mais\n\n**4.** Lojas com promoções ativas por mais tempo deveriam vender mais\n\n**5.** Lojas com mais dias de promoções deveriam vender mais\n\n**6.** Lojas com mais promoçoes consecultivas deveriam vender mais\n\n**7.** Lojas abertas durante o feriado de Natal deveriam vender mais\n\n**8.** Lojas deveriam vnder mais ao longo dos anos\n\n**9.** Lojas deveriam vender mais no segundo semestre do ano\n\n**10.** Lojas deveriam vender mais depois do dia 10 de cada mês\n\n**11.** Lojas deveriam vender mais nos finais de semana\n\n**12.** Lojas deveriam deveriam vender menos durante os feriados escolares",
"_____no_output_____"
],
[
"## 2.3. Feature Engineering",
"_____no_output_____"
]
],
[
[
"# year\ndf2['year'] = df2['date'].dt.year\n\n# month\ndf2['month'] = df2['date'].dt.month\n\n# day\ndf2['day'] = df2['date'].dt.day\n\n# week of year\ndf2['week_of_year'] = df2['date'].dt.weekofyear\n\n# year week\ndf2['year_week'] = df2['date'].dt.strftime( '%Y-%W' )\n\n# competition since\ndf2['competition_since'] = df2.apply( lambda x: datetime( year=x['competition_open_since_year'], month=x['competition_open_since_month'], day=1), axis= 1 )\ndf2['competition_time_month'] = ( ( df2['date'] - df2['competition_since'] )/30 ).apply(lambda x: x.days).astype( int )\n\n# promo since\ndf2['promo_since'] = df2['promo2_since_year'].astype( str ) + '-' + df2['promo2_since_week'].astype( str )\ndf2['promo_since'] = df2['promo_since'].apply( lambda x: datetime.strptime( x + '-1', '%Y-%W-%w' ) - timedelta( days=7 ) )\ndf2['promo_time_week'] = ( ( df2['date'] - df2['promo_since'] )/7 ).apply( lambda x: x.days ).astype( int )\n\n \n# # assortment\n\n#df2['assortment'] = df2['assortment'].apply( lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended' ) \n\n# state holiday\n\ndf2['state_holiday'] = df2['state_holiday'].apply(lambda x: 'public_holiday' if x == 'a' else 'easter_holiday' if x == 'b' else 'christmas' if x == 'c' else 'regular_day' )\n",
"<ipython-input-27-6c00d4002b55>:11: FutureWarning: Series.dt.weekofyear and Series.dt.week have been deprecated. Please use Series.dt.isocalendar().week instead.\n df2['week_of_year'] = df2['date'].dt.weekofyear\n"
],
[
"df2.head().T",
"_____no_output_____"
]
],
[
[
"# 3.0. PASSO 03 - FILTRAGEM DE VARIÁVEIS",
"_____no_output_____"
]
],
[
[
"df3 = df2.copy()",
"_____no_output_____"
],
[
"df3.head().T",
"_____no_output_____"
]
],
[
[
"## 3.1. Filtragem das Linhas",
"_____no_output_____"
]
],
[
[
" df3 = df3[(df3['open'] != 0) & (df3['sales'] > 0 )]",
"_____no_output_____"
]
],
[
[
"## 3.2. Seleção das Colunas",
"_____no_output_____"
]
],
[
[
"cols_drop = ['customers' , 'open', 'promo_interval', 'month_map']\ndf3 = df3.drop( cols_drop, axis= 1 )",
"_____no_output_____"
],
[
"df3.columns",
"_____no_output_____"
]
],
[
[
"# 4.0. PASSO 4 - ANÁLISE EXPLORATÓRIA DOS DADOS",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
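The promo_since lines above lean on a strptime trick that is easy to misread: formatting a (year, week) pair with '%Y-%W-%w' and appending '-1' pins the date to the Monday of that week, and the timedelta then steps one week back. A small sketch with a single hand-picked example; the sample year, week, and date are illustrative only.

```python
from datetime import datetime, timedelta

promo2_since_year, promo2_since_week = 2015, 23
# '-1' selects weekday 1 (Monday) within week 23 of 2015, then shift back one week
promo_since = datetime.strptime(f"{promo2_since_year}-{promo2_since_week}-1", "%Y-%W-%w") - timedelta(days=7)
print(promo_since)

date = datetime(2015, 7, 31)
promo_time_week = (date - promo_since).days // 7   # whole weeks since the promo started
print(promo_time_week)
```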
e74af12937fc7f432d45313c6c410f7ca4bbf3bb | 7,755 | ipynb | Jupyter Notebook | notebooks/cooling/detect_nei.ipynb | rice-solar-physics/synthetic-observables-paper-models | 80f68bceb7ecbcd238c196e3cc07d19e88617720 | [
"MIT"
] | null | null | null | notebooks/cooling/detect_nei.ipynb | rice-solar-physics/synthetic-observables-paper-models | 80f68bceb7ecbcd238c196e3cc07d19e88617720 | [
"MIT"
] | 8 | 2019-06-11T10:32:49.000Z | 2021-10-19T19:51:00.000Z | notebooks/cooling/detect_nei.ipynb | rice-solar-physics/synthetic-observables-paper-models | 80f68bceb7ecbcd238c196e3cc07d19e88617720 | [
"MIT"
] | null | null | null | 25.343137 | 248 | 0.574339 | [
[
[
"\n# Calculate Detector Counts: NEI\nCompute the AIA response use our full emission model, including non-equilibrium ionization.",
"_____no_output_____"
]
],
[
[
"import os\nimport time\nimport h5py\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['OMP_NUM_THREADS'] = '1'\nos.environ['NUMEXPR_NUM_THREADS'] = '1'\nimport numpy as np\nimport astropy.units as u\nimport sunpy.sun.constants\nimport matplotlib.pyplot as plt\nimport dask\nimport distributed\n\nimport synthesizAR\nfrom synthesizAR.instruments import InstrumentSDOAIA\nfrom synthesizAR.atomic import EmissionModel, Element\n#from synthesizAR.interfaces import EbtelInterface\n\n%matplotlib inline",
"_____no_output_____"
],
[
"cluster = distributed.LocalCluster(n_workers=64,threads_per_worker=1)\nclient = distributed.Client(cluster)\nclient",
"_____no_output_____"
]
],
[
[
"Load in the desired field and emission model",
"_____no_output_____"
]
],
[
[
"field = synthesizAR.Field.restore('/storage-home/w/wtb2/data/timelag_synthesis_v2/cooling/field_checkpoint/')",
"_____no_output_____"
]
],
[
[
"We are using an emission model which includes only the most dominant ions. Comparisons to the temperature response functions show these provide accurate coverage.",
"_____no_output_____"
]
],
[
[
"em_model = EmissionModel.restore('/storage-home/w/wtb2/data/timelag_synthesis_v2/base_emission_model.json')",
"_____no_output_____"
]
],
[
[
"Compute and store the non-equilibrium ionization populations for each loop",
"_____no_output_____"
]
],
[
[
"futures = em_model.calculate_ionization_fraction(field,\n '/storage-home/w/wtb2/data/timelag_synthesis_v2/cooling/nei/ionization_fractions.h5',\n interface=EbtelInterface, client=client)",
"_____no_output_____"
],
[
"em_model.save('/storage-home/w/wtb2/data/timelag_synthesis_v2/cooling/nei/emission_model.json')",
"_____no_output_____"
],
[
"futures = None",
"_____no_output_____"
]
],
[
[
"Or just reload the emission model",
"_____no_output_____"
]
],
[
[
"em_model = EmissionModel.restore('/storage-home/w/wtb2/data/timelag_synthesis_v2/cooling/nei/emission_model.json')",
"_____no_output_____"
]
],
[
[
"Compute the detector counts",
"_____no_output_____"
]
],
[
[
"aia = InstrumentSDOAIA([0,10000]*u.s, field.magnetogram.observer_coordinate)",
"/storage-home/w/wtb2/anaconda3/envs/synthesized-timelags/lib/python3.6/site-packages/sunpy-1.0.dev9869-py3.6-linux-x86_64.egg/sunpy/map/mapbase.py:645: Warning: Missing metadata for heliographic longitude: assuming longitude of 0 degrees\n lon=self.heliographic_longitude,\n"
],
[
"observer = synthesizAR.Observer(field,[aia],parallel=True)",
"_____no_output_____"
],
[
"observer.build_detector_files('/storage-home/w/wtb2/data/timelag_synthesis_v2/cooling/nei/',\n ds=0.5*u.Mm)",
"/storage-home/w/wtb2/anaconda3/envs/synthesized-timelags/lib/python3.6/site-packages/scipy/interpolate/_fitpack_impl.py:299: RuntimeWarning: The maximal number of iterations (20) allowed for finding smoothing\nspline with fp=s has been reached. Probable cause: s too small.\n(abs(fp-s)/s>0.001)\n warnings.warn(RuntimeWarning(_iermess[ier][0]))\n/storage-home/w/wtb2/anaconda3/envs/synthesized-timelags/lib/python3.6/site-packages/scipy/interpolate/_fitpack_impl.py:299: RuntimeWarning: A theoretically impossible result when finding a smoothing spline\nwith fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)\n warnings.warn(RuntimeWarning(_iermess[ier][0]))\n"
],
[
"futures_flat = observer.flatten_detector_counts(emission_model=em_model)",
"_____no_output_____"
]
],
[
[
"And finally build the maps",
"_____no_output_____"
]
],
[
[
"futures_bin = observer.bin_detector_counts(\n '/storage-home/w/wtb2/data/timelag_synthesis_v2/cooling/nei/')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
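The first cells above pin the MKL/OpenMP/numexpr thread pools to a single thread before starting a Dask LocalCluster with one thread per worker, so the worker processes do not oversubscribe the CPU. A minimal sketch of that pattern on its own, with the worker count reduced to 4 as an assumption for a laptop-sized machine.

```python
import os
# pin the numerical thread pools before any heavy imports, as the notebook does
for var in ("MKL_NUM_THREADS", "OMP_NUM_THREADS", "NUMEXPR_NUM_THREADS"):
    os.environ[var] = "1"

import distributed

if __name__ == "__main__":   # guard needed when run as a plain script (worker processes)
    cluster = distributed.LocalCluster(n_workers=4, threads_per_worker=1)
    client = distributed.Client(cluster)
    print(client)
    client.close()
    cluster.close()
```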
e74af52279e8268e9684a340dae8b85b41f1607f | 11,830 | ipynb | Jupyter Notebook | cgames/06_sonic2/sonic2_ddqn.ipynb | deepanshut041/Reinforcement-Learning-Basic | 2a4c28008d2fc73441778ebd2f7e7d3db12f17ff | [
"MIT"
] | 21 | 2020-01-25T12:04:24.000Z | 2022-03-13T10:14:36.000Z | cgames/06_sonic2/sonic2_ddqn.ipynb | deepanshut041/Reinforcement-Learning-Basic | 2a4c28008d2fc73441778ebd2f7e7d3db12f17ff | [
"MIT"
] | null | null | null | cgames/06_sonic2/sonic2_ddqn.ipynb | deepanshut041/Reinforcement-Learning-Basic | 2a4c28008d2fc73441778ebd2f7e7d3db12f17ff | [
"MIT"
] | 14 | 2020-05-15T17:14:02.000Z | 2022-03-30T12:37:13.000Z | 26.465324 | 142 | 0.494252 | [
[
[
"# Sonic The Hedgehog 2 with Dueling dqn\n\n## Step 1: Import the libraries",
"_____no_output_____"
]
],
[
[
"import time\nimport retro\nimport random\nimport torch\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\nfrom IPython.display import clear_output\nimport math\n\n%matplotlib inline",
"_____no_output_____"
],
[
"import sys\nsys.path.append('../../')\nfrom algos.agents.dqn_agent import DDQNAgent\nfrom algos.models.dqn_cnn import DDQNCnn\nfrom algos.preprocessing.stack_frame import preprocess_frame, stack_frame",
"_____no_output_____"
]
],
[
[
"## Step 2: Create our environment\n\nInitialize the environment in the code cell below.\n",
"_____no_output_____"
]
],
[
[
"env = retro.make(game='SonicTheHedgehog2-Genesis', state='EmeraldHillZone.Act1', scenario='contest')\nenv.seed(0)",
"_____no_output_____"
],
[
"# if gpu is to be used\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(\"Device: \", device)",
"_____no_output_____"
]
],
[
[
"## Step 3: Viewing our Enviroment",
"_____no_output_____"
]
],
[
[
"print(\"The size of frame is: \", env.observation_space.shape)\nprint(\"No. of Actions: \", env.action_space.n)\nenv.reset()\nplt.figure()\nplt.imshow(env.reset())\nplt.title('Original Frame')\nplt.show()",
"_____no_output_____"
],
[
"possible_actions = {\n # No Operation\n 0: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n # Left\n 1: [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n # Right\n 2: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n # Left, Down\n 3: [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n # Right, Down\n 4: [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],\n # Down\n 5: [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n # Down, B\n 6: [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n # B\n 7: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }",
"_____no_output_____"
]
],
[
[
"### Execute the code cell below to play Pong with a random policy.",
"_____no_output_____"
]
],
[
[
"def random_play():\n score = 0\n env.reset()\n for i in range(200):\n env.render()\n action = possible_actions[np.random.randint(len(possible_actions))]\n state, reward, done, _ = env.step(action)\n score += reward\n if done:\n print(\"Your Score at end of game is: \", score)\n break\n env.reset()\n env.render(close=True)\nrandom_play()",
"_____no_output_____"
]
],
[
[
"## Step 4:Preprocessing Frame",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.imshow(preprocess_frame(env.reset(), (1, -1, -1, 1), 84), cmap=\"gray\")\nplt.title('Pre Processed image')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Step 5: Stacking Frame",
"_____no_output_____"
]
],
[
[
"def stack_frames(frames, state, is_new=False):\n frame = preprocess_frame(state, (1, -1, -1, 1), 84)\n frames = stack_frame(frames, frame, is_new)\n\n return frames\n ",
"_____no_output_____"
]
],
[
[
"## Step 6: Creating our Agent",
"_____no_output_____"
]
],
[
[
"INPUT_SHAPE = (4, 84, 84)\nACTION_SIZE = len(possible_actions)\nSEED = 0\nGAMMA = 0.99 # discount factor\nBUFFER_SIZE = 100000 # replay buffer size\nBATCH_SIZE = 32 # Update batch size\nLR = 0.0001 # learning rate \nTAU = 1e-3 # for soft update of target parameters\nUPDATE_EVERY = 100 # how often to update the network\nUPDATE_TARGET = 10000 # After which thershold replay to be started \nEPS_START = 0.99 # starting value of epsilon\nEPS_END = 0.01 # Ending value of epsilon\nEPS_DECAY = 100 # Rate by which epsilon to be decayed\n\nagent = DDQNAgent(INPUT_SHAPE, ACTION_SIZE, SEED, device, BUFFER_SIZE, BATCH_SIZE, GAMMA, LR, TAU, UPDATE_EVERY, UPDATE_TARGET, DDQNCnn)",
"_____no_output_____"
]
],
[
[
"## Step 7: Watching untrained agent play",
"_____no_output_____"
]
],
[
[
"env.viewer = None\n# watch an untrained agent\nstate = stack_frames(None, env.reset(), True) \nfor j in range(200):\n env.render(close=False)\n action = agent.act(state, eps=0.01)\n next_state, reward, done, _ = env.step(possible_actions[action])\n state = stack_frames(state, next_state, False)\n if done:\n env.reset()\n break \nenv.render(close=True)",
"_____no_output_____"
]
],
[
[
"## Step 8: Loading Agent\nUncomment line to load a pretrained agent",
"_____no_output_____"
]
],
[
[
"start_epoch = 0\nscores = []\nscores_window = deque(maxlen=20)",
"_____no_output_____"
]
],
[
[
"## Step 9: Train the Agent with DDQN",
"_____no_output_____"
]
],
[
[
"epsilon_by_epsiode = lambda frame_idx: EPS_END + (EPS_START - EPS_END) * math.exp(-1. * frame_idx /EPS_DECAY)\n\nplt.plot([epsilon_by_epsiode(i) for i in range(1000)])",
"_____no_output_____"
],
[
"def train(n_episodes=1000):\n \"\"\"\n Params\n ======\n n_episodes (int): maximum number of training episodes\n \"\"\"\n for i_episode in range(start_epoch + 1, n_episodes+1):\n state = stack_frames(None, env.reset(), True)\n score = 0\n eps = epsilon_by_epsiode(i_episode)\n\n # Punish the agent for not moving forward\n prev_state = {}\n steps_stuck = 0\n timestamp = 0\n\n while timestamp < 10000:\n action = agent.act(state, eps)\n next_state, reward, done, info = env.step(possible_actions[action])\n score += reward\n\n timestamp += 1\n\n # Punish the agent for standing still for too long.\n if (prev_state == info):\n steps_stuck += 1\n else:\n steps_stuck = 0\n prev_state = info\n \n if (steps_stuck > 20):\n reward -= 1\n \n next_state = stack_frames(state, next_state, False)\n agent.step(state, action, reward, next_state, done)\n state = next_state\n if done:\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n \n \n clear_output(True)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.show()\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tEpsilon: {:.2f}'.format(i_episode, np.mean(scores_window), eps), end=\"\")\n \n return scores",
"_____no_output_____"
],
[
"scores = train(1000)",
"_____no_output_____"
]
],
[
[
"## Step 10: Watch a Smart Agent!",
"_____no_output_____"
]
],
[
[
"env.viewer = None\n# watch an untrained agent\nstate = stack_frames(None, env.reset(), True) \nfor j in range(10000):\n env.render(close=False)\n action = agent.act(state, eps=0.91)\n next_state, reward, done, _ = env.step(possible_actions[action])\n state = stack_frames(state, next_state, False)\n if done:\n env.reset()\n break \nenv.render(close=True)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
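The exploration schedule above decays epsilon exponentially with the episode index, eps(i) = EPS_END + (EPS_START - EPS_END) * exp(-i / EPS_DECAY). Writing it out and evaluating it at a few episodes shows how quickly exploration collapses with EPS_DECAY = 100.

```python
import math

EPS_START, EPS_END, EPS_DECAY = 0.99, 0.01, 100

def eps(episode):
    # exponential decay from EPS_START toward EPS_END
    return EPS_END + (EPS_START - EPS_END) * math.exp(-episode / EPS_DECAY)

for episode in (1, 50, 100, 300, 500):
    print(episode, round(eps(episode), 3))
```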
e74af8c4665c73dedd9db60d1dcc94c42bc3be84 | 68,201 | ipynb | Jupyter Notebook | Analyze_ab_test_results_notebook.ipynb | Rashwan94/Conversion-A-B-test | ec6ebcb3094c3c48204cbc01607c0d5317689fe3 | [
"MIT"
] | null | null | null | Analyze_ab_test_results_notebook.ipynb | Rashwan94/Conversion-A-B-test | ec6ebcb3094c3c48204cbc01607c0d5317689fe3 | [
"MIT"
] | null | null | null | Analyze_ab_test_results_notebook.ipynb | Rashwan94/Conversion-A-B-test | ec6ebcb3094c3c48204cbc01607c0d5317689fe3 | [
"MIT"
] | null | null | null | 36.686928 | 12,424 | 0.569889 | [
[
[
"## Analyze A/B Test Results\n\nThis project will assure you have mastered the subjects covered in the statistics lessons. The hope is to have this project be as comprehensive of these topics as possible. Good luck!\n\n## Table of Contents\n- [Introduction](#intro)\n- [Part I - Probability](#probability)\n- [Part II - A/B Test](#ab_test)\n- [Part III - Regression](#regression)\n\n\n<a id='intro'></a>\n### Introduction\n\nA/B tests are very commonly performed by data analysts and data scientists. It is important that you get some practice working with the difficulties of these \n\nFor this project, you will be working to understand the results of an A/B test run by an e-commerce website. Your goal is to work through this notebook to help the company understand if they should implement the new page, keep the old page, or perhaps run the experiment longer to make their decision.\n\n**As you work through this notebook, follow along in the classroom and answer the corresponding quiz questions associated with each question.** The labels for each classroom concept are provided for each question. This will assure you are on the right track as you work through the project, and you can feel more confident in your final submission meeting the criteria. As a final check, assure you meet all the criteria on the [RUBRIC](https://review.udacity.com/#!/projects/37e27304-ad47-4eb0-a1ab-8c12f60e43d0/rubric).\n\n<a id='probability'></a>\n#### Part I - Probability\n\nTo get started, let's import our libraries.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport gc\n%matplotlib inline\n#We are setting the seed to assure you get the same answers on quizzes as we set up\nrandom.seed(42)",
"_____no_output_____"
]
],
[
[
"`1.` Now, read in the `ab_data.csv` data. Store it in `df`. **Use your dataframe to answer the questions in Quiz 1 of the classroom.**\n\na. Read in the dataset and take a look at the top few rows here:",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('ab_data.csv')\ndf.head()",
"_____no_output_____"
]
],
[
[
"b. Use the below cell to find the number of rows in the dataset.",
"_____no_output_____"
]
],
[
[
"df.shape",
"_____no_output_____"
]
],
[
[
"c. The number of unique users in the dataset.",
"_____no_output_____"
]
],
[
[
"df.user_id.nunique()",
"_____no_output_____"
]
],
[
[
"d. The proportion of users converted.",
"_____no_output_____"
]
],
[
[
"df[df['converted'] == 1]['user_id'].nunique() / df.user_id.nunique()",
"_____no_output_____"
]
],
[
[
"e. The number of times the `new_page` and `treatment` don't line up.",
"_____no_output_____"
]
],
[
[
"df[(df['group'] == 'treatment') & (df['landing_page'] == 'old_page')].shape[0] + df[(df['group'] == 'control') & (df['landing_page'] == 'new_page')].shape[0]",
"_____no_output_____"
]
],
[
[
"f. Do any of the rows have missing values?",
"_____no_output_____"
]
],
[
[
"df.isnull().sum()",
"_____no_output_____"
]
],
[
[
"`2.` For the rows where **treatment** is not aligned with **new_page** or **control** is not aligned with **old_page**, we cannot be sure if this row truly received the new or old page. Use **Quiz 2** in the classroom to provide how we should handle these rows. \n\na. Now use the answer to the quiz to create a new dataset that meets the specifications from the quiz. Store your new dataframe in **df2**.",
"_____no_output_____"
]
],
[
[
"remov = df[(df['group'] == 'treatment') & (df['landing_page'] == 'old_page')].append(df[(df['group'] == 'control') & (df['landing_page'] == 'new_page')])\ndf2 = df.append(remov).drop_duplicates(keep=False)\ndf2.shape",
"_____no_output_____"
],
[
"# Double Check all of the correct rows were removed - this should be 0\ndf2[((df2['group'] == 'treatment') == (df2['landing_page'] == 'new_page')) == False].shape[0]",
"_____no_output_____"
]
],
[
[
"`3.` Use **df2** and the cells below to answer questions for **Quiz3** in the classroom.",
"_____no_output_____"
],
[
"a. How many unique **user_id**s are in **df2**?",
"_____no_output_____"
]
],
[
[
"df2.user_id.nunique()",
"_____no_output_____"
]
],
[
[
"b. There is one **user_id** repeated in **df2**. What is it?",
"_____no_output_____"
]
],
[
[
"df2[df2.duplicated(['user_id'], keep='first')].index",
"_____no_output_____"
]
],
[
[
"c. What is the row information for the repeat **user_id**? ",
"_____no_output_____"
]
],
[
[
"df2[df2.duplicated(['user_id'], keep='last')]",
"_____no_output_____"
],
[
"df2.shape",
"_____no_output_____"
]
],
[
[
"d. Remove **one** of the rows with a duplicate **user_id**, but keep your dataframe as **df2**.",
"_____no_output_____"
]
],
[
[
"df2.user_id.drop_duplicates(keep='first', inplace=False).shape",
"_____no_output_____"
]
],
[
[
"`4.` Use **df2** in the below cells to answer the quiz questions related to **Quiz 4** in the classroom.\n\na. What is the probability of an individual converting regardless of the page they receive?",
"_____no_output_____"
]
],
[
[
"df2[df2['converted'] == 1].shape[0] / df2['converted'].shape[0]",
"_____no_output_____"
]
],
[
[
"b. Given that an individual was in the `control` group, what is the probability they converted?",
"_____no_output_____"
]
],
[
[
"prob_old = df2[df2['group'] == 'control']['converted'].mean()\nprob_old",
"_____no_output_____"
]
],
[
[
"c. Given that an individual was in the `treatment` group, what is the probability they converted?",
"_____no_output_____"
]
],
[
[
"prob_new = df2[df2['group'] == 'treatment']['converted'].mean()\nprob_new",
"_____no_output_____"
]
],
[
[
"d. What is the probability that an individual received the new page?",
"_____no_output_____"
]
],
[
[
"df2[df2['landing_page'] == 'new_page'].shape[0] / df2.shape[0]",
"_____no_output_____"
]
],
[
[
"e. Consider your results from a. through d. above, and explain below whether you think there is sufficient evidence to say that the new treatment page leads to more conversions.",
"_____no_output_____"
],
[
"**Your answer goes here.**\n\n##### From the observations, each group has almost equal proportion. The probability of conversion after seeing the old or new page is $\\approx 12 \\text{%}$. Hence; individuals who were presented with the old page are, equally likely to convert when compared with those with the new page. Therefore; there is no enough evidence to say \"the new page leads to more conversions\".",
"_____no_output_____"
],
[
"<a id='ab_test'></a>\n### Part II - A/B Test\n\nNotice that because of the time stamp associated with each event, you could technically run a hypothesis test continuously as each observation was observed. \n\nHowever, then the hard question is do you stop as soon as one page is considered significantly better than another or does it need to happen consistently for a certain amount of time? How long do you run to render a decision that neither page is better than another? \n\nThese questions are the difficult parts associated with A/B tests in general. \n\n\n`1.` For now, consider you need to make the decision just based on all the data provided. If you want to assume that the old page is better unless the new page proves to be definitely better at a Type I error rate of 5%, what should your null and alternative hypotheses be? You can state your hypothesis in terms of words or in terms of **$p_{old}$** and **$p_{new}$**, which are the converted rates for the old and new pages.",
"_____no_output_____"
],
[
"**Put your answer here.**\n\n##### Null : conversion mean for treatment group - conversion mean for control group =< 0\n\n##### alternative : conversion mean for treatment group - conversion mean for control group > 0",
"_____no_output_____"
],
[
"`2.` Assume under the null hypothesis, $p_{new}$ and $p_{old}$ both have \"true\" success rates equal to the **converted** success rate regardless of page - that is $p_{new}$ and $p_{old}$ are equal. Furthermore, assume they are equal to the **converted** rate in **ab_data.csv** regardless of the page. <br><br>\n\nUse a sample size for each page equal to the ones in **ab_data.csv**. <br><br>\n\nPerform the sampling distribution for the difference in **converted** between the two pages over 10,000 iterations of calculating an estimate from the null. <br><br>\n\nUse the cells below to provide the necessary parts of this simulation. If this doesn't make complete sense right now, don't worry - you are going to work through the problems below to complete this problem. You can use **Quiz 5** in the classroom to make sure you are on the right track.<br><br>",
"_____no_output_____"
],
[
"a. What is the **convert rate** for $p_{new}$ under the null? ",
"_____no_output_____"
]
],
[
[
"p_new = df2[df2['converted'] == 1].shape[0] / df2['converted'].shape[0]\np_new",
"_____no_output_____"
]
],
[
[
"b. What is the **convert rate** for $p_{old}$ under the null? <br><br>",
"_____no_output_____"
]
],
[
[
"p_old = df2[df2['converted'] == 1].shape[0] / df2['converted'].shape[0]\np_old",
"_____no_output_____"
]
],
[
[
"c. What is $n_{new}$?",
"_____no_output_____"
]
],
[
[
"n_new = df2[df2['landing_page'] == 'new_page'].shape[0]\nn_new",
"_____no_output_____"
]
],
[
[
"d. What is $n_{old}$?",
"_____no_output_____"
]
],
[
[
"n_old = df2[df2['landing_page'] == 'old_page'].shape[0]\nn_old",
"_____no_output_____"
]
],
[
[
"e. Simulate $n_{new}$ transactions with a convert rate of $p_{new}$ under the null. Store these $n_{new}$ 1's and 0's in **new_page_converted**.",
"_____no_output_____"
]
],
[
[
"new_page_converted = []\n\nsim_new = np.random.choice([0,1], size = n_new, p=[1-p_new, p_new])\nnew_page_converted.append(sim_new)\n\nnew_page_converted = np.array(new_page_converted)",
"_____no_output_____"
]
],
[
[
"f. Simulate $n_{old}$ transactions with a convert rate of $p_{old}$ under the null. Store these $n_{old}$ 1's and 0's in **old_page_converted**.",
"_____no_output_____"
]
],
[
[
"old_page_converted = []\n\nsim_old = np.random.choice([0,1], size = n_old, p=[1-p_old, p_old])\nold_page_converted.append(sim_old)\n\nold_page_converted = np.array(old_page_converted)",
"_____no_output_____"
]
],
[
[
"g. Find $p_{new}$ - $p_{old}$ for your simulated values from part (e) and (f).",
"_____no_output_____"
]
],
[
[
"diff = new_page_converted.mean() - old_page_converted.mean()\ndiff",
"_____no_output_____"
]
],
[
[
"h. Simulate 10,000 $p_{new}$ - $p_{old}$ values using this same process similarly to the one you calculated in parts **a. through g.** above. Store all 10,000 values in a numpy array called **p_diffs**.",
"_____no_output_____"
]
],
[
[
"p_diffs = []\n\nfor i in range(10000):\n new_conv = np.random.choice([0,1], size = n_new, p=[1-p_new, p_new])\n old_conv = np.random.choice([0,1], size = n_old, p=[1-p_old, p_old])\n p_diffs.append(new_conv.mean() - old_conv.mean())",
"_____no_output_____"
],
[
"obsv_diff = prob_new - prob_old\nobsv_diff",
"_____no_output_____"
]
],
[
[
"i. Plot a histogram of the **p_diffs**. Does this plot look like what you expected? Use the matching problem in the classroom to assure you fully understand what was computed here.",
"_____no_output_____"
]
],
[
[
"plt.hist(p_diffs, alpha=0.5)\nplt.axvline(x=np.percentile(p_diffs, 2.5), color='red')\nplt.axvline(x=np.percentile(p_diffs, 97.5), color='red')\nplt.axvline(x=obsv_diff, color='green', linestyle = '--')\nplt.title('Sampling distribution of conversion rates')\nplt.ylabel('Frequency')\nplt.xlabel('Sample mean');",
"_____no_output_____"
]
],
[
[
"j. What proportion of the **p_diffs** are greater than the actual difference observed in **ab_data.csv**?",
"_____no_output_____"
]
],
[
[
"(np.array(p_diffs) > obsv_diff).mean()",
"_____no_output_____"
]
],
[
[
"k. Please explain using the vocabulary you've learned in this course what you just computed in part **j.** What is this value called in scientific studies? What does this value mean in terms of whether or not there is a difference between the new and old pages?",
"_____no_output_____"
],
[
"**Put your answer here.**\n\n##### The value computed above is, the probability of seeing the sample statistic if the null hypothesis is true. In scientific studies this value is called p-value. In our case; it means that the probability of getting the observed_diff is high if the null hypothesis is true. Hence; if the null hypothesis is true, conversions through the old page are equal or higher when compared with the new page. As a result; we can conclude that the old page is performing the same as the new webpage.",
"_____no_output_____"
],
[
"l. We could also use a built-in to achieve similar results. Though using the built-in might be easier to code, the above portions are a walkthrough of the ideas that are critical to correctly thinking about statistical significance. Fill in the below to calculate the number of conversions for each page, as well as the number of individuals who received each page. Let `n_old` and `n_new` refer the the number of rows associated with the old page and new pages, respectively.",
"_____no_output_____"
]
],
[
[
"import statsmodels.api as sm\n\nconvert_old = len(df2.query('landing_page == \"old_page\" & converted == 1'))\nconvert_new = len(df2.query('landing_page == \"new_page\" & converted == 1'))\n#n_old previously computed \n#n_new previously computed ",
"_____no_output_____"
]
],
[
[
"m. Now use `stats.proportions_ztest` to compute your test statistic and p-value. [Here](https://docs.w3cub.com/statsmodels/generated/statsmodels.stats.proportion.proportions_ztest/) is a helpful link on using the built in.",
"_____no_output_____"
]
],
[
[
"stat, pval = sm.stats.proportions_ztest(count=[convert_old, convert_new], nobs=[n_old,n_new],\n value = 0, alternative='smaller')\nprint('z-statistic=',stat)\nprint('p-value=',pval)",
"z-statistic= 1.3116075339133115\np-value= 0.905173705140591\n"
]
],
[
[
"n. What do the z-score and p-value you computed in the previous question mean for the conversion rates of the old and new pages? Do they agree with the findings in parts **j.** and **k.**?",
"_____no_output_____"
],
[
"**Put your answer here.**\n\n##### With 0.05 type 1 error rate (0.95 confidence level), the null hypothesis gets rejected if Z-score of the sample statistic is less than -1.96 or greater than +1.96. However; Z-score is -1.31, which is out of the rejection region. Furthermore; the p-value is close to 1, which signifies we have to stay with the null hypothesis (the old page is performing in the same way as the new page). So, there is no statistically significant evidence to reject the null hypothesis, and this conclusion matches with the findings in question f. and g.",
"_____no_output_____"
],
[
"<a id='regression'></a>\n### Part III - A regression approach\n\n`1.` In this final part, you will see that the result you achieved in the A/B test in Part II above can also be achieved by performing regression.<br><br> \n\na. Since each row is either a conversion or no conversion, what type of regression should you be performing in this case?",
"_____no_output_____"
],
[
"**Put your answer here.**\n\n##### Logistic regression, since this is a classification task",
"_____no_output_____"
],
[
"b. The goal is to use **statsmodels** to fit the regression model you specified in part **a.** to see if there is a significant difference in conversion based on which page a customer receives. However, you first need to create in df2 a column for the intercept, and create a dummy variable column for which page each user received. Add an **intercept** column, as well as an **ab_page** column, which is 1 when an individual receives the **treatment** and 0 if **control**.",
"_____no_output_____"
]
],
[
[
"df2['ab_page'] = pd.get_dummies(df2['landing_page']).iloc[:,0]\ndf2['intercept'] = 1\ndf2.head()",
"_____no_output_____"
]
],
[
[
"c. Use **statsmodels** to instantiate your regression model on the two columns you created in part b., then fit the model using the two columns you created in part **b.** to predict whether or not an individual converts. ",
"_____no_output_____"
]
],
[
[
"lm = sm.Logit(df2['converted'], df2[['intercept', 'ab_page']])\nresults = lm.fit()",
"Optimization terminated successfully.\n Current function value: 0.366118\n Iterations 6\n"
]
],
[
[
"d. Provide the summary of your model below, and use it as necessary to answer the following questions.",
"_____no_output_____"
]
],
[
[
"results.summary2()",
"_____no_output_____"
],
[
"print('With all other elements held constant treatment users are', 1/np.exp(-0.0150), 'times less likely to convert than control users')",
"With all other elements held constant treatment users are 1.015113064615719 times less likely to convert than control users\n"
]
],
[
[
"e. What is the p-value associated with **ab_page**? Why does it differ from the value you found in **Part II**?<br><br> **Hint**: What are the null and alternative hypotheses associated with your regression model, and how do they compare to the null and alternative hypotheses in **Part II**?",
"_____no_output_____"
],
[
"**Put your answer here.**\n\n##### The p-value associated with the *ab_page* coeeficient in the regression model is different than the p-value obtained from the z-test. In the case of the resgression model it is the probability of type-1 errors for the null hypothesis which is the coefficient = 0\n\n##### Null: Coefficient = 0\n##### Alternate hypothesis : Coefficient != 0\n\n##### and as shown the p value is at 0.189(almost 19%) which is way farther than out 0.05(5%) confidence threshold. Therefore, we will fail to reject the null and and conclude the coefficient iss *NOT* statistically significant to be considered in prediction of the conversion probability ",
"_____no_output_____"
],
[
"f. Now, you are considering other things that might influence whether or not an individual converts. Discuss why it is a good idea to consider other factors to add into your regression model. Are there any disadvantages to adding additional terms into your regression model?",
"_____no_output_____"
],
[
"**Put your answer here.**",
"_____no_output_____"
],
[
"g. Now along with testing if the conversion rate changes for different pages, also add an effect based on which country a user lives in. You will need to read in the **countries.csv** dataset and merge together your datasets on the appropriate rows. [Here](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.join.html) are the docs for joining tables. \n\nDoes it appear that country had an impact on conversion? Don't forget to create dummy variables for these country columns - **Hint: You will need two columns for the three dummy variables.** Provide the statistical output as well as a written response to answer this question.",
"_____no_output_____"
]
],
[
[
"country = pd.read_csv('countries.csv')\ncountry.head()",
"_____no_output_____"
],
[
"country.shape",
"_____no_output_____"
],
[
"country.isnull().sum()",
"_____no_output_____"
],
[
"country['country'].value_counts()",
"_____no_output_____"
],
[
"df2 = df2.join(country.set_index('user_id'), on='user_id')\ndf2[['UK', 'CA']] = pd.get_dummies(df2['country'], drop_first=True)\ndf2.head()",
"_____no_output_____"
],
[
"lm = sm.Logit(df2['converted'], df2[['intercept', 'ab_page','UK', 'CA']])\nresults = lm.fit()",
"Optimization terminated successfully.\n Current function value: 0.366112\n Iterations 6\n"
]
],
[
[
"h. Though you have now looked at the individual factors of country and page on conversion, we would now like to look at an interaction between page and country to see if there significant effects on conversion. Create the necessary additional columns, and fit the new model. \n\nProvide the summary results, and your conclusions based on the results.",
"_____no_output_____"
]
],
[
[
"results.summary2()",
"_____no_output_____"
],
[
"print('With every other factor held constant, users from Uk are',np.exp(0.0506).round(2), 'times more likely to convert than users from the US')\nprint('With every other factor held constant, users from CA are',np.exp(0.0408).round(2), 'times more likely to convert than users from the US')",
"With every other factor held constant, users from Uk are 1.05 times more likely to convert than users from the US\nWith every other factor held constant, users from CA are 1.04 times more likely to convert than users from the US\n"
]
],
[
[
"**Does it appear that country had an impact on conversion?**\n\n##### No, it appears not\n\n\n**Explaination:**\n\n##### The p-value associated with the coeefficiect of both Canada and UK are both past the 0.05(5%) threshold for a 95% confidence interval ",
"_____no_output_____"
]
],
[
[
"from subprocess import call\ncall(['python', '-m', 'nbconvert', 'Analyze_ab_test_results_notebook.ipynb'])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74af9957453c13e7cb7b887534811809535fc63 | 872,747 | ipynb | Jupyter Notebook | Form_ratio_test(Needs files to run).ipynb | aeferreira/similarity_share | a29e3d7982f1c2f0e93433d92e55b467f9433d76 | [
"MIT"
] | null | null | null | Form_ratio_test(Needs files to run).ipynb | aeferreira/similarity_share | a29e3d7982f1c2f0e93433d92e55b467f9433d76 | [
"MIT"
] | null | null | null | Form_ratio_test(Needs files to run).ipynb | aeferreira/similarity_share | a29e3d7982f1c2f0e93433d92e55b467f9433d76 | [
"MIT"
] | null | null | null | 226.334803 | 75,024 | 0.883679 | [
[
[
"# Warning - The notebook needs the following files to run well:",
"_____no_output_____"
],
[
"The database of metabolites used to see the usual ratios between the different elements was from ChEBI, specifically https://www.ebi.ac.uk/chebi/downloadsForward.do. Specifically, two files were used from this site:\n\n- ChEBI_complete_3star.sdf file contains all the chemical structures and associated information of metabolites annotated with a 3 star score in the database (higher reliability) - total of 49266 formulas.\n- ChEBI_complete.sdf contains all the chemical structures and associated information of all metabolites in the database - total of 113402 formulas.\n\n#### These files aren't in the repository due to their size, but they can be obtained from the ChEBI database.\n\nThe same processes were done for the two different files.",
"_____no_output_____"
],
[
"# Formula Generation and Assignment Auxiliary Notebook\n\n# Form_ratio_test - Observation of the most common ratios (in relation to carbon atoms) that metabolites have\n\nIn FormGeneration_Assignment, ratios from Kind and Fiehn, 2007 paper were used to make the Database of possible formulas to be assigned. They estimate that these represent 99,7% of metabolites (based on a database analysis). However, the assignment of the formulas sometimes in the extreme of the allowed ratios instead of formulas with more conventional formulas because they have less heteroatoms or are closer to the peak mass. To avoid that, we want to add an extra criteria that prioritizes formulas with more conventional ratios (in metabolites) between their different elements. This criteria was added in form_checker_ratio function that is present at the end of that notebook.\n\nThis notebook purpose is to find a good set of stricter ranges use to enforce this extra criteria that we want to add. Thus, it is an auxiliary notebook to FormGeneration_Assignment.ipynb. This was done by analysing a freely available metabolite database (that are presented above - after Warning) and was evaluated by observing cumulative graphs of the % of formulas by their 'H/C', 'O/C', 'N/C', 'S/C', 'P/C', 'F/C' and 'Cl/C' ratios to find a suitable range that still encompasses most formulas but limits the stranger ratios that can be found at the extremes.\n\n### Organization of the notebook:\n\n#### The analysis is performed on two files presented below the Warning in the beginning of the notebook, the analysis on both is identical\n\n- Read and store the metabolites from a database (file 1) and filter them for non-repeated monoisotopic and defined formulas with Carbons and Hydrogens and with only C,H,O,N,S,P,F and Cl elements.\n- Calculate the ratios of the formulas of the different elements to carbon ('element'/Carbon) and store.\n- Analysis of the most common 'element'/C ratios metabolites have with histogram and cumulative graphs.\n\n- Repeat the process for the second file.\n\n### Needed imports",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"# ChEBI with only 3 star annotated metabolites\n\n## Reading and Storing Formulas from a Database ",
"_____no_output_____"
]
],
[
[
"# Open Database file\nwith open('ChEBI_complete_3star.sdf/ChEBI_complete_3star.sdf') as file: \n formula = False\n form=[]\n for line in file:\n # Append all formulas\n if formula == True:\n form.append(line[:-1])\n #print(line[:-1])\n # Read next line\n if line.startswith('> <Formulae>'):\n formula = True\n else:\n formula = False\n#form ",
"_____no_output_____"
],
[
"# Nº of formulas in the Database\nlen(form)",
"_____no_output_____"
]
],
[
[
"### Filter the formulas in the database\n\nFilter formulas with:\n- an unspecified R group or X halogen, \n- formulas that could have a section repeated n times (polymers), \n- formulas with non-covalent bonds represented by a '.' such as '(C6H8O6)n.H2O',\n- formulas with isotopes such as C6H11[18F]O5,\n- formulas without carbons or hydrogens.",
"_____no_output_____"
]
],
[
[
"formulas = []\ntot_form = len(form)\n\n#c,r,n,p,x,n=0,0,0,0,0,0\n\nfor i in form:\n if 'C' in i: # Has to have Carbon (this is actually: has to have carbon, Cl, Ca, Cu, Cr, Cs or Cd - fixed later)\n #c=c+1\n if 'R' not in i: # No unspecified R groups\n #r=r+1\n if 'n' not in i: # No polymers with sections that can be repeated n times\n #n=n+1\n if '.' not in i: # No formulas that could have non-covalent bonds\n #p=p+1\n if 'X' not in i: # No unspecified X halogens\n #x=x+1\n if '[' not in i: # No\n if 'H' in i: # Has to have Hydrogen (this is actually: has to have hydrogen or mercury - fixed later)\n #h = h+1\n formulas.append(i)\n#print(c,r,n,p,x,h)\nprint('Nº of formulas after filtering:', len(formulas))\n\nprint('Nº of non-repeating formulas after filtering:', len(set(formulas)))",
"Nº of formulas after filtering: 43779\nNº of non-repeating formulas after filtering: 21164\n"
]
],
[
[
"There are 43779 formulas remaining after filtering, 21164 of those were unique different formulas (more than half of the formulas were repeated).",
"_____no_output_____"
]
],
[
[
"#len(set(formulas))\n#formulas = set(formulas)\n#formulas[:20]",
"_____no_output_____"
]
],
[
[
"#### Transform formulas into Dictionary/DataFrame format (from string format) and take out repeating formulas (done automatically by this process)",
"_____no_output_____"
]
],
[
[
"def formula_process(formula):\n \"\"\"Transforms a formula in string format into a DataFrame. Element order: C, H, N, O, S, P, F, Cl.\"\"\"\n \n # Make the row DataFrame to store the results\n #results = pd.DataFrame(np.zeros((1,8)), columns = ['C','H','O','N','S','P','Cl','F'])\n results = {}\n count = ''\n letter = None\n \n # Run through the string\n for i in range(len(formula)):\n if formula[i].isupper():\n if letter:\n # Store results of the previous letter\n results[letter] = int(count or 1)\n count = ''\n if i+1 < len(formula):\n # If 2 letter element\n if formula[i+1].islower():\n letter = formula[i] + formula[i+1]\n continue\n letter = formula[i]\n \n elif formula[i].isdigit():\n count = count + formula[i]\n \n # Store results of the last letter\n results[letter] = int(count or 1)\n \n return results",
"_____no_output_____"
],
[
"# Transform each formula into Dictionary or DataFrame format\n# This also elimiantes repeating formulas\ndb = {}\nfor i in formulas:\n #print(i)\n db[i] = formula_process(i)",
"_____no_output_____"
],
[
"# Transform information into a DataFrame\nfinal_db = pd.DataFrame.from_dict(db).T\nfinal_db",
"_____no_output_____"
]
],
[
[
"See the elements present in the list of formulas and also the number of times each element appear in a formulas\n\nNote that C appear in 21104 and H in 21159 formulas out of the 21164 formulas, when they should appear in all formulas. This discrepancy is explaiend by the comment made in the formula filtering cell a bit above. It will be solved in a bit.",
"_____no_output_____"
]
],
[
[
"#final_db[final_db['Se'].notnull()].T\nfinal_db.notnull().sum()",
"_____no_output_____"
]
],
[
[
"#### Guarantee that each formula has at least 1 C and 1 H, and that only have the following elements: C,H,O,N,S,P,Cl and F\n\nOnly formulas with C,H,O,N,S,P,Cl and F were kept since they are (as we can also see above) by far the most common elements in metabolites and are the elements that can be considered (right now as it is built) in the Formula Assignment algorithm made in FormGeneration_Assignment.ipynb (this notebook is auxiliary to that one to observe the most common ratios between these elemetns in known metabolites).",
"_____no_output_____"
]
],
[
[
"# Only keep formulas that have carbon or hydrogen atoms\nfor i in range(2):\n teste = final_db.iloc[:,i].notnull()\n #print(final_db.iloc[:,i].isnull())\n final_db = final_db.loc[teste]\n \n# Take out formulas that have an element outside of the C,H,O,N,S,P,Cl and F\nfor i in range(8,len(final_db.columns)):\n teste = final_db.iloc[:,i].isnull()\n final_db = final_db.loc[teste]\nfinal_db",
"_____no_output_____"
]
],
[
[
"##### This finally filters the number of formulas from 21164 to 19593 formulas - final number of formulas considered\n\nAs it can be seen by this small filtering, very few formulas had elements outside of the main 8 mentioned.",
"_____no_output_____"
]
],
[
[
"final_db.notnull().sum()",
"_____no_output_____"
],
[
"# Truncate the DataFrame to only the elements we want to see and replace NaNs for 0\ndb_df = final_db[['C','H','O','N','S','P','F','Cl']]\ndb_df = db_df.replace({np.nan:0})\ndb_df",
"_____no_output_____"
]
],
[
[
"Calculate the distribution of ratios to carbon in the 19593 formulas - slowest cell of this analysis",
"_____no_output_____"
]
],
[
[
"# Calculate the different ratios \nratios_df = pd.DataFrame(index=db_df.index, columns = ['H/C','O/C','N/C','S/C','P/C','F/C','Cl/C'])\nc = 0\nfor i in db_df.index:\n ratios_df.loc[i] = [db_df.loc[i,'H']/db_df.loc[i,'C'],\n db_df.loc[i,'O']/db_df.loc[i,'C'],\n db_df.loc[i,'N']/db_df.loc[i,'C'],\n db_df.loc[i,'S']/db_df.loc[i,'C'],\n db_df.loc[i,'P']/db_df.loc[i,'C'],\n db_df.loc[i,'F']/db_df.loc[i,'C'],\n db_df.loc[i,'Cl']/db_df.loc[i,'C']]\n c+=1\n #print(c)\nratios_df",
"_____no_output_____"
],
[
"ratios_df.max()",
"_____no_output_____"
]
],
[
[
"# Analysis of the ratios of 'Element'/Carbon",
"_____no_output_____"
],
[
"### Histograms and more importantly Cumulative Graphs of the ratios - Not eliminating ratios with 0\n\nOutside of the ['H/C'] ratios, the ratios were dominated by formulas that didn't have the element that is not carbon which skewed the following results.",
"_____no_output_____"
],
[
"#### Histograms of the each of the ratios",
"_____no_output_____"
]
],
[
[
"f, ax = plt.subplots(figsize=(4,4))\ndata = ratios_df['H/C']\nplt.hist(data, bins=np.arange(min(data), 4, 0.1))\nplt.title(['H/C'])\nplt.show()",
"_____no_output_____"
],
[
"for i in ratios_df.columns[1:]:\n data = ratios_df[i]\n plt.hist(data, bins=np.arange(min(data), 1.3, 0.05))\n plt.title(i)\n plt.show()",
"_____no_output_____"
]
],
[
[
"#### (Percent) Cumulative Graphs of % of formulas by a certain element/Carbon ratio\n\nFirst is presented the general cumulative graph, and then two different subsections of the graph to present in more detail: the 'end' of the curves (all except 'H/C') and the 'start' of the curves (especially 'H/C').",
"_____no_output_____"
]
],
[
[
"# Plot the cumulative graph and adjust parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df.columns:\n data = ratios_df[i]\n # Make the histogram with the intended ranges between the bins\n values, base = np.histogram(data, bins=np.arange(min(data), 4, 0.05)) # Set X\n # Calculate the cumulative % of formulas for each ratio\n cumulative = np.cumsum(values)/len(ratios_df) # \n #print(cumulative)\n plt.plot(base[:-1], cumulative)\n\nplt.plot([0,4],[0.05,0.05], color = 'black');plt.text(x=4.01, y=0.05, s='0.05', verticalalignment='center')\nplt.plot([0,4],[0.9,0.9], color = 'black');plt.text(x=4.01, y=0.90, s='0.90', verticalalignment='center')\nplt.plot([0,4],[0.95,0.95], color = 'black');plt.text(x=4.01, y=0.95, s='0.95', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
],
[
"# Plot the cumulative graph and adjust parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df.columns:\n data = ratios_df[i]\n # Plot the histogram\n values, base = np.histogram(data, bins=np.arange(min(data), 1.3, 0.05)) # Set X\n # Calculate the \n cumulative = (np.cumsum(values)/len(ratios_df))\n #cumulative = np.cumsum(values)/len(ratios_df)\n plt.plot(base[:-1], cumulative)\n\nplt.ylim([0.8,1])\nplt.plot([0,1.2],[0.9,0.9], color = 'black');plt.text(x=1.21, y=0.90, s='0.90', verticalalignment='center')\nplt.plot([0,1.2],[0.95,0.95], color = 'black');plt.text(x=1.21, y=0.95, s='0.95', verticalalignment='center')\nplt.plot([0,1.2],[0.99,0.99], color = 'black');plt.text(x=1.21, y=0.99, s='0.99', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
],
[
"# Plot the cumulative graph and adjust parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df.columns:\n data = ratios_df[i]\n values, base = np.histogram(data, bins=np.arange(min(data), 1.05, 0.05)) # Set X\n cumulative = np.cumsum(values)/len(ratios_df)\n plt.plot(base[:-1], cumulative)\n\nplt.ylim([0,0.2])\n#plt.plot([0,1],[0.9,0.9], color = 'black')\nplt.plot([0,1],[0.05,0.05], color = 'black');plt.text(x=1.005, y=0.05, s='0.05', verticalalignment='center')\nplt.plot([0,1],[0.025,0.025], color = 'black');plt.text(x=1.005, y=0.025, s='0.025', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
]
],
[
[
"### Cumulative Graphs of the ratios - Eliminating ratios with 0\n\nOutside of the ['H/C'] ratios, the ratios were dominated by formulas that didn't have the element that is not carbon. Thus for each ratio 'element/C', the formulas that didn't have the corresponding elements were not taken into account for the graph.\n\n#### (Percent) Cumulative Graphs of % of formulas by a certain element/Carbon ratio\n\nFirst is presented the general cumulative graph, and then two different subsections of the graph to present in more detail: the 'end' of the curves (all except 'H/C') and the 'start' of the curves (especially 'H/C').",
"_____no_output_____"
]
],
[
[
"# Plot the Cumulative Graph and Adjust Parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df.columns:\n data = ratios_df[i]\n values, base = np.histogram(data, bins=np.arange(min(data), 4, 0.05))\n \n # Subtract the number of formulas that don't have the otehr element that not carbon in the ratio (ratio=0, values[0]) from \n # the number of formulas with ratio <= a value (np.cumsum(values)) and from the total number of formulas (len(ratios_df))\n cumulative = (np.cumsum(values) - values[0])/(len(ratios_df) - values[0])\n #print(cumulative)\n plt.plot(base[:-1], cumulative)\n\nplt.plot([0,4],[0.05,0.05], color = 'black');plt.text(x=4.01, y=0.05, s='0.05', verticalalignment='center')\nplt.plot([0,4],[0.9,0.9], color = 'black');plt.text(x=4.01, y=0.90, s='0.90', verticalalignment='center')\nplt.plot([0,4],[0.95,0.95], color = 'black');plt.text(x=4.01, y=0.95, s='0.95', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
],
[
"# Plot the Cumulative Graph and Adjust Parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df.columns:\n data = ratios_df[i]\n values, base = np.histogram(data, bins=np.arange(min(data), 1.3, 0.05))\n cumulative = (np.cumsum(values) - values[0])/(len(ratios_df) - values[0])\n #cumulative = np.cumsum(values)/19608\n plt.plot(base[:-1], cumulative)\n\nplt.ylim([0.8,1])\nplt.plot([0,1.2],[0.9,0.9], color = 'black');plt.text(x=1.21, y=0.90, s='0.90', verticalalignment='center')\nplt.plot([0,1.2],[0.95,0.95], color = 'black');plt.text(x=1.21, y=0.95, s='0.95', verticalalignment='center')\nplt.plot([0,1.2],[0.99,0.99], color = 'black');plt.text(x=1.21, y=0.99, s='0.99', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
],
[
"# Plot the Cumulative Graph and Adjust Parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df.columns:\n data = ratios_df[i]\n values, base = np.histogram(data, bins=np.arange(min(data), 1.05, 0.05))\n cumulative = (np.cumsum(values) - values[0])/(len(ratios_df) - values[0])\n plt.plot(base[:-1], cumulative)\n\nplt.ylim([0,0.2])\n#plt.plot([0,1],[0.9,0.9], color = 'black')\nplt.plot([0,1],[0.05,0.05], color = 'black');plt.text(x=1.005, y=0.05, s='0.05', verticalalignment='center')\nplt.plot([0,1],[0.025,0.025], color = 'black');plt.text(x=1.005, y=0.025, s='0.025', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
]
],
[
[
"# ChEBI with all metabolites in the Database - Slower\n\nThis analysis is exactly the same as the made before from the 2nd cell down, only the cell that reads the file is different.\n\n## Reading and Storing Formulas from a Database ",
"_____no_output_____"
]
],
[
[
"# Open Database file\nwith open('ChEBI_complete_3star.sdf/ChEBI_complete.sdf') as file: \n formula = False\n form2=[]\n for line in file:\n # Append all formulas\n if formula == True:\n form2.append(line[:-1])\n #print(line[:-1])\n # Read next line\n if line.startswith('> <Formulae>'):\n formula = True\n else:\n formula = False\n#form2 ",
"_____no_output_____"
],
[
"# Nº of formulas in the Database\nlen(form2)",
"_____no_output_____"
]
],
[
[
"### Filter the formulas in the database\n\nFilter formulas with:\n- an unspecified R group or X halogen, \n- formulas that could have a section repeated n times (polymers), \n- formulas with non-covalent bonds represented by a '.' such as '(C6H8O6)n.H2O',\n- formulas with isotopes such as C6H11[18F]O5,\n- formulas without carbons or hydrogens.",
"_____no_output_____"
]
],
[
[
"formulas2 = []\ntot_form = len(form)\n\n#c,r,n,p,x,n=0,0,0,0,0,0\n\nfor i in form2:\n if 'C' in i: # Has to have Carbon (this is actually: has to have carbon, Cl, Ca, Cu, Cr, Cs or Cd - fixed later)\n #c=c+1\n if 'R' not in i: # No unspecified R groups\n #r=r+1\n if 'n' not in i: # No polymers with sections that can be repeated n times\n #n=n+1\n if '.' not in i: # No formulas that could have non-covalent bonds\n #p=p+1\n if 'X' not in i: # No unspecified X halogens\n #x=x+1\n if '[' not in i: # No\n if 'H' in i: # Has to have Hydrogen (this is actually: has to have hydrogen or mercury - fixed later)\n #h = h+1\n formulas2.append(i)\n#print(c,r,n,p,x,h)\nprint('Nº of formulas after filtering:', len(formulas2))\n\nprint('Nº of non-repeating formulas after filtering:', len(set(formulas2)))",
"Nº of formulas after filtering: 106052\nNº of non-repeating formulas after filtering: 37938\n"
]
],
[
[
"There are 106052 formulas remaining after filtering, 37938 of those were unique different formulas (more than half of the formulas were repeated).",
"_____no_output_____"
]
],
[
[
"#len(set(formulas2))\n#formulas2 = set(formulas2)\n#formulas2[:20]",
"_____no_output_____"
]
],
[
[
"#### Transform formulas into Dictionary/DataFrame format (from string format) and take out repeating formulas (done automatically by this process)",
"_____no_output_____"
]
],
[
[
"# Transform each formula into Dictionary or DataFrame format\n# This also elimiantes repeating formulas\ndb2 = {}\nfor i in formulas2:\n #print(i)\n db2[i] = formula_process(i)",
"_____no_output_____"
],
[
"# Transform information into a DataFrame\nfinal_db2 = pd.DataFrame.from_dict(db2).T\nfinal_db2",
"_____no_output_____"
]
],
[
[
"See the elements present in the list of formulas and also the number of times each element appear in a formulas\n\nNote that C appear in 37874 and H in 37933 formulas out of the 37938 formulas, when they should appear in all formulas. This discrepancy is explaiend by the comment made in the formula filtering cell a bit above. It will be solved in a bit.",
"_____no_output_____"
]
],
[
[
"#final_db[final_db['Se'].notnull()].T\nfinal_db2.notnull().sum()",
"_____no_output_____"
]
],
[
[
"#### Guarantee that each formula has at least 1 C and 1 H, and that only have the following elements: C,H,O,N,S,P,Cl and F\n\nOnly formulas with C,H,O,N,S,P,Cl and F were kept since they are (as we can also see above) by far the most common elements in metabolites and are the elements that can be considered (right now as it is built) in the Formula Assignment algorithm made in FormGeneration_Assignment.ipynb (this notebook is auxiliary to that one to observe the most common ratios between these elemetns in known metabolites).",
"_____no_output_____"
]
],
[
[
"# Only keep formulas that have carbon or hydrogen atoms\nfor i in range(2):\n teste = final_db2.iloc[:,i].notnull()\n #print(final_db2.iloc[:,i].isnull())\n final_db2 = final_db2.loc[teste]\n \n# Take out formulas that have an element outside of the C,H,O,N,S,P,Cl and F\nfor i in range(8,len(final_db2.columns)):\n teste = final_db2.iloc[:,i].isnull()\n final_db2 = final_db2.loc[teste]\nfinal_db2",
"_____no_output_____"
]
],
[
[
"##### This finally filters the number of formulas from 37938 to 35245 formulas - final number of formulas considered\n\nAs it can be seen by this small filtering, very few formulas had elements outside of the main 8 mentioned.",
"_____no_output_____"
]
],
[
[
"final_db2.notnull().sum()",
"_____no_output_____"
],
[
"# Truncate the DataFrame to only the elements we want to see and replace NaNs for 0\ndb_df2 = final_db2[['C','H','O','N','S','P','F','Cl']]\ndb_df2 = db_df2.replace({np.nan:0})\ndb_df2",
"_____no_output_____"
]
],
[
[
"Calculate the distribution of ratios to carbon in the 19593 formulas - slowest cell of the notebook",
"_____no_output_____"
]
],
[
[
"# Calculate the different ratios \nratios_df2 = pd.DataFrame(index=db_df2.index, columns = ['H/C','O/C','N/C','S/C','P/C','F/C','Cl/C'])\nc = 0\nfor i in db_df2.index:\n ratios_df2.loc[i] = [db_df2.loc[i,'H']/db_df2.loc[i,'C'],\n db_df2.loc[i,'O']/db_df2.loc[i,'C'],\n db_df2.loc[i,'N']/db_df2.loc[i,'C'],\n db_df2.loc[i,'S']/db_df2.loc[i,'C'],\n db_df2.loc[i,'P']/db_df2.loc[i,'C'],\n db_df2.loc[i,'F']/db_df2.loc[i,'C'],\n db_df2.loc[i,'Cl']/db_df2.loc[i,'C']]\n c+=1\n #print(c)\nratios_df2",
"_____no_output_____"
]
],
[
[
"# Analysis of the ratios of 'Element'/Carbon",
"_____no_output_____"
],
[
"### Histograms and more importantly Cumulative Graphs of the ratios - Not eliminating ratios with 0\n\nOutside of the ['H/C'] ratios, the ratios were dominated by formulas that didn't have the element that is not carbon which skewed the following results.",
"_____no_output_____"
],
[
"#### Histograms of the each of the ratios",
"_____no_output_____"
]
],
[
[
"f, ax = plt.subplots(figsize=(4,4))\ndata = ratios_df2['H/C']\nplt.hist(data, bins=np.arange(min(data), 4, 0.1))\nplt.title(['H/C'])\nplt.show()",
"_____no_output_____"
],
[
"for i in ratios_df2.columns[1:]:\n data = ratios_df2[i]\n plt.hist(data, bins=np.arange(min(data), 1.3, 0.05))\n plt.title(i)\n plt.show()",
"_____no_output_____"
]
],
[
[
"#### (Percent) Cumulative Graphs of % of formulas by a certain element/Carbon ratio\n\nFirst is presented the general cumulative graph, and then two different subsections of the graph to present in more detail: the 'end' of the curves (all except 'H/C') and the 'start' of the curves (especially 'H/C').",
"_____no_output_____"
]
],
[
[
"# Plot the cumulative graph and adjust parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df2.columns:\n data = ratios_df2[i]\n # Make the histogram with the intended ranges between the bins\n values, base = np.histogram(data, bins=np.arange(min(data), 4, 0.05)) # Set X\n # Calculate the cumulative % of formulas for each ratio\n cumulative = np.cumsum(values)/len(ratios_df2) # \n #print(cumulative)\n plt.plot(base[:-1], cumulative)\n\nplt.plot([0,4],[0.05,0.05], color = 'black');plt.text(x=4.01, y=0.05, s='0.05', verticalalignment='center')\nplt.plot([0,4],[0.9,0.9], color = 'black');plt.text(x=4.01, y=0.90, s='0.90', verticalalignment='center')\nplt.plot([0,4],[0.95,0.95], color = 'black');plt.text(x=4.01, y=0.95, s='0.95', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
],
[
"# Plot the cumulative graph and adjust parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df2.columns:\n data = ratios_df2[i]\n # Plot the histogram\n values, base = np.histogram(data, bins=np.arange(min(data), 1.3, 0.05)) # Set X\n # Calculate the \n cumulative = (np.cumsum(values)/len(ratios_df2))\n #cumulative = np.cumsum(values)/len(ratios_df2)\n plt.plot(base[:-1], cumulative)\n\nplt.ylim([0.8,1])\nplt.plot([0,1.2],[0.9,0.9], color = 'black');plt.text(x=1.21, y=0.90, s='0.90', verticalalignment='center')\nplt.plot([0,1.2],[0.95,0.95], color = 'black');plt.text(x=1.21, y=0.95, s='0.95', verticalalignment='center')\nplt.plot([0,1.2],[0.99,0.99], color = 'black');plt.text(x=1.21, y=0.99, s='0.99', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
],
[
"# Plot the cumulative graph and adjust parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df2.columns:\n data = ratios_df2[i]\n values, base = np.histogram(data, bins=np.arange(min(data), 1.05, 0.05)) # Set X\n cumulative = np.cumsum(values)/len(ratios_df2)\n plt.plot(base[:-1], cumulative)\n\nplt.ylim([0,0.2])\n#plt.plot([0,1],[0.9,0.9], color = 'black')\nplt.plot([0,1],[0.05,0.05], color = 'black');plt.text(x=1.005, y=0.05, s='0.05', verticalalignment='center')\nplt.plot([0,1],[0.025,0.025], color = 'black');plt.text(x=1.005, y=0.025, s='0.025', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
]
],
[
[
"### Cumulative Graphs of the ratios - Eliminating ratios with 0\n\nOutside of the ['H/C'] ratios, the ratios were dominated by formulas that didn't have the element that is not carbon. Thus for each ratio 'element/C', the formulas that didn't have the corresponding elements were not taken into account for the graph.\n\n#### (Percent) Cumulative Graphs of % of formulas by a certain element/Carbon ratio\n\nFirst is presented the general cumulative graph, and then two different subsections of the graph to present in more detail: the 'end' of the curves (all except 'H/C') and the 'start' of the curves (especially 'H/C').",
"_____no_output_____"
]
],
[
[
"# Plot the Cumulative Graph and Adjust Parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df2.columns:\n data = ratios_df2[i]\n values, base = np.histogram(data, bins=np.arange(min(data), 4, 0.05))\n \n # Subtract the number of formulas that don't have the otehr element that not carbon in the ratio (ratio=0, values[0]) from \n # the number of formulas with ratio <= a value (np.cumsum(values)) and from the total number of formulas (len(ratios_df))\n cumulative = (np.cumsum(values) - values[0])/(len(ratios_df2) - values[0])\n #print(cumulative)\n plt.plot(base[:-1], cumulative)\n\nplt.plot([0,4],[0.05,0.05], color = 'black');plt.text(x=4.01, y=0.05, s='0.05', verticalalignment='center')\nplt.plot([0,4],[0.9,0.9], color = 'black');plt.text(x=4.01, y=0.90, s='0.90', verticalalignment='center')\nplt.plot([0,4],[0.95,0.95], color = 'black');plt.text(x=4.01, y=0.95, s='0.95', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
],
[
"# Plot the Cumulative Graph and Adjust Parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df2.columns:\n data = ratios_df2[i]\n values, base = np.histogram(data, bins=np.arange(min(data), 1.3, 0.05))\n cumulative = (np.cumsum(values) - values[0])/(len(ratios_df2) - values[0])\n plt.plot(base[:-1], cumulative)\n\nplt.ylim([0.8,1])\nplt.plot([0,1.2],[0.9,0.9], color = 'black');plt.text(x=1.21, y=0.90, s='0.90', verticalalignment='center')\nplt.plot([0,1.2],[0.95,0.95], color = 'black');plt.text(x=1.21, y=0.95, s='0.95', verticalalignment='center')\nplt.plot([0,1.2],[0.99,0.99], color = 'black');plt.text(x=1.21, y=0.99, s='0.99', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
],
[
"# Plot the Cumulative Graph and Adjust Parameters\nf, ax = plt.subplots(figsize=(12,9))\nfor i in ratios_df2.columns:\n data = ratios_df2[i]\n values, base = np.histogram(data, bins=np.arange(min(data), 1.05, 0.05))\n cumulative = (np.cumsum(values) - values[0])/(len(ratios_df2) - values[0])\n plt.plot(base[:-1], cumulative)\n\nplt.ylim([0,0.2])\n#plt.plot([0,1],[0.9,0.9], color = 'black')\nplt.plot([0,1],[0.05,0.05], color = 'black');plt.text(x=1.005, y=0.05, s='0.05', verticalalignment='center')\nplt.plot([0,1],[0.025,0.025], color = 'black');plt.text(x=1.005, y=0.025, s='0.025', verticalalignment='center')\nplt.legend(ratios_df.columns, fontsize = 12)\nplt.xlabel('Ratios', fontsize = 15)\nplt.ylabel('% of Formulas', fontsize = 15)\nplt.grid()",
"_____no_output_____"
]
],
[
[
"## Conclusions about the ranges to use",
"_____no_output_____"
],
[
"## Ranges conclusion\n\nThe ranges presented below (short_range) were applied at the form_checker_ratios function presented at the end of FormGeneration_Assignment.ipynb as an extra criteria for Formula Assignment to impede attribution of formulas with really rare ratios over formulas with more common ratios.\n\nThese ranges chosen should still allow for 95 to 99% of all formulas according to the results obtained with the ChEBI databases. Maybe even more strict ranges should be taken (from 90% or even lower) to make more strict selections.\n\nStricter_ranges is a possible stricter ranges to use",
"_____no_output_____"
]
],
[
[
"short_range = {'H/C':(0.5,2.2),'N/C':(0,0.6),'O/C':(0,1.2),'P/C':(0,0.3),'S/C':(0,0.5),'F/C':(0,0.5), 'Cl/C':(0,0.5)}",
"_____no_output_____"
],
[
"stricter_ranges = {'H/C':(0.6,2.2),'N/C':(0,0.5),'O/C':(0,1),'P/C':(0,0.3),'S/C':(0,0.3),'F/C':(0,0.5), 'Cl/C':(0,0.5)}",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
e74afe1fed8dec3e7aa1395a23f0342210ba5449 | 5,217 | ipynb | Jupyter Notebook | t81_558_class_13_03_web.ipynb | rserran/t81_558_deep_learning | ec312cc7a7cef207e55e382594455fe44bcdec11 | [
"Apache-2.0"
] | null | null | null | t81_558_class_13_03_web.ipynb | rserran/t81_558_deep_learning | ec312cc7a7cef207e55e382594455fe44bcdec11 | [
"Apache-2.0"
] | null | null | null | t81_558_class_13_03_web.ipynb | rserran/t81_558_deep_learning | ec312cc7a7cef207e55e382594455fe44bcdec11 | [
"Apache-2.0"
] | null | null | null | 44.589744 | 534 | 0.668583 | [
[
[
"<a href=\"https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_13_03_web.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# T81-558: Applications of Deep Neural Networks\n**Module 13: Advanced/Other Topics**\n* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)\n* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).",
"_____no_output_____"
],
[
"# Module 13 Video Material\n\n* Part 13.1: Flask and Deep Learning Web Services [[Video]](https://www.youtube.com/watch?v=H73m9XvKHug&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_13_01_flask.ipynb)\n* Part 13.2: Interrupting and Continuing Training [[Video]](https://www.youtube.com/watch?v=kaQCdv46OBA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_13_02_checkpoint.ipynb)\n* **Part 13.3: Using a Keras Deep Neural Network with a Web Application** [[Video]](https://www.youtube.com/watch?v=OBbw0e-UroI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_13_03_web.ipynb)\n* Part 13.4: When to Retrain Your Neural Network [[Video]](https://www.youtube.com/watch?v=K2Tjdx_1v9g&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_13_04_retrain.ipynb)\n* Part 13.5: Tensor Processing Units (TPUs) [[Video]](https://www.youtube.com/watch?v=Ygyf3NUqvSc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_13_05_tpu.ipynb)\n\n",
"_____no_output_____"
],
[
"# Part 13.3: Using a Keras Deep Neural Network with a Web Application\n\nIn this part, we will extend the image API developed in Part 13.1 to work with a web application. This technique allows you to use a simple website to upload/predict images, such as in Figure 13.WEB.\n\n**Figure 13.WEB: AI Web Application**\n\n\nI added neural network functionality to a simple ReactJS image upload and preview example. To do this, we will use the same API developed in Module 13.1. However, we will now add a [ReactJS](https://reactjs.org/) website around it. This single-page web application allows you to upload images for classification by the neural network. If you would like to read more about ReactJS and image uploading, you can refer to the [blog post](http://www.hartzis.me/react-image-upload/) that provided some inspiration for this example.\n\nI built this example from the following components:\n\n* [GitHub Location for Web App](./py/)\n* [image_web_server_1.py](./py/image_web_server_1.py) - The code both to start Flask and serve the HTML/JavaScript/CSS needed to provide the web interface.\n* Directory WWW - Contains web assets. \n * [index.html](./py/www/index.html) - The main page for the web application.\n * [style.css](./py/www/style.css) - The stylesheet for the web application.\n * [script.js](./py/www/script.js) - The JavaScript code for the web application.",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e74b13166de73b5b5e43b6ceb4709fcbc2225dfd | 3,323 | ipynb | Jupyter Notebook | notebooks/examples_of_function_monad.ipynb | threecifanggen/python-functional-programming | bd17281e5f24db826266f509bc54b25362c0d2a1 | [
"MIT"
] | 3 | 2021-10-05T09:12:36.000Z | 2021-11-30T07:11:58.000Z | notebooks/examples_of_function_monad.ipynb | threecifanggen/python-functional-programming | bd17281e5f24db826266f509bc54b25362c0d2a1 | [
"MIT"
] | 14 | 2021-10-11T05:31:15.000Z | 2021-12-16T12:52:47.000Z | notebooks/examples_of_function_monad.ipynb | threecifanggen/python-functional-programming | bd17281e5f24db826266f509bc54b25362c0d2a1 | [
"MIT"
] | null | null | null | 24.433824 | 102 | 0.496238 | [
[
[
"def logger(x, log_str=\"log is:\\n\"):\n def logger_func(f):\n res = f(x)\n print_res = log_str + f\"\"\"{f.__name__}'s result is {x} => {res}\\n\"\"\"\n return (print_res, logger(res, print_res))\n return logger_func\n\ndef get_logging(logger_tuple): return logger_tuple[0]\ndef get_logger(logger_tuple): return logger_tuple[1]",
"_____no_output_____"
],
[
"def f(x): return x + 1\ndef g(x): return x * 2",
"_____no_output_____"
],
[
"from functools import reduce\n\ndef apply_list(x, f_list):\n return reduce(lambda l_r, f: l_r[1](f), f_list, (\"log is:\\n\", logger(x)))\n\nres1 = apply_list(1, [f, g, f, f, g])\nprint(get_logging(res1))",
"log is:\nf's result is 1 => 2\ng's result is 2 => 4\nf's result is 4 => 5\nf's result is 5 => 6\ng's result is 6 => 12\n\n"
],
[
"def map_apply_list(g):\n def helper(x, f_list):\n return reduce(lambda l_r, f: l_r[1](f)[1](g), f_list, (\"log is:\\n\", logger(x)))\n return helper\n\ndef h(x): return x ** 2\n\napply_h_after_every_run = map_apply_list(h)\nres2 = apply_h_after_every_run(1, [f, g, f, f, g])\nprint(get_logging(res2))",
"log is:\nf's result is 1 => 2\nh's result is 2 => 4\ng's result is 4 => 8\nh's result is 8 => 64\nf's result is 64 => 65\nh's result is 65 => 4225\nf's result is 4225 => 4226\nh's result is 4226 => 17859076\ng's result is 17859076 => 35718152\nh's result is 35718152 => 1275786382295104\n\n"
],
[
"def flat_map_apply_list(g):\n def helper(x, f_list):\n return reduce(lambda l_r, f: l_r[1](f)[1](g)[1], f_list, (\"log is:\\n\", logger(x)))\n return helper\n\ndef h_flat(x): return lox ** 2",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e74b1adbec89ab8a639ca3391a2901c57f1ab4fd | 443,171 | ipynb | Jupyter Notebook | pong/pong-PPO.ipynb | tomkommando/deep-reinforcement-learning | ff217af8b49cf55f6083408e4f3806dd1d656361 | [
"MIT"
] | 1 | 2020-09-25T09:05:42.000Z | 2020-09-25T09:05:42.000Z | pong/pong-PPO.ipynb | tomkommando/deep-reinforcement-learning | ff217af8b49cf55f6083408e4f3806dd1d656361 | [
"MIT"
] | null | null | null | pong/pong-PPO.ipynb | tomkommando/deep-reinforcement-learning | ff217af8b49cf55f6083408e4f3806dd1d656361 | [
"MIT"
] | null | null | null | 561.686946 | 10,284 | 0.955552 | [
[
[
"# Welcome!\nBelow, we will learn to implement and train a policy to play atari-pong, using only the pixels as input. We will use convolutional neural nets, multiprocessing, and pytorch to implement and train our policy. Let's get started!",
"_____no_output_____"
]
],
[
[
"# install package for displaying animation\n!pip install JSAnimation\n\n# custom utilies for displaying animation, collecting rollouts and more\nimport pong_utils\n\n%matplotlib inline\n\n# check which device is being used. \n# I recommend disabling gpu until you've made sure that the code runs\ndevice = pong_utils.device\nprint(\"using device: \",device)",
"Requirement already satisfied: JSAnimation in /opt/conda/lib/python3.6/site-packages\n\u001b[33mYou are using pip version 9.0.1, however version 18.0 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\nusing device: cpu\n"
],
[
"# render ai gym environment\nimport gym\nimport time\n\n# PongDeterministic does not contain random frameskip\n# so is faster to train than the vanilla Pong-v4 environment\nenv = gym.make('PongDeterministic-v4')\n\nprint(\"List of available actions: \", env.unwrapped.get_action_meanings())\n\n# we will only use the actions 'RIGHTFIRE' = 4 and 'LEFTFIRE\" = 5\n# the 'FIRE' part ensures that the game starts again after losing a life\n# the actions are hard-coded in pong_utils.py",
"List of available actions: ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE']\n"
]
],
[
[
"# Preprocessing\nTo speed up training, we can simplify the input by cropping the images and use every other pixel\n\n",
"_____no_output_____"
]
],
[
[
"import matplotlib\nimport matplotlib.pyplot as plt\n\n# show what a preprocessed image looks like\nenv.reset()\n_, _, _, _ = env.step(0)\n# get a frame after 20 steps\nfor _ in range(20):\n frame, _, _, _ = env.step(1)\n\nplt.subplot(1,2,1)\nplt.imshow(frame)\nplt.title('original image')\n\nplt.subplot(1,2,2)\nplt.title('preprocessed image')\n\n# 80 x 80 black and white image\nplt.imshow(pong_utils.preprocess_single(frame), cmap='Greys')\nplt.show()\n\n",
"_____no_output_____"
]
],
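[
[
"# A rough sketch of the crop-and-downsample preprocessing described above. It is only meant\n# to illustrate the idea; the actual pong_utils.preprocess_single implementation may differ,\n# and the crop rows, background colour value and binary threshold used here are assumptions.\nimport numpy as np\n\ndef crop_downsample(frame, bkg_value=144):\n    # drop the score bar at the top and the border at the bottom (row range is an assumption)\n    img = frame[34:-16, :, :]\n    # keep every other pixel in both directions and a single colour channel\n    img = img[::2, ::2, 0]\n    # background pixels -> 0, everything else (ball, paddles) -> 1\n    img = np.where(img == bkg_value, 0, 1).astype(np.float32)\n    return img\n\n# crop_downsample(frame).shape  # roughly an 80 x 80 array for a 210 x 160 Atari frame",
"_____no_output_____"
]
],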
[
[
"# Policy\n\n## Exercise 1: Implement your policy\n \nHere, we define our policy. The input is the stack of two different frames (which captures the movement), and the output is a number $P_{\\rm right}$, the probability of moving left. Note that $P_{\\rm left}= 1-P_{\\rm right}$",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n\n# set up a convolutional neural net\n# the output is the probability of moving right\n# P(left) = 1-P(right)\nclass Policy(nn.Module):\n\n def __init__(self):\n super(Policy, self).__init__()\n \n \n ########\n ## \n ## Modify your neural network\n ##\n ########\n \n # 80x80 to outputsize x outputsize\n # outputsize = (inputsize - kernel_size + stride)/stride \n # (round up if not an integer)\n\n # output = 20x20 here\n self.conv = nn.Conv2d(2, 1, kernel_size=4, stride=4)\n self.size=1*20*20\n \n # 1 fully connected layer\n self.fc = nn.Linear(self.size, 1)\n self.sig = nn.Sigmoid()\n \n def forward(self, x):\n \n ########\n ## \n ## Modify your neural network\n ##\n ########\n \n x = F.relu(self.conv(x))\n # flatten the tensor\n x = x.view(-1,self.size)\n return self.sig(self.fc(x))\n\n\n# run your own policy!\n# policy=Policy().to(device)\npolicy=pong_utils.Policy().to(device)\n\n# we use the adam optimizer with learning rate 2e-4\n# optim.SGD is also possible\nimport torch.optim as optim\noptimizer = optim.Adam(policy.parameters(), lr=1e-4)",
"_____no_output_____"
]
],
[
[
"# Game visualization\npong_utils contain a play function given the environment and a policy. An optional preprocess function can be supplied. Here we define a function that plays a game and shows learning progress",
"_____no_output_____"
]
],
[
[
"pong_utils.play(env, policy, time=200) \n# try to add the option \"preprocess=pong_utils.preprocess_single\"\n# to see what the agent sees",
"_____no_output_____"
]
],
[
[
"# Function Definitions\nHere you will define key functions for training. \n\n## Exercise 2: write your own function for training\n(what I call scalar function is the same as policy_loss up to a negative sign)\n\n### PPO\nLater on, you'll implement the PPO algorithm as well, and the scalar function is given by\n$\\frac{1}{T}\\sum^T_t \\min\\left\\{R_{t}^{\\rm future}\\frac{\\pi_{\\theta'}(a_t|s_t)}{\\pi_{\\theta}(a_t|s_t)},R_{t}^{\\rm future}{\\rm clip}_{\\epsilon}\\!\\left(\\frac{\\pi_{\\theta'}(a_t|s_t)}{\\pi_{\\theta}(a_t|s_t)}\\right)\\right\\}$\n\nthe ${\\rm clip}_\\epsilon$ function is implemented in pytorch as ```torch.clamp(ratio, 1-epsilon, 1+epsilon)```",
"_____no_output_____"
]
],
[
[
"def clipped_surrogate(policy, old_probs, states, actions, rewards,\n discount = 0.995, epsilon=0.1, beta=0.01):\n\n ########\n ## \n ## WRITE YOUR OWN CODE HERE\n ##\n ########\n \n actions = torch.tensor(actions, dtype=torch.int8, device=device)\n\n # convert states to policy (or probability)\n new_probs = pong_utils.states_to_prob(policy, states)\n new_probs = torch.where(actions == pong_utils.RIGHT, new_probs, 1.0-new_probs)\n\n # include a regularization term\n # this steers new_policy towards 0.5\n # prevents policy to become exactly 0 or 1 helps exploration\n # add in 1.e-10 to avoid log(0) which gives nan\n entropy = -(new_probs*torch.log(old_probs+1.e-10)+ \\\n (1.0-new_probs)*torch.log(1.0-old_probs+1.e-10))\n\n return torch.mean(beta*entropy)\n",
"_____no_output_____"
]
],
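[
[
"# One possible way to complete Exercise 2 above; this is a sketch, not the course's reference\n# solution. It follows the clipped-surrogate formula from the markdown cell: discounted future\n# rewards are computed and normalized, the new/old probability ratio is formed and clipped with\n# torch.clamp, and the same entropy regularization term as in the skeleton is added.\nimport numpy as np\nimport torch\n\ndef clipped_surrogate_sketch(policy, old_probs, states, actions, rewards,\n                             discount=0.995, epsilon=0.1, beta=0.01):\n    # discount factors 1, d, d^2, ... and discounted rewards per (time step, parallel env)\n    discounts = discount**np.arange(len(rewards))\n    rewards = np.asarray(rewards)*discounts[:, np.newaxis]\n\n    # discounted future reward R_t^future = sum of discounted rewards from t onwards\n    rewards_future = rewards[::-1].cumsum(axis=0)[::-1]\n\n    # normalize across the parallel environments to reduce variance\n    mean = np.mean(rewards_future, axis=1)\n    std = np.std(rewards_future, axis=1) + 1.0e-10\n    rewards_normalized = (rewards_future - mean[:, np.newaxis])/std[:, np.newaxis]\n\n    actions = torch.tensor(actions, dtype=torch.int8, device=device)\n    old_probs = torch.tensor(old_probs, dtype=torch.float, device=device)\n    rewards = torch.tensor(rewards_normalized, dtype=torch.float, device=device)\n\n    # probability of the action actually taken under the current policy\n    new_probs = pong_utils.states_to_prob(policy, states)\n    new_probs = torch.where(actions == pong_utils.RIGHT, new_probs, 1.0-new_probs)\n\n    # clipped surrogate: min(ratio*R, clip(ratio)*R)\n    ratio = new_probs/old_probs\n    clipped_ratio = torch.clamp(ratio, 1-epsilon, 1+epsilon)\n    clipped_surrogate_term = torch.min(ratio*rewards, clipped_ratio*rewards)\n\n    # same regularization term as in the skeleton above\n    entropy = -(new_probs*torch.log(old_probs+1.e-10) +\n                (1.0-new_probs)*torch.log(1.0-old_probs+1.e-10))\n\n    return torch.mean(clipped_surrogate_term + beta*entropy)",
"_____no_output_____"
]
],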
[
[
"# Training\nWe are now ready to train our policy!\nWARNING: make sure to turn on GPU, which also enables multicore processing. It may take up to 45 minutes even with GPU enabled, otherwise it will take much longer!",
"_____no_output_____"
]
],
[
[
"from parallelEnv import parallelEnv\nimport numpy as np\n# keep track of how long training takes\n# WARNING: running through all 800 episodes will take 30-45 minutes\n\n# training loop max iterations\nepisode = 500\n\n# widget bar to display progress\n!pip install progressbar\nimport progressbar as pb\nwidget = ['training loop: ', pb.Percentage(), ' ', \n pb.Bar(), ' ', pb.ETA() ]\ntimer = pb.ProgressBar(widgets=widget, maxval=episode).start()\n\n\nenvs = parallelEnv('PongDeterministic-v4', n=8, seed=1234)\n\ndiscount_rate = .99\nepsilon = 0.1\nbeta = .01\ntmax = 320\nSGD_epoch = 4\n\n# keep track of progress\nmean_rewards = []\n\nfor e in range(episode):\n\n # collect trajectories\n old_probs, states, actions, rewards = \\\n pong_utils.collect_trajectories(envs, policy, tmax=tmax)\n \n total_rewards = np.sum(rewards, axis=0)\n\n\n # gradient ascent step\n for _ in range(SGD_epoch):\n \n # uncomment to utilize your own clipped function!\n # L = -clipped_surrogate(policy, old_probs, states, actions, rewards, epsilon=epsilon, beta=beta)\n\n L = -pong_utils.clipped_surrogate(policy, old_probs, states, actions, rewards,\n epsilon=epsilon, beta=beta)\n optimizer.zero_grad()\n L.backward()\n optimizer.step()\n del L\n \n # the clipping parameter reduces as time goes on\n epsilon*=.999\n \n # the regulation term also reduces\n # this reduces exploration in later runs\n beta*=.995\n \n # get the average reward of the parallel environments\n mean_rewards.append(np.mean(total_rewards))\n \n # display some progress every 20 iterations\n if (e+1)%20 ==0 :\n print(\"Episode: {0:d}, score: {1:f}\".format(e+1,np.mean(total_rewards)))\n print(total_rewards)\n \n # update progress widget bar\n timer.update(e+1)\n \ntimer.finish()",
"_____no_output_____"
],
[
"pong_utils.play(env, policy, time=200) ",
"_____no_output_____"
],
[
"# save your policy!\ntorch.save(policy, 'PPO.policy')\n\n# load policy if needed\n# policy = torch.load('PPO.policy')\n\n# try and test out the solution \n# make sure GPU is enabled, otherwise loading will fail\n# (the PPO verion can win more often than not)!\n#\n# policy_solution = torch.load('PPO_solution.policy')\n# pong_utils.play(env, policy_solution, time=2000) ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e74b1db0295431adee68ce97397d870ace6f6729 | 314,548 | ipynb | Jupyter Notebook | kaifang_email.ipynb | YYYYYNNNNN/tongjier | 8059bebb75d9e334ab3261e6e3a36a66a2c55f5d | [
"Apache-2.0"
] | 6 | 2019-03-19T08:18:20.000Z | 2020-12-21T02:45:36.000Z | kaifang_email.ipynb | YYYYYNNNNN/tongjier | 8059bebb75d9e334ab3261e6e3a36a66a2c55f5d | [
"Apache-2.0"
] | null | null | null | kaifang_email.ipynb | YYYYYNNNNN/tongjier | 8059bebb75d9e334ab3261e6e3a36a66a2c55f5d | [
"Apache-2.0"
] | null | null | null | 30.907733 | 75 | 0.567694 | [
[
[
"readfile_path='D:\\day01\\kaifangX.txt'\nwritefile_path='/Users/Asus/Desktop/test.txt'\nkaifangline=open(readfile_path,'r',encoding='gbk',errors='ignore')\nwriteline=open(writefile_path,'a',encoding='gbk')\nfor i in range(10000):\n try:\n email=kaifangline.readline().split(',')[9]\n print(email)\n writeline.write(email)\n except Exception as e:\n print(e)\nkaifangline.close()\nwriteline.close()",
"[email protected]\[email protected]\n\n\[email protected]\[email protected] \[email protected]\[email protected]\n\n\[email protected] \[email protected] \[email protected]\[email protected]\[email protected] \[email protected]\n\n\[email protected] \[email protected] \[email protected] \[email protected]\[email protected] \[email protected] \[email protected] \[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\nlist index out of range\[email protected]\[email protected]\nlist index out of 
range\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\nlist index out of range\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email 
protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out 
of range\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected] \[email protected]\[email protected]\[email protected]\n\n\n\n\[email protected]\[email protected] \[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\nlist index out of range\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\nlist index out of range\nlist index out of range\nlist index out of range\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\nlist index out of range\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected] \[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email 
protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\n\n\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\nlist index out of range\[email protected] \[email protected] \[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\nlist index out of range\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\nlist index out of range\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\n-\[email protected]\[email protected]\[email protected]\[email protected]\[email 
protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\nlist index out of range\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e74b2883142920545a34b58b34629b3527aa7da1 | 3,251 | ipynb | Jupyter Notebook | add_places.ipynb | reading-in-the-alps/rita-invs | 21dd5a08555ea75daf39b9aeaf6b9f1446a62533 | [
"MIT"
] | null | null | null | add_places.ipynb | reading-in-the-alps/rita-invs | 21dd5a08555ea75daf39b9aeaf6b9f1446a62533 | [
"MIT"
] | 5 | 2019-05-20T11:47:25.000Z | 2021-06-10T21:12:15.000Z | add_places.ipynb | reading-in-the-alps/rita-invs | 21dd5a08555ea75daf39b9aeaf6b9f1446a62533 | [
"MIT"
] | null | null | null | 22.267123 | 113 | 0.516764 | [
[
[
"# enriched (main) Persons with Places\n* write Persons (main) to .txt file\n* use this file to annotate LOC within prodigy\n* train a LOC-model\n* run it and annotate Persons",
"_____no_output_____"
]
],
[
[
"import ast\nimport random\nfrom spacytei.train import batch_train",
"_____no_output_____"
],
[
"persons = Person.objects.filter(is_main_person__isnull=False)\npersons.count()",
"_____no_output_____"
],
[
"items = [x.written_name for x in persons]\nitems = sorted(iter(items), key=lambda k: random.random())\nfilename = \"person__main.txt\"\nwith open(filename, 'w', encoding=\"utf-8\") as f:\n for x in items:\n f.write(\"{}\".format(x) + '\\n')",
"_____no_output_____"
],
[
"file = r\"C:\\Users\\pandorfer\\Documents\\Redmine\\prodigy\\invs\\invs_loc.jsonl\"",
"_____no_output_____"
],
[
"with open(file) as f:\n TRAIN_DATA = f.readlines()\ntrain_data = [ast.literal_eval(x) for x in TRAIN_DATA]\nprint(len(train_data))\ntrain_data[4]",
"_____no_output_____"
],
[
"batch_train(train_data=train_data, output_dir='./data/main_loc', new_label='LOC', eval_split=0.2, n_iter=8)",
"_____no_output_____"
],
[
"import spacy",
"_____no_output_____"
],
[
"nlp = spacy.load(r\"./data/main_loc\")",
"_____no_output_____"
],
[
"for x in persons:\n doc = nlp(\"{}\".format(x.written_name))\n for ent in doc.ents:\n if ent.label_ == \"LOC\":\n place, _ = Place.objects.get_or_create(\n name=\"{}\".format(ent.text)\n )\n x.belongs_to_place.add(place)\n break\n x.save()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74b2bd8afbbbc6cbb192133cfaeae42bd8e18ec | 168,070 | ipynb | Jupyter Notebook | src/03WelchFFT.ipynb | XiruiXian/Master-thesis-project | 3dad9f0c23c52cbd5c603bea41f04e8c8e4cdd9f | [
"MIT"
] | null | null | null | src/03WelchFFT.ipynb | XiruiXian/Master-thesis-project | 3dad9f0c23c52cbd5c603bea41f04e8c8e4cdd9f | [
"MIT"
] | null | null | null | src/03WelchFFT.ipynb | XiruiXian/Master-thesis-project | 3dad9f0c23c52cbd5c603bea41f04e8c8e4cdd9f | [
"MIT"
] | 1 | 2021-05-18T12:31:30.000Z | 2021-05-18T12:31:30.000Z | 152.513612 | 41,516 | 0.8368 | [
[
[
"#import the necessary packages\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport pandas as pd\nimport mne\nfrom pathlib import Path\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"## Read some raw data",
"_____no_output_____"
]
],
[
[
"#EDF file\noriginal_data_folder = Path('/Volumes/Macintosh HD - Data/Master Thesis/chb-mit-scalp-eeg-database-1.0.0')\nPatient = ['chb04','chb06','chb08','chb15','chb17','chb19']\nraw_file = os.path.join(original_data_folder,Patient[0],'{}_{}.edf'.format(Patient[0],'28'))\n\n#Read in raw data\nraw = mne.io.read_raw_edf(raw_file,preload=True)\n\n#Get a list of all channels:\nchannels = raw.ch_names\n\nraw_data = raw.get_data(return_times=True)\nt = raw_data[1]",
"Extracting EDF parameters from /Volumes/Macintosh HD - Data/Master Thesis/chb-mit-scalp-eeg-database-1.0.0/chb04/chb04_28.edf...\nEDF file detected\nSetting channel info structure...\nCreating raw.info structure...\nReading 0 ... 3692287 = 0.000 ... 14422.996 secs...\n"
],
[
"#channels\n\nlen(channels)",
"_____no_output_____"
]
],
[
[
"## FFT",
"_____no_output_____"
]
],
[
[
"raw.plot_psd(picks=channels[0],n_overlap=128,n_fft=256,dB=False)",
"Effective window size : 1.000 (s)\nNeed more than one channel to make topography for eeg. Disabling interactivity.\n"
],
[
"psd,freqs = mne.time_frequency.psd_array_welch(raw_data[0][0],sfreq=raw.info['sfreq'], n_fft=256,n_overlap=128)\nplt.figure(figsize=(10, 4))\nplt.plot(freqs,np.sqrt(psd))\nplt.xlabel('Frequency(Hz)')\nplt.ylabel(r'V/$\\sqrt{Hz}$')\nplt.title('{}_{} EEG Channel {} Welch FFT'.format(Patient[0],'01',channels[0]))\nplt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\nplt.grid(True)",
"Effective window size : 1.000 (s)\n"
],
[
"df = pd.DataFrame(psd,index=freqs)\ndf_mean = np.sqrt(df.mean(axis=1))\n\nplt.figure(figsize=(10, 4))\ndf_mean.plot(label='Average')\nplt.legend()\nplt.xlabel('Frequency(Hz)')\nplt.ylabel(r'V/$\\sqrt{Hz}$')\nplt.title('{}_{} EEG Channel {} Welch FFT'.format(Patient[0],'01',channels[0]))\nplt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\nplt.grid(True)",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 8))\nplt.subplot(211)\ndf_mean.plot(label='Average')\nplt.legend()\nplt.ylabel(r'V/$\\sqrt{Hz}$')\nplt.title('{}_{} EEG Channel {} Welch FFT'.format(Patient[0],'01',channels[0]))\nplt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\nplt.grid(True)\nplt.subplot(212)\nplt.plot(freqs,np.sqrt(psd))\nplt.xlabel('Frequency(Hz)')\nplt.ylabel(r'V/$\\sqrt{Hz}$')\nplt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\nplt.grid(True)\nplt.savefig('ch04_01/{}_{} EEG Channel {} Welch FFT.png'.format(Patient[0],'01',channels[0]))",
"_____no_output_____"
],
[
"#for i in range(len(raw_data[0])):\n# psd,freqs = mne.time_frequency.psd_array_welch(raw_data[0][i],sfreq=raw.info['sfreq'],\n# n_fft=256,n_overlap=128,average=None)\n# df = pd.DataFrame(psd,index=freqs)\n# df_mean = np.sqrt(df.mean(axis=1))\n# plt.figure(figsize=(10, 8))\n# plt.subplot(211)\n# df_mean.plot(label='Average')\n# plt.legend()\n# plt.ylabel(r'V/$\\sqrt{Hz}$') \n# plt.title('{}_{} EEG Channel {} Welch FFT'.format(Patient[0],'01',channels[i]))\n# plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n# plt.grid(True)\n# plt.subplot(212)\n# plt.plot(freqs,np.sqrt(psd))\n# plt.xlabel('Frequency(Hz)')\n# plt.ylabel(r'V/$\\sqrt{Hz}$')\n# plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n# plt.grid(True)\n# plt.savefig('ch04_01/{}_{} EEG Channel {} Welch FFT.png'.format(Patient[0],'01',channels[i]))",
"_____no_output_____"
],
[
"psd,freqs = mne.time_frequency.psd_array_welch(raw_data[0][0],sfreq=raw.info['sfreq'], n_fft=256,n_overlap=128,average=None)\ndf = pd.DataFrame(psd,index=freqs)",
"Effective window size : 1.000 (s)\n"
],
[
"df",
"_____no_output_____"
],
[
"def create_bins(df,nobins=16):\n to_rtn = df.copy()\n bins = {}\n for col in to_rtn.columns:\n to_rtn[col],binR = pd.qcut(to_rtn[col],q=nobins,retbins=True,duplicates='drop',labels=False)\n bins[col] = binR\n to_rtn[col] = to_rtn[col].astype('category')\n to_rtn[col] = to_rtn[col].cat.set_categories([str(i) for i in to_rtn[col].cat.categories], rename = True)\n #Infinity\n bins[col][0] = -np.inf\n bins[col][-1] = np.inf\n return to_rtn,bins",
"_____no_output_____"
],
[
"train_df, binning = create_bins(df,nobins=16)",
"_____no_output_____"
],
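[
"# A small sketch (not part of the original analysis) of how the bin edges returned by\n# create_bins could be re-used to discretize new data with the same columns, e.g. a later\n# recording; `new_df` below is assumed to have the same columns as `df`.\ndef apply_bins(new_df, bins):\n    out = new_df.copy()\n    for col, edges in bins.items():\n        # pd.cut with the stored edges; the first/last edges are -inf/inf, so no value falls outside\n        out[col] = pd.cut(out[col], bins=edges, labels=False)\n    return out\n\n# e.g. re-binning the training frame itself with the stored edges:\n# rebinned = apply_bins(df, binning)",
"_____no_output_____"
],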
[
"display(\"train_df\",train_df)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74b30cc486f925ecc75453784835174d8d50b92 | 15,851 | ipynb | Jupyter Notebook | models/Xgboost/experiments/case_study.ipynb | echoyi/RPS_LJE | 7b7f55072eaaf81bcc35cbd25b875521c294ad7c | [
"MIT"
] | 1 | 2021-12-01T09:34:18.000Z | 2021-12-01T09:34:18.000Z | models/Xgboost/experiments/case_study.ipynb | echoyi/RPS_LJE | 7b7f55072eaaf81bcc35cbd25b875521c294ad7c | [
"MIT"
] | null | null | null | models/Xgboost/experiments/case_study.ipynb | echoyi/RPS_LJE | 7b7f55072eaaf81bcc35cbd25b875521c294ad7c | [
"MIT"
] | null | null | null | 63.404 | 4,344 | 0.494985 | [
[
[
"## Explanations of RPS-LJE and Influence Function on German Credit Risk Analysis with XGBoost\nTable 2 and Table 11 (appendix)",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport torch\nimport pandas as pd",
"_____no_output_____"
],
[
"path = \"../data\"\nX_train_clean_res = pd.read_csv('{}/X_train_clean_res.csv'.format(path), index_col=0)\ny_train_clean_res = pd.read_csv('{}/Y_train_clean_res.csv'.format(path), index_col=0)\nX_test_clean = pd.read_csv('{}/X_test_clean.csv'.format(path), index_col=0)\ny_test_clean = pd.read_csv('{}/Y_test_clean.csv'.format(path), index_col=0)\nX_train_clean = pd.read_csv('{}/X_train_clean.csv'.format(path), index_col=0)\ny_train_clean = pd.read_csv('{}/Y_train_clean.csv'.format(path), index_col=0)\n\ndata = pd.read_csv('{}/german_data.csv'.format(path), index_col=0)\ndata_translated = pd.read_csv('{}/german_data_translated.csv'.format(path), index_col=0)",
"_____no_output_____"
],
[
"path = '../saved_models/base'\n\nweight_matrix_influence = np.load('{}/calculated_weights/influence_weight_matrix.npz'.format(path), allow_pickle=True)['weight_matrix'].squeeze()\ngrad_test = np.load('{}/calculated_weights/influence_weight_matrix.npz'.format(path), allow_pickle=True)['jaccobian_test']\nweight_matrix_ours = np.load('{}/calculated_weights/ours_weight_matrix_with_lr_0.0001.npz'.format(path), allow_pickle=True)['weight_matrix'].squeeze()\nfile = np.load('{}/model/saved_outputs.npz'.format(path))\nintermediate_train = torch.from_numpy(file['intermediate_train'])\nintermediate_test = torch.from_numpy(file['intermediate_test'])\nlabels_train = file['labels_train']\nlabels_test = file['labels_test']\npred_train = file['pred_train']\npred_test = file['pred_test']",
"_____no_output_____"
],
[
"data_translated.head()\n",
"_____no_output_____"
],
[
"wrongly_predicted_train_ids = np.argwhere(np.abs(pred_train-labels_train)>0).flatten()\nwrongly_predicted_test_ids = np.argwhere(np.abs(pred_test-labels_test)>0).flatten()",
"_____no_output_____"
],
[
"names = ['existingchecking', 'duration', 'credithistory', 'purpose', 'creditamount',\n 'savings', 'employmentsince', 'installmentrate', 'statussex', 'otherdebtors',\n 'residencesince', 'property', 'age', 'otherinstallmentplans', 'housing',\n 'existingcredits', 'job', 'peopleliable', 'telephone', 'foreignworker', 'classification']\ndef get_influence_order(test_point=None):\n tmp = grad_test[test_point, 0]@ np.transpose(weight_matrix_influence)\n pos_idx = np.argsort(tmp)\n return pos_idx\n\ndef get_ours_order(test_point=None):\n true_class = labels_test[test_point]\n tmp = np.dot(weight_matrix_ours,\n intermediate_test[test_point,:])\n if true_class == 1:\n pos_idx = np.flip(np.argsort(tmp), axis=0)\n else:\n pos_idx = np.argsort(tmp)\n return pos_idx",
"_____no_output_____"
],
[
"def get_data_by_cleaned_idx(X_cleaned, idx):\n return data_translated.iloc[X_cleaned.iloc[idx,:]['id']], X_cleaned.iloc[idx,:]['id']\n\ndef sort_by_feature_importance(df):\n df = df[['method','type','id', 'classification',\n 'existingchecking', 'credithistory', 'savings',\n 'otherdebtors','employmentsince', 'otherinstallmentplans',\n 'housing','purpose', 'property', 'duration',\n 'creditamount','statussex','existingcredits',\n 'installmentrate','residencesince', 'age',\n 'job', 'peopleliable', 'telephone',\n 'foreignworker']]\n return df",
"_____no_output_____"
],
[
"def experiment_with_test_data(test_pt):\n columns=['method','type','id'] + names\n data_array_ours = []\n data_array_IF = []\n test_data, data_id = get_data_by_cleaned_idx(X_test_clean, test_pt)\n data_array_ours.append(np.concatenate([[' ','Test point',data_id],test_data.values]))\n data_array_IF.append(np.concatenate([[' ','Test point',data_id],test_data.values]))\n ours_idx_pos_in_res = [i for i in get_ours_order(test_pt) if not i in wrongly_predicted_train_ids]\n inf_idx_pos_in_res = [i for i in get_influence_order(test_pt) if not i in wrongly_predicted_train_ids]\n for i in range(3):\n our_pos_data, data_id = get_data_by_cleaned_idx(X_train_clean_res, ours_idx_pos_in_res[i])\n data_array_ours.append(np.concatenate([['REP-LJE','Positive {}'.format(i+1),\n data_id ],our_pos_data.values]))\n\n inf_pos_data, data_id = get_data_by_cleaned_idx(X_train_clean_res, inf_idx_pos_in_res[i])\n data_array_IF.append(np.concatenate([['Influence function','Positive {}'.format(i+1),\n data_id],inf_pos_data.values]))\n df_ours = sort_by_feature_importance(\n pd.DataFrame(data=data_array_ours, columns=columns))\n df_IF = sort_by_feature_importance(\n pd.DataFrame(data_array_IF, columns=columns))\n df_all = pd.concat([df_ours,df_IF.iloc[1:,:]])\n return df_ours, df_IF, df_all\n",
"_____no_output_____"
],
[
"df_all_list=[]\nfor i in [8, 94, 84, 56, 0, 32]:\n df_ours, df_IF, df_all=experiment_with_test_data(i)\n df_all_list.append(df_all)\ndf_all_stack = pd.concat(df_all_list)\ndf_all_stack.to_csv('results/German_credit.csv')\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74b38e0ce912ee8e9d4f47b3486290b59eac6b6 | 11,616 | ipynb | Jupyter Notebook | day5.ipynb | ElaRom/dw_matrix_car | 59a8f63e72184934b1a721ddc0c37764b2251d93 | [
"MIT"
] | null | null | null | day5.ipynb | ElaRom/dw_matrix_car | 59a8f63e72184934b1a721ddc0c37764b2251d93 | [
"MIT"
] | null | null | null | day5.ipynb | ElaRom/dw_matrix_car | 59a8f63e72184934b1a721ddc0c37764b2251d93 | [
"MIT"
] | null | null | null | 11,616 | 11,616 | 0.689824 | [
[
[
"!pip install --upgrade tables\n!pip install eli5\n!pip install xgboost\n!pip install hyperopt",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\n\nimport xgboost as xgb\n\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.model_selection import cross_val_score\n\nfrom hyperopt import hp, fmin, tpe, STATUS_OK\n\nimport eli5\nfrom eli5.sklearn import PermutationImportance",
"_____no_output_____"
],
[
"cd '/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car'",
"_____no_output_____"
],
[
"df = pd.read_hdf('data/car.h5')\ndf.shape",
"_____no_output_____"
]
],
[
[
"##Feature Engineering",
"_____no_output_____"
]
],
[
[
"SUFFIX_CAT = '__cat'\n\nfor feat in df.columns:\n if isinstance(df[feat][0], list): continue\n\n factorizes_values = df[feat].factorize()[0]\n if SUFFIX_CAT in feat:\n df[feat] = factorizes_values\n else:\n df[feat + SUFFIX_CAT] = factorizes_values",
"_____no_output_____"
],
[
"df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))\ndf['param_moc'] = df['param_moc'].map( lambda x: -1 if str(x) == 'None' else x.split(' ')[0] )\ndf['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map( lambda x: -1 if str(x) == 'None' else x.split('cm')[0].replace(' ', '') )",
"_____no_output_____"
],
[
"def run_model(model, feats):\n X = df[feats].values\n y = df['price_value'].values\n\n scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')\n return np. mean(scores), np.std(scores)",
"_____no_output_____"
],
[
"feats = ['param_napęd__cat','param_rok-produkcji','param_stan__cat','param_skrzynia-biegów__cat','param_faktura-vat__cat','param_moc','param_marka-pojazdu__cat','feature_kamera-cofania__cat','param_typ__cat','param_pojemność-skokowa','seller_name__cat','feature_wspomaganie-kierownicy__cat','param_model-pojazdu__cat','param_wersja__cat','param_kod-silnika__cat','feature_system-start-stop__cat','feature_asystent-pasa-ruchu__cat','feature_czujniki-parkowania-przednie__cat','feature_łopatki-zmiany-biegów__cat','feature_regulowane-zawieszenie__cat']\n\nxgb_params = {\n 'max_depth': 5,\n 'n_estimators': 50,\n 'learning_rate': 0.1,\n 'seed': 0\n}\n\nrun_model(xgb.XGBRegressor(**xgb_params), feats)",
"[10:03:01] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[10:03:05] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[10:03:09] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
]
],
[
[
"##Hyperopt",
"_____no_output_____"
]
],
[
[
"def obj_func(params):\n print(\"Training with params: \")\n print(params)\n \n mean_mae, scroe_std = run_model(xgb.XGBRegressor(**params), feats)\n\n return {'loss': np.abs(mean_mae), 'status': STATUS_OK}\n\n#space\nxgb_reg_params = {\n 'learning rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),\n 'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype=int)),\n 'subsample': hp.quniform('subsample', 0.5, 1, 0.05),\n 'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),\n 'objective': 'reg:squarederror',\n 'n_estimators': 100,\n 'seed': 0,\n}\n\n##run\nbest = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=25)\n\nbest",
"Training with params: \n{'colsample_bytree': 0.8, 'learning rate': 0.05, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params: \n{'colsample_bytree': 0.65, 'learning rate': 0.3, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params: \n{'colsample_bytree': 0.8500000000000001, 'learning rate': 0.15000000000000002, 'max_depth': 5, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9500000000000001}\nTraining with params: \n{'colsample_bytree': 0.6000000000000001, 'learning rate': 0.15000000000000002, 'max_depth': 15, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params: \n{'colsample_bytree': 0.8500000000000001, 'learning rate': 0.25, 'max_depth': 6, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.7000000000000001}\nTraining with params: \n{'colsample_bytree': 0.75, 'learning rate': 0.1, 'max_depth': 7, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.6000000000000001}\nTraining with params: \n{'colsample_bytree': 0.6000000000000001, 'learning rate': 0.25, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.75}\nTraining with params: \n{'colsample_bytree': 0.6000000000000001, 'learning rate': 0.25, 'max_depth': 11, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.7000000000000001}\nTraining with params: \n{'colsample_bytree': 0.5, 'learning rate': 0.2, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.6000000000000001}\nTraining with params: \n{'colsample_bytree': 0.8, 'learning rate': 0.2, 'max_depth': 10, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params: \n{'colsample_bytree': 0.65, 'learning rate': 0.15000000000000002, 'max_depth': 15, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9}\nTraining with params: \n{'colsample_bytree': 0.9, 'learning rate': 0.25, 'max_depth': 5, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.75}\nTraining with params: \n{'colsample_bytree': 0.8, 'learning rate': 0.05, 'max_depth': 12, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params: \n{'colsample_bytree': 0.65, 'learning rate': 0.05, 'max_depth': 13, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9}\nTraining with params: \n{'colsample_bytree': 0.9500000000000001, 'learning rate': 0.15000000000000002, 'max_depth': 8, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9}\nTraining with params: \n{'colsample_bytree': 0.6000000000000001, 'learning rate': 0.05, 'max_depth': 14, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.6000000000000001}\nTraining with params: \n{'colsample_bytree': 0.6000000000000001, 'learning rate': 0.15000000000000002, 'max_depth': 8, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8500000000000001}\nTraining with params: \n{'colsample_bytree': 0.55, 'learning rate': 0.15000000000000002, 'max_depth': 6, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params: \n{'colsample_bytree': 1.0, 'learning rate': 0.15000000000000002, 'max_depth': 6, 
'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8500000000000001}\nTraining with params: \n{'colsample_bytree': 0.9, 'learning rate': 0.25, 'max_depth': 6, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9500000000000001}\nTraining with params: \n{'colsample_bytree': 0.7000000000000001, 'learning rate': 0.05, 'max_depth': 13, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8}\nTraining with params: \n{'colsample_bytree': 0.7000000000000001, 'learning rate': 0.05, 'max_depth': 12, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.5}\nTraining with params: \n{'colsample_bytree': 0.75, 'learning rate': 0.05, 'max_depth': 13, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.5}\nTraining with params: \n{'colsample_bytree': 0.7000000000000001, 'learning rate': 0.05, 'max_depth': 12, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8}\nTraining with params: \n{'colsample_bytree': 0.8, 'learning rate': 0.3, 'max_depth': 13, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8}\n100%|██████████| 25/25 [22:13<00:00, 53.36s/it, best loss: 7421.965419304863]\n"
],
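[
"# Note: because the search space uses hp.choice, `best` holds the indices of the chosen\n# options rather than the values themselves. A small sketch (assuming `xgb_reg_params` and\n# `best` from the cell above) of how the actual parameter values can be recovered and reused:\nfrom hyperopt import space_eval\n\nbest_params = space_eval(xgb_reg_params, best)\nbest_params\n\n# the recovered dict could then be passed back into the model, e.g.:\n# run_model(xgb.XGBRegressor(**best_params), feats)",
"_____no_output_____"
],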
[
"",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74b45820ad8217bfe915af14315850ab31bed22 | 8,910 | ipynb | Jupyter Notebook | Kaggle_Learning_Deep_Learning.ipynb | shlok-sethia/Kaggle-Findings | 20c616aa2e6f20eb016fe7a39ac990f35bc94892 | [
"MIT"
] | null | null | null | Kaggle_Learning_Deep_Learning.ipynb | shlok-sethia/Kaggle-Findings | 20c616aa2e6f20eb016fe7a39ac990f35bc94892 | [
"MIT"
] | null | null | null | Kaggle_Learning_Deep_Learning.ipynb | shlok-sethia/Kaggle-Findings | 20c616aa2e6f20eb016fe7a39ac990f35bc94892 | [
"MIT"
] | null | null | null | 48.956044 | 274 | 0.630079 | [
[
[
"from os.path import join\n\nimage_dir = '../input/dog-breed-identification/train/'\nimg_paths = [join(image_dir, filename) for filename in \n ['0c8fe33bd89646b678f6b2891df8a1c6.jpg',\n '0c3b282ecbed1ca9eb17de4cb1b6e326.jpg',\n '04fb4d719e9fe2b6ffe32d9ae7be8a22.jpg',\n '0e79be614f12deb4f7cae18614b7391b.jpg']]",
"_____no_output_____"
],
[
"import numpy as np\nfrom tensorflow.python.keras.applications.resnet50 import preprocess_input\nfrom tensorflow.python.keras.preprocessing.image import load_img, img_to_array\n\nimage_size = 224\n\ndef read_and_prep_images(img_paths, img_height=image_size, img_width=image_size):\n imgs = [load_img(img_path, target_size=(img_height, img_width)) for img_path in img_paths]\n img_array = np.array([img_to_array(img) for img in imgs])\n output = preprocess_input(img_array)\n return(output)",
"C:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 
1)])\nC:\\Users\\Shlok\\anaconda3\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
],
[
"from tensorflow.python.keras.applications import ResNet50\n\nmy_model = ResNet50(weights='../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels.h5')\ntest_data = read_and_prep_images(img_paths)\npreds = my_model.predict(test_data)",
"_____no_output_____"
],
[
"from learntools.deep_learning.decode_predictions import decode_predictions\nfrom IPython.display import Image, display\n\nmost_likely_labels = decode_predictions(preds, top=3, class_list_path='../input/resnet50/imagenet_class_index.json')\n\nfor i, img_path in enumerate(img_paths):\n display(Image(img_path))\n print(most_likely_labels[i])",
"_____no_output_____"
],
[
"from tensorflow.keras.applications import ResNet50\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D\n\nnum_classes = 2\nresnet_weights_path = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\nmy_new_model = Sequential()\nmy_new_model.add(ResNet50(include_top=False, pooling='avg', weights=resnet_weights_path))\nmy_new_model.add(Dense(num_classes, activation='softmax'))\n\n# Indicate whether the first layer should be trained/changed or not.\nmy_new_model.layers[0].trainable = False",
"_____no_output_____"
],
[
"my_new_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])",
"_____no_output_____"
],
[
"from tensorflow.python.keras.applications.resnet50 import preprocess_input\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n\nimage_size = 224\ndata_generator = ImageDataGenerator(preprocessing_function=preprocess_input)\n\n\ntrain_generator = data_generator.flow_from_directory(\n 'download/dogs-gone-sideways/images/train',\n target_size=(image_size, image_size),\n batch_size=10,\n class_mode='categorical')\n\nvalidation_generator = data_generator.flow_from_directory(\n 'download/dogs-gone-sideways/images/val',\n target_size=(image_size, image_size),\n class_mode='categorical')\n\nmy_new_model.fit_generator(\n train_generator,\n steps_per_epoch=22,\n validation_data=validation_generator,\n validation_steps=1)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74b51b3b914083bdb7ffb391c4a593a2f6d43cf | 63,384 | ipynb | Jupyter Notebook | labs/lab09.ipynb | CloeRomero/mat281_portfolio | 1449a293f6e42d9dd4d041f852c23c51ace65514 | [
"MIT"
] | null | null | null | labs/lab09.ipynb | CloeRomero/mat281_portfolio | 1449a293f6e42d9dd4d041f852c23c51ace65514 | [
"MIT"
] | null | null | null | labs/lab09.ipynb | CloeRomero/mat281_portfolio | 1449a293f6e42d9dd4d041f852c23c51ace65514 | [
"MIT"
] | null | null | null | 93.624815 | 20,989 | 0.530954 | [
[
[
"# Laboratorio 9",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport altair as alt\nimport matplotlib.pyplot as plt\n\nfrom vega_datasets import data\n\nalt.themes.enable('opaque')\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"En este laboratorio utilizaremos un conjunto de datos _famoso_, el GapMinder. Esta es una versión reducida que solo considera países, ingresos, salud y población. \n\n¿Hay alguna forma natural de agrupar a estos países?",
"_____no_output_____"
]
],
[
[
"gapminder = data.gapminder_health_income()\ngapminder",
"_____no_output_____"
]
],
[
[
"## Ejercicio 1\n\n(1 pto.)\n\nRealiza un Análisis exploratorio, como mínimo un `describe` del dataframe y una visualización adecuada, por ejemplo un _scatter matrix_ con los valores numéricos.",
"_____no_output_____"
]
],
[
[
"gapminder.describe()",
"_____no_output_____"
],
[
"alt.Chart(gapminder).mark_circle(opacity=0.5).encode(\n alt.X(alt.repeat(\"column\"),type='quantitative'),\n alt.Y(alt.repeat(\"row\"),type='quantitative')\n).properties(\n width=200,\n height=200\n).repeat(\n row=['population', 'health', 'income'],\n column=['income', 'health', 'population']\n) ",
"_____no_output_____"
]
],
[
[
"__Pregunta:__ ¿Hay alguna variable que te entregue indicios a simple vista donde se puedan separar países en grupos?\n\n__Respuesta:__ A simple vista no pareciera haber alguna variable que nos permita separar los paises en grupo, ya que no se forman grupos de identificables de datos, a lo más 1 o 2 datos aisaldos del resto.",
"_____no_output_____"
],
[
"## Ejercicio 2\n\n(1 pto.)\n\nAplicar un escalamiento a los datos antes de aplicar nuestro algoritmo de clustering. Para ello, definir la variable `X_raw` que corresponde a un `numpy.array` con los valores del dataframe `gapminder` en las columnas _income_, _health_ y _population_. Luego, definir la variable `X` que deben ser los datos escalados de `X_raw`.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"X_raw = np.array(gapminder.drop(columns='country'))\nX = StandardScaler().fit(X_raw).transform(X_raw)",
"_____no_output_____"
]
],
[
[
"## Ejercicio 3\n\n(1 pto.)\n\nDefinir un _estimator_ `KMeans` con `k=3` y `random_state=42`, luego ajustar con `X` y finalmente, agregar los _labels_ obtenidos a una nueva columna del dataframe `gapminder` llamada `cluster`. Finalmente, realizar el mismo gráfico del principio pero coloreado por los clusters obtenidos.\n",
"_____no_output_____"
]
],
[
[
"from sklearn.cluster import KMeans",
"_____no_output_____"
],
[
"k = 3\nkmeans = KMeans(n_clusters=k, random_state=42)\nkmeans.fit(X)\nclusters = kmeans.labels_\ngapminder[\"cluster\"] = clusters",
"_____no_output_____"
],
[
"alt.Chart(gapminder).mark_circle(opacity=0.8).encode(\n alt.X(alt.repeat(\"column\"),type='quantitative'),\n alt.Y(alt.repeat(\"row\"),type='quantitative'),\n color=alt.Color('cluster:N', scale=alt.Scale(scheme='rainbow')),\n tooltip=['country']\n).properties(\n width=200,\n height=200\n).repeat(\n row=['population', 'health', 'income'],\n column=['income', 'health', 'population']\n) ",
"_____no_output_____"
]
],
[
[
"## Ejercicio 4\n\n(1 pto.)\n\n__Regla del codo__\n\n__¿Cómo escoger la mejor cantidad de _clusters_?__\n\nEn este ejercicio hemos utilizado que el número de clusters es igual a 3. El ajuste del modelo siempre será mejor al aumentar el número de clusters, pero ello no significa que el número de clusters sea el apropiado. De hecho, si tenemos que ajustar $n$ puntos, claramente tomar $n$ clusters generaría un ajuste perfecto, pero no permitiría representar si existen realmente agrupaciones de datos.\n\nCuando no se conoce el número de clusters a priori, se utiliza la [regla del codo](https://jarroba.com/seleccion-del-numero-optimo-clusters/), que indica que el número más apropiado es aquel donde \"cambia la pendiente\" de decrecimiento de la la suma de las distancias a los clusters para cada punto, en función del número de clusters.\n\nA continuación se provee el código para el caso de clustering sobre los datos estandarizados, leídos directamente de un archivo preparado especialmente.En la línea que se declara `kmeans` dentro del ciclo _for_ debes definir un estimador K-Means, con `k` clusters y `random_state` 42. Recuerda aprovechar de ajustar el modelo en una sola línea.",
"_____no_output_____"
]
],
[
[
"elbow = pd.Series(name=\"inertia\", dtype=\"float64\").rename_axis(index=\"k\")\nfor k in range(1, 10):\n kmeans = KMeans(n_clusters=k, random_state=42).fit(X)\n elbow.loc[k] = kmeans.inertia_ # Inertia: Sum of distances of samples to their closest cluster center\nelbow = elbow.reset_index()",
"_____no_output_____"
],
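[
"# Quick numeric check (added sketch, assuming the `elbow` dataframe from the cell above):\n# the first differences of the inertia show where the decrease flattens out,\n# i.e. the 'change of slope' that the elbow rule refers to.\nelbow.set_index(\"k\")[\"inertia\"].diff()",
"_____no_output_____"
],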
[
"alt.Chart(elbow).mark_line(point=True).encode(\n x=\"k:O\",\n y=\"inertia:Q\"\n).properties(\n height=600,\n width=800\n)",
"_____no_output_____"
]
],
[
[
"__Pregunta:__ Considerando los datos (países) y el gráfico anterior, ¿Cuántos clusters escogerías?\n\n__Respuesta:__ Uno podría pretender escoger 3 clusters ya que sabemos que existe una clasificación de países con este número de clusters, que es 'países subdesarrollados', 'en vías de desarrollo' y 'desarrollados', sin embargo la regla del codo nos indica, a través del gráfico, que el número optimo sería 4, ya que es a partir de ese valor que la pendiente de la gráfica realiza un cambio brusco (en K=3 también se aprecia un cambio en la pendiente, pero no es tan significativo como en k=4). ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e74b821c5be450f401b0b32915ca3856a5897f21 | 16,518 | ipynb | Jupyter Notebook | 01_Getting_&_Knowing_Your_Data/World Food Facts/Exercises_with_solutions.ipynb | KarimaCha/pandas_exercises | 7563644fde177f8071afe163cfef216d538aa98f | [
"BSD-3-Clause"
] | null | null | null | 01_Getting_&_Knowing_Your_Data/World Food Facts/Exercises_with_solutions.ipynb | KarimaCha/pandas_exercises | 7563644fde177f8071afe163cfef216d538aa98f | [
"BSD-3-Clause"
] | null | null | null | 01_Getting_&_Knowing_Your_Data/World Food Facts/Exercises_with_solutions.ipynb | KarimaCha/pandas_exercises | 7563644fde177f8071afe163cfef216d538aa98f | [
"BSD-3-Clause"
] | null | null | null | 29.708633 | 228 | 0.420269 | [
[
[
"# Ex1 - Getting and knowing your Data\nCheck out [World Food Facts Exercises Video Tutorial](https://youtu.be/_jCSK4cMcVw) to watch a data scientist go through the exercises",
"_____no_output_____"
],
[
"### Step 1. Go to https://www.kaggle.com/openfoodfacts/world-food-facts/data",
"_____no_output_____"
],
[
"### Step 2. Download the dataset to your computer and unzip it.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"### Step 3. Use the tsv file and assign it to a dataframe called food",
"_____no_output_____"
]
],
[
[
"food = pd.read_csv('~/Desktop/en.openfoodfacts.org.products.tsv', sep='\\t')",
"//anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py:2717: DtypeWarning: Columns (0,3,5,19,20,24,25,26,27,28,36,37,38,39,48) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
]
],
[
[
"### Step 4. See the first 5 entries",
"_____no_output_____"
]
],
[
[
"food.head()",
"_____no_output_____"
]
],
[
[
"### Step 5. What is the number of observations in the dataset?",
"_____no_output_____"
]
],
[
[
"food.shape #will give you both (observations/rows, columns)",
"_____no_output_____"
],
[
"food.shape[0] #will give you only the observations/rows number",
"_____no_output_____"
]
],
[
[
"### Step 6. What is the number of columns in the dataset?",
"_____no_output_____"
]
],
[
[
"print(food.shape) #will give you both (observations/rows, columns)\nprint(food.shape[1]) #will give you only the columns number\n\n#OR\n\nfood.info() #Columns: 163 entries",
"(356027, 163)\n163\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 356027 entries, 0 to 356026\nColumns: 163 entries, code to water-hardness_100g\ndtypes: float64(107), object(56)\nmemory usage: 442.8+ MB\n"
]
],
[
[
"### Step 7. Print the name of all the columns.",
"_____no_output_____"
]
],
[
[
"food.columns",
"_____no_output_____"
]
],
[
[
"### Step 8. What is the name of 105th column?",
"_____no_output_____"
]
],
[
[
"food.columns[104]",
"_____no_output_____"
]
],
[
[
"### Step 9. What is the type of the observations of the 105th column?",
"_____no_output_____"
]
],
[
[
"food.dtypes['-glucose_100g']",
"_____no_output_____"
]
],
[
[
"### Step 10. How is the dataset indexed?",
"_____no_output_____"
]
],
[
[
"food.index",
"_____no_output_____"
]
],
[
[
"### Step 11. What is the product name of the 19th observation?",
"_____no_output_____"
]
],
[
[
"food.values[18][7]",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74b8d6f672185645e79b6837f9810aea743a23c | 88,012 | ipynb | Jupyter Notebook | 1. Robot Moving and Sensing.ipynb | dgander000/P3_Implement_SLAM | 5c2c6325aeb15c1dbbcea3eb2a060cc3c1fe8709 | [
"MIT"
] | null | null | null | 1. Robot Moving and Sensing.ipynb | dgander000/P3_Implement_SLAM | 5c2c6325aeb15c1dbbcea3eb2a060cc3c1fe8709 | [
"MIT"
] | null | null | null | 1. Robot Moving and Sensing.ipynb | dgander000/P3_Implement_SLAM | 5c2c6325aeb15c1dbbcea3eb2a060cc3c1fe8709 | [
"MIT"
] | null | null | null | 177.80202 | 16,892 | 0.687815 | [
[
[
"# Robot Class\n\nIn this project, we'll be localizing a robot in a 2D grid world. The basis for simultaneous localization and mapping (SLAM) is to gather information from a robot's sensors and motions over time, and then use information about measurements and motion to re-construct a map of the world.\n\n### Uncertainty\n\nAs you've learned, robot motion and sensors have some uncertainty associated with them. For example, imagine a car driving up hill and down hill; the speedometer reading will likely overestimate the speed of the car going up hill and underestimate the speed of the car going down hill because it cannot perfectly account for gravity. Similarly, we cannot perfectly predict the *motion* of a robot. A robot is likely to slightly overshoot or undershoot a target location.\n\nIn this notebook, we'll look at the `robot` class that is *partially* given to you for the upcoming SLAM notebook. First, we'll create a robot and move it around a 2D grid world. Then, **you'll be tasked with defining a `sense` function for this robot that allows it to sense landmarks in a given world**! It's important that you understand how this robot moves, senses, and how it keeps track of different landmarks that it sees in a 2D grid world, so that you can work with it's movement and sensor data.\n\n---\n\nBefore we start analyzing robot motion, let's load in our resources and define the `robot` class. You can see that this class initializes the robot's position and adds measures of uncertainty for motion. You'll also see a `sense()` function which is not yet implemented, and you will learn more about that later in this notebook.",
"_____no_output_____"
]
],
[
[
"# import some resources\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n%matplotlib inline",
"_____no_output_____"
],
[
"# the robot class\nclass robot:\n\n # --------\n # init: \n # creates a robot with the specified parameters and initializes \n # the location (self.x, self.y) to the center of the world\n #\n def __init__(self, world_size = 100.0, measurement_range = 30.0,\n motion_noise = 1.0, measurement_noise = 1.0):\n self.measurement_noise = 0.0\n self.world_size = world_size\n self.measurement_range = measurement_range\n self.x = world_size / 2.0\n self.y = world_size / 2.0\n self.motion_noise = motion_noise\n self.measurement_noise = measurement_noise\n self.landmarks = []\n self.num_landmarks = 0\n\n\n # returns a positive, random float\n def rand(self):\n return random.random() * 2.0 - 1.0\n\n\n # --------\n # move: attempts to move robot by dx, dy. If outside world\n # boundary, then the move does nothing and instead returns failure\n #\n def move(self, dx, dy):\n\n x = self.x + dx + self.rand() * self.motion_noise\n y = self.y + dy + self.rand() * self.motion_noise\n\n if x < 0.0 or x > self.world_size or y < 0.0 or y > self.world_size:\n return False\n else:\n self.x = x\n self.y = y\n return True\n \n\n # --------\n # sense: returns x- and y- distances to landmarks within visibility range\n # because not all landmarks may be in this range, the list of measurements\n # is of variable length. Set measurement_range to -1 if you want all\n # landmarks to be visible at all times\n #\n \n ## TODO: complete the sense function\n def sense(self):\n ''' This function does not take in any parameters, instead it references internal variables\n (such as self.landamrks) to measure the distance between the robot and any landmarks\n that the robot can see (that are within its measurement range).\n This function returns a list of landmark indices, and the measured distances (dx, dy)\n between the robot's position and said landmarks.\n This function should account for measurement_noise and measurement_range.\n One item in the returned list should be in the form: [landmark_index, dx, dy].\n '''\n \n measurements = []\n \n ## TODO: iterate through all of the landmarks in a world\n \n ## TODO: For each landmark\n ## 1. compute dx and dy, the distances between the robot and the landmark\n ## 2. account for measurement noise by *adding* a noise component to dx and dy\n ## - The noise component should be a random value between [-1.0, 1.0)*measurement_noise\n ## - Feel free to use the function self.rand() to help calculate this noise component\n ## - It may help to reference the `move` function for noise calculation\n ## 3. 
If either of the distances, dx or dy, fall outside of the internal var, measurement_range\n ## then we cannot record them; if they do fall in the range, then add them to the measurements list\n ## as list.append([index, dx, dy]), this format is important for data creation done later\n \n ## TODO: return the final, complete list of measurements\n measurements = []\n for i in range(self.num_landmarks):\n dx = self.landmarks[i][0] - self.x + self.rand() * self.measurement_noise\n dy = self.landmarks[i][1] - self.y + self.rand() * self.measurement_noise \n if self.measurement_range < 0.0 or abs(dx) + abs(dy) <= self.measurement_range:\n measurements.append([i, dx, dy])\n return measurements\n\n \n # --------\n # make_landmarks: \n # make random landmarks located in the world\n #\n def make_landmarks(self, num_landmarks):\n self.landmarks = []\n for i in range(num_landmarks):\n self.landmarks.append([round(random.random() * self.world_size),\n round(random.random() * self.world_size)])\n self.num_landmarks = num_landmarks\n \n \n # called when print(robot) is called; prints the robot's location\n def __repr__(self):\n return 'Robot: [x=%.5f y=%.5f]' % (self.x, self.y)\n",
"_____no_output_____"
]
],
[
[
"## Define a world and a robot\n\nNext, let's instantiate a robot object. As you can see in `__init__` above, the robot class takes in a number of parameters including a world size and some values that indicate the sensing and movement capabilities of the robot.\n\nIn the next example, we define a small 10x10 square world, a measurement range that is half that of the world and small values for motion and measurement noise. These values will typically be about 10 times larger, but we ust want to demonstrate this behavior on a small scale. You are also free to change these values and note what happens as your robot moves!",
"_____no_output_____"
]
],
[
[
"world_size = 10.0 # size of world (square)\nmeasurement_range = 5.0 # range at which we can sense landmarks\nmotion_noise = 0.2 # noise in robot motion\nmeasurement_noise = 0.2 # noise in the measurements\n\n# instantiate a robot, r\nr = robot(world_size, measurement_range, motion_noise, measurement_noise)\n\n# print out the location of r\nprint(r)",
"Robot: [x=5.00000 y=5.00000]\n"
]
],
[
[
"## Visualizing the World\n\nIn the given example, we can see/print out that the robot is in the middle of the 10x10 world at (x, y) = (5.0, 5.0), which is exactly what we expect!\n\nHowever, it's kind of hard to imagine this robot in the center of a world, without visualizing the grid itself, and so in the next cell we provide a helper visualization function, `display_world`, that will display a grid world in a plot and draw a red `o` at the location of our robot, `r`. The details of how this function wors can be found in the `helpers.py` file in the home directory; you do not have to change anything in this `helpers.py` file.",
"_____no_output_____"
]
],
[
[
"# import helper function\nfrom helpers import display_world\n\n# define figure size\nplt.rcParams[\"figure.figsize\"] = (5,5)\n\n# call display_world and display the robot in it's grid world\nprint(r)\ndisplay_world(int(world_size), [r.x, r.y])",
"Robot: [x=5.00000 y=5.00000]\n"
]
],
[
[
"## Movement\n\nNow you can really picture where the robot is in the world! Next, let's call the robot's `move` function. We'll ask it to move some distance `(dx, dy)` and we'll see that this motion is not perfect by the placement of our robot `o` and by the printed out position of `r`. \n\nTry changing the values of `dx` and `dy` and/or running this cell multiple times; see how the robot moves and how the uncertainty in robot motion accumulates over multiple movements.\n\n#### For a `dx` = 1, does the robot move *exactly* one spot to the right? What about `dx` = -1? What happens if you try to move the robot past the boundaries of the world?",
"_____no_output_____"
]
],
[
[
"# choose values of dx and dy (negative works, too)\ndx = 1\ndy = 2\nr.move(dx, dy)\n\n# print out the exact location\nprint(r)\n\n# display the world after movement, not that this is the same call as before\n# the robot tracks its own movement\ndisplay_world(int(world_size), [r.x, r.y])",
"Robot: [x=6.13189 y=6.86628]\n"
]
],
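[
[
"# Added illustration (a sketch, not part of the original notebook):\n# trying to move past the world boundary should fail.\n# `move` returns False and leaves the robot's position unchanged.\nprint(r.move(100, 0))\nprint(r)",
"_____no_output_____"
]
],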
[
[
"## Landmarks\n\nNext, let's create landmarks, which are measurable features in the map. You can think of landmarks as things like notable buildings, or something smaller such as a tree, rock, or other feature.\n\nThe robot class has a function `make_landmarks` which randomly generates locations for the number of specified landmarks. Try changing `num_landmarks` or running this cell multiple times to see where these landmarks appear. We have to pass these locations as a third argument to the `display_world` function and the list of landmark locations is accessed similar to how we find the robot position `r.landmarks`. \n\nEach landmark is displayed as a purple `x` in the grid world, and we also print out the exact `[x, y]` locations of these landmarks at the end of this cell.",
"_____no_output_____"
]
],
[
[
"# create any number of landmarks\nnum_landmarks = 3\nr.make_landmarks(num_landmarks)\n\n# print out our robot's exact location\nprint(r)\n\n# display the world including these landmarks\ndisplay_world(int(world_size), [r.x, r.y], r.landmarks)\n\n# print the locations of the landmarks\nprint('Landmark locations [x,y]: ', r.landmarks)",
"Robot: [x=6.13189 y=6.86628]\n"
]
],
[
[
"## Sense\n\nOnce we have some landmarks to sense, we need to be able to tell our robot to *try* to sense how far they are away from it. It will be up t you to code the `sense` function in our robot class.\n\nThe `sense` function uses only internal class parameters and returns a list of the the measured/sensed x and y distances to the landmarks it senses within the specified `measurement_range`. \n\n### TODO: Implement the `sense` function \n\nFollow the `##TODO's` in the class code above to complete the `sense` function for the robot class. Once you have tested out your code, please **copy your complete `sense` code to the `robot_class.py` file in the home directory**. By placing this complete code in the `robot_class` Python file, we will be able to refernce this class in a later notebook.\n\nThe measurements have the format, `[i, dx, dy]` where `i` is the landmark index (0, 1, 2, ...) and `dx` and `dy` are the measured distance between the robot's location (x, y) and the landmark's location (x, y). This distance will not be perfect since our sense function has some associated `measurement noise`.\n\n---\n\nIn the example in the following cell, we have a given our robot a range of `5.0` so any landmarks that are within that range of our robot's location, should appear in a list of measurements. Not all landmarks are guaranteed to be in our visibility range, so this list will be variable in length.\n\n*Note: the robot's location is often called the **pose** or `[Pxi, Pyi]` and the landmark locations are often written as `[Lxi, Lyi]`. You'll see this notation in the next notebook.*",
"_____no_output_____"
]
],
[
[
"# try to sense any surrounding landmarks\nmeasurements = r.sense()\n\n# this will print out an empty list if `sense` has not been implemented\nprint(measurements)",
"[[0, 1.9448626199060042, -1.0031616156828798], [2, 0.8032812034875207, 2.1645375343944453]]\n"
]
],
[
[
"**Refer back to the grid map above. Do these measurements make sense to you? Are all the landmarks captured in this list (why/why not)?**",
"_____no_output_____"
],
[
"---\n## Data\n\n#### Putting it all together\n\nTo perform SLAM, we'll collect a series of robot sensor measurements and motions, in that order, over a defined period of time. Then we'll use only this data to re-construct the map of the world with the robot and landmar locations. You can think of SLAM as peforming what we've done in this notebook, only backwards. Instead of defining a world and robot and creating movement and sensor data, it will be up to you to use movement and sensor measurements to reconstruct the world!\n\nIn the next notebook, you'll see this list of movements and measurements (which you'll use to re-construct the world) listed in a structure called `data`. This is an array that holds sensor measurements and movements in a specific order, which will be useful to call upon when you have to extract this data and form constraint matrices and vectors.\n\n`data` is constructed over a series of time steps as follows:",
"_____no_output_____"
]
],
[
[
"data = []\n\n# after a robot first senses, then moves (one time step)\n# that data is appended like so:\ndata.append([measurements, [dx, dy]])\n\n# for our example movement and measurement\nprint(data)",
"[[[[0, 1.9448626199060042, -1.0031616156828798], [2, 0.8032812034875207, 2.1645375343944453]], [1, 2]]]\n"
],
[
"# in this example, we have only created one time step (0)\ntime_step = 0\n\n# so you can access robot measurements:\nprint('Measurements: ', data[time_step][0])\n\n# and its motion for a given time step:\nprint('Motion: ', data[time_step][1])",
"Measurements: [[0, 1.9448626199060042, -1.0031616156828798], [2, 0.8032812034875207, 2.1645375343944453]]\nMotion: [1, 2]\n"
]
],
[
[
"### Final robot class\n\nBefore moving on to the last notebook in this series, please make sure that you have copied your final, completed `sense` function into the `robot_class.py` file in the home directory. We will be using this file in the final implementation of slam!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e74b9504aeb83da41442aa17c92a35d84952de29 | 48,571 | ipynb | Jupyter Notebook | silver/C06_State_Conversion_And_Visualization.ipynb | asif-saad/qSilver | 4f7c1d3a402376d9a080994e65f34f38f53658bb | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-07-27T13:39:00.000Z | 2021-07-27T13:39:00.000Z | silver/C06_State_Conversion_And_Visualization.ipynb | asif-saad/qSilver | 4f7c1d3a402376d9a080994e65f34f38f53658bb | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | silver/C06_State_Conversion_And_Visualization.ipynb | asif-saad/qSilver | 4f7c1d3a402376d9a080994e65f34f38f53658bb | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-12-19T20:25:00.000Z | 2021-12-19T20:25:00.000Z | 134.919444 | 18,336 | 0.835581 | [
[
[
"<table align=\"left\" width=\"100%\"> <tr>\n <td style=\"background-color:#ffffff;\">\n <a href=\"http://qworld.lu.lv\" target=\"_blank\"><img src=\"../images/qworld.jpg\" width=\"35%\" align=\"left\"> </a></td>\n <td style=\"background-color:#ffffff;vertical-align:bottom;text-align:right;\">\n prepared by Maksim Dimitrijev (<a href=\"http://qworld.lu.lv/index.php/qlatvia/\" target=\"_blank\">QLatvia</a>)\n </td> \n</tr></table>",
"_____no_output_____"
],
[
"<table width=\"100%\"><tr><td style=\"color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;\">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\stateplus}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\stateminus}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $\n$ \\newcommand{\\pstate}[1]{ \\lceil \\mspace{-1mu} #1 \\mspace{-1.5mu} \\rfloor } $\n$ \\newcommand{\\Y}{ \\mymatrix{rr}{0 & -i \\\\ i & 0} } $ $ \\newcommand{\\S}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & i} } $ \n$ \\newcommand{\\T}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & e^{i \\frac{pi}{4}}} } $ \n$ \\newcommand{\\Sdg}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -i} } $ \n$ \\newcommand{\\Tdg}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & e^{-i \\frac{pi}{4}}} } $",
"_____no_output_____"
],
[
"<h1> State representation conversion and visualization </h1>",
"_____no_output_____"
],
[
"Let's discuss whether we can convert $\\ket{\\psi} = \\alpha \\ket{0} + \\beta \\ket{1}$ into $\\ket{\\psi} = \\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i\\phi} \\sin{\\frac{\\theta}{2}} \\ket{1}$ without too much pain.",
"_____no_output_____"
],
[
"<h2> Conversion </h2>\n\nFor successful conversion we need to calculate $\\theta$ and $\\phi$. As the first step we can use the fact that probabilities of observing states $\\ket{0}$ and $\\ket{1}$ are:\n\n<ul>\n <li>$\\mathopen|\\alpha\\mathclose|^2$ and $\\mathopen|\\beta\\mathclose|^2$,</li>\n <li>$(\\cos{\\frac{\\theta}{2}})^2$ and $(\\sin{\\frac{\\theta}{2}})^2$,</li>\n</ul>\n\nrespectively. From this fact we can find the value of $\\theta$, i.e., $\\frac{\\theta}{2} = arcsin \\mypar{ \\sqrt{ \\mathopen|\\beta\\mathclose|^2 } } = arcsin (\\mathopen|\\beta\\mathclose|)$. Since $0 \\leq \\theta \\leq \\pi$, $0 \\leq arcsin (|\\beta\\mathclose|) \\leq \\frac{\\pi}{2}$, and so we do not need to distinguish different cases, $arcsin$ gives us the exact angle. Therefore, we have $\\theta = 2 \\cdot arcsin (\\mathopen|\\beta\\mathclose|)$.",
"_____no_output_____"
],
[
"After that we need to calculate the local phase $e^{i\\phi}$. For this purpose we can first find $a' = \\frac{\\alpha}{\\mathopen|\\alpha\\mathclose|}$ and $b' = \\frac{\\beta}{\\mathopen|\\beta\\mathclose|}$, and we obtain $a'$ and $b'$ each with absolute value 1. Then we need to convert these complex numbers into their polar forms. \n\nHow can we do this? \n\nIf $a' = c + di$ and $a' = e^{i\\lambda}$, then we can calculate $\\lambda$ as $arcsin(\\mathopen| d \\mathclose|)$ and guess the quadrant of the angle $\\lambda$ by knowing the signs of $c$ and $d$:\n<ul>\n <li>if $c \\geq 0$ and $d \\geq 0$, then $0 \\leq \\lambda \\leq \\frac{\\pi}{2}$;</li>\n <li>if $c < 0$ and $d \\geq 0$, then $\\frac{\\pi}{2} < \\lambda \\leq \\pi$;</li>\n <li>if $c < 0$ and $d < 0$, then $\\pi < \\lambda < \\frac{3\\pi}{2}$;</li>\n <li>if $c \\geq 0$ and $d < 0$, then $\\frac{3\\pi}{2} \\leq \\lambda < 2\\pi$.</li>\n</ul>\nSimilarly, we can calculate $\\eta$ for $b' = e^{i\\eta}$.\n\nThe mentioned operations provide us $\\ket{\\psi} = e^{i\\lambda} \\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i\\eta} \\sin{\\frac{\\theta}{2}} \\ket{1}$, and the final step is to get rid of global phase: \n\n$$e^{i\\lambda} \\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i\\eta} \\sin{\\frac{\\theta}{2}} \\ket{1} = e^{i\\lambda} (\\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i(\\eta - \\lambda)} \\sin{\\frac{\\theta}{2}} \\ket{1}) = \\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i(\\eta - \\lambda)} \\sin{\\frac{\\theta}{2}} \\ket{1}.$$ \n\nWe get $\\phi = \\eta - \\lambda$.",
"_____no_output_____"
],
[
"<h3> Task 1 </h3>\n\nImplement a function in Python that takes quantum state $\\alpha \\ket{0} + \\beta \\ket{1}$ as two complex numbers $\\alpha$ and $\\beta$ and returns the angles $\\theta$ and $\\phi$ of the corresponding state $\\ket{\\psi} = \\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i\\phi} \\sin{\\frac{\\theta}{2}} \\ket{1}$.\n\nTest it with the state $\\frac{1}{\\sqrt{2}} \\ket{0} + \\frac{1}{\\sqrt{2}}i \\ket{1}$.",
"_____no_output_____"
]
],
[
[
"#\n# your solution is here\n#\n",
"_____no_output_____"
]
],
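[
[
"# A possible sketch, not the official solution (see the linked solutions notebook).\n# It follows the conversion described above: theta = 2*arcsin(|beta|), and phi is the\n# difference between the polar angles of beta and alpha (the global phase is dropped).\n# Using cmath.phase here is a choice of this sketch; the text derives the same angle\n# from arcsin plus a check of the quadrant.\nfrom cmath import phase\nfrom math import asin, pi\n\ndef amplitudes_to_angles(a, b):\n    theta = 2 * asin(abs(b))\n    phi = (phase(b) - phase(a)) % (2 * pi)\n    return theta, phi\n\n# test with (1/sqrt(2)) |0> + (1/sqrt(2)) i |1>  -> expected (pi/2, pi/2)\nprint(amplitudes_to_angles(1 / 2 ** 0.5, 1j / 2 ** 0.5))",
"_____no_output_____"
]
],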
[
[
"<a href=\"C06_State_Conversion_And_Visualization_Solutions.ipynb#task1\">click for our solution</a>",
"_____no_output_____"
],
[
"<h2> Visualization </h2>\n\nWe can visualize the state $\\ket{\\psi} = \\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i\\phi} \\sin{\\frac{\\theta}{2}} \\ket{1}$ by separately drawing angles $\\frac{\\theta}{2}$ and $\\phi$. In the next notebook we will combine the visualization of both angles.\n\nSuppose that we have $\\theta = \\frac{\\pi}{2}$ and $\\phi = \\frac{4\\pi}{3}$.\n\nFirst, we draw angle $\\frac{\\theta}{2}$ to see which state ($\\ket{0}$ or $\\ket{1}$) has higher probability to be observed - higher absolute value of the amplitude.",
"_____no_output_____"
]
],
[
[
"theta = 90 #pi/2\nmyangle = theta/2\n\nfrom matplotlib.pyplot import figure,gca\nfrom matplotlib.patches import Arc\nfrom math import sin,cos,pi\n\n%run qlatvia.py\n%run drawing.py\n\n#matplotlib.pyplot.subplot(1, 2, 1)\nfigure(figsize=(6,6), dpi=60)\ndraw_real_part()\ngca().add_patch( Arc((0,0),2,2,angle=0,theta1=0,theta2=90,color=\"black\",linewidth=2) )\ngca().add_patch( Arc((0,0),2,2,angle=0,theta1=0,theta2=myangle,color=\"blue\",linewidth=2) )\nmyangle_in_radian = 2*pi*(myangle/360)\nx = cos(myangle_in_radian)\ny = sin(myangle_in_radian)\n\ndraw_quantum_state(x,y,\"|v>\")",
"_____no_output_____"
]
],
[
[
"After that we draw angle $\\phi$ to see the local phase.",
"_____no_output_____"
]
],
[
[
"from matplotlib.pyplot import figure,gca\nfrom matplotlib.patches import Arc\nfrom math import sin,cos,pi\n\n%run qlatvia.py\n%run drawing.py\n\nphi = 240\n\ndraw_imaginary_part()\ngca().add_patch( Arc((0,0),2,2,angle=0,theta1=0,theta2=phi,color=\"blue\",linewidth=2) )\nmyangle_in_radian = 2*pi*(phi/360)\nx = cos(myangle_in_radian)\ny = sin(myangle_in_radian)\n\ndraw_quantum_state(x,y,\"|v>\")",
"_____no_output_____"
]
],
[
[
"As you can see, the visualization of a complex quantum state in two parts is quite demonstrative. The visualization of angle $\\frac{\\theta}{2}$ gives us sense about which state has bigger amplitude, and so - higher probability to be observed. The angle $\\phi$ clearly represents the local phase, and also gives an idea about how to convert the value into regular form of a complex number. Note that since the absolute value is always equal to 1, this does not affect the probabilities of the states to be observed.\n\nTo convert the representation of the state $\\ket{\\psi} = \\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i\\phi} \\sin{\\frac{\\theta}{2}} \\ket{1}$ into $\\ket{\\psi} = \\alpha \\ket{0} + \\beta \\ket{1}$, we need the following operations:\n\n<ul>\n <li>calculate $\\alpha = \\cos{\\frac{\\theta}{2}}$ and calculate $\\sin{\\frac{\\theta}{2}}$;</li>\n <li>convert $e^{i\\phi} = \\cos \\phi + \\sin \\phi \\cdot i$, and so we get $\\beta = \\sin{\\frac{\\theta}{2}} (\\cos \\phi + \\sin \\phi \\cdot i)$.</li>\n</ul>\n\nAs a result we obtain $\\ket{\\psi} = (\\cos{\\frac{\\theta}{2}}) \\ket{0} + (\\sin{\\frac{\\theta}{2}} (\\cos \\phi + \\sin \\phi \\cdot i)) \\ket{1} = \\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i\\phi} \\sin{\\frac{\\theta}{2}} \\ket{1}$.",
"_____no_output_____"
],
[
"<h3> Task 2 </h3>\n\nImplement a function in Python that takes quantum state $\\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i\\phi} \\sin{\\frac{\\theta}{2}} \\ket{1}$ as two numbers $\\frac{\\theta}{2}$ and $\\phi$ and returns the amplitudes $\\alpha$ and $\\beta$ of the corresponding state $\\alpha \\ket{0} + \\beta \\ket{1}$.\n\nTest it with the state $\\cos{\\frac{\\pi/2}{2}} \\ket{0} + e^{i\\frac{\\pi}{2}} \\sin{\\frac{\\pi/2}{2}} \\ket{1}$.",
"_____no_output_____"
]
],
[
[
"#\n# your solution is here\n#\n",
"_____no_output_____"
]
],
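[
[
"# A possible sketch, not the official solution (see the linked solutions notebook).\n# Following the conversion above: alpha = cos(theta/2) and\n# beta = sin(theta/2) * (cos(phi) + sin(phi)*i).\n# As in the task, the first argument is theta/2 (half of the polar angle).\nfrom math import cos, sin, pi\n\ndef angles_to_amplitudes(theta_over_2, phi):\n    alpha = cos(theta_over_2)\n    beta = sin(theta_over_2) * (cos(phi) + sin(phi) * 1j)\n    return alpha, beta\n\n# test with theta/2 = (pi/2)/2 and phi = pi/2  -> expected (1/sqrt(2), (1/sqrt(2)) i)\nprint(angles_to_amplitudes(pi / 4, pi / 2))",
"_____no_output_____"
]
],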
[
[
"<a href=\"C06_State_Conversion_And_Visualization_Solutions.ipynb#task2\">click for our solution</a>",
"_____no_output_____"
],
[
"<h3> Task 3 </h3>\n\nImplement the code to visualize arbitrary state $\\ket{\\psi} = \\cos{\\frac{\\theta}{2}} \\ket{0} + e^{i\\phi} \\sin{\\frac{\\theta}{2}} \\ket{1}$.\n\nTest it with angles $\\frac{\\theta}{2} = \\frac{\\pi/2}{2}$ and $\\phi = \\frac{4\\pi}{3}$.",
"_____no_output_____"
]
],
[
[
"#\n# your solution is here\n#\n",
"_____no_output_____"
]
],
[
[
"<a href=\"C06_State_Conversion_And_Visualization_Solutions.ipynb#task3\">click for our solution</a>",
"_____no_output_____"
],
[
"<h3> Task 4 </h3>\n\nImplement the code to visualize arbitrary state $\\ket{\\psi} = \\alpha \\ket{0} + \\beta \\ket{1}$. You can do the conversion first, and then use the visualization from the previous task.\n\nTest it with the state $\\frac{1}{\\sqrt{2}} \\ket{0} + \\frac{1}{\\sqrt{2}}i \\ket{1}$.",
"_____no_output_____"
]
],
[
[
"#\n# your solution is here\n#\n",
"_____no_output_____"
]
],
[
[
"<a href=\"C06_State_Conversion_And_Visualization_Solutions.ipynb#task4\">click for our solution</a>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74b98f39cfe08ff6be7efd06eaf946b8f4e2d54 | 321,382 | ipynb | Jupyter Notebook | Numerical_analysis/Lessons/5 - Differential equations - st.ipynb | StevetheGreek97/Master_DSLS | fcd8b0bd9312977c7a96f9639588d3b799d1a3ad | [
"MIT"
] | null | null | null | Numerical_analysis/Lessons/5 - Differential equations - st.ipynb | StevetheGreek97/Master_DSLS | fcd8b0bd9312977c7a96f9639588d3b799d1a3ad | [
"MIT"
] | null | null | null | Numerical_analysis/Lessons/5 - Differential equations - st.ipynb | StevetheGreek97/Master_DSLS | fcd8b0bd9312977c7a96f9639588d3b799d1a3ad | [
"MIT"
] | null | null | null | 340.808059 | 43,140 | 0.919106 | [
[
[
"# Data Science 2\n## Numerical analysis - Differential equations\n\nThe following material is more elaborately covered in Chapter 7 - *Initial value problems* of the book *Numerical methods in engineering with Python 3* by Jaan Kiusalaas (see BlackBoard).",
"_____no_output_____"
],
[
"### Introduction\n\nA [differential equation](https://www.wikiwand.com/en/Differential_equation) is an equation that relates one or more functions and their derivatives. In applications, the functions generally represent measured quantities, the derivatives represent their rates of change, and the differential equation defines a relationship between the two. Such relations are common in biology, for instance when describing changes in the state of biological systems (molecular assemblies, cells or tissues, organisms, populations, ecological communities) that may depend on the current state that the system is in.\n\nThe general form of a *first-order differential equation* is $y' = f(x, y)$, where $y' = \\frac{dy}{dx}$ and $f(x, y)$ is a given function. The solution of this equation contains an arbitrary constant (the constant of integration). To find this constant, we must know a point on the solution curve; that is, $y$ must be specified at some value of $x$, say at $x = 0$. A numerical solution of differential equations is essentially a table of values $y(x)$ listed at discrete intervals of $x$.\n\nFor example, a simplistic model that describes the growth of organisms in a nutrient-rich environment may state that the rate of increase of the population is proportional to the size of the population itself: $y' = k \\cdot y$. In this case, $f(x,y) = k\\cdot y$ does not depend on $x$ directly. If the initial population size equals $y(0) = y_0$, then this leads to an exact solution $y(t) = y_0 e^{kt}$ describing exponential growth.\n\n\n\nFor more complicated models, the equation may be difficult to solve analytically. For example, the growth rate may depend on the availability of nutrients, which may diminish as population grows and which may vary periodically with the yearly seasons. But in such cases numerical methods may still be used.\n\nThe variable $y$ may also consist of multiple coordinates, in which case it behaves as a vector. The differential equation then becomes $\\boldsymbol{y}' = \\boldsymbol{f}(x, \\boldsymbol{y})$ with initial condition $\\boldsymbol{y}(0) = \\boldsymbol{y}_0$. Although this may be much more complicated to solve analytically, the notation as well as the numerical solution remains very similar.\n\nAn ordinary differential equation of arbitrary order $n$ can be written as $y^{(n)} = f(x, y, y', \\ldots, y^{(n−1)})$. This can always be transformed into $n$ first-order equations. Using the notation $y_0 = y, y_1 = y', y_2 = y'', \\ldots, y_{n−1} = y^{(n−1)}$, the equivalent first-order equations are $y_0' = y_1, y_1' = y_2, y_2' = y_3, \\ldots, y_n' = f(x, y_0, y_1, \\ldots, y_{n−1})$.",
"_____no_output_____"
],
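[
"# Small illustration (added sketch) of the order reduction described above:\n# the second-order equation y'' = -4*y becomes the first-order system\n#   y0' = y1\n#   y1' = -4*y0\n# with y0 = y and y1 = y'. As a vector function f(x, y) for the solvers below:\nimport numpy as np\n\nsecond_order_as_system = lambda x, y: np.array([y[1], -4.0 * y[0]])\nprint(second_order_as_system(0.0, np.array([1.0, 0.0])))",
"_____no_output_____"
],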
[
"### Euler's Method\n\n[Euler's method](https://en.wikipedia.org/wiki/Euler_method) of solution is conceptually simple. The function $f$ can be linearly approximated as $y(x + h) \\approx y(x) + y'(x) \\cdot h$. By predicting $\\boldsymbol{y}$ at $x + h$ from the information available at $x$, it can be used to move the solution forward in steps of $h$.\n\n$$\n\\boldsymbol{y}(x + h) \\approx \\boldsymbol{y}(x) + \\boldsymbol{f}(\\boldsymbol{y}(x), x) \\cdot h\n$$\n\nEuler's method is seldom used in practice because of its computational inefficiency. Suppressing the truncation error to an acceptable level requires a very small $h$, resulting in many integration steps accompanied by an increase in the roundoff error. The value of the method lies mainly in its simplicity.\n\n**Exercise 1**\n\nComplete the below function `euler` that integrates a differential equation given by some function $\\boldsymbol{f}(x, \\boldsymbol{y})$ over the interval from `x0` to `x1` in a given number of steps using Euler's method, starting from a value `y0` that is provided. The result should be a pair of lists with all the values of $x_i$ and $\\boldsymbol{y}_i$, respectively, with $i$ ranging from zero to the number of steps. Note that all vectors (i.e. the various $\\boldsymbol{y}_i$ but also the return value of the function $\\boldsymbol{f}$) are expressed in the form of a `numpy` column vector.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\ndef euler(f, y0, x0, x1, steps):\n \"\"\"xs, ys = euler(f, y0, x0, x1, steps).\n Euler's method for solving the\n initial value problem {y}' = {f(x,{y})},\n where {y} = {y[0],y[1],...,y[n-1]}.\n x0, y0 = initial conditions\n x1 = terminal value of x\n steps = number of integration steps\n f = user-supplied function that returns the\n array f(x,y) = {y’[0],y’[1],...,y’[n-1]}.\n \"\"\"\n h = (x1 - x0) / steps\n xs = np.linspace(x0, x1, steps + 1)\n y = y0\n ys =[y]\n for x in xs[:-1]:\n y = y + h * f(x, y)\n ys.append(y)\n return xs, ys",
"_____no_output_____"
]
],
[
[
"We apply the method to an example where we look for a solution to the system of equations $y_0' = -2y_1$ and $y_1' = 2y_0$ with starting values $y_0(0) = 1$ and $y_1(0) = 0$. Because the exact solution equals $y_0(x) = \\cos(2x)$ and $y_1(x) = \\sin(2x)$ (i.e. it describes a two-dimensional circular motion) the solution arrives back at its starting value exactly at $x = \\pi$. This property can be used to estimate the accuracy of the integration method.",
"_____no_output_____"
]
],
[
[
"# Example: Solve {y}' = {-2*y1, 2*y0} with y(0) = {1, 0}\nfunc = lambda x, y: np.array([-2.0 * y[1], 2.0 * y[0]])\nx0, x1, y0 = 0.0, np.pi, np.array([1.0, 0.0])\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\ngrid = np.linspace(-1.5, 1.5, 16)",
"_____no_output_____"
],
[
"# for gx in grid:\n# for gy in grid:\n# print(func(x0, np.array([gx, gy])))",
"_____no_output_____"
],
[
"qx = [[func(x0, np.array([gx, gy]))[0] for gx in grid] for gy in grid]\nqy = [[func(x0, np.array([gx, gy]))[1] for gx in grid] for gy in grid]\n\nxs, ys = euler(func, y0, x0, x1, 100)\n\nplt.quiver(grid, grid, qx, qy, pivot='mid')\nplt.plot([y[0] for y in ys], [y[1] for y in ys], '.-b')\n# plt.plot([x[0] for x in xs], [x[1] for x in xs], '.-r')\nplt.title('$|\\Delta y| = {}$'.format(np.linalg.norm(ys[0]-ys[-1])));\nplt.xlabel('$y_0$'); plt.ylabel('$y_1$'); plt.axis('square'); plt.show()",
"_____no_output_____"
]
],
[
[
"We can investigate the behaviour of the error of the method by varying the step size $h$. Verify below that the order of Euler's method is $\\mathcal{O}(h)$; therefore, it is a first-order method.",
"_____no_output_____"
]
],
[
[
"ns = [1, 10, 100, 1000, 10000, 100000]\nfor n in ns:\n xs, ys = euler(func, y0, x0, x1, n)\n print(f'n = {n:6}: |Δy| = {np.linalg.norm(ys[0]-ys[-1]):8.1e}')",
"n = 1: |Δy| = 6.3e+00\nn = 10: |Δy| = 4.5e+00\nn = 100: |Δy| = 2.2e-01\nn = 1000: |Δy| = 2.0e-02\nn = 10000: |Δy| = 2.0e-03\nn = 100000: |Δy| = 2.0e-04\n"
]
],
[
[
"### Heun's method\n\nAs demonstrated in the figure above, the accuracy of Euler's method is limited. The reason is that the derivative $\\boldsymbol{y}'$ is only calculated at the beginning of the step, whereas it is assumed to apply throughout the entire step. Therefore, it cannot account for any changes in $\\boldsymbol{y}'$ that may occur over the interval that is integrated over.\n\nA better method can be obtained by first performing a step according to Euler's method based on $\\boldsymbol{f}(x,\\boldsymbol{y}(x))$, by subsequently calculating the derivative $\\boldsymbol{f}(x+h,\\boldsymbol{y}(x+h))$ at the estimated end value, and then going back and re-calculating the step size based on the average of these two derivatives. Thus, the above Euler's formula is succeeded by an additional calculation according to\n\n$$\n\\boldsymbol{y}(x + h) \\approx \\boldsymbol{y}(x) + \\frac{\\boldsymbol{f}(\\boldsymbol{y}(x), x) + \\boldsymbol{f}(\\boldsymbol{y}(x+h), x+h)}{2} \\cdot h\n$$\n\nThe resulting algorithm is called [Heun's method](https://en.wikipedia.org/wiki/Heun%27s_method), or is also known as the *modified* or *improved Euler's method*. This is an example of a *predictor-corrector* method, since one calculation is used to predict an initial estimation of the step to be taken, and a second calculation provides a more precise correction to that estimate.\n\n**Exercise 2**\n\nWrite a function `heun` that integrates a differential equation given by some function $\\boldsymbol{f}(x, \\boldsymbol{y})$ over the interval from `x0` to `x1` in a given number of steps using Heun's method, starting from a value `y0` that is provided in the form of a `numpy` column vector. The result should be a pairs of lists with all the values of $x_i$ and $\\boldsymbol{y}_i$, respectively, with $i$ ranging from zero to the number of steps. What is the order of Heun's method?",
"_____no_output_____"
]
],
[
[
"def heun(f, y0, x0, x1, steps):\n \"\"\"xs, ys = heun(f, y0, x0, x1, steps).\n Heun's method for solving the\n initial value problem {y}' = {f(x,{y})},\n where {y} = {y[0],y[1],...,y[n-1]}.\n x0, y0 = initial conditions\n x1 = terminal value of x\n steps = number of integration steps\n f = user-supplied function that returns the\n array f(x,y) = {y’[0],y’[1],...,y’[n-1]}.\n \"\"\"\n h = (x1 - x0) / steps\n xs = np.linspace(x0, x1, steps + 1)\n y = y0\n ys =[y]\n for x in xs[:-1]:\n k1 = h * f(x, y)\n k2 = h * f(x + h, y + k1 )\n y = y + 0.5 * (k1 + k2)\n ys.append(y)\n return xs, ys",
"_____no_output_____"
],
[
"# Example: Solve {y}' = {-2*y1, 2*y0} with y(0) = {1, 0}\nxs, ys = heun(func, y0, x0, x1, 50)\n\nplt.quiver(grid, grid, qx, qy, pivot='mid')\nplt.plot([y[0] for y in ys], [y[1] for y in ys], '.-b')\nplt.title('$|\\Delta y| = {}$'.format(np.linalg.norm(ys[0]-ys[-1])));\nplt.xlabel('$y_0$'); plt.ylabel('$y_1$'); plt.axis('square'); plt.show()",
"_____no_output_____"
],
[
"ns = [1, 10, 100, 1000, 10000, 100000]\nfor n in ns:\n xs, ys = heun(func, y0, x0, x1, n)\n print(f'n = {n:6}: |Δy| = {np.linalg.norm(ys[0]-ys[-1]):8.1e}')",
"n = 1: |Δy| = 2.1e+01\nn = 10: |Δy| = 4.5e-01\nn = 100: |Δy| = 4.1e-03\nn = 1000: |Δy| = 4.1e-05\nn = 10000: |Δy| = 4.1e-07\nn = 100000: |Δy| = 4.1e-09\n"
]
],
[
[
"### 4th-order Runge-Kutta Method\n\nThe idea of using intermediate evaluations of the derivative $\\boldsymbol{y}'$ for the integration can be further extended. For example, the [fourth-order Runge-Kutta method](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) evaluates four different derivatives according to the following procedure\n\n$$\n\\begin{aligned}\n\\boldsymbol{k}_0 &= h \\cdot \\boldsymbol{f}(x, \\boldsymbol{y}(x))\n\\\\\n\\boldsymbol{k}_1 &= h \\cdot \\boldsymbol{f}(x + \\frac{1}{2} h, \\boldsymbol{y}(x + \\frac{1}{2} \\boldsymbol{k}_0))\n\\\\\n\\boldsymbol{k}_2 &= h \\cdot \\boldsymbol{f}(x + \\frac{1}{2} h, \\boldsymbol{y}(x + \\frac{1}{2} \\boldsymbol{k}_1))\n\\\\\n\\boldsymbol{k}_3 &= h \\cdot \\boldsymbol{f}(x + h, \\boldsymbol{y}(x + \\boldsymbol{k}_2))\n\\end{aligned}\n$$\n\nIn words:\n\n- $\\boldsymbol{k}_0$ evaluates the derivative at the starting point.\n\n- $\\boldsymbol{k}_1$ evaluates the derivative after half a step, based on $\\boldsymbol{k}_0$.\n\n- $\\boldsymbol{k}_2$ again evaluates the derivative after half a step, but now based on the more accurate $\\boldsymbol{k}_1$.\n\n- $\\boldsymbol{k}_3$ evaluates the derivative at the end of the step, based on $\\boldsymbol{k}_2$.\n\nFinally, the value of $\\boldsymbol{y}$ is updated according to\n\n$$\n\\boldsymbol{y}(x + h) \\approx \\boldsymbol{y}(x) + \\frac{\\boldsymbol{k}_0 + 2 \\boldsymbol{k}_1 + 2 \\boldsymbol{k}_2 + \\boldsymbol{k}_3}{6}\n$$\n\nThe various coefficients in this formula have been chosen carefully so that errors cancel each other optimally. The proof of this fact is beyond our scope.\n\n**Exercise 3**\n\nWrite a function `runge_kutta` that integrates a differential equation given by some function $\\boldsymbol{f}(x, \\boldsymbol{y})$ over the interval from `x0` to `x1` in a given number of steps using the 4<sup>th</sup>-order Runge-Kutta method, starting from a value `y0` that is provided in the form of a `numpy` column vector. The result should be a pairs of lists with all the values of $x_i$ and $\\boldsymbol{y}_i$, respectively, with $i$ ranging from zero to the number of steps. Verify the order of the Runge-Kutta method.",
"_____no_output_____"
]
],
[
[
"def runge_kutta(f, y0, x0, x1, steps):\n \"\"\"xs, ys = runge_kutta(f, y0, x0, x1, steps).\n 4th-order Runge-Kutta method for solving the\n initial value problem {y}' = {f(x,{y})},\n where {y} = {y[0],y[1],...,y[n-1]}.\n x0, y0 = initial conditions\n x1 = terminal value of x\n steps = number of integration steps\n f = user-supplied function that returns the\n array f(x,y) = {y’[0],y’[1],...,y’[n-1]}.\n \"\"\"\n h = (x1 - x0) / steps\n xs = np.linspace(x0, x1, steps + 1)\n y = y0\n ys =[y]\n for x in xs[:-1]:\n #Initial calculation\n k0 = h * f(x, y)\n # Middle calculations\n k1 = h * f(x + 0.5 * h, y + 0.5 *k0)\n k2 = h * f(x + 0.5 * h, y + 0.5 *k1)\n # End calculation\n k3 = h * f(x +h, y + k2)\n \n \n y = y + (k0 + 2.0 * k1 + 2.0 * k2 +k3) / 6\n ys.append(y)\n return xs, ys",
"_____no_output_____"
],
[
"# Example: Solve {y}' = {-2*y1, 2*y0} with y(0) = {1, 0}\nxs, ys = runge_kutta(func, y0, x0, x1, 50)\n\nplt.quiver(grid, grid, qx, qy, pivot='mid')\nplt.plot([y[0] for y in ys], [y[1] for y in ys], '.-b')\nplt.title('$|\\Delta y| = {}$'.format(np.linalg.norm(ys[0]-ys[-1])));\nplt.xlabel('$y_0$'); plt.ylabel('$y_1$'); plt.axis('square'); plt.show()",
"_____no_output_____"
],
[
"ns = [1, 10, 100, 1000, 10000, 100000]\nfor n in ns:\n xs, ys = runge_kutta(func, y0, x0, x1, n)\n print(f'n = {n:6}: |Δy| = {np.linalg.norm(ys[0]-ys[-1]):8.1e}')",
"n = 1: |Δy| = 5.7e+01\nn = 10: |Δy| = 8.1e-03\nn = 100: |Δy| = 8.2e-07\nn = 1000: |Δy| = 8.2e-11\nn = 10000: |Δy| = 7.0e-15\nn = 100000: |Δy| = 1.6e-14\n"
]
],
[
[
"### The Runge-Kutta Method Family\n\nAll of the above algorithms form special cases in a more general [Runge-Kutta family of methods](https://en.wikipedia.org/wiki/List_of_Runge%E2%80%93Kutta_methods) that calculate any number of intermediate derivatives according to the following equations\n\n$$\n\\begin{aligned}\n\\boldsymbol{k}_0 &= h \\cdot \\boldsymbol{f}(x, \\boldsymbol{y}(x))\n\\\\\n\\boldsymbol{k}_1 &= h \\cdot \\boldsymbol{f}(x + q_{1,0} \\cdot h, \\boldsymbol{y}(x) + q_{1,0} \\cdot \\boldsymbol{k}_0)\n\\\\\n\\boldsymbol{k}_2 &= h \\cdot \\boldsymbol{f}(x + (q_{2,0} + q_{2,1}) \\cdot h, \\boldsymbol{y}(x) + q_{2,0} \\cdot \\boldsymbol{k}_0 + q_{2,1} \\cdot \\boldsymbol{k}_1)\n\\\\\n\\vdots\n\\\\\n\\boldsymbol{k}_n &= h \\cdot \\boldsymbol{f}(x + \\sum_{m=0}^{n-1} q_{nm} \\cdot h, \\boldsymbol{y}(x) + \\sum_{m=0}^{n-1} q_{nm} \\cdot \\boldsymbol{k}_m)\n\\end{aligned}\n$$\n\nfollowed by a final single step\n\n$$\n\\boldsymbol{y}(x + h) = \\boldsymbol{y}(x) + \\sum_{m=0}^{n} c_m \\cdot \\boldsymbol{k}_m\n$$\n\nThe coefficients of many such methods can be found in reference books or online; some of the more important examples are listed below in the form of Butcher tables. Verify that the listed coefficients for the Euler's, Heun's and Runge-Kutta methods all describe the formulas that were given earlier.\n\n| Method | Order | $\\boldsymbol{c}$ | $\\boldsymbol{q}$ |\n| - | - | - | - |\n| Euler's | 1 | $1$ | $\\times$ |\n| Heun's | 2 | $\\frac{1}{2}$, $\\frac{1}{2}$ | $1$ |\n| Midpoint | 2 | $0$, $1$ | $\\frac{1}{2}$ |\n| SSPRK3 | 3 | $\\frac{1}{6}$, $\\frac{1}{6}$, $\\frac{2}{3}$ | $\\begin{array}{cc} 1 & \\\\ \\frac{1}{4} & \\frac{1}{4} \\end{array}$ |\n| Kutta's | 3 | $\\frac{1}{6}$, $\\frac{2}{3}$, $\\frac{1}{6}$ | $\\begin{array}{cc} \\frac{1}{2} & \\\\ -1 & 2 \\end{array}$ |\n| Runge-Kutta's | 4 | $\\frac{1}{6}$, $\\frac{1}{3}$, $\\frac{1}{3}$, $\\frac{1}{6}$ | $\\begin{array}{ccc} \\frac{1}{2} & & \\\\ 0 & \\frac{1}{2} & \\\\ 0 & 0 & 1 \\end{array}$ |\n| 3/8-rule | 4 | $\\frac{1}{8}$, $\\frac{3}{8}$, $\\frac{3}{8}$, $\\frac{1}{8}$ | $\\begin{array}{ccc} \\frac{1}{3} & & \\\\ -\\frac{1}{3} & 1 & \\\\ 1 & -1 & 1 \\end{array}$ |\n| SSPRK4 | 4 | $\\frac{1}{6}$, $\\frac{1}{6}$, $\\frac{1}{6}$, $\\frac{1}{2}$ | $\\begin{array}{ccc} \\frac{1}{2} & & \\\\ \\frac{1}{2} & \\frac{1}{2} & \\\\ \\frac{1}{6} & \\frac{1}{6} & \\frac{1}{6} \\end{array}$ |\n| Butcher's | 5 | $\\frac{7}{90}$, $0$, $\\frac{32}{90}$, $\\frac{12}{90}$, $\\frac{32}{90}$, $\\frac{7}{90}$ | $\\begin{array}{ccccc} \\frac{1}{4} & & & & \\\\ \\frac{1}{8} & \\frac{1}{8} & & & \\\\ 0 & -\\frac{1}{2} & 1 & & \\\\ \\frac{3}{16} & 0 & 0 & \\frac{9}{16} & \\\\ -\\frac{3}{7} & \\frac{2}{7} & \\frac{12}{7} & -\\frac{12}{7} & \\frac{8}{7} \\end{array}$ |\n\nThird-order methods are not popular in computer application. Most programmers prefer integration formulas of order four, which achieve a given accuracy with less computational effort. The Runge-Kutta method is popular in particular because it combines an adequate accuracy (being of order 4) with a relative simplicity (because several $q_{nm}$ equal zero).",
"_____no_output_____"
],
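[
"# A minimal sketch (added for illustration) of a generic explicit Runge-Kutta stepper\n# driven by a Butcher table (c, q), matching the general formulas above. The name\n# `general_rk` and the demo call at the end are illustrative assumptions, not part of\n# the method implementations elsewhere in this notebook.\nimport numpy as np\n\ndef general_rk(f, y0, x0, x1, steps, c, q):\n    h = (x1 - x0) / steps\n    xs = np.linspace(x0, x1, steps + 1)\n    y = y0\n    ys = [y]\n    for x in xs[:-1]:\n        ks = [h * f(x, y)]                                   # k_0\n        for n in range(1, len(c)):\n            incr = sum(q[n-1][m] * ks[m] for m in range(n))  # sum_m q_{nm} * k_m\n            a = sum(q[n-1][:n])                              # sum_m q_{nm}\n            ks.append(h * f(x + a * h, y + incr))            # k_n\n        y = y + sum(cm * km for cm, km in zip(c, ks))\n        ys.append(y)\n    return xs, ys\n\n# Heun's method read off the table: c = [1/2, 1/2], q = [[1.0]]\nxs_demo, ys_demo = general_rk(lambda x, y: -y, 1.0, 0.0, 1.0, 100, [0.5, 0.5], [[1.0]])",
"_____no_output_____"
],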
[
"**Exercise 4**\n\nTry to understand the *midpoint method* on the basis of the coefficients in the table. Phrase in your own words what it does. Does the method make intuitive sense?",
"_____no_output_____"
]
],
[
[
"def midpoint(f, y0, x0, x1, steps):\n h = (x1 - x0) / steps\n xs = np.linspace(x0, x1, steps + 1)\n y = y0\n ys =[y]\n for x in xs[:-1]:\n k1 = f(x, y)\n k2 = f(x + (h/2), y + (h/2)*k1)\n \n y = y + h*(k2)\n ys.append(y)\n return xs, ys\n ",
"_____no_output_____"
],
[
"# Example: Solve {y}' = {-2*y1, 2*y0} with y(0) = {1, 0}\nxs, ys = midpoint(func, y0, x0, x1, 50)\n\nplt.quiver(grid, grid, qx, qy, pivot='mid')\nplt.plot([y[0] for y in ys], [y[1] for y in ys], '.-b')\nplt.title('$|\\Delta y| = {}$'.format(np.linalg.norm(ys[0]-ys[-1])));\nplt.xlabel('$y_0$'); plt.ylabel('$y_1$'); plt.axis('square'); plt.show()",
"_____no_output_____"
],
[
"ns = [1, 10, 100, 1000, 10000, 100000]\nfor n in ns:\n xs, ys = midpoint(func, y0, x0, x1, n)\n print(f'n = {n:6}: |Δy| = {np.linalg.norm(ys[0]-ys[-1]):8.1e}')",
"n = 1: |Δy| = 2.1e+01\nn = 10: |Δy| = 4.5e-01\nn = 100: |Δy| = 4.1e-03\nn = 1000: |Δy| = 4.1e-05\nn = 10000: |Δy| = 4.1e-07\nn = 100000: |Δy| = 4.1e-09\n"
]
],
[
[
"**Exercise 5**\n\nPick one of the methods from the above table that has not been implemented yet. Write a function `my_method` that integrates a differential equation given by some function $\\boldsymbol{f}(x, \\boldsymbol{y})$ over the interval from `x0` to `x1` in a given number of steps using that method, starting from a value `y0` that is provided in the form of a `numpy` column vector. The result should be a pairs of lists with all the values of $x_i$ and $\\boldsymbol{y}_i$, respectively, with $i$ ranging from zero to the number of steps. Verify the order of your method.",
"_____no_output_____"
]
],
[
[
"def ralston(f, y0, x0, x1, steps):\n \"\"\"y = ralston(f, y0, t, h)\"\"\"\n \n h = (x1 - x0) / steps\n xs = np.linspace(x0, x1, steps + 1)\n y = y0\n ys =[y]\n for x in xs[:-1]:\n #Initial calculation\n k1 = f(x, y)\n # Middle calculations\n k2 = f(x + ((3*h)/4), y + ((3*h)/4)*k1)\n\n \n \n y = y + (k1*(1/3) + k2*(2/3))*h\n ys.append(y)\n return xs, ys",
"_____no_output_____"
],
[
"# Example: Solve {y}' = {-2*y1, 2*y0} with y(0) = {1, 0}\nxs, ys = ralston(func, y0, x0, x1, 50)\n\nplt.quiver(grid, grid, qx, qy, pivot='mid')\nplt.plot([y[0] for y in ys], [y[1] for y in ys], '.-b')\nplt.title('$|\\Delta y| = {}$'.format(np.linalg.norm(ys[0]-ys[-1])));\nplt.xlabel('$y_0$'); plt.ylabel('$y_1$'); plt.axis('square'); plt.show()",
"_____no_output_____"
],
[
"ns = [1, 10, 100, 1000, 10000, 100000]\nfor n in ns:\n xs, ys = ralston(func, y0, x0, x1, n)\n print(f'n = {n:6}: |Δy| = {np.linalg.norm(ys[0]-ys[-1]):8.1e}')",
"n = 1: |Δy| = 2.1e+01\nn = 10: |Δy| = 4.5e-01\nn = 100: |Δy| = 4.1e-03\nn = 1000: |Δy| = 4.1e-05\nn = 10000: |Δy| = 4.1e-07\nn = 100000: |Δy| = 4.1e-09\n"
]
],
[
[
"### Exercises\n\n**Exercise 6**\n\nSolve the differential equation $y' = 3y - 4e^{-x}$ with initial value $y(0) = 1$ numerically from $x=0$ to $4$ in steps of $h=0.01$. Compare the result with the exact analytical solution $y = e^{-x}$.\n\nDoes it make a visible difference which solver you use? Can you understand what is happening? (See also example 7.5 in the book.)",
"_____no_output_____"
]
],
[
[
"yo = lambda x, y: (3.0 * y) - (4*np.e**(-x))",
"_____no_output_____"
],
[
"def y(x):\n return np.e**(-x)\nx = np.linspace(-1., 5., 50)",
"_____no_output_____"
],
[
"y0 = 1.0\nx0 = 0.0\nx1 = 4.0\nh = 0.01\n\nsteps = int((x1 - x0) / h)\nsteps",
"_____no_output_____"
],
[
"xeul, yeul = euler(yo, y0, x0, x1, steps)\nxheun, yheun = heun(yo, y0, x0, x1, steps)\nxkutta, ykutta = runge_kutta(yo, y0, x0, x1, steps)\n",
"_____no_output_____"
],
[
"ns = [1, 10, 100, 1000, 10000, 100000]\nfor n in ns:\n xkutta, ykutta = runge_kutta(yo, y0, x0, x1, n)\n print(f'n = {n:6}: |Δy| = {ykutta[0]-ykutta[-1]:8.1e}')",
"n = 1: |Δy| = 1.8e+02\nn = 10: |Δy| = 1.8e+02\nn = 100: |Δy| = 1.0e+00\nn = 1000: |Δy| = 9.8e-01\nn = 10000: |Δy| = 9.8e-01\nn = 100000: |Δy| = 9.8e-01\n"
],
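[
"# Added explanatory sketch of what is happening here: the general solution of\n# y' = 3y - 4e^{-x} is y = e^{-x} + C*e^{3x}, and the initial value y(0) = 1 picks C = 0.\n# Any numerical or round-off error acts like a small nonzero C, which is amplified by\n# roughly e^{3*4} over the interval, so every solver eventually drifts away from e^{-x}.\nprint('amplification factor exp(12) =', np.exp(12))",
"_____no_output_____"
],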
[
"plt.plot(x, y(x), \":g\", label = 'actual')\nplt.plot(xeul, yeul, '-b', label = 'euler')\nplt.plot(xheun, yheun, '-r', label = 'heun')\nplt.plot(xkutta, ykutta, '-y', label = 'runge kutta')\n\n\nplt.axis([0,4, -1.5,1.5])\nplt.axhline(0, color = 'black')\nplt.legend()\nplt.plot()\n",
"_____no_output_____"
]
],
[
[
"**Exercise 7**\n\nThe [Lotka-Volterra equations](https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations) describe the dynamics of biological systems in which two species interact, one as a predator and the other as prey. The populations change through time according to the pair of equations\n\n$$\n\\begin{aligned}\n\\frac{dx}{dt} &= \\alpha \\cdot x - \\beta \\cdot x y\n\\\\\n\\frac{dy}{dt} &= \\delta \\cdot x y - \\gamma \\cdot y\n\\end{aligned}\n$$\n\nPlot the population dynamics for $\\alpha = \\frac{2}{3}$, $\\beta = \\frac{4}{3}$, $\\delta = \\gamma = 1$, and initial conditions $x = y = \\frac{3}{2}$.",
"_____no_output_____"
]
],
[
[
"from scipy.integrate import solve_ivp",
"_____no_output_____"
],
[
"\ndef model(t, v):\n return[\n ((2/3)*v[0]) - ((4/3)*v[0]*v[1]),\n (v[0]*v[1]) - v[1] \n ]\n ",
"_____no_output_____"
],
[
"fromto = (0.0, 60.0)\nstart = [2/3, 2/3]\ntimes = np.linspace(0.0, 60.0, 501)",
"_____no_output_____"
],
[
"solution = solve_ivp(model, fromto, start, t_eval=times)\n\nplt.plot(solution.t, solution.y[0], '-y', label='Prey')\nplt.plot(solution.t, solution.y[1], '-r', label='Predator')\n\nplt.xlabel('$t$ [days]'); plt.ylabel('$f$ [-]')\nplt.legend(); plt.show()",
"_____no_output_____"
]
],
[
[
"**Exercise 8**\n\nThe [Bessel function $J_0$](https://en.wikipedia.org/wiki/Bessel_function) is given by the 2<sup>nd</sup>-order differential equation\n\n$$\nJ_0'' + \\frac{1}{x} \\cdot J_0' + J_0 = 0\n$$\n\nwith initial values $J_0(0) = 1$ and $J_0'(0) = 0$. How many zeroes does this function have in the range $x = 0$ to $25$?\n\n(Hint: To avoid the singularity at $x = 0$, start the integration at $x = 10^{-12}$.)",
"_____no_output_____"
]
],
[
[
"y0 = np.array([1.0, 0.0])\nx0 = 1e-12\nx1 = 25\nh = 0.1\n\nsteps = int((x1 - x0) / h)\nsteps",
"_____no_output_____"
]
],
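[
[
"# A sketch of one way to finish Exercise 8 (illustrative, not the author's solution):\n# rewrite J0'' + J0'/x + J0 = 0 as the first-order system u' = [u[1], -u[1]/x - u[0]]\n# and count the sign changes of J0 on (0, 25] using the runge_kutta solver defined above.\nbessel = lambda x, u: np.array([u[1], -u[1]/x - u[0]])\nxs_b, us_b = runge_kutta(bessel, np.array([1.0, 0.0]), 1e-12, 25.0, 2500)\nj0 = [u[0] for u in us_b]\nsign_changes = sum(1 for a, b in zip(j0[:-1], j0[1:]) if a * b < 0)\nprint('zeros of J0 in (0, 25):', sign_changes)  # the known count is 8",
"_____no_output_____"
]
],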
[
[
"***",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74ba5b9d6e596a12d02c8467355cb8fd378619d | 19,538 | ipynb | Jupyter Notebook | src/ipynb/day49_text_preprocessing.ipynb | csiu/kick | 0ebc9166074b702fc8b5835685ad102957ab349c | [
"MIT"
] | null | null | null | src/ipynb/day49_text_preprocessing.ipynb | csiu/kick | 0ebc9166074b702fc8b5835685ad102957ab349c | [
"MIT"
] | null | null | null | src/ipynb/day49_text_preprocessing.ipynb | csiu/kick | 0ebc9166074b702fc8b5835685ad102957ab349c | [
"MIT"
] | null | null | null | 25.539869 | 1,212 | 0.431825 | [
[
[
"I eventually want to do text analysis with the Kickstarter data, but I'll need to do some data cleaning and text preprocessing before I can do so.",
"_____no_output_____"
]
],
[
[
"import psycopg2\nimport pandas as pd\n\nimport nltk\nimport re",
"_____no_output_____"
]
],
[
[
"## Load data\nLoad data from database. List of columns found on day44",
"_____no_output_____"
]
],
[
[
"dbname = \"kick\"\ntblname = \"info\"\n\n# Connect to database\nconn = psycopg2.connect(dbname=dbname)\ncur = conn.cursor()",
"_____no_output_____"
],
[
"colnames = [\"id\", \"name\", \"blurb\"]\n\ncur.execute(\"SELECT {col} FROM {tbl}\".format(col=', '.join(colnames), tbl=tblname))\nrows = cur.fetchall()\n\npd.DataFrame(rows, columns=colnames).head()",
"_____no_output_____"
]
],
[
[
"I want to combine `name` and `blurb`. We can use the `concat_ws` command in postgres",
"_____no_output_____"
]
],
[
[
"# Treat name + blurb as 1 document\ncur.execute(\"SELECT id, concat_ws(name, blurb) FROM info\")\nrows = cur.fetchall()\n\ndf = pd.DataFrame(rows, columns=[\"id\", \"document\"])\ndf.head()",
"_____no_output_____"
],
[
"# close communication\ncur.close()\nconn.close()",
"_____no_output_____"
],
[
"# Number of documents\ndf.shape",
"_____no_output_____"
]
],
[
[
"## Text processing for 1 document",
"_____no_output_____"
]
],
[
[
"text = df[\"document\"][1]\ntext",
"_____no_output_____"
]
],
[
[
"### To lower case",
"_____no_output_____"
]
],
[
[
"text = text.lower()\ntext",
"_____no_output_____"
]
],
[
[
"### Bag of word & tokenization\nDigits are also removed",
"_____no_output_____"
]
],
[
[
"words = nltk.wordpunct_tokenize(re.sub('[^a-zA-Z_ ]', '', text))\nwords",
"_____no_output_____"
]
],
[
[
"### Remove stopwords\n\nReference: https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-1-for-beginners-bag-of-words",
"_____no_output_____"
]
],
[
[
"from nltk.corpus import stopwords\n\nenglish_stopwords = stopwords.words(\"english\")\n\nprint(len(english_stopwords))\nprint(english_stopwords)",
"153\n['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven', 'isn', 'ma', 'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren', 'won', 'wouldn']\n"
]
],
[
[
"We have a list of 153 english stopwords",
"_____no_output_____"
]
],
[
[
"# Remove stopwords from document\nwords = [w for w in words if not w in english_stopwords]\nwords",
"_____no_output_____"
]
],
[
[
"### Stemming vs Lemmatization\nReference: http://stackoverflow.com/questions/771918/how-do-i-do-word-stemming-or-lemmatization",
"_____no_output_____"
]
],
[
[
"from nltk.stem import PorterStemmer, WordNetLemmatizer\n\nport = PorterStemmer()\nwnl = WordNetLemmatizer()",
"_____no_output_____"
],
[
"## Stemming\n[port.stem(w) for w in words]",
"_____no_output_____"
],
[
"## Lemmatizing\n[wnl.lemmatize(w) for w in words]",
"_____no_output_____"
]
],
[
[
"### Putting it all together",
"_____no_output_____"
]
],
[
[
"def text_processing(text, method=None):\n # Lower case\n text = text.lower()\n \n # Remove non-letters &\n # Tokenize \n words = nltk.wordpunct_tokenize(re.sub('[^a-zA-Z_ ]', '', text))\n \n # Remove stop words\n words = [w for w in words if not w in stopwords.words(\"english\")]\n \n # Stemming vs Lemmatizing vs do nothing\n if method == \"stem\":\n port = PorterStemmer()\n words = [port.stem(w) for w in words]\n elif method == \"lemm\":\n wnl = WordNetLemmatizer()\n words = [wnl.lemmatize(w) for w in words]\n\n return(words)",
"_____no_output_____"
],
[
"text = df[\"document\"][1]\n\ncompare = {\n \"raw\" : text_processing(text),\n \"stemming\": text_processing(text, method=\"stem\"),\n \"lemmatizing\": text_processing(text, method=\"lemm\") \n}\npd.DataFrame.from_dict(compare)[[\"raw\", \"stemming\", \"lemmatizing\"]]",
"_____no_output_____"
]
],
[
[
"- Find some words are untouched:\n - scifi\n - save\n - world\n- Some words are touched only in stemming:\n - fantsy-fantasi\n - anime->anim\n - styled->style\n - series->seri\n - trying->tri \n - probably->probabl\n- Agreement of stemming and lemmatizng\n - guys->guy",
"_____no_output_____"
],
[
"---\n\n(Aside) How does stemming compare for other words?",
"_____no_output_____"
]
],
[
[
"[port.stem(w) for w in [\"trying\", \"triangle\", \"triple\"]]",
"_____no_output_____"
],
[
"[port.stem(w) for w in [\"series\", \"serious\"]]",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
e74bbf2cec69fa4f38d5a76eecc58cd56897f6b4 | 3,632 | ipynb | Jupyter Notebook | python/jupyternotebook/0.0 annaconda_test.ipynb | WhitePhosphorus4/xh-learning-code | 025e31500d9f46d97ea634d7fd311c65052fd78e | [
"Apache-2.0"
] | null | null | null | python/jupyternotebook/0.0 annaconda_test.ipynb | WhitePhosphorus4/xh-learning-code | 025e31500d9f46d97ea634d7fd311c65052fd78e | [
"Apache-2.0"
] | null | null | null | python/jupyternotebook/0.0 annaconda_test.ipynb | WhitePhosphorus4/xh-learning-code | 025e31500d9f46d97ea634d7fd311c65052fd78e | [
"Apache-2.0"
] | null | null | null | 23.894737 | 66 | 0.406112 | [
[
[
"# 运行以下代码,注意查看输出",
"_____no_output_____"
]
],
[
[
"import turtle\nimport random\nfrom turtle import *\nfrom time import sleep\n\n# t = turtle.Turtle()\n# w = turtle.Screen()\n\n\ndef tree(branchLen, t):\n if branchLen > 3:\n if 8 <= branchLen <= 12:\n if random.randint(0, 2) == 0:\n t.color('snow')\n else:\n t.color('lightcoral')\n t.pensize(branchLen / 3)\n elif branchLen < 8:\n if random.randint(0, 1) == 0:\n t.color('snow')\n else:\n t.color('lightcoral')\n t.pensize(branchLen / 2)\n else:\n t.color('sienna')\n t.pensize(branchLen / 10)\n\n t.forward(branchLen)\n a = 1.5 * random.random()\n t.right(20*a)\n b = 1.5 * random.random()\n tree(branchLen-10*b, t)\n t.left(40*a)\n tree(branchLen-10*b, t)\n t.right(20*a)\n t.up()\n t.backward(branchLen)\n t.down()\n\n\ndef petal(m, t): # 树下花瓣\n for i in range(m):\n a = 200 - 400 * random.random()\n b = 10 - 20 * random.random()\n t.up()\n t.forward(b)\n t.left(90)\n t.forward(a)\n t.down()\n t.color(\"lightcoral\")\n t.circle(1)\n t.up()\n t.backward(a)\n t.right(90)\n t.backward(b)\n\n\ndef bea_tree():\n print('>>>> Running...')\n t = turtle.Turtle()\n w = turtle.Screen()\n \n \n t = turtle.Turtle()\n myWin = turtle.Screen()\n getscreen().tracer(5, 0)\n turtle.screensize(bg='wheat')\n t.left(90)\n t.up()\n t.backward(150)\n t.down()\n t.color('sienna')\n tree(60, t)\n petal(100, t)\n\n myWin.exitonclick()\n print('Cool !!!\\n请查看图片,并将图片截图发送至群中。\\nShow time...')\n\n\nbea_tree()",
">>>> Running...\nCool !!!\n请查看图片,并将图片截图发送至群中。\nShow time...\n"
],
[
"\"\"\"\n成功运行后,会在另一个界面中输出结果,注意查看。\n你可以多次运行直到输出你满意的结果。\n如果多次运行报错没关系,只需关闭重新运行即可。\n\"\"\"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
e74be8bf4027cdcb21067df05393171ac5b6c4f5 | 72,891 | ipynb | Jupyter Notebook | notebooks/2_bit_ripple_adder.ipynb | deadbeatfour/dm-simulator-textbook | 946771592ec2c1d9ff9ce0fc03aedd8b087fdf6d | [
"MIT"
] | null | null | null | notebooks/2_bit_ripple_adder.ipynb | deadbeatfour/dm-simulator-textbook | 946771592ec2c1d9ff9ce0fc03aedd8b087fdf6d | [
"MIT"
] | null | null | null | notebooks/2_bit_ripple_adder.ipynb | deadbeatfour/dm-simulator-textbook | 946771592ec2c1d9ff9ce0fc03aedd8b087fdf6d | [
"MIT"
] | null | null | null | 409.5 | 50,320 | 0.936714 | [
[
[
"%pylab inline\nimport warnings\nfrom itertools import product\nfrom qiskit import *\nwarnings.filterwarnings('ignore')\nmatplotlib.rc('font', size=14)",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"backend = BasicAer.get_backend('dm_simulator')\n# Noise parameters\noptions = {}\noptions_noisy = {\n \"thermal_factor\": 0.,\n \"decoherence_factor\": 0.9,\n \"depolarization_factor\": 0.9,\n \"bell_depolarization_factor\": 0.99,\n \"decay_factor\": 0.8,\n \"rotation_error\": {'rx':[.5, 0.5], 'ry':[.5, 0.5], 'rz': [.9, 0.5]},\n \"tsp_model_error\": [0.5, 0.5]\n }",
"_____no_output_____"
],
[
"# The Circuit\nq= QuantumRegister(7)\nc = ClassicalRegister(7)\nqc = QuantumCircuit(q,c)\n# Preparation\nqc.x(0)\nqc.x(3)\nqc.barrier()\n# Addition\nqc.cx(1,6)\nqc.cx(3,6)\nqc.ccx(1,3,5)\nqc.barrier()\nqc.ccx(0,2,4)\nqc.cx(0,2)\nqc.ccx(2,5,4)\nqc.cx(2,5)\nqc.cx(0,2)\nqc.measure(q,c,basis='Ensemble',add_param='Z')\n# Execution with and without noise\nrun = execute(qc,backend,**options)\nresult = run.result()\nrun_error = execute(qc,backend,**options_noisy)\nresult_error = run_error.result()\n# Final state (probabilities)\nprob = result['results'][0]['data']['ensemble_probability']\nprob_error = result_error['results'][0]['data']['ensemble_probability']\nqc.draw(output='mpl')",
"_____no_output_____"
],
[
"outputs = {\n int(result, 2) : sum([prob[binval] \n for binval in [bstr+result \n for bstr in [''.join(p) for p in product('10', repeat=4)]]]) \n for result in [''.join(p) for p in product('10', repeat=3)]}\noutputs_error = {\n int(result, 2) : sum([prob_error[binval] \n for binval in [bstr+result\n for bstr in [''.join(p) for p in product('10', repeat=4)]]]) \n for result in [''.join(p) for p in product('10', repeat=3)]\n}\nlabels = outputs.keys()\nwithout_noise = outputs.values()\nwith_noise = outputs_error.values()\n\nx = np.arange(len(labels)) # the label locations\nwidth = 0.5 # the width of the bars\n\nfig, ax = plt.subplots(figsize=(10,5))\nrects1 = ax.bar(x - width/2, without_noise, width, label='Without Noise')\nrects2 = ax.bar(x + width/2, with_noise, width, label='With Noise')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('Probability')\nax.set_title('Ensemble Probabilities with Noise')\nax.set_xticks(x)\nax.set_xticklabels(labels, rotation='vertical')\nax.legend()\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e74bf9721512d007beb0d2ef02275ca13dc78a86 | 194,181 | ipynb | Jupyter Notebook | 03_DataPreprocessing/09_Uni/2_Fill_FarsiUni.ipynb | yazdipour/DM17 | bcde44df990938723c843801c1333cbcf4e5bd76 | [
"MIT"
] | 2 | 2018-04-25T09:44:31.000Z | 2018-07-28T20:20:39.000Z | 03_DataPreprocessing/09_Uni/2_Fill_FarsiUni.ipynb | yazdipour/DM17 | bcde44df990938723c843801c1333cbcf4e5bd76 | [
"MIT"
] | 1 | 2019-07-24T21:16:18.000Z | 2020-03-11T11:43:32.000Z | 03_DataPreprocessing/09_Uni/2_Fill_FarsiUni.ipynb | yazdipour/DM17 | bcde44df990938723c843801c1333cbcf4e5bd76 | [
"MIT"
] | null | null | null | 50.739744 | 148 | 0.265901 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_json('Fix2.json')\ndf.sort_index(inplace=True)",
"_____no_output_____"
]
],
[
[
"# uniBachelors",
"_____no_output_____"
]
],
[
[
"x=df.uniBachelors.value_counts()\npd.set_option('display.max_rows', len(x))\nprint(x)\npd.reset_option('display.max_rows')",
"tehran 121\nتهران 112\nazad 111\nشریف 84\nعلم و صنعت 76\nامیرکبیر 71\ntabriz 62\nصنعتی اصفهان 61\nsharif 59\namirkabir 54\nshiraz 48\nتبریز 45\niust 45\nشیراز 43\nفردوسی مشهد 41\nدانشگاه تهران 39\nsharif technology 38\nخواجه نصیر 36\nshahid beheshti 33\nn/a 33\nصنعتی شریف 32\nصنعتی امیرکبیر 32\nشهید بهشتی 31\niau 29\nاصفهان 28\nislamic azad 28\nisfahan 23\nferdowsi mashhad 22\nmazandaran 21\nferdowsi 19\nurmia 18\nguilan 18\nisfahan technology 17\niut 17\nگیلان 17\namirkabir technology 17\nپیام نور 17\niran science and technology 15\nازاد 15\nارومیه 13\nعلامه طباطبایی 12\nالزهرا 12\nسمنان 12\nkntu 12\nمازندران 11\nyazd 11\nazad shiraz 11\nدانشگاه آزاد اسلامی 11\nتهران جنوب 10\nفردوسی 10\nزنجان 10\naut 10\nشريف 10\nدانشگاه تبریز 9\nدانشگاه شیراز 9\nrazi 9\nعلم و صنعت ایران 9\nتهران مرکز 9\nصنعتي شريف 9\nzanjan 9\nsharif tech 8\nدانشگاه آزاد 8\nعلوم و تحقیقات 8\nصنعتی شاهرود 8\nمحقق اردبیلی 8\niaum 8\npetroleum technology 8\ntehran polytechnic 8\nباهنر کرمان 8\nاميركبير 7\nشهید رجایی 7\nشهید باهنر کرمان 7\nیزد 7\nآزاد 7\narak 7\nسراسری 7\nazad tehran 7\nغیرانتفاعی 7\nصنعت نفت 7\nkashan 7\nامیر کبیر 7\nکردستان 7\nisfahan tech 7\nsemnan 6\nعلوم پزشکی شهید بهشتی 6\nkhaje nasir toosi 6\nتهران شمال 6\nدانشگاه آزاد مشهد 6\nazad tabriz 6\nدانشگاه صنعتی اصفهان 6\nmultimedia 6\nikiu 6\nazad mashhad 6\nferdowsi mashhad 6\nشهید چمران اهواز 6\namirkabir tech 6\nqazvin 5\nهنر تهران 5\niran 5\nچمران اهواز 5\nصنعتی شیراز 5\nشاهد 5\nبهشتی 5\nدانشگاه اصفهان 5\nhormozgan 5\npersian gulf 5\nsut 5\nشهرکرد 5\nshomal 5\npnu 5\nallameh tabatabai 5\nazad tehran markaz 5\nدولتی 5\nesfahan 5\nخواجه نصیرالدین طوسی 5\numa 4\nعلم وصنعت 4\niaun 4\nshahid rajaee 4\nalzahra 4\nنجف آباد 4\nازاد مشهد 4\nput 4\nkhaje nasir 4\nbahonar kerman 4\nبین المللی امام خمینی 4\nسوره 4\nدانشگاه آزاد تبریز 4\nsahand technology 4\nصنعتي اصفهان 4\nthe guilan 4\nislamic azad mashhad 4\nshahed 4\nشهيد بهشتي 4\nعلم و فرهنگ 4\nتربیت معلم تهران 4\nدانشگاه هنر تهران 4\nbirjand 4\nدانشگاه آزاد کرج 4\nصنعتی ارومیه 4\npayam noor 4\nدولتی شهرستان 4\nqazvin azad 4\nصنعتی نوشیروانی بابل 3\niau, south tehran branch 3\nدانشگاه آزاد نجف آباد 3\nدانشگاه علم و فرهنگ 3\npgu 3\nصنعتی امیر کبیر 3\nرازی کرمانشاه 3\nislamic azad central tehran branch 3\nبیرجند 3\nدانشگاه آزاد شیراز 3\nچمران 3\nazad qazvin 3\nدانشگاه علم و صنعت 3\nshiraz azad 3\nshahrood 3\nshahrekord 3\nk.n.toosi 3\nدانشگاه آزاد قزوین 3\nشيراز 3\nشهرستان 3\nعلمی کاربردی 3\nشیخ بهایی 3\nbun/aali sina 3\nخواجه نصیر الدین طوسی 3\nفردوسي 3\nشمال 3\nqazvin islamic azad 3\nazad n/atehran 3\nعلوم پزشکی اصفهان 3\nکرمان 3\nدانشگاه زنجان 3\nشهیدبهشتی 3\nرجایی 3\nعلامه طباطبايي 3\nshahrood technology 3\nazad kerman 3\nsharif univ of tech 3\nislamic azad south tehran branch 3\nبوعلی سینا 3\nدانشگاه علم و صنعت ایران 3\niran science &amp; technology 3\nqiau 3\nعلوم پزشکی ایران 3\namir kabir 3\nazad karaj 3\nاراک 3\nbeheshti 3\nkaraj azad 3\nسیستان و بلوچستان 3\nsadjad institute of higher education 3\nazad najafabad 3\niaun/asouth tehran branch 2\nصنعت آب و برق 2\nazad parand 2\nامیرکبیر (پلی تکنیک) 2\nدانشگاه آزاد تهران مرکز 2\nteh 2\nazad zanjan 2\nکاشان 2\niau south tehran branch 2\nazad roudehen 2\niau ahvaz 2\nدانشگاه آزاد واحد تهران جنوب 2\nscience and culture 2\nآزاد نجف آباد 2\nدانشگاه مازندران 2\neastern mediterranean 2\nعلوم پزشکی تهران 2\nkurdistan 2\nسجاد 2\nqom 2\nازاد تهران جنوب 2\niaut 2\namir kabir technology 2\nkhajeh nasir 2\nazad n/a tehran 2\nدانشگاه گیلان 2\nk. n. toosi technology 2\nتهران مرکزn/aآزاد 2\nferdowsi univ. 
of mashhad 2\nazad eslami 2\nدانشگاه آزاد اسلامی n/a پرند 2\nbahonar 2\nخیام مشهد 2\nکشاورزی گرگان 2\nshahid chamran ahvaz 2\nآزاد اسلامی 2\nازاد شیراز 2\nislamic azad uni 2\nبین المللی امام خمینی قزوین 2\nsistan and baluchestan 2\nدانشگاه اراک 2\nدانشگاه آزاد اسلامی واحد کرج 2\nدانشگاه شهید باهنر کرمان 2\nدانشگاه آزاد اسلامی n/a تهران جنوب 2\nدانشگاه آزاد اسلامی واحد تهران جنوب 2\nدانشگاه آزاد تهران جنوب 2\nدانشگاه آزاد شهرستان 2\nazad karaj 2\nپلی تکنیک تهران 2\nآزاد قزوین 2\nsharif univ. of tech. 2\nشاهرود 2\nisfahan u. of tech 2\nbasu 2\nغیر انتفاعی 2\nmut 2\nهنر و معماری 2\nislamic azad tabriz 2\nدانشگاه صنعت نفت 2\nshiraz univ 2\nisfahan univ. of tech. 2\nدانشگاه آزاد اسلامی واحد مشهد 2\nازاد اسلامی 2\ntehran azad 2\nshahid chamran ahvaz 2\nدانشگاه صنعتی شریف 2\ntarbiat moallem 2\nart 2\nelmi karbordi 2\nآزاد تهران جنوب 2\nشیخ بهایی (غیر انتفاعی) 2\nغیرانتفاعی کار قزوین 2\npayame noor 2\nنوشیروانی بابل 2\nshahid bahonar 2\nخلیج فارس 2\nازاد ابهر 2\nامير کبير 2\nعلوم پزشکی مشهد 2\namirkabir univ. of tech 2\nدانشگاه آزاد واحد علوم و تحقیقات تهران 2\nmohaghegh ardabili 2\nعلامه طباطبائی 2\nazad kashan 2\nدانشگاه صنعتی امیرکبیر (پلی تکنیک تهران) 2\nسراسری زنجان 2\nsbu 2\nباهنر 2\ntehran medical sciences 2\n 2\nخواجه نصير 2\namirkabir univ of tech 2\nفنی تهران 2\nazad n/acentral tehran branch 2\nazad mashad 2\nعلوم پزشكي تهران 2\niasbs 2\nعلوم و تحقیقات تهران 2\nk. n. toosi tech 2\nفردوسي مشهد 2\nدولتی جهرم 2\nخوراسگان 2\nآزاد مشهد 2\nاروميه 2\ntmu 2\nدانشگاه آزاد اسلامی واحد پرند 2\npayam nour 2\nپلی تکنیک 2\nsharif univ. of tech 2\nkharazmi 2\nخلیج فارس (بوشهر) 2\nعلوم پزشکی شیراز 2\nصنعتی سهند 2\nامام صادق 2\nkaraj islamic azad 2\nامام خمینی قزوین 2\nsrttu 2\nmust 2\nmalek ashtar 1\nazad south tehran branch 1\nفرذوسی مشهد 1\nحکیم سبزواری 1\nدانشگاه آزاد بناب 1\nتربیت معلم 1\nbun/aali 1\nisfahan uni. of tech 1\nthe tabriz 1\nimam khomeini international uni 1\nآزادn/aتهران مرکز 1\nazad of mashhad 1\nغیرانتفاعی شمال 1\nصنعت نفت n/a آبادان 1\nشهيد بهشتى 1\nصنعتی خواجه نصیر 1\nprivate 1\nصنعتی سهند تبریز 1\nآزاد اسلامی قزوین n/a باراجین 1\nmarvdasht azad 1\nsutech(shiraz technolo) 1\nazad ghaemshahr 1\nislamic azadn/a south tehran 1\nدانشگاه خلیج فارس(بوشهر) 1\nدانشگاه آزاد اسلامی n/a اراک 1\namirkabir tech &amp; semn. 1\nazad yazd 1\nفنی مهندسی آزاد مشهد 1\nجامع علمي كاربردي 1\nمفید 1\nازاد واحد تهران جنوب 1\nthe tehran 1\nuniverity of tehran 1\nدانشگاه فردوسی 1\nعلوم وفنون مازندران 1\nk n toosi of tech 1\nzandjan islamic azad 1\nمیراث فرهنگی 1\npolytechnic of tehran 1\nazad n/amashhad branch 1\nغيرانتفاعي 1\ndamghan 1\ntehrran 1\nsaveh azad 1\nferdowsi univerity of mashhad 1\nferdowsi mashad 1\niran sci. and tech 1\nalberta 1\namirkabir n/a aut 1\nisalmic azad 1\nآزاد علوم وتحقیقات 1\nsience and technology 1\nazad / north tehran branch 1\ntehran uni 1\nsemanan 1\nislamic azad karaj branch 1\nعلم و فرهنگ تهران 1\nsadjad inst 1\nazad boroujerd 1\nدانشگاه صنعتی خواجه نصیر 1\napu 1\nazad pharmacutical science 1\nsouth tehran n/a iau 1\ntarbiat moalem 1\nislamic azad garmsar 1\niausrb 1\nملایر 1\nazad saveh 1\nshahid bahonar kerman 1\nesfahan technology 1\nدانشگاه آزاد بیرجند 1\nmalayer 1\nscience and research branch 1\nمترجمی زبان انگلیسی 1\nامام رضا 1\nد. آزاد کرج 1\nدانشگاه آزاد بوشهر 1\nferdowsi univeristy of mashad 1\namirkabir technology(tafresh campus) 1\nباهنرکرمان 1\nshariaty 1\nغیاث الدین جمشید کاشانی 1\nazad west tehran branch 1\ntabriz u 1\nمنابع طبيعي گرگان 1\nazadn/ateh. 
markaz 1\namir kabir uni 1\npower and water 1\nbabol noshirvani 1\nموسسه آموزش عالی حکمت رضوی 1\nscience &amp; tech babol 1\nدانشگاه غیر انتفاعی کوثر قزوین 1\nعلوم پایه گیلان 1\nقزوین آزاد 1\nlian 1\nislamic azad khorasgan(isfahan) 1\nنجف آباد n/aآزاد 1\niau shiraz 1\nazad zanjan 1\nپیام نور قزوین 1\nsadjad inst. of higher edu. mashad 1\nالرهرا 1\nazad borujerd 1\nعلمی و کاربردی جهاد دانشگاهی 1\nآزاد اسلامی واحد علوم و تحقیقات 1\nشهید بهشتی‌ 1\nislamic azad n/a south tehran 1\nazad tehran central branch 1\nايلام 1\nbabol noshirvani univesity of technology 1\ng 1\nchamran univ of ahwaz 1\nدانشگاه آزاد شهر قدس 1\nkiaun/akaraj branch 1\niau lahijan 1\nshahid beheshti unviersity 1\nشهید مدنی آذربایجان 1\nدانشگاه تبريز 1\nدانشگاه آزاد اسلامی واحد تهران مرکزی 1\nشریعتی 1\nadiban gramsar 1\nislamic azadn/atehran jonoob 1\nدانشگاه آزاد تهران 1\nتبریز آموزشکده 1\nshahid beheshti u 1\nia khomeini shahr 1\nislamic azad aliabad 1\nkashan azad 1\nامیرکبیر (پلی‌تکنیک تهران) 1\nازاد تهران مرکزی 1\nferdowai univ of mashhad 1\nرازي كرمانشاهn/a مهندسي الكترونيك 1\nart in tehran 1\nazadn/asouth tehran 1\nazad amol 1\nazad n/a tehran jonoob 1\nیاسوج 1\nدانشگاه آزاد اسلامی جنوب تهران 1\niau bushehr branch 1\nدانشگاه بین المللی امام خمینی 1\nدانشگاه رازي 1\ndaneshgah azad ghaemshahr 1\niran univ of sci and tech 1\nazad qazvin 1\nmcgill 1\nصنعتي نوشيرواني بابل 1\nعلوم پزشکی سمنان 1\nپيام نور مشهد 1\narakn/aiau 1\nshahid rajaee n/a srttu 1\nدانشگاه ازادn/a تهران جنوب 1\nَazad shabestar 1\niran science &amp; technologhy 1\nغیرانتفاعی صنعتی فولاد n/a فولادشهر 1\nآزاد تهران مرکز 1\nدانشگاه ازاد همدان 1\nsadjad e mashhad 1\nihu 1\napplied science and technolognces an 1\nابپتث 1\nlahijan azad 1\nyazd azad uni 1\nپيام نور 1\nازاد کرمانشاه 1\nعلمی کاربردی تبریز 1\nدانشگاه ارومیه 1\nدانشگاه آزاد اسلامی تبریز 1\nتبريز 1\nدانشگاه آزاد اسلامی واحد ن 1\nصنعتی اصهفان 1\nآزاد واحد تهران جنوب 1\nجمشید کاشانی 1\nuniv. of science and culture 1\nموسسه سجاد مشهد 1\nistanbul technical 1\nغیر انتفایی 1\nصنعتي اميركبير 1\nuoftehran 1\nmiddle east tech univ 1\npetroleum tech. 1\nimam khomeini qazvin 1\nscience and research,tehran 1\nshirazn/a islamic azad 1\nbonyan nonn/aprofit institute of high education 1\nislamic azad nishapur 1\nعلوم پزشکی گلستان 1\ntonekabon 1\nعلوم تحقیقات 1\nدانشگاه صنعتی سهند 1\nislamic azad west tehran 1\nدانشگاه صنعت آب و برق 1\nk. n. 
toosi technologh 1\nازاد تبریز 1\nkarajn/aazad 1\nuai arak 1\nدانشگاه آزاد اسلامی واحد رشت 1\nآزاد تهران مرکزی 1\niau,roudehen 1\nجندی شاپور دزفول 1\nsemnan iau 1\nصوفی رازی 1\ndamqan azad 1\nدانشگاه غيرانتفايي نيما 1\nazad birjand 1\nazad tehran north 1\nدانشگاه آزاد واحد نجف آباد 1\nazad chaloos 1\nعلم و وصنعت 1\nkhaje nasir tosi technology 1\nدانشگاه رازی 1\nzanjan azad 1\nvalin/a en/a asr rafsanjan 1\nislamic azad , shiraz 1\nصنعتی امیرکبیر ( پلی تکنیک ) 1\nجهاد دانشگاهی 1\nkhomeinin/ashahrn/aazad 1\nقزوین (آزاد) 1\nندارم 1\nnajaf abad 1\nazad, science research 1\nelmosanat 1\nتربیت دبیر شهید رجایی تهران 1\nislamic azad n/ascience &amp; research 1\nفني مهندسي آزاد مشهد 1\nدانشگاه آزاد اسلامی واحد قزوین 1\nazad varamin 1\nazad shabestar branch 1\nallameh mohaddese noori 1\nغیرانتفاعی بهمنیار کرمان 1\nشهید عباسپور(صنعت آب و برق) تهران 1\nisfahan univresity of technology 1\nazad damghan 1\nemam khomeini 1\niran science and technology (iust) 1\nislamic azad saveh 1\nدانشگاه شاهد (دولتی)(سهمیه آزاد) 1\nbahonar,kerman 1\namirkabir thechnology 1\niaun/ascience &amp; research 1\nدانشکده شریعتی تهران 1\nپیام نور اهواز 1\nغیر انتفاعی علامه رفیعی 1\nصنعتي سهند 1\nbabol technology 1\nَamirkabir 1\nbu ali sina 1\nپیام نوردامغان 1\nsistan &amp; baluchestan 1\nفنی دولتی در تهران 1\nazad oloom va tahghighat 1\nدانشگاه رازی کرمانشاه 1\ninstitute of advanced studies in basic sciences 1\nallameh tabatabaee 1\nznu 1\nشهید باهنر 1\nعلوم و تحقيقات 1\nهرمزگان سراسری 1\nislamic azad n/a central 1\nploytechnic 1\nهنر n/a تهران 1\nk.n.toosi tech 1\nصنعتی نوشیروانی بابل، روزانه 1\nدولتی زنجان 1\noloom tahghighat 1\nnajafabad branch, azad 1\narak uni 1\nusb 1\nدانشگاه آزاد اسلامی واحد تهران غرب 1\nilam azad 1\nbabol 1\nazad broujerd 1\nدانشکده فنی شمسی پور 1\nصنعت آب و برق (عباسپور) 1\nazad dezful 1\nsadjad institute of higher education, mashhad 1\nصنعت اب و برق شهید عباسپور 1\niau karaj 1\nلاهیجان 1\nislamic azad mashad 1\nدانشگاه ازاد ابهر 1\nmaybod azad 1\nازاد اسلامی واحد زنجان 1\nolom tahghighat 1\nshahid rajaee srttu 1\nجهاد دانشگاهی استان یزد 1\ni a u 1\nدخترانه شریعتی تهران 1\nyu 1\nازاد اسلامی کرج 1\nدانشگاه آزاد اسلامي واحد خرم آباد 1\nazad , north tehran branch 1\nazad n/a saveh branch 1\nدانشگاه ازاد اسلامى واحد مرودشت 1\nعلوم پزشکی مازندران 1\nemamkomeini 1\nazade bandare anzali 1\nشهید رجائی 1\ntums 1\nشهیدرجایی 1\ntabriz.a 1\nدانشگاه هرمزگان 1\nislamic azad south tehran 1\nsalman mashhad 1\nsooreh 1\nهنراسلامی تبریز 1\ncalgary 1\nعلامه محدث نوری 1\nsadjad 1\nدانشگاه آزاد اسلامی n/a تهران مرکز 1\nجندی شاپور 1\ndore moshtarak 1\niust (علم و صنعت) 1\niau, yazd 1\nazad , karaj branch 1\nislamic azad yazd 1\nعلوم و تحقيقات تهران 1\ntabriz technic institut 1\nدانشگاه آزاد تفرش 1\nگيلان 1\nkut 1\ntarbiyat moalem 1\nآزادn/aعلوم و تحقیقات 1\nisfahan univ. of tech 1\nkerman institution of higher education 1\nrazy 1\nدانشگاه آزاد اسلامی واحد تهران 1\nبین الملل امام خمینی قزوین 1\ntabriz medical sciences 1\nislamic azad n/a tehran south 1\nparand islamic azad uni 1\nislamic azad n/a qazvin 1\nدانشگاه آزاد اسلامی واحد نجف آباد 1\nدانشگاه سیستان و بلوچستان 1\nshahid bahonar kerman 1\nazad tehran 1\nخواجه نصیر طوسی 1\nسراسری کاشان 1\nزاهدان 1\nn.a 1\nislamic azad najaf abad 1\npolytechnic uni. of tehran 1\nدانشگاه تفرش (سراسري) 1\nعلوم اقتصادی 1\namirkabir universit technology 1\nصنغتی شریف 1\niau, lahijan 1\nسراسري شيراز 1\nازاد واحد پزشکی 1\napplied science and technology 1\namirkabir univ 1\nجندی شاپور اهواز 1\nwww 1\nqazvin i. 
azad 1\nاموزشکده+شهرستان 1\niau south tehran 1\ngheyre entefaii 1\nazadn/amashhad 1\nbihe 1\nبین المللی امام قزوین 1\nازاد ارسنجان 1\nيزد 1\njundin/ashapur tech. 1\narak azad 1\niau, rasht 1\nغیرانتفایی 1\nخیام 1\nدانشگاه (دولتی) کاشان 1\nislamic azad n/a shiraz 1\nاميرکبير 1\nصوفی رازی زنجان 1\nدانشگاه آزاد واحد قزوین 1\nعباسپور 1\nallameh tabatabai univ 1\nشریف. 1\nazad univ, khomeinishahr 1\niaum ( آزاد مشهد ) 1\nsharif tech. 1\nامير كبير 1\nazadn/a tehran markaz 1\nآزاد ماهشهر 1\nدانشگاه آزاد اسلامی واحد تهران مرکز 1\nkhajeh nasir toosi 1\nallame tabatabaei 1\nدانشگاه آزاد تهران شمال 1\nshomal , amol 1\nazad tehran jonoob 1\nsutech 1\nsistan &amp; baloochestan 1\nازادتهران شمال 1\nshahid rajaee (srttu) 1\nشهید عباسپور تهران 1\nغیر انتفاعی سجاد مشهد 1\nk.n.toosi technology 1\nموسسه غیرانتفاعی بهمنیار کرمان 1\nazad takestan branch 1\nشمسی پور 1\nسپاهان اصفهان 1\nazadn/asouth tehran branch 1\nمنهدسی نرم افزار 1\nغیر انتفاعی صدرا n/a تهران 1\nferdowsi mashd 1\nsadjad institute of higher ed 1\nدانشگاه تحصيلات تكميلي علوم پايه زنجان 1\nshiraz univesrity 1\nisf tech 1\nدانشگاه سمنان 1\nدانشگاه آزاد اسلامى واحد تهران پزشكى 1\nعلموصنعت 1\niran uni. of sci. &amp; tech. 1\nazad n/a north bra 1\nnorth branch islamic azad 1\nsadjad higher education institution 1\nknt 1\nimam reza n/a mashhad 1\nbirgand 1\nعلامث طباطبایی 1\nazad u tehran markaz 1\nuniv of tehran (mechanical) 1\nدانشگاه آزاد علوم پزشکی تهران 1\nحضرت معصومه قم 1\nuniv of guilan 1\niau arak 1\nُشریف 1\nmashhad medical sciences \\_mums 1\nazad univ north branch 1\nتربيت معلم تهران 1\nazadn/ascience&amp;research 1\nآزاد n/a علوم و تحقیقات 1\nbahonar technical shiraz 1\nazad bushehr 1\nuniv of tehran 1\nازاد بافق 1\nazadn/atehran markaz 1\nkit 1\nazad n/a tehran north 1\nدانشگاه صنعتي سهند تبريز 1\nتهران (فني) 1\nصنعتی مراغه 1\nزنجان(سراسری) 1\nدماوند 1\nislamic yazd 1\ngiulan 1\nآزاد واحد تهران مرکزی 1\nazadn/a hamedan 1\ns&amp;b 1\ntehran central branch azad uni 1\nislamic azad n/a borujerd 1\nazad univeristy of tabriz 1\nعلمی کاربردی گسترش انفورماتیک 1\nغیرانتفاعی سجاد 1\nazad olom daroi 1\nصنعتی بابل 1\nazad of takestan 1\nazad, central tehran 1\nَشریف 1\ninstitute of higher education 1\nsku 1\nدانشگاه بیرجند 1\nshiraz univ. of tech 1\nazad, north tehran 1\nدانشگاه آزاد تاکستان 1\nazad/tehran central branch 1\nخوارزمی 1\nدانشگاه مازدران 1\npayamen/anoor 1\nislamic azad shahrood 1\nمعماري آزاد مشهد 1\nqazvin azad uinvercity 1\nmazandaran medical sciences 1\ngheire entefaee 1\nجامع علمی کاربردی 1\nغیر انتفاعی مشهد 1\nدانشگاه باهنر کرمان 1\nدامغان 1\nشیراز آزاد 1\nazadn/alahijan 1\nfergusson 1\nسراسری علامه 1\nferdowsi univ mashhad 1\nart esfahan 1\nدولتی اصفهان 1\nقزوين 1\nامام صادق(ع) 1\nدانشگاه هنر 1\nَazad tabriz 1\nعلم و صنعت ایران iust 1\nazade ahvaz 1\nbou ali siana hamedan 1\nازاد تهران 1\nكرج 1\nsheikhbahaee 1\nimam khomeini international 1\niauu 1\namirkabir امیرکبیر 1\niran science and tech. 1\nآزاد n/a پرند 1\nدانشگاه آزادزاسلامی مشهد 1\nدانشگاه آزاد اسلامی واحد تبریز 1\nazadn/atehran jonub 1\nالزهرا n/a تهران 1\nshahid chamran ahwaz 1\niau khorasgan branch 1\nشمال n/a آمل 1\nvalin/aen/aasr uni. 
of rafsanjan 1\nدانشکده نقشه برداریn/aسازمان نقشه برداری 1\nsadjad technology 1\nدولتی اراک 1\nدولتی کاشان 1\nدانشگاه ارشاد دماوند 1\nصنعتی امیرکبیر (پلی‌تکنیک تهران) 1\nazad shiraz 1\nsbmu 1\nislamic azad , central tehran branch 1\nuast tehran 1\nazad ,najaf abad branch 1\nislamic azad tehran south branch 1\nallame tabatabayee 1\nsemnan uni 1\nامیرکبیر (پلی تکنیک تهران) 1\nazad n/a tehrann/a north 1\noloom pezeshki tehran 1\nsan jose 1\nَazad tehran south 1\nعلوم پایه دامغان 1\nآزاد اسلامی واحد تهران مرکز 1\nآزاد قائم شهر 1\nlimkokwing creative technology 1\nدانشکده سازمان جغرافیایی 1\ngorgan agricultural sciences 1\nutm 1\nazad tehrann/anorth 1\nislamic azad borujerd 1\nمرکز تحصیلات تکمیلی زنجان 1\nتهران پزشکی 1\nرجایی تهران 1\nدانشگاه آزاد دماوند 1\nazad sanandaj 1\nامیزکبیر 1\nعلوم اراک 1\nazadn/ateh markaz 1\nمیبد 1\nاهواز 1\nصنعتی همدان 1\nدانشگاه شهید بهشتی 1\nyasuj 1\nshariati 1\nazad north branch 1\niaut آزاد تبریز 1\nخواجه نصير الدين طوسي 1\nteschniche universität sharif 1\ndamqan 1\nsistan and balouchestan 1\nazad , tehran 1\nsocial welfare and rehabilitation sc 1\nfasa( dowlati) 1\nazad tehran south 1\nazad omidieh 1\nبوعلی سینا همدان 1\nكاشان 1\nk.n.toosi univ of tech 1\nدانشگاه آزاد فسا 1\nazad tehran central 1\nدانشگاه آزاد اسلامی واحد امیدیه 1\nعلم‌ و صنعت ایران 1\nدانشگاه صنعت آب و برق شهید عباسپور 1\nbojnourd 1\nامیرکبیر ، پلی تکنیک تهران 1\nغیرانتفاعی سوره تهران 1\nدولتی یزد 1\nدانشکده فنی n/a برق 1\nchemical engineering 1\nbut 1\nَدانشگاه شیراز 1\njahad n/a yazd 1\nazad garmsar 1\ncentral tehran 1\nkhavaran 1\niau central tehran branch 1\nپیام نور مشهد 1\nislamic azad mashhad 1\niau najafabad 1\nدانشگاه یزد 1\nsharif uni 1\nکرج 1\nدانشگاه آزاد خمینی شهر 1\nkhayyam mashhad 1\nbuali sina 1\nsomewhere 1\nخليج فارس 1\nazad n/akaraj 1\nدانشگاه آزاد واحد کرج 1\nعلوم پزشكي زنجان 1\nسراسری تهران 1\namerican beirut 1\nazadn/aen/alahijan 1\nپیام نور اردبیل 1\nدانشگاه شهرکرد 1\nisfahan tech. 1\nsaintn/apetersburg state institute of technology 1\nغیرانتفاعی اشرفی اصفهانی 1\nshahid beheshti univ. med. scin. 1\nصنعتی اميرکبير 1\nazad n/a tehran medical branch 1\nدانشگاه آزادn/aتهران جنوب 1\nshiraz u 1\nازاد خرم اباد 1\nموسسه آموزش عالی سجاد 1\nامام حسين 1\nدانشگاه آزاد ابهر 1\nعلم و صنعت ايران 1\nدانشگاه صنعتی امیرکبیر 1\nامیر کیبر 1\nجهرم 1\nروزبه زنجان 1\nعلامه 1\nislamic azad n/asouth tehran branch 1\nعلم و صنعت ایران ( واحد اراک) 1\nisfahan technologgy 1\nمرکز تحصیلات تکمیلی در علوم پایه زنجان 1\nkhayyam mashhad 1\nazad science&amp; research campus 1\nislamic azad khorasgan 1\nهرمزگان 1\nپاسارگاد شیراز 1\nدانشگاه آزاد بابل 1\nkhajeh nassir toosi 1\nislamic azad nour branch 1\neasy learning 1\nدانشگاه آزاد واحد جنوب 1\nhamedan 1\nyazd azad 1\nshz 1\npayame noor tehran 1\nدانشگاه آزاد واحد خمینی شهر 1\nazad , science &amp; research campus, tehran 1\nدانشگاه آزاد واحد دامغان 1\nسراسری بیرجند 1\nعلوم وتحقیقات فارس 1\nisfahan f technology 1\nگرگان 1\nazad khomeini shahr 1\nshahid chamtan 1\nal zahra, faculty of art 1\nislamic azad universiy of mashhad 1\nsharif ut 1\nislamic azad north tehran branch 1\nbremen 1\nislamic azad isfahan 1\nazad of parand 1\nدانشگاه پيام نور تهران 1\namerican world univercity 1\nislamic azad boroujerd 1\nازاد رامسر 1\nسراسری قزوین (امام خمینی) 1\nart isfahan 1\nراغب اصفهاني 1\namirkabirn/atehran polytechnic 1\nuot 1\nazad n/a marvdasht 1\nallame tabatbaei 1\ntabarestan private 1\nshahid bahonar uni. 
of kerman 1\nواحد علوم تحقیقات 1\nُsharif tech 1\nاورمیه 1\nشهید بشتی 1\nlulea 1\nعلوم و فنون مازندران 1\nقزوین 1\nدانشگاه شبراز 1\nغیر انتفاعی شیراز 1\nساری 1\nisfahan tech 1\nدانشگاه آزاد واحد مشهد 1\nchamran kerman 1\nواحد علوم و تحقیقات 1\nrazi univ. kermanshah 1\namirkabir uni. of tech. 1\nk.n.tossi tec 1\nآزاد اسلامی امیدیه 1\nazad u 1\nazad saveh 1\ntabriz,iau 1\namirkabir tehran 1\nَazad 1\nislamic azad univercity of najaf abad 1\nazad tehran medial science 1\nدانشگاه آزاد اسلامی تهران شمال 1\nسراسری قزوین 1\niau mashhad branch 1\nشهرستان سراسری 1\nkazeroun azad 1\nازاد رشت 1\nazad, mashhad branch 1\nthe art 1\nkar 1\nazad n/a najaf abad 1\nislamic azad n/a tehran north 1\nدانشگاه دولتی اراک 1\nu.b 1\nazad ghochan 1\nداتشگاه آزاد اسلامی نچف آباد 1\npetroleum tech 1\nazad n/a south tehran 1\niran science 7t 1\nmashhad institute of technology 1\nmit 1\nia science and research branch 1\nislamic azad firoozkooh 1\niau yazd 1\nkhayyam 1\nelmo sanat 1\nازاد اسلامی نجف آباد 1\npwut/iran 1\nazad science and research branch 1\nسراسری تبریز 1\nazad najaf abad 1\nshahidbeheshti 1\nazad boshehr 1\nazad mashhad 1\naiu 1\nguilan universuty 1\nجاسب 1\nazad khoy 1\neastern mediterranean univ 1\nبوعلی همدان 1\nazad takestan 1\nislamic azad science and research fars 1\nبرق کنترلn/a دانشگاه تهران 1\niaun/a south tehran branch 1\nwollongong in dubai 1\nshahrood univ of tech 1\nعلمی و کاربردی کارخانجات مخابراتی ایران n/a itmc 1\nazad univ ghazvin 1\nshahid chamran universtiy of ahvaz 1\nsanandaj azad 1\nazad tehran north branch 1\nuok 1\nسمنان (سراسری) 1\nferdowsi mashhsad 1\niau najaf abad 1\nپیام نور مرکز تبریز 1\nصنعتی خواجه نصیر طوسی 1\nپلی تکنیک بخارست، رومانی n/a معدل 7.67 از 10 1\nysuac/armenia 1\nپیام نور تهران 1\napplied science and technology 1\nدانشگاه آزاد اسلامی واحد سراب 1\nشیراز (دولتی) 1\nazadn/a tehran 1\nislamic azad ,tehran south branch 1\nدانشگاه آزاد اسلامیn/aمشهد 1\nشهید بهشتی تهران 1\npwut 1\nااصفهان 1\nکاردانی و کارشناسی ناپیوسته 1\ngirne american 1\nazad arak 1\nپور 1\ntehran art 1\nqau 1\nشهر ري 1\nدانشگاه آزاد اهواز 1\nصنعتی امیرکبیر (aut) 1\nazad tj 1\nerau 1\nصنعتی مالک اشتر 1\npayamn/aen/anoor karaj 1\niau neyriz 1\nهنر 1\ndolati tehran 1\nصنعتی خواجه نصیرالدین طوسی 1\nرازي 1\npolytechnic tehran 1\nسراسری شهرکرد 1\namirkabir univeristy of technology 1\nsharif univ 1\nusc 1\nغیرانتفاعی آبا 1\nislamic azad tj 1\nchabahar maritme 1\npayam golpayegan 1\nهنر اصفهان 1\niau abadan 1\nbeynolmelali e imam khomeynii.k.i.u 1\nگیلان (سراسری) 1\niran science &amp; technology(iust) 1\nkordestan 1\nدانشگاه آزاد اسلامی واحد همدان 1\nferdowsi univ. mashhad 1\numsa 1\nsahand ut 1\nkhaje nasir toosi technology 1\nدانشگاه آزاد اسلامی قزوین 1\nهدف ساری 1\nislamic azad lahijan 1\nsemnan univ 1\nazad central tehran branch 1\niauctb 1\nuromieh 1\nzihe 1\nbahonar kerman 1\nآزاد واحد تهران مركزي 1\nموسسه اموزش عالی زند 1\nصنعتی سجاد 1\nازاد واحد تهران شمال 1\nاراک دولتی 1\nlorestan 1\nleipzig 1\nدانشگاه آموزش عالی دارالفنون 1\nislamic azad najafabad branch 1\nدانشگاه آزاد اسلامی واحد میانه 1\nabbaspour 1\nsoore 1\nدانشگاه آزاد اسلامی واحدخمینی شهر 1\nآزاد خمینی شهر 1\nazad , tehran south branch 1\nntnu 1\nدانشگاه آزاد اسلامی واحد تهران مرکز ی 1\nkerman bahonar 1\nُراسری تهران 1\nدانشگاه آزاد اسلامی واحد اهواز 1\nعلم و صنعت (iust) 1\nعلم وصنعت ایران iust 1\nعلمی کاربردی شرکت داده پردازی ایران 1\nershad institute of higher education 1\nk.n. 
toosi 1\nislamic azad n/a karaj 1\nart and architecture 1\nolum tahghighat 1\nmahshahr 1\nazad , hamedan 1\nامام خمینی 1\nazad eslami bojnoord 1\nپلی تکنیک کی یف 1\nدانشگاه آزاد واحد تبریز 1\nName: uniBachelors, dtype: int64\n"
],
[
"df.uniBachelors.replace(u'دانشگاه','',inplace=True)",
"_____no_output_____"
],
[
"df.uniBachelors=df.uniBachelors.str.lower()\ndf['uniBachelorsOLD']=df.uniBachelors.copy()",
"_____no_output_____"
],
[
"def renamer(table):\n for i in df.index:\n f=df.get_value(i,table).strip()\n if (u'ازاد' in f) | (u'آزاد' in f)| ('azad' in f)| (u'مرکز' in f)| (u'شمال' in f)| ('iau' in f): \n df.set_value(i,table,'azad')\n elif (u'نور' in f) | (u'پیام' in f) |('payam' in f): \n df.set_value(i,table,'payam')\n elif (u'تربیت مدرس' in f) | ('modares' in f)| (u'مدرس' in f): \n df.set_value(i,table,'tarbiat-modares')\n elif (u'تبریز' in f) | ('tabriz' in f): \n df.set_value(i,table,'tabriz')\n elif (u'شیراز' in f) | ('shiraz' in f): \n df.set_value(i,table,'shiraz')\n elif (u'مشهد' in f) | (u'فردوسی' in f)| ('mashad' in f)| ('mashhad' in f) |('ferdowsi' in f): \n df.set_value(i,table,'mashhad')\n elif (u'شریف' in f) | ('sharif' in f):\n df.set_value(i,table,'sharif')\n elif (u'بهشتی' in f) | ('beheshti' in f)| ('sbu' in f):\n df.set_value(i,table,'beheshti')\n elif (u'امیرکبیر' in f) |(u'اميرکبير' in f) | (u'امير کبير' in f) | (u'کبیر' in f)| ('aut' in f)| ('amir' in f)| ('kabir' in f):\n df.set_value(i,table,'amir-kabir')\n elif (u'علم و صنعت' in f) | ('iust' in f) | ('iran science and technology' in f):\n df.set_value(i,table,'elmo-sanat')\n elif (u'علوم و تحقیقات' in f) |(u'علوم تحقیقات' in f) | ('srbiau' in f)| ('tahghighat' in f):\n df.set_value(i,table,'olom-tahghighat')\n elif (u'طوسی' in f) | (u'خواجه' in f)| ('kntu' in f)| ('khaje' in f)| ('nasir' in f)| ('tosi' in f)| ('tusi' in f)| ('toosi' in f):\n df.set_value(i,table,'toosi')\n elif (u'اصفهان' in f) | ('iut' in f)| ('isfahan' in f)| ('esfihan' in f)| ('esfahan' in f):\n df.set_value(i,table,'isfahan')\n elif (u'شاهرود' in f) | ('shahroodut' in f)| ('shahrod' in f):\n df.set_value(i,table,'shahroodut')\n elif (u'همدان' in f) | ('hamedan' in f)| ('hut' in f):\n df.set_value(i,table,'hamedan')\n elif (u'نفت' in f) | ('naft' in f)| ('put' in f)| ('oil' in f):\n df.set_value(i,table,'naft')\n elif (u'تهران' in f) | ('tehran' in f): \n df.set_value(i,table,'tehran')\n elif (u'شريف' in f) :\n df.set_value(i,table,'sharif')\n elif (u'گیلان' in f) | ('guilan' in f)| ('gilan' in f): \n df.set_value(i,table,'guilan')\n elif (u'ارومیه' in f) | ('urmia' in f)| ('oromiye' in f): \n df.set_value(i,table,'urmia')\n elif (u'الزهرا' in f) | ('zahra' in f): \n df.set_value(i,table,'al-zahra')\n elif (u'دولتی' in f) | ('dolati' in f): \n df.set_value(i,table,'dolati')\n elif (u'سمنان' in f) | ('semnan' in f): \n df.set_value(i,table,'semnan')\n elif (u'مازندران' in f) | ('mazandaran' in f)| ('shomal' in f):\n df.set_value(i,table,'mazandaran')\n elif (u'علامه' in f) | (u'طباطبایی' in f) | ('alame' in f):\n df.set_value(i,table,'alame-tabatabae')\n elif (u'زنجان' in f) | ('zanjan' in f):\n df.set_value(i,table,'zanjan')",
"_____no_output_____"
],
[
"renamer('uniBachelors')",
"_____no_output_____"
],
[
"df.uniBachelors.value_counts()",
"_____no_output_____"
],
[
"for i in df.index:\n f=df.get_value(i,'uniBachelors').strip()\n if f in ['tehran','sharif','beheshti','amir-kabir','tarbiat-modares','isfahan','elmo-sanat','toosi','mashhad','tabriz']:\n df.set_value(i,'highLevelBachUni',True)\n else:\n df.set_value(i,'highLevelBachUni',False)",
"_____no_output_____"
]
],
[
[
"# uniMasters",
"_____no_output_____"
]
],
[
[
"df['uniMastersOLD']=df.uniMasters.copy()\ndf.uniMasters.value_counts().head()",
"_____no_output_____"
],
[
"renamer('uniMasters')",
"_____no_output_____"
],
[
"df.uniMasters.value_counts()",
"_____no_output_____"
],
[
"for i in df.index:\n f=df.get_value(i,'uniMasters').strip()\n if f in ['tehran','sharif','beheshti','amir-kabir','tarbiat-modares','isfahan','elmo-sanat','toosi','mashhad','tabriz']:\n df.set_value(i,'highLevelMasterUni',True)\n else:\n df.set_value(i,'highLevelMasterUni',False)",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"df.to_json('Data_Uni.json',date_format='utf8')",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74bffcfa1fc16b059cef462225e39b187fc72f3 | 2,815 | ipynb | Jupyter Notebook | 04_getCikFilings.ipynb | ikedim01/secscan | 8b024fa6762f3a2b8fb1abcd08c365c7fc8fb568 | [
"Apache-2.0"
] | null | null | null | 04_getCikFilings.ipynb | ikedim01/secscan | 8b024fa6762f3a2b8fb1abcd08c365c7fc8fb568 | [
"Apache-2.0"
] | null | null | null | 04_getCikFilings.ipynb | ikedim01/secscan | 8b024fa6762f3a2b8fb1abcd08c365c7fc8fb568 | [
"Apache-2.0"
] | null | null | null | 21.992188 | 95 | 0.527886 | [
[
[
"# default_exp getCikFilings",
"_____no_output_____"
]
],
[
[
"# getCikFilings\n\n> Get filings list for a CIK using the SEC's RESTful API.",
"_____no_output_____"
]
],
[
[
"#hide\n%load_ext autoreload\n%autoreload 2\nfrom nbdev import show_doc",
"_____no_output_____"
],
[
"#export\n\nfrom secscan import utils",
"_____no_output_____"
]
],
[
[
"Download and parse a list of filings for a CIK:",
"_____no_output_____"
]
],
[
[
"#export\n\ndef getRecent(cik) :\n cik = str(cik).lstrip('0')\n restFilingsUrl = f'/submissions/CIK{cik.zfill(10)}.json'\n filingsJson = utils.downloadSecUrl(restFilingsUrl, restData=True, toFormat='json')\n recentList = filingsJson['filings']['recent']\n accNos = recentList['accessionNumber']\n print(len(accNos),'filings for',filingsJson['name'])\n fDates = [fDate.replace('-','') for fDate in recentList['filingDate']]\n return [(formType,accNo,fDate)\n for formType,accNo,fDate in zip(recentList['form'],accNos,fDates)]",
"_____no_output_____"
]
],
[
[
"Test downloading list of filings for a CIK:",
"_____no_output_____"
]
],
[
[
"testF = getRecent(83350)\nassert all(tup in testF for tup in (\n ('8-K', '0001437749-21-013386', '20210526'),\n ('10-Q', '0001437749-21-012377', '20210517')\n )),\"testing get recent CIK filings\"",
"254 filings for RESERVE PETROLEUM CO\n"
],
[
"#hide\n# uncomment and run to regenerate all library Python files\n# from nbdev.export import notebook2script; notebook2script()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74c23cc73d10b2b0d0a35df8ae002cf9c961c2d | 8,755 | ipynb | Jupyter Notebook | Untitled23.ipynb | Mohan-lal1993/quickstart | b0b2961c8201fea5b1f8fa67c137c9c8db2d392a | [
"MIT"
] | null | null | null | Untitled23.ipynb | Mohan-lal1993/quickstart | b0b2961c8201fea5b1f8fa67c137c9c8db2d392a | [
"MIT"
] | null | null | null | Untitled23.ipynb | Mohan-lal1993/quickstart | b0b2961c8201fea5b1f8fa67c137c9c8db2d392a | [
"MIT"
] | null | null | null | 28.990066 | 92 | 0.351456 | [
[
[
"# The set index() and reset and reset_index()",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"bond=pd.read_csv(\"jamesbond.csv\")",
"_____no_output_____"
],
[
"bond.set_index([\"Film\"],inplace=True)\n",
"_____no_output_____"
],
[
"bond.head()",
"_____no_output_____"
],
[
"bond.reset_index().head()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e74c2b5365bd747e4f327fcf6eedce3b8d94e73b | 12,523 | ipynb | Jupyter Notebook | notebooks/Advanced-Notebook-Tricks.ipynb | ddeloss/jupyter-tips-and-tricks | e47d5e5d07d2b9c9ba0b0f8101e8416ae7dd7cbf | [
"MIT"
] | 260 | 2015-09-11T15:57:35.000Z | 2022-01-13T23:42:13.000Z | notebooks/Advanced-Notebook-Tricks.ipynb | Lukematic/jupyter-tips-and-tricks | c0b9f6cf6c422146743ebb53fba8914ade5611f5 | [
"MIT"
] | null | null | null | notebooks/Advanced-Notebook-Tricks.ipynb | Lukematic/jupyter-tips-and-tricks | c0b9f6cf6c422146743ebb53fba8914ade5611f5 | [
"MIT"
] | 144 | 2015-09-10T15:13:43.000Z | 2021-12-28T01:43:24.000Z | 21.082492 | 152 | 0.522878 | [
[
[
"# Advanced Notebook",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.tools.plotting import scatter_matrix\nfrom sklearn.datasets import load_boston\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_context('poster')\nsns.set_style('whitegrid')\nplt.rcParams['figure.figsize'] = 12, 8 # plotsize\n\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"# BQPlot\n\nExamples here are shamelessly stolen from the amazing: https://github.com/maartenbreddels/jupytercon-2017/blob/master/jupytercon2017-widgets.ipynb",
"_____no_output_____"
]
],
[
[
"# mixed feelings about this import\nimport bqplot.pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"x = np.linspace(0, 2, 50)\ny = x**2",
"_____no_output_____"
],
[
"fig = plt.figure()\nscatter = plt.scatter(x, y)\nplt.show()",
"_____no_output_____"
],
[
"fig.animation_duration = 500\nscatter.y = 2 * x**.5",
"_____no_output_____"
],
[
"scatter.selected_style = {'stroke':'red', 'fill': 'orange'}\nplt.brush_selector();",
"_____no_output_____"
],
[
"scatter.selected",
"_____no_output_____"
],
[
"scatter.selected = [1,2,10,40]",
"_____no_output_____"
]
],
[
[
"## ipyvolume",
"_____no_output_____"
]
],
[
[
"import ipyvolume as ipv",
"_____no_output_____"
],
[
"N = 1000\nx, y, z = np.random.random((3, N))",
"_____no_output_____"
],
[
"fig = ipv.figure()\nscatter = ipv.scatter(x, y, z, marker='box')\nipv.show()",
"_____no_output_____"
],
[
"scatter.x = scatter.x - 0.5",
"_____no_output_____"
],
[
"scatter.x = x",
"_____no_output_____"
],
[
"scatter.color = \"green\"\nscatter.size = 5",
"_____no_output_____"
],
[
"scatter.color = np.random.random((N,3))",
"_____no_output_____"
],
[
"scatter.size = 2",
"_____no_output_____"
],
[
"ex = ipv.datasets.animated_stream.fetch().data",
"_____no_output_____"
],
[
"ex.shape",
"_____no_output_____"
],
[
"ex[:, ::, ::4].shape",
"_____no_output_____"
],
[
"ipv.figure()\nipv.style.use('dark')\nquiver = ipv.quiver(*ipv.datasets.animated_stream.fetch().data[:,::,::4], size=5)\nipv.animation_control(quiver, interval=200)\nipv.show()\nipv.style.use('light')",
"_____no_output_____"
],
[
"ipv.style.use('light')",
"_____no_output_____"
],
[
"quiver.geo = \"cat\"",
"_____no_output_____"
],
[
"N = 1000*1000\nx, y, z = np.random.random((3, N)).astype('f4')",
"_____no_output_____"
],
[
"ipv.figure()\ns = ipv.scatter(x, y, z, size=0.2)\nipv.show()",
"_____no_output_____"
],
[
"ipv.save(\"3d-example-plot.html\")",
"_____no_output_____"
],
[
"!open 3d-example-plot.html",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74c2d296e714d3ee1c4357e2eeffecb6a94a93d | 31,266 | ipynb | Jupyter Notebook | FinalProjectAI.ipynb | VictorCanLima/NeuralNetworkEEG | ccd1890327173cf4f9838fce85add0c988c84dfd | [
"MIT"
] | null | null | null | FinalProjectAI.ipynb | VictorCanLima/NeuralNetworkEEG | ccd1890327173cf4f9838fce85add0c988c84dfd | [
"MIT"
] | null | null | null | FinalProjectAI.ipynb | VictorCanLima/NeuralNetworkEEG | ccd1890327173cf4f9838fce85add0c988c84dfd | [
"MIT"
] | null | null | null | 46.115044 | 445 | 0.453272 | [
[
[
"<a href=\"https://colab.research.google.com/github/VictorCanLima/NeuralNetworkEEG/blob/main/FinalProjectAI.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# **Neural Network**\n## **Final Project**\n### Computer Systems Engineering\n### LIS3082-1 Artificial Intelligence\n### By Victor Armando Canales Lima (162328)\n### Professor Gerardo Ayala San Martín\n### Department of Computing, Electronics and Mechatronics\n### Universidad de las Am ́ericas Puebla, San Andr ́es Cholula, Puebla, México\n### May 14, 2021\n\n\n\n\n",
"_____no_output_____"
],
[
"In this code we show the application of a Simple Neural Network to classify eeg motor imagery signals (left hand movement and right hand movement)\n\nFirst we do is to install the libraries we will need, make sure we can access the data tests. A folder with data samples should be given with this code. Put the folder in your Google Drive if you want to test this notebook from Google Colab, or leave it in the same directory than the notebook or python file you use to run this code.",
"_____no_output_____"
],
[
"## Data Preprocessing",
"_____no_output_____"
]
],
[
[
"!pip install numpy matplotlib scipy numba scikit-learn mne PyWavelets pandas\n!pip install mne-features",
"Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (1.19.5)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (3.2.2)\nRequirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (1.4.1)\nRequirement already satisfied: numba in /usr/local/lib/python3.7/dist-packages (0.51.2)\nRequirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (0.22.2.post1)\nCollecting mne\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/60/f7/2bf5de3fad42b66d00ee27539bc3be0260b4e66fdecc12f740cdf2daf2e7/mne-0.23.0-py3-none-any.whl (6.9MB)\n\u001b[K |████████████████████████████████| 7.0MB 5.0MB/s \n\u001b[?25hRequirement already satisfied: PyWavelets in /usr/local/lib/python3.7/dist-packages (1.1.1)\nRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (1.1.5)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib) (2.4.7)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib) (1.3.1)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib) (0.10.0)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib) (2.8.1)\nRequirement already satisfied: llvmlite<0.35,>=0.34.0.dev0 in /usr/local/lib/python3.7/dist-packages (from numba) (0.34.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from numba) (56.1.0)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn) (1.0.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas) (2018.9)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from cycler>=0.10->matplotlib) (1.15.0)\nInstalling collected packages: mne\nSuccessfully installed mne-0.23.0\nCollecting mne-features\n Downloading https://files.pythonhosted.org/packages/07/3d/443195bc22d7b5ae118cef2fdf969714077c3013d56b5bd609a76c40837d/mne_features-0.1-py3-none-any.whl\nRequirement already satisfied: mne in /usr/local/lib/python3.7/dist-packages (from mne-features) (0.23.0)\nRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from mne-features) (1.1.5)\nRequirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from mne-features) (1.4.1)\nRequirement already satisfied: PyWavelets in /usr/local/lib/python3.7/dist-packages (from mne-features) (1.1.1)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from mne-features) (1.19.5)\nRequirement already satisfied: numba in /usr/local/lib/python3.7/dist-packages (from mne-features) (0.51.2)\nRequirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (from mne-features) (0.22.2.post1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->mne-features) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->mne-features) (2.8.1)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from numba->mne-features) (56.1.0)\nRequirement already satisfied: llvmlite<0.35,>=0.34.0.dev0 in /usr/local/lib/python3.7/dist-packages (from numba->mne-features) (0.34.0)\nRequirement already satisfied: 
joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->mne-features) (1.0.1)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas->mne-features) (1.15.0)\nInstalling collected packages: mne-features\nSuccessfully installed mne-features-0.1\n"
],
[
"from mne_features.univariate import compute_hjorth_complexity_spect as hjorthComp\nfrom mne_features.univariate import compute_hjorth_mobility_spect as hjorthMob\nfrom mne_features.univariate import compute_ptp_amp as ptp_amp\nfrom sklearn.model_selection import train_test_split as tts\nfrom scipy.io import loadmat as load\nfrom scipy import signal\nimport pandas as pd\nimport numpy as np\n\n\ntry:#If Running in a Google Colaboratory Notebook\n from google.colab import drive\n drive.mount('/content/drive')\n rawdatapath = '/content/drive/MyDrive/data/S4.mat'\nexcept:#If Running in a Local Jupyter Notebook\n rawdatapath = 'data/S4.mat'\n",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
]
],
[
[
"We create a class to import and preprocess the signal, we apply filters to clean the brain signal samples, we have 2 classes, right hand movement and left hand movement. ",
"_____no_output_____"
]
],
[
[
"class EEG_Signal_Handler():\n def __init__(self, datapath):\n self.data = load(datapath) # Importando datos\n self.C1=np.array(self.data['C1']) # Movimiento Mano Izquierda 1\n self.C2=np.array(self.data['C2']) # Movimiento Mano Derecha 2\n self.channels=len(self.C1[:,0,0]) # Numero de canales\n self.samples=len(self.C1[0,:,0]) # Número de muestras por experimento\n temp = np.arange(abs(len(self.C1[0,0,:])-len(self.C2[0,0,:])))\n if len(self.C1[0,0,:]) > len(self.C2[0,0,:]):\n self.C1 = np.delete(self.C1,temp,axis=2)\n else:\n self.C2 = np.delete(self.C2,temp,axis=2)\n self.experiments=len(self.C1[0,0,:])\n self.filter('highpass', 1)\n self.filter('lowpass', 30)\n def filter(self,filttype,cutfreq):\n Wn = cutfreq/(250/2)\n num, den = signal.butter(5,Wn,filttype)\n for i in range(self.experiments):\n self.C1[:,:,i] = signal.filtfilt(num,den,self.C1[:,:,i],1)\n self.C2[:,:,i] = signal.filtfilt(num,den,self.C2[:,:,i],1)",
"_____no_output_____"
],
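[
"# Editor's sketch (not from the original notebook): a self-contained check of the 1-30 Hz\n# Butterworth band used by EEG_Signal_Handler above, assuming the same 250 Hz sampling rate\n# and filter order. All names below are illustrative only.\nimport numpy as np\nfrom scipy import signal\n\nfs = 250.0 # assumed sampling rate, matching the 250 hard-coded in the class above\nt = np.arange(0, 2, 1/fs)\n# synthetic trace: 10 Hz component inside the band, 0.2 Hz drift and 60 Hz noise outside it\nx = np.sin(2*np.pi*10*t) + 2*np.sin(2*np.pi*0.2*t) + 0.5*np.sin(2*np.pi*60*t)\n\nb_hp, a_hp = signal.butter(5, 1/(fs/2), 'highpass')\nb_lp, a_lp = signal.butter(5, 30/(fs/2), 'lowpass')\ny = signal.filtfilt(b_lp, a_lp, signal.filtfilt(b_hp, a_hp, x))\n\nprint('std before filtering:', np.std(x))\nprint('std after 1-30 Hz band-pass:', np.std(y)) # roughly the 10 Hz component alone (~0.71)",
"_____no_output_____"
],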
[
"mysig = EEG_Signal_Handler(rawdatapath)",
"_____no_output_____"
]
],
[
[
"## Feature Extraction",
"_____no_output_____"
],
[
"Now we extract the features we are going to use. We have seven different features, but as we have 3 channels (eeg sensors), we have 3 different signals, in this class, we create our datasets to train and test our model. We will have 21 different inputs for our model, one for each channel and one for each feature we extract from the signal. Some of the features are extracted from the library developed by MNE-Features Developers.\n\nExtracted features are:\n\n\n1. Root Mean Value\n2. Variance\n3. Standard Deviation\n4. Mean\n5. Hjorth Mobility\n6. Hjorth Complexity\n7. Peak to Peak Amplitude\n\n",
"_____no_output_____"
]
],
[
[
"class Dataset_Creator():\n def __init__(self,class1,class2):\n featsC1 = self.get_features(class1)\n featsC2 = self.get_features(class2)\n labelsC1 = np.ones(mysig.experiments)\n labelsC2 = np.ones(mysig.experiments)\n self.x_complete = np.concatenate((featsC1,featsC2),axis=0)\n self.y_complete = np.concatenate((labelsC1,labelsC2))\n self.y_complete = np.concatenate((labelsC1,labelsC2))\n self.x_train, self.x_test = None, None\n self.y_train, self.y_test = None, None\n self.test_split()\n def test_split(self):\n X,x,Y,y = tts(self.x_complete,self.y_complete,test_size=0.2) \n self.x_train, self.x_test = X,x\n self.y_train, self.y_test = Y,y\n def get_features(self,c):\n feats = np.empty((mysig.experiments,21))\n for i in range(mysig.experiments):\n feats[i,0] = np.sqrt(np.mean(c[0,:,i]**2))\n feats[i,1] = np.sqrt(np.mean(c[1,:,i]**2))\n feats[i,2] = np.sqrt(np.mean(c[2,:,i]**2))\n feats[i,3] = np.var(c[0,:,i])\n feats[i,4] = np.var(c[1,:,i])\n feats[i,5] = np.var(c[2,:,i])\n feats[i,6] = np.std(c[0,:,i])\n feats[i,7] = np.std(c[1,:,i])\n feats[i,8] = np.std(c[2,:,i])\n feats[i, 9] = np.mean(c[0,:,i])\n feats[i,10] = np.mean(c[1,:,i])\n feats[i,11] = np.mean(c[2,:,i])\n feats[i,12] = hjorthMob(250,c[0,:,i])\n feats[i,13] = hjorthMob(250,c[1,:,i])\n feats[i,14] = hjorthMob(250,c[2,:,i])\n feats[i,15] = hjorthComp(250,c[0,:,i])\n feats[i,16] = hjorthComp(250,c[1,:,i])\n feats[i,17] = hjorthComp(250,c[2,:,i])\n feats[i,18] = ptp_amp(c[0,:,i])\n feats[i,19] = ptp_amp(c[1,:,i])\n feats[i,20] = ptp_amp(c[2,:,i])\n return feats",
"_____no_output_____"
],
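[
"# Editor's sketch (illustrative only): the time-domain features listed above, computed on a\n# tiny synthetic segment so their definitions are explicit. The Hjorth features used in the\n# class above come from mne_features and are not recomputed here.\nimport numpy as np\n\nseg = np.sin(2*np.pi*10*np.arange(0, 1, 1/250.0)) # one second at an assumed 250 Hz\n\nrms = np.sqrt(np.mean(seg**2)) # root mean square value\nvar = np.var(seg) # variance\nstd = np.std(seg) # standard deviation\nmean = np.mean(seg) # mean\nptp = np.max(seg) - np.min(seg) # peak-to-peak amplitude\n\nprint(rms, var, std, mean, ptp)",
"_____no_output_____"
],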
[
"myfeatures = Dataset_Creator(mysig.C1,mysig.C2)",
"_____no_output_____"
]
],
[
[
"## Defining Neural Network",
"_____no_output_____"
],
[
"Now we define our Neural Network, inspired in the tutorial of the official YouTube channel of TensorFlow You. Our model will have 2 hiden layers, Rectifier activation function and as decision fuction for classification, the sigmoid fuction. As error function, we pick a binary crossentropy function, an as gain function, RMSprop, a gradient based optimization technique.",
"_____no_output_____"
]
],
[
[
"from keras.models import Sequential\nfrom keras.layers import Dense",
"_____no_output_____"
],
[
"classifier = Sequential() # Initialising the ANN\n\nclassifier.add(Dense(units = 11, activation = 'relu', input_dim = 21))\nclassifier.add(Dense(units = 6, activation = 'relu'))\nclassifier.add(Dense(units = 3, activation = 'relu'))\nclassifier.add(Dense(units = 1, activation = 'sigmoid'))",
"_____no_output_____"
],
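[
"# Editor's addition: inspect the network defined above. classifier.summary() prints the layer\n# shapes, and the manual count below mirrors the 21 -> 11 -> 6 -> 3 -> 1 architecture\n# (weights + biases per Dense layer): 242 + 72 + 21 + 4 = 339 trainable parameters.\nsizes = [21, 11, 6, 3, 1]\nparams = sum(n_in*n_out + n_out for n_in, n_out in zip(sizes[:-1], sizes[1:]))\nprint('expected trainable parameters:', params)\nclassifier.summary()",
"_____no_output_____"
],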
[
"classifier.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy')",
"_____no_output_____"
]
],
[
[
"## Trainning",
"_____no_output_____"
]
],
[
[
"classifier.fit(myfeatures.x_train, myfeatures.y_train, batch_size = 1, epochs = 100)",
"Epoch 1/100\n316/316 [==============================] - 1s 1ms/step - loss: 9634.6702\nEpoch 2/100\n316/316 [==============================] - 0s 1ms/step - loss: 20.1148\nEpoch 3/100\n316/316 [==============================] - 0s 1ms/step - loss: 0.6192\nEpoch 4/100\n316/316 [==============================] - 0s 1ms/step - loss: 6.9944e-32\nEpoch 5/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.2891e-33\nEpoch 6/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.3675e-33\nEpoch 7/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.6159e-33\nEpoch 8/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.1839e-33\nEpoch 9/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.9713e-32\nEpoch 10/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.0692e-32\nEpoch 11/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.1686e-32\nEpoch 12/100\n316/316 [==============================] - 0s 1ms/step - loss: 7.3244e-33\nEpoch 13/100\n316/316 [==============================] - 0s 990us/step - loss: 5.5224e-33\nEpoch 14/100\n316/316 [==============================] - 0s 997us/step - loss: 2.8732e-32\nEpoch 15/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.7619e-32\nEpoch 16/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.1450e-32\nEpoch 17/100\n316/316 [==============================] - 0s 1ms/step - loss: 8.0210e-33\nEpoch 18/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.1353e-32\nEpoch 19/100\n316/316 [==============================] - 0s 1ms/step - loss: 5.5224e-33\nEpoch 20/100\n316/316 [==============================] - 0s 1ms/step - loss: 8.7644e-33\nEpoch 21/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.7935e-33\nEpoch 22/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.9741e-33\nEpoch 23/100\n316/316 [==============================] - 0s 1ms/step - loss: 6.5428e-33\nEpoch 24/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.1340e-33\nEpoch 25/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.7274e-33\nEpoch 26/100\n316/316 [==============================] - 0s 1000us/step - loss: 4.9091e-33\nEpoch 27/100\n316/316 [==============================] - 0s 1ms/step - loss: 5.3517e-33\nEpoch 28/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.4848e-32\nEpoch 29/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.7253e-32\nEpoch 30/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.8667e-32\nEpoch 31/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.4224e-32\nEpoch 32/100\n316/316 [==============================] - 0s 1ms/step - loss: 4.9711e-32\nEpoch 33/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.1648e-32\nEpoch 34/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.6788e-33\nEpoch 35/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.5499e-32\nEpoch 36/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.8384e-33\nEpoch 37/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.7830e-32\nEpoch 38/100\n316/316 [==============================] - 0s 1ms/step - loss: 7.7370e-33\nEpoch 39/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.0995e-33\nEpoch 40/100\n316/316 [==============================] - 0s 1ms/step - loss: 
2.0575e-33\nEpoch 41/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.2371e-32\nEpoch 42/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.7420e-32\nEpoch 43/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.2891e-33\nEpoch 44/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.5660e-33\nEpoch 45/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.2264e-32\nEpoch 46/100\n316/316 [==============================] - 0s 1ms/step - loss: 6.1718e-33\nEpoch 47/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.1340e-33\nEpoch 48/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.7274e-33\nEpoch 49/100\n316/316 [==============================] - 0s 1ms/step - loss: 7.5532e-34\nEpoch 50/100\n316/316 [==============================] - 0s 1ms/step - loss: 6.9944e-32\nEpoch 51/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.7529e-34\nEpoch 52/100\n316/316 [==============================] - 0s 1ms/step - loss: 5.6958e-33\nEpoch 53/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.8175e-34\nEpoch 54/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.7619e-32\nEpoch 55/100\n316/316 [==============================] - 0s 1ms/step - loss: 4.0237e-33\nEpoch 56/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.0791e-32\nEpoch 57/100\n316/316 [==============================] - 0s 1ms/step - loss: 4.8550e-33\nEpoch 58/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.4609e-32\nEpoch 59/100\n316/316 [==============================] - 0s 1ms/step - loss: 7.2571e-33\nEpoch 60/100\n316/316 [==============================] - 0s 1ms/step - loss: 7.8073e-33\nEpoch 61/100\n316/316 [==============================] - 0s 1ms/step - loss: 4.0738e-33\nEpoch 62/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.6062e-33\nEpoch 63/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.9212e-32\nEpoch 64/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.0239e-32\nEpoch 65/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.2690e-33\nEpoch 66/100\n316/316 [==============================] - 0s 1ms/step - loss: 4.2768e-33\nEpoch 67/100\n316/316 [==============================] - 0s 1ms/step - loss: 4.5359e-33\nEpoch 68/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.5144e-32\nEpoch 69/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.5144e-32\nEpoch 70/100\n316/316 [==============================] - 0s 1ms/step - loss: 6.8113e-34\nEpoch 71/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.0791e-32\nEpoch 72/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.0736e-32\nEpoch 73/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.1371e-32\nEpoch 74/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.5822e-33\nEpoch 75/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.0332e-32\nEpoch 76/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.9287e-33\nEpoch 77/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.3575e-32\nEpoch 78/100\n316/316 [==============================] - 0s 1ms/step - loss: 8.3002e-34\nEpoch 79/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.4862e-33\nEpoch 80/100\n316/316 [==============================] - 0s 1ms/step - 
loss: 4.0738e-33\nEpoch 81/100\n316/316 [==============================] - 0s 1ms/step - loss: 5.7077e-34\nEpoch 82/100\n316/316 [==============================] - 0s 1ms/step - loss: 6.9920e-33\nEpoch 83/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.7406e-32\nEpoch 84/100\n316/316 [==============================] - 0s 1ms/step - loss: 6.2941e-33\nEpoch 85/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.8296e-32\nEpoch 86/100\n316/316 [==============================] - 0s 1ms/step - loss: 4.9780e-34\nEpoch 87/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.2193e-32\nEpoch 88/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.9949e-32\nEpoch 89/100\n316/316 [==============================] - 0s 1ms/step - loss: 2.8175e-34\nEpoch 90/100\n316/316 [==============================] - 0s 1ms/step - loss: 6.0743e-34\nEpoch 91/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.0190e-33\nEpoch 92/100\n316/316 [==============================] - 0s 1ms/step - loss: 4.1749e-33\nEpoch 93/100\n316/316 [==============================] - 0s 1ms/step - loss: 4.1242e-33\nEpoch 94/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.4070e-33\nEpoch 95/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.2159e-32\nEpoch 96/100\n316/316 [==============================] - 0s 1ms/step - loss: 3.3324e-32\nEpoch 97/100\n316/316 [==============================] - 0s 1ms/step - loss: 1.4099e-32\nEpoch 98/100\n316/316 [==============================] - 0s 1ms/step - loss: 8.0210e-33\nEpoch 99/100\n316/316 [==============================] - 0s 1ms/step - loss: 7.1238e-33\nEpoch 100/100\n316/316 [==============================] - 0s 1ms/step - loss: 9.4792e-33\n"
]
],
[
[
"## Testing",
"_____no_output_____"
]
],
[
[
"Y_pred = classifier.predict(myfeatures.x_test)\nY_pred = [ 1 if y>=0.5 else 0 for y in Y_pred ]\n\ntotal = 0\ncorrect = 0\nwrong = 0\nfor i in Y_pred:\n total=total+1\n if(myfeatures.y_test[i] == Y_pred[i]):\n correct=correct+1\n else:\n wrong=wrong+1\n\nprint(\"Total \" + str(total))\nprint(\"Correct \" + str(correct))\nprint(\"Wrong \" + str(wrong))",
"Total 80\nCorrect 80\nWrong 0\n"
]
],
[
[
"As we can see, we have a 100% accuracy. This is very hard to get with EEG signals, but the use of multiple features and the power of neural networks allow this to happen.\n\nNeverthlesNevertheless, for real life application, this is not really a good option as Neural Networks implie high computational costs. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74c30fb7262167c958597ac2c83bbf4a3fb58d0 | 6,240 | ipynb | Jupyter Notebook | nn/neuralPlay.ipynb | jef5ez/jefNets | d13924068d853be8b97e01f313aab4c6f26d20e9 | [
"Unlicense"
] | null | null | null | nn/neuralPlay.ipynb | jef5ez/jefNets | d13924068d853be8b97e01f313aab4c6f26d20e9 | [
"Unlicense"
] | null | null | null | nn/neuralPlay.ipynb | jef5ez/jefNets | d13924068d853be8b97e01f313aab4c6f26d20e9 | [
"Unlicense"
] | null | null | null | 23.197026 | 82 | 0.490224 | [
[
[
"from neuralnet import *\nfrom activations import *\nimport numpy as np",
"_____no_output_____"
],
[
"layer1 = Layer(\"layer1\", 3, 1, LinearActivation(), SimpleLearner(0.005))",
"_____no_output_____"
],
[
"data = np.random.uniform(0, 10, (100,3))\ntargets = data[:, -1].reshape((100,1))",
"_____no_output_____"
],
[
"training = zip(data, targets)",
"_____no_output_____"
],
[
"nn = NeuralNet([layer1], MeanSquaredError(), 0.9)",
"_____no_output_____"
],
[
"nn.train(training, 50)",
"epoch: 0\ncost:3.16907548733\nepoch: 1\ncost:1.81998410289\nepoch: 2\ncost:1.2587013738\nepoch: 3\ncost:0.942421398266\nepoch: 4\ncost:0.707602424938\nepoch: 5\ncost:0.56441135274\nepoch: 6\ncost:0.425360940911\nepoch: 7\ncost:0.319993936035\nepoch: 8\ncost:0.261747508807\nepoch: 9\ncost:0.212195380857\nepoch: 10\ncost:0.173756030171\nepoch: 11\ncost:0.122180152283\nepoch: 12\ncost:0.100535501767\nepoch: 13\ncost:0.0780195817168\nepoch: 14\ncost:0.0583299748963\nepoch: 15\ncost:0.0449164990371\nepoch: 16\ncost:0.0353347997375\nepoch: 17\ncost:0.0305565341201\nepoch: 18\ncost:0.0230029677286\nepoch: 19\ncost:0.0185571221061\nepoch: 20\ncost:0.0152225740659\nepoch: 21\ncost:0.0123142346698\nepoch: 22\ncost:0.0104881207325\nepoch: 23\ncost:0.00817393799651\nepoch: 24\ncost:0.00716153404696\nepoch: 25\ncost:0.00550355998767\nepoch: 26\ncost:0.00462543901228\nepoch: 27\ncost:0.00403649439196\nepoch: 28\ncost:0.00347731711206\nepoch: 29\ncost:0.00306094533208\nepoch: 30\ncost:0.00274912986588\nepoch: 31\ncost:0.00243458840639\nepoch: 32\ncost:0.00216610160523\nepoch: 33\ncost:0.00209072244731\nepoch: 34\ncost:0.00179946740704\nepoch: 35\ncost:0.00175951953511\nepoch: 36\ncost:0.00154790941192\nepoch: 37\ncost:0.0013557209352\nepoch: 38\ncost:0.00121621839265\nepoch: 39\ncost:0.00120535820758\nepoch: 40\ncost:0.0011936592352\nepoch: 41\ncost:0.00117897318199\nepoch: 42\ncost:0.00112183111794\nepoch: 43\ncost:0.00106810801166\nepoch: 44\ncost:0.000988904376303\nepoch: 45\ncost:0.00100936455839\n"
],
[
"layer1.weights",
"_____no_output_____"
],
[
"layer = Layer(\"layer1\", 3, 3, TanhActivation())\nlayer2 = Layer(\"layer2\", 3, 1, LinearActivation())\nnn = NeuralNet([layer, layer2], MeanSquaredError(), 0)\n\nnumGrads = nn.numerical_gradient(data, targets, 1e-5)\ngrads = nn.get_gradients(data, targets)\n\nzipped = zip(numGrads, grads)\n\nnumGrad, grad = zipped[0]\nsub = numGrad - grad\nplus = numGrad + grad\n\nnumGrad2, grad2 = zipped[-1]\nsub2 = numGrad2 - grad2\nplus2 = numGrad2 + grad2\n\nprint numGrads\nprint grads",
"[array([[ 0.26065202, 0.12895171, -0.20813425],\n [ 1.35893086, 0.666569 , -1.02909514],\n [ 1.29858404, 0.62248331, -1.07620399],\n [ 1.83837481, 0.90952504, -1.44492936]]), array([[-5.31237668],\n [-0.08146868],\n [ 1.4177473 ],\n [ 1.77192302]])]\n[array([[ 0.26065202, 0.12895171, -0.20813425],\n [ 1.35893087, 0.666569 , -1.02909514],\n [ 1.29858404, 0.62248331, -1.07620399],\n [ 1.83837481, 0.90952504, -1.44492936]]), array([[-5.31237668],\n [-0.08146868],\n [ 1.4177473 ],\n [ 1.77192302]])]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74c4ea1f42d587f8f1aa88b341183f9c80212dd | 6,527 | ipynb | Jupyter Notebook | code/notebooks/2019-6-1_threshold_model.ipynb | davidwagner/bagnet-patch-defense | 0e38d26cf6e082baf4de89d0cdfece6ba15573eb | [
"BSD-3-Clause"
] | 1 | 2022-03-30T16:38:46.000Z | 2022-03-30T16:38:46.000Z | code/notebooks/2019-6-1_threshold_model.ipynb | davidwagner/bagnet-patch-defense | 0e38d26cf6e082baf4de89d0cdfece6ba15573eb | [
"BSD-3-Clause"
] | null | null | null | code/notebooks/2019-6-1_threshold_model.ipynb | davidwagner/bagnet-patch-defense | 0e38d26cf6e082baf4de89d0cdfece6ba15573eb | [
"BSD-3-Clause"
] | null | null | null | 6,527 | 6,527 | 0.66968 | [
[
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import os\nfrom google.colab import drive\ndrive.mount('/content/gdrive')\nos.chdir('/content/gdrive/My Drive/dl-security/') #Change the path to the directory that contains all code and data",
"Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount(\"/content/gdrive\", force_remount=True).\n"
],
[
"#!pip install https://github.com/bethgelab/foolbox/archive/master.zip",
"_____no_output_____"
],
[
"from bagnets.utils import plot_heatmap, generate_heatmap_pytorch\nfrom bagnets.utils import pad_image, convert2channel_last, imagenet_preprocess, extract_patches, bagnet_predict, compare_heatmap\nfrom bagnets.utils import bagnet33_debug, plot_saliency, compute_saliency_map\nfrom bagnets.utils import get_topk_acc, validate\nfrom foolbox.utils import samples\nimport bagnets.pytorch\nfrom bagnets.pytorch import Bottleneck\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport numpy as np\nimport time\nimport os\nimg_path = \"./ILSVRC2012_img_val\"\nroot = \"./\"\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\nif use_cuda:\n print(torch.cuda.get_device_name(0))",
"Tesla T4\n"
]
],
[
[
"## Define clipping functions",
"_____no_output_____"
]
],
[
[
"bagnet33 = bagnets.pytorch.bagnet33(pretrained=True, avg_pool=False).to(device)\nbagnet33.eval()\nprint()",
"\n"
],
[
"def clip_pm1(values, **kwargs):\n \"\"\"Clip values to [-1, 1]\n Input:\n - values(torch tensor): values to be clipped\n Output: (torch tensor) clipped values\n \"\"\"\n return torch.clamp_(values, -1., 1.)\n\ndef clip_bias(values, b):\n \"\"\"Clip values to [-1, 1]\n Input:\n - values(torch tensor): values to be clipped\n - b (float): intersection\n Output: (torch tensor) clipped values\n \"\"\"\n return torch.clamp_(values + b, -1, 1)\n\ndef clip_linear(values, a, b):\n \"\"\"Clip values to [-1, 1]\n Input:\n - values(torch tensor): values to be clipped\n - a (float): coefficient\n - b (float): intersection\n Output: (torch tensor) clipped values\n \"\"\"\n return torch.clamp_(values * a + b, -1, 1)\n\ndef sigmoid_linear(values, a, b):\n \"\"\"Clip values to [-1, 1]\n Input:\n - values(torch tensor): values to be clipped\n - a (float): coefficient\n - b (float): intersection\n Output: (torch tensor) clipped values\n \"\"\"\n x_lin = values * a + b\n return torch.sigmoid(x_lin)\n\ndef tanh_linear(values, a, b):\n \"\"\"Clip values to [-1, 1]\n Input:\n - values(torch tensor): values to be clipped\n - a (float): coefficient\n - b (float): intersection\n Output: (numpy array) clipped values\n \"\"\"\n return torch.tanh(values * a + b)",
"_____no_output_____"
],
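[
"# Editor's sketch: apply the functions above to a random tensor to compare their output ranges\n# (hard clipping vs. sigmoid vs. tanh squashing). The values of a and b here are arbitrary.\nimport torch\n\nx = torch.randn(5) * 5\nprint('raw logits   :', x)\nprint('clip_pm1     :', clip_pm1(x.clone()))\nprint('clip_linear  :', clip_linear(x.clone(), a=0.5, b=0.0))\nprint('sigmoid_linear:', sigmoid_linear(x.clone(), a=0.5, b=0.0))\nprint('tanh_linear  :', tanh_linear(x.clone(), a=0.5, b=0.0))\n# .clone() is used because the clip_* functions clamp in place",
"_____no_output_____"
],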
[
"def clip_logits(bagnet, clip, images, **kwargs):\n \"\"\"Clip logits returned by patches\n Input:\n - bagnet (pytorch model): Bagnet without average pooling\n - clip (python function): clip function\n - images (pytorch tensor): \n \"\"\"\n with torch.no_grad():\n patch_logits = bagnet(images)\n return clip(patch_logits, **kwargs)",
"_____no_output_____"
],
[
"bs = 20\noriginal, labels = samples(dataset='imagenet', index=1, batchsize=bs, shape=(224, 224), data_format='channels_first')\nimages = imagenet_preprocess(original)\nimages, targets = torch.from_numpy(images).to(device), torch.from_numpy(labels).to(device)",
"_____no_output_____"
],
[
"pm1_clip = clip_logits(bagnet33, clip_pm1, images)\nbias_clip = clip_logits(bagnet33, clip_bias, images, b=10)\nlinear_clip = clip_logits(bagnet33, clip_linear, images, a=0.5, b=10)\nsigmoid_clip = clip_logits(bagnet33, sigmoid_linear, images, a=0.5, b=10)\ntanh_clip = clip_logits(bagnet33, tanh_linear, images, a=0.5, b=10)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e74c55abc852b84d2c2add3a1cd2c8ccfbb87a12 | 26,043 | ipynb | Jupyter Notebook | notebook/procs-nl.ipynb | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | notebook/procs-nl.ipynb | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | null | null | null | notebook/procs-nl.ipynb | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | 73.360563 | 2,351 | 0.574204 | [
[
[
"from sagas.nlu.uni_remote_viz import viz_sample\nviz_sample('nl', 'Schrijf je een boek?', 'spacy')",
".. request is {'lang': 'nl', 'sents': 'Schrijf je een boek?', 'engine': 'spacy', 'pipelines': ['predicts']}\nwords count 5\n\u001b[36m✁ chunks. -------------------------\u001b[0m\n❶ verb_domains(schrijven) _\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e74c5a9056ee94fb297de3a6ba065fc98a1423e6 | 497,339 | ipynb | Jupyter Notebook | NOAA Reef Bleaching.ipynb | Aadya178/NOAA-Check-Reef-Bleaching | 9e43a908d7f05a6642f916a864121fa2ac9717d6 | [
"BSD-3-Clause"
] | null | null | null | NOAA Reef Bleaching.ipynb | Aadya178/NOAA-Check-Reef-Bleaching | 9e43a908d7f05a6642f916a864121fa2ac9717d6 | [
"BSD-3-Clause"
] | null | null | null | NOAA Reef Bleaching.ipynb | Aadya178/NOAA-Check-Reef-Bleaching | 9e43a908d7f05a6642f916a864121fa2ac9717d6 | [
"BSD-3-Clause"
] | null | null | null | 190.551341 | 113,548 | 0.887513 | [
[
[
"# Data Extraction",
"_____no_output_____"
],
[
"# Importing libraries and NOAA Reef Bleaching dataset",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"from numpy import arange\nimport numpy\nfrom matplotlib import pyplot as plt\nfrom scipy.stats import norm\nimport pandas as pd\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import model_selection",
"_____no_output_____"
],
[
"columns = ['Bleaching','Ocean','Year','Depth','Storms','Human Impact','Siltation','Dynamite','Poison','Sewage','Industrial','Commercial']\ndf = pd.read_csv(r\"C:/Users/Aadya/Downloads/NOAA_reef_check_bleaching_data.csv\")",
"_____no_output_____"
],
[
"df.columns = columns\ndf.head()",
"_____no_output_____"
]
],
[
[
"Removing the first row of the dataset. It will be used later.",
"_____no_output_____"
]
],
[
[
"#Total rows in the original dataframe\nlen(df.axes[0])",
"_____no_output_____"
],
[
"#Original dataframe\ndf.head(2)",
"_____no_output_____"
],
[
"#Assigning first row of the dataframe to 'row' variable\nrow = df.iloc[0]\nrow=list(row)\nprint(row)",
"['No', 'Atlantic', 2005, 4.0, 'yes', 'high', 'often', 'none', 'none', 'high', 'none', 'none']\n"
],
[
"#Removing first row from the dataframe\ndf = df.drop([0],axis=0)",
"_____no_output_____"
],
[
"#New dataframe\ndf.head(1)",
"_____no_output_____"
],
[
"#Total rows in the new dataframe\nlen(df.axes[0])",
"_____no_output_____"
]
],
[
[
"# Data Exploration",
"_____no_output_____"
]
],
[
[
"#Information about the dataset\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 9110 entries, 1 to 9110\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Bleaching 9110 non-null object \n 1 Ocean 9110 non-null object \n 2 Year 9110 non-null int64 \n 3 Depth 9110 non-null float64\n 4 Storms 9110 non-null object \n 5 Human Impact 9110 non-null object \n 6 Siltation 9110 non-null object \n 7 Dynamite 9110 non-null object \n 8 Poison 9110 non-null object \n 9 Sewage 9110 non-null object \n 10 Industrial 9110 non-null object \n 11 Commercial 9110 non-null object \ndtypes: float64(1), int64(1), object(10)\nmemory usage: 925.2+ KB\n"
],
[
"#Data types of the dataset columns\ndf.dtypes",
"_____no_output_____"
],
[
"#Memory used by each column in the dataset\ndf.memory_usage()",
"_____no_output_____"
],
[
"#Total memory used by the dataset\ndf.memory_usage().sum()",
"_____no_output_____"
]
],
[
[
"# Data Cleaning",
"_____no_output_____"
]
],
[
[
"#Check if there are missing values in the dataset\ndf.isnull().sum().sum()",
"_____no_output_____"
],
[
"#Check if there are duplicate rows in the dataset\ndf.duplicated().sum()",
"_____no_output_____"
],
[
"#Removing duplicates from the dataset\ndf.drop_duplicates(keep=\"first\",inplace=True)",
"_____no_output_____"
],
[
"#Check if duplicate rows have been removed successfully from the dataset\ndf.duplicated().sum()",
"_____no_output_____"
]
],
[
[
"Label encoding columns having non-integer values",
"_____no_output_____"
]
],
[
[
"df['Human Impact'].replace({'none':0,'low':1,'moderate':2,'high':3},inplace=True)",
"_____no_output_____"
],
[
"df['Siltation'].replace({'never':0,'occasionally':1,'often':2,'always':3},inplace=True)",
"_____no_output_____"
],
[
"df['Dynamite'].replace({'none':0,'low':1,'moderate':2,'high':3},inplace=True)",
"_____no_output_____"
],
[
"df['Poison'].replace({'none':0,'low':1,'moderate':2,'high':3},inplace=True)",
"_____no_output_____"
],
[
"df['Sewage'].replace({'none':0,'low':1,'moderate':2,'high':3},inplace=True)",
"_____no_output_____"
],
[
"df['Industrial'].replace({'none':0,'low':1,'moderate':2,'high':3},inplace=True)",
"_____no_output_____"
],
[
"df['Storms'].replace({'yes':1,'no':0},inplace=True)",
"_____no_output_____"
],
[
"df['Commercial'].replace({'none':0,'low':1,'moderate':2,'high':3},inplace=True)",
"_____no_output_____"
],
[
"df['Bleaching'].replace({'Yes':1,'No':0},inplace=True)",
"_____no_output_____"
],
[
"df['Ocean'].replace({'Atlantic':0,'Pacific':1,'Red Sea':2,'East Pacific':3,'Arabian Gulf':4,'Indian':5},inplace=True)",
"_____no_output_____"
],
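[
"#Editor's note: an equivalent, more compact form of the encodings above, kept as a reference\n#sketch. It assumes the original string-valued columns, so it is not meant to be re-run on the\n#already-encoded frame.\nlevel_map = {'none': 0, 'low': 1, 'moderate': 2, 'high': 3}\nencoding = {\n    'Human Impact': level_map, 'Dynamite': level_map, 'Poison': level_map,\n    'Sewage': level_map, 'Industrial': level_map, 'Commercial': level_map,\n    'Siltation': {'never': 0, 'occasionally': 1, 'often': 2, 'always': 3},\n    'Storms': {'yes': 1, 'no': 0}, 'Bleaching': {'Yes': 1, 'No': 0},\n    'Ocean': {'Atlantic': 0, 'Pacific': 1, 'Red Sea': 2, 'East Pacific': 3, 'Arabian Gulf': 4, 'Indian': 5},\n}\n# df.replace(encoding, inplace=True) # equivalent to the column-by-column replace() calls above",
"_____no_output_____"
],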
[
"#Data after Label encoding\ndf.head()",
"_____no_output_____"
]
],
[
[
"# Data Visualization",
"_____no_output_____"
]
],
[
[
"#Boxplot\ndf['Depth'].plot.box(figsize=(8, 5));",
"_____no_output_____"
],
[
"#Boxplot of all the columns with numerical data\ndf.boxplot(figsize=(20,20))",
"_____no_output_____"
],
[
"#Histogram\ndf['Depth'].hist(bins=30, figsize=(8, 5));",
"_____no_output_____"
],
[
"#Histogram with details\nax = df['Depth'].hist(bins=30, grid=False, color='green', figsize=(8, 5)) #grid turned off and colour changed\nax.set_xlabel('Depth')\nax.set_xlim(0,25) #limiting display range to 0-25 for the x-axis",
"_____no_output_____"
],
[
"#Barplot\ndf_avg_depth = df.groupby('Year')['Depth'].mean()\ndf_avg_depth[:].plot.bar(color='orange');",
"_____no_output_____"
],
[
"#Scatterplot\ndf.plot.scatter('Year','Depth',figsize=(20,5))",
"_____no_output_____"
],
[
"#No. of rows in the dataset after cleaning\nprint(len(df.axes[0]))",
"6698\n"
],
[
"#Data types of dataset columns after label encoding\ndf.dtypes",
"_____no_output_____"
],
[
"#Statistics for all the dataset columns\ndf.describe()",
"_____no_output_____"
],
[
"#Variance\ndf.var()",
"_____no_output_____"
],
[
"#Skewness\ndf.skew()",
"_____no_output_____"
],
[
"#Kurtosis\ndf.kurtosis()",
"_____no_output_____"
]
],
[
[
"# Data Selection",
"_____no_output_____"
]
],
[
[
"#Column-wise correlation in the dataset\ndf.corr()",
"_____no_output_____"
],
[
"#Import seaborn library\nimport seaborn as sns\n#Set the size of the heatmap\nsns.set(rc={'figure.figsize':(15,10)})",
"_____no_output_____"
],
[
"#Pearson correlation\nsns.heatmap(df.corr('pearson'),annot=True)",
"_____no_output_____"
],
[
"#Spearman correlation\nsns.heatmap(df.corr('spearman'),annot=True)",
"_____no_output_____"
],
[
"#Kendall correlation\nsns.heatmap(df.corr('kendall'),annot=True)",
"_____no_output_____"
]
],
[
[
"Pearson, Spearman and Kendall all give similar results.\n\nTarget column: Bleaching\n\nPearson correlation results in:\n\nColumns Year, Siltation and Commercial are the least correlated to the target column Bleaching. They are dropped.",
"_____no_output_____"
]
],
[
[
"df=df.drop(['Year','Siltation','Commercial'],axis=1)",
"_____no_output_____"
],
[
"df.replace('', numpy.nan, inplace=True)",
"_____no_output_____"
],
[
"df.dropna(inplace=True)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"# Data Splitting and Model Building (Logistic regression)",
"_____no_output_____"
],
[
"Logistic Regression using sklearn",
"_____no_output_____"
]
],
[
[
"#Logistic regression model using sklearn\nX = df.iloc[:, 1:]\ny = df.iloc[:,0]",
"_____no_output_____"
],
[
"#Split in training and testing sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)",
"_____no_output_____"
],
[
"#Scale\nfrom sklearn.preprocessing import StandardScaler\nX_sca = StandardScaler()\nX_train = X_sca.fit_transform(X_train)\nX_test = X_sca.fit_transform(X_test)",
"_____no_output_____"
],
[
"#Train the model\nfrom sklearn.linear_model import LogisticRegression\nclf = LogisticRegression(random_state=0)\nclf.fit(X_train, y_train)",
"_____no_output_____"
],
[
"#Data predicted by the model\ny_pred = clf.predict(X_test)",
"_____no_output_____"
],
[
"#Confusion matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\ncm",
"_____no_output_____"
],
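[
"#Editor's addition (assumes y_test and y_pred from the cells above): accuracy alone can be\n#misleading when bleaching events are rare, so we also report precision, recall and F1\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test, y_pred))",
"_____no_output_____"
],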
[
"#Model accuracy\nclf.score(X_test, y_test)",
"_____no_output_____"
],
[
"#Co-efficients of the Logistic regression equation\nclf.coef_",
"_____no_output_____"
],
[
"#y-intercept of the Logistic regression equation\nclf.intercept_",
"_____no_output_____"
]
],
[
[
"# Logistic regression equation",
"_____no_output_____"
],
[
"y -> target variable i.e. Bleaching\n\na -> y-intercept of Bleaching\n\nb0 -> co-efficient of Ocean\n\nb1 -> co-efficient of Depth\n\nb2 -> co-efficient of Storms\n\nb3 -> co-efficient of Human Impact\n\nb4 -> co-efficient of Dynamite\n\nb5 -> co-efficient of Poison\n\nb6 -> co-efficient of Sewage\n\nb7 -> co-efficient of Industrial\n\nGeneral equation: y = a + b0x0 + b1x1 + ... + bnxn\n\nActual equation: Bleaching = -3.4 + 0.1168(Ocean) - 0.027(Depth) + 0.0791(Storms) - 0.0796(Human Impact) + 0.1739(Dynamite) + 0.0208(Poison) + 0.006(Sewage) + 0.0803(Industrial)",
"_____no_output_____"
],
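[
"#Editor's check (assumes clf and X_test from the cells above): the linear combination written\n#out above is the log-odds; passing it through the sigmoid reproduces clf.predict_proba\nimport numpy as np\nz = clf.intercept_ + X_test.dot(clf.coef_.T) #linear predictor (log-odds)\np_manual = 1/(1 + np.exp(-z)) #sigmoid\np_sklearn = clf.predict_proba(X_test)[:, [1]] #probability of Bleaching = 1\nprint('max abs difference:', np.abs(p_manual - p_sklearn).max())",
"_____no_output_____"
],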
[
"# Model evaluation through k-fold cross validation and evaluation metrics",
"_____no_output_____"
]
],
[
[
"#K-fold cross-validation\n#Logistic Regression\nX = df.iloc[:,1:]\ny = df.iloc[:,0]\nk = 5\nkf = model_selection.KFold(n_splits=k, random_state=None)\nmodel = LogisticRegression(solver= 'liblinear')\nresult = cross_val_score(model , X, y, cv = kf)\nprint(\"Avg accuracy: {}\".format(result.mean()))",
"Avg accuracy: 0.967305518709663\n"
],
[
"#Root mean square error\nimport sklearn\nsklearn.metrics.mean_squared_error(y_test,y_pred)",
"_____no_output_____"
],
[
"#R2 score\nimport sklearn\nsklearn.metrics.r2_score(y_test,y_pred)",
"_____no_output_____"
]
],
[
[
"Performing similar analysis for Pearson, Spearman and Kendall correlations gives the following results:",
"_____no_output_____"
],
[
"Accuracy through Logistic regression using sklearn:\n\nPearson: 0.968955223880597\n\nSpearman: 0.9659701492537314\n\nKendall: 0.9707462686567164\n\nThus, Kendall correlation gives the most accurate results.",
"_____no_output_____"
],
[
"Average accuracy through K-fold cross validation:\n\nPearson: 0.967305518709663\n\nSpearman: 0.966712405114086\n\nKendall: 0.9670109125767725\n\nThus, Pearson correlation gives the most accurate results.",
"_____no_output_____"
],
[
"RMSE (Root Mean Squar Error):\n\nPearson: 0.031044776119402984\n\nSpearman: 0.03402985074626866\n\nKendall: 0.029253731343283584\n\nThus, Kendall correlation gives the least RMSE.",
"_____no_output_____"
],
[
"R2 score:\n\nPearson: -0.03203943314849078\n\nSpearman: -0.03522867737948099\n\nKendall: -0.0509501126818277\n\nThus, Pearson correlation gives the best R2 score.",
"_____no_output_____"
],
[
"# Cross-checking the model with one of the dataset samples",
"_____no_output_____"
]
],
[
[
"#Bleaching didn't occur - 0\n#Bleaching occured - 1\ndf.head(1)",
"_____no_output_____"
],
[
"if bool(clf.predict([[0,4,1,3,0,0,0,0]])):\n print(\"Bleaching will occur\")\nelse:\n print(\"Bleaching will not occur\")",
"Bleaching will not occur\n"
]
],
[
[
"# Using the model to predict whether bleaching will occur for unseen data",
"_____no_output_____"
]
],
[
[
"#Using the \"row\" saved earlier\n#It is a data sample never seen by the model before\nactual_bleaching = row[0]\n\nif actual_bleaching:\n print(\"Bleaching doesn't actually occur\")\nelse:\n print(\"Bleaching Actually occurs\")",
"Bleaching doesn't actually occur\n"
],
[
"#Preparing the 'row' for prediction\nrow.pop()\nrow=[row[1]]+row[3:6]+row[7:]\n\n#Original row\nprint(\"Original row: \",row)\n\n#Label encoded row\nrow=[0,4.0,1,3,0,0,3,0]\nprint(\"Label encoded row: \",row)",
"Original row: ['Atlantic', 4.0, 'yes', 'high', 'none', 'none', 'high', 'none']\nLabel encoded row: [0, 4.0, 1, 3, 0, 0, 3, 0]\n"
],
[
"#Predicting using the model\npredicted_bleaching = clf.predict([row])[0]\n\nif bool(predicted_bleaching):\n predicted_bleaching = \"Yes, Bleaching occurs\"\nelse:\n predicted_bleaching = \"No, Bleaching doesn't occur\"\n\nprint(predicted_bleaching)",
"No, Bleaching doesn't occur\n"
]
],
[
[
"The predicted result for unseen data is correct.",
"_____no_output_____"
],
[
"# Conclusion",
"_____no_output_____"
],
[
"The analysis shows that -\n\nDynamite, Storms, Poison, Ocean and Industrial are the leading features to determine whether or not a coral reef will be bleached, in that order.\n\nDepth, Human Impact and Sewage are important but not very significant factors, in the same order.\n\nCommercial, Siltation and Year are least correlated to Bleaching, in the given order.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e74c5f3a37fdeddb1280e16b0d7798ca48e8f3ba | 43,402 | ipynb | Jupyter Notebook | introduction_to_amazon_algorithms/deepar_electricity/DeepAR-Electricity.ipynb | EthanShouhanCheng/amazon-sagemaker-examples | b09d6027fa7b31c4db3c1505ae4e17753ba7a232 | [
"Apache-2.0"
] | null | null | null | introduction_to_amazon_algorithms/deepar_electricity/DeepAR-Electricity.ipynb | EthanShouhanCheng/amazon-sagemaker-examples | b09d6027fa7b31c4db3c1505ae4e17753ba7a232 | [
"Apache-2.0"
] | null | null | null | introduction_to_amazon_algorithms/deepar_electricity/DeepAR-Electricity.ipynb | EthanShouhanCheng/amazon-sagemaker-examples | b09d6027fa7b31c4db3c1505ae4e17753ba7a232 | [
"Apache-2.0"
] | null | null | null | 35.780709 | 606 | 0.581148 | [
[
[
"# SageMaker/DeepAR demo on electricity dataset\n\nThis notebook complements the [DeepAR introduction notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/deepar_synthetic/deepar_synthetic.ipynb). \n\nHere, we will consider a real use case and show how to use DeepAR on SageMaker for predicting energy consumption of 370 customers over time, based on a [dataset](https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014) that was used in the academic papers [[1](https://media.nips.cc/nipsbooks/nipspapers/paper_files/nips29/reviews/526.html)] and [[2](https://arxiv.org/abs/1704.04110)]. \n\nIn particular, we will see how to:\n* Prepare the dataset\n* Use the SageMaker Python SDK to train a DeepAR model and deploy it\n* Make requests to the deployed model to obtain forecasts interactively\n* Illustrate advanced features of DeepAR: missing values, additional time features, non-regular frequencies and category information\n\nRunning this notebook takes around 40 min on a ml.c4.2xlarge for the training, and inference is done on a ml.m5.large (the usage time will depend on how long you leave your served model running).\n\nThis notebook is tested using SageMaker Studio but using classic Notebook (From the SageMaker Menu, go to Help -> select `Launch Classic Notebook`). \n\nFor more information see the DeepAR [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html) or [paper](https://arxiv.org/abs/1704.04110), ",
"_____no_output_____"
]
],
[
[
"import sys\n\n!{sys.executable} -m pip install s3fs",
"_____no_output_____"
],
[
"from __future__ import print_function\n\n%matplotlib inline\n\nimport sys\nfrom urllib.request import urlretrieve\nimport zipfile\nfrom dateutil.parser import parse\nimport json\nfrom random import shuffle\nimport random\nimport datetime\nimport os\n\nimport boto3\nimport s3fs\nimport sagemaker\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import timedelta\n\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\nfrom ipywidgets import IntSlider, FloatSlider, Checkbox",
"_____no_output_____"
],
[
"# set random seeds for reproducibility\nnp.random.seed(42)\nrandom.seed(42)",
"_____no_output_____"
],
[
"sagemaker_session = sagemaker.Session()",
"_____no_output_____"
]
],
[
[
"Before starting, we can override the default values for the following:\n- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.\n- The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these.",
"_____no_output_____"
]
],
[
[
"s3_bucket = sagemaker.Session().default_bucket() # replace with an existing bucket if needed\ns3_prefix = \"deepar-electricity-demo-notebook\" # prefix used for all data stored within the bucket\n\nrole = sagemaker.get_execution_role() # IAM role to use by SageMaker",
"_____no_output_____"
],
[
"region = sagemaker_session.boto_region_name\n\ns3_data_path = \"s3://{}/{}/data\".format(s3_bucket, s3_prefix)\ns3_output_path = \"s3://{}/{}/output\".format(s3_bucket, s3_prefix)",
"_____no_output_____"
]
],
[
[
"Next, we configure the container image to be used for the region that we are running in.",
"_____no_output_____"
]
],
[
[
"image_name = sagemaker.amazon.amazon_estimator.get_image_uri(region, \"forecasting-deepar\", \"latest\")",
"_____no_output_____"
]
],
[
[
"### Import electricity dataset and upload it to S3 to make it available for Sagemaker",
"_____no_output_____"
],
[
"As a first step, we need to download the original data set of from the UCI data set repository.",
"_____no_output_____"
]
],
[
[
"DATA_HOST = \"https://archive.ics.uci.edu\"\nDATA_PATH = \"/ml/machine-learning-databases/00321/\"\nARCHIVE_NAME = \"LD2011_2014.txt.zip\"\nFILE_NAME = ARCHIVE_NAME[:-4]",
"_____no_output_____"
],
[
"def progress_report_hook(count, block_size, total_size):\n mb = int(count * block_size // 1e6)\n if count % 500 == 0:\n sys.stdout.write(\"\\r{} MB downloaded\".format(mb))\n sys.stdout.flush()\n\n\nif not os.path.isfile(FILE_NAME):\n print(\"downloading dataset (258MB), can take a few minutes depending on your connection\")\n urlretrieve(DATA_HOST + DATA_PATH + ARCHIVE_NAME, ARCHIVE_NAME, reporthook=progress_report_hook)\n\n print(\"\\nextracting data archive\")\n zip_ref = zipfile.ZipFile(ARCHIVE_NAME, \"r\")\n zip_ref.extractall(\"./\")\n zip_ref.close()\nelse:\n print(\"File found skipping download\")",
"_____no_output_____"
]
],
[
[
"Then, we load and parse the dataset and convert it to a collection of Pandas time series, which makes common time series operations such as indexing by time periods or resampling much easier. The data is originally recorded in 15min interval, which we could use directly. Here we want to forecast longer periods (one week) and resample the data to a granularity of 2 hours.",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv(FILE_NAME, sep=\";\", index_col=0, parse_dates=True, decimal=\",\")\nnum_timeseries = data.shape[1]\ndata_kw = data.resample(\"2H\").sum() / 8\ntimeseries = []\nfor i in range(num_timeseries):\n timeseries.append(np.trim_zeros(data_kw.iloc[:, i], trim=\"f\"))",
"_____no_output_____"
]
],
[
[
"Let us plot the resulting time series for the first ten customers for the time period spanning the first two weeks of 2014.",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(5, 2, figsize=(20, 20), sharex=True)\naxx = axs.ravel()\nfor i in range(0, 10):\n timeseries[i].loc[\"2014-01-01\":\"2014-01-14\"].plot(ax=axx[i])\n axx[i].set_xlabel(\"date\")\n axx[i].set_ylabel(\"kW consumption\")\n axx[i].grid(which=\"minor\", axis=\"x\")",
"_____no_output_____"
]
],
[
[
"### Train and Test splits\n\nOften times one is interested in evaluating the model or tuning its hyperparameters by looking at error metrics on a hold-out test set. Here we split the available data into train and test sets for evaluating the trained model. For standard machine learning tasks such as classification and regression, one typically obtains this split by randomly separating examples into train and test sets. However, in forecasting it is important to do this train/test split based on time rather than by time series.\n\nIn this example, we will reserve the last section of each of the time series for evalutation purpose and use only the first part as training data. ",
"_____no_output_____"
]
],
[
[
"# we use 2 hour frequency for the time series\nfreq = \"2H\"\n\n# we predict for 7 days\nprediction_length = 7 * 12\n\n# we also use 7 days as context length, this is the number of state updates accomplished before making predictions\ncontext_length = 7 * 12",
"_____no_output_____"
]
],
[
[
"We specify here the portion of the data that is used for training: the model sees data from 2014-01-01 to 2014-09-01 for training.",
"_____no_output_____"
]
],
[
[
"start_dataset = pd.Timestamp(\"2014-01-01 00:00:00\", freq=freq)\nend_training = pd.Timestamp(\"2014-09-01 00:00:00\", freq=freq)",
"_____no_output_____"
]
],
[
[
"The DeepAR JSON input format represents each time series as a JSON object. In the simplest case each time series just consists of a start time stamp (``start``) and a list of values (``target``). For more complex cases, DeepAR also supports the fields ``dynamic_feat`` for time-series features and ``cat`` for categorical features, which we will use later.",
"_____no_output_____"
]
],
[
[
"training_data = [\n {\n \"start\": str(start_dataset),\n \"target\": ts[\n start_dataset : end_training - timedelta(days=1)\n ].tolist(), # We use -1, because pandas indexing includes the upper bound\n }\n for ts in timeseries\n]\nprint(len(training_data))",
"_____no_output_____"
]
],
[
[
"As test data, we will consider time series extending beyond the training range: these will be used for computing test scores, by using the trained model to forecast their trailing 7 days, and comparing predictions with actual values.\nTo evaluate our model performance on more than one week, we generate test data that extends to 1, 2, 3, 4 weeks beyond the training range. This way we perform *rolling evaluation* of our model.",
"_____no_output_____"
]
],
[
[
"num_test_windows = 4\n\ntest_data = [\n {\n \"start\": str(start_dataset),\n \"target\": ts[start_dataset : end_training + timedelta(days=k * prediction_length)].tolist(),\n }\n for k in range(1, num_test_windows + 1)\n for ts in timeseries\n]\nprint(len(test_data))",
"_____no_output_____"
]
],
[
[
"Let's now write the dictionary to the `jsonlines` file format that DeepAR understands (it also supports gzipped jsonlines and parquet).",
"_____no_output_____"
]
],
[
[
"def write_dicts_to_file(path, data):\n with open(path, \"wb\") as fp:\n for d in data:\n fp.write(json.dumps(d).encode(\"utf-8\"))\n fp.write(\"\\n\".encode(\"utf-8\"))",
"_____no_output_____"
],
[
"%%time\nwrite_dicts_to_file(\"train.json\", training_data)\nwrite_dicts_to_file(\"test.json\", test_data)",
"_____no_output_____"
]
],
[
[
"Now that we have the data files locally, let us copy them to S3 where DeepAR can access them. Depending on your connection, this may take a couple of minutes.",
"_____no_output_____"
]
],
[
[
"s3 = boto3.resource(\"s3\")\n\n\ndef copy_to_s3(local_file, s3_path, override=False):\n assert s3_path.startswith(\"s3://\")\n split = s3_path.split(\"/\")\n bucket = split[2]\n path = \"/\".join(split[3:])\n buk = s3.Bucket(bucket)\n\n if len(list(buk.objects.filter(Prefix=path))) > 0:\n if not override:\n print(\n \"File s3://{}/{} already exists.\\nSet override to upload anyway.\\n\".format(\n s3_bucket, s3_path\n )\n )\n return\n else:\n print(\"Overwriting existing file\")\n with open(local_file, \"rb\") as data:\n print(\"Uploading file to {}\".format(s3_path))\n buk.put_object(Key=path, Body=data)",
"_____no_output_____"
],
[
"%%time\ncopy_to_s3(\"train.json\", s3_data_path + \"/train/train.json\")\ncopy_to_s3(\"test.json\", s3_data_path + \"/test/test.json\")",
"_____no_output_____"
]
],
[
[
"Let's have a look to what we just wrote to S3.",
"_____no_output_____"
]
],
[
[
"s3filesystem = s3fs.S3FileSystem()\nwith s3filesystem.open(s3_data_path + \"/train/train.json\", \"rb\") as fp:\n print(fp.readline().decode(\"utf-8\")[:100] + \"...\")",
"_____no_output_____"
]
],
[
[
"We are all set with our dataset processing, we can now call DeepAR to train a model and generate predictions.",
"_____no_output_____"
],
[
"### Train a model\n\nHere we define the estimator that will launch the training job.",
"_____no_output_____"
]
],
[
[
"estimator = sagemaker.estimator.Estimator(\n image_uri=image_name,\n sagemaker_session=sagemaker_session,\n role=role,\n train_instance_count=1,\n train_instance_type=\"ml.c4.2xlarge\",\n base_job_name=\"deepar-electricity-demo\",\n output_path=s3_output_path,\n)",
"_____no_output_____"
]
],
[
[
"Next we need to set the hyperparameters for the training job. For example frequency of the time series used, number of data points the model will look at in the past, number of predicted data points. The other hyperparameters concern the model to train (number of layers, number of cells per layer, likelihood function) and the training options (number of epochs, batch size, learning rate...). We use default parameters for every optional parameter in this case (you can always use [Sagemaker Automated Model Tuning](https://aws.amazon.com/blogs/aws/sagemaker-automatic-model-tuning/) to tune them).",
"_____no_output_____"
]
],
[
[
"hyperparameters = {\n \"time_freq\": freq,\n \"epochs\": \"400\",\n \"early_stopping_patience\": \"40\",\n \"mini_batch_size\": \"64\",\n \"learning_rate\": \"5E-4\",\n \"context_length\": str(context_length),\n \"prediction_length\": str(prediction_length),\n}",
"_____no_output_____"
],
[
"estimator.set_hyperparameters(**hyperparameters)",
"_____no_output_____"
]
],
[
[
"We are ready to launch the training job. SageMaker will start an EC2 instance, download the data from S3, start training the model and save the trained model.\n\nIf you provide the `test` data channel as we do in this example, DeepAR will also calculate accuracy metrics for the trained model on this test. This is done by predicting the last `prediction_length` points of each time-series in the test set and comparing this to the actual value of the time-series. \n\n**Note:** the next cell may take a few minutes to complete, depending on data size, model complexity, training options.",
"_____no_output_____"
]
],
[
[
"%%time\ndata_channels = {\"train\": \"{}/train/\".format(s3_data_path), \"test\": \"{}/test/\".format(s3_data_path)}\n\nestimator.fit(inputs=data_channels, wait=True)",
"_____no_output_____"
]
],
[
[
"Since you pass a test set in this example, accuracy metrics for the forecast are computed and logged (see bottom of the log).\nYou can find the definition of these metrics from [our documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html). You can use these to optimize the parameters and tune your model or use SageMaker's [Automated Model Tuning service](https://aws.amazon.com/blogs/aws/sagemaker-automatic-model-tuning/) to tune the model for you.",
"_____no_output_____"
],
[
"### Create endpoint and predictor",
"_____no_output_____"
],
[
"Now that we have a trained model, we can use it to perform predictions by deploying it to an endpoint.\n\n**Note: Remember to delete the endpoint after running this experiment. A cell at the very bottom of this notebook will do that: make sure you run it at the end.**",
"_____no_output_____"
],
[
"To query the endpoint and perform predictions, we can define the following utility class: this allows making requests using `pandas.Series` objects rather than raw JSON strings.",
"_____no_output_____"
]
],
[
[
"from sagemaker.serializers import IdentitySerializer",
"_____no_output_____"
],
[
"class DeepARPredictor(sagemaker.predictor.Predictor):\n def __init__(self, *args, **kwargs):\n super().__init__(\n *args,\n # serializer=JSONSerializer(),\n serializer=IdentitySerializer(content_type=\"application/json\"),\n **kwargs,\n )\n\n def predict(\n self,\n ts,\n cat=None,\n dynamic_feat=None,\n num_samples=100,\n return_samples=False,\n quantiles=[\"0.1\", \"0.5\", \"0.9\"],\n ):\n \"\"\"Requests the prediction of for the time series listed in `ts`, each with the (optional)\n corresponding category listed in `cat`.\n\n ts -- `pandas.Series` object, the time series to predict\n cat -- integer, the group associated to the time series (default: None)\n num_samples -- integer, number of samples to compute at prediction time (default: 100)\n return_samples -- boolean indicating whether to include samples in the response (default: False)\n quantiles -- list of strings specifying the quantiles to compute (default: [\"0.1\", \"0.5\", \"0.9\"])\n\n Return value: list of `pandas.DataFrame` objects, each containing the predictions\n \"\"\"\n prediction_time = ts.index[-1] + ts.index.freq\n quantiles = [str(q) for q in quantiles]\n req = self.__encode_request(ts, cat, dynamic_feat, num_samples, return_samples, quantiles)\n res = super(DeepARPredictor, self).predict(req)\n return self.__decode_response(res, ts.index.freq, prediction_time, return_samples)\n\n def __encode_request(self, ts, cat, dynamic_feat, num_samples, return_samples, quantiles):\n instance = series_to_dict(\n ts, cat if cat is not None else None, dynamic_feat if dynamic_feat else None\n )\n\n configuration = {\n \"num_samples\": num_samples,\n \"output_types\": [\"quantiles\", \"samples\"] if return_samples else [\"quantiles\"],\n \"quantiles\": quantiles,\n }\n\n http_request_data = {\"instances\": [instance], \"configuration\": configuration}\n\n return json.dumps(http_request_data).encode(\"utf-8\")\n\n def __decode_response(self, response, freq, prediction_time, return_samples):\n # we only sent one time series so we only receive one in return\n # however, if possible one will pass multiple time series as predictions will then be faster\n predictions = json.loads(response.decode(\"utf-8\"))[\"predictions\"][0]\n prediction_length = len(next(iter(predictions[\"quantiles\"].values())))\n prediction_index = pd.date_range(\n start=prediction_time, freq=freq, periods=prediction_length\n )\n if return_samples:\n dict_of_samples = {\"sample_\" + str(i): s for i, s in enumerate(predictions[\"samples\"])}\n else:\n dict_of_samples = {}\n return pd.DataFrame(\n data={**predictions[\"quantiles\"], **dict_of_samples}, index=prediction_index\n )\n\n def set_frequency(self, freq):\n self.freq = freq\n\n\ndef encode_target(ts):\n return [x if np.isfinite(x) else \"NaN\" for x in ts]\n\n\ndef series_to_dict(ts, cat=None, dynamic_feat=None):\n \"\"\"Given a pandas.Series object, returns a dictionary encoding the time series.\n\n ts -- a pands.Series object with the target time series\n cat -- an integer indicating the time series category\n\n Return value: a dictionary\n \"\"\"\n obj = {\"start\": str(ts.index[0]), \"target\": encode_target(ts)}\n if cat is not None:\n obj[\"cat\"] = cat\n if dynamic_feat is not None:\n obj[\"dynamic_feat\"] = dynamic_feat\n return obj",
"_____no_output_____"
]
],
[
[
"Now we can deploy the model and create and endpoint that can be queried using our custom DeepARPredictor class.",
"_____no_output_____"
]
],
[
[
"predictor = estimator.deploy(\n initial_instance_count=1, instance_type=\"ml.m5.large\", predictor_cls=DeepARPredictor\n)",
"_____no_output_____"
]
],
[
[
"### Make predictions and plot results",
"_____no_output_____"
],
[
"Now we can use the `predictor` object to generate predictions.",
"_____no_output_____"
]
],
[
[
"predictor.predict(ts=timeseries[120], quantiles=[0.10, 0.5, 0.90]).head()",
"_____no_output_____"
]
],
[
[
"Below we define a plotting function that queries the model and displays the forecast.",
"_____no_output_____"
]
],
[
[
"def plot(\n predictor,\n target_ts,\n cat=None,\n dynamic_feat=None,\n forecast_date=end_training,\n show_samples=False,\n plot_history=7 * 12,\n confidence=80,\n):\n freq = target_ts.index.freq\n print(\n \"calling served model to generate predictions starting from {}\".format(str(forecast_date))\n )\n assert confidence > 50 and confidence < 100\n low_quantile = 0.5 - confidence * 0.005\n up_quantile = confidence * 0.005 + 0.5\n\n # we first construct the argument to call our model\n args = {\n \"ts\": target_ts[:forecast_date],\n \"return_samples\": show_samples,\n \"quantiles\": [low_quantile, 0.5, up_quantile],\n \"num_samples\": 100,\n }\n\n if dynamic_feat is not None:\n args[\"dynamic_feat\"] = dynamic_feat\n fig = plt.figure(figsize=(20, 6))\n ax = plt.subplot(2, 1, 1)\n else:\n fig = plt.figure(figsize=(20, 3))\n ax = plt.subplot(1, 1, 1)\n\n if cat is not None:\n args[\"cat\"] = cat\n ax.text(0.9, 0.9, \"cat = {}\".format(cat), transform=ax.transAxes)\n\n # call the end point to get the prediction\n prediction = predictor.predict(**args)\n\n # plot the samples\n if show_samples:\n for key in prediction.keys():\n if \"sample\" in key:\n prediction[key].plot(color=\"lightskyblue\", alpha=0.2, label=\"_nolegend_\")\n\n # plot the target\n target_section = target_ts[\n forecast_date - plot_history * freq : forecast_date + prediction_length * freq\n ]\n target_section.plot(color=\"black\", label=\"target\")\n\n # plot the confidence interval and the median predicted\n ax.fill_between(\n prediction[str(low_quantile)].index,\n prediction[str(low_quantile)].values,\n prediction[str(up_quantile)].values,\n color=\"b\",\n alpha=0.3,\n label=\"{}% confidence interval\".format(confidence),\n )\n prediction[\"0.5\"].plot(color=\"b\", label=\"P50\")\n ax.legend(loc=2)\n\n # fix the scale as the samples may change it\n ax.set_ylim(target_section.min() * 0.5, target_section.max() * 1.5)\n\n if dynamic_feat is not None:\n for i, f in enumerate(dynamic_feat, start=1):\n ax = plt.subplot(len(dynamic_feat) * 2, 1, len(dynamic_feat) + i, sharex=ax)\n feat_ts = pd.Series(\n index=pd.date_range(\n start=target_ts.index[0], freq=target_ts.index.freq, periods=len(f)\n ),\n data=f,\n )\n feat_ts[\n forecast_date - plot_history * freq : forecast_date + prediction_length * freq\n ].plot(ax=ax, color=\"g\")",
"_____no_output_____"
]
],
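  [
   [
    "# Illustrative example (added for clarity): plot the forecast of one customer starting\n# at the end of the training range. Any valid index of `timeseries` can be used here.\nplot(\n    predictor,\n    target_ts=timeseries[120],\n    forecast_date=end_training,\n    show_samples=False,\n    plot_history=7 * 12,\n    confidence=80,\n)",
    "_____no_output_____"
   ]
  ],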
[
[
"We can interact with the function previously defined, to look at the forecast of any customer at any point in (future) time. \n\nFor each request, the predictions are obtained by calling our served model on the fly.\n\nHere we forecast the consumption of an office after week-end (note the lower week-end consumption). \nYou can select any time series and any forecast date, just click on `Run Interact` to generate the predictions from our served endpoint and see the plot.",
"_____no_output_____"
]
],
[
[
"style = {\"description_width\": \"initial\"}",
"_____no_output_____"
],
[
"@interact_manual(\n customer_id=IntSlider(min=0, max=369, value=91, style=style),\n forecast_day=IntSlider(min=0, max=100, value=51, style=style),\n confidence=IntSlider(min=60, max=95, value=80, step=5, style=style),\n history_weeks_plot=IntSlider(min=1, max=20, value=1, style=style),\n show_samples=Checkbox(value=False),\n continuous_update=False,\n)\ndef plot_interact(customer_id, forecast_day, confidence, history_weeks_plot, show_samples):\n plot(\n predictor,\n target_ts=timeseries[customer_id],\n forecast_date=end_training + datetime.timedelta(days=forecast_day),\n show_samples=show_samples,\n plot_history=history_weeks_plot * 12 * 7,\n confidence=confidence,\n )",
"_____no_output_____"
]
],
[
[
"# Additional features\n\nWe have seen how to prepare a dataset and run DeepAR for a simple example.\n\nIn addition DeepAR supports the following features:\n\n* missing values: DeepAR can handle missing values in the time series during training as well as for inference.\n* Additional time features: DeepAR provides a set default time series features such as hour of day etc. However, you can provide additional feature time series via the `dynamic_feat` field. \n* generalize frequencies: any integer multiple of the previously supported base frequencies (minutes `min`, hours `H`, days `D`, weeks `W`, month `M`) are now allowed; e.g., `15min`. We already demonstrated this above by using `2H` frequency.\n* categories: If your time series belong to different groups (e.g. types of product, regions, etc), this information can be encoded as one or more categorical features using the `cat` field.\n\nWe will now demonstrate the missing values and time features support. For this part we will reuse the electricity dataset but will do some artificial changes to demonstrate the new features: \n* We will randomly mask parts of the time series to demonstrate the missing values support.\n* We will include a \"special-day\" that occurs at different days for different time series during this day we introduce a strong up-lift\n* We train the model on this dataset giving \"special-day\" as a custom time series feature",
"_____no_output_____"
],
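   [
    "For illustration only, a single training record that uses these fields could look like the sketch below (the values are made up; the actual dataset with a `dynamic_feat` entry is constructed later in this notebook):\n\n```python\nexample_record = {\n    \"start\": \"2014-01-01 00:00:00\",\n    \"target\": [35.2, 33.1, \"NaN\", 36.7],        # \"NaN\" encodes a missing value\n    \"cat\": [2],                                 # optional category of the series\n    \"dynamic_feat\": [[0.0, 0.0, 1.0, 1.0]],     # one custom feature, same length as target\n}\n```",
    "_____no_output_____"
   ],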
[
"## Prepare dataset",
"_____no_output_____"
],
[
"As discussed above we will create a \"special-day\" feature and create an up-lift for the time series during this day. This simulates real world application where you may have things like promotions of a product for a certain time or a special event that influences your time series. ",
"_____no_output_____"
]
],
[
[
"def create_special_day_feature(ts, fraction=0.05):\n # First select random day indices (plus the forecast day)\n num_days = (ts.index[-1] - ts.index[0]).days\n rand_indices = list(np.random.randint(0, num_days, int(num_days * 0.1))) + [num_days]\n\n feature_value = np.zeros_like(ts)\n for i in rand_indices:\n feature_value[i * 12 : (i + 1) * 12] = 1.0\n feature = pd.Series(index=ts.index, data=feature_value)\n return feature\n\n\ndef drop_at_random(ts, drop_probability=0.1):\n assert 0 <= drop_probability < 1\n random_mask = np.random.random(len(ts)) < drop_probability\n return ts.mask(random_mask)",
"_____no_output_____"
],
[
"special_day_features = [create_special_day_feature(ts) for ts in timeseries]",
"_____no_output_____"
]
],
[
[
"We now create the up-lifted time series and randomly remove time points.\n\nThe figures below show some example time series and the `special_day` feature value in green. ",
"_____no_output_____"
]
],
[
[
"timeseries_uplift = [ts * (1.0 + feat) for ts, feat in zip(timeseries, special_day_features)]\ntime_series_processed = [drop_at_random(ts) for ts in timeseries_uplift]",
"_____no_output_____"
],
[
"fig, axs = plt.subplots(5, 2, figsize=(20, 20), sharex=True)\naxx = axs.ravel()\nfor i in range(0, 10):\n ax = axx[i]\n ts = time_series_processed[i][:400]\n ts.plot(ax=ax)\n ax.set_ylim(-0.1 * ts.max(), ts.max())\n ax2 = ax.twinx()\n special_day_features[i][:400].plot(ax=ax2, color=\"g\")\n ax2.set_ylim(-0.2, 7)",
"_____no_output_____"
],
[
"%%time\n\ntraining_data_new_features = [\n {\n \"start\": str(start_dataset),\n \"target\": encode_target(ts[start_dataset:end_training]),\n \"dynamic_feat\": [special_day_features[i][start_dataset:end_training].tolist()],\n }\n for i, ts in enumerate(time_series_processed)\n]\nprint(len(training_data_new_features))\n\n# as in our previous example, we do a rolling evaluation over the next 7 days\nnum_test_windows = 7\n\ntest_data_new_features = [\n {\n \"start\": str(start_dataset),\n \"target\": encode_target(\n ts[start_dataset : end_training + 2 * k * prediction_length * ts.index.freq]\n ),\n \"dynamic_feat\": [\n special_day_features[i][\n start_dataset : end_training + 2 * k * prediction_length * ts.index.freq\n ].tolist()\n ],\n }\n for k in range(1, num_test_windows + 1)\n for i, ts in enumerate(timeseries_uplift)\n]",
"_____no_output_____"
],
[
"def check_dataset_consistency(train_dataset, test_dataset=None):\n d = train_dataset[0]\n has_dynamic_feat = \"dynamic_feat\" in d\n if has_dynamic_feat:\n num_dynamic_feat = len(d[\"dynamic_feat\"])\n has_cat = \"cat\" in d\n if has_cat:\n num_cat = len(d[\"cat\"])\n\n def check_ds(ds):\n for i, d in enumerate(ds):\n if has_dynamic_feat:\n assert \"dynamic_feat\" in d\n assert num_dynamic_feat == len(d[\"dynamic_feat\"])\n for f in d[\"dynamic_feat\"]:\n assert len(d[\"target\"]) == len(f)\n if has_cat:\n assert \"cat\" in d\n assert len(d[\"cat\"]) == num_cat\n\n check_ds(train_dataset)\n if test_dataset is not None:\n check_ds(test_dataset)\n\n\ncheck_dataset_consistency(training_data_new_features, test_data_new_features)",
"_____no_output_____"
],
[
"%%time\nwrite_dicts_to_file(\"train_new_features.json\", training_data_new_features)\nwrite_dicts_to_file(\"test_new_features.json\", test_data_new_features)",
"_____no_output_____"
],
[
"%%time\n\ns3_data_path_new_features = \"s3://{}/{}-new-features/data\".format(s3_bucket, s3_prefix)\ns3_output_path_new_features = \"s3://{}/{}-new-features/output\".format(s3_bucket, s3_prefix)\n\nprint(\"Uploading to S3 this may take a few minutes depending on your connection.\")\ncopy_to_s3(\n \"train_new_features.json\",\n s3_data_path_new_features + \"/train/train_new_features.json\",\n override=True,\n)\ncopy_to_s3(\n \"test_new_features.json\",\n s3_data_path_new_features + \"/test/test_new_features.json\",\n override=True,\n)",
"_____no_output_____"
],
[
"%%time\nestimator_new_features = sagemaker.estimator.Estimator(\n image_uri=image_name,\n sagemaker_session=sagemaker_session,\n role=role,\n train_instance_count=1,\n train_instance_type=\"ml.c4.2xlarge\",\n base_job_name=\"deepar-electricity-demo-new-features\",\n output_path=s3_output_path_new_features,\n)\n\nhyperparameters = {\n \"time_freq\": freq,\n \"context_length\": str(context_length),\n \"prediction_length\": str(prediction_length),\n \"epochs\": \"400\",\n \"learning_rate\": \"5E-4\",\n \"mini_batch_size\": \"64\",\n \"early_stopping_patience\": \"40\",\n \"num_dynamic_feat\": \"auto\", # this will use the `dynamic_feat` field if it's present in the data\n}\nestimator_new_features.set_hyperparameters(**hyperparameters)\n\nestimator_new_features.fit(\n inputs={\n \"train\": \"{}/train/\".format(s3_data_path_new_features),\n \"test\": \"{}/test/\".format(s3_data_path_new_features),\n },\n wait=True,\n)",
"_____no_output_____"
]
],
[
[
"As before, we spawn an endpoint to visualize our forecasts on examples we send on the fly.",
"_____no_output_____"
]
],
[
[
"%%time\npredictor_new_features = estimator_new_features.deploy(\n initial_instance_count=1, instance_type=\"ml.m5.large\", predictor_cls=DeepARPredictor\n)",
"_____no_output_____"
],
[
"customer_id = 120\npredictor_new_features.predict(\n ts=time_series_processed[customer_id][:-prediction_length],\n dynamic_feat=[special_day_features[customer_id].tolist()],\n quantiles=[0.1, 0.5, 0.9],\n).head()",
"_____no_output_____"
]
],
[
[
"As before, we can query the endpoint to see predictions for arbitrary time series and time points.",
"_____no_output_____"
]
],
[
[
"@interact_manual(\n customer_id=IntSlider(min=0, max=369, value=13, style=style),\n forecast_day=IntSlider(min=0, max=100, value=21, style=style),\n confidence=IntSlider(min=60, max=95, value=80, step=5, style=style),\n missing_ratio=FloatSlider(min=0.0, max=0.95, value=0.2, step=0.05, style=style),\n show_samples=Checkbox(value=False),\n continuous_update=False,\n)\ndef plot_interact(customer_id, forecast_day, confidence, missing_ratio, show_samples):\n forecast_date = end_training + datetime.timedelta(days=forecast_day)\n ts = time_series_processed[customer_id]\n freq = ts.index.freq\n target = ts[start_dataset : forecast_date + prediction_length * freq]\n target = drop_at_random(target, missing_ratio)\n dynamic_feat = [\n special_day_features[customer_id][\n start_dataset : forecast_date + prediction_length * freq\n ].tolist()\n ]\n plot(\n predictor_new_features,\n target_ts=target,\n dynamic_feat=dynamic_feat,\n forecast_date=forecast_date,\n show_samples=show_samples,\n plot_history=7 * 12,\n confidence=confidence,\n )",
"_____no_output_____"
]
],
[
[
"### Delete endpoints",
"_____no_output_____"
]
],
[
[
"predictor.delete_endpoint()",
"_____no_output_____"
],
[
"predictor_new_features.delete_endpoint()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74c642855b4420e510a6966362c5e196ea312f8 | 592,721 | ipynb | Jupyter Notebook | conditional/main_conditional_disentangle_cifar_bs900_sratio_0_5_drop_0_5_rl_stdscale_6_rlw_0_1_run1.ipynb | minhtannguyen/ffjord | f3418249eaa4647f4339aea8d814cf2ce33be141 | [
"MIT"
] | null | null | null | conditional/main_conditional_disentangle_cifar_bs900_sratio_0_5_drop_0_5_rl_stdscale_6_rlw_0_1_run1.ipynb | minhtannguyen/ffjord | f3418249eaa4647f4339aea8d814cf2ce33be141 | [
"MIT"
] | null | null | null | conditional/main_conditional_disentangle_cifar_bs900_sratio_0_5_drop_0_5_rl_stdscale_6_rlw_0_1_run1.ipynb | minhtannguyen/ffjord | f3418249eaa4647f4339aea8d814cf2ce33be141 | [
"MIT"
] | null | null | null | 90.936023 | 2,034 | 0.517836 | [
[
[
"import os\nos.environ['CUDA_VISIBLE_DEVICES']='4,5,6,7'",
"_____no_output_____"
],
[
"%run -p ../train_cnf_disentangle_rl.py --data cifar10 --dims 64,64,64 --strides 1,1,1,1 --num_blocks 2 --layer_type concat --multiscale True --rademacher True --batch_size 900 --test_batch_size 500 --save ../experiments_published/cnf_conditional_disentangle_cifar10_bs900_sratio_0_5_drop_0_5_rl_stdscale_6_rlw_0_1_run1 --seed 1 --lr 0.001 --conditional True --controlled_tol False --train_mode semisup --log_freq 10 --weight_y 0.5 --condition_ratio 0.5 --dropout_rate 0.5 --scale_fac 1.0 --scale_std 6.0 --rl-weight 0.1\n#",
"/tancode/repos/tan-ffjord/train_cnf_disentangle_rl.py\nimport argparse\nimport os\nimport time\nimport numpy as np\n\nimport torch\nimport torch.optim as optim\nimport torchvision.datasets as dset\nimport torchvision.transforms as tforms\nfrom torchvision.utils import save_image\n\nimport lib.layers as layers\nimport lib.utils as utils\nimport lib.multiscale_parallel as multiscale_parallel\nimport lib.modules as modules\nimport lib.thops as thops\n\nfrom train_misc import standard_normal_logprob\nfrom train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time\nfrom train_misc import add_spectral_norm, spectral_norm_power_iteration\nfrom train_misc import create_regularization_fns, get_regularization, append_regularization_to_log\n\nfrom tensorboardX import SummaryWriter\n\n# go fast boi!!\ntorch.backends.cudnn.benchmark = True\nSOLVERS = [\"dopri5\", \"bdf\", \"rk4\", \"midpoint\", 'adams', 'explicit_adams']\nGATES = [\"cnn1\", \"cnn2\", \"rnn\"]\n\nparser = argparse.ArgumentParser(\"Continuous Normalizing Flow\")\nparser.add_argument(\"--data\", choices=[\"mnist\", \"svhn\", \"cifar10\", 'lsun_church'], type=str, default=\"mnist\")\nparser.add_argument(\"--dims\", type=str, default=\"8,32,32,8\")\nparser.add_argument(\"--strides\", type=str, default=\"2,2,1,-2,-2\")\nparser.add_argument(\"--num_blocks\", type=int, default=1, help='Number of stacked CNFs.')\n\nparser.add_argument(\"--conv\", type=eval, default=True, choices=[True, False])\nparser.add_argument(\n \"--layer_type\", type=str, default=\"ignore\",\n choices=[\"ignore\", \"concat\", \"concat_v2\", \"squash\", \"concatsquash\", \"concatcoord\", \"hyper\", \"blend\"]\n)\nparser.add_argument(\"--divergence_fn\", type=str, default=\"approximate\", choices=[\"brute_force\", \"approximate\"])\nparser.add_argument(\n \"--nonlinearity\", type=str, default=\"softplus\", choices=[\"tanh\", \"relu\", \"softplus\", \"elu\", \"swish\"]\n)\n\nparser.add_argument(\"--seed\", type=int, default=0)\n\nparser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)\nparser.add_argument('--atol', type=float, default=1e-5)\nparser.add_argument('--rtol', type=float, default=1e-5)\nparser.add_argument(\"--step_size\", type=float, default=None, help=\"Optional fixed step size.\")\n\nparser.add_argument('--gate', type=str, default='cnn1', choices=GATES)\nparser.add_argument('--scale', type=float, default=1.0)\nparser.add_argument('--scale_fac', type=float, default=1.0)\nparser.add_argument('--scale_std', type=float, default=1.0)\nparser.add_argument('--eta', default=0.1, type=float,\n help='tuning parameter that allows us to trade-off the competing goals of' \n 'minimizing the prediction loss and maximizing the gate rewards ')\nparser.add_argument('--rl-weight', default=0.01, type=float,\n help='rl weight')\n\nparser.add_argument('--gamma', default=0.99, type=float,\n help='discount factor, default: (0.99)')\n\nparser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])\nparser.add_argument('--test_atol', type=float, default=None)\nparser.add_argument('--test_rtol', type=float, default=None)\n\nparser.add_argument(\"--imagesize\", type=int, default=None)\nparser.add_argument(\"--alpha\", type=float, default=1e-6)\nparser.add_argument('--time_length', type=float, default=1.0)\nparser.add_argument('--train_T', type=eval, default=True)\n\nparser.add_argument(\"--num_epochs\", type=int, default=1000)\nparser.add_argument(\"--batch_size\", type=int, default=200)\nparser.add_argument(\n 
\"--batch_size_schedule\", type=str, default=\"\", help=\"Increases the batchsize at every given epoch, dash separated.\"\n)\nparser.add_argument(\"--test_batch_size\", type=int, default=200)\nparser.add_argument(\"--lr\", type=float, default=1e-3)\nparser.add_argument(\"--warmup_iters\", type=float, default=1000)\nparser.add_argument(\"--weight_decay\", type=float, default=0.0)\nparser.add_argument(\"--spectral_norm_niter\", type=int, default=10)\nparser.add_argument(\"--weight_y\", type=float, default=0.5)\n\nparser.add_argument(\"--add_noise\", type=eval, default=True, choices=[True, False])\nparser.add_argument(\"--batch_norm\", type=eval, default=False, choices=[True, False])\nparser.add_argument('--residual', type=eval, default=False, choices=[True, False])\nparser.add_argument('--autoencode', type=eval, default=False, choices=[True, False])\nparser.add_argument('--rademacher', type=eval, default=True, choices=[True, False])\nparser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])\nparser.add_argument('--multiscale', type=eval, default=False, choices=[True, False])\nparser.add_argument('--parallel', type=eval, default=False, choices=[True, False])\nparser.add_argument('--conditional', type=eval, default=False, choices=[True, False])\nparser.add_argument('--controlled_tol', type=eval, default=False, choices=[True, False])\nparser.add_argument(\"--train_mode\", choices=[\"semisup\", \"sup\", \"unsup\"], type=str, default=\"semisup\")\nparser.add_argument(\"--condition_ratio\", type=float, default=0.5)\nparser.add_argument(\"--dropout_rate\", type=float, default=0.0)\n\n\n# Regularizations\nparser.add_argument('--l1int', type=float, default=None, help=\"int_t ||f||_1\")\nparser.add_argument('--l2int', type=float, default=None, help=\"int_t ||f||_2\")\nparser.add_argument('--dl2int', type=float, default=None, help=\"int_t ||f^T df/dt||_2\")\nparser.add_argument('--JFrobint', type=float, default=None, help=\"int_t ||df/dx||_F\")\nparser.add_argument('--JdiagFrobint', type=float, default=None, help=\"int_t ||df_i/dx_i||_F\")\nparser.add_argument('--JoffdiagFrobint', type=float, default=None, help=\"int_t ||df/dx - df_i/dx_i||_F\")\n\nparser.add_argument(\"--time_penalty\", type=float, default=0, help=\"Regularization on the end_time.\")\nparser.add_argument(\n \"--max_grad_norm\", type=float, default=1e10,\n help=\"Max norm of graidents (default is just stupidly high to avoid any clipping)\"\n)\n\nparser.add_argument(\"--begin_epoch\", type=int, default=1)\nparser.add_argument(\"--resume\", type=str, default=None)\nparser.add_argument(\"--save\", type=str, default=\"experiments/cnf\")\nparser.add_argument(\"--val_freq\", type=int, default=1)\nparser.add_argument(\"--log_freq\", type=int, default=1)\n\nargs = parser.parse_args()\n\nimport lib.odenvp_conditional_rl as odenvp\n \n# set seed\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n\n# logger\nutils.makedirs(args.save)\nlogger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__)) # write to log file\nwriter = SummaryWriter(os.path.join(args.save, 'tensorboard')) # write to tensorboard\n\nif args.layer_type == \"blend\":\n logger.info(\"!! 
Setting time_length from None to 1.0 due to use of Blend layers.\")\n args.time_length = 1.0\n\nlogger.info(args)\n\n\ndef add_noise(x):\n \"\"\"\n [0, 1] -> [0, 255] -> add noise -> [0, 1]\n \"\"\"\n if args.add_noise:\n noise = x.new().resize_as_(x).uniform_()\n x = x * 255 + noise\n x = x / 256\n return x\n\n\ndef update_lr(optimizer, itr):\n iter_frac = min(float(itr + 1) / max(args.warmup_iters, 1), 1.0)\n lr = args.lr * iter_frac\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n\ndef get_train_loader(train_set, epoch):\n if args.batch_size_schedule != \"\":\n epochs = [0] + list(map(int, args.batch_size_schedule.split(\"-\")))\n n_passed = sum(np.array(epochs) <= epoch)\n current_batch_size = int(args.batch_size * n_passed)\n else:\n current_batch_size = args.batch_size\n train_loader = torch.utils.data.DataLoader(\n dataset=train_set, batch_size=current_batch_size, shuffle=True, drop_last=True, pin_memory=True\n )\n logger.info(\"===> Using batch size {}. Total {} iterations/epoch.\".format(current_batch_size, len(train_loader)))\n return train_loader\n\n\ndef get_dataset(args):\n trans = lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise])\n\n if args.data == \"mnist\":\n im_dim = 1\n im_size = 28 if args.imagesize is None else args.imagesize\n train_set = dset.MNIST(root=\"../data\", train=True, transform=trans(im_size), download=True)\n test_set = dset.MNIST(root=\"../data\", train=False, transform=trans(im_size), download=True)\n elif args.data == \"svhn\":\n im_dim = 3\n im_size = 32 if args.imagesize is None else args.imagesize\n train_set = dset.SVHN(root=\"../data\", split=\"train\", transform=trans(im_size), download=True)\n test_set = dset.SVHN(root=\"../data\", split=\"test\", transform=trans(im_size), download=True)\n elif args.data == \"cifar10\":\n im_dim = 3\n im_size = 32 if args.imagesize is None else args.imagesize\n train_set = dset.CIFAR10(\n root=\"../data\", train=True, transform=tforms.Compose([\n tforms.Resize(im_size),\n tforms.RandomHorizontalFlip(),\n tforms.ToTensor(),\n add_noise,\n ]), download=True\n )\n test_set = dset.CIFAR10(root=\"../data\", train=False, transform=trans(im_size), download=True)\n elif args.data == 'celeba':\n im_dim = 3\n im_size = 64 if args.imagesize is None else args.imagesize\n train_set = dset.CelebA(\n train=True, transform=tforms.Compose([\n tforms.ToPILImage(),\n tforms.Resize(im_size),\n tforms.RandomHorizontalFlip(),\n tforms.ToTensor(),\n add_noise,\n ])\n )\n test_set = dset.CelebA(\n train=False, transform=tforms.Compose([\n tforms.ToPILImage(),\n tforms.Resize(im_size),\n tforms.ToTensor(),\n add_noise,\n ])\n )\n elif args.data == 'lsun_church':\n im_dim = 3\n im_size = 64 if args.imagesize is None else args.imagesize\n train_set = dset.LSUN(\n '../data', ['church_outdoor_train'], transform=tforms.Compose([\n tforms.Resize(96),\n tforms.RandomCrop(64),\n tforms.Resize(im_size),\n tforms.ToTensor(),\n add_noise,\n ])\n )\n test_set = dset.LSUN(\n '../data', ['church_outdoor_val'], transform=tforms.Compose([\n tforms.Resize(96),\n tforms.RandomCrop(64),\n tforms.Resize(im_size),\n tforms.ToTensor(),\n add_noise,\n ])\n ) \n elif args.data == 'imagenet_64':\n im_dim = 3\n im_size = 64 if args.imagesize is None else args.imagesize\n train_set = dset.ImageFolder(\n train=True, transform=tforms.Compose([\n tforms.ToPILImage(),\n tforms.Resize(im_size),\n tforms.RandomHorizontalFlip(),\n tforms.ToTensor(),\n add_noise,\n ])\n )\n test_set = dset.ImageFolder(\n 
train=False, transform=tforms.Compose([\n tforms.ToPILImage(),\n tforms.Resize(im_size),\n tforms.ToTensor(),\n add_noise,\n ])\n )\n \n data_shape = (im_dim, im_size, im_size)\n if not args.conv:\n data_shape = (im_dim * im_size * im_size,)\n\n test_loader = torch.utils.data.DataLoader(\n dataset=test_set, batch_size=args.test_batch_size, shuffle=False, drop_last=True\n )\n return train_set, test_loader, data_shape\n\n\ndef compute_bits_per_dim(x, model):\n zero = torch.zeros(x.shape[0], 1).to(x)\n\n # Don't use data parallelize if batch size is small.\n # if x.shape[0] < 200:\n # model = model.module\n \n z, delta_logp, atol, rtol, logp_actions, nfe = model(x, zero) # run model forward\n\n logpz = standard_normal_logprob(z).view(z.shape[0], -1).sum(1, keepdim=True) # logp(z)\n logpx = logpz - delta_logp\n\n logpx_per_dim = torch.sum(logpx) / x.nelement() # averaged over batches\n bits_per_dim = -(logpx_per_dim - np.log(256)) / np.log(2)\n\n return bits_per_dim, atol, rtol, logp_actions, nfe\n\ndef compute_bits_per_dim_conditional(x, y, model):\n zero = torch.zeros(x.shape[0], 1).to(x)\n y_onehot = thops.onehot(y, num_classes=model.module.y_class).to(x)\n\n # Don't use data parallelize if batch size is small.\n # if x.shape[0] < 200:\n # model = model.module\n \n z, delta_logp, atol, rtol, logp_actions, nfe = model(x, zero) # run model forward\n \n dim_sup = int(args.condition_ratio * np.prod(z.size()[1:]))\n \n # prior\n mean, logs = model.module._prior(y_onehot)\n\n logpz_sup = modules.GaussianDiag.logp(mean, logs, z[:, 0:dim_sup]).view(-1,1) # logp(z)_sup\n logpz_unsup = standard_normal_logprob(z[:, dim_sup:]).view(z.shape[0], -1).sum(1, keepdim=True)\n logpz = logpz_sup + logpz_unsup\n logpx = logpz - delta_logp\n\n logpx_per_dim = torch.sum(logpx) / x.nelement() # averaged over batches\n bits_per_dim = -(logpx_per_dim - np.log(256)) / np.log(2)\n \n # dropout\n if args.dropout_rate > 0:\n zsup = model.module.dropout(z[:, 0:dim_sup])\n else:\n zsup = z[:, 0:dim_sup]\n \n # compute xentropy loss\n y_logits = model.module.project_class(zsup)\n loss_xent = model.module.loss_class(y_logits, y.to(x.get_device()))\n y_predicted = np.argmax(y_logits.cpu().detach().numpy(), axis=1)\n\n return bits_per_dim, loss_xent, y_predicted, atol, rtol, logp_actions, nfe\n\ndef create_model(args, data_shape, regularization_fns):\n hidden_dims = tuple(map(int, args.dims.split(\",\")))\n strides = tuple(map(int, args.strides.split(\",\")))\n\n if args.multiscale:\n model = odenvp.ODENVP(\n (args.batch_size, *data_shape),\n n_blocks=args.num_blocks,\n intermediate_dims=hidden_dims,\n nonlinearity=args.nonlinearity,\n alpha=args.alpha,\n cnf_kwargs={\"T\": args.time_length, \"train_T\": args.train_T, \"regularization_fns\": regularization_fns, \"solver\": args.solver, \"atol\": args.atol, \"rtol\": args.rtol, \"scale\": args.scale, \"scale_fac\": args.scale_fac, \"scale_std\": args.scale_std, \"gate\": args.gate},\n condition_ratio=args.condition_ratio,\n dropout_rate=args.dropout_rate,)\n elif args.parallel:\n model = multiscale_parallel.MultiscaleParallelCNF(\n (args.batch_size, *data_shape),\n n_blocks=args.num_blocks,\n intermediate_dims=hidden_dims,\n alpha=args.alpha,\n time_length=args.time_length,\n )\n else:\n if args.autoencode:\n\n def build_cnf():\n autoencoder_diffeq = layers.AutoencoderDiffEqNet(\n hidden_dims=hidden_dims,\n input_shape=data_shape,\n strides=strides,\n conv=args.conv,\n layer_type=args.layer_type,\n nonlinearity=args.nonlinearity,\n )\n odefunc = layers.AutoencoderODEfunc(\n 
autoencoder_diffeq=autoencoder_diffeq,\n divergence_fn=args.divergence_fn,\n residual=args.residual,\n rademacher=args.rademacher,\n )\n cnf = layers.CNF(\n odefunc=odefunc,\n T=args.time_length,\n regularization_fns=regularization_fns,\n solver=args.solver,\n )\n return cnf\n else:\n\n def build_cnf():\n diffeq = layers.ODEnet(\n hidden_dims=hidden_dims,\n input_shape=data_shape,\n strides=strides,\n conv=args.conv,\n layer_type=args.layer_type,\n nonlinearity=args.nonlinearity,\n )\n odefunc = layers.ODEfunc(\n diffeq=diffeq,\n divergence_fn=args.divergence_fn,\n residual=args.residual,\n rademacher=args.rademacher,\n )\n cnf = layers.CNF(\n odefunc=odefunc,\n T=args.time_length,\n train_T=args.train_T,\n regularization_fns=regularization_fns,\n solver=args.solver,\n )\n return cnf\n\n chain = [layers.LogitTransform(alpha=args.alpha)] if args.alpha > 0 else [layers.ZeroMeanTransform()]\n chain = chain + [build_cnf() for _ in range(args.num_blocks)]\n if args.batch_norm:\n chain.append(layers.MovingBatchNorm2d(data_shape[0]))\n model = layers.SequentialFlow(chain)\n return model\n\n\nif __name__ == \"__main__\":\n\n # get deivce\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True)\n\n # load dataset\n train_set, test_loader, data_shape = get_dataset(args)\n\n # build model\n regularization_fns, regularization_coeffs = create_regularization_fns(args)\n model = create_model(args, data_shape, regularization_fns)\n\n if args.spectral_norm: add_spectral_norm(model, logger)\n set_cnf_options(args, model)\n\n logger.info(model)\n logger.info(\"Number of trainable parameters: {}\".format(count_parameters(model)))\n \n writer.add_text('info', \"Number of trainable parameters: {}\".format(count_parameters(model)))\n\n # optimizer\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n \n # set initial iter\n itr = 1\n \n # set the meters\n time_epoch_meter = utils.RunningAverageMeter(0.97)\n time_meter = utils.RunningAverageMeter(0.97)\n loss_meter = utils.RunningAverageMeter(0.97) # track total loss\n nll_meter = utils.RunningAverageMeter(0.97) # track negative log-likelihood\n xent_meter = utils.RunningAverageMeter(0.97) # track xentropy score\n error_meter = utils.RunningAverageMeter(0.97) # track error score\n steps_meter = utils.RunningAverageMeter(0.97)\n grad_meter = utils.RunningAverageMeter(0.97)\n tt_meter = utils.RunningAverageMeter(0.97)\n\n # restore parameters\n if args.resume is not None:\n checkpt = torch.load(args.resume, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpt[\"state_dict\"])\n if \"optim_state_dict\" in checkpt.keys():\n optimizer.load_state_dict(checkpt[\"optim_state_dict\"])\n # Manually move optimizer state to device.\n for state in optimizer.state.values():\n for k, v in state.items():\n if torch.is_tensor(v):\n state[k] = cvt(v)\n args.begin_epoch = checkpt['epoch'] + 1\n itr = checkpt['iter'] + 1\n time_epoch_meter.set(checkpt['epoch_time_avg'])\n time_meter.set(checkpt['time_train'])\n loss_meter.set(checkpt['loss_train'])\n nll_meter.set(checkpt['bits_per_dim_train'])\n xent_meter.set(checkpt['xent_train'])\n error_meter.set(checkpt['error_train'])\n steps_meter.set(checkpt['nfe_train'])\n grad_meter.set(checkpt['grad_train'])\n tt_meter.set(checkpt['total_time_train'])\n\n if torch.cuda.is_available():\n model = torch.nn.DataParallel(model).cuda()\n\n # For visualization.\n if args.conditional:\n dim_unsup = 
int((1.0 - args.condition_ratio) * np.prod(data_shape))\n fixed_y = torch.from_numpy(np.arange(model.module.y_class)).repeat(model.module.y_class).type(torch.long).to(device, non_blocking=True)\n fixed_y_onehot = thops.onehot(fixed_y, num_classes=model.module.y_class)\n with torch.no_grad():\n mean, logs = model.module._prior(fixed_y_onehot)\n fixed_z_sup = modules.GaussianDiag.sample(mean, logs)\n fixed_z_unsup = cvt(torch.randn(model.module.y_class**2, dim_unsup))\n fixed_z = torch.cat((fixed_z_sup, fixed_z_unsup),1)\n else:\n fixed_z = cvt(torch.randn(100, *data_shape))\n \n\n if args.spectral_norm and not args.resume: spectral_norm_power_iteration(model, 500)\n\n best_loss_nll = float(\"inf\")\n best_error_score = float(\"inf\")\n \n for epoch in range(args.begin_epoch, args.num_epochs + 1):\n start_epoch = time.time()\n model.train()\n train_loader = get_train_loader(train_set, epoch)\n for _, (x, y) in enumerate(train_loader):\n start = time.time()\n update_lr(optimizer, itr)\n optimizer.zero_grad()\n\n if not args.conv:\n x = x.view(x.shape[0], -1)\n\n # cast data and move to device\n x = cvt(x)\n \n # compute loss\n if args.conditional:\n loss_nll, loss_xent, y_predicted, atol, rtol, logp_actions, nfe = compute_bits_per_dim_conditional(x, y, model)\n if args.train_mode == \"semisup\":\n loss = loss_nll + args.weight_y * loss_xent\n elif args.train_mode == \"sup\":\n loss = loss_xent\n elif args.train_mode == \"unsup\":\n loss = loss_nll\n else:\n raise ValueError('Choose supported train_mode: semisup, sup, unsup')\n error_score = 1. - np.mean(y_predicted.astype(int) == y.numpy()) \n \n else:\n loss, atol, rtol, logp_actions, nfe = compute_bits_per_dim(x, model)\n loss_nll, loss_xent, error_score = loss, 0., 0.\n \n if regularization_coeffs:\n reg_states = get_regularization(model, regularization_coeffs)\n reg_loss = sum(\n reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0\n )\n loss = loss + reg_loss\n total_time = count_total_time(model)\n loss = loss + total_time * args.time_penalty\n\n # re-weight the gate rewards\n normalized_eta = args.eta / len(logp_actions)\n \n # collect cumulative future rewards\n R = - loss\n cum_rewards = []\n for r in nfe[::-1]:\n R = -normalized_eta * r.view(-1,1) + args.gamma * R\n cum_rewards.insert(0,R)\n \n # apply REINFORCE\n rl_loss = 0\n for lpa, r in zip(logp_actions, cum_rewards):\n rl_loss = rl_loss - lpa.view(-1,1) * args.rl_weight * r\n \n loss = loss + rl_loss.mean()\n \n loss.backward()\n \n grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n\n if args.spectral_norm: spectral_norm_power_iteration(model, args.spectral_norm_niter)\n \n time_meter.update(time.time() - start)\n loss_meter.update(loss.item())\n nll_meter.update(loss_nll.item())\n if args.conditional:\n xent_meter.update(loss_xent.item())\n else:\n xent_meter.update(loss_xent)\n error_meter.update(error_score)\n steps_meter.update(count_nfe(model))\n grad_meter.update(grad_norm)\n tt_meter.update(total_time)\n \n for idx in range(len(model.module.transforms)):\n for layer in model.module.transforms[idx].chain:\n if hasattr(layer, 'atol'):\n layer.odefunc.after_odeint()\n \n # write to tensorboard\n writer.add_scalars('time', {'train_iter': time_meter.val}, itr)\n writer.add_scalars('loss', {'train_iter': loss_meter.val}, itr)\n writer.add_scalars('bits_per_dim', {'train_iter': nll_meter.val}, itr)\n writer.add_scalars('xent', {'train_iter': xent_meter.val}, itr)\n 
writer.add_scalars('error', {'train_iter': error_meter.val}, itr)\n writer.add_scalars('nfe', {'train_iter': steps_meter.val}, itr)\n writer.add_scalars('grad', {'train_iter': grad_meter.val}, itr)\n writer.add_scalars('total_time', {'train_iter': tt_meter.val}, itr)\n\n if itr % args.log_freq == 0:\n for tol_indx in range(len(atol)):\n writer.add_scalars('atol_%i'%tol_indx, {'train': atol[tol_indx].mean()}, itr)\n writer.add_scalars('rtol_%i'%tol_indx, {'train': rtol[tol_indx].mean()}, itr)\n \n log_message = (\n \"Iter {:04d} | Time {:.4f}({:.4f}) | Bit/dim {:.4f}({:.4f}) | Xent {:.4f}({:.4f}) | Loss {:.4f}({:.4f}) | Error {:.4f}({:.4f}) \"\n \"Steps {:.0f}({:.2f}) | Grad Norm {:.4f}({:.4f}) | Total Time {:.2f}({:.2f})\".format(\n itr, time_meter.val, time_meter.avg, nll_meter.val, nll_meter.avg, xent_meter.val, xent_meter.avg, loss_meter.val, loss_meter.avg, error_meter.val, error_meter.avg, steps_meter.val, steps_meter.avg, grad_meter.val, grad_meter.avg, tt_meter.val, tt_meter.avg\n )\n )\n if regularization_coeffs:\n log_message = append_regularization_to_log(log_message, regularization_fns, reg_states)\n logger.info(log_message)\n writer.add_text('info', log_message, itr)\n\n itr += 1\n \n # compute test loss\n model.eval()\n if epoch % args.val_freq == 0:\n with torch.no_grad():\n # write to tensorboard\n writer.add_scalars('time', {'train_epoch': time_meter.avg}, epoch)\n writer.add_scalars('loss', {'train_epoch': loss_meter.avg}, epoch)\n writer.add_scalars('bits_per_dim', {'train_epoch': nll_meter.avg}, epoch)\n writer.add_scalars('xent', {'train_epoch': xent_meter.avg}, epoch)\n writer.add_scalars('error', {'train_epoch': error_meter.avg}, epoch)\n writer.add_scalars('nfe', {'train_epoch': steps_meter.avg}, epoch)\n writer.add_scalars('grad', {'train_epoch': grad_meter.avg}, epoch)\n writer.add_scalars('total_time', {'train_epoch': tt_meter.avg}, epoch)\n \n start = time.time()\n logger.info(\"validating...\")\n writer.add_text('info', \"validating...\", epoch)\n losses_nll = []; losses_xent = []; losses = []\n total_correct = 0\n \n for (x, y) in test_loader:\n if not args.conv:\n x = x.view(x.shape[0], -1)\n x = cvt(x)\n if args.conditional:\n loss_nll, loss_xent, y_predicted, atol, rtol, logp_actions, nfe = compute_bits_per_dim_conditional(x, y, model)\n if args.train_mode == \"semisup\":\n loss = loss_nll + args.weight_y * loss_xent\n elif args.train_mode == \"sup\":\n loss = loss_xent\n elif args.train_mode == \"unsup\":\n loss = loss_nll\n else:\n raise ValueError('Choose supported train_mode: semisup, sup, unsup')\n total_correct += np.sum(y_predicted.astype(int) == y.numpy())\n else:\n loss, atol, rtol, logp_actions, nfe = compute_bits_per_dim(x, model)\n loss_nll, loss_xent = loss, 0.\n losses_nll.append(loss_nll.cpu().numpy()); losses.append(loss.cpu().numpy())\n if args.conditional: \n losses_xent.append(loss_xent.cpu().numpy())\n else:\n losses_xent.append(loss_xent)\n \n loss_nll = np.mean(losses_nll); loss_xent = np.mean(losses_xent); loss = np.mean(losses)\n error_score = 1. 
- total_correct / len(test_loader.dataset)\n time_epoch_meter.update(time.time() - start_epoch)\n \n # write to tensorboard\n test_time_spent = time.time() - start\n writer.add_scalars('time', {'validation': test_time_spent}, epoch)\n writer.add_scalars('epoch_time', {'validation': time_epoch_meter.val}, epoch)\n writer.add_scalars('bits_per_dim', {'validation': loss_nll}, epoch)\n writer.add_scalars('xent', {'validation': loss_xent}, epoch)\n writer.add_scalars('loss', {'validation': loss}, epoch)\n writer.add_scalars('error', {'validation': error_score}, epoch)\n \n for tol_indx in range(len(atol)):\n writer.add_scalars('atol_%i'%tol_indx, {'validation': atol[tol_indx].mean()}, epoch)\n writer.add_scalars('rtol_%i'%tol_indx, {'validation': rtol[tol_indx].mean()}, epoch)\n \n log_message = \"Epoch {:04d} | Time {:.4f}, Epoch Time {:.4f}({:.4f}), Bit/dim {:.4f}(best: {:.4f}), Xent {:.4f}, Loss {:.4f}, Error {:.4f}(best: {:.4f})\".format(epoch, time.time() - start, time_epoch_meter.val, time_epoch_meter.avg, loss_nll, best_loss_nll, loss_xent, loss, error_score, best_error_score)\n logger.info(log_message)\n writer.add_text('info', log_message, epoch)\n \n for name, param in model.named_parameters():\n writer.add_histogram(name, param.clone().cpu().data.numpy(), epoch)\n \n \n utils.makedirs(args.save)\n torch.save({\n \"args\": args,\n \"state_dict\": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),\n \"optim_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"iter\": itr-1,\n \"error\": error_score,\n \"loss\": loss,\n \"xent\": loss_xent,\n \"bits_per_dim\": loss_nll,\n \"best_bits_per_dim\": best_loss_nll,\n \"best_error_score\": best_error_score,\n \"epoch_time\": time_epoch_meter.val,\n \"epoch_time_avg\": time_epoch_meter.avg,\n \"time\": test_time_spent,\n \"error_train\": error_meter.avg,\n \"loss_train\": loss_meter.avg,\n \"xent_train\": xent_meter.avg,\n \"bits_per_dim_train\": nll_meter.avg,\n \"total_time_train\": tt_meter.avg,\n \"time_train\": time_meter.avg,\n \"nfe_train\": steps_meter.avg,\n \"grad_train\": grad_meter.avg,\n }, os.path.join(args.save, \"epoch_%i_checkpt.pth\"%epoch))\n \n torch.save({\n \"args\": args,\n \"state_dict\": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),\n \"optim_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"iter\": itr-1,\n \"error\": error_score,\n \"loss\": loss,\n \"xent\": loss_xent,\n \"bits_per_dim\": loss_nll,\n \"best_bits_per_dim\": best_loss_nll,\n \"best_error_score\": best_error_score,\n \"epoch_time\": time_epoch_meter.val,\n \"epoch_time_avg\": time_epoch_meter.avg,\n \"time\": test_time_spent,\n \"error_train\": error_meter.avg,\n \"loss_train\": loss_meter.avg,\n \"xent_train\": xent_meter.avg,\n \"bits_per_dim_train\": nll_meter.avg,\n \"total_time_train\": tt_meter.avg,\n \"time_train\": time_meter.avg,\n \"nfe_train\": steps_meter.avg,\n \"grad_train\": grad_meter.avg,\n }, os.path.join(args.save, \"current_checkpt.pth\"))\n \n if loss_nll < best_loss_nll:\n best_loss_nll = loss_nll\n utils.makedirs(args.save)\n torch.save({\n \"args\": args,\n \"state_dict\": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),\n \"optim_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"iter\": itr-1,\n \"error\": error_score,\n \"loss\": loss,\n \"xent\": loss_xent,\n \"bits_per_dim\": loss_nll,\n \"best_bits_per_dim\": best_loss_nll,\n \"best_error_score\": best_error_score,\n \"epoch_time\": 
time_epoch_meter.val,\n \"epoch_time_avg\": time_epoch_meter.avg,\n \"time\": test_time_spent,\n \"error_train\": error_meter.avg,\n \"loss_train\": loss_meter.avg,\n \"xent_train\": xent_meter.avg,\n \"bits_per_dim_train\": nll_meter.avg,\n \"total_time_train\": tt_meter.avg,\n \"time_train\": time_meter.avg,\n \"nfe_train\": steps_meter.avg,\n \"grad_train\": grad_meter.avg,\n }, os.path.join(args.save, \"best_nll_checkpt.pth\"))\n \n if args.conditional:\n if error_score < best_error_score:\n best_error_score = error_score\n utils.makedirs(args.save)\n torch.save({\n \"args\": args,\n \"state_dict\": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),\n \"optim_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"iter\": itr-1,\n \"error\": error_score,\n \"loss\": loss,\n \"xent\": loss_xent,\n \"bits_per_dim\": loss_nll,\n \"best_bits_per_dim\": best_loss_nll,\n \"best_error_score\": best_error_score,\n \"epoch_time\": time_epoch_meter.val,\n \"epoch_time_avg\": time_epoch_meter.avg,\n \"time\": test_time_spent,\n \"error_train\": error_meter.avg,\n \"loss_train\": loss_meter.avg,\n \"xent_train\": xent_meter.avg,\n \"bits_per_dim_train\": nll_meter.avg,\n \"total_time_train\": tt_meter.avg,\n \"time_train\": time_meter.avg,\n \"nfe_train\": steps_meter.avg,\n \"grad_train\": grad_meter.avg,\n }, os.path.join(args.save, \"best_error_checkpt.pth\"))\n \n\n # visualize samples and density\n with torch.no_grad():\n fig_filename = os.path.join(args.save, \"figs\", \"{:04d}.jpg\".format(epoch))\n utils.makedirs(os.path.dirname(fig_filename))\n generated_samples, atol, rtol, logp_actions, nfe = model(fixed_z, reverse=True)\n generated_samples = generated_samples.view(-1, *data_shape)\n for tol_indx in range(len(atol)):\n writer.add_scalars('atol_gen_%i'%tol_indx, {'validation': atol[tol_indx].mean()}, epoch)\n writer.add_scalars('rtol_gen_%i'%tol_indx, {'validation': rtol[tol_indx].mean()}, epoch)\n save_image(generated_samples, fig_filename, nrow=10)\n if args.data == \"mnist\":\n writer.add_images('generated_images', generated_samples.repeat(1,3,1,1), epoch)\n else:\n writer.add_images('generated_images', generated_samples.repeat(1,1,1,1), epoch)\nNamespace(JFrobint=None, JdiagFrobint=None, JoffdiagFrobint=None, add_noise=True, alpha=1e-06, atol=1e-05, autoencode=False, batch_norm=False, batch_size=900, batch_size_schedule='', begin_epoch=1, condition_ratio=0.5, conditional=True, controlled_tol=False, conv=True, data='cifar10', dims='64,64,64', divergence_fn='approximate', dl2int=None, dropout_rate=0.5, eta=0.1, gamma=0.99, gate='cnn1', imagesize=None, l1int=None, l2int=None, layer_type='concat', log_freq=10, lr=0.001, max_grad_norm=10000000000.0, multiscale=True, nonlinearity='softplus', num_blocks=2, num_epochs=1000, parallel=False, rademacher=True, residual=False, resume=None, rl_weight=0.1, rtol=1e-05, save='../experiments_published/cnf_conditional_disentangle_cifar10_bs900_sratio_0_5_drop_0_5_rl_stdscale_6_rlw_0_1_run1', scale=1.0, scale_fac=1.0, scale_std=6.0, seed=1, solver='dopri5', spectral_norm=False, spectral_norm_niter=10, step_size=None, strides='1,1,1,1', test_atol=None, test_batch_size=500, test_rtol=None, test_solver=None, time_length=1.0, time_penalty=0, train_T=True, train_mode='semisup', val_freq=1, warmup_iters=1000, weight_decay=0.0, weight_y=0.5)\n"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e74c9efb3e0e89514a6c19324605b209b22aedbf | 51,207 | ipynb | Jupyter Notebook | notebooks/training_model.ipynb | nft-appraiser/nft-appraiser-ml | 02ae79defaf0840234eb1494fda4e97101503044 | [
"MIT"
] | null | null | null | notebooks/training_model.ipynb | nft-appraiser/nft-appraiser-ml | 02ae79defaf0840234eb1494fda4e97101503044 | [
"MIT"
] | null | null | null | notebooks/training_model.ipynb | nft-appraiser/nft-appraiser-ml | 02ae79defaf0840234eb1494fda4e97101503044 | [
"MIT"
] | null | null | null | 45.275862 | 271 | 0.498838 | [
[
[
"# 価格予測モデルのBaseline \n- CNNを用いたモデルを作成する. \n- 価格予測とクラス分類でタスクが大きく異なるので,imagenetで学習したモデルを用いないものを最初に作成する. \n- サイトに載せられる画像を教師データとしており,画像が大きく回転したりなどは不要と考えられるためそのような前処理は行わない. \n- 損失関数にはmaeもしくはrmseを用いる. \n\n## モデルの構築 \n- EfficientNetB0(未学習)を用いて特徴量を抽出. \n- num_sales, コレクション名のone-hotベクトルを抽出した特徴量に結合. \n- 全結合層を重ねて出力. \n- ImageNetを用いて事前学習したものとしていないもので比較する. \n- 目的変数をそのまま予測するとスケールが大きすぎるので,先に対数変換して評価関数にRMSE, MAEなどを用いるほうが良いかも. \n- **このノートブックでやっているのは事前学習有り.** \n\n## 評価関数 \n- RMSLEを用いる. \n$$RMSLE = \\sqrt{\\frac{1}{n}\\sum_{i=1}^n (\\log{(y_i+1)} - \\log{(\\hat{y_i} +1)})^2}$$\n\n- 追加でMAPEを用いてみる. \n$$MAPE = \\frac{100}{n} \\sum_{i=1}^n |\\frac{\\hat{y}_i - y_i}{y_i}|$$\n\nタスクAに関してはデータ不足の可能性が考えられるため,特徴量抽出とともにデータを追加で収集する. \n\n## 変数(タスクA) \n- 目的変数: last_sale.total_price \n- 説明変数: 画像データ,コレクション名(collection.name),num_sales,\n\n## 変数(タスクB) \n- 目的変数: last_sale.total_price \n- 説明変数: 画像データ ",
"_____no_output_____"
]
],
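  [
   [
    "# Sketch (added for clarity, not part of the original training pipeline) of the two\n# evaluation metrics described above. It assumes numpy arrays of prices on the\n# original (non-log) scale.\nimport numpy as np\n\n\ndef rmsle(y_true, y_pred):\n    \"\"\"Root mean squared logarithmic error.\"\"\"\n    return np.sqrt(np.mean((np.log1p(y_true) - np.log1p(y_pred)) ** 2))\n\n\ndef mape(y_true, y_pred):\n    \"\"\"Mean absolute percentage error (in %).\"\"\"\n    return 100.0 * np.mean(np.abs((y_pred - y_true) / y_true))",
    "_____no_output_____"
   ]
  ],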
[
[
"import os\nfrom typing import List, Optional, Tuple, Dict\nimport math\nimport tempfile\nimport random\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nimport cv2\nimport tensorflow as tf\nimport tensorflow.keras.layers as layers\nimport tensorflow.keras.models as models\nimport tensorflow.keras.losses as losses\nimport tensorflow.keras.optimizers as optim\nimport tensorflow.keras.activations as activations\nfrom tensorflow.keras.utils import Sequence\nfrom tensorflow.keras.wrappers.scikit_learn import KerasRegressor\nimport tensorflow.keras.callbacks as callbacks\nfrom tensorflow.keras.applications import EfficientNetB0 as efn\nimport cloudpickle",
"_____no_output_____"
],
[
"A_IMGPATH = \"../data/taskA/img\"\nA_DFPATH = \"../data/taskA/table\"\nB_IMGPATH = \"../data/taskB/img\"\nB_DFPATH = \"../data/taskB/table\"\nasset_df_A = pd.read_csv(os.path.join(A_DFPATH, \"asset_data.csv\"))\nasset_df_B = pd.read_csv(os.path.join(B_DFPATH, \"asset_data.csv\"))\n\nasset_df_A = asset_df_A.rename(columns={\"last_sale.total_price\": \"target\"})\nasset_df_B = asset_df_B.rename(columns={\"last_sale.total_price\": \"target\"})\n\nasset_df_A = pd.concat((asset_df_A, pd.get_dummies(asset_df_A[\"collection.name\"])), axis=1)\nasset_df_B[asset_df_A[\"collection.name\"].unique()] = 0\n\nasset_df_A[\"full_path\"] =\\\n asset_df_A[\"image_id\"].apply(lambda x: A_IMGPATH + \"/\" + x)\nasset_df_B[\"full_path\"] =\\\n asset_df_B[\"image_id\"].apply(lambda x: B_IMGPATH + \"/\" + x)\n\nasset_df_A['target'] = asset_df_A['target'].astype(float) * 1e-18\nasset_df_B['target'] = asset_df_B['target'].astype(float) * 1e-18\nasset_df_A = asset_df_A.query('target > 0')\nasset_df_B = asset_df_B.query('target > 0')\nasset_df_A['target'] = asset_df_A['target'].apply(lambda x: np.log1p(x))\nasset_df_B['target'] = asset_df_B['target'].apply(lambda x: np.log1p(x))\n\nos.makedirs(\"../models\", exist_ok=True)\n\nprint(f\"data shape: {asset_df_A.shape}\")\nprint(f\"data shape: {asset_df_B.shape}\")",
"/usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py:3444: DtypeWarning: Columns (3,27,28,71,88,119) have mixed types.Specify dtype option on import or set low_memory=False.\n exec(code_obj, self.user_global_ns, self.user_ns)\n/usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py:3444: DtypeWarning: Columns (121,122,123,124) have mixed types.Specify dtype option on import or set low_memory=False.\n exec(code_obj, self.user_global_ns, self.user_ns)\n"
]
],
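  [
   [
    "# Note (added sketch): 'target' above is log1p of the price in ETH (total_price in wei\n# scaled by 1e-18). A prediction on that scale can be mapped back to ETH with expm1.\ndef log1p_target_to_eth(pred_log1p):\n    return np.expm1(pred_log1p)",
    "_____no_output_____"
   ]
  ],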
[
[
"## Helper Functions ",
"_____no_output_____"
],
[
"### DataLoader ",
"_____no_output_____"
]
],
[
[
"class FullPathDataLoader(Sequence):\n \"\"\"\n Data loader that load images, meta data and targets.\n This class is inherited Sequence class of Keras.\n \"\"\"\n\n def __init__(self, path_list: np.ndarray, target: Optional[np.ndarray],\n meta_data: Optional[np.ndarray] = None, batch_size: int = 16,\n task: str = \"B\", width: int = 256, height: int = 256,\n resize: bool = True, shuffle: bool = True, is_train: bool = True):\n \"\"\"\n Constructor. This method determines class variables.\n\n Parameters\n ----------\n path_list : np.ndarray[str]\n The array of absolute paths of images.\n meta_data : np.ndarray[int]\n One-hot vector of collections.\n target : np.ndarray\n Array of target variavles.\n batch_size : int\n Batch size used when model training.\n task : str\n Please determine this data loader will be used for task A or B(default=A).\n width : int\n Width of resized image.\n height : int\n Height of resize image.\n resize : bool\n Flag determine whether to resize.\n shuffle : bool\n Flag determine whether to shuffle on epoch end.\n is_train : bool\n Determine whether this data loader will be used training model.\n if you won't this data loader, you have set 'is_train'=False.\n \"\"\"\n self.path_list = path_list\n self.batch_size = batch_size\n self.task = task\n self.width = width\n self.height = height\n self.resize = resize\n self.shuffle = shuffle\n self.is_train = is_train\n self.length = math.ceil(len(self.path_list) / self.batch_size)\n\n if self.is_train:\n self.target = target\n if self.task == \"A\":\n self.meta_data = meta_data\n\n def __len__(self):\n \"\"\"\n Returns\n -------\n self.length : data length\n \"\"\"\n return self.length\n\n def get_img(self, path_list: np.ndarray):\n \"\"\"\n Load image data and resize image if 'resize'=True.\n\n Parameters\n ----------\n path_liist : np.ndarray\n The array of relative image paths from directory 'dir_name'.\n Size of this array is 'batch_size'.\n\n Returns\n -------\n img_list : np.ndarray\n The array of image data.\n Size of an image is (width, height, 3) if 'resize'=True.\n '\"\"\"\n img_list = []\n for path in path_list:\n img = cv2.imread(path)\n img = cv2.resize(img, (self.width, self.height))\n img = img / 255.\n img_list.append(img)\n\n img_list = np.array(img_list)\n return img_list\n\n def _shuffle(self):\n \"\"\"\n Shuffle path_list, meta model.\n If 'is_train' is True, target is shuffled in association path_list.\n \"\"\"\n idx = np.random.permutation(len(self.path_list))\n self.path_list = self.path_list[idx]\n if self.task == \"A\":\n self.meta_data = self.meta_data[idx]\n if self.is_train:\n self.target = self.target[idx]\n\n def __getitem__(self, idx):\n path_list = self.path_list[self.batch_size*idx:self.batch_size*(idx+1)]\n img_list = self.get_img(path_list)\n if self.is_train:\n target_list = self.target[self.batch_size*idx:self.batch_size*(idx+1)]\n if self.task == \"A\":\n meta = self.meta_data[self.batch_size*idx:self.batch_size*(idx+1)]\n return (img_list, meta), target_list\n else:\n return img_list, target_list\n else:\n if self.task == \"A\":\n meta = self.meta_data[self.batch_size*idx:self.batch_size*(idx+1)]\n return ((img_list, meta),)\n else:\n return img_list\n\n def on_epoch_end(self):\n if self.is_train:\n self._shuffle()",
"_____no_output_____"
]
],
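  [
   [
    "# Minimal usage sketch of FullPathDataLoader for task B (illustrative only): read one\n# batch of images and targets from the dataframe prepared above.\nexample_loader = FullPathDataLoader(\n    path_list=asset_df_B['full_path'].values,\n    target=asset_df_B['target'].values,\n    batch_size=16,\n    task='B',\n)\nbatch_images, batch_targets = example_loader[0]\nprint(batch_images.shape, batch_targets.shape)",
    "_____no_output_____"
   ]
  ],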
[
[
"### seed settings ",
"_____no_output_____"
]
],
[
[
"def set_seed(random_state=6174):\n tf.random.set_seed(random_state)\n np.random.seed(random_state)\n random.seed(random_state)\n os.environ['PYTHONHASHSEED'] = str(random_state)",
"_____no_output_____"
]
],
[
[
"### Create model ",
"_____no_output_____"
]
],
[
[
"def create_model(input_shape: Tuple[int], output_shape: int,\n activation, loss, meta_shape: Optional[int] = None,\n task: str = \"B\", learning_rate: float = 0.001,\n pretrain: bool = False) -> models.Model:\n \"\"\"\n The function for creating model.\n\n Parameters\n ----------\n input_shape : int\n Shape of input image data.\n output_shape : int\n Shape of model output.\n activation : function\n The activation function used hidden layers.\n loss : function\n The loss function of model.\n meta_shape : int\n Shape of input meta data of image.\n task : str\n Please determine this model will be used for task A or B(default=A).\n learning_rate : float\n The learning rate of model.\n pretrain : bool\n Flag that deterimine whether use pretrain model(default=False).\n\n Returns\n -------\n model : keras.models.Model\n Model instance.\n \"\"\"\n if pretrain:\n weights = 'imagenet'\n else:\n weights = None\n\n inputs = layers.Input(shape=input_shape)\n efn_model = efn(include_top=False, input_shape=input_shape,\n weights=weights)(inputs)\n ga = layers.GlobalAveragePooling2D()(efn_model)\n\n if task == \"A\":\n meta_inputs = layers.Input(shape=meta_shape)\n concate = layers.Concatenate()([ga, meta_inputs])\n dense1 = layers.Dense(units=128)(concate)\n av1 = layers.Activation(activation)(dense1)\n dr1 = layers.Dropout(0.3)(av1)\n dense2 = layers.Dense(units=64)(dr1)\n av2 = layers.Activation(activation)(dense2)\n dr2 = layers.Dropout(0.3)(av2)\n outputs = layers.Dense(output_shape)(dr2)\n\n model = models.Model(inputs=[inputs, meta_inputs], outputs=[outputs])\n\n elif task == \"B\":\n dense1 = layers.Dense(units=128)(ga)\n av1 = layers.Activation(activation)(dense1)\n dr1 = layers.Dropout(0.3)(av1)\n dense2 = layers.Dense(units=64)(dr1)\n av2 = layers.Activation(activation)(dense2)\n dr2 = layers.Dropout(0.3)(av2)\n outputs = layers.Dense(output_shape)(dr2)\n\n model = models.Model(inputs=[inputs], outputs=[outputs])\n\n else:\n raise Exception(\"Please set task is A or B.\")\n\n model.compile(loss=loss,\n optimizer=optim.SGD(learning_rate=learning_rate, momentum=0.9),\n metrics=['mae', 'mse'])\n return model",
"_____no_output_____"
]
],
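  [
   [
    "# Quick illustrative check (added, not in the original notebook): build the task-B model\n# without pretrained weights and inspect it. The arguments mirror how train() below\n# calls create_model.\nexample_model = create_model(\n    input_shape=(256, 256, 3),\n    output_shape=1,\n    activation=activations.relu,\n    loss='mse',\n    task='B',\n    learning_rate=0.00001,\n    pretrain=False,\n)\nexample_model.summary()",
    "_____no_output_____"
   ]
  ],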
[
[
"### Training model ",
"_____no_output_____"
]
],
[
[
"def train(path_list: np.ndarray, target: np.ndarray, loss,\n meta_data: Optional[np.ndarray] = None, task: str = \"B\"):\n \"\"\"\n The function for training model.\n\n Parameters\n ----------\n path_list : np.ndarray\n The path list of all image data.\n target : np.ndarray\n The array of targets data.\n loss : function\n The loss function of keras.\n meta_data : np.ndarray\n The array of meta data of image.\n task : str\n Please determine you train model for task A or B(default=A).\n \"\"\"\n if task == \"A\":\n train_path, val_path, train_meta, val_meta, train_y, val_y =\\\n train_test_split(path_list, meta_data, target, test_size=0.1, random_state=6174)\n train_gen = FullPathDataLoader(path_list=train_path, target=train_y,\n meta_data=train_meta, batch_size=16,\n task=task)\n val_gen = FullPathDataLoader(path_list=val_path, target=train_y,\n meta_data=val_meta, batch_size=1,\n task=task)\n elif task == \"B\":\n train_path, val_path, train_y, val_y =\\\n train_test_split(path_list, target, test_size=0.1, random_state=6174)\n train_gen = FullPathDataLoader(path_list=train_path, target=train_y,\n batch_size=16, task=task)\n val_gen = FullPathDataLoader(path_list=val_path, target=val_y,\n batch_size=1, task=task)\n else:\n raise Exception(\"Please set task is A or B\")\n\n set_seed()\n model = NFTModel(\n create_model(input_shape=(256, 256, 3), output_shape=1,\n activation=activations.relu, loss=loss,\n meta_shape=len(meta_features), task=task,\n learning_rate=0.00001, pretrain=True)\n )\n\n ES = callbacks.EarlyStopping(monitor='val_loss', patience=10,\n restore_best_weights=True)\n\n print(\"starting training\")\n print('*' + '-' * 30 + '*')\n\n model.fit(train_gen, val_gen, epochs=100, batch_size=16,\n callbacks=[ES])\n\n print(\"finished training\")\n print('*' + '-' * 30 + '*' + '\\n')\n\n if task == \"A\":\n val_gen = FullPathDataLoader(path_list=val_path, target=train_y,\n meta_data=val_meta, batch_size=1, task=task,\n shuffle=False, is_train=False)\n else:\n val_gen = FullPathDataLoader(path_list=val_path, target=train_y,\n batch_size=1, task=task,\n shuffle=False, is_train=False)\n print(\"starting evaluate\")\n print('*' + '-' * 30 + '*')\n\n model.evaluate(val_gen, val_y)\n\n print(\"finished evaluate\")\n print('*' + '-' * 30 + '*' + '\\n')\n\n return model",
"_____no_output_____"
],
[
"class NFTModel(KerasRegressor):\n \"\"\"\n Model class.\n This class is inherited KerasRegressor class of keras.\n \"\"\"\n\n def __init__(self, model_func):\n \"\"\"\n Constructor.\n\n Prameters\n ---------\n model_func : function\n The function for creating model.\n \"\"\"\n super().__init__(build_fn=model_func)\n\n def __getstate__(self):\n result = {'sk_params': self.sk_params}\n with tempfile.TemporaryDirectory() as dir:\n if hasattr(self, 'model'):\n self.model.save(dir + '/output.h5', include_optimizer=False)\n with open(dir + '/output.h5', 'rb') as f:\n result['model'] = f.read()\n return result\n\n def __setstate__(self, serialized):\n self.sk_params = serialized['sk_params']\n with tempfile.TemporaryDirectory() as dir:\n model_data = serialized.get('model')\n if model_data:\n with open(dir + '/input.h5', 'wb') as f:\n f.write(model_data)\n self.model = tf.keras.models.load_model(dir + '/input.h5')\n\n def fit(self, train_gen, val_gen, epochs, batch_size, callbacks):\n \"\"\"\n Training model.\n\n Parameters\n ----------\n train_gen : iterator\n The generator of train data.\n val_gen : iterator\n The generator of validation data.\n epochs : int\n Number of epochs for training model.\n batch_size : int\n Size of batch for training model.\n callbacks : list\n The list of callbacks.\n For example [EarlyStopping instance, ModelCheckpoint instance]\n \"\"\"\n self.model = self.build_fn\n self.model.fit(train_gen, epochs=epochs, batch_size=batch_size,\n validation_data=val_gen, callbacks=callbacks)\n\n def evaluate(self, test_X, test_y):\n \"\"\"\n Evaluate model.\n\n Parameters\n ----------\n test_X : iterator\n The generator of test data.\n test_y : np.ndarray\n The array of targets of test data.\n \"\"\"\n pred = self.model.predict(test_X)\n pred = np.where(pred < 0, 0, pred)\n rmse = np.sqrt(mean_squared_error(test_y, pred))\n mae = np.sqrt(mean_absolute_error(test_y, pred))\n\n print(f\"RMSE Score: {rmse}\")\n print(f\"MAE Score: {mae}\")\n\n def predict(self, img_path: str, collection_name: str, num_sales: int,\n task: str = \"B\"):\n \"\"\"\n Predict data using trained model.\n\n Parameters\n ----------\n img_path : str\n The path of image data.\n collection_name : str\n Name of collection of the NFT.\n num_sales : int\n Number of times the NFT sold.\n \"\"\"\n if task == \"A\":\n collections = ['CryptoPunks',\n 'Bored Ape Yacht Club',\n 'Edifice by Ben Kovach',\n 'Mutant Ape Yacht Club',\n 'The Sandbox',\n 'Divine Anarchy',\n 'Cosmic Labs',\n 'Parallel Alpha',\n 'Art Wars | AW',\n 'Neo Tokyo Identities',\n 'Neo Tokyo Part 2 Vault Cards',\n 'Cool Cats NFT',\n 'CrypToadz by GREMPLIN',\n 'BearXLabs',\n 'Desperate ApeWives',\n 'Decentraland',\n 'Neo Tokyo Part 3 Item Caches',\n 'Doodles',\n 'The Doge Pound',\n 'Playboy Rabbitars Official',\n 'THE SHIBOSHIS',\n 'THE REAL GOAT SOCIETY',\n 'Sipherian Flash',\n 'Party Ape | Billionaire Club',\n 'Treeverse',\n 'Angry Apes United',\n 'CyberKongz',\n 'Emblem Vault [Ethereum]',\n 'Fat Ape Club',\n 'VeeFriends',\n 'JUNGLE FREAKS BY TROSLEY',\n 'Meebits',\n 'Furballs.com Official',\n 'Kaiju Kingz',\n 'Bears Deluxe',\n 'PUNKS Comic',\n 'Hor1zon Troopers',\n 'Lazy Lions',\n 'LOSTPOETS',\n 'Chain Runners',\n 'Chromie Squiggle by Snowfro',\n 'MekaVerse',\n 'Vox Collectibles',\n 'MutantCats',\n 'World of Women',\n 'SuperFarm Genesis Series',\n 'Eponym by ART AI',]\n collection_dict = {\n collections[i]: i for i in range(len(collections))\n }\n meta_data = np.zeros(shape=(len(collection_dict)+1))\n if collection_name in collection_dict.keys():\n 
meta_data[collection_dict[collection_name]] = 1\n meta_data[-1] = num_sales\n meta_data = meta_data.reshape(1, -1)\n\n img = cv2.resize(cv2.imread(img_path)/256., (256, 256))\n img = img.reshape(1, 256, 256, 3)\n\n pred = self.model.predict([img, meta_data])\n elif task == \"B\":\n img = cv2.resize(cv2.imread(img_path)/256., (256, 256))\n img = img.reshape(1, 256, 256, 3)\n\n pred = self.model.predict(img)\n else:\n raise Exception(\"Please set task is A or B\")\n\n return pred[0][0]",
"_____no_output_____"
],
[
"def save_model(instance, file_name: str):\n \"\"\"\n Save model as pickle file\n\n Parameters\n ----------\n instance : Class instance\n The class instance you want to save as pickle file.\n file_name : str\n The absolute path of file saved the instance.\n \"\"\"\n with open(file_name, mode='wb') as f:\n cloudpickle.dump(instance, f)",
"_____no_output_____"
],
[
"def load_model(file_name: str):\n \"\"\"\n Load the model file of pickle.\n\n Parameters\n ----------\n file_name : str\n The absolute path of the model file.\n\n Returns\n -------\n model : tf.keras.models.Model\n Trained model object.\n \"\"\"\n with open(file_name, mode='rb') as f:\n model = cloudpickle.load(f)\n\n return model",
"_____no_output_____"
]
],
[
[
"## Training models ",
"_____no_output_____"
],
[
"### TaskA",
"_____no_output_____"
]
],
[
[
"meta_features =\\\n asset_df_A['collection.name'].unique().tolist() + ['num_sales']\n\npath_list = asset_df_A['full_path'].values\nmeta_data = asset_df_A[meta_features].values\ntarget = asset_df_A['target'].values\n\nmodel_A = train(path_list, target, losses.mean_squared_error, meta_data,\n task=\"A\")\n# save_model(model_A, \"../models/baselineA.pkl\")",
"2021-11-14 08:31:09.150668: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-11-14 08:31:09.155139: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-11-14 08:31:09.155625: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-11-14 08:31:09.156568: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA\nTo enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n2021-11-14 08:31:09.157185: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-11-14 08:31:09.157740: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-11-14 08:31:09.158165: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-11-14 08:31:09.430405: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-11-14 08:31:09.430815: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-11-14 08:31:09.431178: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-11-14 08:31:09.431526: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1510] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 9809 MB memory: -> device: 0, name: NVIDIA GeForce RTX 3060, pci bus id: 0000:01:00.0, compute capability: 8.6\n"
]
],
[
[
"### TaskA(画像のみ) ",
"_____no_output_____"
]
],
[
[
"path_list = asset_df_A['full_path'].values\ntarget = asset_df_A['target'].values\n\nmodel_A = train(path_list, target, losses.mean_squared_error,\n task=\"B\")",
"starting training\n*------------------------------*\nEpoch 1/100\n1224/1224 [==============================] - 217s 175ms/step - loss: 2.7349 - mae: 1.0369 - mse: 2.7349 - val_loss: 2.2998 - val_mae: 0.9054 - val_mse: 2.2998\nEpoch 2/100\n1224/1224 [==============================] - 211s 172ms/step - loss: 1.7788 - mae: 0.8671 - mse: 1.7788 - val_loss: 2.3431 - val_mae: 0.9206 - val_mse: 2.3431\nEpoch 3/100\n1224/1224 [==============================] - 213s 174ms/step - loss: 1.4670 - mae: 0.7940 - mse: 1.4670 - val_loss: 14.4225 - val_mae: 1.9665 - val_mse: 14.4225\nEpoch 4/100\n1224/1224 [==============================] - 211s 173ms/step - loss: 1.3555 - mae: 0.7492 - mse: 1.3555 - val_loss: 1.9551 - val_mae: 0.9155 - val_mse: 1.9551\nEpoch 5/100\n1224/1224 [==============================] - 211s 173ms/step - loss: 1.2367 - mae: 0.7087 - mse: 1.2367 - val_loss: 53.2000 - val_mae: 4.1408 - val_mse: 53.2000\nEpoch 6/100\n1224/1224 [==============================] - 212s 174ms/step - loss: 1.2245 - mae: 0.6949 - mse: 1.2245 - val_loss: 9.0278 - val_mae: 1.8043 - val_mse: 9.0278\nEpoch 7/100\n1224/1224 [==============================] - 211s 172ms/step - loss: 1.1627 - mae: 0.6682 - mse: 1.1627 - val_loss: 1.3537 - val_mae: 0.7341 - val_mse: 1.3537\nEpoch 8/100\n1224/1224 [==============================] - 212s 173ms/step - loss: 1.1215 - mae: 0.6564 - mse: 1.1215 - val_loss: 2.0738 - val_mae: 0.9297 - val_mse: 2.0738\nEpoch 9/100\n1224/1224 [==============================] - 212s 173ms/step - loss: 1.1207 - mae: 0.6435 - mse: 1.1207 - val_loss: 1.7477 - val_mae: 0.8601 - val_mse: 1.7477\nEpoch 10/100\n1224/1224 [==============================] - 211s 172ms/step - loss: 1.0747 - mae: 0.6293 - mse: 1.0747 - val_loss: 2.0650 - val_mae: 0.8836 - val_mse: 2.0650\nEpoch 11/100\n1224/1224 [==============================] - 213s 174ms/step - loss: 1.0472 - mae: 0.6181 - mse: 1.0472 - val_loss: 2.0495 - val_mae: 0.8843 - val_mse: 2.0495\nEpoch 12/100\n1224/1224 [==============================] - 210s 172ms/step - loss: 1.0379 - mae: 0.6120 - mse: 1.0379 - val_loss: 1.9776 - val_mae: 0.8993 - val_mse: 1.9776\nEpoch 13/100\n1224/1224 [==============================] - 210s 172ms/step - loss: 1.0076 - mae: 0.5968 - mse: 1.0076 - val_loss: 3.0278 - val_mae: 1.0280 - val_mse: 3.0278\nEpoch 14/100\n1224/1224 [==============================] - 210s 172ms/step - loss: 0.9805 - mae: 0.5888 - mse: 0.9805 - val_loss: 1.8633 - val_mae: 0.8609 - val_mse: 1.8633\nEpoch 15/100\n1224/1224 [==============================] - 212s 173ms/step - loss: 0.9796 - mae: 0.5860 - mse: 0.9796 - val_loss: 6.3747 - val_mae: 1.3898 - val_mse: 6.3747\nEpoch 16/100\n1224/1224 [==============================] - 211s 172ms/step - loss: 0.9655 - mae: 0.5759 - mse: 0.9655 - val_loss: 1.8551 - val_mae: 0.8143 - val_mse: 1.8551\nEpoch 17/100\n1224/1224 [==============================] - 211s 172ms/step - loss: 0.9659 - mae: 0.5754 - mse: 0.9659 - val_loss: 3.7506 - val_mae: 1.1146 - val_mse: 3.7506\nfinished training\n*------------------------------*\n\nstarting evaluate\n*------------------------------*\nRMSE Score: 1.16349655089246\nMAE Score: 0.8567837679315085\nfinished evaluate\n*------------------------------*\n\n"
]
],
[
[
"### TaskB",
"_____no_output_____"
]
],
[
[
"path_list = asset_df_B['full_path'].values\ntarget = asset_df_B['target'].values\n\nmodel_B = train(path_list, target, losses.mean_squared_error)\n# save_model(model_B, \"../models/baselineB.pkl\")",
"starting training\n*------------------------------*\nEpoch 1/100\n293/293 [==============================] - 57s 181ms/step - loss: 0.5716 - mae: 0.5127 - mse: 0.5716 - val_loss: 0.3407 - val_mae: 0.3353 - val_mse: 0.3407\nEpoch 2/100\n293/293 [==============================] - 52s 176ms/step - loss: 0.4381 - mae: 0.4247 - mse: 0.4381 - val_loss: 0.3661 - val_mae: 0.2986 - val_mse: 0.3661\nEpoch 3/100\n293/293 [==============================] - 52s 177ms/step - loss: 0.3878 - mae: 0.3914 - mse: 0.3878 - val_loss: 0.3510 - val_mae: 0.3146 - val_mse: 0.3510\nEpoch 4/100\n293/293 [==============================] - 51s 174ms/step - loss: 0.3690 - mae: 0.3748 - mse: 0.3690 - val_loss: 0.3481 - val_mae: 0.2835 - val_mse: 0.3481\nEpoch 5/100\n293/293 [==============================] - 51s 175ms/step - loss: 0.3544 - mae: 0.3763 - mse: 0.3544 - val_loss: 0.3389 - val_mae: 0.3086 - val_mse: 0.3389\nEpoch 6/100\n293/293 [==============================] - 51s 173ms/step - loss: 0.3485 - mae: 0.3657 - mse: 0.3485 - val_loss: 0.3289 - val_mae: 0.3119 - val_mse: 0.3289\nEpoch 7/100\n293/293 [==============================] - 51s 173ms/step - loss: 0.3376 - mae: 0.3559 - mse: 0.3376 - val_loss: 0.3242 - val_mae: 0.2918 - val_mse: 0.3242\nEpoch 8/100\n293/293 [==============================] - 51s 174ms/step - loss: 0.3219 - mae: 0.3496 - mse: 0.3219 - val_loss: 0.3509 - val_mae: 0.3229 - val_mse: 0.3509\nEpoch 9/100\n293/293 [==============================] - 51s 174ms/step - loss: 0.3223 - mae: 0.3495 - mse: 0.3223 - val_loss: 0.3283 - val_mae: 0.3215 - val_mse: 0.3283\nEpoch 10/100\n293/293 [==============================] - 51s 175ms/step - loss: 0.3272 - mae: 0.3480 - mse: 0.3272 - val_loss: 0.3287 - val_mae: 0.3198 - val_mse: 0.3287\nEpoch 11/100\n293/293 [==============================] - 51s 175ms/step - loss: 0.3151 - mae: 0.3407 - mse: 0.3151 - val_loss: 0.3364 - val_mae: 0.3479 - val_mse: 0.3364\nEpoch 12/100\n293/293 [==============================] - 52s 176ms/step - loss: 0.3094 - mae: 0.3405 - mse: 0.3094 - val_loss: 0.3221 - val_mae: 0.3068 - val_mse: 0.3221\nEpoch 13/100\n293/293 [==============================] - 51s 175ms/step - loss: 0.3039 - mae: 0.3319 - mse: 0.3039 - val_loss: 0.3249 - val_mae: 0.3248 - val_mse: 0.3249\nEpoch 14/100\n293/293 [==============================] - 51s 174ms/step - loss: 0.3003 - mae: 0.3327 - mse: 0.3003 - val_loss: 0.3387 - val_mae: 0.3266 - val_mse: 0.3387\nEpoch 15/100\n293/293 [==============================] - 51s 174ms/step - loss: 0.3073 - mae: 0.3326 - mse: 0.3073 - val_loss: 0.3018 - val_mae: 0.2656 - val_mse: 0.3018\nEpoch 16/100\n293/293 [==============================] - 51s 175ms/step - loss: 0.3016 - mae: 0.3265 - mse: 0.3016 - val_loss: 0.3408 - val_mae: 0.3381 - val_mse: 0.3408\nEpoch 17/100\n293/293 [==============================] - 51s 175ms/step - loss: 0.2898 - mae: 0.3267 - mse: 0.2898 - val_loss: 0.3852 - val_mae: 0.4113 - val_mse: 0.3852\nEpoch 18/100\n293/293 [==============================] - 51s 175ms/step - loss: 0.2999 - mae: 0.3274 - mse: 0.2999 - val_loss: 0.3065 - val_mae: 0.2868 - val_mse: 0.3065\nEpoch 19/100\n293/293 [==============================] - 51s 173ms/step - loss: 0.2872 - mae: 0.3235 - mse: 0.2872 - val_loss: 0.3361 - val_mae: 0.3306 - val_mse: 0.3361\nEpoch 20/100\n293/293 [==============================] - 51s 174ms/step - loss: 0.2893 - mae: 0.3199 - mse: 0.2893 - val_loss: 0.3377 - val_mae: 0.3297 - val_mse: 0.3377\nEpoch 21/100\n293/293 [==============================] - 51s 174ms/step - loss: 0.2799 - 
mae: 0.3151 - mse: 0.2799 - val_loss: 0.3188 - val_mae: 0.3051 - val_mse: 0.3188\nEpoch 22/100\n293/293 [==============================] - 51s 173ms/step - loss: 0.2814 - mae: 0.3128 - mse: 0.2814 - val_loss: 0.3297 - val_mae: 0.3360 - val_mse: 0.3297\nEpoch 23/100\n293/293 [==============================] - 51s 173ms/step - loss: 0.2776 - mae: 0.3141 - mse: 0.2776 - val_loss: 0.3198 - val_mae: 0.3276 - val_mse: 0.3198\nEpoch 24/100\n293/293 [==============================] - 51s 173ms/step - loss: 0.2766 - mae: 0.3102 - mse: 0.2766 - val_loss: 0.3612 - val_mae: 0.3689 - val_mse: 0.3612\nEpoch 25/100\n293/293 [==============================] - 49s 167ms/step - loss: 0.2772 - mae: 0.3097 - mse: 0.2772 - val_loss: 0.2974 - val_mae: 0.2814 - val_mse: 0.2974\nEpoch 26/100\n293/293 [==============================] - 47s 162ms/step - loss: 0.2722 - mae: 0.3058 - mse: 0.2722 - val_loss: 0.2990 - val_mae: 0.2906 - val_mse: 0.2990\nEpoch 27/100\n293/293 [==============================] - 47s 162ms/step - loss: 0.2727 - mae: 0.3066 - mse: 0.2727 - val_loss: 0.3029 - val_mae: 0.3024 - val_mse: 0.3029\nEpoch 28/100\n293/293 [==============================] - 47s 161ms/step - loss: 0.2719 - mae: 0.3039 - mse: 0.2719 - val_loss: 0.3423 - val_mae: 0.3297 - val_mse: 0.3423\nEpoch 29/100\n293/293 [==============================] - 47s 162ms/step - loss: 0.2794 - mae: 0.3111 - mse: 0.2794 - val_loss: 0.3190 - val_mae: 0.3518 - val_mse: 0.3190\nEpoch 30/100\n293/293 [==============================] - 47s 162ms/step - loss: 0.2674 - mae: 0.3090 - mse: 0.2674 - val_loss: 0.3025 - val_mae: 0.2918 - val_mse: 0.3025\nEpoch 31/100\n293/293 [==============================] - 48s 162ms/step - loss: 0.2658 - mae: 0.3017 - mse: 0.2658 - val_loss: 0.3213 - val_mae: 0.3124 - val_mse: 0.3213\nEpoch 32/100\n293/293 [==============================] - 47s 162ms/step - loss: 0.2635 - mae: 0.3010 - mse: 0.2635 - val_loss: 0.3302 - val_mae: 0.3274 - val_mse: 0.3302\nEpoch 33/100\n293/293 [==============================] - 48s 163ms/step - loss: 0.2645 - mae: 0.2966 - mse: 0.2645 - val_loss: 0.3282 - val_mae: 0.3341 - val_mse: 0.3282\nEpoch 34/100\n293/293 [==============================] - 47s 162ms/step - loss: 0.2638 - mae: 0.3049 - mse: 0.2638 - val_loss: 0.3029 - val_mae: 0.2901 - val_mse: 0.3029\nEpoch 35/100\n293/293 [==============================] - 47s 161ms/step - loss: 0.2669 - mae: 0.3020 - mse: 0.2669 - val_loss: 0.3440 - val_mae: 0.3437 - val_mse: 0.3440\nfinished training\n*------------------------------*\n\nstarting evaluate\n*------------------------------*\nRMSE Score: 0.5453789268884527\nMAE Score: 0.5304499481845472\nfinished evaluate\n*------------------------------*\n\n"
]
],
[
[
"## Evaluate model ",
"_____no_output_____"
],
[
"### Task A",
"_____no_output_____"
]
],
[
[
"file_name = \"../models/baselineA.pkl\"\nmodel = load_model(file_name)\n\nmeta_features =\\\n asset_df_A['collection.name'].unique().tolist() + ['num_sales']\n\npath_list = np.vstack(\n (asset_df_A['full_path'].values.reshape(-1, 1),\n asset_df_B['full_path'].values.reshape(-1, 1))\n).reshape(-1)\nmeta_data = np.vstack(\n (asset_df_A[meta_features].values.reshape(-1, len(meta_features)),\n asset_df_B[meta_features].values.reshape(-1, len(meta_features)))\n)\ntarget = np.vstack(\n (asset_df_A['target'].values.reshape(-1, 1),\n asset_df_B['target'].values.reshape(-1, 1))\n).reshape(-1)\n\ntrain_path, val_path, train_meta, val_meta, train_y, val_y =\\\n train_test_split(path_list, meta_data, target, test_size=0.1, random_state=6174)\n\nval_gen = FullPathDataLoader(path_list=val_path,\n meta_data=val_meta, target=val_y,\n batch_size=1, shuffle=False, is_train=False)\n\nmodel.evaluate(val_gen, val_y)",
"_____no_output_____"
]
],
[
[
"### Task B",
"_____no_output_____"
]
],
[
[
"file_name = \"../models/baselineB.pkl\"\nmodel = load_model(file_name)\n\npath_list = asset_df_B['full_path'].values\nmeta_data = asset_df_B[meta_features].values\ntarget = asset_df_B['target'].values\n\ntrain_path, val_path, train_meta, val_meta, train_y, val_y =\\\n train_test_split(path_list, meta_data, target, test_size=0.1, random_state=6174)\n\nval_gen = FullPathDataLoader(path_list=val_path,\n meta_data=val_meta, target=val_y,\n batch_size=1, shuffle=False, is_train=False)\n\nmodel.evaluate(val_gen, val_y)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"markdown",
"raw"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"raw"
],
[
"markdown"
],
[
"raw"
]
] |
e74cb20f287ecfc6fa0c61a48392ea14635fa658 | 42,656 | ipynb | Jupyter Notebook | eHostXML_ext.ipynb | phzpan/6950_nlp | 815ad1eb97e5d9ca103ec622518a9863819b13eb | [
"Apache-2.0"
] | null | null | null | eHostXML_ext.ipynb | phzpan/6950_nlp | 815ad1eb97e5d9ca103ec622518a9863819b13eb | [
"Apache-2.0"
] | null | null | null | eHostXML_ext.ipynb | phzpan/6950_nlp | 815ad1eb97e5d9ca103ec622518a9863819b13eb | [
"Apache-2.0"
] | 1 | 2018-11-10T22:50:59.000Z | 2018-11-10T22:50:59.000Z | 44.387097 | 359 | 0.627438 | [
[
[
"# extract social isolation notes\n\ndef ehostxml(filename='filename', class_checker = {'class_checker'}):\n '''Extract social isolation notes from eHost xml'''\n tree = ET.parse(filename)\n root = tree.getroot()\n file_name = root.attrib['textSource']\n #print(file_name)\n \n return_check='no_note'\n \n txt=set()\n list1=[]\n for j in root.iter('mentionClass'):\n class_id = j.attrib['id']\n class_id1 = set(class_id.lower().split('-'))\n #-----\n list2=[] # remove possibe spaces\n for j1 in class_id1:\n st = j1.strip()\n list2.append(st)\n class_id1=set(list2)\n #-----\n #print(class_id)\n \n if class_checker.issubset(class_id1):\n txt.add(file_name)\n txt.add(j.text)\n class_id2 = class_id\n #print(file_name)\n #print ('---', j.text)\n \n if len(txt)>1:\n list1=list(txt)\n list1.sort()\n print(class_id2)\n print(list1) \n print('-----')\n return_check='yes_note'\n \n return return_check, file_name",
"_____no_output_____"
],
[
"try:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\nimport csv\nimport chardet\nimport os\npath = \"C:/Users/HuaiZhong/2018/ann_xml\"\nfiles = os.listdir(path)\n#filename=\"123_456-789.txt.knowtator.xml\"\nclass_checker = {'social isolation'}\nprint('Number of notes')\nprint(len(files))\n#note1=[]\n\ncount = 0\npos_files=[]\nneg_files=[]\npos_txt_files=[]\nneg_txt_files=[]\nfor file in files[:]:\n filename=os.path.join(path,file)\n iso, txt_file=ehostxml(filename=filename, class_checker = class_checker)\n if iso == 'yes_note':\n count=count+1\n pos_files.append(filename)\n pos_txt_files.append(txt_file)\n else:\n neg_files.append(filename)\n neg_txt_files.append(txt_file)\nprint('Number of notes with social isolation --- ', str(count))",
"Number of notes\n500\nSocial Isolation-Distancing self from relationships\n['09696212_199472695_469775619.txt', 'trying to get rid of it and fighting back']\n-----\nSocial Isolation- Isolation\n['09910266_203543374_585135519.txt', 'isolating,', 'isolative']\n-----\nSocial Isolation- Being asked to leave others or groups\n['09914912_202033473_535253049.txt', 'felt rejected by peers']\n-----\nSocial Isolation- Isolation\n['11016888_205116311_615981390.txt', 'Parents expressed resistance to day treatment and the pt being home']\n-----\nSocial Isolation-Distancing self from relationships\n['11686193_213835101_805247603.txt', 'Pt was currently evicted from his apartment', 'leave the hotel room \" I kicked him out']\n-----\nSocial Isolation- Isolation\n['12487922_212327999_774824418.txt', 'dad', 'isolation,']\n-----\nSocial Isolation- Isolation\n['12676151_214054249_811823990.txt', 'abruptly leaving her visit', 'cried for 30 mins\" in her room by herself', 'did not feel mom understood nor was responsive to her feelings']\n-----\nSocial Isolation- physiological barriers\n['14036883_202619599_547820501.txt', 'Slightly difficult to understand at times', 'autism, intellectual disability']\n-----\nSocial Isolation-Distancing self from relationships\n['14841753_205016198_604281393.txt', 'running away']\n-----\nSocial Isolation- Isolation\n['14936900_211889304_765462708.txt', 'jail', 'went to jail']\n-----\nSocial Isolation- Lack of meaningful social group\n['15157993_211465548_756717735.txt', 'struggles to feel as if she belongs']\n-----\nSocial Isolation- Isolation\n['18040154_217125099_873865284.txt', 'fears people are out to get him or harm him in someway', 'spends hours a day in his room \"thinking\" about this project']\n-----\nSocial Isolation-Distancing self from relationships\n['18060657_201488970_520178436.txt', 'ran away']\n-----\nSocial Isolation-Distancing self from relationships\n['18662361_198390082_443960398.txt', 'run away']\n-----\nSocial Isolation- Isolation\n['20212729_217179169_876623254.txt', 'isolating']\n-----\nSocial Isolation-Being restricted from contact with others\n['20216794_202892313_554798920.txt', 'amount of restrictions that are placed on her', 'oppositional with the restrictions', 'overly restrictive', 'restrict her access to friends', 'restricted from seeing them', 'restrictions', 'restrictive', 'restrictive about her access to technology, friends']\n-----\nSocial Isolation- Loneliness\n['20255655_202349676_540733077.txt', 'lonely']\n-----\nSocial Isolation- physiological barriers\n['20262210_215245992_834654130.txt', 'Autism spectrum disorder', 'thought no one would care if he died']\n-----\nSocial Isolation- Isolation\n['20339954_206905172_658259209.txt', 'I isolate']\n-----\nSocial Isolation- Isolation\n['20344406_202715654_550711534.txt', 'isolating']\n-----\nSocial Isolation- Loneliness\n['20385528_209856795_727953742.txt', 'feeling lonely']\n-----\nSocial Isolation- physiological barriers\n['20417324_205998272_634299118.txt', 'Memory: impaired', 'Mild intellectual disability']\n-----\nSocial Isolation-Being restricted from contact with others\n['20427474_205942535_631639605.txt', 'discussion of limitations of social media']\n-----\nSocial Isolation-Distancing self from relationships\n['20530486_199785789_477860675.txt', 'not wanting to \"connect\" with people because it will hurt to leave them']\n-----\nSocial Isolation- Lack of meaningful social group\n['\"nobody wants me', '20533187_210964276_747903319.txt', 'does not feel she has \"someone to talk to when I 
need it', 'isolating']\n-----\nSocial Isolation- Not being understood\n['20654237_205414801_615444426.txt', 'argument when her phone was taken away for being disrespectful to her parents', 'misunderstanding about her debit card not coming in the mail and feeling like her family was mad at her']\n-----\nSocial Isolation-Distancing self from relationships\n['20678342_198482886_446083331.txt', 'run away multiple times', 'running away']\n-----\nSocial Isolation- Loneliness\n['20750184_198461822_445962734.txt', 'she was missing people']\n-----\nSocial Isolation- Not being understood\n['20775416_211943811_768217262.txt', 'prevents her from being able to connect with others and receive comfort']\n-----\nSocial Isolation- Being asked to leave others or groups\n['20795402_201181395_512308777.txt', 'feeling like she is constantly being moved from place to place', 'other adolescents at Youth Care said they \"hated her.']\n-----\nSocial Isolation-Being restricted from contact with others\n['20800119_201700015_529878025.txt', 'isolative', 'wants to go home and is sad about being in the hospital']\n-----\nSocial Isolation-Being restricted from contact with others\n['20807876_203271265_563334844.txt', 'grounded until May']\n-----\nSocial Isolation- Not being understood\n['20815593_201393354_519148758.txt', 'grounded', 'not to have contact with the boyfriend', \"parents don't understand her feelings\", \"parents don't understand the importance of the relationship with her boyfriend\", 'restrict contact with the boyfriend', 'rules of no contact with her boyfriend']\n-----\nSocial Isolation-Distancing self from relationships\n['20825148_201800302_531441970.txt', 'grabbed her so she could not runaway', 'restraining order placed on him', 'runaway', 'running away']\n-----\nSocial Isolation-Distancing self from relationships\n['20836878_202292769_539722381.txt', 'prefers to be alone', 'used to run away']\n-----\nSocial Isolation- physiological barriers\n['20841417_202481581_544378727.txt', 'grandmother']\n-----\nSocial Isolation- Isolation\n['20843073_202542228_545734755.txt', 'Peer or social isolation']\n-----\nSocial Isolation-Being restricted from contact with others\n['20849468_202825756_574679938.txt', 'should not be allowed to receive calls']\n-----\nSocial Isolation-Distancing self from relationships\n['20851667_210705031_741804682.txt', 'Mother', 'distance herself from her', 'run away from home', 'running away']\n-----\nSocial Isolation-Being restricted from contact with others\n['20854748_203033031_558372034.txt', 'frustrated and wants to go home']\n-----\nSocial Isolation-Distancing self from relationships\n['20856187_206187924_636656905.txt', 'did not interact with anyone', 'isolated himself']\n-----\nSocial Isolation- Being asked to leave others or groups\n['20857491_203146537_560399085.txt', 'kicked the pt out of the home']\n-----\nSocial Isolation- Isolation\n['20862864_203394699_565794334.txt', 'Isolate', 'Peer or social isolation', 'socially isolative']\n-----\nSocial Isolation-Distancing self from relationships\n['20878801_211192075_752731208.txt', 'Father', 'they moved out of the home with him']\n-----\nSocial Isolation-Distancing self from relationships\n['20899944_205648645_623162699.txt', 'felt constantly berated', 'turn off her emotions and feel \"numb\" as a self protective mechanism in the context of this chaos']\n-----\nSocial Isolation-Being restricted from contact with others\n['20908292_205523773_618568676.txt', 'She took the phone away a few months ago', \"can't talk with people 
because her phone is blocked\", \"can't talk with people because her phone is blocked.\", 'her phone was taken away', 'not to be able to talk with him whenever she wants', 'ran away from home for days', 'took the phone away a few months ago']\n-----\nSocial Isolation- Isolation\n['20910261_205630129_623461832.txt', 'isolative behavior']\n-----\nSocial Isolation-Being restricted from contact with others\n['20924702_206241964_645030399.txt', 'isolate to her room', 'no visits/phone calls', 'no visits/phone calls until', 'prefer to isolate to her room', 'she was restricting and avoid social interactions']\n-----\nSocial Isolation-Distancing self from relationships\n['20948164_208071439_683748490.txt', 'afraid of being lonely', 'attempting to run away from her problems at home', 'feels lonely', 'fleeing the hospital', 'thoughts of fleeing the hospital']\n-----\nSocial Isolation-Distancing self from relationships\n['20959901_207470438_670248229.txt', 'run way towards a \"busy highway', 'run-away\"', 'running away']\n-----\nSocial Isolation-Distancing self from relationships\n['20959901_207764648_679990946.txt', 'leaving the house', 'tried to leave the house']\n-----\nSocial Isolation-Being restricted from contact with others\n['20960880_208010073_683357038.txt', 'stuck in his room\" for several days']\n-----\nSocial Isolation-Being restricted from contact with others\n['20965046_208125781_684712848.txt', 'her mother', 'taken away from her mother at age 6']\n-----\n"
]
],
[
[
"Being asked to leave others or groups\n\nBeing restricted from contact with others\n\nDistancing self from relationships\n\nIsolation\n\nLack of meaningful social group\n\nLoneliness\n\nNot being understood\n\nphysiological barriers",
"_____no_output_____"
]
],
[
[
"pos_txt_files[0:5]",
"_____no_output_____"
],
[
"pos_files1 = []\nfor pos_file in pos_files:\n pos_file = pos_file.split('\\\\')\n pos_file = pos_file[1].split('.knowtator')\n pos_file = pos_file[0]\n pos_files1.append(pos_file)\npos_files1",
"_____no_output_____"
],
[
"neg_files1 = []\nfor neg_file in neg_files:\n neg_file = neg_file.split('\\\\')\n neg_file = neg_file[1].split('.knowtator')\n neg_file = neg_file[0]\n neg_files1.append(neg_file)\nneg_files1",
"_____no_output_____"
],
[
"# copy file\nimport shutil\ndef copyFile(src, dest):\n try:\n shutil.copy2(src, dest) \n except shutil.Error as e:\n print('Error: %s' % e)\n except IOError as e:\n print('Error: %s' % e.strerror)",
"_____no_output_____"
],
[
"#from pathlib import Path\nimport os.path\npath_src =\"C:/Users/HuaiZhong/git/6950_nlp/test/corpus\"\n#path_tgt=\"C:/Users/HuaiZhong/git/6950_nlp/test/corpusp\" \npath_tgt=\"C:/Users/HuaiZhong/git/6950_nlp/test/corpusn\" \n\n#for i in pos_txt_files:\nfor i in neg_txt_files:\n path_file = os.path.join(path_src, i)\n if os.path.isfile(path_file):\n copyFile(path_file, path_tgt) \n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e74cbabe93391f8b90e573310812d50fdf9aa20e | 12,619 | ipynb | Jupyter Notebook | aqua/algorithm_introduction_with_vqe.ipynb | renier/qiskit-tutorials-community | 6a2dbc618ec012e957ed975f8984737d204681fb | [
"Apache-2.0"
] | 2 | 2021-04-29T15:11:27.000Z | 2021-05-09T20:52:21.000Z | aqua/algorithm_introduction_with_vqe.ipynb | renier/qiskit-tutorials-community | 6a2dbc618ec012e957ed975f8984737d204681fb | [
"Apache-2.0"
] | 1 | 2020-05-08T20:25:11.000Z | 2020-05-08T20:25:11.000Z | aqua/algorithm_introduction_with_vqe.ipynb | renier/qiskit-tutorials-community | 6a2dbc618ec012e957ed975f8984737d204681fb | [
"Apache-2.0"
] | 1 | 2019-09-02T00:35:21.000Z | 2019-09-02T00:35:21.000Z | 42.204013 | 807 | 0.644584 | [
[
[
"## _*Using Qiskit Aqua algorithms, a how to guide*_\n\nThis notebook demonstrates how to use the `Qiskit Aqua` library to invoke an algorithm and process the result.\n\nFurther information may be found for the algorithms in the online [Aqua documentation](https://qiskit.org/documentation/aqua/algorithms.html).\n\nAlgorithms in Aqua can be created and run as usual in Python by constructing instances and calling methods. There is also a high level `run_algorithm` method that takes a configuration dictionary with data describing which algorithm to use, which components etc along with an InputInstance type to supply data to the algorithm. This latter approach is what we call `declarative` with the former, the regular Python way, `programmatic`. This tutorial will show both approaches.\n\nAqua has many `algorithms` for solving different problems. For some we also have classical algorithms, that take the exact same input data, to solve the problem. This can be useful in the near term as Quantum algorithms are developed since we are still at a stage where we can do classical comparison of the result.\n\nAqua also has various `components` which are dependent objects used by algorithms, such as variational forms, qfts, initial states etc. We will see more on this below.\n\nLastly for developers we also have a collections of `circuits` and gates which can be used to help build out new components and algorithms.\n\nHere we will choose to show some of the main aspects of Aqua by solving a ground state energy problem.",
"_____no_output_____"
]
],
[
[
"from qiskit.aqua import Operator",
"_____no_output_____"
]
],
[
[
"As input, for an energy problem, we need a Hamiltonian and so we first create a suitable `Operator ` instance. In this case we have a paulis list, as below, from a previously computed Hamiltonian, that we saved, so as to focus this notebook on using the algorithms. We simply load these paulis to create the original Operator.\n\nThis Hamiltonian was created originally using Qiskit Chemistry for an H2 molecule at 0.735A interatomic distance. Please refer to the chemistry tutorials here if you are interested in understanding more. Suffice to say at this level Aqua does not really care about the source of the Operator.",
"_____no_output_____"
]
],
[
[
"pauli_dict = {\n 'paulis': [{\"coeff\": {\"imag\": 0.0, \"real\": -1.052373245772859}, \"label\": \"II\"},\n {\"coeff\": {\"imag\": 0.0, \"real\": 0.39793742484318045}, \"label\": \"ZI\"},\n {\"coeff\": {\"imag\": 0.0, \"real\": -0.39793742484318045}, \"label\": \"IZ\"},\n {\"coeff\": {\"imag\": 0.0, \"real\": -0.01128010425623538}, \"label\": \"ZZ\"},\n {\"coeff\": {\"imag\": 0.0, \"real\": 0.18093119978423156}, \"label\": \"XX\"}\n ]\n}\n\nqubit_op = Operator.load_from_dict(pauli_dict)",
"_____no_output_____"
]
],
[
[
"### Let's start with a classical algorithm\n\nWe can now use the Operator without regard to how it was created. We chose to start this tutorial with a classical algorithm as it involves a little less setting up than the `VQE` quantum algorithm we will use later. Here we will use `ExactEigensolver` to compute the minimum eigenvalue of the Operator (Hamiltonian).\n\n#### First let's show the `programmatic` approach.\n\nWe construct an `ExactEigensolver` instance, passing in the Operator, and then call `run()` on in order to compute the result. All Aqua algorithms have the run method (it is defined by a base class which all algorithms extend) and while no parameters are need for classical algorithms a quantum algorithm will require a backend (quantum simulator or real device) on which it will be run. The `result` object returned is a dictionary. While the results fields can be different for algorithms solving different problems, and even within a given problem type there may be algorithm specific data returned, for a given problem the fields core to that problem are common across algorithms in order that different algorithms can be chosen to solve the same problem in a consistent fashion.",
"_____no_output_____"
]
],
[
[
"from qiskit.aqua.algorithms import ExactEigensolver\n\nee = ExactEigensolver(qubit_op)\nresult = ee.run()\nprint(result['energy'])",
"-1.857275030202378\n"
]
],
[
[
"#### Now let's show the `declarative` approach. \n\nHere we need to prepare a configuration dictionary of parameters to define the algorithm. Again we we will use the ExactEigensolver and need to create an `algorithm` where it is named by `name`. The name comes from a `CONFIGURATION` dictionary in the algorithm and this name is registered to the Aqua discovery framework so we can load the corresponding class and run it during the exceution of `run_algorithm`. `run_algorithm` requires the configuration dictionary and input data passed via an InputInstance class. For an energy problem the data is supplied via an EnergyInput (extends InputInstance), other problem types have their own specific InputInstance. `run_algorithm` returns the same dictionary as above (internally it calls the run() method of the algorithm and passes back the result)\n\nNote: there are other fields such `problem` that could have been added below. This field defaults to `energy`, which is what we want so it has been omitted. Defaults are convenient in the declarative form too as algorithms can define for both their properties as well as defaults for dependent components.",
"_____no_output_____"
]
],
[
[
"from qiskit.aqua import run_algorithm\nfrom qiskit.aqua.input import EnergyInput\n\naqua_cfg_dict = {\n 'algorithm': {\n 'name': 'ExactEigensolver'\n }\n}\n\nalgo_input = EnergyInput(qubit_op)\nresult = run_algorithm(aqua_cfg_dict, algo_input)\nprint(result['energy'])",
"-1.8572750302023808\n"
]
],
[
[
"### Lets switch now to using a Quantum algorithm.\n\nWe will use the Variational Quantum Eigensolver (VQE) to solve the same problem as above. As its name implies its uses a variational approach. An ansatz (a variational form) is supplied and using a quantum/classical hybrid technique the energy resulting from evaluating the Operator with the variational form on a quantum backend is taken down to a minimum using a classical optimizer that varies the parameters of the variational form.\n\n#### Lets do the `declarative` approach first this time\n\nIn the description above we talked about `VQE` a `variational form` and an `optimizer`. We can now set this up as a dictionary. While we can omit them from the dictionary, such that defaults are used, here we specify them explicitly so we can set their parameters as we desire.\n\nAs this is a quantum algorithm we need to specify a backend. Here we use the `statevector_simpulator` from the `qiskit.BasicAer` provider from `Qiskit Terra`. As this is a variational algorithm going from quantum to classical and looping until it finds a minimum it takes a few seconds. The result here is very close to our classical result above.",
"_____no_output_____"
]
],
[
[
"aqua_cfg_dict = {\n 'algorithm': {\n 'name': 'VQE',\n 'operator_mode': 'matrix'\n },\n 'variational_form': {\n 'name': 'RYRZ',\n 'depth': 3,\n 'entanglement': 'linear'\n },\n 'optimizer': {\n 'name': 'L_BFGS_B',\n 'maxfun': 1000\n },\n 'backend': {\n 'name': 'statevector_simulator',\n 'provider': 'qiskit.BasicAer'\n }\n}\n\nalgo_input = EnergyInput(qubit_op)\nresult = run_algorithm(aqua_cfg_dict, algo_input)\nprint(result['energy'])",
"-1.8572750302012253\n"
]
],
[
[
"#### And now `programmatic`\n \nHere we create the variational form and optimizer and then pass them to VQE along with the Operator. The backend is created and passed to the algorithm so it can be run there.",
"_____no_output_____"
]
],
[
[
"from qiskit import BasicAer\nfrom qiskit.aqua.algorithms import VQE\nfrom qiskit.aqua.components.variational_forms import RYRZ\nfrom qiskit.aqua.components.optimizers import L_BFGS_B\n\nvar_form = RYRZ(qubit_op.num_qubits, depth=3, entanglement='linear')\noptimizer = L_BFGS_B(maxfun=1000)\nvqe = VQE(qubit_op, var_form, optimizer)\nbackend = BasicAer.get_backend('statevector_simulator')\nresult = vqe.run(backend)\nprint(result['energy'])",
"-1.8572750301886618\n"
]
],
[
[
"While a backend can be passed directly to the quantum algorithm run(), internally it will be detected as such and wrapped as a QuantumInstance. However by doing this explicitly yourself, as below, various parameters governing the execution can be set, including in more advanced cases ability to set noise models, coupling maps etc. The following shows the above but using a QuantumInstance and setting up a default transpiler PassManager for circuit processing.",
"_____no_output_____"
]
],
[
[
"from qiskit.aqua import QuantumInstance\nfrom qiskit.transpiler import PassManager\n\nvar_form = RYRZ(qubit_op.num_qubits, depth=3, entanglement='linear')\noptimizer = L_BFGS_B(maxfun=1000)\nvqe = VQE(qubit_op, var_form, optimizer)\nbackend = BasicAer.get_backend('statevector_simulator')\nqi = QuantumInstance(backend=backend, pass_manager=PassManager())\nresult = vqe.run(qi)\nprint(result['energy'])",
"-1.8572750302012366\n"
]
],
[
[
"### Concluding\n\nThis completes an introduction to programming and using Aqua algorithms. There are plenty of other tutorials showing Aqua being used to solve other problems, including AI, Finance, Optimization and Chemistry. We encourage you to explore these further and see that various capabilities and techniques employed.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74cc7ecc9de39864b0f47f5d43b5dae1b76a30c | 46,126 | ipynb | Jupyter Notebook | Notebook_2/Notebook_2.ipynb | nguyenst1/facebook-api-analysis | 8cae723155f39e9a5993a7c07dc2600fa09b4c51 | [
"MIT"
] | null | null | null | Notebook_2/Notebook_2.ipynb | nguyenst1/facebook-api-analysis | 8cae723155f39e9a5993a7c07dc2600fa09b4c51 | [
"MIT"
] | null | null | null | Notebook_2/Notebook_2.ipynb | nguyenst1/facebook-api-analysis | 8cae723155f39e9a5993a7c07dc2600fa09b4c51 | [
"MIT"
] | null | null | null | 39.695353 | 1,186 | 0.505203 | [
[
[
"# Notebook 2: Requesting information",
"_____no_output_____"
],
[
"After getting all the access token as well as refreshing the token, we started requesting information for our analysis. Just to remind, our four goals are to find out which are the top twenty friends that like our post the most, demographic for places we have been tagged, reactions for every Trump's post, and lastly, the events on Facebook. My partner and I divide the tasks in half and each of us worked on each half in order to save times. I started working on getting top friends that like our post the most and the places that we have beend tagged and my partner worked on the other half. \n\nBefore starting getting the information, we played around with the Graph API Explorer to see what kind of information we can get from Facebook. Then, we started with the first question by using python to get the list of photo and see what kind of format it looked like. We used GET method to get the list of photo by parsing the url \"https://graph.facebook.com/me?fields=photos.limit(200)\". We would then decode the result and convert it into json. As soon as it was in json, we created a list that contain or the photo ids so that we could get number of reactions out of them. This was due to the fact that by parsing only the post id, we can get any type of information such as likes, reactions, and even comments over the Facebook Graph API. \n\nAfter creating a list, we then started to parse in each item to the list(which are the photo ids) into a GET request to obtain the reaction types. We then struggled with counting the total number of likes for each friend for every photo. Therefore, we had to stop for a moment and design an algorithm that could count total number of likes from each friend. Eventually, we figured that out and created a dictionary in which for every friend, it shows the total number of likes that they give for evey of our photo. Lastly, we wrapped up the first question by generate a dictionary that contains top friends who like our post the most. Furthermore, we also imported the dictionary into dataframe then into csv to prepared for the third notebook.",
"_____no_output_____"
],
[
"As explained in Notebook 1: \n\nFacebook does not provide a way to refresh their tokens once expired. To get a new token the login flow must be followed again to obtain a short-lived token which needs to be exhanged, once again, for a long lived token. This is expressed in the facebook documentations as follows:\n\n\"Even the long-lived access token will eventually expire. At any point, you can generate a new long-lived token by sending the person back to the login flow used by your web app - note that the person will not actually need to login again, they have already authorized your app, so they will immediately redirect back to your app from the login flow with a refreshed token\" \n\n\nFor this notebook, we are using the long-lived token which lasts for over 2 months. ",
"_____no_output_____"
]
],
[
[
"import requests\nimport importlib\nimport json\nimport pandas as pd\nimport keys_project\nimportlib.reload(keys_project)\nkeychain = keys_project.keychain\n\nd={}\nd['access_token']=keychain['facebook']['access_token'] # Getting the long-lived access token",
"_____no_output_____"
]
],
[
[
"Below are all of the helper functions that we have used. The return type of a response from the graph api is not easy to parse and hence we convert all repsonses to JSON. The other functions are supplementing our data requests and modifications as described in the program level docs. ",
"_____no_output_____"
]
],
[
[
"def response_to_json(response):\n '''\n This function converts the response into json format\n Parameter:\n response: the request response to convert to json\n Return: \n the response in json\n \n ''' \n string_response = response.content.decode('utf-8') #decoding the response to string\n return json.loads(string_response) # converting the string to json\n\ndef get_reaction_count(object_id,reaction_type):\n '''\n This function gets the total reactions for each post \n Parameter:\n object_id: the id of the object to get reaction data\n reaction_type: the reaction_type to retrieve from NONE, LIKE, LOVE, WOW, HAHA, SAD, ANGRY, THANKFUL\n Return: \n the number of reactions on the request object of type reaction_type\n '''\n request_url=\"https://graph.facebook.com/\"+str(object_id)+\\\n \"/reactions?summary=true&type=\"+reaction_type # getting reaction summary data\n\n response= requests.get(request_url,params=d)\n response_json=response_to_json(response)\n return response_json['summary']['total_count'] #getting the count for reaction reaction_type\n\ndef most_frequent(myDict,number_top):\n '''\n This function creates a dictionary which includes the friend's name and the number of likes\n Parameter:\n myDict: A dictionary with the key as facebook friend's name and value of the number of times they liked the upload type\n number_top: The number of top friends who have made likes\n Return: \n A dictionary of the top 20 friends\n \n '''\n \n # Frequency for top 20 people who like your upload_type\n value = []\n\n for key in myDict:\n value.append(myDict[key])\n value = sorted(value,reverse=True)\n values = value[0:number_top]\n most_liked_Dict = {}\n \n for key in myDict:\n if myDict[key] in values:\n most_liked_Dict[key] = myDict[key]\n \n return most_liked_Dict\n\ndef feed_(feed_id):\n '''\n This function get the feed data from Facebook\n Parameter:\n feed_id:the id of the feed in string\n Return: \n a dictionary of feed data\n '''\n \n \n request_url=\"https://graph.facebook.com/\"+feed_id+\\\n \"?fields=type,name,created_time,status_type,shares\" #creating the url based on the feed_id\n \n response= requests.get(request_url,params=d)\n response_json=response_to_json(response)\n \n return response_json\n\ndef to_csv(filename,df):\n '''\n This function creates a CSV file. It exports data from a pandas dataframe to the file. \n \n Parameters: \n String of filename desired, pandas dataframe\n Returns: \n None\n '''\n df.to_csv(filename,encoding='utf-8') # exporting to a csv file\n \n ",
"_____no_output_____"
]
],
[
[
"Last but not least, we imported the dictionary into csv file for later analysis in Notebook 3. This question took us quite long time. However, the questions later on were pretty straightforward and similar to this question.",
"_____no_output_____"
],
[
"### Question: Getting the number of facebook reactions of each reaction type for a particular upload type. \n\nThis function takes a user_id which can be any facebook user or page, a limit which is the number of upload types we want to check for and upload type which are facebook ulpoad objects such as pictures or posts. By offereing these paramteres, we offer flexibity on the kind of data recieved. Inititally, we used Facebook's Graph API explorer to test our requests. The link to the explorer is : https://developers.facebook.com/tools/explorer/. \n\nIn the facebook graph, information is composed in the following format: \n\n1. nodes: \"things\" such as a User, a Photo, a Page, a Comment\n2. edges: the connections between those \"things\", such as a Page's Photos, or a Photo's Comments\n3. fields:info about those \"things\", such as a person's birthday, or the name of a Page\n\nUnderstanding how to query all three of these parts of the social graph were important in obtaining good data. For this question, we had to first had to get a 'User' or 'Page' node. From which we had to query the user's edges to find its ulploads (posts or photos). Once we got the ID asscoiated with each edge, we used the fields of those edges to get reaction counts. \n\nFor our anaylasis, get the reaction counts for Donald Trump and Hillary Clinton to compare their social media presence and following. \n\nFor each of our questions, we also had to modify our JSON response to clear it of noise and get it in the format to be accepted by a pandas dataframe\n",
"_____no_output_____"
]
],
[
[
"def reaction_statistics(id_,limit,fb_upload_type):\n '''\n This function gets the total reactions of each feed \n ParameterL\n id_: a string id to a facebook object such as a page or person\n limit: the limit to the numner of posts obtained from the request in string\n fb_upload_type: a valid type of upload as specified in FB docs: photo, post, videos etc in string\n Return: \n a list of dictionary of the number of each different kind of reaction for each post\n '''\n request_url=\"https://graph.facebook.com/\"+id_+\"?fields=\"+fb_upload_type+\".limit(\"+limit+\"){id}\" #creating request url\n \n response= requests.get(request_url,params=d)\n response_json=response_to_json(response) # converting response to json\n user=[]\n reaction_type=['LIKE','LOVE','WOW','HAHA','SAD','ANGRY','THANKFUL']\n\n for object_ in response_json[fb_upload_type]['data']:\n buffer={}\n for type_ in reaction_type:\n buffer[type_]=get_reaction_count(object_['id'],type_) #getting the count of each reaction\n \n buffer['id']=object_['id']\n user.append(buffer)\n \n return user",
"_____no_output_____"
],
[
"donald_trump=pd.DataFrame(reaction_statistics('153080620724','5','posts'))\nhillary_clinton=pd.DataFrame(reaction_statistics('889307941125736','5','posts'))\n\ndonald_trump.head(5)",
"_____no_output_____"
],
[
"hillary_clinton.head(5)",
"_____no_output_____"
]
],
[
[
"Hence, for each cell we can see the upload_type ID to identify the post or photo and the number of reactions for each upload. ",
"_____no_output_____"
],
[
"### QUESTION: Obtaining feed data to anaylize the kinds, times and popularity of a user or page's feed. \n\nIn this question, we get feed information for the artist Bon Dylan (though are function us abstracted to get information for any user whose feed is publically available or a user who has authenticated us though OAUTH 2.0)\n\nAfter obtaining the user ID, we used the Facebook Graph API explorer to see the response contents of a request to the fields to the user's feed. There were various kind of data available whihc can also be found on FB's docs (https://developers.facebook.com/docs/graph-api/reference/v2.11/user/feed).From the different fields we picked ones which would be interesting to look at such as number of shares on the feed post, the times and dates of the posts to see the frequency of the user's FB usage, the kind of post(status,story,video etc) and other such information. Once again, we had to modify the JSON response so that it would be accepted by a pandas DF. ",
"_____no_output_____"
]
],
[
[
"def feed_data(object_id,limit):\n '''\n This function generates a list of dictionaries for each feed of information\n Parameters:\n object_id: the id of the object posting events in string\n limit: the number of most recent events in string\n Return: \n a list of dictionaries where each data is a single feed of information\n \n '''\n request_url=\"https://graph.facebook.com/\"+object_id+\"?fields=feed.limit(\"+str(limit)+\"){id}\"\n \n response= requests.get(request_url,params=d)\n response_json=response_to_json(response) # converting response to json\n \n feed_list=[] #creaing an empty list to hold feed dictionaries\n for feed_id in response_json['feed']['data']:\n \n feed_info={}\n feed_info= feed_(feed_id['id'])\n feed_info['share_count']=feed_info['shares']['count']\n del feed_info['shares']\n feed_list.append(feed_info)\n \n return feed_list #returning the feed list",
"_____no_output_____"
],
[
"Bob_Dylan=pd.DataFrame(feed_data('153080620724','10'))\nBob_Dylan.head(5)",
"_____no_output_____"
]
],
[
[
"### Question: Get the top twenty frequency of friends who like our post \n\nIn the cell below, it is our code for the first question, which is the top friends who like our post the most. First, we created a function to convert the response into json format since we would be making a lot of requests and create dictionary from them. This was quite easy and did not take a lot of our time. Next, we wrote a function to get the total number of reactions. We did this by parsing the ids of the object(which are the posts) into a GET request so that it can get the information from all objects. Then, we wrote another function called most_frequent to get the number of likes from each friend. This function took most of our time since we had to design an algorithm to sum up the total of likes from every friend. When this function worked, the rest was easier since we only had to put them in a dictionary and get the top 20 frequency. Lastly, we imported the dataframe of top 20 frequency into dataframe and to csv. \n\nIn this question overall, another problem that we also struggled with was getting the top 20 frequency. First, after getting the total likes from everyone, we had to append the likes into a list. Then , we sorted the list from the most likes to the least likes and got the top 20. Then, we checked if the names and likes in the total likes dictionary were also in the top 20 list. If they were, we would put them into a new dictionary, whose keys were names and values were number of likes. Beside the frequency and the total likes algorithm that we designed, the other functions were quite straightforward. The function takes a facebook object id which could be a user or page, the numner of posts or photos we want to check for and the type of the post we want to check for. Hence, we offer a good amount of flexibility.",
"_____no_output_____"
]
],
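The paragraph above describes the top-20 algorithm: sum the likes per friend, sort from most to least, and keep the twenty largest. The `most_frequent` helper referenced in the next cell is defined earlier in the notebook and is not shown in this section; the following is only a sketch of that idea using the standard library, with a different name to make clear it is an illustration rather than the notebook's own implementation.

```python
def most_frequent_sketch(like_counts, n=20):
    """Return a dict of the n friends with the highest like counts.

    like_counts: dict mapping friend name -> total number of likes.
    """
    top = sorted(like_counts.items(), key=lambda item: item[1], reverse=True)[:n]
    return dict(top)

# Example:
# most_frequent_sketch({'Ann': 5, 'Bo': 9, 'Cy': 2}, n=2) -> {'Bo': 9, 'Ann': 5}
```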
[
[
"def friend_likes(id_,limit,fb_upload_type):\n '''\n This function gets a dictionary for each kind of reactions for each post\n Parameter:\n id_: a string id to a facebook object such as a page or person\n limit: the limit to the numner of posts obtained from the request in string\n fb_upload_type: a valid type of upload as specified in FB docs: photo, post, videos etc in string\n Return: \n a list of dictionary of the number of each different kind of reaction for each post\n '''\n request_url=\"https://graph.facebook.com/\"+id_+\"?fields=\"+fb_upload_type+\".limit(\"+limit+\"){id}\"\n \n response= requests.get(request_url,params=d)\n photoID_list=response_to_json(response) # converting response to json\n \n myDict={} # Dictionary that contains the frequency of likes for each friend\n \n \n for object_ in photoID_list[fb_upload_type]['data']:\n \n response=requests.get(\"https://graph.facebook.com/\"+object_['id']+\"/reactions\",params=d) # Get the likes data\n \n response_json=response_to_json(response)\n # For each ulpoad_type, let's get the list of friends and the number of time they like the \n for name_dict in response_json['data']:\n name=name_dict['name'] \n if name not in myDict.keys() : # Check if the friends have already like the photo\n myDict[name] = 1\n else:\n myDict[name]= myDict[name]+1\n \n return most_frequent(myDict,20)\n\nfriend_likes('me','200','posts')",
"_____no_output_____"
],
[
"# Getting the like frequency for top 20 friends for past 200 posts\ndf_likes_posts= pd.DataFrame([friend_likes('me','200','posts')])\n# Getting the like frequency for top 20 friends for past 200 posts\ndf_likes_photo= pd.DataFrame([friend_likes('me','200','photos')])",
"_____no_output_____"
],
[
"to_csv('df_likes_posts.csv',df_likes_posts)\nto_csv('df_likes_photos.csv',df_likes_photo)",
"_____no_output_____"
],
[
"df_likes_posts",
"_____no_output_____"
],
[
"df_likes_photo",
"_____no_output_____"
]
],
[
[
"### Question: Demographic analysis for place that we have been tagged \nIn this question, we want to explore the places that we have travelled and been tagged on Facebook. We want to create a demographic plot that show where we have been based on the latitudes and longitudes. Since we already know how to perform a GET request from the previous questions, this question did not take us a lot of times. We did this question by writing a function called tagged_data.\n\nFirst, this function took object_id, which was the id for places, as a parameter. The parameter then would be parsed into the GET request to get the locations for each id. Once the request was successful, we converted the response into json format and perform an iteration. We used list apprehension to create a list that include the data for places that we have been tagged. Then, we created a dictionary such that for each location data in the list, we would put the latitudes, longitudes and location names as the keys for the dictionary, and their values are the values in the dictionary. We later on appended each tagged location dictionary to a list.",
"_____no_output_____"
]
],
[
[
"def tagged_data(object_id):\n '''\n This function generates a dictionary which includes the longitudes, latitudes, and names for places.\n Parameter:\n id_: a string id to a facebook object such as a page or person\n Return: \n a list of dictionaries of latitude,longitude, country and name of tagged places\n '''\n \n request_url=\"https://graph.facebook.com/\"+object_id+\"?fields=tagged_places.limit(200)\"\n response= requests.get(request_url,params=d)\n place_list=response_to_json(response) # converting response to json\n\n tagged_place_list = [element['place'] for element in place_list['tagged_places']['data']] # Create a list of photo id\n tagged_list=[]\n for place in tagged_place_list:\n buffer_dict={} #creating a buffer dictionary\n \n buffer_dict['latitude']= place['location']['latitude']\n buffer_dict['longitude']= place['location']['longitude']\n buffer_dict['name']=place['name']\n \n tagged_list.append(buffer_dict) # appending each tagged location dictionary to a list\n \n return tagged_list",
"_____no_output_____"
]
],
[
[
"We will import a dataframe that contains the data about latitude, longitude, and name. Then, we created a csv file out of this dataframe.",
"_____no_output_____"
]
],
[
[
"df_tagged_places= pd.DataFrame(tagged_data('me'))\nto_csv('df_tagged_places.csv',df_tagged_places) ",
"_____no_output_____"
]
],
[
[
"We then showed the first ten row in this dataframe.",
"_____no_output_____"
]
],
[
[
"df_tagged_places.head(10)",
"_____no_output_____"
]
],
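The question above mentions a demographic plot of the tagged places. The plotting code itself is not part of this section, but as a minimal sketch the latitude/longitude columns built by `tagged_data` could be scatter-plotted with matplotlib:

```python
import matplotlib.pyplot as plt

# Simple demographic scatter of the tagged places collected above.
plt.figure(figsize=(8, 5))
plt.scatter(df_tagged_places['longitude'], df_tagged_places['latitude'])
plt.xlabel('longitude')
plt.ylabel('latitude')
plt.title('Places tagged on Facebook')
plt.show()
```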
[
[
"### Conclusion\nThis notebook is the major part for this project, where we try to succeed the goals that we have set out. The four goals are getting the top 20 frequency of friends who like our post, Donald Trump's posts reactions, events on Facebook, and the places that we have been tagged in. Throughout this API Project, the second notebook is the most time-consuming notebook, and also the most complicated, in which we have to figure out so many things. First, we have to play around with the Graph API Explorer to learn the syntax for our GET request. Then, we have to design many algorithms so that it would retun what we want to analyze. For example, the algorithm for Trump's post reactions, all the algorithms for the total likes for every friend on Facebook. Lastly, we have to manipulate the data that we get to make it turn into dataframe for our third notebook. By and large, this notebook is the most complicated, but it is also the most fun notebook. I learn a lot from this notebook, not just about computer science or the API itself, but also about how to work with a partner and how to self-explore. Throughout this notebook, I have sharpened many skills for my future career.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74cce7ea5205d65b107943454143a316e6aebdf | 119,010 | ipynb | Jupyter Notebook | nlu/colab/Training/binary_text_classification/NLU_training_sentiment_classifier_demo_IMDB.ipynb | fcivardi/spark-nlp-workshop | aedb1f5d93577c81bc3dd0da5e46e02586941541 | [
"Apache-2.0"
] | 687 | 2018-09-07T03:45:39.000Z | 2022-03-20T17:11:20.000Z | nlu/colab/Training/binary_text_classification/NLU_training_sentiment_classifier_demo_IMDB.ipynb | fcivardi/spark-nlp-workshop | aedb1f5d93577c81bc3dd0da5e46e02586941541 | [
"Apache-2.0"
] | 89 | 2018-09-18T02:04:42.000Z | 2022-02-24T18:22:27.000Z | nlu/colab/Training/binary_text_classification/NLU_training_sentiment_classifier_demo_IMDB.ipynb | fcivardi/spark-nlp-workshop | aedb1f5d93577c81bc3dd0da5e46e02586941541 | [
"Apache-2.0"
] | 407 | 2018-09-07T03:45:44.000Z | 2022-03-20T05:12:25.000Z | 119,010 | 119,010 | 0.680951 | [
[
[
"\n\n[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/Training/binary_text_classification/NLU_training_sentiment_classifier_demo_IMDB.ipynb)\n\n\n# Training a Sentiment Analysis Classifier with NLU \n## 2 class IMDB Movie sentiment classifier training\nWith the [SentimentDL model](https://nlp.johnsnowlabs.com/docs/en/annotators#sentimentdl-multi-class-sentiment-analysis-annotator) from Spark NLP you can achieve State Of the Art results on any multi class text classification problem \n\nThis notebook showcases the following features : \n\n- How to train the deep learning classifier\n- How to store a pipeline to disk\n- How to load the pipeline from disk (Enables NLU offline mode)\n\nYou can achieve these results or even better on this dataset with training data:\n\n\n<br>\n\n\n\n\n\n\nYou can achieve these results or even better on this dataset with test data:\n\n\n<br>\n\n\n\n",
"_____no_output_____"
],
[
"# 1. Install Java 8 and NLU",
"_____no_output_____"
]
],
[
[
"!wget https://setup.johnsnowlabs.com/nlu/colab.sh -O - | bash\nimport nlu",
"--2021-05-05 05:38:30-- https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/scripts/colab_setup.sh\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 1671 (1.6K) [text/plain]\nSaving to: ‘STDOUT’\n\n\r- 0%[ ] 0 --.-KB/s \rInstalling NLU 3.0.0 with PySpark 3.0.2 and Spark NLP 3.0.1 for Google Colab ...\n- 100%[===================>] 1.63K --.-KB/s in 0s \n\n2021-05-05 05:38:30 (63.2 MB/s) - written to stdout [1671/1671]\n\n\u001b[K |████████████████████████████████| 204.8MB 67kB/s \n\u001b[K |████████████████████████████████| 153kB 73.9MB/s \n\u001b[K |████████████████████████████████| 204kB 23.9MB/s \n\u001b[K |████████████████████████████████| 204kB 65.5MB/s \n\u001b[?25h Building wheel for pyspark (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
]
],
[
[
"# 2. Download IMDB dataset\nhttps://www.kaggle.com/lakshmi25npathi/imdb-dataset-of-50k-movie-reviews\n\nIMDB dataset having 50K movie reviews for natural language processing or Text analytics.\nThis is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets. We provide a set of 25,000 highly polar movie reviews for training and 25,000 for testing. So, predict the number of positive and negative reviews using either classification or deep learning algorithms.\nFor more dataset information, please go through the following link,\nhttp://ai.stanford.edu/~amaas/data/sentiment/",
"_____no_output_____"
]
],
[
[
"! wget http://ckl-it.de/wp-content/uploads/2021/01/IMDB-Dataset.csv\n",
"--2021-05-05 05:40:25-- http://ckl-it.de/wp-content/uploads/2021/01/IMDB-Dataset.csv\nResolving ckl-it.de (ckl-it.de)... 217.160.0.108, 2001:8d8:100f:f000::209\nConnecting to ckl-it.de (ckl-it.de)|217.160.0.108|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 3288450 (3.1M) [text/csv]\nSaving to: ‘IMDB-Dataset.csv’\n\nIMDB-Dataset.csv 100%[===================>] 3.14M 20.6MB/s in 0.2s \n\n2021-05-05 05:40:25 (20.6 MB/s) - ‘IMDB-Dataset.csv’ saved [3288450/3288450]\n\n"
],
[
"import pandas as pd\ntrain_path = '/content/IMDB-Dataset.csv'\n\ntrain_df = pd.read_csv(train_path)\n# the text data to use for classification should be in a column named 'text'\n# the label column must have name 'y' name be of type str\ncolumns=['text','y']\ntrain_df = train_df[columns]\nfrom sklearn.model_selection import train_test_split\n\ntrain_df, test_df = train_test_split(train_df, test_size=0.2)\ntrain_df",
"_____no_output_____"
]
],
[
[
"# 3. Train Deep Learning Classifier using nlu.load('train.sentiment')\n\nYou dataset label column should be named 'y' and the feature column with text data should be named 'text'",
"_____no_output_____"
]
],
[
[
"import nlu \nfrom sklearn.metrics import classification_report\n\n# load a trainable pipeline by specifying the train. prefix and fit it on a datset with label and text columns\n# by default the Universal Sentence Encoder (USE) Sentence embeddings are used for generation\ntrainable_pipe = nlu.load('train.sentiment')\nfitted_pipe = trainable_pipe.fit(train_df.iloc[:50])\n\n# predict with the trainable pipeline on dataset and get predictions\npreds = fitted_pipe.predict(train_df.iloc[:50],output_level='document')\n#sentence detector that is part of the pipe generates sone NaNs. lets drop them first\npreds.dropna(inplace=True)\nprint(classification_report(preds['y'], preds['trained_sentiment']))\n\npreds",
"tfhub_use download started this may take some time.\nApproximate size to download 923.7 MB\n[OK!]\nsentence_detector_dl download started this may take some time.\nApproximate size to download 354.6 KB\n[OK!]\n precision recall f1-score support\n\n negative 0.82 0.88 0.85 26\n neutral 0.00 0.00 0.00 0\n positive 0.85 0.71 0.77 24\n\n accuracy 0.80 50\n macro avg 0.56 0.53 0.54 50\nweighted avg 0.84 0.80 0.81 50\n\n"
]
],
[
[
"# 4. Test the fitted pipe on new example",
"_____no_output_____"
]
],
[
[
"fitted_pipe.predict('It was one of the best films i have ever watched in my entire life !!')",
"_____no_output_____"
]
],
[
[
"## 5. Configure pipe training parameters",
"_____no_output_____"
]
],
[
[
"trainable_pipe.print_info()",
"The following parameters are configurable for this NLU pipeline (You can copy paste the examples) :\n>>> pipe['sentiment_dl'] has settable params:\npipe['sentiment_dl'].setMaxEpochs(1) | Info: Maximum number of epochs to train | Currently set to : 1\npipe['sentiment_dl'].setLr(0.005) | Info: Learning Rate | Currently set to : 0.005\npipe['sentiment_dl'].setBatchSize(64) | Info: Batch size | Currently set to : 64\npipe['sentiment_dl'].setDropout(0.5) | Info: Dropout coefficient | Currently set to : 0.5\npipe['sentiment_dl'].setEnableOutputLogs(True) | Info: Whether to use stdout in addition to Spark logs. | Currently set to : True\npipe['sentiment_dl'].setThreshold(0.6) | Info: The minimum threshold for the final result otheriwse it will be neutral | Currently set to : 0.6\npipe['sentiment_dl'].setThresholdLabel('neutral') | Info: In case the score is less than threshold, what should be the label. Default is neutral. | Currently set to : neutral\n>>> pipe['use@tfhub_use'] has settable params:\npipe['use@tfhub_use'].setDimension(512) | Info: Number of embedding dimensions | Currently set to : 512\npipe['use@tfhub_use'].setLoadSP(False) | Info: Whether to load SentencePiece ops file which is required only by multi-lingual models. This is not changeable after it's set with a pretrained model nor it is compatible with Windows. | Currently set to : False\npipe['use@tfhub_use'].setStorageRef('tfhub_use') | Info: unique reference name for identification | Currently set to : tfhub_use\n>>> pipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'] has settable params:\npipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False\npipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setStorageRef('SentenceDetectorDLModel_c83c27f46b97') | Info: storage unique identifier | Currently set to : SentenceDetectorDLModel_c83c27f46b97\npipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setEncoder(com.johnsnowlabs.nlp.annotators.sentence_detector_dl.SentenceDetectorDLEncoder@260a728d) | Info: Data encoder | Currently set to : com.johnsnowlabs.nlp.annotators.sentence_detector_dl.SentenceDetectorDLEncoder@260a728d\npipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setImpossiblePenultimates(['Bros', 'No', 'al', 'vs', 'etc', 'Fig', 'Dr', 'Prof', 'PhD', 'MD', 'Co', 'Corp', 'Inc', 'bros', 'VS', 'Vs', 'ETC', 'fig', 'dr', 'prof', 'PHD', 'phd', 'md', 'co', 'corp', 'inc', 'Jan', 'Feb', 'Mar', 'Apr', 'Jul', 'Aug', 'Sep', 'Sept', 'Oct', 'Nov', 'Dec', 'St', 'st', 'AM', 'PM', 'am', 'pm', 'e.g', 'f.e', 'i.e']) | Info: Impossible penultimates | Currently set to : ['Bros', 'No', 'al', 'vs', 'etc', 'Fig', 'Dr', 'Prof', 'PhD', 'MD', 'Co', 'Corp', 'Inc', 'bros', 'VS', 'Vs', 'ETC', 'fig', 'dr', 'prof', 'PHD', 'phd', 'md', 'co', 'corp', 'inc', 'Jan', 'Feb', 'Mar', 'Apr', 'Jul', 'Aug', 'Sep', 'Sept', 'Oct', 'Nov', 'Dec', 'St', 'st', 'AM', 'PM', 'am', 'pm', 'e.g', 'f.e', 'i.e']\npipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setModelArchitecture('cnn') | Info: Model architecture (CNN) | Currently set to : cnn\n>>> pipe['document_assembler'] has settable params:\npipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink\n"
]
],
[
[
"## 6. Retrain with new parameters",
"_____no_output_____"
]
],
[
[
"# Train longer!\ntrainable_pipe['sentiment_dl'].setMaxEpochs(5) \nfitted_pipe = trainable_pipe.fit(train_df.iloc[:50])\n# predict with the trainable pipeline on dataset and get predictions\npreds = fitted_pipe.predict(train_df.iloc[:50],output_level='document')\n\n#sentence detector that is part of the pipe generates sone NaNs. lets drop them first\npreds.dropna(inplace=True)\nprint(classification_report(preds['y'], preds['trained_sentiment']))\n\npreds",
" precision recall f1-score support\n\n negative 0.92 0.92 0.92 26\n neutral 0.00 0.00 0.00 0\n positive 1.00 0.75 0.86 24\n\n accuracy 0.84 50\n macro avg 0.64 0.56 0.59 50\nweighted avg 0.96 0.84 0.89 50\n\n"
]
],
[
[
"# 7. Try training with different Embeddings",
"_____no_output_____"
]
],
[
[
"# We can use nlu.print_components(action='embed_sentence') to see every possibler sentence embedding we could use. Lets use bert!\nnlu.print_components(action='embed_sentence')",
"For language <en> NLU provides the following Models : \nnlu.load('en.embed_sentence') returns Spark NLP model tfhub_use\nnlu.load('en.embed_sentence.use') returns Spark NLP model tfhub_use\nnlu.load('en.embed_sentence.tfhub_use') returns Spark NLP model tfhub_use\nnlu.load('en.embed_sentence.use.lg') returns Spark NLP model tfhub_use_lg\nnlu.load('en.embed_sentence.tfhub_use.lg') returns Spark NLP model tfhub_use_lg\nnlu.load('en.embed_sentence.albert') returns Spark NLP model albert_base_uncased\nnlu.load('en.embed_sentence.electra') returns Spark NLP model sent_electra_small_uncased\nnlu.load('en.embed_sentence.electra_small_uncased') returns Spark NLP model sent_electra_small_uncased\nnlu.load('en.embed_sentence.electra_base_uncased') returns Spark NLP model sent_electra_base_uncased\nnlu.load('en.embed_sentence.electra_large_uncased') returns Spark NLP model sent_electra_large_uncased\nnlu.load('en.embed_sentence.bert') returns Spark NLP model sent_bert_base_uncased\nnlu.load('en.embed_sentence.bert_base_uncased') returns Spark NLP model sent_bert_base_uncased\nnlu.load('en.embed_sentence.bert_base_cased') returns Spark NLP model sent_bert_base_cased\nnlu.load('en.embed_sentence.bert_large_uncased') returns Spark NLP model sent_bert_large_uncased\nnlu.load('en.embed_sentence.bert_large_cased') returns Spark NLP model sent_bert_large_cased\nnlu.load('en.embed_sentence.biobert.pubmed_base_cased') returns Spark NLP model sent_biobert_pubmed_base_cased\nnlu.load('en.embed_sentence.biobert.pubmed_large_cased') returns Spark NLP model sent_biobert_pubmed_large_cased\nnlu.load('en.embed_sentence.biobert.pmc_base_cased') returns Spark NLP model sent_biobert_pmc_base_cased\nnlu.load('en.embed_sentence.biobert.pubmed_pmc_base_cased') returns Spark NLP model sent_biobert_pubmed_pmc_base_cased\nnlu.load('en.embed_sentence.biobert.clinical_base_cased') returns Spark NLP model sent_biobert_clinical_base_cased\nnlu.load('en.embed_sentence.biobert.discharge_base_cased') returns Spark NLP model sent_biobert_discharge_base_cased\nnlu.load('en.embed_sentence.covidbert.large_uncased') returns Spark NLP model sent_covidbert_large_uncased\nnlu.load('en.embed_sentence.small_bert_L2_128') returns Spark NLP model sent_small_bert_L2_128\nnlu.load('en.embed_sentence.small_bert_L4_128') returns Spark NLP model sent_small_bert_L4_128\nnlu.load('en.embed_sentence.small_bert_L6_128') returns Spark NLP model sent_small_bert_L6_128\nnlu.load('en.embed_sentence.small_bert_L8_128') returns Spark NLP model sent_small_bert_L8_128\nnlu.load('en.embed_sentence.small_bert_L10_128') returns Spark NLP model sent_small_bert_L10_128\nnlu.load('en.embed_sentence.small_bert_L12_128') returns Spark NLP model sent_small_bert_L12_128\nnlu.load('en.embed_sentence.small_bert_L2_256') returns Spark NLP model sent_small_bert_L2_256\nnlu.load('en.embed_sentence.small_bert_L4_256') returns Spark NLP model sent_small_bert_L4_256\nnlu.load('en.embed_sentence.small_bert_L6_256') returns Spark NLP model sent_small_bert_L6_256\nnlu.load('en.embed_sentence.small_bert_L8_256') returns Spark NLP model sent_small_bert_L8_256\nnlu.load('en.embed_sentence.small_bert_L10_256') returns Spark NLP model sent_small_bert_L10_256\nnlu.load('en.embed_sentence.small_bert_L12_256') returns Spark NLP model sent_small_bert_L12_256\nnlu.load('en.embed_sentence.small_bert_L2_512') returns Spark NLP model sent_small_bert_L2_512\nnlu.load('en.embed_sentence.small_bert_L4_512') returns Spark NLP model 
sent_small_bert_L4_512\nnlu.load('en.embed_sentence.small_bert_L6_512') returns Spark NLP model sent_small_bert_L6_512\nnlu.load('en.embed_sentence.small_bert_L8_512') returns Spark NLP model sent_small_bert_L8_512\nnlu.load('en.embed_sentence.small_bert_L10_512') returns Spark NLP model sent_small_bert_L10_512\nnlu.load('en.embed_sentence.small_bert_L12_512') returns Spark NLP model sent_small_bert_L12_512\nnlu.load('en.embed_sentence.small_bert_L2_768') returns Spark NLP model sent_small_bert_L2_768\nnlu.load('en.embed_sentence.small_bert_L4_768') returns Spark NLP model sent_small_bert_L4_768\nnlu.load('en.embed_sentence.small_bert_L6_768') returns Spark NLP model sent_small_bert_L6_768\nnlu.load('en.embed_sentence.small_bert_L8_768') returns Spark NLP model sent_small_bert_L8_768\nnlu.load('en.embed_sentence.small_bert_L10_768') returns Spark NLP model sent_small_bert_L10_768\nnlu.load('en.embed_sentence.small_bert_L12_768') returns Spark NLP model sent_small_bert_L12_768\nFor language <fi> NLU provides the following Models : \nnlu.load('fi.embed_sentence') returns Spark NLP model sent_bert_finnish_cased\nnlu.load('fi.embed_sentence.bert.cased') returns Spark NLP model sent_bert_finnish_cased\nnlu.load('fi.embed_sentence.bert.uncased') returns Spark NLP model sent_bert_finnish_uncased\nFor language <xx> NLU provides the following Models : \nnlu.load('xx.embed_sentence') returns Spark NLP model sent_bert_multi_cased\nnlu.load('xx.embed_sentence.bert') returns Spark NLP model sent_bert_multi_cased\nnlu.load('xx.embed_sentence.bert.cased') returns Spark NLP model sent_bert_multi_cased\nnlu.load('xx.embed_sentence.labse') returns Spark NLP model labse\n"
],
[
"trainable_pipe = nlu.load('en.embed_sentence.small_bert_L12_768 train.sentiment')\n# We need to train longer and user smaller LR for NON-USE based sentence embeddings usually\n# We could tune the hyperparameters further with hyperparameter tuning methods like gridsearch\n# Also longer training gives more accuracy\ntrainable_pipe['sentiment_dl'].setMaxEpochs(120) \ntrainable_pipe['sentiment_dl'].setLr(0.0005) \nfitted_pipe = trainable_pipe.fit(train_df)\n# predict with the trainable pipeline on dataset and get predictions\npreds = fitted_pipe.predict(train_df,output_level='document')\n\n#sentence detector that is part of the pipe generates sone NaNs. lets drop them first\npreds.dropna(inplace=True)\nprint(classification_report(preds['y'], preds['trained_sentiment']))\n\n#preds",
"sent_small_bert_L12_768 download started this may take some time.\nApproximate size to download 392.9 MB\n[OK!]\nsentence_detector_dl download started this may take some time.\nApproximate size to download 354.6 KB\n[OK!]\n precision recall f1-score support\n\n negative 0.87 0.77 0.82 988\n neutral 0.00 0.00 0.00 0\n positive 0.85 0.83 0.84 1012\n\n accuracy 0.80 2000\n macro avg 0.57 0.53 0.55 2000\nweighted avg 0.86 0.80 0.83 2000\n\n"
]
],
[
[
"# 7.1 evaluate on Test Data",
"_____no_output_____"
]
],
[
[
"preds = fitted_pipe.predict(test_df,output_level='document')\n\n#sentence detector that is part of the pipe generates sone NaNs. lets drop them first\npreds.dropna(inplace=True)\nprint(classification_report(preds['y'], preds['trained_sentiment']))",
" precision recall f1-score support\n\n negative 0.85 0.75 0.80 246\n neutral 0.00 0.00 0.00 0\n positive 0.84 0.81 0.83 254\n\n accuracy 0.78 500\n macro avg 0.56 0.52 0.54 500\nweighted avg 0.85 0.78 0.81 500\n\n"
]
],
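As an optional supplement to the classification report above (not part of the original notebook), a confusion matrix shows the raw counts behind those precision/recall numbers; scikit-learn is already used in this notebook, so this only adds one extra import:

```python
from sklearn.metrics import confusion_matrix

# Rows are true labels, columns are predicted labels,
# in sorted label order (e.g. 'negative', 'neutral', 'positive').
print(confusion_matrix(preds['y'], preds['trained_sentiment']))
```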
[
[
"# 8. Lets save the model",
"_____no_output_____"
]
],
[
[
"stored_model_path = './models/classifier_dl_trained' \nfitted_pipe.save(stored_model_path)",
"Stored model in ./models/classifier_dl_trained\n"
]
],
[
[
"# 9. Lets load the model from HDD.\nThis makes Offlien NLU usage possible! \nYou need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk.",
"_____no_output_____"
]
],
[
[
"hdd_pipe = nlu.load(path=stored_model_path)\n\npreds = hdd_pipe.predict('It was one of the best films i have ever watched in my entire life !!')\npreds",
"_____no_output_____"
],
[
"hdd_pipe.print_info()",
"The following parameters are configurable for this NLU pipeline (You can copy paste the examples) :\n>>> pipe['document_assembler'] has settable params:\npipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink\n>>> pipe['sentence_detector@SentenceDetectorDLModel_c83c27f46b97'] has settable params:\npipe['sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False\npipe['sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setStorageRef('SentenceDetectorDLModel_c83c27f46b97') | Info: storage unique identifier | Currently set to : SentenceDetectorDLModel_c83c27f46b97\npipe['sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setEncoder(com.johnsnowlabs.nlp.annotators.sentence_detector_dl.SentenceDetectorDLEncoder@2350f35a) | Info: Data encoder | Currently set to : com.johnsnowlabs.nlp.annotators.sentence_detector_dl.SentenceDetectorDLEncoder@2350f35a\npipe['sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setImpossiblePenultimates(['Bros', 'No', 'al', 'vs', 'etc', 'Fig', 'Dr', 'Prof', 'PhD', 'MD', 'Co', 'Corp', 'Inc', 'bros', 'VS', 'Vs', 'ETC', 'fig', 'dr', 'prof', 'PHD', 'phd', 'md', 'co', 'corp', 'inc', 'Jan', 'Feb', 'Mar', 'Apr', 'Jul', 'Aug', 'Sep', 'Sept', 'Oct', 'Nov', 'Dec', 'St', 'st', 'AM', 'PM', 'am', 'pm', 'e.g', 'f.e', 'i.e']) | Info: Impossible penultimates | Currently set to : ['Bros', 'No', 'al', 'vs', 'etc', 'Fig', 'Dr', 'Prof', 'PhD', 'MD', 'Co', 'Corp', 'Inc', 'bros', 'VS', 'Vs', 'ETC', 'fig', 'dr', 'prof', 'PHD', 'phd', 'md', 'co', 'corp', 'inc', 'Jan', 'Feb', 'Mar', 'Apr', 'Jul', 'Aug', 'Sep', 'Sept', 'Oct', 'Nov', 'Dec', 'St', 'st', 'AM', 'PM', 'am', 'pm', 'e.g', 'f.e', 'i.e']\npipe['sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setModelArchitecture('cnn') | Info: Model architecture (CNN) | Currently set to : cnn\n>>> pipe['bert_sentence@sent_small_bert_L12_768'] has settable params:\npipe['bert_sentence@sent_small_bert_L12_768'].setBatchSize(8) | Info: Size of every batch | Currently set to : 8\npipe['bert_sentence@sent_small_bert_L12_768'].setCaseSensitive(False) | Info: whether to ignore case in tokens for embeddings matching | Currently set to : False\npipe['bert_sentence@sent_small_bert_L12_768'].setDimension(768) | Info: Number of embedding dimensions | Currently set to : 768\npipe['bert_sentence@sent_small_bert_L12_768'].setMaxSentenceLength(128) | Info: Max sentence length to process | Currently set to : 128\npipe['bert_sentence@sent_small_bert_L12_768'].setIsLong(False) | Info: Use Long type instead of Int type for inputs buffer - Some Bert models require Long instead of Int. | Currently set to : False\npipe['bert_sentence@sent_small_bert_L12_768'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768\n>>> pipe['sentiment_dl@sent_small_bert_L12_768'] has settable params:\npipe['sentiment_dl@sent_small_bert_L12_768'].setThreshold(0.6) | Info: The minimum threshold for the final result otheriwse it will be neutral | Currently set to : 0.6\npipe['sentiment_dl@sent_small_bert_L12_768'].setThresholdLabel('neutral') | Info: In case the score is less than threshold, what should be the label. Default is neutral. 
| Currently set to : neutral\npipe['sentiment_dl@sent_small_bert_L12_768'].setClasses(['positive', 'negative']) | Info: get the tags used to trained this SentimentDLModel | Currently set to : ['positive', 'negative']\npipe['sentiment_dl@sent_small_bert_L12_768'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e74cd262e88d14db68e140b215558259ce6d12da | 783,571 | ipynb | Jupyter Notebook | OpenCV_Image Filtering.ipynb | deepnetworks555/openCV-jupyter | 2e35346016374ec38a8f8a1a676d29b4f6e08e94 | [
"MIT"
] | null | null | null | OpenCV_Image Filtering.ipynb | deepnetworks555/openCV-jupyter | 2e35346016374ec38a8f8a1a676d29b4f6e08e94 | [
"MIT"
] | null | null | null | OpenCV_Image Filtering.ipynb | deepnetworks555/openCV-jupyter | 2e35346016374ec38a8f8a1a676d29b4f6e08e94 | [
"MIT"
] | null | null | null | 4,777.871951 | 479,684 | 0.966385 | [
[
[
"# Image filtering-Convolution",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# loading an orange image\nimageBGR = cv2.imread('orange.jpg',-1)\n# convert the image from BGR color space to RGB\nimageRGB=cv2.cvtColor(imageBGR, cv2.COLOR_BGR2RGB)",
"_____no_output_____"
],
[
"plt.imshow(imageRGB)\nimageRGB.shape",
"_____no_output_____"
]
],
[
[
"# Averaging",
"_____no_output_____"
]
],
[
[
"kernel = np.ones((10,10),np.float32)/100\nresult = cv2.filter2D(imageRGB,-1,kernel)\n\nplt.subplot(121),plt.imshow(imageRGB),plt.title('Original')\nplt.xticks([]), plt.yticks([])\nplt.subplot(122),plt.imshow(result),plt.title('Averaging')\nplt.xticks([]), plt.yticks([])\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Gaussian Blur",
"_____no_output_____"
]
],
[
[
"blured_image = cv2.GaussianBlur(imageRGB,(21,21),10)\n\nplt.figure(figsize=(10,10))\nplt.subplot(121),plt.imshow(imageRGB),plt.title('Original')\nplt.xticks([]), plt.yticks([])\nplt.subplot(122),plt.imshow(blured_image),plt.title('blured_image')\nplt.xticks([]), plt.yticks([])\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74cdbad0cccc39de35f2f5ce9dcccc8fd097064 | 10,299 | ipynb | Jupyter Notebook | static_files/assignments/Assignment7.ipynb | phonchi/nsysu-math524 | 9615ecbaa3f3693c8293769b69f6b6d086b39711 | [
"MIT"
] | null | null | null | static_files/assignments/Assignment7.ipynb | phonchi/nsysu-math524 | 9615ecbaa3f3693c8293769b69f6b6d086b39711 | [
"MIT"
] | null | null | null | static_files/assignments/Assignment7.ipynb | phonchi/nsysu-math524 | 9615ecbaa3f3693c8293769b69f6b6d086b39711 | [
"MIT"
] | 3 | 2021-09-28T09:02:46.000Z | 2021-11-09T10:52:34.000Z | 25.7475 | 284 | 0.559763 | [
[
[
"# Assignment 7\n## Chapter 6",
"_____no_output_____"
],
[
"#### Student ID: *Double click here to fill the Student ID*\n\n#### Name: *Double click here to fill the name*",
"_____no_output_____"
],
[
"## 1\nWe perform best subset, forward stepwise, and backward stepwise selection on a single data set. For each approach, we obtain $p + 1$ models, containing $0, 1, 2, . . . ,p$ predictors. Explain your answers:",
"_____no_output_____"
],
[
"(a) Which of the three models with $k$ predictors has the smallest $training$ RSS?",
"_____no_output_____"
],
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"(b) Which of the three models with $k$ predictors has the smallest $test$ RSS?",
"_____no_output_____"
],
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"(c) True or False:\n> i. The predictors in the $k$-variable model identified by forward stepwise are a subset of the predictors in the $(k+1)$-variable model identified by forward stepwise selection.\n\n> ii. The predictors in the $k$-variable model identified by backward stepwise are a subset of the predictors in the $(k+1)$- variable model identified by backward stepwise selection.\n\n> iii. The predictors in the $k$-variable model identified by backward stepwise are a subset of the predictors in the $(k+1)$- variable model identified by forward stepwise selection.\n\n> iv. The predictors in the $k$-variable model identified by forward stepwise are a subset of the predictors in the $(k+1)$-variable model identified by backward stepwise selection.\n\n> v. The predictors in the $k$-variable model identified by best subset are a subset of the predictors in the $(k+1)$-variable model identified by best subset selection.",
"_____no_output_____"
],
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"## 6\nWe will now explore $(6.12)$ and $(6.13)$ further.",
"_____no_output_____"
],
[
"The loss function of ridge regression $(6.12)$ and lasso regression $(6.13)$:\n\n$$\\sum_{j=1}^p(y_j-\\beta_j)^2+\\lambda\\sum_{j=1}^{p}\\beta_j^2$$<div style=\" text-align:right\">$(6.12)$</div>\n\n$$\\sum_{j=1}^p(y_j-\\beta_j)^2+\\lambda\\sum_{j=1}^{p}|\\beta_j|$$<div style=\" text-align:right\">$(6.13)$</div>\n\n$$\\hat{\\beta}_j^R=\\frac{y_j}{(1+\\lambda)}$$<div style=\" text-align:right\">$(6.14)$</div>\n\n$$\n\\hat{\\beta}_j^L=\n\\begin{cases}\n y_j-\\frac{\\lambda}{2} & ,\\mbox{if }y_j>\\frac{\\lambda}{2}\\\\\n y_j-\\frac{\\lambda}{2} & ,\\mbox{if }y_j<-\\frac{\\lambda}{2}\\\\\n 0 & ,\\mbox{if }|y_j|\\leq\\frac{\\lambda}{2}\\\\\n\\end{cases}\n$$\n<div style=\" text-align:right\">$(6.15)$</div>\n",
"_____no_output_____"
],
[
"(a) Consider (6.12) with $p = 1$. For some choice of $y_1$ and $\\lambda > 0$, plot $(6.12)$ as a function of $\\beta_1$. Your plot should confirm that $(6.12)$ is solved by $(6.14)$.",
"_____no_output_____"
]
],
[
[
"# coding your answer here.",
"_____no_output_____"
]
],
[
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"(b) Consider $(6.13)$ with $p=1$. For some choice of $y_1$ and $\\lambda>0$, plot $(6.13)$ as a function of $\\beta_1$. Your plot should confirm that $(6.13)$ is solved by $(6.15)$.",
"_____no_output_____"
],
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
]
],
[
[
"# coding your answer here.",
"_____no_output_____"
]
],
[
[
"## 9\nIn this exercise, we will predict the number of applications received using the other variables in the `College` data set.",
"_____no_output_____"
],
[
"(a) Split the data set into a training set and a test set. Use `train_test_split()` function.",
"_____no_output_____"
]
],
[
[
"# coding your answer here.",
"_____no_output_____"
]
],
[
[
"(b) Fit a **linear** model using least squares on the training set, and report the test error obtained.",
"_____no_output_____"
]
],
[
[
"# coding your answer here.",
"_____no_output_____"
]
],
[
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"(c) Fit a **ridge** regression model on the training set, with $λ$ chosen by cross-validation. Report the test error obtained.",
"_____no_output_____"
]
],
[
[
"# coding your answer here.",
"_____no_output_____"
]
],
[
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"(d) Fit a **lasso** model on the training set, with $λ$ chosen by cross-validation. Report the test error obtained, along with the number of non-zero coefficient estimates.",
"_____no_output_____"
]
],
[
[
"# coding your answer here.",
"_____no_output_____"
]
],
[
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"(e) Fit a **PCR** model on the training set, with *M* chosen by crossvalidation. Report the test error obtained, along with the value of *M* selected by cross-validation.",
"_____no_output_____"
]
],
[
[
"# coding your answer here.",
"_____no_output_____"
]
],
[
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"(f) Fit a **PLS** model on the training set, with *M* chosen by cross-validation. Report the test error obtained, along with the value of *M* selected by cross-validation.",
"_____no_output_____"
]
],
[
[
"# coding your answer here.",
"_____no_output_____"
]
],
[
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"(g) Comment on the results obtained. How accurately can we predict the number of college applications received? Is there much difference among the test errors resulting from these five approaches?",
"_____no_output_____"
]
],
[
[
"# coding your answer here.",
"_____no_output_____"
]
],
[
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"## 11\nWe will now try to predict per capita crime rate in the `Boston` data set.",
"_____no_output_____"
],
[
"(a) Try out some of the regression methods explored in this chapter, such as **best subset** selection, the **lasso**, **ridge** regression, and **PCR**. Present and discuss results for the approaches that you consider.",
"_____no_output_____"
]
],
[
[
"# coding your answer here.",
"_____no_output_____"
]
],
[
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"(b) Propose a model (or set of models) that seem to perform well on this data set, and justify your answer. Make sure that you are evaluating model performance using validation set error, crossvalidation, or some other reasonable alternative, as opposed to using training error.",
"_____no_output_____"
],
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
],
[
"(c) Does your chosen model involve all of the features in the data set? Why or why not?",
"_____no_output_____"
],
[
"> Ans: *double click here to answer the question.*",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e74cee77cdb4f66dc1f7f3d503628d71d2444170 | 103,607 | ipynb | Jupyter Notebook | basic/Numbers.ipynb | sanikamal/awesome-python-examples | 998dd2b1ef31714f20f6e6aa061ac1f303026e84 | [
"MIT"
] | 1 | 2020-07-07T23:36:51.000Z | 2020-07-07T23:36:51.000Z | basic/Numbers.ipynb | sanikamal/python-atoz | 998dd2b1ef31714f20f6e6aa061ac1f303026e84 | [
"MIT"
] | null | null | null | basic/Numbers.ipynb | sanikamal/python-atoz | 998dd2b1ef31714f20f6e6aa061ac1f303026e84 | [
"MIT"
] | null | null | null | 31.301208 | 329 | 0.463945 | [
[
[
"## Integers\nPython represents integers (positive and negative whole numbers) using the\n`int` (immutable) type. For immutable objects, there is no difference between\na variable and an object di\u000berenc",
"_____no_output_____"
]
],
[
[
"(58).bit_length()",
"_____no_output_____"
],
[
"str='11'",
"_____no_output_____"
],
[
"d=int(str)",
"_____no_output_____"
],
[
"d",
"_____no_output_____"
],
[
"b=int(str,2)",
"_____no_output_____"
],
[
"b",
"_____no_output_____"
],
[
"divmod(23,5)",
"_____no_output_____"
],
[
"round(100.89,2)",
"_____no_output_____"
],
[
"round(100.89,-2)",
"_____no_output_____"
],
[
"round(100.8936,3)",
"_____no_output_____"
],
[
"(4.50).as_integer_ratio()",
"_____no_output_____"
]
],
[
[
"## The `fractions` Module\nPython has the fraction module to deal with parts of a fraction.",
"_____no_output_____"
]
],
[
[
"import fractions",
"_____no_output_____"
],
[
"dir(fractions)",
"_____no_output_____"
],
[
"help(fractions.Fraction)",
"Help on class Fraction in module fractions:\n\nclass Fraction(numbers.Rational)\n | This class implements rational numbers.\n | \n | In the two-argument form of the constructor, Fraction(8, 6) will\n | produce a rational number equivalent to 4/3. Both arguments must\n | be Rational. The numerator defaults to 0 and the denominator\n | defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.\n | \n | Fractions can also be constructed from:\n | \n | - numeric strings similar to those accepted by the\n | float constructor (for example, '-2.3' or '1e10')\n | \n | - strings of the form '123/456'\n | \n | - float and Decimal instances\n | \n | - other Rational instances (including integers)\n | \n | Method resolution order:\n | Fraction\n | numbers.Rational\n | numbers.Real\n | numbers.Complex\n | numbers.Number\n | builtins.object\n | \n | Methods defined here:\n | \n | __abs__(a)\n | abs(a)\n | \n | __add__(a, b)\n | a + b\n | \n | __bool__(a)\n | a != 0\n | \n | __ceil__(a)\n | Will be math.ceil(a) in 3.0.\n | \n | __copy__(self)\n | \n | __deepcopy__(self, memo)\n | \n | __eq__(a, b)\n | a == b\n | \n | __floor__(a)\n | Will be math.floor(a) in 3.0.\n | \n | __floordiv__(a, b)\n | a // b\n | \n | __ge__(a, b)\n | a >= b\n | \n | __gt__(a, b)\n | a > b\n | \n | __hash__(self)\n | hash(self)\n | \n | __le__(a, b)\n | a <= b\n | \n | __lt__(a, b)\n | a < b\n | \n | __mod__(a, b)\n | a % b\n | \n | __mul__(a, b)\n | a * b\n | \n | __neg__(a)\n | -a\n | \n | __pos__(a)\n | +a: Coerces a subclass instance to Fraction\n | \n | __pow__(a, b)\n | a ** b\n | \n | If b is not an integer, the result will be a float or complex\n | since roots are generally irrational. If b is an integer, the\n | result will be rational.\n | \n | __radd__(b, a)\n | a + b\n | \n | __reduce__(self)\n | helper for pickle\n | \n | __repr__(self)\n | repr(self)\n | \n | __rfloordiv__(b, a)\n | a // b\n | \n | __rmod__(b, a)\n | a % b\n | \n | __rmul__(b, a)\n | a * b\n | \n | __round__(self, ndigits=None)\n | Will be round(self, ndigits) in 3.0.\n | \n | Rounds half toward even.\n | \n | __rpow__(b, a)\n | a ** b\n | \n | __rsub__(b, a)\n | a - b\n | \n | __rtruediv__(b, a)\n | a / b\n | \n | __str__(self)\n | str(self)\n | \n | __sub__(a, b)\n | a - b\n | \n | __truediv__(a, b)\n | a / b\n | \n | __trunc__(a)\n | trunc(a)\n | \n | limit_denominator(self, max_denominator=1000000)\n | Closest Fraction to self with denominator at most max_denominator.\n | \n | >>> Fraction('3.141592653589793').limit_denominator(10)\n | Fraction(22, 7)\n | >>> Fraction('3.141592653589793').limit_denominator(100)\n | Fraction(311, 99)\n | >>> Fraction(4321, 8765).limit_denominator(10000)\n | Fraction(4321, 8765)\n | \n | ----------------------------------------------------------------------\n | Class methods defined here:\n | \n | from_decimal(dec) from abc.ABCMeta\n | Converts a finite Decimal instance to a rational number, exactly.\n | \n | from_float(f) from abc.ABCMeta\n | Converts a finite float to a rational number, exactly.\n | \n | Beware that Fraction.from_float(0.3) != Fraction(3, 10).\n | \n | ----------------------------------------------------------------------\n | Static methods defined here:\n | \n | __new__(cls, numerator=0, denominator=None, *, _normalize=True)\n | Constructs a Rational.\n | \n | Takes a string like '3/2' or '1.5', another Rational instance, a\n | numerator/denominator pair, or a float.\n | \n | Examples\n | --------\n | \n | >>> Fraction(10, -8)\n | Fraction(-5, 4)\n | >>> Fraction(Fraction(1, 7), 5)\n | 
Fraction(1, 35)\n | >>> Fraction(Fraction(1, 7), Fraction(2, 3))\n | Fraction(3, 14)\n | >>> Fraction('314')\n | Fraction(314, 1)\n | >>> Fraction('-35/4')\n | Fraction(-35, 4)\n | >>> Fraction('3.1415') # conversion from numeric string\n | Fraction(6283, 2000)\n | >>> Fraction('-47e-2') # string may include a decimal exponent\n | Fraction(-47, 100)\n | >>> Fraction(1.47) # direct construction from float (exact conversion)\n | Fraction(6620291452234629, 4503599627370496)\n | >>> Fraction(2.25)\n | Fraction(9, 4)\n | >>> Fraction(Decimal('1.47'))\n | Fraction(147, 100)\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | denominator\n | \n | numerator\n | \n | ----------------------------------------------------------------------\n | Data and other attributes defined here:\n | \n | __abstractmethods__ = frozenset()\n | \n | ----------------------------------------------------------------------\n | Methods inherited from numbers.Rational:\n | \n | __float__(self)\n | float(self) = self.numerator / self.denominator\n | \n | It's important that this conversion use the integer's \"true\"\n | division rather than casting one side to float before dividing\n | so that ratios of huge integers convert without overflowing.\n | \n | ----------------------------------------------------------------------\n | Methods inherited from numbers.Real:\n | \n | __complex__(self)\n | complex(self) == complex(float(self), 0)\n | \n | __divmod__(self, other)\n | divmod(self, other): The pair (self // other, self % other).\n | \n | Sometimes this can be computed faster than the pair of\n | operations.\n | \n | __rdivmod__(self, other)\n | divmod(other, self): The pair (self // other, self % other).\n | \n | Sometimes this can be computed faster than the pair of\n | operations.\n | \n | conjugate(self)\n | Conjugate is a no-op for Reals.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from numbers.Real:\n | \n | imag\n | Real numbers have no imaginary component.\n | \n | real\n | Real numbers are their real component.\n\n"
],
[
"from fractions import Fraction",
"_____no_output_____"
],
[
"def rounding_float(number,place):\n return round(number,place)",
"_____no_output_____"
],
[
"rounding_float(120.6765545362663,5)",
"_____no_output_____"
],
[
"def float_to_fractions(number):\n return Fraction(*number.as_integer_ratio())",
"_____no_output_____"
],
[
"float_to_fractions(12.5)",
"_____no_output_____"
],
[
"def get_denominator(num1,num2):\n a=Fraction(num1,num2)\n return a.denominator",
"_____no_output_____"
],
[
"get_denominator(2,3)",
"_____no_output_____"
],
[
"def get_numerator(num1,num2):\n a=Fraction(num1,num2)\n return a.numerator",
"_____no_output_____"
],
[
"get_numerator(3,4)",
"_____no_output_____"
],
[
"assert(get_numerator(2,3)==2)",
"_____no_output_____"
],
[
"# assert(get_numerator(2,3)==4)",
"_____no_output_____"
]
],
[
[
"# The `decimal` Module\nWhen we need exact decimal foating-point numbers, Python has an additional immutable \nfloat type, the decimal.Decimal.",
"_____no_output_____"
]
],
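To make the point about exactness concrete, here is a small illustrative example (standard library only) contrasting binary floats with `decimal.Decimal`:

```python
from decimal import Decimal

print(0.1 + 0.2)                        # 0.30000000000000004 (binary float rounding)
print(Decimal('0.1') + Decimal('0.2'))  # 0.3 (exact decimal arithmetic)
print(0.1 + 0.2 == 0.3)                                    # False
print(Decimal('0.1') + Decimal('0.2') == Decimal('0.3'))   # True
```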
[
[
"import decimal",
"_____no_output_____"
],
[
"# dir(decimal)\nhelp(decimal.Decimal)",
"Help on class Decimal in module decimal:\n\nclass Decimal(builtins.object)\n | Construct a new Decimal object. 'value' can be an integer, string, tuple,\n | or another Decimal object. If no value is given, return Decimal('0'). The\n | context does not affect the conversion and is only passed to determine if\n | the InvalidOperation trap is active.\n | \n | Methods defined here:\n | \n | __abs__(self, /)\n | abs(self)\n | \n | __add__(self, value, /)\n | Return self+value.\n | \n | __bool__(self, /)\n | self != 0\n | \n | __ceil__(...)\n | \n | __complex__(...)\n | \n | __copy__(...)\n | \n | __deepcopy__(...)\n | \n | __divmod__(self, value, /)\n | Return divmod(self, value).\n | \n | __eq__(self, value, /)\n | Return self==value.\n | \n | __float__(self, /)\n | float(self)\n | \n | __floor__(...)\n | \n | __floordiv__(self, value, /)\n | Return self//value.\n | \n | __format__(...)\n | default object formatter\n | \n | __ge__(self, value, /)\n | Return self>=value.\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | __gt__(self, value, /)\n | Return self>value.\n | \n | __hash__(self, /)\n | Return hash(self).\n | \n | __int__(self, /)\n | int(self)\n | \n | __le__(self, value, /)\n | Return self<=value.\n | \n | __lt__(self, value, /)\n | Return self<value.\n | \n | __mod__(self, value, /)\n | Return self%value.\n | \n | __mul__(self, value, /)\n | Return self*value.\n | \n | __ne__(self, value, /)\n | Return self!=value.\n | \n | __neg__(self, /)\n | -self\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. See help(type) for accurate signature.\n | \n | __pos__(self, /)\n | +self\n | \n | __pow__(self, value, mod=None, /)\n | Return pow(self, value, mod).\n | \n | __radd__(self, value, /)\n | Return value+self.\n | \n | __rdivmod__(self, value, /)\n | Return divmod(value, self).\n | \n | __reduce__(...)\n | helper for pickle\n | \n | __repr__(self, /)\n | Return repr(self).\n | \n | __rfloordiv__(self, value, /)\n | Return value//self.\n | \n | __rmod__(self, value, /)\n | Return value%self.\n | \n | __rmul__(self, value, /)\n | Return value*self.\n | \n | __round__(...)\n | \n | __rpow__(self, value, mod=None, /)\n | Return pow(value, self, mod).\n | \n | __rsub__(self, value, /)\n | Return value-self.\n | \n | __rtruediv__(self, value, /)\n | Return value/self.\n | \n | __sizeof__(...)\n | __sizeof__() -> int\n | size of object in memory, in bytes\n | \n | __str__(self, /)\n | Return str(self).\n | \n | __sub__(self, value, /)\n | Return self-value.\n | \n | __truediv__(self, value, /)\n | Return self/value.\n | \n | __trunc__(...)\n | \n | adjusted(self, /)\n | Return the adjusted exponent of the number. Defined as exp + digits - 1.\n | \n | as_integer_ratio(self, /)\n | Decimal.as_integer_ratio() -> (int, int)\n | \n | Return a pair of integers, whose ratio is exactly equal to the original\n | Decimal and with a positive denominator. The ratio is in lowest terms.\n | Raise OverflowError on infinities and a ValueError on NaNs.\n | \n | as_tuple(self, /)\n | Return a tuple representation of the number.\n | \n | canonical(self, /)\n | Return the canonical encoding of the argument. Currently, the encoding\n | of a Decimal instance is always canonical, so this operation returns its\n | argument unchanged.\n | \n | compare(self, /, other, context=None)\n | Compare self to other. 
Return a decimal value:\n | \n | a or b is a NaN ==> Decimal('NaN')\n | a < b ==> Decimal('-1')\n | a == b ==> Decimal('0')\n | a > b ==> Decimal('1')\n | \n | compare_signal(self, /, other, context=None)\n | Identical to compare, except that all NaNs signal.\n | \n | compare_total(self, /, other, context=None)\n | Compare two operands using their abstract representation rather than\n | their numerical value. Similar to the compare() method, but the result\n | gives a total ordering on Decimal instances. Two Decimal instances with\n | the same numeric value but different representations compare unequal\n | in this ordering:\n | \n | >>> Decimal('12.0').compare_total(Decimal('12'))\n | Decimal('-1')\n | \n | Quiet and signaling NaNs are also included in the total ordering. The result\n | of this function is Decimal('0') if both operands have the same representation,\n | Decimal('-1') if the first operand is lower in the total order than the second,\n | and Decimal('1') if the first operand is higher in the total order than the\n | second operand. See the specification for details of the total order.\n | \n | This operation is unaffected by context and is quiet: no flags are changed\n | and no rounding is performed. As an exception, the C version may raise\n | InvalidOperation if the second operand cannot be converted exactly.\n | \n | compare_total_mag(self, /, other, context=None)\n | Compare two operands using their abstract representation rather than their\n | value as in compare_total(), but ignoring the sign of each operand.\n | \n | x.compare_total_mag(y) is equivalent to x.copy_abs().compare_total(y.copy_abs()).\n | \n | This operation is unaffected by context and is quiet: no flags are changed\n | and no rounding is performed. As an exception, the C version may raise\n | InvalidOperation if the second operand cannot be converted exactly.\n | \n | conjugate(self, /)\n | Return self.\n | \n | copy_abs(self, /)\n | Return the absolute value of the argument. This operation is unaffected by\n | context and is quiet: no flags are changed and no rounding is performed.\n | \n | copy_negate(self, /)\n | Return the negation of the argument. This operation is unaffected by context\n | and is quiet: no flags are changed and no rounding is performed.\n | \n | copy_sign(self, /, other, context=None)\n | Return a copy of the first operand with the sign set to be the same as the\n | sign of the second operand. For example:\n | \n | >>> Decimal('2.3').copy_sign(Decimal('-1.5'))\n | Decimal('-2.3')\n | \n | This operation is unaffected by context and is quiet: no flags are changed\n | and no rounding is performed. As an exception, the C version may raise\n | InvalidOperation if the second operand cannot be converted exactly.\n | \n | exp(self, /, context=None)\n | Return the value of the (natural) exponential function e**x at the given\n | number. The function always uses the ROUND_HALF_EVEN mode and the result\n | is correctly rounded.\n | \n | fma(self, /, other, third, context=None)\n | Fused multiply-add. 
Return self*other+third with no rounding of the\n | intermediate product self*other.\n | \n | >>> Decimal(2).fma(3, 5)\n | Decimal('11')\n | \n | from_float(f, /) from builtins.type\n | Class method that converts a float to a decimal number, exactly.\n | Since 0.1 is not exactly representable in binary floating point,\n | Decimal.from_float(0.1) is not the same as Decimal('0.1').\n | \n | >>> Decimal.from_float(0.1)\n | Decimal('0.1000000000000000055511151231257827021181583404541015625')\n | >>> Decimal.from_float(float('nan'))\n | Decimal('NaN')\n | >>> Decimal.from_float(float('inf'))\n | Decimal('Infinity')\n | >>> Decimal.from_float(float('-inf'))\n | Decimal('-Infinity')\n | \n | is_canonical(self, /)\n | Return True if the argument is canonical and False otherwise. Currently,\n | a Decimal instance is always canonical, so this operation always returns\n | True.\n | \n | is_finite(self, /)\n | Return True if the argument is a finite number, and False if the argument\n | is infinite or a NaN.\n | \n | is_infinite(self, /)\n | Return True if the argument is either positive or negative infinity and\n | False otherwise.\n | \n | is_nan(self, /)\n | Return True if the argument is a (quiet or signaling) NaN and False\n | otherwise.\n | \n | is_normal(self, /, context=None)\n | Return True if the argument is a normal finite non-zero number with an\n | adjusted exponent greater than or equal to Emin. Return False if the\n | argument is zero, subnormal, infinite or a NaN.\n | \n | is_qnan(self, /)\n | Return True if the argument is a quiet NaN, and False otherwise.\n | \n | is_signed(self, /)\n | Return True if the argument has a negative sign and False otherwise.\n | Note that both zeros and NaNs can carry signs.\n | \n | is_snan(self, /)\n | Return True if the argument is a signaling NaN and False otherwise.\n | \n | is_subnormal(self, /, context=None)\n | Return True if the argument is subnormal, and False otherwise. A number is\n | subnormal if it is non-zero, finite, and has an adjusted exponent less\n | than Emin.\n | \n | is_zero(self, /)\n | Return True if the argument is a (positive or negative) zero and False\n | otherwise.\n | \n | ln(self, /, context=None)\n | Return the natural (base e) logarithm of the operand. The function always\n | uses the ROUND_HALF_EVEN mode and the result is correctly rounded.\n | \n | log10(self, /, context=None)\n | Return the base ten logarithm of the operand. The function always uses the\n | ROUND_HALF_EVEN mode and the result is correctly rounded.\n | \n | logb(self, /, context=None)\n | For a non-zero number, return the adjusted exponent of the operand as a\n | Decimal instance. If the operand is a zero, then Decimal('-Infinity') is\n | returned and the DivisionByZero condition is raised. If the operand is\n | an infinity then Decimal('Infinity') is returned.\n | \n | logical_and(self, /, other, context=None)\n | Return the digit-wise 'and' of the two (logical) operands.\n | \n | logical_invert(self, /, context=None)\n | Return the digit-wise inversion of the (logical) operand.\n | \n | logical_or(self, /, other, context=None)\n | Return the digit-wise 'or' of the two (logical) operands.\n | \n | logical_xor(self, /, other, context=None)\n | Return the digit-wise 'exclusive or' of the two (logical) operands.\n | \n | max(self, /, other, context=None)\n | Maximum of self and other. 
If one operand is a quiet NaN and the other is\n | numeric, the numeric operand is returned.\n | \n | max_mag(self, /, other, context=None)\n | Similar to the max() method, but the comparison is done using the absolute\n | values of the operands.\n | \n | min(self, /, other, context=None)\n | Minimum of self and other. If one operand is a quiet NaN and the other is\n | numeric, the numeric operand is returned.\n | \n | min_mag(self, /, other, context=None)\n | Similar to the min() method, but the comparison is done using the absolute\n | values of the operands.\n | \n | next_minus(self, /, context=None)\n | Return the largest number representable in the given context (or in the\n | current default context if no context is given) that is smaller than the\n | given operand.\n | \n | next_plus(self, /, context=None)\n | Return the smallest number representable in the given context (or in the\n | current default context if no context is given) that is larger than the\n | given operand.\n | \n | next_toward(self, /, other, context=None)\n | If the two operands are unequal, return the number closest to the first\n | operand in the direction of the second operand. If both operands are\n | numerically equal, return a copy of the first operand with the sign set\n | to be the same as the sign of the second operand.\n | \n | normalize(self, /, context=None)\n | Normalize the number by stripping the rightmost trailing zeros and\n | converting any result equal to Decimal('0') to Decimal('0e0'). Used\n | for producing canonical values for members of an equivalence class.\n | For example, Decimal('32.100') and Decimal('0.321000e+2') both normalize\n | to the equivalent value Decimal('32.1').\n | \n | number_class(self, /, context=None)\n | Return a string describing the class of the operand. The returned value\n | is one of the following ten strings:\n | \n | * '-Infinity', indicating that the operand is negative infinity.\n | * '-Normal', indicating that the operand is a negative normal number.\n | * '-Subnormal', indicating that the operand is negative and subnormal.\n | * '-Zero', indicating that the operand is a negative zero.\n | * '+Zero', indicating that the operand is a positive zero.\n | * '+Subnormal', indicating that the operand is positive and subnormal.\n | * '+Normal', indicating that the operand is a positive normal number.\n | * '+Infinity', indicating that the operand is positive infinity.\n | * 'NaN', indicating that the operand is a quiet NaN (Not a Number).\n | * 'sNaN', indicating that the operand is a signaling NaN.\n | \n | quantize(self, /, exp, rounding=None, context=None)\n | Return a value equal to the first operand after rounding and having the\n | exponent of the second operand.\n | \n | >>> Decimal('1.41421356').quantize(Decimal('1.000'))\n | Decimal('1.414')\n | \n | Unlike other operations, if the length of the coefficient after the quantize\n | operation would be greater than precision, then an InvalidOperation is signaled.\n | This guarantees that, unless there is an error condition, the quantized exponent\n | is always equal to that of the right-hand operand.\n | \n | Also unlike other operations, quantize never signals Underflow, even if the\n | result is subnormal and inexact.\n | \n | If the exponent of the second operand is larger than that of the first, then\n | rounding may be necessary. 
In this case, the rounding mode is determined by the\n | rounding argument if given, else by the given context argument; if neither\n | argument is given, the rounding mode of the current thread's context is used.\n | \n | radix(self, /)\n | Return Decimal(10), the radix (base) in which the Decimal class does\n | all its arithmetic. Included for compatibility with the specification.\n | \n | remainder_near(self, /, other, context=None)\n | Return the remainder from dividing self by other. This differs from\n | self % other in that the sign of the remainder is chosen so as to minimize\n | its absolute value. More precisely, the return value is self - n * other\n | where n is the integer nearest to the exact value of self / other, and\n | if two integers are equally near then the even one is chosen.\n | \n | If the result is zero then its sign will be the sign of self.\n | \n | rotate(self, /, other, context=None)\n | Return the result of rotating the digits of the first operand by an amount\n | specified by the second operand. The second operand must be an integer in\n | the range -precision through precision. The absolute value of the second\n | operand gives the number of places to rotate. If the second operand is\n | positive then rotation is to the left; otherwise rotation is to the right.\n | The coefficient of the first operand is padded on the left with zeros to\n | length precision if necessary. The sign and exponent of the first operand are\n | unchanged.\n | \n | same_quantum(self, /, other, context=None)\n | Test whether self and other have the same exponent or whether both are NaN.\n | \n | This operation is unaffected by context and is quiet: no flags are changed\n | and no rounding is performed. As an exception, the C version may raise\n | InvalidOperation if the second operand cannot be converted exactly.\n | \n | scaleb(self, /, other, context=None)\n | Return the first operand with the exponent adjusted the second. Equivalently,\n | return the first operand multiplied by 10**other. The second operand must be\n | an integer.\n | \n | shift(self, /, other, context=None)\n | Return the result of shifting the digits of the first operand by an amount\n | specified by the second operand. The second operand must be an integer in\n | the range -precision through precision. The absolute value of the second\n | operand gives the number of places to shift. If the second operand is\n | positive, then the shift is to the left; otherwise the shift is to the\n | right. Digits shifted into the coefficient are zeros. The sign and exponent\n | of the first operand are unchanged.\n | \n | sqrt(self, /, context=None)\n | Return the square root of the argument to full precision. The result is\n | correctly rounded using the ROUND_HALF_EVEN rounding mode.\n | \n | to_eng_string(self, /, context=None)\n | Convert to an engineering-type string. Engineering notation has an exponent\n | which is a multiple of 3, so there are up to 3 digits left of the decimal\n | place. For example, Decimal('123E+1') is converted to Decimal('1.23E+3').\n | \n | The value of context.capitals determines whether the exponent sign is lower\n | or upper case. Otherwise, the context does not affect the operation.\n | \n | to_integral(self, /, rounding=None, context=None)\n | Identical to the to_integral_value() method. 
The to_integral() name has been\n | kept for compatibility with older versions.\n | \n | to_integral_exact(self, /, rounding=None, context=None)\n | Round to the nearest integer, signaling Inexact or Rounded as appropriate if\n | rounding occurs. The rounding mode is determined by the rounding parameter\n | if given, else by the given context. If neither parameter is given, then the\n | rounding mode of the current default context is used.\n | \n | to_integral_value(self, /, rounding=None, context=None)\n | Round to the nearest integer without signaling Inexact or Rounded. The\n | rounding mode is determined by the rounding parameter if given, else by\n | the given context. If neither parameter is given, then the rounding mode\n | of the current default context is used.\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | imag\n | \n | real\n\n"
],
[
"sum(0.1 for i in range(10))==1.0",
"_____no_output_____"
],
[
"from decimal import Decimal",
"_____no_output_____"
],
[
"sum(Decimal('0.1') for i in range(10))==1.0",
"_____no_output_____"
]
],
[
[
"While The `math` and `cmath` modules are not suitable for the decimal\nmodule, its built-in functions such as `decimal.Decimal.exp(x)` are enough\nto most of the problems.",
"_____no_output_____"
],
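[
"# A minimal added sketch (not from the original text): Decimal's own methods such as\n# exp(), ln() and sqrt() cover many needs without math/cmath. The precision of 30 is an\n# arbitrary choice for this demo.\nfrom decimal import Decimal, getcontext\ngetcontext().prec = 30\nprint(Decimal(1).exp())\nprint(Decimal(2).ln())\nprint(Decimal(2).sqrt())",
"_____no_output_____"
],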
[
"## Other Representations",
"_____no_output_____"
]
],
[
[
"bin(120)",
"_____no_output_____"
],
[
"hex(123)",
"_____no_output_____"
],
[
"oct(345)",
"_____no_output_____"
]
],
[
[
"### Functions to Convert Between Different Bases\nConverts a number in any base smaller than 10 to the decimal base:",
"_____no_output_____"
]
],
[
[
"def convert_to_decimal(number, base):\n multiplier, result = 1, 0\n while number > 0:\n result += number%10*multiplier\n multiplier *= base\n number = number//10\n return result",
"_____no_output_____"
],
[
"def test_convert_to_decimal():\n number, base = 1001, 2\n assert(convert_to_decimal(number, base) == 9)\n print('Tests passed!')",
"_____no_output_____"
],
[
"if __name__ == '__main__':\n test_convert_to_decimal()",
"Tests passed!\n"
],
[
"def convert_from_decimal(number, base):\n multiplier, result = 1, 0\n while number > 0:\n result += number%base*multiplier\n multiplier *= 10\n number = number//base\n return result",
"_____no_output_____"
],
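[
"# Added sanity check (not in the original): converting to decimal and back should be the\n# identity when every digit is valid in the source base.\nfor b in (2, 5, 8):\n    print(b, convert_from_decimal(convert_to_decimal(1101, b), b))",
"_____no_output_____"
],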
[
"def test_convert_from_decimal():\n number, base = 9, 2\n assert(convert_from_decimal(number, base) == 1001)\n print('Tests passed!')",
"_____no_output_____"
],
[
"if __name__ == '__main__':\n test_convert_from_decimal()",
"Tests passed!\n"
]
],
[
[
"Convert a number from a decimal base to any\nother base (up to 20)",
"_____no_output_____"
]
],
[
[
"def convert_from_decimal_larger_bases(number, base):\n strings = \"0123456789ABCDEFGHIJ\"\n result = \"\"\n while number > 0:\n digit = number%base\n result = strings[digit] + result\n number = number//base\n return result",
"_____no_output_____"
],
[
"def test_convert_from_decimal_larger_bases():\n number, base = 31, 16\n assert(convert_from_decimal_larger_bases(number, base) == '1F')\n print('Tests passed!')",
"_____no_output_____"
],
[
"if __name__ == '__main__':\n test_convert_from_decimal_larger_bases()",
"Tests passed!\n"
],
[
"def convert_dec_to_any_base_rec(number, base):\n ''' convert an integer to a string in any base'''\n convertString = '012345679ABCDEF'\n if number < base: return convertString[number]\n else:\n return convert_dec_to_any_base_rec(number//base, base) + convertString[number%base]",
"_____no_output_____"
],
[
"def test_convert_dec_to_any_base_rec(module_name='this module'):\n number = 9\n base = 2\n assert(convert_dec_to_any_base_rec(number, base) == '1001')\n s = 'Tests in {name} have {con}!'\n print(s.format(name=module_name, con='passed'))",
"_____no_output_____"
],
[
"if __name__ == '__main__':\n test_convert_dec_to_any_base_rec()",
"Tests in this module have passed!\n"
]
],
[
[
"### Greatest Common Divisor\nThe greatest common divisor (gcd) between\ntwo given integers:",
"_____no_output_____"
]
],
[
[
"def finding_gcd(a, b):\n ''' implements the greatest common divider algorithm '''\n while(b != 0):\n result = b\n a, b = b, a % b\n return result",
"_____no_output_____"
],
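[
"# Added check (not in the original): the result matches the standard library's math.gcd.\nimport math\nprint(finding_gcd(12, 18), math.gcd(12, 18))",
"_____no_output_____"
],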
[
"finding_gcd(2,5)",
"_____no_output_____"
],
[
"finding_gcd(3,6)",
"_____no_output_____"
]
],
[
[
"# The `Random` Module",
"_____no_output_____"
]
],
[
[
"import random",
"_____no_output_____"
],
[
"help(random)",
"Help on module random:\n\nNAME\n random - Random variable generators.\n\nDESCRIPTION\n integers\n --------\n uniform within range\n \n sequences\n ---------\n pick random element\n pick random sample\n pick weighted random sample\n generate random permutation\n \n distributions on the real line:\n ------------------------------\n uniform\n triangular\n normal (Gaussian)\n lognormal\n negative exponential\n gamma\n beta\n pareto\n Weibull\n \n distributions on the circle (angles 0 to 2pi)\n ---------------------------------------------\n circular uniform\n von Mises\n \n General notes on the underlying Mersenne Twister core generator:\n \n * The period is 2**19937-1.\n * It is one of the most extensively tested generators in existence.\n * The random() method is implemented in C, executes in a single Python step,\n and is, therefore, threadsafe.\n\nCLASSES\n _random.Random(builtins.object)\n Random\n SystemRandom\n \n class Random(_random.Random)\n | Random number generator base class used by bound module functions.\n | \n | Used to instantiate instances of Random to get generators that don't\n | share state.\n | \n | Class Random can also be subclassed if you want to use a different basic\n | generator of your own devising: in that case, override the following\n | methods: random(), seed(), getstate(), and setstate().\n | Optionally, implement a getrandbits() method so that randrange()\n | can cover arbitrarily large ranges.\n | \n | Method resolution order:\n | Random\n | _random.Random\n | builtins.object\n | \n | Methods defined here:\n | \n | __getstate__(self)\n | # Issue 17489: Since __reduce__ was defined to fix #759889 this is no\n | # longer called; we leave it here because it has been here since random was\n | # rewritten back in 2001 and why risk breaking something.\n | \n | __init__(self, x=None)\n | Initialize an instance.\n | \n | Optional argument x controls seeding, as for Random.seed().\n | \n | __reduce__(self)\n | helper for pickle\n | \n | __setstate__(self, state)\n | \n | betavariate(self, alpha, beta)\n | Beta distribution.\n | \n | Conditions on the parameters are alpha > 0 and beta > 0.\n | Returned values range between 0 and 1.\n | \n | choice(self, seq)\n | Choose a random element from a non-empty sequence.\n | \n | choices(self, population, weights=None, *, cum_weights=None, k=1)\n | Return a k sized list of population elements chosen with replacement.\n | \n | If the relative weights or cumulative weights are not specified,\n | the selections are made with equal probability.\n | \n | expovariate(self, lambd)\n | Exponential distribution.\n | \n | lambd is 1.0 divided by the desired mean. It should be\n | nonzero. (The parameter would be called \"lambda\", but that is\n | a reserved word in Python.) Returned values range from 0 to\n | positive infinity if lambd is positive, and from negative\n | infinity to 0 if lambd is negative.\n | \n | gammavariate(self, alpha, beta)\n | Gamma distribution. Not the gamma function!\n | \n | Conditions on the parameters are alpha > 0 and beta > 0.\n | \n | The probability distribution function is:\n | \n | x ** (alpha - 1) * math.exp(-x / beta)\n | pdf(x) = --------------------------------------\n | math.gamma(alpha) * beta ** alpha\n | \n | gauss(self, mu, sigma)\n | Gaussian distribution.\n | \n | mu is the mean, and sigma is the standard deviation. 
This is\n | slightly faster than the normalvariate() function.\n | \n | Not thread-safe without a lock around calls.\n | \n | getstate(self)\n | Return internal state; can be passed to setstate() later.\n | \n | lognormvariate(self, mu, sigma)\n | Log normal distribution.\n | \n | If you take the natural logarithm of this distribution, you'll get a\n | normal distribution with mean mu and standard deviation sigma.\n | mu can have any value, and sigma must be greater than zero.\n | \n | normalvariate(self, mu, sigma)\n | Normal distribution.\n | \n | mu is the mean, and sigma is the standard deviation.\n | \n | paretovariate(self, alpha)\n | Pareto distribution. alpha is the shape parameter.\n | \n | randint(self, a, b)\n | Return random integer in range [a, b], including both end points.\n | \n | randrange(self, start, stop=None, step=1, _int=<class 'int'>)\n | Choose a random item from range(start, stop[, step]).\n | \n | This fixes the problem with randint() which includes the\n | endpoint; in Python this is usually not what you want.\n | \n | sample(self, population, k)\n | Chooses k unique random elements from a population sequence or set.\n | \n | Returns a new list containing elements from the population while\n | leaving the original population unchanged. The resulting list is\n | in selection order so that all sub-slices will also be valid random\n | samples. This allows raffle winners (the sample) to be partitioned\n | into grand prize and second place winners (the subslices).\n | \n | Members of the population need not be hashable or unique. If the\n | population contains repeats, then each occurrence is a possible\n | selection in the sample.\n | \n | To choose a sample in a range of integers, use range as an argument.\n | This is especially fast and space efficient for sampling from a\n | large population: sample(range(10000000), 60)\n | \n | seed(self, a=None, version=2)\n | Initialize internal state from hashable object.\n | \n | None or no argument seeds from current time or from an operating\n | system specific randomness source if available.\n | \n | If *a* is an int, all bits are used.\n | \n | For version 2 (the default), all of the bits are used if *a* is a str,\n | bytes, or bytearray. For version 1 (provided for reproducing random\n | sequences from older versions of Python), the algorithm for str and\n | bytes generates a narrower range of seeds.\n | \n | setstate(self, state)\n | Restore internal state from object returned by getstate().\n | \n | shuffle(self, x, random=None)\n | Shuffle list x in place, and return None.\n | \n | Optional argument random is a 0-argument function returning a\n | random float in [0.0, 1.0); if it is the default None, the\n | standard random.random will be used.\n | \n | triangular(self, low=0.0, high=1.0, mode=None)\n | Triangular distribution.\n | \n | Continuous distribution bounded by given lower and upper limits,\n | and having a given mode value in-between.\n | \n | http://en.wikipedia.org/wiki/Triangular_distribution\n | \n | uniform(self, a, b)\n | Get a random number in the range [a, b) or [a, b] depending on rounding.\n | \n | vonmisesvariate(self, mu, kappa)\n | Circular data distribution.\n | \n | mu is the mean angle, expressed in radians between 0 and 2*pi, and\n | kappa is the concentration parameter, which must be greater than or\n | equal to zero. 
If kappa is equal to zero, this distribution reduces\n | to a uniform random angle over the range 0 to 2*pi.\n | \n | weibullvariate(self, alpha, beta)\n | Weibull distribution.\n | \n | alpha is the scale parameter and beta is the shape parameter.\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n | \n | ----------------------------------------------------------------------\n | Data and other attributes defined here:\n | \n | VERSION = 3\n | \n | ----------------------------------------------------------------------\n | Methods inherited from _random.Random:\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. See help(type) for accurate signature.\n | \n | getrandbits(...)\n | getrandbits(k) -> x. Generates an int with k random bits.\n | \n | random(...)\n | random() -> x in the interval [0, 1).\n \n class SystemRandom(Random)\n | Alternate random number generator using sources provided\n | by the operating system (such as /dev/urandom on Unix or\n | CryptGenRandom on Windows).\n | \n | Not available on all systems (see os.urandom() for details).\n | \n | Method resolution order:\n | SystemRandom\n | Random\n | _random.Random\n | builtins.object\n | \n | Methods defined here:\n | \n | getrandbits(self, k)\n | getrandbits(k) -> x. Generates an int with k random bits.\n | \n | getstate = _notimplemented(self, *args, **kwds)\n | \n | random(self)\n | Get the next random number in the range [0.0, 1.0).\n | \n | seed(self, *args, **kwds)\n | Stub method. Not used for a system random number generator.\n | \n | setstate = _notimplemented(self, *args, **kwds)\n | \n | ----------------------------------------------------------------------\n | Methods inherited from Random:\n | \n | __getstate__(self)\n | # Issue 17489: Since __reduce__ was defined to fix #759889 this is no\n | # longer called; we leave it here because it has been here since random was\n | # rewritten back in 2001 and why risk breaking something.\n | \n | __init__(self, x=None)\n | Initialize an instance.\n | \n | Optional argument x controls seeding, as for Random.seed().\n | \n | __reduce__(self)\n | helper for pickle\n | \n | __setstate__(self, state)\n | \n | betavariate(self, alpha, beta)\n | Beta distribution.\n | \n | Conditions on the parameters are alpha > 0 and beta > 0.\n | Returned values range between 0 and 1.\n | \n | choice(self, seq)\n | Choose a random element from a non-empty sequence.\n | \n | choices(self, population, weights=None, *, cum_weights=None, k=1)\n | Return a k sized list of population elements chosen with replacement.\n | \n | If the relative weights or cumulative weights are not specified,\n | the selections are made with equal probability.\n | \n | expovariate(self, lambd)\n | Exponential distribution.\n | \n | lambd is 1.0 divided by the desired mean. It should be\n | nonzero. (The parameter would be called \"lambda\", but that is\n | a reserved word in Python.) Returned values range from 0 to\n | positive infinity if lambd is positive, and from negative\n | infinity to 0 if lambd is negative.\n | \n | gammavariate(self, alpha, beta)\n | Gamma distribution. 
Not the gamma function!\n | \n | Conditions on the parameters are alpha > 0 and beta > 0.\n | \n | The probability distribution function is:\n | \n | x ** (alpha - 1) * math.exp(-x / beta)\n | pdf(x) = --------------------------------------\n | math.gamma(alpha) * beta ** alpha\n | \n | gauss(self, mu, sigma)\n | Gaussian distribution.\n | \n | mu is the mean, and sigma is the standard deviation. This is\n | slightly faster than the normalvariate() function.\n | \n | Not thread-safe without a lock around calls.\n | \n | lognormvariate(self, mu, sigma)\n | Log normal distribution.\n | \n | If you take the natural logarithm of this distribution, you'll get a\n | normal distribution with mean mu and standard deviation sigma.\n | mu can have any value, and sigma must be greater than zero.\n | \n | normalvariate(self, mu, sigma)\n | Normal distribution.\n | \n | mu is the mean, and sigma is the standard deviation.\n | \n | paretovariate(self, alpha)\n | Pareto distribution. alpha is the shape parameter.\n | \n | randint(self, a, b)\n | Return random integer in range [a, b], including both end points.\n | \n | randrange(self, start, stop=None, step=1, _int=<class 'int'>)\n | Choose a random item from range(start, stop[, step]).\n | \n | This fixes the problem with randint() which includes the\n | endpoint; in Python this is usually not what you want.\n | \n | sample(self, population, k)\n | Chooses k unique random elements from a population sequence or set.\n | \n | Returns a new list containing elements from the population while\n | leaving the original population unchanged. The resulting list is\n | in selection order so that all sub-slices will also be valid random\n | samples. This allows raffle winners (the sample) to be partitioned\n | into grand prize and second place winners (the subslices).\n | \n | Members of the population need not be hashable or unique. If the\n | population contains repeats, then each occurrence is a possible\n | selection in the sample.\n | \n | To choose a sample in a range of integers, use range as an argument.\n | This is especially fast and space efficient for sampling from a\n | large population: sample(range(10000000), 60)\n | \n | shuffle(self, x, random=None)\n | Shuffle list x in place, and return None.\n | \n | Optional argument random is a 0-argument function returning a\n | random float in [0.0, 1.0); if it is the default None, the\n | standard random.random will be used.\n | \n | triangular(self, low=0.0, high=1.0, mode=None)\n | Triangular distribution.\n | \n | Continuous distribution bounded by given lower and upper limits,\n | and having a given mode value in-between.\n | \n | http://en.wikipedia.org/wiki/Triangular_distribution\n | \n | uniform(self, a, b)\n | Get a random number in the range [a, b) or [a, b] depending on rounding.\n | \n | vonmisesvariate(self, mu, kappa)\n | Circular data distribution.\n | \n | mu is the mean angle, expressed in radians between 0 and 2*pi, and\n | kappa is the concentration parameter, which must be greater than or\n | equal to zero. 
If kappa is equal to zero, this distribution reduces\n | to a uniform random angle over the range 0 to 2*pi.\n | \n | weibullvariate(self, alpha, beta)\n | Weibull distribution.\n | \n | alpha is the scale parameter and beta is the shape parameter.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from Random:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n | \n | ----------------------------------------------------------------------\n | Data and other attributes inherited from Random:\n | \n | VERSION = 3\n | \n | ----------------------------------------------------------------------\n | Methods inherited from _random.Random:\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. See help(type) for accurate signature.\n\nFUNCTIONS\n betavariate(alpha, beta) method of Random instance\n Beta distribution.\n \n Conditions on the parameters are alpha > 0 and beta > 0.\n Returned values range between 0 and 1.\n \n choice(seq) method of Random instance\n Choose a random element from a non-empty sequence.\n \n choices(population, weights=None, *, cum_weights=None, k=1) method of Random instance\n Return a k sized list of population elements chosen with replacement.\n \n If the relative weights or cumulative weights are not specified,\n the selections are made with equal probability.\n \n expovariate(lambd) method of Random instance\n Exponential distribution.\n \n lambd is 1.0 divided by the desired mean. It should be\n nonzero. (The parameter would be called \"lambda\", but that is\n a reserved word in Python.) Returned values range from 0 to\n positive infinity if lambd is positive, and from negative\n infinity to 0 if lambd is negative.\n \n gammavariate(alpha, beta) method of Random instance\n Gamma distribution. Not the gamma function!\n \n Conditions on the parameters are alpha > 0 and beta > 0.\n \n The probability distribution function is:\n \n x ** (alpha - 1) * math.exp(-x / beta)\n pdf(x) = --------------------------------------\n math.gamma(alpha) * beta ** alpha\n \n gauss(mu, sigma) method of Random instance\n Gaussian distribution.\n \n mu is the mean, and sigma is the standard deviation. This is\n slightly faster than the normalvariate() function.\n \n Not thread-safe without a lock around calls.\n \n getrandbits(...) method of Random instance\n getrandbits(k) -> x. Generates an int with k random bits.\n \n getstate() method of Random instance\n Return internal state; can be passed to setstate() later.\n \n lognormvariate(mu, sigma) method of Random instance\n Log normal distribution.\n \n If you take the natural logarithm of this distribution, you'll get a\n normal distribution with mean mu and standard deviation sigma.\n mu can have any value, and sigma must be greater than zero.\n \n normalvariate(mu, sigma) method of Random instance\n Normal distribution.\n \n mu is the mean, and sigma is the standard deviation.\n \n paretovariate(alpha) method of Random instance\n Pareto distribution. alpha is the shape parameter.\n \n randint(a, b) method of Random instance\n Return random integer in range [a, b], including both end points.\n \n random(...) 
method of Random instance\n random() -> x in the interval [0, 1).\n \n randrange(start, stop=None, step=1, _int=<class 'int'>) method of Random instance\n Choose a random item from range(start, stop[, step]).\n \n This fixes the problem with randint() which includes the\n endpoint; in Python this is usually not what you want.\n \n sample(population, k) method of Random instance\n Chooses k unique random elements from a population sequence or set.\n \n Returns a new list containing elements from the population while\n leaving the original population unchanged. The resulting list is\n in selection order so that all sub-slices will also be valid random\n samples. This allows raffle winners (the sample) to be partitioned\n into grand prize and second place winners (the subslices).\n \n Members of the population need not be hashable or unique. If the\n population contains repeats, then each occurrence is a possible\n selection in the sample.\n \n To choose a sample in a range of integers, use range as an argument.\n This is especially fast and space efficient for sampling from a\n large population: sample(range(10000000), 60)\n \n seed(a=None, version=2) method of Random instance\n Initialize internal state from hashable object.\n \n None or no argument seeds from current time or from an operating\n system specific randomness source if available.\n \n If *a* is an int, all bits are used.\n \n For version 2 (the default), all of the bits are used if *a* is a str,\n bytes, or bytearray. For version 1 (provided for reproducing random\n sequences from older versions of Python), the algorithm for str and\n bytes generates a narrower range of seeds.\n \n setstate(state) method of Random instance\n Restore internal state from object returned by getstate().\n \n shuffle(x, random=None) method of Random instance\n Shuffle list x in place, and return None.\n \n Optional argument random is a 0-argument function returning a\n random float in [0.0, 1.0); if it is the default None, the\n standard random.random will be used.\n \n triangular(low=0.0, high=1.0, mode=None) method of Random instance\n Triangular distribution.\n \n Continuous distribution bounded by given lower and upper limits,\n and having a given mode value in-between.\n \n http://en.wikipedia.org/wiki/Triangular_distribution\n \n uniform(a, b) method of Random instance\n Get a random number in the range [a, b) or [a, b] depending on rounding.\n \n vonmisesvariate(mu, kappa) method of Random instance\n Circular data distribution.\n \n mu is the mean angle, expressed in radians between 0 and 2*pi, and\n kappa is the concentration parameter, which must be greater than or\n equal to zero. If kappa is equal to zero, this distribution reduces\n to a uniform random angle over the range 0 to 2*pi.\n \n weibullvariate(alpha, beta) method of Random instance\n Weibull distribution.\n \n alpha is the scale parameter and beta is the shape parameter.\n\nDATA\n __all__ = ['Random', 'seed', 'random', 'uniform', 'randint', 'choice',...\n\nFILE\n c:\\anaconda3\\lib\\random.py\n\n\n"
],
[
"my_list=[2,5,6,7,8,9]",
"_____no_output_____"
],
[
"random.choice(my_list)",
"_____no_output_____"
],
[
"random.sample(my_list,2)",
"_____no_output_____"
],
[
"random.shuffle(my_list)",
"_____no_output_____"
],
[
"my_list",
"_____no_output_____"
],
[
"random.randint(1,10)",
"_____no_output_____"
]
],
[
[
"### Fibonacci Sequences\nTo find the nth number in a Fibonacci sequence in three ways: \n\n (a) with a recursive O(2<sup>n</sup>) runtime; \n \n (b) with a iterative O(n<sup>2</sup>) runtime; and \n \n (c) using a formula that gives a O(1) runtime but is not precise after around the 70th element:",
"_____no_output_____"
]
],
[
[
"def find_fibonacci_seq_rec(n):\n if n < 2:\n return n\n return find_fibonacci_seq_rec(n-1) + find_fibonacci_seq_rec(n-2)",
"_____no_output_____"
],
[
"find_fibonacci_seq_rec(8)",
"_____no_output_____"
],
[
"def find_fibonacci_seq_iter(n):\n if n < 2: \n return n\n a, b = 0, 1\n for i in range(n):\n a, b = b, a + b\n return a",
"_____no_output_____"
],
[
"find_fibonacci_seq_iter(8)",
"_____no_output_____"
],
[
"def find_fibonacci_seq_form(n):\n sq5 = math.sqrt(5)\n phi = (1 + sq5) / 2\n return int(math.floor(phi ** n / sq5))",
"_____no_output_____"
],
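[
"# Added comparison (not in the original): the closed-form value starts to drift from the\n# exact iterative value for large n, as noted above (around the 70th element).\nimport math\nfor n in (10, 30, 70, 75):\n    print(n, find_fibonacci_seq_iter(n), find_fibonacci_seq_form(n))",
"_____no_output_____"
],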
[
"import math\nfind_fibonacci_seq_form(8)",
"_____no_output_____"
]
],
[
[
"### Primes\nThe following program finds whether a number is a prime in three ways:\n(a) brute force; (b) rejecting all the candidates up to the square root of the\nnumber; and (c) using the Fermat's theorem with probabilistic tests:",
"_____no_output_____"
]
],
[
[
"import math\nimport random\n\ndef finding_prime(number):\n num = abs(number)\n if num < 4 : \n return True\n for x in range(2, num):\n if num % x == 0:\n return False\n return True",
"_____no_output_____"
],
[
"finding_prime(5)",
"_____no_output_____"
],
[
"finding_prime(4)",
"_____no_output_____"
],
[
"def finding_prime_sqrt(number):\n num = abs(number)\n if num < 4 :\n return True\n for x in range(2, int(math.sqrt(num)) + 1):\n if number % x == 0:\n return False\n return True",
"_____no_output_____"
],
[
"finding_prime_sqrt(3)",
"_____no_output_____"
],
[
"finding_prime_sqrt(9)",
"_____no_output_____"
],
[
"def finding_prime_fermat(number):\n if number <= 102:\n for a in range(2, number):\n if pow(a, number- 1, number) != 1:\n return False\n return True\n else:\n for i in range(100):\n a = random.randint(2, number - 1)\n if pow(a, number - 1, number) != 1:\n return False\n return True",
"_____no_output_____"
],
[
"finding_prime_fermat(4)",
"_____no_output_____"
],
[
"finding_prime_fermat(7)",
"_____no_output_____"
],
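[
"# Added cross-check (not in the original): the three tests agree on a few small inputs.\nfor n in (5, 9, 17, 21, 97):\n    print(n, finding_prime(n), finding_prime_sqrt(n), finding_prime_fermat(n))",
"_____no_output_____"
],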
[
"import math\nimport random\nimport sys\n\n\ndef generate_prime(number=3):\n while 1:\n p = random.randint(pow(2, number-2), pow(2, number-1)-1)\n p = 2 * p + 1\n if finding_prime_sqrt(p):\n return p",
"_____no_output_____"
]
],
[
[
"# The `math` module",
"_____no_output_____"
]
],
[
[
"import math",
"_____no_output_____"
],
[
"help(math)",
"Help on built-in module math:\n\nNAME\n math\n\nDESCRIPTION\n This module is always available. It provides access to the\n mathematical functions defined by the C standard.\n\nFUNCTIONS\n acos(...)\n acos(x)\n \n Return the arc cosine (measured in radians) of x.\n \n acosh(...)\n acosh(x)\n \n Return the inverse hyperbolic cosine of x.\n \n asin(...)\n asin(x)\n \n Return the arc sine (measured in radians) of x.\n \n asinh(...)\n asinh(x)\n \n Return the inverse hyperbolic sine of x.\n \n atan(...)\n atan(x)\n \n Return the arc tangent (measured in radians) of x.\n \n atan2(...)\n atan2(y, x)\n \n Return the arc tangent (measured in radians) of y/x.\n Unlike atan(y/x), the signs of both x and y are considered.\n \n atanh(...)\n atanh(x)\n \n Return the inverse hyperbolic tangent of x.\n \n ceil(...)\n ceil(x)\n \n Return the ceiling of x as an Integral.\n This is the smallest integer >= x.\n \n copysign(...)\n copysign(x, y)\n \n Return a float with the magnitude (absolute value) of x but the sign \n of y. On platforms that support signed zeros, copysign(1.0, -0.0) \n returns -1.0.\n \n cos(...)\n cos(x)\n \n Return the cosine of x (measured in radians).\n \n cosh(...)\n cosh(x)\n \n Return the hyperbolic cosine of x.\n \n degrees(...)\n degrees(x)\n \n Convert angle x from radians to degrees.\n \n erf(...)\n erf(x)\n \n Error function at x.\n \n erfc(...)\n erfc(x)\n \n Complementary error function at x.\n \n exp(...)\n exp(x)\n \n Return e raised to the power of x.\n \n expm1(...)\n expm1(x)\n \n Return exp(x)-1.\n This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x.\n \n fabs(...)\n fabs(x)\n \n Return the absolute value of the float x.\n \n factorial(...)\n factorial(x) -> Integral\n \n Find x!. Raise a ValueError if x is negative or non-integral.\n \n floor(...)\n floor(x)\n \n Return the floor of x as an Integral.\n This is the largest integer <= x.\n \n fmod(...)\n fmod(x, y)\n \n Return fmod(x, y), according to platform C. x % y may differ.\n \n frexp(...)\n frexp(x)\n \n Return the mantissa and exponent of x, as pair (m, e).\n m is a float and e is an int, such that x = m * 2.**e.\n If x is 0, m and e are both 0. Else 0.5 <= abs(m) < 1.0.\n \n fsum(...)\n fsum(iterable)\n \n Return an accurate floating point sum of values in the iterable.\n Assumes IEEE-754 floating point arithmetic.\n \n gamma(...)\n gamma(x)\n \n Gamma function at x.\n \n gcd(...)\n gcd(x, y) -> int\n greatest common divisor of x and y\n \n hypot(...)\n hypot(x, y)\n \n Return the Euclidean distance, sqrt(x*x + y*y).\n \n isclose(...)\n isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0) -> bool\n \n Determine whether two floating point numbers are close in value.\n \n rel_tol\n maximum difference for being considered \"close\", relative to the\n magnitude of the input values\n abs_tol\n maximum difference for being considered \"close\", regardless of the\n magnitude of the input values\n \n Return True if a is close in value to b, and False otherwise.\n \n For the values to be considered close, the difference between them\n must be smaller than at least one of the tolerances.\n \n -inf, inf and NaN behave similarly to the IEEE 754 Standard. That\n is, NaN is not close to anything, even itself. 
inf and -inf are\n only close to themselves.\n \n isfinite(...)\n isfinite(x) -> bool\n \n Return True if x is neither an infinity nor a NaN, and False otherwise.\n \n isinf(...)\n isinf(x) -> bool\n \n Return True if x is a positive or negative infinity, and False otherwise.\n \n isnan(...)\n isnan(x) -> bool\n \n Return True if x is a NaN (not a number), and False otherwise.\n \n ldexp(...)\n ldexp(x, i)\n \n Return x * (2**i).\n \n lgamma(...)\n lgamma(x)\n \n Natural logarithm of absolute value of Gamma function at x.\n \n log(...)\n log(x[, base])\n \n Return the logarithm of x to the given base.\n If the base not specified, returns the natural logarithm (base e) of x.\n \n log10(...)\n log10(x)\n \n Return the base 10 logarithm of x.\n \n log1p(...)\n log1p(x)\n \n Return the natural logarithm of 1+x (base e).\n The result is computed in a way which is accurate for x near zero.\n \n log2(...)\n log2(x)\n \n Return the base 2 logarithm of x.\n \n modf(...)\n modf(x)\n \n Return the fractional and integer parts of x. Both results carry the sign\n of x and are floats.\n \n pow(...)\n pow(x, y)\n \n Return x**y (x to the power of y).\n \n radians(...)\n radians(x)\n \n Convert angle x from degrees to radians.\n \n sin(...)\n sin(x)\n \n Return the sine of x (measured in radians).\n \n sinh(...)\n sinh(x)\n \n Return the hyperbolic sine of x.\n \n sqrt(...)\n sqrt(x)\n \n Return the square root of x.\n \n tan(...)\n tan(x)\n \n Return the tangent of x (measured in radians).\n \n tanh(...)\n tanh(x)\n \n Return the hyperbolic tangent of x.\n \n trunc(...)\n trunc(x:Real) -> Integral\n \n Truncates x to the nearest Integral toward 0. Uses the __trunc__ magic method.\n\nDATA\n e = 2.718281828459045\n inf = inf\n nan = nan\n pi = 3.141592653589793\n tau = 6.283185307179586\n\nFILE\n (built-in)\n\n\n"
],
[
"dir(math)",
"_____no_output_____"
],
[
"math.__spec__",
"_____no_output_____"
]
],
[
[
"## Number-theoretic and representation functions",
"_____no_output_____"
]
],
[
[
"for i in dir(math):\n if i[0] !='_':\n print(i,end=\"\\t\")\nprint(len(dir(math)))",
"acos\tacosh\tasin\tasinh\tatan\tatan2\tatanh\tceil\tcopysign\tcos\tcosh\tdegrees\te\terf\terfc\texp\texpm1\tfabs\tfactorial\tfloor\tfmod\tfrexp\tfsum\tgamma\tgcd\thypot\tinf\tisclose\tisfinite\tisinf\tisnan\tldexp\tlgamma\tlog\tlog10\tlog1p\tlog2\tmodf\tnan\tpi\tpow\tradians\tsin\tsinh\tsqrt\ttan\ttanh\ttau\ttrunc\t54\n"
],
[
"num1=6\nnum2= -56\nnum3=45.9086\nnum4= -45.898",
"_____no_output_____"
],
[
"math.ceil(num3)",
"_____no_output_____"
],
[
"math.ceil(num4)",
"_____no_output_____"
],
[
"math.floor(num3)",
"_____no_output_____"
],
[
"math.floor(num4)",
"_____no_output_____"
],
[
"math.copysign(num1,num2)",
"_____no_output_____"
],
[
"math.fabs(num2)",
"_____no_output_____"
],
[
"math.factorial(5)",
"_____no_output_____"
],
[
"num=9\nmath.isnan(num)",
"_____no_output_____"
]
],
[
[
"# The `NumPy` Module\nThe NumPy module provides array sequences that can store numbers or\ncharacters in a space-efficient way. Arrays in NumPy can have any ar-\nbitrary dimension. They can be generated from a list or a tuple with the\narray-method, which transforms sequences of sequences into two dimensional\narrays:",
"_____no_output_____"
]
],
[
[
"import numpy as np\nx = np.array( ((11,12,13), (21,22,23), (31,32,33)) )",
"_____no_output_____"
],
[
"x",
"_____no_output_____"
],
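[
"# Added example (not in the original): shape and dtype complete the basic introspection.\nprint(x.shape, x.dtype)",
"_____no_output_____"
],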
[
"x.ndim",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e74cf9d00c679ad93d8259d0e41fd6500d08e3b2 | 43,085 | ipynb | Jupyter Notebook | Untitled28.ipynb | sfeshencko/JupyterWorks | d5f2706272920a81dd55f3b581aec904bd1a14e8 | [
"MIT"
] | 1 | 2020-10-24T13:43:10.000Z | 2020-10-24T13:43:10.000Z | Untitled28.ipynb | sfeshencko/JupyterWorks | d5f2706272920a81dd55f3b581aec904bd1a14e8 | [
"MIT"
] | null | null | null | Untitled28.ipynb | sfeshencko/JupyterWorks | d5f2706272920a81dd55f3b581aec904bd1a14e8 | [
"MIT"
] | null | null | null | 418.300971 | 28,424 | 0.870164 | [
[
[
"import numpy as np\nimport matplotlib .pyplot as plt\n%matplotlib inline\ndef fac(k):\n if k == 0:\n return 1\n if k == 1:\n return 1\n else:\n return k*fac(k-1)\ndef cnk(n, k):\n s = fac(n)/(fac(k)*fac(n-k)) # k > n\n return s\ndef lag(z, n):\n sum1 = sum2 = 0.0\n for k in range(n+1):\n if k % 2 == 0:\n sum1 += cnk(n, k)*((pow(z, k)) / float(fac(k)))\n else:\n sum2 += cnk(n, k)*((pow(z, k)) / float(fac(k)))\n return sum1 - sum2\nx = np.linspace(-5,5,100)\ny = [lag(i,1) for i in x]\ny1=[lag(i,1) for i in x]\ny2=[lag(i,2) for i in x]\ny3=[lag(i,3) for i in x]\ny4=[lag(i,4) for i in x]\ny5=[lag(i,5) for i in x]\ny6=[lag(i,6) for i in x]\ny7=[lag(i,7) for i in x]\nplt.figure(figsize = (9,9))\nplt.plot(x,y,x,y1,x,y2,x,y3,x,y4,x,y5,x,y6,x,y7)\nplt.subplot(x,y1)\nplt.grid(True)\n\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e74cfcee2bd8aff6388dbbc9766eaea5c9830f03 | 24,143 | ipynb | Jupyter Notebook | novice/python/string-formatting.ipynb | richford/2015-01-22-stonybrook | b9d5dd4af5df2ae39dcd1d7a8cfeeb07b6f35e26 | [
"CC-BY-3.0"
] | null | null | null | novice/python/string-formatting.ipynb | richford/2015-01-22-stonybrook | b9d5dd4af5df2ae39dcd1d7a8cfeeb07b6f35e26 | [
"CC-BY-3.0"
] | null | null | null | novice/python/string-formatting.ipynb | richford/2015-01-22-stonybrook | b9d5dd4af5df2ae39dcd1d7a8cfeeb07b6f35e26 | [
"CC-BY-3.0"
] | null | null | null | 32.062417 | 332 | 0.518577 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e74d00bf381c372df7c3e34707c0ccc9f565c62a | 4,196 | ipynb | Jupyter Notebook | demo/predict-taxi-trip-duration-nb/develop_ml_application_tour.ipynb | heiyan1shengdun/OpenMLDB | 150bd9c456adccd5cb93c8faf55bfb9f7ad7405e | [
"Apache-2.0"
] | 36 | 2021-01-20T06:15:11.000Z | 2021-05-28T05:15:36.000Z | demo/predict-taxi-trip-duration-nb/develop_ml_application_tour.ipynb | xiaominghao2000/OpenMLDB | 8242cb6e4cb77ac6bf67b2e9e43655942c6ffa8a | [
"Apache-2.0"
] | 33 | 2021-04-15T05:55:37.000Z | 2021-05-27T06:47:48.000Z | demo/predict-taxi-trip-duration-nb/develop_ml_application_tour.ipynb | xiaominghao2000/OpenMLDB | 8242cb6e4cb77ac6bf67b2e9e43655942c6ffa8a | [
"Apache-2.0"
] | 13 | 2021-02-02T06:43:47.000Z | 2021-05-17T09:51:06.000Z | 25.585366 | 225 | 0.584128 | [
[
[
"# 基于机器学习数据库飞速上线AI应用\n大家平时可能都会打车,从出发的地点到目的地,行程耗时可能会存在多种因素,比如天气,是否周五,如果获取更准确的耗时预测,对人来说是一个复杂的问题,而对机器就变得很简单,今天的任务就是开发一个通过机器学习模型进行出租车行程耗时的实时智能应用,整个应用开发是基于[notebook](http://ipython.org/notebook.html)\n\n\n\n",
"_____no_output_____"
],
[
"## 初始化环境\n整个初始化过程包含安装fedb,以及相关运行环境,初始化脚步可以参考https://github.com/4paradigm/DemoApps/blob/main/predict-taxi-trip-duration-nb/demo/init.sh",
"_____no_output_____"
]
],
[
[
"!cd demo && sh init.sh",
"_____no_output_____"
]
],
[
[
"## 导入行程历史数据到fedb\n\n使用fedb进行时序特征计算是需要历史数据的,所以我们将历史的行程数据导入到fedb,以便实时推理可以使用历史数据进行特征推理,导入代码可以参考https://github.com/4paradigm/DemoApps/blob/main/predict-taxi-trip-duration-nb/demo/import.py",
"_____no_output_____"
]
],
[
[
"!cd demo && python3 import.py",
"_____no_output_____"
]
],
[
[
"## 使用行程数据进行模型训练\n\n通过label数据进行模型训练,一下是这次任务使用的代码\n\n* 训练脚本代码 https://github.com/4paradigm/DemoApps/blob/main/predict-taxi-trip-duration-nb/demo/train_sql.py \n* 训练数据 https://github.com/4paradigm/DemoApps/blob/main/predict-taxi-trip-duration-nb/demo/data/taxi_tour_table_train_simple.snappy.parquet\n\n整个任务最终会生成一个model.txt",
"_____no_output_____"
]
],
[
[
"!cd demo && python3 train.py ./fe.sql /tmp/model.txt",
"_____no_output_____"
]
],
[
[
"## 使用训练的模型搭建链接fedb的实时推理http服务\n\n基于上一步生成的模型和fedb历史数据,搭建一个实时推理服务,整个推理服务代码参考https://github.com/4paradigm/DemoApps/blob/main/predict-taxi-trip-duration-nb/demo/predict_server.py",
"_____no_output_____"
]
],
[
[
"!cd demo && sh start_predict_server.sh ./fe.sql 8887 /tmp/model.txt",
"_____no_output_____"
]
],
[
[
"## 通过http请求发送一个推理请求\n\n整个请求很简单,具体代码如下\n\n```python\nurl = \"http://127.0.0.1:8887/predict\"\nreq ={\"id\":\"id0376262\",\n\t\"vendor_id\":1,\n\t\"pickup_datetime\":1467302350000,\n\t\"dropoff_datetime\":1467304896000,\n\t\"passenger_count\":2,\n\t\"pickup_longitude\":-73.873093,\n\t\"pickup_latitude\":40.774097,\n\t\"dropoff_longitude\":-73.926704,\n\t\"dropoff_latitude\":40.856739,\n\t\"store_and_fwd_flag\":\"N\",\n\t\"trip_duration\":1}\nr = requests.post(url, json=req)\nprint(r.text)\nprint(\"Congraduation! You have finished the task.\")\ntmp = os.urandom(44)\nsecret_key = base64.b64encode(tmp)\nprint(\"Your Key:\" + str(secret_key))\n```",
"_____no_output_____"
]
],
[
[
"!cd demo && python3 predict.py",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74d1e84532b48fe1585f415f153bb96c6329fac | 87,163 | ipynb | Jupyter Notebook | brazil_ecommerce/working/ref-te-method01-Feature_Engineer.ipynb | gonsoomoon-ml/predict-delivery-time | 615bba4b4268f57326c0f5ee27a8e0cafc625090 | [
"MIT"
] | 1 | 2020-11-14T13:39:41.000Z | 2020-11-14T13:39:41.000Z | brazil_ecommerce/working/ref-te-method01-Feature_Engineer.ipynb | gonsoomoon-ml/predict-delivery-time | 615bba4b4268f57326c0f5ee27a8e0cafc625090 | [
"MIT"
] | null | null | null | brazil_ecommerce/working/ref-te-method01-Feature_Engineer.ipynb | gonsoomoon-ml/predict-delivery-time | 615bba4b4268f57326c0f5ee27a8e0cafc625090 | [
"MIT"
] | 1 | 2020-11-14T13:39:43.000Z | 2020-11-14T13:39:43.000Z | 36.137231 | 171 | 0.422347 | [
[
[
"# [Module 2.1] 피쳐 엔지니어링\n\n이 노트북은 아래와 같은 피쳐 엔지니어링을 통하여 새로운 피쳐를 생성 합니다.\n- 날짜관련 피쳐 생성(월, 일, 요일)\n- 기존의 피쳐들을 결합하여 새로운 피쳐 생성 (피쳐1 + 피쳐2 = 뉴피쳐)\n- Product_ID를 기준으로 Target Encoding 하여 새로운 피쳐 생성\n- Product_ID를 기준으로 Target Encoding Smoothing 하여 새로운 피쳐 생성\n- Category 피쳐를 레이블 인코딩 함\n- 로컬에 데이터 저장\n - 최종 레이블 인코딩 된 데이터 세트 저장 (XGBoost, CatBoost 용)\n - 레이블 인코딩 안한 데이터 세트 저장 (AutoGluon 용)",
"_____no_output_____"
]
],
[
[
"import pandas as pd\npd.options.display.max_rows=5\nimport numpy as np",
"_____no_output_____"
],
[
"%store -r full_data_file_name",
"_____no_output_____"
]
],
[
[
"### 데이터 로딩 및 셔플링",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(full_data_file_name)\ndf = df.sample(frac=1.0, random_state=1000)\ndf",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
]
],
[
[
"## 날짜 피쳐 생성: Month, Day, WeeoOfDay(요일)",
"_____no_output_____"
]
],
[
[
"def create_date_feature(raw_df):\n df = raw_df.copy()\n df['order_date'] = pd.to_datetime(df['order_approved_at']) \n df['order_weekday'] = df['order_date'].dt.weekday\n df['order_day'] = df['order_date'].dt.day \n df['order_month'] = df['order_date'].dt.month \n return df\n\nf_df = create_date_feature(df)\nf_df",
"_____no_output_____"
]
],
[
[
"## 기존 피쳐 결합하여 새로운 피쳐 생성 (컬럼1 + 컬럼2 = 뉴피쳐)",
"_____no_output_____"
]
],
[
[
"def change_var_type(f_df):\n df = f_df.copy()\n df['customer_zip_code_prefix'] = df['customer_zip_code_prefix'].astype(str)\n df['seller_zip_code_prefix'] = df['seller_zip_code_prefix'].astype(str) \n return df\n\ndef comnbine_columns(f_df,src_col1, src_col2,new_col):\n df = f_df.copy()\n df[new_col] = df[str(src_col1)] + '_' + df[str(src_col2)]\n print(\"df shape: \", df.shape)\n return df\n\n\n\nf_df = change_var_type(f_df)",
"_____no_output_____"
]
],
[
[
"### custoemr_state + seller_state",
"_____no_output_____"
]
],
[
[
"f_df = comnbine_columns(f_df,src_col1='customer_state', src_col2='seller_state',new_col='customer_seller_state')",
"df shape: (67176, 22)\n"
]
],
[
[
"### custoemr_city + seller_city",
"_____no_output_____"
]
],
[
[
"f_df = comnbine_columns(f_df,src_col1='customer_city', src_col2='seller_city',new_col='customer_seller_city')",
"df shape: (67176, 23)\n"
]
],
[
[
"### custoemr_zip + seller_zip",
"_____no_output_____"
]
],
[
[
"f_df = comnbine_columns(f_df,src_col1='customer_zip_code_prefix', \n src_col2='seller_zip_code_prefix',new_col='customer_seller_zip_code_prefix')",
"df shape: (67176, 24)\n"
],
[
"f_df",
"_____no_output_____"
]
],
[
[
"## product volume 컬럼 생성 (가로 * 세로 * 높이 의 계산값)",
"_____no_output_____"
]
],
[
[
"def add_product_volume(raw_df):\n df = raw_df.copy()\n df['product_volume'] = df.product_length_cm * df.product_width_cm * df.product_height_cm\n return df\n\nf_df = add_product_volume(f_df)",
"_____no_output_____"
],
[
"f_df.columns",
"_____no_output_____"
]
],
[
[
"## Train, Test 데이터 셋 분리",
"_____no_output_____"
]
],
[
[
"\ndef split_data_2(raw_df, sort_col='order_approved_at',val_ratio=0.3):\n '''\n train, test 데이터 분리\n '''\n df = raw_df.copy()\n val_ratio = 1 - val_ratio # 1 - 0.3 = 0.7\n\n \n df = df.sort_values(by= sort_col) # 시간 순으로 정렬\n # One-Hot-Encoding\n data1,data2, = np.split(df, \n [int(val_ratio * len(df))]) # Randomly sort the data then split out first 70%, second 20%, and last 10%\n \n print(f\"data1, data2 shape: {data1.shape},{data2.shape}\")\n \n return data1, data2\n\ntrain_df, test_df = split_data_2(f_df, val_ratio=0.2)\n\n\n",
"data1, data2 shape: (53740, 25),(13436, 25)\n"
]
],
[
[
"## Target Encoding 관련 피쳐 생성\n- Product_ID 별 Classes의 평균, 갯수 (te_pdid_mean, te_pdid_count)\n- Target Error (classes - te_pdid_mean)\n\n## Target Encoding with Smoothing\n아래 비디오 및 코드 참조 함\n- Feature Engineering\n - RecSys 2020 Tutorial: Feature Engineering for Recommender Systems\n - https://www.youtube.com/watch?v=uROvhp7cj6Q\n - Git Repo\n - https://github.com/rapidsai/deeplearning/tree/main/RecSys2020Tutorial\n\n",
"_____no_output_____"
],
[
"\\begin{equation} \\label{eq:te}\nTE_{target}([Categories]) = \\frac{count([Categories]) * mean_{target}([Categories]) + w_{smoothing} * mean_{target}(global)}{count([Categories]) + w_{smoothing}}\n\\end{equation}",
"_____no_output_____"
]
],
[
[
"\ndef create_target_encoding(cat, raw_df):\n '''\n te_mean, te_count 피쳐 생성\n '''\n df = raw_df.copy()\n te = df.groupby(cat).classes.agg(['mean','count']).reset_index()\n te_mean_col = 'te_' + cat + '_mean'\n te_count_col = 'te_' + cat + '_count' \n\n cat = [cat]\n te.columns = cat + [te_mean_col,te_count_col]\n te_df = df.merge(te, on=cat, how='left')\n \n return te_df\n\nw = 20 # global 평균에 더 높은 가중치를 주는 값\ndef create_target_encoding_smoothe(cat, raw_df, w):\n '''\n te_mean, te_count를 기반으로 te_mean_smoothed 생성\n '''\n df = raw_df.copy()\n te_mean_col = 'te_' + cat + '_mean'\n te_count_col = 'te_' + cat + '_count' \n te_target_col = 'te_' + cat + '_mean_smoothed'\n \n classes_mean_global = df.classes.mean()\n # print(\" classes_mean_global: \", classes_mean_global)\n df[te_target_col] = (df[te_mean_col] * df[te_count_col]) + (classes_mean_global * w) / (df[te_count_col] + w)\n df.drop([te_mean_col,te_count_col],axis=1, inplace=True)\n\n return df\n\ndef add_te_on_test(raw_train_df, raw_test_df, join_col, te_col_name):\n '''\n train의 te_mean_smoothed를 기반으로 test데이터에도 te_mean_smoothed를 생성함.\n 만일 train에 해당 값이 없으면, 전체 평균값으로 대체 함.\n '''\n train_df = raw_train_df[[join_col,te_col_name]]\n test_df = raw_test_df.copy()\n \n # global_mean = train_df[te_col_name].mean() # 전체 레코드의 평균\n avg_train = train_df.groupby(join_col)[te_col_name].mean() # join_col 의 평균값을 구함\n avg_train_df = pd.DataFrame(avg_train).reset_index()\n # display(avg_train_df)\n \n target_df = pd.merge(\n test_df,\n avg_train_df,\n on = join_col,\n how = 'left' \n )\n \n global_mean = avg_train_df[te_col_name].mean() # 전체 레코드의 평균 \n \n # print(\"global mean: \", global_mean)\n # train에 있고 test에 없으면 train의 평균값을 채움\n target_df[te_col_name].fillna(global_mean, inplace=True)\n \n print(\"targe_df and test shape: \", target_df.shape, test_df.shape) \n assert(test_df.shape[0] == target_df.shape[0])\n \n return target_df\n\n\n",
"_____no_output_____"
],
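[
"# Added toy example (not part of the original pipeline): apply the helpers above to a tiny\n# frame to see te_*_mean / te_*_count and the smoothed value from the formula shown earlier.\n_toy = pd.DataFrame({'product_id': ['a', 'a', 'a', 'b'], 'classes': [1, 0, 1, 1]})\n_toy_te = create_target_encoding('product_id', _toy)\ndisplay(_toy_te)\ndisplay(create_target_encoding_smoothe('product_id', _toy_te, w=20))",
"_____no_output_____"
],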
[
"def add_noise(series, noise_level):\n return series * (1 + noise_level * np.random.randn(len(series)))\n\ndef target_encode(trn_series=None, \n tst_series=None, \n target=None, \n min_samples_leaf=1, \n smoothing=1,\n noise_level=0):\n \"\"\"\n Smoothing is computed like in the following paper by Daniele Micci-Barreca\n https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf\n trn_series : training categorical feature as a pd.Series\n tst_series : test categorical feature as a pd.Series\n target : target data as a pd.Series\n min_samples_leaf (int) : minimum samples to take category average into account\n smoothing (int) : smoothing effect to balance categorical average vs prior \n \"\"\" \n assert len(trn_series) == len(target)\n assert trn_series.name == tst_series.name\n temp = pd.concat([trn_series, target], axis=1)\n # Compute target mean \n averages = temp.groupby(by=trn_series.name)[target.name].agg([\"mean\", \"count\"])\n# display(averages)\n # Compute smoothing\n smoothing = 1 / (1 + np.exp(-(averages[\"count\"] - min_samples_leaf) / smoothing))\n # display(smoothing)\n # Apply average function to all target data\n prior = target.mean()\n # The bigger the count the less full_avg is taken into account\n averages[target.name] = prior * (1 - smoothing) + averages[\"mean\"] * smoothing\n averages.drop([\"mean\", \"count\"], axis=1, inplace=True)\n display(averages) \n # Apply averages to trn and tst series\n ft_trn_series = pd.merge(\n trn_series.to_frame(trn_series.name),\n averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),\n on=trn_series.name,\n how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)\n # pd.merge does not keep the index so restore it\n ft_trn_series.index = trn_series.index \n ft_tst_series = pd.merge(\n tst_series.to_frame(tst_series.name),\n averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),\n on=tst_series.name,\n how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)\n # pd.merge does not keep the index so restore it\n ft_tst_series.index = tst_series.index\n return add_noise(ft_trn_series, noise_level), add_noise(ft_tst_series, noise_level)",
"_____no_output_____"
]
],
[
[
"## Target Encoding 실행",
"_____no_output_____"
]
],
[
[
"def add_new_te(raw_train, raw_test):\n train_df = raw_train.copy()\n test_df = raw_test.copy()\n \n cat = 'product_id'\n trn, sub = target_encode(train_df[cat], \n test_df[cat], \n target=train_df.classes, \n min_samples_leaf=100,\n smoothing=10,\n noise_level=0.01)\n te_col_name = 'te_' + cat + '_mean_smoothed'\n train_df[te_col_name] = trn\n test_df[te_col_name] = sub\n \n return train_df, test_df\n \n\ntrain_df, test_df = add_new_te(train_df, test_df) \n \n ",
"_____no_output_____"
],
[
"display(train_df.head(2))\ndisplay(test_df.head(2))",
"_____no_output_____"
],
[
"def create_fe_target_encoding(raw_train, raw_test, cat_cols):\n train_df = raw_train.copy()\n test_df = raw_test.copy()\n for col in cat_cols:\n print(\"target col: \", col)\n train_df = create_target_encoding(col, train_df)\n train_df = create_target_encoding_smoothe(col, train_df, w)\n te_col_name = 'te_' + col + '_mean_smoothed'\n test_df = add_te_on_test(train_df, test_df, join_col = col, te_col_name= te_col_name) \n \n return train_df, test_df\n\ncat_cols = ['product_category_name_english',\n 'seller_state','seller_city','seller_zip_code_prefix',\n 'customer_seller_city','customer_seller_state','customer_seller_zip_code_prefix']\n\n# cat_cols = ['product_id','product_category_name_english',\n# 'seller_state','seller_city','seller_zip_code_prefix',\n# 'customer_seller_city','customer_seller_state','customer_seller_zip_code_prefix']\n\n\n\ntrain2_df, test2_df = create_fe_target_encoding(train_df, test_df, cat_cols)\n\n",
"target col: product_category_name_english\ntarge_df and test shape: (13436, 27) (13436, 26)\ntarget col: seller_state\ntarge_df and test shape: (13436, 28) (13436, 27)\ntarget col: seller_city\ntarge_df and test shape: (13436, 29) (13436, 28)\ntarget col: seller_zip_code_prefix\ntarge_df and test shape: (13436, 30) (13436, 29)\ntarget col: customer_seller_city\ntarge_df and test shape: (13436, 31) (13436, 30)\ntarget col: customer_seller_state\ntarge_df and test shape: (13436, 32) (13436, 31)\ntarget col: customer_seller_zip_code_prefix\ntarge_df and test shape: (13436, 33) (13436, 32)\n"
],
[
"train2_df.head(2)",
"_____no_output_____"
],
[
"test2_df",
"_____no_output_____"
],
[
"print(train2_df.shape)\nprint(test2_df.shape)",
"(53740, 33)\n(13436, 33)\n"
]
],
[
[
"## Category 레이블 Encoding",
"_____no_output_____"
]
],
[
[
"# from sklearn import preprocessing\nfrom sklearn.preprocessing import LabelEncoder\nclass LabelEncoderExt(object):\n '''\n Source:\n # https://stackoverflow.com/questions/21057621/sklearn-labelencoder-with-never-seen-before-values\n '''\n def __init__(self):\n \"\"\"\n It differs from LabelEncoder by handling new classes and providing a value for it [Unknown]\n Unknown will be added in fit and transform will take care of new item. It gives unknown class id\n \"\"\"\n self.label_encoder = LabelEncoder()\n # self.classes_ = self.label_encoder.classes_\n\n def fit(self, data_list):\n \"\"\"\n This will fit the encoder for all the unique values and introduce unknown value\n :param data_list: A list of string\n :return: self\n \"\"\"\n self.label_encoder = self.label_encoder.fit(list(data_list) + ['Unknown'])\n self.classes_ = self.label_encoder.classes_\n\n return self\n\n def transform(self, data_list):\n \"\"\"\n This will transform the data_list to id list where the new values get assigned to Unknown class\n :param data_list:\n :return:\n \"\"\"\n new_data_list = list(data_list)\n for unique_item in np.unique(data_list):\n if unique_item not in self.label_encoder.classes_:\n new_data_list = ['Unknown' if x==unique_item else x for x in new_data_list]\n\n return self.label_encoder.transform(new_data_list)\n \ndef make_test_label_encoding(raw_train_df, raw_test_df,cols):\n train_df = raw_train_df.copy()\n test_df = raw_test_df.copy()\n \n for lb_col in cols:\n print(lb_col)\n le = LabelEncoderExt()\n le = le.fit(train_df[lb_col])\n \n train_en = le.transform(train_df[lb_col])\n test_en = le.transform(test_df[lb_col]) \n lb_col_name = 'lb_' + lb_col\n print(\"new col name: \", lb_col_name)\n train_df[lb_col_name] = train_en\n test_df[lb_col_name] = test_en \n \n return train_df, test_df\n\n",
"_____no_output_____"
]
],
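 [
  [
   "# Editor's note (added, hedged): a tiny illustration of LabelEncoderExt above. Labels unseen at fit time are mapped\n# to the 'Unknown' class at transform time. The example values are hypothetical.\nle_demo = LabelEncoderExt()\nle_demo.fit(['seoul', 'busan', 'seoul'])\nprint(le_demo.classes_)\nprint(le_demo.transform(['seoul', 'daegu']))  # 'daegu' was never seen, so it is encoded as 'Unknown'",
   "_____no_output_____"
  ]
 ],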
[
[
"### Category 변수의 레이블 인코딩 실행",
"_____no_output_____"
]
],
[
[
"label_cols = ['customer_city','customer_state','customer_zip_code_prefix']\ntrain2_lb, test2_lb = make_test_label_encoding(train2_df, test2_df, label_cols)",
"customer_city\nnew col name: lb_customer_city\ncustomer_state\nnew col name: lb_customer_state\ncustomer_zip_code_prefix\nnew col name: lb_customer_zip_code_prefix\n"
],
[
"pd.options.display.max_rows = 10\nshow_rows = 5\nprint(train2_lb.customer_state.value_counts()[0:show_rows])\n# print(train2_lb[train2_lb.lb_customer_city == 185])\nprint(test2_lb.customer_state.value_counts()[0:show_rows])",
"SP 28232\nMG 6763\nRJ 6034\nPR 2912\nRS 2385\nName: customer_state, dtype: int64\nSP 6642\nMG 1541\nRJ 1491\nPR 715\nRS 663\nName: customer_state, dtype: int64\n"
]
],
[
[
"## 레이블 Encoding 안하고 바로 사용(AutoGluon 용)",
"_____no_output_____"
]
],
[
[
"# no_encoding_cate = tes_df",
"_____no_output_____"
]
],
[
[
"## 최종 사용할 컬럼 지정\n### XGBoost, CatBoost 알고리즘 용",
"_____no_output_____"
]
],
[
[
"def filter_df(raw_df, cols):\n df = raw_df.copy()\n df = df[cols]\n return df\n\n\ncols = ['classes',\n 'lb_customer_city',\n 'lb_customer_state', \n 'lb_customer_zip_code_prefix', \n 'price', 'freight_value',\n 'product_weight_g', \n 'product_volume', \n 'order_weekday',\n 'order_day', 'order_month', \n 'te_product_id_mean_smoothed',\n 'te_product_category_name_english_mean_smoothed', \n 'te_seller_state_mean_smoothed', 'te_seller_city_mean_smoothed',\n 'te_seller_zip_code_prefix_mean_smoothed',\n 'te_customer_seller_city_mean_smoothed',\n 'te_customer_seller_state_mean_smoothed',\n 'te_customer_seller_zip_code_prefix_mean_smoothed',\n ]\n\n\nencode_train = filter_df(train2_lb, cols)\nencode_test = filter_df(test2_lb, cols)\n\n# no_encode_train = filter_df(encode_train, cols)\n# no_encode_val = filter_df(encode_val, cols)\n# no_encode_test = filter_df(encode_test, cols)\n",
"_____no_output_____"
]
],
[
[
"## 피쳐 변환한 AutoGluon 용",
"_____no_output_____"
]
],
[
[
"cols = ['classes',\n 'customer_city',\n 'customer_state', \n 'customer_zip_code_prefix', \n 'product_category_name_english', \n 'price', 'freight_value',\n 'product_weight_g', \n 'product_volume', \n 'order_weekday',\n 'order_day', 'order_month', \n 'te_product_id_mean_smoothed',\n 'te_product_category_name_english_mean_smoothed', \n 'te_seller_state_mean_smoothed', 'te_seller_city_mean_smoothed',\n 'te_seller_zip_code_prefix_mean_smoothed',\n 'te_customer_seller_city_mean_smoothed',\n 'te_customer_seller_state_mean_smoothed',\n 'te_customer_seller_zip_code_prefix_mean_smoothed',\n ]\n\n\nauto_train = filter_df(train2_lb, cols)\nauto_test = filter_df(test2_lb, cols)\n",
"_____no_output_____"
]
],
[
[
"## 펴쳐 변환 없이 AutoGluon 용",
"_____no_output_____"
]
],
[
[
"train_df.columns",
"_____no_output_____"
],
[
"cols = ['classes', \n 'customer_zip_code_prefix', 'customer_city', 'customer_state', 'price',\n 'freight_value', 'product_weight_g', \n 'product_category_name_english', 'seller_zip_code_prefix',\n 'seller_city', 'seller_state', 'order_date', 'order_weekday',\n 'order_day', 'order_month', 'customer_seller_state',\n 'customer_seller_city', 'customer_seller_zip_code_prefix',\n 'product_volume']\n\n\nno_auto_train = filter_df(train_df, cols)\nno_auto_test = filter_df(test_df, cols)\n\n",
"_____no_output_____"
]
],
[
[
"## 로컬에 데이터 저장",
"_____no_output_____"
]
],
[
[
"import os\n\ndef save_local(train_data, test_data, preproc_folder):\n train_df = pd.concat([train_data['classes'], train_data.drop(['classes'], axis=1)], axis=1)\n train_file_name = os.path.join(preproc_folder, 'train.csv')\n train_df.to_csv(train_file_name, index=False)\n print(f'{train_file_name} is saved')\n\n test_df = pd.concat([test_data['classes'], test_data.drop(['classes'], axis=1)], axis=1)\n test_file_name = os.path.join(preproc_folder, 'test.csv')\n test_df.to_csv(test_file_name, index=False)\n print(f'{test_file_name} is saved') \n \n return train_file_name, test_file_name\n\npreproc_folder = 'preproc_data/feature_engineering'\nos.makedirs(preproc_folder, exist_ok=True) \npre_train_file, pre_test_file = save_local(encode_train, encode_test, preproc_folder)\n\npreproc_folder = 'preproc_data/auto_feature_engineering'\nos.makedirs(preproc_folder, exist_ok=True) \nauto_train_file,auto_test_file = save_local(auto_train, auto_test, preproc_folder)\n\npreproc_folder = 'preproc_data/auto_no_fe'\nos.makedirs(preproc_folder, exist_ok=True) \nno_auto_train_file,no_auto_test_file = save_local(no_auto_train, no_auto_test, preproc_folder)",
"preproc_data/feature_engineering/train.csv is saved\npreproc_data/feature_engineering/test.csv is saved\npreproc_data/auto_feature_engineering/train.csv is saved\npreproc_data/auto_feature_engineering/test.csv is saved\npreproc_data/auto_no_fe/train.csv is saved\npreproc_data/auto_no_fe/test.csv is saved\n"
],
[
"%store pre_train_file\n%store pre_test_file\n\n%store auto_train_file\n%store auto_test_file\n\n%store no_auto_train_file\n%store no_auto_test_file",
"Stored 'pre_train_file' (str)\nStored 'pre_test_file' (str)\nStored 'auto_train_file' (str)\nStored 'auto_test_file' (str)\nStored 'no_auto_train_file' (str)\nStored 'no_auto_test_file' (str)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74d265c47c6f98074dd9d670aaf152178f43c46 | 6,543 | ipynb | Jupyter Notebook | week3_assignment.ipynb | GunaSekhargithub/npteldatastructureswithpython | 6ad3870301fcf2d9c6e69cbd011cd1696dcf0ceb | [
"Apache-2.0"
] | null | null | null | week3_assignment.ipynb | GunaSekhargithub/npteldatastructureswithpython | 6ad3870301fcf2d9c6e69cbd011cd1696dcf0ceb | [
"Apache-2.0"
] | null | null | null | week3_assignment.ipynb | GunaSekhargithub/npteldatastructureswithpython | 6ad3870301fcf2d9c6e69cbd011cd1696dcf0ceb | [
"Apache-2.0"
] | null | null | null | 28.951327 | 261 | 0.429925 | [
[
[
"<a href=\"https://colab.research.google.com/github/GunaSekhargithub/npteldatastructureswithpython/blob/master/week3_assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"1.Write a function contracting(l) that takes as input a list of integer l and returns True if the absolute difference between each adjacent pair of elements strictly decreases.\n\nHere are some examples of how your function should work.\n\n >>> contracting([9,2,7,3,1])\n True\n\n >>> contracting([-2,3,7,2,-1]) \n False\n\n >>> contracting([10,7,4,1])\n False",
"_____no_output_____"
]
],
[
[
"def contracting(l):\n n=len(l)\n b=abs(l[1]-l[0])\n for i in range(2,n):\n d=abs(l[i]-l[i-1])\n if (d<b):\n b=d\n else:\n return False\n break\n return True\ncontracting([-2,3,7,2,-1])",
"_____no_output_____"
]
],
[
[
"2.In a list of integers l, the neighbours of l[i] are l[i-1] and l[i+1]. l[i] is a hill if it is strictly greater than its neighbours and a valley if it is strictly less than its neighbours.\nWrite a function counthv(l) that takes as input a list of integers l and returns a list [hc,vc] where hc is the number of hills in l and vc is the number of valleys in l.\n\nHere are some examples to show how your function should work.\n\n \n>>> counthv([1,2,1,2,3,2,1])\n[2, 1]\n\n>>> counthv([1,2,3,1])\n[1, 0]\n\n>>> counthv([3,1,2,3])\n[0, 1]\n",
"_____no_output_____"
]
],
[
[
"def counthv(l):\n a=[]\n hc=0\n vc=0\n for i in range(1,len(l)-1):\n if (l[i]>l[i-1] and l[i]>l[i+1]):\n hc+=1\n elif (l[i]<l[i-1] and l[i]<l[i+1]):\n vc+=1\n else:\n continue\n a.append(hc)\n a.append(vc)\n return a\ncounthv([3,1,2,3])",
"_____no_output_____"
]
],
[
[
"3.A square n×n matrix of integers can be written in Python as a list with n elements, where each element is in turn a list of n integers, representing a row of the matrix. For instance, the matrix\n\n 1 2 3\n 4 5 6\n 7 8 9\nwould be represented as [[1,2,3], [4,5,6], [7,8,9]].\n\nWrite a function leftrotate(m) that takes a list representation m of a square matrix as input, and returns the matrix obtained by rotating the original matrix counterclockwize by 90 degrees. For instance, if we rotate the matrix above, we get\n\n 3 6 9\n 2 5 8 \n 1 4 7\nYour function should not modify the argument m provided to the function rotate().\n\nHere are some examples of how your function should work.\n\n \n >>> leftrotate([[1,2],[3,4]])\n [[2, 4], [1, 3]]\n\n >>> leftrotate([[1,2,3],[4,5,6],[7,8,9]])\n [[3, 6, 9], [2, 5, 8], [1, 4, 7]]\n\n >>> leftrotate([[1,1,1],[2,2,2],[3,3,3]])\n [[1, 2, 3], [1, 2, 3], [1, 2, 3]]",
"_____no_output_____"
]
],
[
[
"def col(l,n):\n m=[]\n for i in range(len(l)):\n m.append(l[i][n])\n return m \ndef leftrotate(l):\n m=[]\n for i in range(len(l)-1,-1,-1):\n m.append(col(l,i))\n return m\nleftrotate([[1,2],[3,4]])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74d29df9c31edc183ae24611389de964fbc39e3 | 6,641 | ipynb | Jupyter Notebook | pytorch-distributed/examples/pretrain.ipynb | Napkin-DL/my-aws-example | c6e8a1ec60468938c259fcec7542c85f5464c898 | [
"MIT-0"
] | null | null | null | pytorch-distributed/examples/pretrain.ipynb | Napkin-DL/my-aws-example | c6e8a1ec60468938c259fcec7542c85f5464c898 | [
"MIT-0"
] | null | null | null | pytorch-distributed/examples/pretrain.ipynb | Napkin-DL/my-aws-example | c6e8a1ec60468938c259fcec7542c85f5464c898 | [
"MIT-0"
] | 2 | 2020-03-19T04:49:14.000Z | 2020-07-27T05:51:50.000Z | 37.732955 | 1,708 | 0.598103 | [
[
[
"import gentrl\nimport torch\nimport pandas as pd\n# torch.cuda.set_device(0)",
"_____no_output_____"
],
[
"from moses.metrics import mol_passes_filters, QED, SA, logP\nfrom moses.metrics.utils import get_n_rings, get_mol\n\n\ndef get_num_rings_6(mol):\n r = mol.GetRingInfo()\n return len([x for x in r.AtomRings() if len(x) > 6])\n\n\ndef penalized_logP(mol_or_smiles, masked=False, default=-5):\n mol = get_mol(mol_or_smiles)\n if mol is None:\n return default\n reward = logP(mol) - SA(mol) - get_num_rings_6(mol)\n if masked and not mol_passes_filters(mol):\n return default\n return reward",
"_____no_output_____"
],
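  [
   "# Editor's note (added, hedged): a quick sanity check of penalized_logP() above on a couple of hand-picked SMILES\n# strings; it assumes the moses imports in this notebook are available.\nfor smi in ['CCO', 'c1ccccc1']:\n    print(smi, penalized_logP(smi))",
   "_____no_output_____"
  ],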
[
"! wget https://media.githubusercontent.com/media/molecularsets/moses/master/data/dataset_v1.csv",
"Warning: Failed to set locale category LC_NUMERIC to ko_Kore_US.\nWarning: Failed to set locale category LC_TIME to ko_Kore_US.\nWarning: Failed to set locale category LC_COLLATE to ko_Kore_US.\nWarning: Failed to set locale category LC_MONETARY to ko_Kore_US.\nWarning: Failed to set locale category LC_MESSAGES to ko_Kore_US.\n--2020-04-02 14:08:50-- https://media.githubusercontent.com/media/molecularsets/moses/master/data/dataset_v1.csv\nResolving media.githubusercontent.com (media.githubusercontent.com)... 151.101.228.133\nConnecting to media.githubusercontent.com (media.githubusercontent.com)|151.101.228.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 84482588 (81M) [text/plain]\nSaving to: ‘dataset_v1.csv’\n\ndataset_v1.csv 100%[===================>] 80.57M 5.13MB/s in 16s \n\n2020-04-02 14:09:10 (4.91 MB/s) - ‘dataset_v1.csv’ saved [84482588/84482588]\n\n"
],
[
"df = pd.read_csv('dataset_v1.csv')\ndf = df[df['SPLIT'] == 'train']\ndf['plogP'] = df['SMILES'].apply(penalized_logP)\ndf.to_csv('train_plogp_plogpm.csv', index=None)",
"_____no_output_____"
],
[
"enc = gentrl.RNNEncoder(latent_size=50)\ndec = gentrl.DilConvDecoder(latent_input_size=50)\nmodel = gentrl.GENTRL(enc, dec, 50 * [('c', 20)], [('c', 20)], beta=0.001)\nmodel.cuda();",
"_____no_output_____"
],
[
"md = gentrl.MolecularDataset(sources=[\n {'path':'train_plogp_plogpm.csv',\n 'smiles': 'SMILES',\n 'prob': 1,\n 'plogP' : 'plogP',\n }], \n props=['plogP'])\n\nfrom torch.utils.data import DataLoader\ntrain_loader = DataLoader(md, batch_size=50, shuffle=True, num_workers=1, drop_last=True)",
"_____no_output_____"
],
[
"model.train_as_vaelp(train_loader, lr=1e-4)",
"_____no_output_____"
],
[
"! mkdir -p saved_gentrl",
"_____no_output_____"
],
[
"model.save('./saved_gentrl/')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74d2eafb3ad7625bac23c30c991ac7c78cb23ba | 410,739 | ipynb | Jupyter Notebook | M3 Advance Statistics/W2 EDA/EDA_Cars_Student_File.ipynb | fborrasumh/greatlearning-pgp-dsba | 2aff5e00f8d6a60e1d819b970901492af703de85 | [
"MIT"
] | 1 | 2021-12-04T12:11:50.000Z | 2021-12-04T12:11:50.000Z | M3 Advance Statistics/W2 EDA/EDA_Cars_Student_File.ipynb | fborrasumh/greatlearning-pgp-dsba | 2aff5e00f8d6a60e1d819b970901492af703de85 | [
"MIT"
] | null | null | null | M3 Advance Statistics/W2 EDA/EDA_Cars_Student_File.ipynb | fborrasumh/greatlearning-pgp-dsba | 2aff5e00f8d6a60e1d819b970901492af703de85 | [
"MIT"
] | 1 | 2022-03-20T07:01:46.000Z | 2022-03-20T07:01:46.000Z | 181.101852 | 244,044 | 0.878913 | [
[
[
"# EDA Car Data Set\n\n**We will explore the Car Data set and perform the exploratory data analysis on the dataset. The major topics to be covered are below:**\n\n- **Removing duplicates**\n- **Missing value treatment**\n- **Outlier Treatment**\n- **Normalization and Scaling( Numerical Variables)**\n- **Encoding Categorical variables( Dummy Variables)**\n- **Univerate Analysis**\n- **Bivariate Analysis**",
"_____no_output_____"
],
[
"**As a first step, we will import all the necessary libraries that we think we will requiring to perform the EDA.**",
"_____no_output_____"
],
[
"# Importing Libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"# Loading the data set\n\n**We will be loading the EDA cars excel file using pandas. For this we will be using read_excel file.**",
"_____no_output_____"
]
],
[
[
"df=pd.read_excel('EDA Cars.xlsx')",
"_____no_output_____"
]
],
[
[
"# Basic Data Exploration \n\n**In this step, we will perform the below operations to check what the data set comprises of. We will check the below things:**\n\n- **head of the dataset**\n- **shape of the dataset**\n- **info of the dataset**\n- **summary of the dataset**",
"_____no_output_____"
],
[
"**head function will tell you the top records in the data set. By default python shows you only top 5 records.**",
"_____no_output_____"
],
[
"**Shape attribute tells us number of observations and variables we have in the data set. It is used to check the dimension of data. The cars data set has 303 observations and 13 variables in the data set.**",
"_____no_output_____"
]
],
[
[
" # Converting Postel Code into Category ",
"_____no_output_____"
]
],
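 [
  [
   "# Editor's note (added, hedged): one way to do the conversion described above. The exact postal-code column name is\n# not shown in this notebook, so the column is located by a name match; adjust if the real name differs.\npostal_cols = [c for c in df.columns if 'POST' in c.upper()]\nfor c in postal_cols:\n    df[c] = df[c].astype('category')",
   "_____no_output_____"
  ]
 ],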
[
[
"**info() is used to check the Information about the data and the datatypes of each respective attributes.**",
"_____no_output_____"
],
[
"**The describe method will help to see how data has been spread for the numerical values. We can clearly see the minimum value, mean values, different percentile values and maximum values.**",
"_____no_output_____"
],
[
"# Check for Duplicate records",
"_____no_output_____"
]
],
[
[
"# Check for duplicate data\n\n",
"Number of duplicate rows = 14\n"
]
],
[
[
"**Since we have 14 duplicate records in the data, we will remove this from the data set so that we get only distinct records.**",
"_____no_output_____"
],
[
"**Post removing the duplicate, we will check whether the duplicates has been removed from the data set or not.**",
"_____no_output_____"
]
],
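 [
  [
   "# Editor's note (added, hedged): the removal step described above; keeps the first occurrence of each duplicated row.\ndf = df.drop_duplicates()\ndf.shape",
   "_____no_output_____"
  ]
 ],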
[
[
"# Check for duplicate data\n\ndups = df.duplicated()\nprint('Number of duplicate rows = %d' % (dups.sum()))\n\ndf[dups]",
"Number of duplicate rows = 0\n"
]
],
[
[
"**Now, we can clearly see that there are no duplicate records in the data set. We can also quickly confirm the number of records by using the shape attribute as those 14 records should be removed from the original data. Initially it had 303 records now it should have 289**",
"_____no_output_____"
]
],
[
[
"df.shape",
"_____no_output_____"
]
],
[
[
"# Outlier Treatment\n\n**To check for outliers, we will be plotting the box plots.**",
"_____no_output_____"
]
],
[
[
"df.boxplot(column=['INCOME'])\nplt.show()",
"_____no_output_____"
],
[
"df.boxplot(column=['TRAVEL TIME'])\nplt.show()",
"_____no_output_____"
],
[
"df.boxplot(column=['CAR AGE'])\nplt.show()",
"_____no_output_____"
],
[
"df.boxplot(column=['MILES CLOCKED'])\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Looking at the box plot, it seems that the three variables INCOME, MILES CLOCKED and TRAVEL TIME have outlier present in the variables.**\n\n**These outliers value needs to be teated and there are several ways of treating them:**\n \n- **Drop the outlier value**\n- **Replace the outlier value using the IQR**\n\n",
"_____no_output_____"
],
[
"**Created a user definded function for finding the lower and upper range for a variable so that outlier can be treated.**",
"_____no_output_____"
]
],
[
[
" ",
"_____no_output_____"
]
],
[
[
"## Make Boxplots after Outlier Treatment",
"_____no_output_____"
]
],
[
[
"df.boxplot(column=['TRAVEL TIME'])\nplt.show()",
"_____no_output_____"
],
[
"df.boxplot(column=['MILES CLOCKED'])\nplt.show()",
"_____no_output_____"
]
],
[
[
"**If you look at the box plots above,post treating the outlier there are no outliers in all these columns.**",
"_____no_output_____"
],
[
"# Check for missing value",
"_____no_output_____"
]
],
[
[
"# Check for missing value in any column\n",
"_____no_output_____"
]
],
[
[
"**We can see that we have various missing values in respective columns. There are various ways of treating your missing values in the data set. And which technique to use when is actually dependent on the type of data you are dealing with.**\n\n- **Drop the missing values : In this case we drop the missing values from those variables. In case there are very few missing values you can drop those values.**\n\n- **Impute with mean value : For numerical column, you can replace the missing values with mean values. Before replacing with mean value, it is advisable to check that the variable shouldn't have extreme values .i.e. outliers.**\n\n- **Impute with median value : For numerical column, you can also replace the missing values with median values. In case you have extreme values such as outliers it is advisable to use median approach.**\n\n- **Impute with mode value : For categorical column, you can replace the missing values with mode values i.e the frequent ones.**\n\n**In this exercise, we will replace the numerical columns with median values and for categorical columns we will replace the missing values with mode values.**",
"_____no_output_____"
]
],
[
[
"df[df.isnull().sum()[df.isnull().sum()>0].index].dtypes",
"_____no_output_____"
]
],
[
[
"**Replacing NULL values in Numerical Columns using Median**",
"_____no_output_____"
]
],
[
[
"\n",
"_____no_output_____"
]
],
[
[
"**Replacing NULL values in Categorical Columns using Mode**",
"_____no_output_____"
]
],
[
[
"# Check for missing value in any column\ndf.isnull().sum()",
"_____no_output_____"
]
],
[
[
"# Univariate Analysis",
"_____no_output_____"
]
],
[
[
" # histogram of income",
"_____no_output_____"
]
],
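 [
  [
   "# Editor's note (added, hedged): a simple matplotlib histogram of INCOME to support the observation below.\nplt.hist(df['INCOME'], bins=30)\nplt.xlabel('INCOME')\nplt.ylabel('Frequency')\nplt.show()",
   "_____no_output_____"
  ]
 ],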
[
[
"From above figure, we can say that the Income parameter is right skewed",
"_____no_output_____"
]
],
[
[
"sns.countplot(df[\"EDUCATION\"],hue=df[\"SEX\"]) #countplot for Education wrt SEX",
"_____no_output_____"
]
],
[
[
"From the above graph we can interpret that majority of the people are High School passouts and this is true for both Males and Females",
"_____no_output_____"
],
[
"# Bivariate Analysis",
"_____no_output_____"
]
],
[
[
" #Pairplot of all variables",
"_____no_output_____"
]
],
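 [
  [
   "# Editor's note (added, hedged): the seaborn pair plot referred to in the text below; plotted on the numeric columns.\nsns.pairplot(df)\nplt.show()",
   "_____no_output_____"
  ]
 ],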
[
[
"**In the above plot scatter diagrams are plotted for all the numerical columns in the dataset. A scatter plot is a visual representation of the degree of correlation between any two columns. The pair plot function in seaborn makes it very easy to generate joint scatter plots for all the columns in the data.**",
"_____no_output_____"
]
],
[
[
"df.corr()",
"_____no_output_____"
]
],
[
[
"## Correlation Heatmap",
"_____no_output_____"
],
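  [
   "# Editor's note (added, hedged): a heatmap of the correlation matrix computed above.\nplt.figure(figsize=(10, 8))\nsns.heatmap(df.corr(), annot=True, cmap='coolwarm')\nplt.show()",
   "_____no_output_____"
  ],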
[
"# Normalizing and Scaling",
"_____no_output_____"
],
[
"**Often the variables of the data set are of different scales i.e. one variable is in millions and other in only 100. For e.g. in our data set Income is having values in thousands and age in just two digits. Since the data in these variables are of different scales, it is tough to compare these variables.**\n\n**Feature scaling (also known as data normalization) is the method used to standardize the range of features of data. Since, the range of values of data may vary widely, it becomes a necessary step in data preprocessing while using machine learning algorithms.**\n\n**In this method, we convert variables with different scales of measurements into a single scale.**\n\n**StandardScaler normalizes the data using the formula (x-mean)/standard deviation.**\n\n**We will be doing this only for the numerical variables.**",
"_____no_output_____"
]
],
[
[
"#Scales the data. Essentially returns the z-scores of every attribute\n",
"_____no_output_____"
],
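  [
   "# Editor's note (added, hedged): a StandardScaler-based sketch of the scaling step described above, applied only to\n# the numeric columns; assumes scikit-learn is available in this environment.\nfrom sklearn.preprocessing import StandardScaler\nnum_cols = df.select_dtypes(include=np.number).columns\ndf[num_cols] = StandardScaler().fit_transform(df[num_cols])",
   "_____no_output_____"
  ],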
[
"df.head()",
"_____no_output_____"
]
],
[
[
"**If you look at the variables INCOME, TRAVEL TIME and CAR AGE, all has been normalized and scaled in one scale now.**",
"_____no_output_____"
],
[
"# ENCODING\n\n**One-Hot-Encoding is used to create dummy variables to replace the categories in a categorical variable into features of each category and represent it using 1 or 0 based on the presence or absence of the categorical value in the record.**\n\n**This is required to do since the machine learning algorithms only works on the numerical data. That is why there is a need to convert the categorical column into numerical one.**\n\n**get_dummies is the method which creates dummy variable for each categorical variable.**\n\n**It is considered a good practice to set parameter `drop_first` as `True` whenever get_dummies is used. It reduces the chances of multicollinearity which will be covered in coming courses and the number of features are also less as compared to `drop_first=False`**",
"_____no_output_____"
]
],
[
[
"columns=[\"MARITAL STATUS\", \"SEX\",\"EDUCATION\",\"JOB\",\"USE\",\"CAR TYPE\",\"CITY\"]\ndf = pd.concat([df, dummies], axis=1)\n\n# drop original column \"fuel-type\" from \"df\"\n",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"**In the data set, each Category in all of the categorical columns have been added as columns with values 0 and 1**\n**Example: married_Yes, sex_M, Education_High School\n**If sex_M =1, then it means its a Male and sex_M=0 means its a Female**",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e74d3b3185f6fdf6ca5b59166a411b786a98cf3e | 945 | ipynb | Jupyter Notebook | 12. Python Function/01. What is a function.ipynb | penanrajput/PythonCourseContent | 074a4af9c83a8a6b9b4608ce341ed96d1bd2e999 | [
"MIT"
] | null | null | null | 12. Python Function/01. What is a function.ipynb | penanrajput/PythonCourseContent | 074a4af9c83a8a6b9b4608ce341ed96d1bd2e999 | [
"MIT"
] | null | null | null | 12. Python Function/01. What is a function.ipynb | penanrajput/PythonCourseContent | 074a4af9c83a8a6b9b4608ce341ed96d1bd2e999 | [
"MIT"
] | 1 | 2020-12-19T19:29:17.000Z | 2020-12-19T19:29:17.000Z | 18.173077 | 59 | 0.501587 | [
[
[
"def greeting(name):\n print(\"Hello \"+ name + \", Good Morning !\")\ngreeting(\"Penan\")",
"Hello Penan, Good Morning !\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e74d4291eaf92ad7d78a037c00dfd1cda66b0860 | 212,081 | ipynb | Jupyter Notebook | 01-analysing-data.ipynb | onatemarta/thursday | bea178e434dfbb0f5ab2e916288f3f0a9c00c7cd | [
"MIT"
] | null | null | null | 01-analysing-data.ipynb | onatemarta/thursday | bea178e434dfbb0f5ab2e916288f3f0a9c00c7cd | [
"MIT"
] | null | null | null | 01-analysing-data.ipynb | onatemarta/thursday | bea178e434dfbb0f5ab2e916288f3f0a9c00c7cd | [
"MIT"
] | null | null | null | 263.782338 | 164,364 | 0.918177 | [
[
[
"## Analysing tabular data",
"_____no_output_____"
],
[
"We are going to use a LIBRARY called numpy",
"_____no_output_____"
],
[
"We are going to use a LIBRARY called numpy",
"_____no_output_____"
]
],
[
[
"import numpy",
"_____no_output_____"
],
[
"numpy.loadtxt(fname='data/weather-01.csv', delimiter = ',')",
"_____no_output_____"
]
],
[
[
"## Variables",
"_____no_output_____"
]
],
[
[
"weight_kg = 55",
"_____no_output_____"
],
[
"print (weight_kg)",
"55\n"
],
[
"print ('Weight in pounds: ', weight_kg * 2.2)",
"Weight in pounds: 121.00000000000001\n"
],
[
"weight_kg = 57.5",
"_____no_output_____"
],
[
"print ('New weight: ', weight_kg * 2.2)",
"New weight: 126.50000000000001\n"
],
[
"%whos",
"Variable Type Data/Info\n-------------------------------\nnumpy module <module 'numpy' from 'C:\\<...>ges\\\\numpy\\\\__init__.py'>\nweight_kg float 57.5\n"
],
[
"data = numpy.loadtxt(fname='data/weather-01.csv', delimiter = ',')",
"_____no_output_____"
],
[
"print (data)",
"[[ 0. 0. 1. ..., 3. 0. 0.]\n [ 0. 1. 2. ..., 1. 0. 1.]\n [ 0. 1. 1. ..., 2. 1. 1.]\n ..., \n [ 0. 1. 1. ..., 1. 1. 1.]\n [ 0. 0. 0. ..., 0. 2. 0.]\n [ 0. 0. 1. ..., 1. 1. 0.]]\n"
],
[
"print (type(data))",
"<class 'numpy.ndarray'>\n"
],
[
"%whos",
"Variable Type Data/Info\n--------------------------------\ndata ndarray 60x40: 2400 elems, type `float64`, 19200 bytes\nnumpy module <module 'numpy' from 'C:\\<...>ges\\\\numpy\\\\__init__.py'>\nweight_kg float 57.5\n"
],
[
"# Finding out the data type\nprint(data.dtype)",
"float64\n"
],
[
"# Find out the shape\nprint (data.shape)",
"(60, 40)\n"
],
[
"# This is 60 rows * 40 columns",
"_____no_output_____"
],
[
"# Getting a single number out of the array\nprint(\"First value in data: \" , data[0,0])",
"First value in data: 0.0\n"
],
[
"print('A middle value: ' , [30,20])",
"A middle value: [30, 20]\n"
],
[
"# Lets get the first 10 columns for the first 4 rows \nprint (data[0:4, 0:10])\n#Start at index 0 and go up to BUT NOT INCLUDING index 4",
"[[ 0. 0. 1. 3. 1. 2. 4. 7. 8. 3.]\n [ 0. 1. 2. 1. 2. 1. 3. 2. 2. 6.]\n [ 0. 1. 1. 3. 3. 2. 6. 2. 5. 9.]\n [ 0. 0. 2. 0. 4. 2. 2. 1. 6. 7.]]\n"
],
[
"# We don't need to start slicing at 0\nprint(data [5:10, 7:15])",
"[[ 1. 6. 4. 7. 6. 6. 9. 9.]\n [ 5. 5. 8. 6. 5. 11. 9. 4.]\n [ 3. 5. 3. 7. 8. 8. 5. 10.]\n [ 5. 5. 8. 2. 4. 11. 12. 10.]\n [ 3. 5. 8. 6. 8. 12. 5. 13.]]\n"
],
[
"# We don't even need to include the UPPER and LOWE bounds\nsmallchunk = data [:3, 36:]\nprint (smallchunk)",
"[[ 2. 3. 0. 0.]\n [ 1. 1. 0. 1.]\n [ 2. 2. 1. 1.]]\n"
],
[
"# Arithmetic on arrays\ndoublesmallchunk = smallchunk * 2.0",
"_____no_output_____"
],
[
"print (doublesmallchunk)",
"[[ 4. 6. 0. 0.]\n [ 2. 2. 0. 2.]\n [ 4. 4. 2. 2.]]\n"
],
[
"triplesmallchunk = smallchunk + doublesmallchunk",
"_____no_output_____"
]
],
[
[
"print (triplesmallchunk)",
"_____no_output_____"
]
],
[
[
"print (triplesmallchunk)",
"[[ 6. 9. 0. 0.]\n [ 3. 3. 0. 3.]\n [ 6. 6. 3. 3.]]\n"
],
[
"print (numpy.mean(data))",
"6.14875\n"
],
[
"print (numpy.max(data))",
"20.0\n"
],
[
"print (numpy.min(data))",
"0.0\n"
],
[
"# Get a set of data for the first station\nstation_0 = data [0, :]",
"_____no_output_____"
],
[
"print (numpy.max(station_0))",
"18.0\n"
],
[
"# We don't need to create 'temporaty' array slices\n# We can refer to what we call array axes",
"_____no_output_____"
],
[
"# axis = 0 gets the mean DOWN each column , so the mean temperature for each recording period\n\nprint (numpy.mean(data, axis = 0))",
"[ 0. 0.45 1.11666667 1.75 2.43333333 3.15\n 3.8 3.88333333 5.23333333 5.51666667 5.95 5.9\n 8.35 7.73333333 8.36666667 9.5 9.58333333\n 10.63333333 11.56666667 12.35 13.25 11.96666667\n 11.03333333 10.16666667 10. 8.66666667 9.15 7.25\n 7.33333333 6.58333333 6.06666667 5.95 5.11666667 3.6\n 3.3 3.56666667 2.48333333 1.5 1.13333333\n 0.56666667]\n"
],
[
"# axis = 0 gets the mean DOWN each column , so the mean temperature for each station for all the periods\n\nprint (numpy.mean (data, axis = 1))",
"[ 5.45 5.425 6.1 5.9 5.55 6.225 5.975 6.65 6.625 6.525\n 6.775 5.8 6.225 5.75 5.225 6.3 6.55 5.7 5.85 6.55\n 5.775 5.825 6.175 6.1 5.8 6.425 6.05 6.025 6.175 6.55\n 6.175 6.35 6.725 6.125 7.075 5.725 5.925 6.15 6.075 5.75\n 5.975 5.725 6.3 5.9 6.75 5.925 7.225 6.15 5.95 6.275 5.7\n 6.1 6.825 5.975 6.725 5.7 6.25 6.4 7.05 5.9 ]\n"
],
[
"# Do some simple visualisation",
"_____no_output_____"
],
[
"import matplotlib.pyplot",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"image = matplotlib.pyplot.imshow(data)",
"_____no_output_____"
],
[
"# Let's look the average temperature over time\navg_temperature = numpy.mean (data, axis = 0)",
"_____no_output_____"
],
[
"avg_plot = matplotlib.pyplot.plot(avg_temperature)",
"_____no_output_____"
]
],
[
[
"## Task:\n* Produce maximum and minimum plots of this data\n* What do you think?",
"_____no_output_____"
]
],
[
[
"max_temperature = numpy.max (data, axis = 0)\nmin_temperature = numpy.min (data, axis = 0)",
"_____no_output_____"
],
[
"max_plot = matplotlib.pyplot.plot(max_temperature)\nmin_plot = matplotlib.pyplot.plot(min_temperature)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74d46254611e27af0690729d58be76956ff9849 | 208,528 | ipynb | Jupyter Notebook | Chapter 04/vgg19_all_images_25_epochs_colab_modelfit.ipynb | bpbpublications/Mastering-TensorFlow-2.x | fc169692e6f38f3d6b78f956f47bcc7c884a9647 | [
"MIT"
] | 1 | 2022-02-15T07:36:18.000Z | 2022-02-15T07:36:18.000Z | Chapter 04/vgg19_all_images_25_epochs_colab_modelfit.ipynb | bpbpublications/Mastering-TensorFlow-2.x | fc169692e6f38f3d6b78f956f47bcc7c884a9647 | [
"MIT"
] | null | null | null | Chapter 04/vgg19_all_images_25_epochs_colab_modelfit.ipynb | bpbpublications/Mastering-TensorFlow-2.x | fc169692e6f38f3d6b78f956f47bcc7c884a9647 | [
"MIT"
] | null | null | null | 200.700674 | 56,950 | 0.870799 | [
[
[
"<a href=\"https://colab.research.google.com/github/rajdeepd/tensorflow_2.0_book_code/blob/master/ch04/vgg19_all_images_25_epochs_colab_modelfit.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import sys\nIN_COLAB = 'google.colab' in sys.modules\nprint('Google Colab? ' + str(IN_COLAB))\n\nif not IN_COLAB:\n#!python -m pip show tensorflow\n !which python\n !python -m pip show tensorflow",
"Google Colab? True\n"
],
[
"!pwd",
"/content\n"
],
[
"\n\nfrom google.colab import drive\ndrive.mount(\"/content/gdrive\")\n",
"Mounted at /content/gdrive\n"
],
[
"!ls \"/content/gdrive/My Drive/cancer_detection/metastatic_cancer\"",
"model_summary\t\t plots_2.6.0_google_collab training_logs_2.5.0\nplots_2.4.1_google_collab training\t\t training_logs_2.6.0\nplots_2.5.0_google_collab training_logs_2.4.1\t validation\n"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import sys\nsys.executable",
"_____no_output_____"
]
],
[
[
"https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/applications/vgg19",
"_____no_output_____"
]
],
[
[
"# Imports\nimport numpy as np \nimport pandas as pd \nfrom glob import glob \nfrom skimage.io import imread \nimport os\nimport shutil\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc, roc_auc_score\nfrom sklearn.model_selection import train_test_split\nimport tensorflow\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.applications.vgg19 import VGG19\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Average, Input, Concatenate, GlobalMaxPooling2D\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint\nfrom tensorflow.keras.optimizers import Adam",
"_____no_output_____"
],
[
"#!pip show tensorflow\nTF_VERSION = tensorflow.__version__\nTF_VERSION",
"_____no_output_____"
],
[
"if IN_COLAB:\n BASE = '/content/gdrive/My Drive/cancer_detection/metastatic_cancer'\nelse:\n BASE = '.'",
"_____no_output_____"
],
[
"# Output files\nmodel_type='vgg19'\nno_of_images = 'all'\nEPOCHS = 25\n\nif IN_COLAB:\n PLOTS = 'plots_'+ TF_VERSION + '_google_collab'\n VERSION = TF_VERSION\nelse:\n PLOTS = 'plots_' + TF_VERSION\n VERSION = TF_VERSION\n \n_APPEND = '_' + model_type + '_' + str(no_of_images) + '_' + str(EPOCHS) \nAPPEND = _APPEND + \".png\"\n\n\nif IN_COLAB:\n if not os.path.exists(BASE + \"/training_logs_\" + VERSION):\n os.mkdir(BASE + \"/training_logs_\" + VERSION)\n\n if not os.path.exists(BASE + \"/model_summary/\"):\n os.mkdir(BASE + \"/model_summary/\")\n if not os.path.exists(BASE + \"/model_summary/\" + \"model_summary_\" + VERSION):\n os.mkdir(BASE + \"/model_summary/\" + \"model_summary_\" + VERSION)\n if not os.path.exists(BASE + '/' + PLOTS):\n os.mkdir(BASE + '/' + PLOTS)\nif IN_COLAB:\n TRAINING_LOGS_FILE = BASE + \"/training_logs_\" + VERSION + '/training_logs' + _APPEND + '.csv'\n MODEL_SUMMARY_FILE = BASE + \"/model_summary/\" \"model_summary_\" + VERSION + \"/model_summary\" + _APPEND + \".txt\"\n MODEL_PLOT_FILE = BASE + '/' + PLOTS + \"/model_plot_\" + APPEND\n MODEL_FILE = \"model_\" + VERSION + \"/model_\" + model_type + \"_all_collab.h5\"\n TRAINING_PLOT_FILE = BASE + '/'+ PLOTS + \"/training\" + APPEND\n VALIDATION_PLOT_FILE = BASE + '/'+ PLOTS + \"/validation\" + APPEND\n ROC_PLOT_FILE = BASE + '/'+ PLOTS + \"/roc\" + APPEND\nelse:\n TRAINING_LOGS_FILE = \"training_logs_\" + VERSION + '/training_logs' + _APPEND + '.csv'\n MODEL_SUMMARY_FILE = \"model_summary_\" + VERSION + \"/model_summary\" + _APPEND + \".txt\"\n MODEL_PLOT_FILE = PLOTS + \"/model_plot_\" + APPEND\n MODEL_FILE = \"model_\" + VERSION + \"/model_\" + model_type + \"_all_collab.h5\"\n TRAINING_PLOT_FILE = PLOTS + \"/training\" + APPEND\n VALIDATION_PLOT_FILE = PLOTS + \"/validation\" + APPEND\n ROC_PLOT_FILE = PLOTS + \"/roc\" + APPEND",
"_____no_output_____"
],
[
"# Hyperparams\nSAMPLE_COUNT = 85000\n#TRAINING_RATIO = 0.9\nIMAGE_SIZE = 96 \nIMAGE_SIZE2 = 224\n\nBATCH_SIZE = 192\nVERBOSITY = 1\nTESTING_BATCH_SIZE = 5000",
"_____no_output_____"
],
[
"import pathlib\ndata_dir = pathlib.Path(BASE)\ndata_dir",
"_____no_output_____"
],
[
"data_dir_training = pathlib.Path(BASE + '/training')",
"_____no_output_____"
],
[
"image_count = len(list(data_dir_training.glob('*/*.tif')))\nprint(image_count)",
"360\n"
],
[
"import PIL\nimport PIL.Image\nzeros = list(data_dir_training.glob('0/*'))\nPIL.Image.open(str(zeros[0]))",
"_____no_output_____"
],
[
"import PIL\nimport PIL.Image\nzeros = list(data_dir_training.glob('1/*'))\nPIL.Image.open(str(zeros[0]))",
"_____no_output_____"
],
[
"training_path= BASE + '/training'\nvalidation_path = BASE + '/validation'",
"_____no_output_____"
],
[
"# Data augmentation\ntraining_data_generator = ImageDataGenerator(rescale=1./255,\n horizontal_flip=True,\n vertical_flip=True,\n rotation_range=180,\n zoom_range=0.4, \n width_shift_range=0.3,\n height_shift_range=0.3,\n shear_range=0.3,\n channel_shift_range=0.3)",
"_____no_output_____"
],
[
"# Data generation\ntraining_generator = training_data_generator.flow_from_directory(training_path,\n target_size=(IMAGE_SIZE2,IMAGE_SIZE2),\n batch_size=BATCH_SIZE,\n class_mode='binary')\nvalidation_generator = ImageDataGenerator(rescale=1./255).flow_from_directory(validation_path,\n target_size=(IMAGE_SIZE2,\n IMAGE_SIZE2),\n batch_size=BATCH_SIZE,\n class_mode='binary')\ntesting_generator = ImageDataGenerator(rescale=1./255).flow_from_directory(validation_path,\n target_size=(IMAGE_SIZE2,IMAGE_SIZE2),\n batch_size=BATCH_SIZE,\n class_mode='binary',\n shuffle=False)",
"Found 360 images belonging to 2 classes.\nFound 40 images belonging to 2 classes.\nFound 40 images belonging to 2 classes.\n"
],
[
"import tensorflow as tf\nprint(tf.__version__)",
"2.6.0\n"
],
[
"# Model\ninput_shape = (IMAGE_SIZE2, IMAGE_SIZE2, 3)\ninputs = Input(input_shape)\n\nvgg19 = VGG19(include_top=False, input_shape=(224, 224, 3))(inputs) \noutputs = GlobalAveragePooling2D()(vgg19)\noutputs = Dropout(0.5)(outputs)\noutputs = Dense(1, activation='sigmoid')(outputs)\n\nmodel = Model(inputs, outputs)\nmodel.compile(optimizer=Adam(lr=0.0001, decay=0.00001),\n loss='binary_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\n\nplot_model(model,\n to_file=MODEL_PLOT_FILE,\n show_shapes=True,\n show_layer_names=True)",
"Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5\n80142336/80134624 [==============================] - 0s 0us/step\n80150528/80134624 [==============================] - 0s 0us/step\nModel: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 224, 224, 3)] 0 \n_________________________________________________________________\nvgg19 (Functional) (None, 7, 7, 512) 20024384 \n_________________________________________________________________\nglobal_average_pooling2d (Gl (None, 512) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense (Dense) (None, 1) 513 \n=================================================================\nTotal params: 20,024,897\nTrainable params: 20,024,897\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"import os\nif not os.path.exists(\"training_logs_\" + VERSION):\n os.makedirs(\"training_logs_\" + VERSION)\nfile = open(TRAINING_LOGS_FILE, 'w+')",
"_____no_output_____"
],
[
"history = model.fit(training_generator,\n steps_per_epoch=len(training_generator), \n validation_data=validation_generator,\n validation_steps=len(validation_generator),\n epochs=EPOCHS,\n verbose=VERBOSITY,\n callbacks=[#PlotLossesKeras(),\n ModelCheckpoint(MODEL_FILE,\n monitor='val_accuracy',\n verbose=VERBOSITY,\n save_best_only=True,\n mode='max'),\n CSVLogger(TRAINING_LOGS_FILE,\n append=True,\n separator=';')\n ])",
"Epoch 1/25\n2/2 [==============================] - 82s 42s/step - loss: 0.7398 - accuracy: 0.4917 - val_loss: 0.6933 - val_accuracy: 0.5250\n\nEpoch 00001: val_accuracy improved from -inf to 0.52500, saving model to model_2.6.0/model_vgg19_all_collab.h5\nEpoch 2/25\n2/2 [==============================] - 71s 37s/step - loss: 0.6725 - accuracy: 0.6000 - val_loss: 0.6622 - val_accuracy: 0.5750\n\nEpoch 00002: val_accuracy improved from 0.52500 to 0.57500, saving model to model_2.6.0/model_vgg19_all_collab.h5\nEpoch 3/25\n2/2 [==============================] - 71s 33s/step - loss: 0.6722 - accuracy: 0.5556 - val_loss: 0.6196 - val_accuracy: 0.5750\n\nEpoch 00003: val_accuracy did not improve from 0.57500\nEpoch 4/25\n2/2 [==============================] - 71s 33s/step - loss: 0.6320 - accuracy: 0.6778 - val_loss: 0.7477 - val_accuracy: 0.5750\n\nEpoch 00004: val_accuracy did not improve from 0.57500\nEpoch 5/25\n2/2 [==============================] - 71s 33s/step - loss: 0.6408 - accuracy: 0.6278 - val_loss: 0.6135 - val_accuracy: 0.7000\n\nEpoch 00005: val_accuracy improved from 0.57500 to 0.70000, saving model to model_2.6.0/model_vgg19_all_collab.h5\nEpoch 6/25\n2/2 [==============================] - 71s 37s/step - loss: 0.6075 - accuracy: 0.7111 - val_loss: 0.6645 - val_accuracy: 0.6500\n\nEpoch 00006: val_accuracy did not improve from 0.70000\nEpoch 7/25\n2/2 [==============================] - 71s 33s/step - loss: 0.5596 - accuracy: 0.7167 - val_loss: 0.6189 - val_accuracy: 0.6250\n\nEpoch 00007: val_accuracy did not improve from 0.70000\nEpoch 8/25\n2/2 [==============================] - 71s 33s/step - loss: 0.5619 - accuracy: 0.7417 - val_loss: 0.6828 - val_accuracy: 0.6250\n\nEpoch 00008: val_accuracy did not improve from 0.70000\nEpoch 9/25\n2/2 [==============================] - 71s 37s/step - loss: 0.5321 - accuracy: 0.7556 - val_loss: 0.7627 - val_accuracy: 0.6500\n\nEpoch 00009: val_accuracy did not improve from 0.70000\nEpoch 10/25\n2/2 [==============================] - 71s 33s/step - loss: 0.5135 - accuracy: 0.7750 - val_loss: 0.6983 - val_accuracy: 0.7000\n\nEpoch 00010: val_accuracy did not improve from 0.70000\nEpoch 11/25\n2/2 [==============================] - 71s 33s/step - loss: 0.5185 - accuracy: 0.7806 - val_loss: 0.6315 - val_accuracy: 0.6750\n\nEpoch 00011: val_accuracy did not improve from 0.70000\nEpoch 12/25\n2/2 [==============================] - 71s 33s/step - loss: 0.4795 - accuracy: 0.7861 - val_loss: 0.6692 - val_accuracy: 0.7250\n\nEpoch 00012: val_accuracy improved from 0.70000 to 0.72500, saving model to model_2.6.0/model_vgg19_all_collab.h5\nEpoch 13/25\n2/2 [==============================] - 71s 37s/step - loss: 0.4533 - accuracy: 0.8139 - val_loss: 0.7439 - val_accuracy: 0.7000\n\nEpoch 00013: val_accuracy did not improve from 0.72500\nEpoch 14/25\n2/2 [==============================] - 71s 37s/step - loss: 0.4653 - accuracy: 0.8111 - val_loss: 0.6908 - val_accuracy: 0.7000\n\nEpoch 00014: val_accuracy did not improve from 0.72500\nEpoch 15/25\n2/2 [==============================] - 71s 37s/step - loss: 0.4799 - accuracy: 0.8000 - val_loss: 0.6265 - val_accuracy: 0.7000\n\nEpoch 00015: val_accuracy did not improve from 0.72500\nEpoch 16/25\n2/2 [==============================] - 71s 33s/step - loss: 0.4473 - accuracy: 0.8139 - val_loss: 0.6131 - val_accuracy: 0.7250\n\nEpoch 00016: val_accuracy did not improve from 0.72500\nEpoch 17/25\n2/2 [==============================] - 71s 37s/step - loss: 0.4557 - accuracy: 0.8194 - val_loss: 0.7115 - 
val_accuracy: 0.7250\n\nEpoch 00017: val_accuracy did not improve from 0.72500\nEpoch 18/25\n2/2 [==============================] - 71s 37s/step - loss: 0.4486 - accuracy: 0.8111 - val_loss: 0.7402 - val_accuracy: 0.6750\n\nEpoch 00018: val_accuracy did not improve from 0.72500\nEpoch 19/25\n2/2 [==============================] - 71s 37s/step - loss: 0.4111 - accuracy: 0.8333 - val_loss: 0.7180 - val_accuracy: 0.6750\n\nEpoch 00019: val_accuracy did not improve from 0.72500\nEpoch 20/25\n2/2 [==============================] - 70s 37s/step - loss: 0.4491 - accuracy: 0.8083 - val_loss: 0.6283 - val_accuracy: 0.6750\n\nEpoch 00020: val_accuracy did not improve from 0.72500\nEpoch 21/25\n2/2 [==============================] - 71s 33s/step - loss: 0.4088 - accuracy: 0.8139 - val_loss: 0.5933 - val_accuracy: 0.7250\n\nEpoch 00021: val_accuracy did not improve from 0.72500\nEpoch 22/25\n2/2 [==============================] - 71s 33s/step - loss: 0.4267 - accuracy: 0.8111 - val_loss: 0.8733 - val_accuracy: 0.7250\n\nEpoch 00022: val_accuracy did not improve from 0.72500\nEpoch 23/25\n2/2 [==============================] - 70s 37s/step - loss: 0.4685 - accuracy: 0.8083 - val_loss: 0.6697 - val_accuracy: 0.6500\n\nEpoch 00023: val_accuracy did not improve from 0.72500\nEpoch 24/25\n2/2 [==============================] - 71s 33s/step - loss: 0.4903 - accuracy: 0.7833 - val_loss: 0.5624 - val_accuracy: 0.6750\n\nEpoch 00024: val_accuracy did not improve from 0.72500\nEpoch 25/25\n2/2 [==============================] - 71s 33s/step - loss: 0.4062 - accuracy: 0.8333 - val_loss: 0.9118 - val_accuracy: 0.8000\n\nEpoch 00025: val_accuracy improved from 0.72500 to 0.80000, saving model to model_2.6.0/model_vgg19_all_collab.h5\n"
],
[
"history.history",
"_____no_output_____"
],
[
"# Training plots\ncommon_title = model_type + ' with ' + str(no_of_images) + ' samples'\nepochs = [i for i in range(1, len(history.history['loss'])+1)]\n\nplt.plot(epochs, history.history['loss'], color='blue', label=\"training_loss\")\nplt.plot(epochs, history.history['val_loss'], color='red', label=\"validation_loss\")\nplt.legend(loc='best')\n#plt.title('training: ' + common_title)\nplt.xlabel('epoch')\nplt.savefig(TRAINING_PLOT_FILE, bbox_inches='tight')\nplt.close()\n\nplt.plot(epochs, history.history['accuracy'], color='blue', label=\"training_accuracy\")\nplt.plot(epochs, history.history['val_accuracy'], color='red',label=\"validation_accuracy\")\nplt.legend(loc='best')\nplt.title('Validation (TF '+ VERSION + '): ' + common_title )\nplt.xlabel('epoch')\nplt.savefig(VALIDATION_PLOT_FILE, bbox_inches='tight')\nplt.show()\nplt.close()",
"_____no_output_____"
],
[
"# ROC testing plot\n#model.load_weights(MODEL_FILE)\npredictions = model.predict_generator(testing_generator, steps=len(testing_generator), verbose=VERBOSITY)\nfalse_positive_rate, true_positive_rate, threshold = roc_curve(testing_generator.classes, predictions)\narea_under_curve = auc(false_positive_rate, true_positive_rate)\n\nplt.plot([0, 1], [0, 1], 'k--')\nplt.plot(false_positive_rate, true_positive_rate, label='AUC = {:.3f}'.format(area_under_curve))\nplt.xlabel('False positive rate')\nplt.ylabel('True positive rate')\nplt.title('ROC curve: ' + common_title + ' TF '+ VERSION)\nplt.legend(loc='best')\nplt.savefig(ROC_PLOT_FILE, bbox_inches='tight')\nplt.show()\nplt.close()",
"/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:2035: UserWarning: `Model.predict_generator` is deprecated and will be removed in a future version. Please use `Model.predict`, which supports generators.\n warnings.warn('`Model.predict_generator` is deprecated and '\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74d49a7ff61fb4dbe670c401ab7fa73cf5d1eb4 | 4,835 | ipynb | Jupyter Notebook | ch6 - Analyzing Data with the Spark DataFrame API/Descriptive Statistics.ipynb | PacktPublishing/Azure-Databricks-Cookbook. | 71abfb8928b8c39e6202bdc753e6c4d2d05e0c15 | [
"MIT"
] | 1 | 2021-10-01T22:12:15.000Z | 2021-10-01T22:12:15.000Z | ch6 - Analyzing Data with the Spark DataFrame API/Descriptive Statistics.ipynb | PacktPublishing/Azure-Databricks-Cookbook. | 71abfb8928b8c39e6202bdc753e6c4d2d05e0c15 | [
"MIT"
] | null | null | null | ch6 - Analyzing Data with the Spark DataFrame API/Descriptive Statistics.ipynb | PacktPublishing/Azure-Databricks-Cookbook. | 71abfb8928b8c39e6202bdc753e6c4d2d05e0c15 | [
"MIT"
] | null | null | null | 2,417.5 | 4,834 | 0.645088 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e74d6a460c06c2826386b934aae82e1027c081bb | 56,904 | ipynb | Jupyter Notebook | doc/courses/coursera/deep learning specialization/Convolutional Neural Networks/Convolution+model+-+Step+by+Step+-+v2.ipynb | junhan/learnmachinelearning | 0dc1f253fd7b2d47c3bf82e38bb7b359d8ddea18 | [
"MIT"
] | null | null | null | doc/courses/coursera/deep learning specialization/Convolutional Neural Networks/Convolution+model+-+Step+by+Step+-+v2.ipynb | junhan/learnmachinelearning | 0dc1f253fd7b2d47c3bf82e38bb7b359d8ddea18 | [
"MIT"
] | null | null | null | doc/courses/coursera/deep learning specialization/Convolutional Neural Networks/Convolution+model+-+Step+by+Step+-+v2.ipynb | junhan/learnmachinelearning | 0dc1f253fd7b2d47c3bf82e38bb7b359d8ddea18 | [
"MIT"
] | null | null | null | 42.026588 | 5,306 | 0.561771 | [
[
[
"# Convolutional Neural Networks: Step by Step\n\nWelcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation. \n\n**Notation**:\n- Superscript $[l]$ denotes an object of the $l^{th}$ layer. \n - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.\n\n\n- Superscript $(i)$ denotes an object from the $i^{th}$ example. \n - Example: $x^{(i)}$ is the $i^{th}$ training example input.\n \n \n- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.\n - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.\n \n \n- $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$. \n- $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$. \n\nWe assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!",
"_____no_output_____"
],
[
"## 1 - Packages\n\nLet's first import all the packages that you will need during this assignment. \n- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.\n- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.\n- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n%load_ext autoreload\n%autoreload 2\n\nnp.random.seed(1)",
"_____no_output_____"
]
],
[
[
"## 2 - Outline of the Assignment\n\nYou will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:\n\n- Convolution functions, including:\n - Zero Padding\n - Convolve window \n - Convolution forward\n - Convolution backward (optional)\n- Pooling functions, including:\n - Pooling forward\n - Create mask \n - Distribute value\n - Pooling backward (optional)\n \nThis notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:\n\n<img src=\"images/model.png\" style=\"width:800px;height:300px;\">\n\n**Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation. ",
"_____no_output_____"
],
[
"## 3 - Convolutional Neural Networks\n\nAlthough programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below. \n\n<img src=\"images/conv_nn.png\" style=\"width:350px;height:200px;\">\n\nIn this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself. ",
"_____no_output_____"
],
[
"### 3.1 - Zero-Padding\n\nZero-padding adds zeros around the border of an image:\n\n<img src=\"images/PAD.png\" style=\"width:600px;height:400px;\">\n<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Zero-Padding**<br> Image (3 channels, RGB) with a padding of 2. </center></caption>\n\nThe main benefits of padding are the following:\n\n- It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the \"same\" convolution, in which the height/width is exactly preserved after one layer. \n\n- It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels as the edges of an image.\n\n**Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array \"a\" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:\n```python\na = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))\n```",
"_____no_output_____"
]
],
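[
[
"*Editor's note (added example; not part of the original assignment):* a quick self-contained check of the `np.pad` call quoted above, using the hypothetical 5-dimensional array from the hint. Only the 2nd and 4th dimensions should grow.\n```python\nimport numpy as np\n\na = np.random.randn(5, 5, 5, 5, 5)\n# pad = 1 on the 2nd dimension, pad = 3 on the 4th, nothing elsewhere\na_pad = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (0,0))\nprint(a_pad.shape)   # (5, 7, 5, 11, 5)\n```",
"_____no_output_____"
]
],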
[
[
"# GRADED FUNCTION: zero_pad\n\ndef zero_pad(X, pad):\n \"\"\"\n Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image, \n as illustrated in Figure 1.\n \n Argument:\n X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images\n pad -- integer, amount of padding around each image on vertical and horizontal dimensions\n \n Returns:\n X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line)\n X_pad = np.pad(X, ((0,0), (pad,pad), (pad,pad), (0,0)), 'constant', constant_values = (0,0))\n ### END CODE HERE ###\n \n return X_pad",
"_____no_output_____"
],
[
"np.random.seed(1)\nx = np.random.randn(4, 3, 3, 2)\nx_pad = zero_pad(x, 2)\nprint (\"x.shape =\", x.shape)\nprint (\"x_pad.shape =\", x_pad.shape)\nprint (\"x[1,1] =\", x[1,1])\nprint (\"x_pad[1,1] =\", x_pad[1,1])\n\nfig, axarr = plt.subplots(1, 2)\naxarr[0].set_title('x')\naxarr[0].imshow(x[0,:,:,0])\naxarr[1].set_title('x_pad')\naxarr[1].imshow(x_pad[0,:,:,0])",
"x.shape = (4, 3, 3, 2)\nx_pad.shape = (4, 7, 7, 2)\nx[1,1] = [[ 0.90085595 -0.68372786]\n [-0.12289023 -0.93576943]\n [-0.26788808 0.53035547]]\nx_pad[1,1] = [[ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]]\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **x.shape**:\n </td>\n <td>\n (4, 3, 3, 2)\n </td>\n </tr>\n <tr>\n <td>\n **x_pad.shape**:\n </td>\n <td>\n (4, 7, 7, 2)\n </td>\n </tr>\n <tr>\n <td>\n **x[1,1]**:\n </td>\n <td>\n [[ 0.90085595 -0.68372786]\n [-0.12289023 -0.93576943]\n [-0.26788808 0.53035547]]\n </td>\n </tr>\n <tr>\n <td>\n **x_pad[1,1]**:\n </td>\n <td>\n [[ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]\n [ 0. 0.]]\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"### 3.2 - Single step of convolution \n\nIn this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which: \n\n- Takes an input volume \n- Applies a filter at every position of the input\n- Outputs another volume (usually of different size)\n\n<img src=\"images/Convolution_schematic.gif\" style=\"width:500px;height:300px;\">\n<caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : **Convolution operation**<br> with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) </center></caption>\n\nIn a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output. \n\nLater in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation. \n\n**Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html).\n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: conv_single_step\n\ndef conv_single_step(a_slice_prev, W, b):\n \"\"\"\n Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation \n of the previous layer.\n \n Arguments:\n a_slice_prev -- slice of input data of shape (f, f, n_C_prev)\n W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)\n b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)\n \n Returns:\n Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data\n \"\"\"\n\n ### START CODE HERE ### (≈ 2 lines of code)\n # Element-wise product between a_slice and W. Do not add the bias yet.\n s = a_slice_prev * W\n # Sum over all entries of the volume s.\n Z = np.sum(s)\n # Add bias b to Z. Cast b to a float() so that Z results in a scalar value.\n Z = Z + float(b)\n ### END CODE HERE ###\n\n return Z",
"_____no_output_____"
],
[
"np.random.seed(1)\na_slice_prev = np.random.randn(4, 4, 3)\nW = np.random.randn(4, 4, 3)\nb = np.random.randn(1, 1, 1)\n\nZ = conv_single_step(a_slice_prev, W, b)\nprint(\"Z =\", Z)",
"Z = -6.99908945068\n"
]
],
[
[
"**Expected Output**:\n<table>\n <tr>\n <td>\n **Z**\n </td>\n <td>\n -6.99908945068\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"### 3.3 - Convolutional Neural Networks - Forward pass\n\nIn the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume: \n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/conv_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n**Exercise**: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding. \n\n**Hint**: \n1. To select a 2x2 slice at the upper left corner of a matrix \"a_prev\" (shape (5,5,3)), you would do:\n```python\na_slice_prev = a_prev[0:2,0:2,:]\n```\nThis will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.\n2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find how each of the corner can be defined using h, w, f and s in the code below.\n\n<img src=\"images/vert_horiz_kiank.png\" style=\"width:400px;height:300px;\">\n<caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** <br> This figure shows only a single channel. </center></caption>\n\n\n**Reminder**:\nThe formulas relating the output shape of the convolution to the input shape is:\n$$ n_H = \\lfloor \\frac{n_{H_{prev}} - f + 2 \\times pad}{stride} \\rfloor +1 $$\n$$ n_W = \\lfloor \\frac{n_{W_{prev}} - f + 2 \\times pad}{stride} \\rfloor +1 $$\n$$ n_C = \\text{number of filters used in the convolution}$$\n\nFor this exercise, we won't worry about vectorization, and will just implement everything with for-loops.",
"_____no_output_____"
]
],
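[
[
"*Editor's note (added example):* a small numeric sanity check of the shape formulas above, using the same hyperparameters as the test cell further below (a 4x4 input, f = 2, pad = 2, stride = 2). The numbers are purely illustrative.\n```python\n# Output height/width from the convolution shape formula\nn_H_prev, n_W_prev = 4, 4      # input height and width\nf, pad, stride = 2, 2, 2       # filter size, padding, stride\n\nn_H = int((n_H_prev - f + 2 * pad) / stride) + 1\nn_W = int((n_W_prev - f + 2 * pad) / stride) + 1\nprint(n_H, n_W)   # 4 4, matching the (10, 4, 4, 8) output of the test below\n```",
"_____no_output_____"
]
],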
[
[
"# GRADED FUNCTION: conv_forward\n\ndef conv_forward(A_prev, W, b, hparameters):\n \"\"\"\n Implements the forward propagation for a convolution function\n \n Arguments:\n A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)\n b -- Biases, numpy array of shape (1, 1, 1, n_C)\n hparameters -- python dictionary containing \"stride\" and \"pad\"\n \n Returns:\n Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward() function\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from A_prev's shape (≈1 line) \n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve dimensions from W's shape (≈1 line)\n (f, f, n_C_prev, n_C) = W.shape\n \n # Retrieve information from \"hparameters\" (≈2 lines)\n stride = hparameters['stride']\n pad = hparameters['pad']\n \n # Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. (≈2 lines)\n n_H = int((n_H_prev - f + 2 * pad)/stride) + 1\n n_W = int((n_W_prev - f + 2 * pad)/stride) + 1\n \n # Initialize the output volume Z with zeros. (≈1 line)\n Z = np.zeros(shape = (m, n_H, n_W, n_C))\n \n # Create A_prev_pad by padding A_prev\n A_prev_pad = zero_pad(A_prev, pad)\n \n for i in range(m): # loop over the batch of training examples\n a_prev_pad = A_prev_pad[i] # Select ith training example's padded activation\n for h in range(n_H): # loop over vertical axis of the output volume\n for w in range(n_W): # loop over horizontal axis of the output volume\n for c in range(n_C): # loop over channels (= #filters) of the output volume\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h * stride\n vert_end = vert_start + f\n horiz_start = w * stride\n horiz_end = horiz_start + f\n \n # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)\n a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]\n \n # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈1 line)\n Z[i, h, w, c] = conv_single_step(a_slice_prev, W[..., c], b[...,c])\n \n ### END CODE HERE ###\n \n # Making sure your output shape is correct\n assert(Z.shape == (m, n_H, n_W, n_C))\n \n # Save information in \"cache\" for the backprop\n cache = (A_prev, W, b, hparameters)\n \n return Z, cache",
"_____no_output_____"
],
[
"np.random.seed(1)\nA_prev = np.random.randn(10,4,4,3)\nW = np.random.randn(2,2,3,8)\nb = np.random.randn(1,1,1,8)\nhparameters = {\"pad\" : 2,\n \"stride\": 2}\n\nZ, cache_conv = conv_forward(A_prev, W, b, hparameters)\nprint(\"Z's mean =\", np.mean(Z))\nprint(\"Z[3,2,1] =\", Z[3,2,1])\nprint(\"cache_conv[0][1][2][3] =\", cache_conv[0][1][2][3])",
"Z's mean = 0.0489952035289\nZ[3,2,1] = [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437\n 5.18531798 8.75898442]\ncache_conv[0][1][2][3] = [-0.20075807 0.18656139 0.41005165]\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **Z's mean**\n </td>\n <td>\n 0.0489952035289\n </td>\n </tr>\n <tr>\n <td>\n **Z[3,2,1]**\n </td>\n <td>\n [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437\n 5.18531798 8.75898442]\n </td>\n </tr>\n <tr>\n <td>\n **cache_conv[0][1][2][3]**\n </td>\n <td>\n [-0.20075807 0.18656139 0.41005165]\n </td>\n </tr>\n\n</table>\n",
"_____no_output_____"
],
[
"Finally, CONV layer should also contain an activation, in which case we would add the following line of code:\n\n```python\n# Convolve the window to get back one output neuron\nZ[i, h, w, c] = ...\n# Apply activation\nA[i, h, w, c] = activation(Z[i, h, w, c])\n```\n\nYou don't need to do it here. \n",
"_____no_output_____"
],
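[
"*Editor's note (added example):* a minimal sketch of what the optional activation step could look like, assuming a ReLU nonlinearity; the assignment itself does not ask you to implement this.\n```python\nimport numpy as np\n\ndef relu(z):\n    # element-wise ReLU activation\n    return np.maximum(0, z)\n\n# Z stands for the conv output returned by conv_forward, shape (m, n_H, n_W, n_C)\nZ = np.random.randn(10, 4, 4, 8)   # placeholder values, for illustration only\nA = relu(Z)                        # apply the activation to every output neuron\nprint(A.shape)                     # (10, 4, 4, 8)\n```",
"_____no_output_____"
],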
[
"## 4 - Pooling layer \n\nThe pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are: \n\n- Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.\n\n- Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.\n\n<table>\n<td>\n<img src=\"images/max_pool1.png\" style=\"width:500px;height:300px;\">\n<td>\n\n<td>\n<img src=\"images/a_pool.png\" style=\"width:500px;height:300px;\">\n<td>\n</table>\n\nThese pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the fxf window you would compute a max or average over. \n\n### 4.1 - Forward Pooling\nNow, you are going to implement MAX-POOL and AVG-POOL, in the same function. \n\n**Exercise**: Implement the forward pass of the pooling layer. Follow the hints in the comments below.\n\n**Reminder**:\nAs there's no padding, the formulas binding the output shape of the pooling to the input shape is:\n$$ n_H = \\lfloor \\frac{n_{H_{prev}} - f}{stride} \\rfloor +1 $$\n$$ n_W = \\lfloor \\frac{n_{W_{prev}} - f}{stride} \\rfloor +1 $$\n$$ n_C = n_{C_{prev}}$$",
"_____no_output_____"
]
],
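[
[
"*Editor's note (added example):* a quick check of the pooling shape formulas above, with the hyperparameters used in the test cell below (a 4x4 input, f = 3, stride = 2).\n```python\nn_H_prev, n_W_prev, n_C_prev = 4, 4, 3   # input dimensions\nf, stride = 3, 2                         # pooling window and stride\n\nn_H = int((n_H_prev - f) / stride) + 1\nn_W = int((n_W_prev - f) / stride) + 1\nn_C = n_C_prev\nprint(n_H, n_W, n_C)   # 1 1 3, matching the 1x1x3 outputs printed below\n```",
"_____no_output_____"
]
],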
[
[
"# GRADED FUNCTION: pool_forward\n\ndef pool_forward(A_prev, hparameters, mode = \"max\"):\n \"\"\"\n Implements the forward pass of the pooling layer\n \n Arguments:\n A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n hparameters -- python dictionary containing \"f\" and \"stride\"\n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters \n \"\"\"\n \n # Retrieve dimensions from the input shape\n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve hyperparameters from \"hparameters\"\n f = hparameters[\"f\"]\n stride = hparameters[\"stride\"]\n \n # Define the dimensions of the output\n n_H = int(1 + (n_H_prev - f) / stride)\n n_W = int(1 + (n_W_prev - f) / stride)\n n_C = n_C_prev\n \n # Initialize output matrix A\n A = np.zeros((m, n_H, n_W, n_C)) \n \n ### START CODE HERE ###\n for i in range(m): # loop over the training examples\n for h in range(n_H): # loop on the vertical axis of the output volume\n for w in range(n_W): # loop on the horizontal axis of the output volume\n for c in range (n_C): # loop over the channels of the output volume\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h * stride\n vert_end = vert_start + f\n horiz_start = w * stride\n horiz_end = horiz_start + f\n \n # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)\n a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]\n \n # Compute the pooling operation on the slice. Use an if statment to differentiate the modes. Use np.max/np.mean.\n if mode == \"max\":\n A[i, h, w, c] = np.max(a_prev_slice)\n elif mode == \"average\":\n A[i, h, w, c] = np.mean(a_prev_slice)\n \n ### END CODE HERE ###\n \n # Store the input and hparameters in \"cache\" for pool_backward()\n cache = (A_prev, hparameters)\n \n # Making sure your output shape is correct\n assert(A.shape == (m, n_H, n_W, n_C))\n \n return A, cache",
"_____no_output_____"
],
[
"np.random.seed(1)\nA_prev = np.random.randn(2, 4, 4, 3)\nhparameters = {\"stride\" : 2, \"f\": 3}\n\nA, cache = pool_forward(A_prev, hparameters)\nprint(\"mode = max\")\nprint(\"A =\", A)\nprint()\nA, cache = pool_forward(A_prev, hparameters, mode = \"average\")\nprint(\"mode = average\")\nprint(\"A =\", A)",
"mode = max\nA = [[[[ 1.74481176 0.86540763 1.13376944]]]\n\n\n [[[ 1.13162939 1.51981682 2.18557541]]]]\n\nmode = average\nA = [[[[ 0.02105773 -0.20328806 -0.40389855]]]\n\n\n [[[-0.22154621 0.51716526 0.48155844]]]]\n"
]
],
[
[
"**Expected Output:**\n<table>\n\n <tr>\n <td>\n A =\n </td>\n <td>\n [[[[ 1.74481176 0.86540763 1.13376944]]]\n\n\n [[[ 1.13162939 1.51981682 2.18557541]]]]\n\n </td>\n </tr>\n <tr>\n <td>\n A =\n </td>\n <td>\n [[[[ 0.02105773 -0.20328806 -0.40389855]]]\n\n\n [[[-0.22154621 0.51716526 0.48155844]]]]\n\n </td>\n </tr>\n\n</table>\n",
"_____no_output_____"
],
[
"Congratulations! You have now implemented the forward passes of all the layers of a convolutional network. \n\nThe remainer of this notebook is optional, and will not be graded.\n",
"_____no_output_____"
],
[
"## 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)\n\nIn modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like. \n\nWhen in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can to calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we briefly presented them below.\n\n### 5.1 - Convolutional layer backward pass \n\nLet's start by implementing the backward pass for a CONV layer. \n\n#### 5.1.1 - Computing dA:\nThis is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:\n\n$$ dA += \\sum _{h=0} ^{n_H} \\sum_{w=0} ^{n_W} W_c \\times dZ_{hw} \\tag{1}$$\n\nWhere $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, we multiply the the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\nda_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]\n```\n\n#### 5.1.2 - Computing dW:\nThis is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:\n\n$$ dW_c += \\sum _{h=0} ^{n_H} \\sum_{w=0} ^ {n_W} a_{slice} \\times dZ_{hw} \\tag{2}$$\n\nWhere $a_{slice}$ corresponds to the slice which was used to generate the acitivation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\ndW[:,:,:,c] += a_slice * dZ[i, h, w, c]\n```\n\n#### 5.1.3 - Computing db:\n\nThis is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:\n\n$$ db = \\sum_h \\sum_w dZ_{hw} \\tag{3}$$\n\nAs you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\ndb[:,:,:,c] += dZ[i, h, w, c]\n```\n\n**Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above. ",
"_____no_output_____"
]
],
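[
[
"*Editor's note (added example):* a hedged reference sketch of how formulas (1), (2) and (3) might be assembled into a backward pass, for readers who skip the optional exercise below. It reuses `zero_pad` and `numpy` from earlier cells, follows the same loop structure as `conv_forward`, assumes `pad > 0` (as in the test cell), and is not the official graded solution.\n```python\ndef conv_backward_sketch(dZ, cache):\n    # Unpack the cache saved by conv_forward\n    (A_prev, W, b, hparameters) = cache\n    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n    (f, f, n_C_prev, n_C) = W.shape\n    stride, pad = hparameters['stride'], hparameters['pad']\n    (m, n_H, n_W, n_C) = dZ.shape\n\n    # Gradients start at zero and accumulate over every window position\n    dA_prev = np.zeros(A_prev.shape)\n    dW = np.zeros(W.shape)\n    db = np.zeros(b.shape)\n\n    A_prev_pad = zero_pad(A_prev, pad)\n    dA_prev_pad = zero_pad(dA_prev, pad)\n\n    for i in range(m):                      # loop over the training examples\n        a_prev_pad = A_prev_pad[i]\n        da_prev_pad = dA_prev_pad[i]\n        for h in range(n_H):\n            for w in range(n_W):\n                for c in range(n_C):\n                    vert_start = h * stride\n                    vert_end = vert_start + f\n                    horiz_start = w * stride\n                    horiz_end = horiz_start + f\n                    a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]\n                    # Formulas (1), (2) and (3) from the text above\n                    da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:, :, :, c] * dZ[i, h, w, c]\n                    dW[:, :, :, c] += a_slice * dZ[i, h, w, c]\n                    db[:, :, :, c] += dZ[i, h, w, c]\n        # Strip the padding before storing this example's gradient\n        dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]\n    return dA_prev, dW, db\n```",
"_____no_output_____"
]
],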
[
[
"def conv_backward(dZ, cache):\n \"\"\"\n Implement the backward propagation for a convolution function\n \n Arguments:\n dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward(), output of conv_forward()\n \n Returns:\n dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),\n numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n dW -- gradient of the cost with respect to the weights of the conv layer (W)\n numpy array of shape (f, f, n_C_prev, n_C)\n db -- gradient of the cost with respect to the biases of the conv layer (b)\n numpy array of shape (1, 1, 1, n_C)\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve information from \"cache\"\n (A_prev, W, b, hparameters) = None\n \n # Retrieve dimensions from A_prev's shape\n (m, n_H_prev, n_W_prev, n_C_prev) = None\n \n # Retrieve dimensions from W's shape\n (f, f, n_C_prev, n_C) = None\n \n # Retrieve information from \"hparameters\"\n stride = None\n pad = None\n \n # Retrieve dimensions from dZ's shape\n (m, n_H, n_W, n_C) = None\n \n # Initialize dA_prev, dW, db with the correct shapes\n dA_prev = None \n dW = None\n db = None\n\n # Pad A_prev and dA_prev\n A_prev_pad = None\n dA_prev_pad = None\n \n for i in range(None): # loop over the training examples\n \n # select ith training example from A_prev_pad and dA_prev_pad\n a_prev_pad = None\n da_prev_pad = None\n \n for h in range(None): # loop over vertical axis of the output volume\n for w in range(None): # loop over horizontal axis of the output volume\n for c in range(None): # loop over the channels of the output volume\n \n # Find the corners of the current \"slice\"\n vert_start = None\n vert_end = None\n horiz_start = None\n horiz_end = None\n \n # Use the corners to define the slice from a_prev_pad\n a_slice = None\n\n # Update gradients for the window and the filter's parameters using the code formulas given above\n da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += None\n dW[:,:,:,c] += None\n db[:,:,:,c] += None\n \n # Set the ith training example's dA_prev to the unpaded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])\n dA_prev[i, :, :, :] = None\n ### END CODE HERE ###\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))\n \n return dA_prev, dW, db",
"_____no_output_____"
],
[
"np.random.seed(1)\ndA, dW, db = conv_backward(Z, cache_conv)\nprint(\"dA_mean =\", np.mean(dA))\nprint(\"dW_mean =\", np.mean(dW))\nprint(\"db_mean =\", np.mean(db))",
"_____no_output_____"
]
],
[
[
"** Expected Output: **\n<table>\n <tr>\n <td>\n **dA_mean**\n </td>\n <td>\n 1.45243777754\n </td>\n </tr>\n <tr>\n <td>\n **dW_mean**\n </td>\n <td>\n 1.72699145831\n </td>\n </tr>\n <tr>\n <td>\n **db_mean**\n </td>\n <td>\n 7.83923256462\n </td>\n </tr>\n\n</table>\n",
"_____no_output_____"
],
[
"## 5.2 Pooling layer - backward pass\n\nNext, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagation the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer. \n\n### 5.2.1 Max pooling - backward pass \n\nBefore jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following: \n\n$$ X = \\begin{bmatrix}\n1 && 3 \\\\\n4 && 2\n\\end{bmatrix} \\quad \\rightarrow \\quad M =\\begin{bmatrix}\n0 && 0 \\\\\n1 && 0\n\\end{bmatrix}\\tag{4}$$\n\nAs you can see, this function creates a \"mask\" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask. \n\n**Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward. \nHints:\n- [np.max()]() may be helpful. It computes the maximum of an array.\n- If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:\n```\nA[i,j] = True if X[i,j] = x\nA[i,j] = False if X[i,j] != x\n```\n- Here, you don't need to consider cases where there are several maxima in a matrix.",
"_____no_output_____"
]
],
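[
[
"*Editor's note (added example):* a one-line sketch of the masking idea described above, built from the boolean-comparison hint; the `_sketch` name is mine and this is not the graded solution.\n```python\nimport numpy as np\n\ndef create_mask_from_window_sketch(x):\n    # True exactly where x equals its maximum value\n    return (x == np.max(x))\n\nx = np.array([[1, 3], [4, 2]])\nprint(create_mask_from_window_sketch(x))\n# [[False False]\n#  [ True False]]\n```",
"_____no_output_____"
]
],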
[
[
"def create_mask_from_window(x):\n \"\"\"\n Creates a mask from an input matrix x, to identify the max entry of x.\n \n Arguments:\n x -- Array of shape (f, f)\n \n Returns:\n mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.\n \"\"\"\n \n ### START CODE HERE ### (≈1 line)\n mask = None\n ### END CODE HERE ###\n \n return mask",
"_____no_output_____"
],
[
"np.random.seed(1)\nx = np.random.randn(2,3)\nmask = create_mask_from_window(x)\nprint('x = ', x)\nprint(\"mask = \", mask)",
"_____no_output_____"
]
],
[
[
"**Expected Output:** \n\n<table> \n<tr> \n<td>\n\n**x =**\n</td>\n\n<td>\n\n[[ 1.62434536 -0.61175641 -0.52817175] <br>\n [-1.07296862 0.86540763 -2.3015387 ]]\n\n </td>\n</tr>\n\n<tr> \n<td>\n**mask =**\n</td>\n<td>\n[[ True False False] <br>\n [False False False]]\n</td>\n</tr>\n\n\n</table>",
"_____no_output_____"
],
[
"Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will \"propagate\" the gradient back to this particular input value that had influenced the cost. ",
"_____no_output_____"
],
[
"### 5.2.2 - Average pooling - backward pass \n\nIn max pooling, for each input window, all the \"influence\" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.\n\nFor example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like: \n$$ dZ = 1 \\quad \\rightarrow \\quad dZ =\\begin{bmatrix}\n1/4 && 1/4 \\\\\n1/4 && 1/4\n\\end{bmatrix}\\tag{5}$$\n\nThis implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average. \n\n**Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)",
"_____no_output_____"
]
],
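[
[
"*Editor's note (added example):* a minimal sketch of the value-distribution helper described above; the `_sketch` name is mine, not the assignment's.\n```python\nimport numpy as np\n\ndef distribute_value_sketch(dz, shape):\n    (n_H, n_W) = shape\n    average = dz / (n_H * n_W)      # every entry receives an equal share\n    return np.ones(shape) * average\n\nprint(distribute_value_sketch(2, (2, 2)))\n# [[0.5 0.5]\n#  [0.5 0.5]]\n```",
"_____no_output_____"
]
],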
[
[
"def distribute_value(dz, shape):\n \"\"\"\n Distributes the input value in the matrix of dimension shape\n \n Arguments:\n dz -- input scalar\n shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz\n \n Returns:\n a -- Array of size (n_H, n_W) for which we distributed the value of dz\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from shape (≈1 line)\n (n_H, n_W) = None\n \n # Compute the value to distribute on the matrix (≈1 line)\n average = None\n \n # Create a matrix where every entry is the \"average\" value (≈1 line)\n a = None\n ### END CODE HERE ###\n \n return a",
"_____no_output_____"
],
[
"a = distribute_value(2, (2,2))\nprint('distributed value =', a)",
"_____no_output_____"
]
],
[
[
"**Expected Output**: \n\n<table> \n<tr> \n<td>\ndistributed_value =\n</td>\n<td>\n[[ 0.5 0.5]\n<br\\> \n[ 0.5 0.5]]\n</td>\n</tr>\n</table>",
"_____no_output_____"
],
[
"### 5.2.3 Putting it together: Pooling backward \n\nYou now have everything you need to compute backward propagation on a pooling layer.\n\n**Exercise**: Implement the `pool_backward` function in both modes (`\"max\"` and `\"average\"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dZ.",
"_____no_output_____"
]
],
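[
[
"*Editor's note (added example):* a hedged sketch of the pooling backward pass described above, with the mask and distribution logic written inline so the cell is self-contained; it assumes `numpy` is imported as `np` and is not the graded solution.\n```python\ndef pool_backward_sketch(dA, cache, mode = 'max'):\n    (A_prev, hparameters) = cache\n    stride, f = hparameters['stride'], hparameters['f']\n    m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape\n    m, n_H, n_W, n_C = dA.shape\n    dA_prev = np.zeros(A_prev.shape)\n\n    for i in range(m):                      # loop over the training examples\n        a_prev = A_prev[i]\n        for h in range(n_H):\n            for w in range(n_W):\n                for c in range(n_C):\n                    vert_start, vert_end = h * stride, h * stride + f\n                    horiz_start, horiz_end = w * stride, w * stride + f\n                    if mode == 'max':\n                        # route the gradient only to the position of the maximum\n                        a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]\n                        mask = (a_prev_slice == np.max(a_prev_slice))\n                        dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += mask * dA[i, h, w, c]\n                    elif mode == 'average':\n                        # spread the gradient equally over the f x f window\n                        da = dA[i, h, w, c]\n                        dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += np.ones((f, f)) * (da / (f * f))\n    return dA_prev\n```",
"_____no_output_____"
]
],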
[
[
"def pool_backward(dA, cache, mode = \"max\"):\n \"\"\"\n Implements the backward pass of the pooling layer\n \n Arguments:\n dA -- gradient of cost with respect to the output of the pooling layer, same shape as A\n cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters \n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev\n \"\"\"\n \n ### START CODE HERE ###\n \n # Retrieve information from cache (≈1 line)\n (A_prev, hparameters) = None\n \n # Retrieve hyperparameters from \"hparameters\" (≈2 lines)\n stride = None\n f = None\n \n # Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)\n m, n_H_prev, n_W_prev, n_C_prev = None\n m, n_H, n_W, n_C = None\n \n # Initialize dA_prev with zeros (≈1 line)\n dA_prev = None\n \n for i in range(None): # loop over the training examples\n \n # select training example from A_prev (≈1 line)\n a_prev = None\n \n for h in range(None): # loop on the vertical axis\n for w in range(None): # loop on the horizontal axis\n for c in range(None): # loop over the channels (depth)\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = None\n vert_end = None\n horiz_start = None\n horiz_end = None\n \n # Compute the backward propagation in both modes.\n if mode == \"max\":\n \n # Use the corners and \"c\" to define the current slice from a_prev (≈1 line)\n a_prev_slice = None\n # Create the mask from a_prev_slice (≈1 line)\n mask = None\n # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None\n \n elif mode == \"average\":\n \n # Get the value a from dA (≈1 line)\n da = None\n # Define the shape of the filter as fxf (≈1 line)\n shape = None\n # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None\n \n ### END CODE ###\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == A_prev.shape)\n \n return dA_prev",
"_____no_output_____"
],
[
"np.random.seed(1)\nA_prev = np.random.randn(5, 5, 3, 2)\nhparameters = {\"stride\" : 1, \"f\": 2}\nA, cache = pool_forward(A_prev, hparameters)\ndA = np.random.randn(5, 4, 2, 2)\n\ndA_prev = pool_backward(dA, cache, mode = \"max\")\nprint(\"mode = max\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev[1,1] = ', dA_prev[1,1]) \nprint()\ndA_prev = pool_backward(dA, cache, mode = \"average\")\nprint(\"mode = average\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev[1,1] = ', dA_prev[1,1]) ",
"_____no_output_____"
]
],
[
[
"**Expected Output**: \n\nmode = max:\n<table> \n<tr> \n<td>\n\n**mean of dA =**\n</td>\n\n<td>\n\n0.145713902729\n\n </td>\n</tr>\n\n<tr> \n<td>\n**dA_prev[1,1] =** \n</td>\n<td>\n[[ 0. 0. ] <br>\n [ 5.05844394 -1.68282702] <br>\n [ 0. 0. ]]\n</td>\n</tr>\n</table>\n\nmode = average\n<table> \n<tr> \n<td>\n\n**mean of dA =**\n</td>\n\n<td>\n\n0.145713902729\n\n </td>\n</tr>\n\n<tr> \n<td>\n**dA_prev[1,1] =** \n</td>\n<td>\n[[ 0.08485462 0.2787552 ] <br>\n [ 1.26461098 -0.25749373] <br>\n [ 1.17975636 -0.53624893]]\n</td>\n</tr>\n</table>",
"_____no_output_____"
],
[
"### Congratulations !\n\nCongratulation on completing this assignment. You now understand how convolutional neural networks work. You have implemented all the building blocks of a neural network. In the next assignment you will implement a ConvNet using TensorFlow.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e74d93c4e8713444e07fe9a287f8a801dfa93cdb | 16,364 | ipynb | Jupyter Notebook | notebooks/22_0_L_ExploratoryDataAnalysis.ipynb | luiservela/AstraZeneca | 071e23f0111ece0a414ee4c4dc5dfca25c089979 | [
"Unlicense"
] | 3 | 2019-06-27T22:29:05.000Z | 2021-10-05T14:35:51.000Z | notebooks/22_0_L_ExploratoryDataAnalysis.ipynb | cchamber/AstraZenecaMar19 | 7e6573a3c5e1f9c362a9e6f0abbaf9e5e40f78d7 | [
"Unlicense"
] | null | null | null | notebooks/22_0_L_ExploratoryDataAnalysis.ipynb | cchamber/AstraZenecaMar19 | 7e6573a3c5e1f9c362a9e6f0abbaf9e5e40f78d7 | [
"Unlicense"
] | 1 | 2021-01-05T18:06:21.000Z | 2021-01-05T18:06:21.000Z | 28.45913 | 92 | 0.395013 | [
[
[
"## Imports",
"_____no_output_____"
]
],
[
[
"# Pandas, Numpy and Matplotlib\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"# Import All nltk\nimport nltk\n#nltk.download_shell()",
"_____no_output_____"
]
],
[
[
"# Get tagged words",
"_____no_output_____"
]
],
[
[
"# Set name of file\nfilename = '../data/interim/disease_tags.pkl'\n\n# Read to DataFrame\ndf = pd.read_pickle(filename)\n\n# Echo\ndf.head()",
"_____no_output_____"
],
[
"# Drop nulls, exclude start/end/disease_tag columns\ntags = df['Id ont unique_id'.split()].dropna(axis=0)\n\n# Rename fields, create combined field ont:unique_id\ntags['summary_id'] = tags['Id']\ntags['disease_id'] = tags['ont']+':'+tags['unique_id']\ntags['year'] = 2017 #pd.Series(np.random.randint(2000,2019,tags.shape[0]))\n\n# Leave only important fields\ntags = tags['year summary_id disease_id'.split()]\n\n# Drop duplicates\ntags = tags.drop_duplicates(subset='summary_id disease_id'.split())\n\n# Echo\ntags.head(10)",
"_____no_output_____"
],
[
"# Drop nulls, exclude start/end/disease_tag columns\ntags = df['Id ont unique_id'.split()].dropna(axis=0)\n\n# Rename fields, create combined field ont:unique_id\ntags['summary_id'] = tags['Id']\ntags['disease_id'] = tags['ont']+':'+tags['unique_id']\ntags['year'] = 2017 #pd.Series(np.random.randint(2000,2019,tags.shape[0]))\n\n# Leave only important fields\ntags = tags['year summary_id disease_id'.split()]\n\n# Echo\ntags.head(10)",
"_____no_output_____"
],
[
"# Set strength of duplicates\ntags['combined_id'] = tags['summary_id'] +'_'+ tags['disease_id']\n\ntags.head()",
"_____no_output_____"
]
],
[
[
"# Create links between #tags in same summary",
"_____no_output_____"
]
],
[
[
"links = set()\nfor index, record in df.iterrows():\n for tag1 in record['Tags']:\n for tag2 in record['Tags']:\n links.add((tag1, tag2))\nlen(links)",
"_____no_output_____"
],
[
"import csv\n\nwith open('Links_250.csv', 'w') as outfile:\n w = csv.writer(outfile, delimiter=',', quotechar='\"')\n w.writerow(['Source','Target'])\n for element in links:\n #print(list(element))\n w.writerow(element)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e74d9b49e3d903871a69ebc616e0072022342364 | 1,195 | ipynb | Jupyter Notebook | Complete-Python-Bootcamp-master/.ipynb_checkpoints/Jupyter (iPython) Notebooks Guide-checkpoint.ipynb | geoffduke/Python | 5af0471f6b4a74e33a53d493fa641d5b69a1628a | [
"MIT"
] | 6 | 2017-09-28T12:38:00.000Z | 2020-07-15T04:41:07.000Z | Complete-Python-Bootcamp-master/.ipynb_checkpoints/Jupyter (iPython) Notebooks Guide-checkpoint.ipynb | geoffduke/Python | 5af0471f6b4a74e33a53d493fa641d5b69a1628a | [
"MIT"
] | 5 | 2016-08-25T06:06:12.000Z | 2016-11-26T18:57:20.000Z | Complete-Python-Bootcamp-master/.ipynb_checkpoints/Jupyter (iPython) Notebooks Guide-checkpoint.ipynb | geoffduke/Python | 5af0471f6b4a74e33a53d493fa641d5b69a1628a | [
"MIT"
] | 1 | 2019-11-05T05:29:25.000Z | 2019-11-05T05:29:25.000Z | 25.425532 | 185 | 0.615063 | [
[
[
"# Guide to Using Jupyter Notebooks\nIn this lecture we will be going over the basics of the Jupyter (previously called iPython Notebooks).\n\nFor a complete User Manual check out the [Bryn Mawr College Computer Science Guide](http://jupyter.cs.brynmawr.edu/hub/dblank/public/Jupyter%20Notebook%20Users%20Manual.ipynb).\n\nMost of the breakdown will actually occur in the video lecture corresponding to this Notebook. So please refer to either the video or the full User Manual linked above.",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
e74d9c5638a9501aa0e02a96d03784512df115de | 10,830 | ipynb | Jupyter Notebook | experiments/experiment_4/20200409_change_means.ipynb | neurodata/dos_and_donts | b49a61a8aa29dbde86651bd39c9322f0eb3c0694 | [
"BSD-3-Clause"
] | 3 | 2020-05-17T21:56:52.000Z | 2020-12-09T04:27:31.000Z | experiments/experiment_4/20200409_change_means.ipynb | neurodata/dos_and_donts | b49a61a8aa29dbde86651bd39c9322f0eb3c0694 | [
"BSD-3-Clause"
] | 2 | 2020-08-06T04:58:37.000Z | 2020-08-06T05:02:37.000Z | experiments/experiment_4/20200409_change_means.ipynb | neurodata/dos_and_donts | b49a61a8aa29dbde86651bd39c9322f0eb3c0694 | [
"BSD-3-Clause"
] | 1 | 2020-08-12T02:29:11.000Z | 2020-08-12T02:29:11.000Z | 33.323077 | 409 | 0.493998 | [
[
[
"from argparse import ArgumentParser\nfrom functools import partial\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nfrom graspy.cluster import GaussianCluster\nfrom joblib import Parallel, delayed\nfrom scipy.stats import mannwhitneyu, ttest_ind, ks_2samp\n\nfrom src import generate_truncnorm_sbms_with_communities, estimate_embeddings",
"/home/ubuntu/env/miniconda3/envs/dnd/lib/python3.8/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.mixture.gaussian_mixture module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.mixture. Anything that cannot be imported from sklearn.mixture is now part of the private API.\n warnings.warn(message, FutureWarning)\n"
],
[
"def estimate_community(embeddings, n_clusters):\n predicted_labels = (\n GaussianCluster(n_clusters, n_clusters, \"all\").fit_predict(embeddings) + 1\n )\n\n # ari = adjusted_rand_score(true_labels, predicted_labels)\n return predicted_labels\n\ndef compute_statistic(tests, pop1, pop2):\n res = np.zeros(len(tests))\n\n for idx, test in enumerate(tests):\n if test.__name__ == \"multiscale_graphcorr\":\n statistic, pval, _ = test(pop1, pop2, reps=250, is_twosamp=True)\n elif test.__name__ == \"test\":\n statistic, pval = test(pop1, pop2, reps=250)\n else: # for other tests, do by edge\n statistic, pval = test(pop1, pop2)\n res[idx] = pval\n\n return res\n\ndef run_experiment(\n m,\n block_1,\n block_2,\n mean_1,\n mean_2,\n var_1,\n var_2,\n mean_delta,\n var_delta,\n n_clusters,\n reps,\n tests,\n):\n total_n = block_1 + block_2\n r, c = np.triu_indices(total_n, k=1)\n\n omni_res = np.zeros((reps, len(n_clusters), 2, len(tests)))\n mase_res = np.zeros((reps, len(n_clusters), 2, len(tests)))\n\n for i in np.arange(reps).astype(int):\n pop1, pop2, true_labels = generate_truncnorm_sbms_with_communities(\n m=m,\n block_1=block_1,\n block_2=block_2,\n mean_1=mean_1,\n mean_2=mean_2,\n var_1=var_1,\n var_2=var_2,\n mean_delta=mean_delta,\n var_delta=var_delta,\n )\n pop1_edges = pop1[:, r, c]\n pop2_edges = pop2[:, r, c]\n true_edges = (true_labels[:, None] + true_labels[None, :])[r, c]\n\n for method in [\"mase\", \"omni\"]:\n embeddings = estimate_embeddings(pop1, pop2, method, 2)\n\n for k_idx, k in enumerate(n_clusters):\n predicted_labels = estimate_community(embeddings, k)\n predicted_edge_labels = (\n predicted_labels[:, None] * predicted_labels[None, :]\n )[\n r, c\n ] # vectorize to uppper triu\n \n \n cluster_labels = np.unique(predicted_edge_labels)\n communitity_pvals = np.zeros((np.unique(cluster_labels).size, len(tests)))\n\n for cdx, cluster_label in enumerate(cluster_labels):\n tmp_labels = predicted_edge_labels == cluster_label\n tmp_pop1_edges = pop1_edges[:, tmp_labels].ravel()\n tmp_pop2_edges = pop2_edges[:, tmp_labels].ravel()\n\n pvals = compute_statistic(tests, tmp_pop1_edges, tmp_pop2_edges)\n# for p_idx, pval in enumerate(pvals):\n# if pval <= 0.05:\n# sig_edges[p_idx][tmp_labels] = 1\n communitity_pvals[cdx] = pvals\n \n sig_edges = np.zeros((len(tests), total_n, total_n))[:, r, c]\n \n for u in range(len(tests)):\n tmp_pvals = communitity_pvals[:, u]\n sig_comm = cluster_labels[np.argsort(tmp_pvals, kind='stable')[0]]\n sig_edges[u, predicted_edge_labels == sig_comm] = 1\n\n prec = (sig_edges[:, true_edges == 0]).sum(axis=1) / sig_edges.sum(\n axis=1\n )\n np.nan_to_num(prec, False)\n recall = (sig_edges[:, true_edges == 0]).sum(axis=1) / (\n true_edges == 0\n ).sum(axis=0)\n\n if method == \"mase\":\n mase_res[i, k_idx, :] = np.array((prec, recall))\n else:\n omni_res[i, k_idx, :] = np.array((prec, recall))\n\n omni_res = omni_res.mean(axis=0).reshape(-1)\n mase_res = mase_res.mean(axis=0).reshape(-1)\n\n to_append = [\n m,\n mean_1,\n mean_2,\n var_1,\n var_2,\n mean_delta,\n var_delta,\n *omni_res,\n *mase_res,\n ]\n return to_append",
"_____no_output_____"
],
[
"task_index = 0",
"_____no_output_____"
],
[
"spacing = 50\n\nblock_1 = 25 # different probability\nblock_2 = 25\nmean_1 = 0\nmean_2 = 0\nvar_1 = 0.25\nvar_2 = 0.25\nmean_deltas = np.linspace(0, 1 , spacing + 1)\n#var_deltas = np.linspace(var_1, 3, spacing + 1)\nvar_delta = 0\nreps = 50\nn_clusters = [2]\nms = np.linspace(0, 250, spacing + 1)[1:].astype(int)\n\ntests = [ks_2samp, mannwhitneyu, ttest_ind]\n\npartial_func = partial(\n run_experiment,\n block_1=block_1,\n block_2=block_2,\n mean_1=mean_1,\n mean_2=mean_2,\n var_1=var_1,\n var_2=var_2,\n var_delta=var_delta,\n #mean_delta=mean_delta,\n n_clusters=n_clusters,\n reps=reps,\n tests=tests,\n)",
"_____no_output_____"
],
[
"args = [dict(m=m, mean_delta=mean_delta) for m, mean_delta in product(ms, mean_deltas)]\nargs = args[task_index::2]\nargs = sum(zip(reversed(args), args), ())[: len(args)]\nres = Parallel(n_jobs=-1, verbose=7)(delayed(partial_func)(**arg) for arg in args)",
"[Parallel(n_jobs=-1)]: Using backend LokyBackend with 128 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 32 tasks | elapsed: 1.2min\n[Parallel(n_jobs=-1)]: Done 136 tasks | elapsed: 125.2min\n[Parallel(n_jobs=-1)]: Done 256 tasks | elapsed: 143.9min\n[Parallel(n_jobs=-1)]: Done 392 tasks | elapsed: 239.7min\n[Parallel(n_jobs=-1)]: Done 544 tasks | elapsed: 259.8min\n[Parallel(n_jobs=-1)]: Done 712 tasks | elapsed: 334.0min\n[Parallel(n_jobs=-1)]: Done 896 tasks | elapsed: 395.7min\n[Parallel(n_jobs=-1)]: Done 1203 out of 1275 | elapsed: 483.4min remaining: 28.9min\n[Parallel(n_jobs=-1)]: Done 1275 out of 1275 | elapsed: 493.2min finished\n"
],
[
"cols = [\n \"m\",\n \"mean_1\",\n \"mean_2\",\n \"var_1\",\n \"var_2\",\n \"mean_delta\",\n \"var_delta\",\n *[\n f\"omni_{metric}_{k}_{test.__name__}\"\n for k in n_clusters\n for metric in [\"precision\", \"recall\"]\n for test in tests\n ],\n *[\n f\"mase_{metric}_{k}_{test.__name__}\"\n for k in n_clusters\n for metric in [\"precision\", \"recall\"]\n for test in tests\n ],\n]\nres_df = pd.DataFrame(res, columns=cols)\nres_df.to_csv(\n f\"./results/20200409_weighted_correct_nodes_{task_index}.csv\", index=False\n)",
"_____no_output_____"
],
[
"dfs = pd.concat([pd.read_csv(f\"./results/20200409_weighted_correct_nodes_{i}.csv\") for i in range(2)], ignore_index=True)",
"_____no_output_____"
],
[
"dfs = dfs.sort_values(['m', 'mean_delta'])",
"_____no_output_____"
],
[
"dfs.to_csv(\"./results/20200409_weighted_correct_nodes.csv\", index=True)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74d9c797af3ff50780c812d23c3cf6c22b44d24 | 109,069 | ipynb | Jupyter Notebook | IMF_files/IMF_plot.ipynb | agnitm/starlink2022 | 17f3a678e97b01d278098cb55e22842c66955c97 | [
"MIT"
] | null | null | null | IMF_files/IMF_plot.ipynb | agnitm/starlink2022 | 17f3a678e97b01d278098cb55e22842c66955c97 | [
"MIT"
] | null | null | null | IMF_files/IMF_plot.ipynb | agnitm/starlink2022 | 17f3a678e97b01d278098cb55e22842c66955c97 | [
"MIT"
] | null | null | null | 319.85044 | 32,332 | 0.922819 | [
[
[
"import os\nimport numpy as np\nimport pandas as pd\nimport pickle as pkl\nimport netCDF4 as nc\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport spacepy.pybats.kyoto as kt",
"_____no_output_____"
],
[
"\"\"\"\n We are looking at solar wind and space weather indices from Feb 1 - Feb 8, 2022.\n Following solar wind values are from the DSCOVR satellite (https://www.ngdc.noaa.gov/dscovr/next/).\n Kp, AE and Sym-H observations fetched from Kyoto Observatory.\n\n\"\"\"\n# Set up inputs here\nstarttime = dt.datetime(2022, 2, 1, 0, 0, 0)\nstoptime = dt.datetime(2022, 2, 8, 23, 59, 59)\n\nkey = ['Feb01', 'Feb02', 'Feb03', 'Feb04', 'Feb05', 'Feb06', 'Feb07', 'Feb08']\n\nm1m_files = {key[0]:'oe_m1m_dscovr_s20220201000000_e20220201235959_p20220202021626_pub.nc.gz',\n key[1]:'oe_m1m_dscovr_s20220202000000_e20220202235959_p20220203021731_pub.nc.gz',\n key[2]:'oe_m1m_dscovr_s20220203000000_e20220203235959_p20220204021701_pub.nc.gz',\n key[3]:'oe_m1m_dscovr_s20220204000000_e20220204235959_p20220205021730_pub.nc.gz',\n key[4]:'oe_m1m_dscovr_s20220205000000_e20220205235959_p20220206021727_pub.nc.gz',\n key[5]:'oe_m1m_dscovr_s20220206000000_e20220206235959_p20220207021739_pub.nc.gz',\n key[6]:'oe_m1m_dscovr_s20220207000000_e20220207235959_p20220208021631_pub.nc.gz',\n key[7]:'oe_m1m_dscovr_s20220208000000_e20220208235959_p20220209021604_pub.nc.gz'}\n\nf1m_files = {key[0]:'oe_f1m_dscovr_s20220201000000_e20220201235959_p20220202022134_pub.nc.gz',\n key[1]:'oe_f1m_dscovr_s20220202000000_e20220202235959_p20220203022239_pub.nc.gz',\n key[2]:'oe_f1m_dscovr_s20220203000000_e20220203235959_p20220204022204_pub.nc.gz',\n key[3]:'oe_f1m_dscovr_s20220204000000_e20220204235959_p20220205022237_pub.nc.gz',\n key[4]:'oe_f1m_dscovr_s20220205000000_e20220205235959_p20220206022231_pub.nc.gz',\n key[5]:'oe_f1m_dscovr_s20220206000000_e20220206235959_p20220207022239_pub.nc.gz',\n key[6]:'oe_f1m_dscovr_s20220207000000_e20220207235959_p20220208022120_pub.nc.gz',\n key[7]:'oe_f1m_dscovr_s20220208000000_e20220208235959_p20220209022058_pub.nc.gz'}",
"_____no_output_____"
],
[
"# Set up a dictionary to store variables\ndata = {}\nkey_data = ['time', 'bx', 'by', 'bz', 'vx', 'vy', 'vz', 'rho', 'temp']\n\nfor kd in key_data:\n data[kd] = []\n\n# Extract all data from gun-zipped files here.\nfor k in key:\n os.system('gzip -d {:}'.format(m1m_files[k]))\n filem1m = m1m_files[k][:-3]; ds_m1m = nc.Dataset(filem1m)\n os.system('gzip -d {:}'.format(f1m_files[k]))\n filef1m = f1m_files[k][:-3]; ds_f1m = nc.Dataset(filef1m)\n \n for i in range(len(ds_m1m.variables['time'])):\n t_since = ds_m1m.variables['time'][i].compressed()[:]\n t_since = dt.datetime(1970, 1, 1, 0, 0, 0, 0) + dt.timedelta(milliseconds=t_since[0])\n data['time'].append(t_since)\n\n bx_gsm_dscovr = ds_m1m.variables['bx_gsm'][i].compressed()[:] \n if bx_gsm_dscovr.size > 0: \n data['bx'].append(bx_gsm_dscovr[0])\n else: data['bx'].append(np.nan)#-99999.)\n \n by_gsm_dscovr = ds_m1m.variables['by_gsm'][i].compressed()[:]\n if by_gsm_dscovr.size > 0: \n data['by'].append(by_gsm_dscovr[0])\n else: data['by'].append(np.nan)#-99999.)\n \n bz_gsm_dscovr = ds_m1m.variables['bz_gsm'][i].compressed()[:]\n if bz_gsm_dscovr.size > 0: \n data['bz'].append(bz_gsm_dscovr[0])\n else: data['bz'].append(np.nan)#-99999.)\n \n vx_gsm_dscovr = ds_f1m.variables['proton_vx_gsm'][i].compressed()[:]\n if vx_gsm_dscovr.size > 0: \n data['vx'].append(vx_gsm_dscovr[0])\n else: data['vx'].append(np.nan)#-99999.)\n \n vy_gsm_dscovr = ds_f1m.variables['proton_vy_gsm'][i].compressed()[:]\n if vy_gsm_dscovr.size > 0: \n data['vy'].append(vy_gsm_dscovr[0])\n else: data['vy'].append(np.nan)#-99999.)\n \n vz_gsm_dscovr = ds_f1m.variables['proton_vz_gsm'][i].compressed()[:]\n if vz_gsm_dscovr.size > 0: \n data['vz'].append(vz_gsm_dscovr[0])\n else: data['vz'].append(np.nan)#-99999.) \n \n rho_dscovr = ds_f1m.variables['proton_density'][i].compressed()[:]\n if rho_dscovr.size > 0: \n data['rho'].append(rho_dscovr[0])\n else: data['rho'].append(np.nan)#-99999.)\n \n temp_dscovr = ds_f1m.variables['proton_temperature'][i].compressed()[:]\n if temp_dscovr.size > 0: \n data['temp'].append(temp_dscovr[0])\n else: data['temp'].append(np.nan)#-99999.)\n",
"_____no_output_____"
],
[
"for kd in ['bx', 'by', 'bz', 'vx', 'vy', 'vz', 'rho', 'temp']:\n a = pd.Series(data[kd])\n data[kd] = a.interpolate()",
"_____no_output_____"
],
[
"pkl.dump(data, open('IMF_Starlink_2022.pkl', 'wb'))",
"_____no_output_____"
],
[
"plt.plot(data['time'], data['vx'])\nplt.plot(data['time'], data['vy'])\nplt.plot(data['time'], data['vz'])",
"_____no_output_____"
],
[
"plt.plot(data['time'], data['bx'])\nplt.plot(data['time'], data['by'])\nplt.plot(data['time'], data['bz'])",
"_____no_output_____"
],
[
"plt.plot(data['time'], data['rho'])",
"_____no_output_____"
],
[
"plt.plot(data['time'], data['temp'])",
"_____no_output_____"
],
[
"file = open('IMF_Starlink_2022.dat', 'w')\n\nfile.write('File created on {:} UT'.format(dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") ))\nfile.write('\\n2022 Starlink Space Weather Event - IMF mesurements by DSCOVR Satellite.\\n')\n\nfile.write('\\nyear mo dy hr min sec msec bx by bz vx vy vz dens temp\\n#START')\n\na = pd.Series(data['bx'])\na.interpolate()\n\nfor i in range(len(data['time'])):\n txt = '\\n {0:} 000{1:8.2f}{2:7.2f}{3:7.2f}{4:9.2f}\\t{5:7.2f}\\t{6:7.2f}\\t{7:7.2f}\\t{8:12.1f}'\n file.write(txt.format(data['time'][i].strftime(\"%Y %m %d %H %M %S\"),\n a[i], data['by'][i], data['bz'][i], \n data['vx'][i], data['vy'][i], data['vz'][i],\n data['rho'][i], data['temp'][i]))\n\nfile.close()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74dad6d0124786d89c47a5aac1b56a2fab9c7e9 | 672,523 | ipynb | Jupyter Notebook | scripts/Regresion_Depr_Rate_Cluster_1.ipynb | henrycorazza/AISaturdays-depresion-rrss | 273551247c20850430f65c341584e23a049c6cb8 | [
"MIT"
] | 2 | 2020-11-16T23:10:41.000Z | 2021-01-19T22:43:34.000Z | scripts/Regresion_Depr_Rate_Cluster_1.ipynb | serNAVARRO7/AISaturdays-depresion-rrss | 273551247c20850430f65c341584e23a049c6cb8 | [
"MIT"
] | null | null | null | scripts/Regresion_Depr_Rate_Cluster_1.ipynb | serNAVARRO7/AISaturdays-depresion-rrss | 273551247c20850430f65c341584e23a049c6cb8 | [
"MIT"
] | 2 | 2020-11-23T19:10:43.000Z | 2021-01-25T15:02:03.000Z | 761.634202 | 420,720 | 0.933955 | [
[
[
"# <center>AI SATURDAYS DONOSTIA 2020<center>\n \n## <center>Regresión Indicador \"DeprRate\" (Índice de Depresión) - Cluster 1</center>\n \n## <center>Proyecto Práctico Equipo FACEMOOD</center><img src=\"attachment:image.png\" width=\"400\"> \n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom regresion_functions import *\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"### Conjunto de Datos con 3 Clusters",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('../processed-data/cluster3_socialmedia_data.csv', index_col=0)\ndf.head()",
"_____no_output_____"
]
],
[
[
"### Creación Índice de Depresión",
"_____no_output_____"
]
],
[
[
"df[\"DeprRate\"]=(df[\"LowMood\"]+df[\"LossOfInt\"]+df[\"Hopeless\"])/3\ndf.head()",
"_____no_output_____"
]
],
[
[
"### Nuevo Conjunto de Datos",
"_____no_output_____"
]
],
[
[
"df2 = df[[\"ASMU\", \"News\", \"PSMU\", \"Stress\", \"Inferior\", \"Concentrat\", \"Loneliness\", \"Fatigue\", \"DeprRate\", \"Sintomas_Cluster3\"]]\nprint(df2.head())\nprint(\"No. Filas/Columnas del Conjunto de Datos: {}\".format(df2.shape))",
" ASMU News PSMU Stress Inferior \\\nParticipant \n115091 16.792208 15.012987 32.883117 37.441558 17.831169 \n131183 28.254237 11.593220 45.203390 16.898305 0.254237 \n438907 27.040816 34.645833 44.595745 25.000000 23.395833 \n515070 37.826923 38.576923 25.711538 17.365385 11.403846 \n572172 40.545455 45.250000 36.852273 20.511364 38.056818 \n\n Concentrat Loneliness Fatigue DeprRate Sintomas_Cluster3 \nParticipant \n115091 37.272727 23.285714 45.155844 29.839827 1 \n131183 19.508475 0.389831 36.288136 6.129944 0 \n438907 35.729167 21.833333 51.040816 30.619048 1 \n515070 32.903846 14.076923 34.865385 13.076923 0 \n572172 14.352273 9.375000 18.272727 17.977273 0 \nNo. Filas/Columnas del Conjunto de Datos: (125, 10)\n"
]
],
[
[
"### Datos Cluster 1",
"_____no_output_____"
]
],
[
[
"df3=df2[df2[\"Sintomas_Cluster3\"]==1]\ndf3= df3[[\"ASMU\", \"News\", \"PSMU\", \"Stress\", \"Inferior\", \"Concentrat\", \"Loneliness\", \"Fatigue\", \"DeprRate\"]]\nprint(df3.head())\nprint(\"No. Filas/Columnas del Conjunto de Datos: {}\".format(df3.shape))",
" ASMU News PSMU Stress Inferior \\\nParticipant \n115091 16.792208 15.012987 32.883117 37.441558 17.831169 \n438907 27.040816 34.645833 44.595745 25.000000 23.395833 \n680605 1.463158 14.631579 34.547368 40.063158 3.000000 \n1500743 25.939759 16.566265 32.590361 32.554217 14.855422 \n1875048 21.738462 5.843750 24.953125 33.468750 13.765625 \n\n Concentrat Loneliness Fatigue DeprRate \nParticipant \n115091 37.272727 23.285714 45.155844 29.839827 \n438907 35.729167 21.833333 51.040816 30.619048 \n680605 46.852632 3.663158 60.894737 19.715789 \n1500743 45.072289 9.228916 40.156627 23.871486 \n1875048 33.203125 24.312500 45.859375 29.424840 \nNo. Filas/Columnas del Conjunto de Datos: (44, 9)\n"
]
],
[
[
"### Estadísticas descriptivas de las medias por participante del Cluster 1",
"_____no_output_____"
]
],
[
[
"df3.describe()",
"_____no_output_____"
]
],
[
[
"### Diagrama de Matriz para las Medias de las 9 Variables",
"_____no_output_____"
]
],
[
[
"printMatrixDiagram(df3) # Función definida en \"regresion_functions\"",
"_____no_output_____"
]
],
[
[
"### Correlaciones de Pearson para las Medias de las 9 Variables",
"_____no_output_____"
]
],
[
[
"printPearsonCorrelations(df3) # Función definida en \"regresion_functions\"",
"_____no_output_____"
]
],
[
[
"### Se observan correlaciones más significativas entre las siguientes variables:\n\nDeprRate vs Loneliness\n\nDeprRate vs Inferior\n\nLoneliness vs Inferior\n\n\nNo se observa \"multicolinealidad\"",
"_____no_output_____"
],
[
"### Regresión Lineal para las Medias: y = DeprRate, X = Demás Variables",
"_____no_output_____"
]
],
[
[
"label = df3.DeprRate\n\ndf3.drop('DeprRate', axis=1, inplace=True)",
"_____no_output_____"
]
],
[
[
"### Proceso de eliminación de variables X que no contribuyen significativamente para explicar y",
"_____no_output_____"
]
],
[
[
"resultsummary = pd.DataFrame(data={'iteration': [], 'intercept': [], 'RMSE_Training': [], 'RMSE_Testing': [],\n 'R2_Training': [],'R2_Testing': [],'p_value_max':[],'removed_var':[]})\n\ndata_list_medias = calculateRegression(df3, label, resultsummary, alpha=0.15) # Función definida en \"regresion_functions\"",
" iteration intercept RMSE_Training RMSE_Testing R2_Training R2_Testing \\\n0 0.0 2.680 3.461 3.232 0.803 -0.225 \n1 1.0 2.228 3.468 3.121 0.802 -0.143 \n2 2.0 2.573 3.473 3.320 0.802 -0.293 \n3 3.0 1.842 3.510 3.229 0.798 -0.223 \n\n p_value_max removed_var \n0 0.911 PSMU \n1 0.633 Fatigue \n2 0.539 ASMU \n3 0.115 - \n\nModelo Final\n['News', 'Stress', 'Inferior', 'Concentrat', 'Loneliness']\n[0.07565841 0.18577859 0.19752373 0.16053377 0.33411428] 1.8420832771152895\nRMSE of Linear Regression Model with Training Data: 3.51\nRMSE of Linear Regression Model with Testing Data: 3.23\nR2 Coefficient for Linear Regression Model with Training Data: 0.798\nR2 Coefficient for Linear Regression Model with Testing Data: -0.223\n"
]
],
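[
[
"*Editor's note (added example):* a small helper showing how the fitted equation reported in the model summary above (and in the conclusion below) could be applied to new per-participant means; the coefficients are simply the printed values rounded to two decimals, so this is an illustration, not part of the original analysis.\n```python\ndef estimate_depr_rate(loneliness, inferior, stress, concentrat, news):\n    # Rounded coefficients from the final fitted model above\n    return (1.84 + 0.33 * loneliness + 0.20 * inferior\n            + 0.19 * stress + 0.16 * concentrat + 0.08 * news)\n\n# Example with the Cluster 1 means shown earlier for participant 115091\nprint(estimate_depr_rate(23.29, 17.83, 37.44, 37.27, 15.01))   # observed DeprRate was about 29.8\n```",
"_____no_output_____"
]
],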
[
[
"### Análisis de Residuos Modelo Final",
"_____no_output_____"
]
],
[
[
"fitt = data_list_medias[5]\nstandardized_residuals = data_list_medias[4]\n\nresidualAnalysis(fitt, standardized_residuals) # Función definida en \"regresion_functions\"",
"Estadística prueba normalidad Kolmogorov-Smirnov=0.107, pvalue=0.815\n\nProbablemente Normal\n"
]
],
[
[
"## Conclusión Final Modelo Ajustado (Cluster 1; y = DeprRate)\n##### \n### El coeficiente de determinación (R2) del modelo ajustado para las \"medias\" del Cluster 1 es alto (79,8%).\n### El Análisis de Residuos ha sido considerado satisfactorio, teniendo en cuenta la escasez de datos.\n### En principio, se valida el modelo bajo un punto de vista estadístico.\n#### Nota: Debido a la escasez de los datos reservados para el testeo (total de 9 participantes), no tendremos en cuenta el \"R2 Testing Data\".\n##### \n### DeprRate = 1,84 + 0,33 Loneliness + 0,20 Inferior + 0,19 Stress + 0,16 Concentrat + 0,08 News",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74db381a4a6b0b783807c8b50d7cad80e5932ab | 3,920 | ipynb | Jupyter Notebook | index.ipynb | fomightez/photo2GPS | 95e662160e3a466e24e9ef17494b87a0c7721fdc | [
"MIT"
] | 1 | 2019-05-08T15:00:36.000Z | 2019-05-08T15:00:36.000Z | index.ipynb | fomightez/photo2GPS | 95e662160e3a466e24e9ef17494b87a0c7721fdc | [
"MIT"
] | null | null | null | index.ipynb | fomightez/photo2GPS | 95e662160e3a466e24e9ef17494b87a0c7721fdc | [
"MIT"
] | null | null | null | 40.412371 | 457 | 0.639796 | [
[
[
"# Collecting GPS coordinates from photo metadata\n\nFor orientation: \nThis page is the main panel and to the left you should see a 'file browser' panel listing some folders and files, including this one. You may need to click and drag the border in between the two panes in order to better see the names. In particular you want to look for a folder icon next the name 'PUT_PHOTOS_HERE'.\n\n**Follow the steps below.**\n\n----------------------",
"_____no_output_____"
],
[
"## PART 1: Upload the photos and collect the data\n\n1. Double-click on 'PUT_PHOTOS_HERE' folder icon in the file browser panel to the left of this page. That will drop the navigation into the empty folder.\n\n2. Drag-and-drop all the photos into the empty file browser panel to the left of this page. (A gray dashed line will appear around the edge of the file browser when drag the file onto the file browser pane & at this point you can drop a file or files in by releasing the mouse button.)\n\n3. Give the photos time to upload.\n\n4. At the top of the panel where you just dragged the photos & to the left of the 'PUT_PHOTOS_HERE' text, click on the `Home` icon to go back up to the level above the photos files in the file heierarchy.\n\n5. Click in the next cell and execute the commands in it by typing `shift-return` (or hit the `play` icon the menu at the top bar of this page).",
"_____no_output_____"
]
],
[
[
"output_file = \"coords.tsv\"\n#The following command is based on Phil Harvey's answer on January 20, 2011, 07:30:32 PM, &\n# comment on on: January 20, 2011 to add the -n option, from\n# http://u88.n24.queensu.ca/exiftool/forum/index.php?topic=3075.0\n!exiftool -filename -gpslatitude -gpslongitude -T -n PUT_PHOTOS_HERE > {output_file}\n# add column names\n!sed -i '1ifile_name\\tlat\\t\\long' {output_file}",
"_____no_output_____"
]
],
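[
    [
        "# Optional sketch: preview the extracted table directly in this notebook, assuming pandas is installed in\n# this environment (it is not required for the spreadsheet workflow described in PART 2 below).\nimport pandas as pd\n\npd.read_csv(output_file, sep=\"\\t\").head()",
        "_____no_output_____"
    ]
],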
[
[
"It should generate a file named `coords.tsv` & after a few seconds to let the file browser panel refresh itself, you should now see it listed among the files in the file browser panel. \n\nView the extracted data by double-clicking *TWICE* on the `coords.tsv` icon. Alternatively, you can open it by right-clicking on it and selecting to 'Open With' > 'TSVTable'.\n\n## PART 2: Bring the data onto your computer\n\nRight click on `coords.tsv` in the file browser panel, select 'Download' to save the file to your local system. \n\nNext, you should be able to directly open this file from within your favorite spreadsheet program, Excel or Google Sheets, and it should automatically open with the correct labels and values in the right cells. In other words, it should look much like when you viewed in the 'TSVTable' view earlier. It is suggested that you now save it as a spreadsheet file and discard the tsv if you are going to do any further editing in the spreadsheet program.\n",
"_____no_output_____"
],
[
"---- \n\n----",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e74dbde3c05d337ab00d2dd55aa902a194b0fe95 | 28,152 | ipynb | Jupyter Notebook | ann.ipynb | deeplearningunb/safe-driving | 0036cc35856af92058dd584fea6659abd904c12d | [
"MIT"
] | null | null | null | ann.ipynb | deeplearningunb/safe-driving | 0036cc35856af92058dd584fea6659abd904c12d | [
"MIT"
] | 4 | 2020-11-09T11:40:24.000Z | 2020-11-21T13:57:42.000Z | ann.ipynb | deeplearningunb/safe-driving | 0036cc35856af92058dd584fea6659abd904c12d | [
"MIT"
] | null | null | null | 41.706667 | 137 | 0.44139 | [
[
[
"#Importa bibliotecas e remove acentos do dataset\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom unicodedata import normalize\nfrom sklearn.preprocessing import MinMaxScaler\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import GRU\nfrom keras.layers import Dropout\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.model_selection import train_test_split\n\ndef accent_remover(x):\n try:\n float(x)\n return x\n except:\n return normalize('NFKD',x).encode('ASCII', 'ignore').decode('ASCII')",
"_____no_output_____"
],
[
"# Importa os datasets\ndataset_2017 = pd.read_csv('datasets/datatran2017.csv', sep= ';', encoding='ISO-8859-1').dropna().drop_duplicates()\ndataset_2018 = pd.read_csv('datasets/datatran2018.csv', sep= ';', encoding='ISO-8859-1').dropna().drop_duplicates()\ndataset = pd.concat([dataset_2017,dataset_2018])\n\ndataset = dataset.transform([accent_remover])\ndataset.columns = dataset.columns.droplevel(1)",
"_____no_output_____"
],
[
"# Pega as dados das colunas:\n# data_inversa\tdia_semana\thorario\tuf\tbr\tkm\tcondicao_metereologica\tlatitude\tlongitude\ntraining_set = dataset.iloc[:, [1,2,3,4,5,6,13,25,26]].values",
"_____no_output_____"
],
[
"# Converte os dados de string para number\nencoder = LabelEncoder()\ntraining_set[:,0] = encoder.fit_transform(training_set[:,0])\ntraining_set[:,1] = encoder.fit_transform(training_set[:,1])\ntraining_set[:,2] = encoder.fit_transform(training_set[:,2])\ntraining_set[:,3] = encoder.fit_transform(training_set[:,3])\ntraining_set[:,4] = encoder.fit_transform(training_set[:,4])\ntraining_set[:,5] = encoder.fit_transform(training_set[:,5])\ntraining_set[:,6] = encoder.fit_transform(training_set[:,6])\ntraining_set[:,7] = encoder.fit_transform(training_set[:,7])\ntraining_set[:,8] = encoder.fit_transform(training_set[:,8])",
"_____no_output_____"
],
[
"# Converte os dados para uma mesma faixa de valores de 0 a 1\nsc = MinMaxScaler(feature_range = (0, 1))\ntraining_set_scaled = sc.fit_transform(training_set)",
"_____no_output_____"
],
[
"# Cria a estrutura do dados com 60 timesteps e 1 saida\nX_train = []\ny_train = []\nfor i in range(60, 1258):\n X_train.append(training_set_scaled[i-60:i,:])\n y_train.append(training_set_scaled[i,:])\nX_train, y_train = np.array(X_train), np.array(y_train)",
"_____no_output_____"
],
[
"# Inicializa a RNN\nregressor = Sequential()\n\n# Adiciona a primeira camada\nregressor.add(GRU(units = 100, return_sequences = True, input_shape=(60,9)))\nregressor.add(Dropout(0.2))\n\n# Segunda camada\nregressor.add(GRU(units = 100, return_sequences = True))\nregressor.add(Dropout(0.2))\n\n# Terceira camada\nregressor.add(GRU(units = 100, return_sequences = True))\nregressor.add(Dropout(0.2))\n\n# Quarta camada\nregressor.add(GRU(units = 100))\nregressor.add(Dropout(0.2))\n\n# Camada de saida\nregressor.add(Dense(units = 9))\n\n# Compila\nregressor.compile(optimizer = 'adam', loss = 'mean_squared_error')\n\n# Executa o treinamento\nregressor.fit(X_train, y_train, epochs = 50, batch_size = 32)",
"Epoch 1/50\n38/38 [==============================] - 10s 253ms/step - loss: 0.0819\nEpoch 2/50\n38/38 [==============================] - 9s 245ms/step - loss: 0.0622\nEpoch 3/50\n38/38 [==============================] - 9s 244ms/step - loss: 0.0613\nEpoch 4/50\n38/38 [==============================] - 10s 252ms/step - loss: 0.0601\nEpoch 5/50\n38/38 [==============================] - 9s 245ms/step - loss: 0.0585\nEpoch 6/50\n38/38 [==============================] - 9s 245ms/step - loss: 0.0592\nEpoch 7/50\n38/38 [==============================] - 9s 246ms/step - loss: 0.0589\nEpoch 8/50\n38/38 [==============================] - 9s 245ms/step - loss: 0.0577\nEpoch 9/50\n38/38 [==============================] - 10s 257ms/step - loss: 0.0582\nEpoch 10/50\n38/38 [==============================] - 10s 255ms/step - loss: 0.0578\nEpoch 11/50\n38/38 [==============================] - 10s 251ms/step - loss: 0.0574\nEpoch 12/50\n38/38 [==============================] - 10s 254ms/step - loss: 0.0578\nEpoch 13/50\n38/38 [==============================] - 10s 255ms/step - loss: 0.0574\nEpoch 14/50\n38/38 [==============================] - 9s 247ms/step - loss: 0.0569\nEpoch 15/50\n38/38 [==============================] - 9s 249ms/step - loss: 0.0568\nEpoch 16/50\n38/38 [==============================] - 9s 250ms/step - loss: 0.0563\nEpoch 17/50\n38/38 [==============================] - 9s 249ms/step - loss: 0.0565\nEpoch 18/50\n38/38 [==============================] - 9s 247ms/step - loss: 0.0560\nEpoch 19/50\n38/38 [==============================] - 10s 251ms/step - loss: 0.0564\nEpoch 20/50\n38/38 [==============================] - 9s 249ms/step - loss: 0.0563\nEpoch 21/50\n38/38 [==============================] - 10s 250ms/step - loss: 0.0558\nEpoch 22/50\n38/38 [==============================] - 9s 248ms/step - loss: 0.0557\nEpoch 23/50\n38/38 [==============================] - 9s 248ms/step - loss: 0.0560\nEpoch 24/50\n38/38 [==============================] - 9s 249ms/step - loss: 0.0560\nEpoch 25/50\n38/38 [==============================] - 10s 253ms/step - loss: 0.0554\nEpoch 26/50\n38/38 [==============================] - 9s 250ms/step - loss: 0.0555\nEpoch 27/50\n38/38 [==============================] - 10s 251ms/step - loss: 0.0559\nEpoch 28/50\n38/38 [==============================] - 9s 247ms/step - loss: 0.0558\nEpoch 29/50\n38/38 [==============================] - 9s 249ms/step - loss: 0.0555\nEpoch 30/50\n38/38 [==============================] - 9s 250ms/step - loss: 0.0551\nEpoch 31/50\n38/38 [==============================] - 10s 252ms/step - loss: 0.0553\nEpoch 32/50\n38/38 [==============================] - 10s 251ms/step - loss: 0.0558\nEpoch 33/50\n38/38 [==============================] - 10s 255ms/step - loss: 0.0554\nEpoch 34/50\n38/38 [==============================] - 10s 253ms/step - loss: 0.0550\nEpoch 35/50\n38/38 [==============================] - 10s 254ms/step - loss: 0.0554\nEpoch 36/50\n38/38 [==============================] - 9s 246ms/step - loss: 0.0549\nEpoch 37/50\n38/38 [==============================] - 9s 247ms/step - loss: 0.0552\nEpoch 38/50\n38/38 [==============================] - 10s 251ms/step - loss: 0.0549\nEpoch 39/50\n38/38 [==============================] - 9s 247ms/step - loss: 0.0547\nEpoch 40/50\n38/38 [==============================] - 9s 248ms/step - loss: 0.0548\nEpoch 41/50\n38/38 [==============================] - 10s 257ms/step - loss: 0.0546\nEpoch 42/50\n38/38 [==============================] - 11s 289ms/step - loss: 0.0548\nEpoch 
43/50\n38/38 [==============================] - 11s 281ms/step - loss: 0.0549\nEpoch 44/50\n38/38 [==============================] - 10s 263ms/step - loss: 0.0545\nEpoch 45/50\n38/38 [==============================] - 10s 267ms/step - loss: 0.0548\nEpoch 46/50\n38/38 [==============================] - 10s 254ms/step - loss: 0.0545\nEpoch 47/50\n38/38 [==============================] - 10s 254ms/step - loss: 0.0545\nEpoch 48/50\n38/38 [==============================] - 9s 249ms/step - loss: 0.0541\nEpoch 49/50\n38/38 [==============================] - 9s 249ms/step - loss: 0.0540\nEpoch 50/50\n38/38 [==============================] - 11s 301ms/step - loss: 0.0542\n"
],
[
"# Testando os dados analisados com os de 2019\ndataset_test = pd.read_csv('datasets/datatran2019.csv', sep= ';', encoding='ISO-8859-1').dropna().drop_duplicates()\ndataset_test = dataset_test.transform([accent_remover])\ndataset_test.columns = dataset_test.columns.droplevel(1)\n\nreal_data = dataset_test.iloc[:, [1,2,3,4,5,6,13,25,26]].values\nencoder = LabelEncoder()\nreal_data[:,0] = encoder.fit_transform(real_data[:,0])\nreal_data[:,1] = encoder.fit_transform(real_data[:,1])\nreal_data[:,2] = encoder.fit_transform(real_data[:,2])\nreal_data[:,3] = encoder.fit_transform(real_data[:,3])\nreal_data[:,4] = encoder.fit_transform(real_data[:,4])\nreal_data[:,5] = encoder.fit_transform(real_data[:,5])\nreal_data[:,6] = encoder.fit_transform(real_data[:,6])\nreal_data[:,7] = encoder.fit_transform(real_data[:,7])\nreal_data[:,8] = encoder.fit_transform(real_data[:,8])",
"_____no_output_____"
],
[
"# Realiza a predicao dos dados\ninputs = real_data\ninputs = sc.fit_transform(inputs)\nX_test = []\nfor i in range(60, 80):\n X_test.append(inputs[i-60:i,:])\nX_test = np.array(X_test)\npredicted_data = regressor.predict(X_test)\npredicted_data = sc.inverse_transform(predicted_data)",
"_____no_output_____"
],
[
"## ANN\n# Selecionando dataset Treinamento\nX = []\ny = []\nfor i in predicted_data:\n\tX.append(i)\n\ty.append(1)\n\nX_train = np.array(X)\ny_train = np.array(y)",
"_____no_output_____"
],
[
"# Converte os dados para uma mesma faixa de valores de 0 a 1\nsc = MinMaxScaler(feature_range = (0, 1))\nX_train = sc.fit_transform(X_train)",
"_____no_output_____"
],
[
"# Cria a ANN\nclassifier = Sequential()\nclassifier.add(Dense(units = 9, kernel_initializer = 'uniform', activation = 'relu', input_dim=9))\nclassifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))\nclassifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\nclassifier.fit(X_train, y_train, batch_size = 32, epochs = 100)",
"Epoch 1/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6931 - accuracy: 1.0000\nEpoch 2/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6925 - accuracy: 1.0000\nEpoch 3/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6920 - accuracy: 1.0000\nEpoch 4/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6914 - accuracy: 1.0000\nEpoch 5/100\n1/1 [==============================] - 0s 1ms/step - loss: 0.6908 - accuracy: 1.0000\nEpoch 6/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6902 - accuracy: 1.0000\nEpoch 7/100\n1/1 [==============================] - 0s 4ms/step - loss: 0.6897 - accuracy: 1.0000\nEpoch 8/100\n1/1 [==============================] - 0s 4ms/step - loss: 0.6891 - accuracy: 1.0000\nEpoch 9/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6885 - accuracy: 1.0000\nEpoch 10/100\n1/1 [==============================] - 0s 6ms/step - loss: 0.6879 - accuracy: 1.0000\nEpoch 11/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6873 - accuracy: 1.0000\nEpoch 12/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6867 - accuracy: 1.0000\nEpoch 13/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6860 - accuracy: 1.0000\nEpoch 14/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6854 - accuracy: 1.0000\nEpoch 15/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6848 - accuracy: 1.0000\nEpoch 16/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6841 - accuracy: 1.0000\nEpoch 17/100\n1/1 [==============================] - 0s 1ms/step - loss: 0.6835 - accuracy: 1.0000\nEpoch 18/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6828 - accuracy: 1.0000\nEpoch 19/100\n1/1 [==============================] - 0s 1ms/step - loss: 0.6822 - accuracy: 1.0000\nEpoch 20/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6815 - accuracy: 1.0000\nEpoch 21/100\n1/1 [==============================] - 0s 4ms/step - loss: 0.6808 - accuracy: 1.0000\nEpoch 22/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6801 - accuracy: 1.0000\nEpoch 23/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6794 - accuracy: 1.0000\nEpoch 24/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6787 - accuracy: 1.0000\nEpoch 25/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6780 - accuracy: 1.0000\nEpoch 26/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6772 - accuracy: 1.0000\nEpoch 27/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6765 - accuracy: 1.0000\nEpoch 28/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6757 - accuracy: 1.0000\nEpoch 29/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6749 - accuracy: 1.0000\nEpoch 30/100\n1/1 [==============================] - 0s 4ms/step - loss: 0.6741 - accuracy: 1.0000\nEpoch 31/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6733 - accuracy: 1.0000\nEpoch 32/100\n1/1 [==============================] - 0s 4ms/step - loss: 0.6725 - accuracy: 1.0000\nEpoch 33/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6717 - accuracy: 1.0000\nEpoch 34/100\n1/1 [==============================] - 0s 1ms/step - loss: 0.6708 - accuracy: 1.0000\nEpoch 35/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6699 - accuracy: 1.0000\nEpoch 36/100\n1/1 [==============================] - 0s 
4ms/step - loss: 0.6690 - accuracy: 1.0000\nEpoch 37/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6681 - accuracy: 1.0000\nEpoch 38/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6672 - accuracy: 1.0000\nEpoch 39/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6663 - accuracy: 1.0000\nEpoch 40/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6653 - accuracy: 1.0000\nEpoch 41/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6643 - accuracy: 1.0000\nEpoch 42/100\n1/1 [==============================] - 0s 1ms/step - loss: 0.6633 - accuracy: 1.0000\nEpoch 43/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6623 - accuracy: 1.0000\nEpoch 44/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6612 - accuracy: 1.0000\nEpoch 45/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6601 - accuracy: 1.0000\nEpoch 46/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6590 - accuracy: 1.0000\nEpoch 47/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6579 - accuracy: 1.0000\nEpoch 48/100\n1/1 [==============================] - 0s 4ms/step - loss: 0.6567 - accuracy: 1.0000\nEpoch 49/100\n1/1 [==============================] - 0s 5ms/step - loss: 0.6556 - accuracy: 1.0000\nEpoch 50/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6544 - accuracy: 1.0000\nEpoch 51/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6531 - accuracy: 1.0000\nEpoch 52/100\n1/1 [==============================] - 0s 9ms/step - loss: 0.6519 - accuracy: 1.0000\nEpoch 53/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6506 - accuracy: 1.0000\nEpoch 54/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6493 - accuracy: 1.0000\nEpoch 55/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6479 - accuracy: 1.0000\nEpoch 56/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6465 - accuracy: 1.0000\nEpoch 57/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6451 - accuracy: 1.0000\nEpoch 58/100\n1/1 [==============================] - 0s 6ms/step - loss: 0.6437 - accuracy: 1.0000\nEpoch 59/100\n1/1 [==============================] - 0s 7ms/step - loss: 0.6422 - accuracy: 1.0000\nEpoch 60/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6407 - accuracy: 1.0000\nEpoch 61/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6392 - accuracy: 1.0000\nEpoch 62/100\n1/1 [==============================] - 0s 9ms/step - loss: 0.6376 - accuracy: 1.0000\nEpoch 63/100\n1/1 [==============================] - 0s 5ms/step - loss: 0.6360 - accuracy: 1.0000\nEpoch 64/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6343 - accuracy: 1.0000\nEpoch 65/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6326 - accuracy: 1.0000\nEpoch 66/100\n1/1 [==============================] - 0s 1ms/step - loss: 0.6309 - accuracy: 1.0000\nEpoch 67/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6291 - accuracy: 1.0000\nEpoch 68/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6273 - accuracy: 1.0000\nEpoch 69/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6255 - accuracy: 1.0000\nEpoch 70/100\n1/1 [==============================] - 0s 5ms/step - loss: 0.6236 - accuracy: 1.0000\nEpoch 71/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6217 - accuracy: 1.0000\nEpoch 
72/100\n1/1 [==============================] - ETA: 0s - loss: 0.6198 - accuracy: 1.00 - 0s 6ms/step - loss: 0.6198 - accuracy: 1.0000\nEpoch 73/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6178 - accuracy: 1.0000\nEpoch 74/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6157 - accuracy: 1.0000\nEpoch 75/100\n1/1 [==============================] - 0s 1ms/step - loss: 0.6136 - accuracy: 1.0000\nEpoch 76/100\n1/1 [==============================] - 0s 4ms/step - loss: 0.6115 - accuracy: 1.0000\nEpoch 77/100\n1/1 [==============================] - 0s 2ms/step - loss: 0.6093 - accuracy: 1.0000\nEpoch 78/100\n1/1 [==============================] - 0s 1ms/step - loss: 0.6071 - accuracy: 1.0000\nEpoch 79/100\n1/1 [==============================] - 0s 8ms/step - loss: 0.6049 - accuracy: 1.0000\nEpoch 80/100\n1/1 [==============================] - 0s 10ms/step - loss: 0.6026 - accuracy: 1.0000\nEpoch 81/100\n1/1 [==============================] - 0s 3ms/step - loss: 0.6002 - accuracy: 1.0000\nEpoch 82/100\n1/1 [==============================] - 0s 4ms/step - loss: 0.5978 - accuracy: 1.0000\nEpoch 83/100\n1/1 [==============================] - 0s 4ms/step - loss: 0.5954 - accuracy: 1.0000\nEpoch 84/100\n"
],
[
"# Entrada dos dados do usuario\ntest = []\ntest.append(input(\"Digite a data_inversa:\\nEx:'2019-01-01'\\n\"))\ntest.append(input(\"Digite o dia_semana:\\nEx:'terça-feira'\\n\"))\ntest.append(input(\"Digite a horario:\\nEx:'01:30:00'\\n\"))\ntest.append(input(\"Digite a uf:\\nEx:'SP'\\n\"))\ntest.append(input(\"Digite a br:\\nEx:'116'\\n\"))\ntest.append(input(\"Digite a km:\\nEx:'218'\\n\"))\ntest.append(input(\"Digite a condicao_metereologica:\\nEx:'Céu Claro'\\n\"))\ntest.append(input(\"Digite a latitude:\\nEx:'-23,46052014'\\n\"))\ntest.append(input(\"Digite a longitude:\\nEx:'-46,48772478'\\n\"))\n\ntest = pd.DataFrame({'dados': test})\ntest = encoder.fit_transform(test.dados)\ntest = np.reshape(test, (-1, 1))\ntest = sc.fit_transform(test[:])\ntest = np.reshape(test, (1, -1))",
"Digite a data_inversa:\nEx:'2019-01-01'\n2020-11-20\nDigite o dia_semana:\nEx:'terça-feira'\nsexta-feira\nDigite a horario:\nEx:'01:30:00'\n12:00:00\nDigite a uf:\nEx:'SP'\nDF\nDigite a br:\nEx:'116'\n101\nDigite a km:\nEx:'218'\n343\nDigite a condicao_metereologica:\nEx:'Céu Claro'\nCéu Claro\nDigite a latitude:\nEx:'-23,46052014'\n-28.5197545\nDigite a longitude:\nEx:'-46,48772478'\n-49.0560047\n"
],
[
"# Predicao\ny_pred = classifier.predict(test)\nvalue = round(float(y_pred[0][0])*100, 2)\nprint(\"\\n\\n---> Probabilidade de ocorrer acidente: \", value)",
"\n\n---> Probabilidade de ocorrer acidente: 57.02\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74ddb3e38fbc713bc63e991ed8594758a4a39a1 | 136,411 | ipynb | Jupyter Notebook | tensorflow_intro/part2-loading_your_own_data.ipynb | pbrainz/intro-to-ml | e09ce282a91ee8a2a2649d13e97040e1dabaf5ea | [
"0BSD"
] | null | null | null | tensorflow_intro/part2-loading_your_own_data.ipynb | pbrainz/intro-to-ml | e09ce282a91ee8a2a2649d13e97040e1dabaf5ea | [
"0BSD"
] | null | null | null | tensorflow_intro/part2-loading_your_own_data.ipynb | pbrainz/intro-to-ml | e09ce282a91ee8a2a2649d13e97040e1dabaf5ea | [
"0BSD"
] | null | null | null | 347.987245 | 113,022 | 0.931736 | [
[
[
"### Packages",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nfrom tqdm import tqdm\nimport random",
"_____no_output_____"
]
],
[
[
"### Initialize Data",
"_____no_output_____"
]
],
[
[
"DATADIR = \"/DriveArchive1/NN_DATASETS/PetImages\"\n\nCATEGORIES = [\"Dog\", \"Cat\"]\n\nfor category in CATEGORIES: # do dogs and cats\n path = os.path.join(DATADIR,category) # create path to dogs and cats\n for img in os.listdir(path): # iterate over each image per dogs and cats\n img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE) # convert to array\n plt.imshow(img_array, cmap='gray') # graph it\n plt.show() # display!\n\n break # we just want one for now so break\n break #...and one more!",
"_____no_output_____"
],
[
"print(img_array)",
"[[228 227 226 ... 51 55 67]\n [222 222 221 ... 78 83 79]\n [219 219 218 ... 115 123 107]\n ...\n [180 164 172 ... 145 162 177]\n [197 166 160 ... 143 158 172]\n [205 171 160 ... 146 161 174]]\n"
],
[
"print(img_array.shape)",
"(333, 500)\n"
],
[
"IMG_SIZE = 50\n\nnew_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\nplt.imshow(new_array, cmap='gray')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Build Training Data",
"_____no_output_____"
]
],
[
[
"training_data = []\n\ndef create_training_data():\n for category in CATEGORIES: # do dogs and cats\n\n path = os.path.join(DATADIR,category) # create path to dogs and cats\n class_num = CATEGORIES.index(category) # get the classification (0 or a 1). 0=dog 1=cat\n\n for img in tqdm(os.listdir(path)): # iterate over each image per dogs and cats\n try:\n img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE) # convert to array\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize to normalize data size\n training_data.append([new_array, class_num]) # add this to our training_data\n except Exception as e: # in the interest in keeping the output clean...\n pass\n #except OSError as e:\n # print(\"OSErrroBad img most likely\", e, os.path.join(path,img))\n #except Exception as e:\n # print(\"general exception\", e, os.path.join(path,img))\n\ncreate_training_data()\n\nprint(len(training_data))",
" 4%|▍ | 490/12501 [00:00<00:07, 1577.10it/s]Warning: unknown JFIF revision number 0.00\n 18%|█▊ | 2213/12501 [00:01<00:06, 1528.94it/s]Corrupt JPEG data: 226 extraneous bytes before marker 0xd9\n 39%|███▊ | 4835/12501 [00:03<00:05, 1501.41it/s]Corrupt JPEG data: 65 extraneous bytes before marker 0xd9\n 41%|████ | 5142/12501 [00:03<00:04, 1482.51it/s]Corrupt JPEG data: 1403 extraneous bytes before marker 0xd9\n 46%|████▌ | 5760/12501 [00:03<00:04, 1501.34it/s]Corrupt JPEG data: 162 extraneous bytes before marker 0xd9\n 71%|███████ | 8858/12501 [00:05<00:02, 1479.79it/s]Corrupt JPEG data: 2230 extraneous bytes before marker 0xd9\n 73%|███████▎ | 9160/12501 [00:06<00:02, 1491.64it/s]Corrupt JPEG data: 399 extraneous bytes before marker 0xd9\n 98%|█████████▊| 12297/12501 [00:08<00:00, 1520.55it/s]Corrupt JPEG data: 254 extraneous bytes before marker 0xd9\n100%|██████████| 12501/12501 [00:08<00:00, 1518.68it/s]\n 6%|▌ | 688/12501 [00:00<00:06, 1706.07it/s]Corrupt JPEG data: 239 extraneous bytes before marker 0xd9\n 49%|████▉ | 6129/12501 [00:03<00:03, 1691.63it/s]Corrupt JPEG data: 99 extraneous bytes before marker 0xd9\n 69%|██████▉ | 8650/12501 [00:05<00:02, 1639.52it/s]Corrupt JPEG data: 214 extraneous bytes before marker 0xd9\n 73%|███████▎ | 9148/12501 [00:05<00:02, 1628.00it/s]Corrupt JPEG data: 128 extraneous bytes before marker 0xd9\n 81%|████████ | 10142/12501 [00:06<00:01, 1650.63it/s]Corrupt JPEG data: 1153 extraneous bytes before marker 0xd9\n100%|██████████| 12501/12501 [00:07<00:00, 1660.18it/s]"
]
],
[
[
"**shuffle data**",
"_____no_output_____"
]
],
[
[
"random.shuffle(training_data)\nfor sample in training_data[:10]:\n print(sample[1])",
"0\n0\n0\n0\n1\n1\n1\n0\n1\n0\n"
]
],
[
[
"### Make a Model",
"_____no_output_____"
]
],
[
[
"X = []\ny = []\n\nfor features,label in training_data:\n X.append(features)\n y.append(label)\n\nprint(X[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))\n\nX = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)",
"[[[[ 52]\n [124]\n [134]\n ...\n [ 93]\n [196]\n [ 91]]\n\n [[123]\n [ 79]\n [129]\n ...\n [179]\n [101]\n [100]]\n\n [[127]\n [130]\n [ 93]\n ...\n [ 84]\n [ 92]\n [ 91]]\n\n ...\n\n [[141]\n [140]\n [141]\n ...\n [123]\n [ 92]\n [157]]\n\n [[132]\n [ 95]\n [125]\n ...\n [121]\n [160]\n [ 99]]\n\n [[165]\n [143]\n [124]\n ...\n [161]\n [161]\n [158]]]]\n"
]
],
[
[
"**Export Data**",
"_____no_output_____"
]
],
[
[
"import pickle\n\npickle_out = open(\"X.pickle\",\"wb\")\npickle.dump(X, pickle_out)\npickle_out.close()\n\npickle_out = open(\"y.pickle\",\"wb\")\npickle.dump(y, pickle_out)\npickle_out.close()",
"_____no_output_____"
]
],
[
[
"**Import Data**",
"_____no_output_____"
]
],
[
[
"pickle_in = open(\"X.pickle\",\"rb\")\nX = pickle.load(pickle_in)\n\npickle_in = open(\"y.pickle\",\"rb\")\ny = pickle.load(pickle_in)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74de4f9f228bfc63966005929db7389212421ce | 12,723 | ipynb | Jupyter Notebook | Certification 1/Week1.2 - Gauss Distribution.ipynb | The-Brains/MathForMachineLearning | 5cbd9006f166059efaa2f312b741e64ce584aa1f | [
"MIT"
] | 6 | 2018-04-16T02:53:59.000Z | 2021-05-16T06:51:57.000Z | Certification 1/Week1.2 - Gauss Distribution.ipynb | The-Brains/MathForMachineLearning | 5cbd9006f166059efaa2f312b741e64ce584aa1f | [
"MIT"
] | null | null | null | Certification 1/Week1.2 - Gauss Distribution.ipynb | The-Brains/MathForMachineLearning | 5cbd9006f166059efaa2f312b741e64ce584aa1f | [
"MIT"
] | 4 | 2019-05-20T02:06:55.000Z | 2020-05-18T06:21:41.000Z | 144.579545 | 11,004 | 0.903639 | [
[
[
"%matplotlib inline\n\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"def gauss(x, sigma=3, mu=3):\n sigma2Pi = sigma * np.sqrt(2 * np.pi)\n xMuSquared = np.power(x - mu, 2)\n Sigma2Squared = (2 * sigma * sigma)\n return (1 / sigma2Pi ) *\\\n np.exp(- xMuSquared / Sigma2Squared)",
"_____no_output_____"
],
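[
    "# The function above implements the normal probability density\n# f(x) = exp(-(x - mu)**2 / (2 * sigma**2)) / (sigma * sqrt(2 * pi))\n# Quick sanity check (sketch): at x = mu the density should equal 1 / (sigma * sqrt(2 * pi)).\nprint(gauss(3), 1 / (3 * np.sqrt(2 * np.pi)))",
    "_____no_output_____"
],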
[
"x = np.linspace(-10, 10, 100)\nplt.plot(x, gauss(x))\naxes = plt.gca()\naxes.set_xlim([0,10])\naxes.set_ylim([0,.5])\nplt.title('Gauss Distribution')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e74de57d145bf8965be09c2f989ca6e8a3b297a7 | 141,088 | ipynb | Jupyter Notebook | LeastSquaresMethod.ipynb | jraska1/py-notebooks | 9d113bec33bc672f5c60ba4f9d0c8f5005b40e2c | [
"Apache-2.0"
] | null | null | null | LeastSquaresMethod.ipynb | jraska1/py-notebooks | 9d113bec33bc672f5c60ba4f9d0c8f5005b40e2c | [
"Apache-2.0"
] | null | null | null | LeastSquaresMethod.ipynb | jraska1/py-notebooks | 9d113bec33bc672f5c60ba4f9d0c8f5005b40e2c | [
"Apache-2.0"
] | null | null | null | 258.40293 | 46,588 | 0.909992 | [
[
[
"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom scipy.optimize import least_squares, curve_fit",
"_____no_output_____"
],
[
"def func(theta, t):\n return theta[0] / (1 + np.exp(- theta[1] * (t - theta[2])))\n\ndef lin(theta, t):\n return theta[0] + theta[1] * t\n\ndef qua(theta, t):\n return theta[0] + theta[1] * t + theta[2] * t ** 2\n\n\nu = np.linspace(0, 1, num=20)\n\nK = 1; r = 10; t0 = 0.5; \n# v = func((K, r, t0), u)\n# plt.plot(u, v, label=\"func(1, 10, 0.5)\")\nm = func((K, r, t0), u) + 0.1 * (np.random.rand(u.shape[0]) - 0.5)\n\nplt.plot(u, m, label=\"measure\")\n\ndef f(theta):\n return func(theta, u) - m\n\ndef f2(theta):\n return lin(theta, u) - m\n\ndef f3(theta):\n return qua(theta, u) - m\n\n\ntheta0 = [1,2,3]\nres = least_squares(f, theta0)\n\nif res.success:\n print(res.optimality, res.x)\n print(res)\n r = func(res.x, u)\n plt.plot(u, r, label=\"resulting F1\")\nelse:\n print(\"Failed\")\n\ntheta0 = [0, 1]\nres = least_squares(f2, theta0)\n\nif res.success:\n print(res.optimality, res.x)\n r = lin(res.x, u)\n plt.plot(u, r, label=\"resulting F2\")\nelse:\n print(\"Failed\")\n\n\ntheta0 = [0, 0.5, 0.5]\nres = least_squares(f3, theta0)\n\nif res.success:\n print(res.optimality, res.x)\n r = qua(res.x, u)\n plt.plot(u, r, label=\"resulting F3\")\nelse:\n print(\"Failed\")\n \n \nplt.legend(bbox_to_anchor=(1.04,1), loc=\"upper left\")\nplt.show()\n ",
"4.429860642196637e-08 [ 0.98362968 10.13564866 0.50012053]\n active_mask: array([0., 0., 0.])\n cost: 0.0068293516030714894\n fun: array([-0.00314312, -0.03732148, 0.01972365, 0.00147108, -0.04924982,\n -0.01017209, 0.00618423, 0.04158977, -0.00770687, 0.02021831,\n -0.04641298, 0.00951415, -0.00640073, 0.02927495, -0.00945123,\n 0.02476681, -0.04449791, -0.00998919, 0.03331985, -0.00800885])\n grad: array([-1.94456277e-10, -1.28593900e-08, -4.42986064e-08])\n jac: array([[ 0.00624912, -0.00305494, -0.06191267],\n [ 0.01060686, -0.00461923, -0.10462582],\n [ 0.01794853, -0.00684597, -0.17573014],\n [ 0.03021663, -0.00986427, -0.29214863],\n [ 0.05043947, -0.01364315, -0.47750326],\n [ 0.08303782, -0.01774757, -0.75912017],\n [ 0.13373683, -0.0210054 , -1.15500545],\n [ 0.20835667, -0.02136745, -1.64444787],\n [ 0.30972518, -0.01662763, -2.13148218],\n [ 0.43341062, -0.00638558, -2.448224 ],\n [ 0.5659893 , 0.00632941, -2.4490172 ],\n [ 0.68975221, 0.01659234, -2.13346232],\n [ 0.79124004, 0.02135873, -1.64679161],\n [ 0.86597986, 0.0210155 , -1.15707381],\n [ 0.91677595, 0.01776567, -0.7606682 ],\n [ 0.94944338, 0.01366177, -0.47855332],\n [ 0.9697117 , 0.00987998, -0.29282005],\n [ 0.98200835, 0.00685792, -0.17614459],\n [ 0.98936747, 0.0046278 , -0.10487634],\n [ 0.99373569, 0.00306084, -0.06206222]])\n message: '`ftol` termination condition is satisfied.'\n nfev: 21\n njev: 17\n optimality: 4.429860642196637e-08\n status: 2\n success: True\n x: array([ 0.98362968, 10.13564866, 0.50012053])\n5.609805231454601e-09 [-0.12408964 1.236215 ]\n4.428118483001953e-09 [-0.11901131 1.20405218 0.03216282]\n"
],
[
"data_path = './AlgorithmComplexity'\nsample = 'data01.txt'\n\nwith open(os.path.join(data_path, sample), 'r') as f:\n complexity = f.readline()\n values_x, values_y = [], []\n for l in f:\n x, y = l.split()\n values_x.append(float(x))\n values_y.append(float(y) / 1000)\n \n# print(values_x)\n# print(values_y)\n\nx = np.array(values_x, dtype=float)\ny = np.array(values_y, dtype=float)\nmax_y = np.max(y)\ny += 0.3 * max_y * (np.random.rand(y.shape[0]) - 0.5)\n\ndef lin(theta, t):\n return theta[0] + theta[1] * t\n\ndef f(theta):\n return lin(theta, x) - y\n\ntheta0 = [0, 1]\nres = least_squares(f, theta0)\n\nif res.success:\n plt.plot(x, y, label=\"measured values\")\n\n print(f\"FUNCTION: {res.x[0]:6.3f} + {res.x[1]:6.3f} * x\")\n r = lin(res.x, x)\n plt.plot(x, r, label=\"computed values\")\nelse:\n print(\"Failed\")\n\nplt.legend(bbox_to_anchor=(1.04,1), loc=\"upper left\")\nplt.show()\n",
"FUNCTION: 184.536 + -0.000 * x\n"
],
[
"data_path = './AlgorithmComplexity'\nsample = 'data02.txt'\n\nwith open(os.path.join(data_path, sample), 'r') as f:\n complexity = f.readline()\n values_x, values_y = [], []\n for l in f:\n x, y = l.split()\n values_x.append(float(x))\n values_y.append(float(y) / 1000)\n \nx = np.array(values_x, dtype=float)\ny = np.array(values_y, dtype=float)\nmax_y = np.max(y)\ny += 0.3 * max_y * (np.random.rand(y.shape[0]) - 0.5)\n\ndef lin(theta, t):\n return theta[0] + theta[1] * t\n\ndef f(theta):\n return lin(theta, x) - y\n\ntheta0 = [0, 1]\nres = least_squares(f, theta0)\n\nif res.success:\n plt.plot(x, y, label=\"measured values\")\n\n print(f\"FUNCTION: {res.x[0]:6.3f} + {res.x[1]:6.3f} * x\")\n r = lin(res.x, x)\n plt.plot(x, r, label=\"computed values\")\nelse:\n print(\"Failed\")\n\nplt.legend(bbox_to_anchor=(1.04,1), loc=\"upper left\")\nplt.show()\n",
"FUNCTION: 2.837 + 0.025 * x\n"
],
[
"data_path = './AlgorithmComplexity'\nsample = 'data07.txt'\n\nwith open(os.path.join(data_path, sample), 'r') as f:\n complexity = f.readline()\n print(f\"COMPLEXITY: {complexity}\")\n values_x, values_y = [], []\n for l in f:\n x, y = l.split()\n values_x.append(float(x))\n values_y.append(float(y) / 1000)\n \nx = np.array(values_x, dtype=float)\ny = np.array(values_y, dtype=float)\nmax_y = np.max(y)\ny += 0.1 * max_y * (np.random.rand(y.shape[0]) - 0.5)\n\ndef lin(theta, t):\n return theta[0] + theta[1]*t + theta[2]*t**2 + theta[3]*t**3\n\ndef f(theta):\n return lin(theta, x) - y\n\ntheta0 = (0, 1, 1, 1)\nres = least_squares(f, theta0)\n\nif res.success:\n plt.plot(x, y, label=\"measured values\")\n\n print(f\"FUNCTION: {res.x[0]:5.3f} + {res.x[1]:5.3f}x + {res.x[2]:5.3f}x^2 + {res.x[3]:5.3f}x^3\")\n r = lin(res.x, x)\n plt.plot(x, r, label=\"computed values\")\nelse:\n print(\"Failed\")\n\nplt.legend(bbox_to_anchor=(1.04,1), loc=\"upper left\")\nplt.show()\n",
"COMPLEXITY: O(2^n)\n\nFUNCTION: -1742.901 + 741.420x + -80.676x^2 + 2.480x^3\n"
],
[
"data_path = './AlgorithmComplexity'\nsample = 'data05.txt'\n\nwith open(os.path.join(data_path, sample), 'r') as f:\n complexity = f.readline()\n print(f\"COMPLEXITY: {complexity}\")\n values_x, values_y = [], []\n for l in f:\n x, y = l.split()\n values_x.append(float(x))\n values_y.append(float(y) / 1000)\n \nx = np.array(values_x, dtype=float)\ny = np.array(values_y, dtype=float)\nmax_y = np.max(y)\ny += 0.1 * max_y * (np.random.rand(y.shape[0]) - 0.5)\n\ndef quadratic(x, a, b, c):\n return a + b*x + c*x**2\n\n# params0 = (0, 1, 1)\npopt, pcov = curve_fit(quadratic, x, y)\n\nprint(popt)\nprint(pcov)\nprint(np.diag(pcov))\nperr = np.sqrt(np.diag(pcov))\nprint(perr)",
"COMPLEXITY: O(n^2 log n)\n\n[-30.98044507 -1.81750838 0.05280962]\n[[ 5.73992976e+03 -7.27324991e+01 1.96401357e-01]\n [-7.27324991e+01 1.05000695e+00 -3.02095447e-03]\n [ 1.96401357e-01 -3.02095447e-03 9.07193537e-06]]\n[5.73992976e+03 1.05000695e+00 9.07193537e-06]\n[7.57623241e+01 1.02469847e+00 3.01196537e-03]\n"
],
[
"data_path = './AlgorithmComplexity'\ndata_sets = [\n 'data01.txt',\n 'data02.txt',\n 'data03.txt',\n 'data04.txt',\n 'data05.txt',\n 'data06.txt',\n 'data07.txt',\n]\n\nnp.set_printoptions(precision=3)\n\nfor sample in data_sets:\n with open(os.path.join(data_path, sample), 'r') as f:\n complexity = f.readline()\n print(f\"DATASET: {sample}\")\n print(f\"COMPLEXITY: {complexity}\")\n values_x, values_y = [], []\n for l in f:\n x, y = l.split()\n values_x.append(float(x))\n values_y.append(float(y) / 1000)\n \n x = np.array(values_x, dtype=float)\n y = np.array(values_y, dtype=float)\n max_y = np.max(y)\n y += 0.1 * max_y * (np.random.rand(y.shape[0]) - 0.5)\n\n def quadratic(x, a, b):\n return a + b*x\n\n # params0 = (0, 1, 1)\n popt, pcov = curve_fit(quadratic, x, y)\n\n# print(popt)\n# print(pcov)\n perr = np.sqrt(np.diag(pcov))\n print(perr)\n\n print('-'*40)",
"DATASET: data01.txt\nCOMPLEXITY: O(1)\n\n[1.679e+00 2.926e-04]\n----------------------------------------\nDATASET: data02.txt\nCOMPLEXITY: O(n)\n\n[1.532e+00 2.672e-04]\n----------------------------------------\nDATASET: data03.txt\nCOMPLEXITY: O(n log n)\n\n[5.216e+00 9.056e-04]\n----------------------------------------\nDATASET: data04.txt\nCOMPLEXITY: O(n^2)\n\n[3.494e+01 6.742e-03]\n----------------------------------------\nDATASET: data05.txt\nCOMPLEXITY: O(n^2 log n)\n\n[108.691 0.591]\n----------------------------------------\nDATASET: data06.txt\nCOMPLEXITY: O(n^3)\n\n[105.127 0.365]\n----------------------------------------\nDATASET: data07.txt\nCOMPLEXITY: O(2^n)\n\n[550.047 36.256]\n----------------------------------------\n"
],
[
"x = np.concatenate((np.arange(10, 100, 20), np.arange(100, 1001, 200)))\n\ny = 1.15 + 0.23 * np.power(x, 2)\n\n# v = func((K, r, t0), u)\n# plt.plot(u, v, label=\"func(1, 10, 0.5)\")\n# m = func((K, r, t0), u) + 0.1 * (np.random.rand(u.shape[0]) - 0.5)\n\nnp.set_printoptions(precision=3)\nnp.set_printoptions(suppress=True)\n\nz = y + y * (np.random.rand(y.shape[0]) - 0.5) * 0.1\n\nprint(x)\nprint(y)\nprint(z)\n\n# print(np.hstack([x, y]))\n",
"[ 10 30 50 70 90 100 300 500 700 900]\n[ 24.15 208.15 576.15 1128.15 1864.15 2301.15 20701.15\n 57501.15 112701.15 186301.15]\n[ 24.818 206.467 576.694 1142.221 1861.093 2239.507\n 21175.354 58231.456 115257.191 191215.308]\n"
]
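,
[
    "# A possible follow-up (sketch): fit the noisy synthetic data above with curve_fit and compare the\n# recovered coefficients with the true values 1.15 and 0.23 used to generate y.\ndef quad_model(t, a, c):\n    return a + c * t**2\n\npopt, pcov = curve_fit(quad_model, x, z)\nprint(popt)                     # should be close to [1.15, 0.23]\nprint(np.sqrt(np.diag(pcov)))   # one-standard-deviation parameter errors",
    "_____no_output_____"
]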
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74df75d800b2842a4e38dc6610dc4bfbc6f4b54 | 31,330 | ipynb | Jupyter Notebook | 04.Keras-CNN/.ipynb_checkpoints/05.Visualizing CNN Layers (Grad and Weight) -checkpoint.ipynb | IoIoToTM/Deep-Learning-Python | 174664dbeaf68c5a0d55cfaeea4987586062f68e | [
"MIT"
] | null | null | null | 04.Keras-CNN/.ipynb_checkpoints/05.Visualizing CNN Layers (Grad and Weight) -checkpoint.ipynb | IoIoToTM/Deep-Learning-Python | 174664dbeaf68c5a0d55cfaeea4987586062f68e | [
"MIT"
] | null | null | null | 04.Keras-CNN/.ipynb_checkpoints/05.Visualizing CNN Layers (Grad and Weight) -checkpoint.ipynb | IoIoToTM/Deep-Learning-Python | 174664dbeaf68c5a0d55cfaeea4987586062f68e | [
"MIT"
] | 1 | 2020-08-29T06:32:04.000Z | 2020-08-29T06:32:04.000Z | 43.574409 | 115 | 0.616725 | [
[
[
"import numpy as np\nimport time\nfrom keras.preprocessing.image import save_img\nfrom keras.applications import VGG16\nfrom keras import backend as K\nfrom keras.models import Model\n\n\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.preprocessing import image\n\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"Using TensorFlow backend.\n"
],
[
"img_width, img_height = 128, 128",
"_____no_output_____"
],
[
"if K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\nelse:\n input_shape = (img_width, img_height, 3)",
"_____no_output_____"
],
[
"base_model = VGG16(weights='imagenet', include_top=False, input_shape=input_shape)\nlayer_dict = dict([(layer.name, layer) for layer in base_model.layers])\nbase_model.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) (None, 128, 128, 3) 0 \n_________________________________________________________________\nblock1_conv1 (Conv2D) (None, 128, 128, 64) 1792 \n_________________________________________________________________\nblock1_conv2 (Conv2D) (None, 128, 128, 64) 36928 \n_________________________________________________________________\nblock1_pool (MaxPooling2D) (None, 64, 64, 64) 0 \n_________________________________________________________________\nblock2_conv1 (Conv2D) (None, 64, 64, 128) 73856 \n_________________________________________________________________\nblock2_conv2 (Conv2D) (None, 64, 64, 128) 147584 \n_________________________________________________________________\nblock2_pool (MaxPooling2D) (None, 32, 32, 128) 0 \n_________________________________________________________________\nblock3_conv1 (Conv2D) (None, 32, 32, 256) 295168 \n_________________________________________________________________\nblock3_conv2 (Conv2D) (None, 32, 32, 256) 590080 \n_________________________________________________________________\nblock3_conv3 (Conv2D) (None, 32, 32, 256) 590080 \n_________________________________________________________________\nblock3_pool (MaxPooling2D) (None, 16, 16, 256) 0 \n_________________________________________________________________\nblock4_conv1 (Conv2D) (None, 16, 16, 512) 1180160 \n_________________________________________________________________\nblock4_conv2 (Conv2D) (None, 16, 16, 512) 2359808 \n_________________________________________________________________\nblock4_conv3 (Conv2D) (None, 16, 16, 512) 2359808 \n_________________________________________________________________\nblock4_pool (MaxPooling2D) (None, 8, 8, 512) 0 \n_________________________________________________________________\nblock5_conv1 (Conv2D) (None, 8, 8, 512) 2359808 \n_________________________________________________________________\nblock5_conv2 (Conv2D) (None, 8, 8, 512) 2359808 \n_________________________________________________________________\nblock5_conv3 (Conv2D) (None, 8, 8, 512) 2359808 \n_________________________________________________________________\nblock5_pool (MaxPooling2D) (None, 4, 4, 512) 0 \n=================================================================\nTotal params: 14,714,688\nTrainable params: 14,714,688\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"img = image.load_img('horse.jpg', target_size=(img_width, img_height))\nimg = image.img_to_array(img)\nimg_ = np.expand_dims(img, 0) \nimg = preprocess_input(img_)\nprint(img.shape)",
"(1, 128, 128, 3)\n"
],
[
"feature = base_model.predict(img)\ninput_img = base_model.layers[0].input",
"_____no_output_____"
],
[
"def deprocess_image(x):\n # normalize tensor: center on 0., ensure std is 0.1\n x -= x.mean()\n x /= (x.std() + K.epsilon())\n x *= 0.1\n\n # clip to [0, 1]\n x += 0.5\n x = np.clip(x, 0, 1)\n\n # convert to RGB array\n x *= 255\n if K.image_data_format() == 'channels_first':\n x = x.transpose((1, 2, 0))\n x = np.clip(x, 0, 255).astype('uint8')\n return x\n\n\ndef normalize(x):\n # utility function to normalize a tensor by its L2 norm\n return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())",
"_____no_output_____"
],
[
"def get_filter(layer_name):\n \n layer_pool_extractor = Model(inputs=base_model.input, outputs=base_model.get_layer(layer_name).output)\n feature_maps = layer_pool_extractor.predict(img_)\n \n _, height, width, depth = feature_maps.shape\n\n kept_filters = []\n \n #more depth = greater computational time\n for filter_index in range(depth):\n #print('Processing filter {} of {} filters'.format((filter_index+1), depth), end=\"\")\n print(\"processing , will take time...\", end=\".\")\n start_time = time.time()\n \n layer_output = layer_dict[layer_name].output\n \n if K.image_data_format() == 'channels_first':\n loss = K.mean(layer_output[:, filter_index, :, :])\n else:\n loss = K.mean(layer_output[:, :, :, filter_index])\n\n #gradient of the input picture wrt this loss\n grads = K.gradients(loss, input_img)[0]\n\n #normalize the gradient\n grads = normalize(grads)\n\n #function returns the loss and grads given the input picture\n iterate = K.function([input_img], [loss, grads])\n \n # step size for gradient ascent\n step = 1.\n\n # we start from a gray image with some random noise\n if K.image_data_format() == 'channels_first':\n input_img_data = np.random.random((1, 3, img_width, img_height))\n else:\n input_img_data = np.random.random((1, img_width, img_height, 3))\n input_img_data = (input_img_data - 0.5) * 20 + 128\n\n # we run gradient ascent for 20 steps\n for i in range(20):\n loss_value, grads_value = iterate([input_img_data])\n input_img_data += grads_value * step\n\n if loss_value <= 0.:\n # some filters get stuck to 0, we can skip them\n break\n\n # decode the resulting input image\n if loss_value > 0:\n img = deprocess_image(input_img_data[0])\n kept_filters.append((img, loss_value))\n \n end_time = time.time()\n #print('\\n in %ds' % (end_time - start_time))\n\n return kept_filters",
"_____no_output_____"
],
[
"def plot_filter(kept_filters):\n n = np.int(np.floor(np.sqrt(np.shape(kept_filters)[0])))\n \n # the filters that have the highest loss are assumed to be better-looking.\n kept_filters.sort(key=lambda x: x[1], reverse=True)\n kept_filters = kept_filters[:n * n]\n\n margin = 5\n width = n * img_width + (n - 1) * margin\n height = n * img_height + (n - 1) * margin\n stitched_filters = np.zeros((width, height, 3))\n\n # fill the picture with our saved filters\n for i in range(n):\n for j in range(n):\n img, loss = kept_filters[i * n + j]\n stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,\n (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img\n\n # save the result to disk\n print('Done')\n save_img('stitched_filters_%dx%d.png' % (n, n), stitched_filters)",
"_____no_output_____"
],
[
"%%time\nlayername = 'block4_conv3'\nfilter_return = get_filter(layer_name=layername)",
"Processing filter 1 of 512 filters in 1s\nProcessing filter 2 of 512 filters in 0s\nProcessing filter 3 of 512 filters in 0s\nProcessing filter 4 of 512 filters in 0s\nProcessing filter 5 of 512 filters in 0s\nProcessing filter 6 of 512 filters in 0s\nProcessing filter 7 of 512 filters in 0s\nProcessing filter 8 of 512 filters in 0s\nProcessing filter 9 of 512 filters in 0s\nProcessing filter 10 of 512 filters in 0s\nProcessing filter 11 of 512 filters in 0s\nProcessing filter 12 of 512 filters in 0s\nProcessing filter 13 of 512 filters in 0s\nProcessing filter 14 of 512 filters in 0s\nProcessing filter 15 of 512 filters in 0s\nProcessing filter 16 of 512 filters in 0s\nProcessing filter 17 of 512 filters in 0s\nProcessing filter 18 of 512 filters in 0s\nProcessing filter 19 of 512 filters in 0s\nProcessing filter 20 of 512 filters in 0s\nProcessing filter 21 of 512 filters in 0s\nProcessing filter 22 of 512 filters in 0s\nProcessing filter 23 of 512 filters in 0s\nProcessing filter 24 of 512 filters in 0s\nProcessing filter 25 of 512 filters in 0s\nProcessing filter 26 of 512 filters in 0s\nProcessing filter 27 of 512 filters in 0s\nProcessing filter 28 of 512 filters in 0s\nProcessing filter 29 of 512 filters in 0s\nProcessing filter 30 of 512 filters in 0s\nProcessing filter 31 of 512 filters in 0s\nProcessing filter 32 of 512 filters in 0s\nProcessing filter 33 of 512 filters in 0s\nProcessing filter 34 of 512 filters in 0s\nProcessing filter 35 of 512 filters in 0s\nProcessing filter 36 of 512 filters in 0s\nProcessing filter 37 of 512 filters in 0s\nProcessing filter 38 of 512 filters in 0s\nProcessing filter 39 of 512 filters in 0s\nProcessing filter 40 of 512 filters in 0s\nProcessing filter 41 of 512 filters in 0s\nProcessing filter 42 of 512 filters in 0s\nProcessing filter 43 of 512 filters in 0s\nProcessing filter 44 of 512 filters in 0s\nProcessing filter 45 of 512 filters in 0s\nProcessing filter 46 of 512 filters in 0s\nProcessing filter 47 of 512 filters in 0s\nProcessing filter 48 of 512 filters in 0s\nProcessing filter 49 of 512 filters in 0s\nProcessing filter 50 of 512 filters in 0s\nProcessing filter 51 of 512 filters in 0s\nProcessing filter 52 of 512 filters in 0s\nProcessing filter 53 of 512 filters in 0s\nProcessing filter 54 of 512 filters in 0s\nProcessing filter 55 of 512 filters in 0s\nProcessing filter 56 of 512 filters in 0s\nProcessing filter 57 of 512 filters in 0s\nProcessing filter 58 of 512 filters in 0s\nProcessing filter 59 of 512 filters in 0s\nProcessing filter 60 of 512 filters in 0s\nProcessing filter 61 of 512 filters in 0s\nProcessing filter 62 of 512 filters in 0s\nProcessing filter 63 of 512 filters in 0s\nProcessing filter 64 of 512 filters in 0s\nProcessing filter 65 of 512 filters in 0s\nProcessing filter 66 of 512 filters in 0s\nProcessing filter 67 of 512 filters in 0s\nProcessing filter 68 of 512 filters in 0s\nProcessing filter 69 of 512 filters in 0s\nProcessing filter 70 of 512 filters in 0s\nProcessing filter 71 of 512 filters in 0s\nProcessing filter 72 of 512 filters in 0s\nProcessing filter 73 of 512 filters in 0s\nProcessing filter 74 of 512 filters in 0s\nProcessing filter 75 of 512 filters in 0s\nProcessing filter 76 of 512 filters in 0s\nProcessing filter 77 of 512 filters in 0s\nProcessing filter 78 of 512 filters in 0s\nProcessing filter 79 of 512 filters in 0s\nProcessing filter 80 of 512 filters in 0s\nProcessing filter 81 of 512 filters in 0s\nProcessing filter 82 of 512 filters in 0s\nProcessing filter 83 of 512 filters 
in 0s\nProcessing filter 84 of 512 filters in 0s\nProcessing filter 85 of 512 filters in 0s\nProcessing filter 86 of 512 filters in 0s\nProcessing filter 87 of 512 filters in 0s\nProcessing filter 88 of 512 filters in 0s\nProcessing filter 89 of 512 filters in 0s\nProcessing filter 90 of 512 filters in 0s\nProcessing filter 91 of 512 filters in 0s\nProcessing filter 92 of 512 filters in 0s\nProcessing filter 93 of 512 filters in 0s\nProcessing filter 94 of 512 filters in 0s\nProcessing filter 95 of 512 filters in 0s\nProcessing filter 96 of 512 filters in 0s\nProcessing filter 97 of 512 filters in 0s\nProcessing filter 98 of 512 filters in 0s\nProcessing filter 99 of 512 filters in 0s\nProcessing filter 100 of 512 filters in 0s\nProcessing filter 101 of 512 filters in 0s\nProcessing filter 102 of 512 filters in 0s\nProcessing filter 103 of 512 filters in 0s\nProcessing filter 104 of 512 filters in 0s\nProcessing filter 105 of 512 filters in 0s\nProcessing filter 106 of 512 filters in 0s\nProcessing filter 107 of 512 filters in 0s\nProcessing filter 108 of 512 filters in 0s\nProcessing filter 109 of 512 filters in 0s\nProcessing filter 110 of 512 filters in 0s\nProcessing filter 111 of 512 filters in 0s\nProcessing filter 112 of 512 filters in 0s\nProcessing filter 113 of 512 filters in 0s\nProcessing filter 114 of 512 filters in 0s\nProcessing filter 115 of 512 filters in 0s\nProcessing filter 116 of 512 filters in 0s\nProcessing filter 117 of 512 filters in 0s\nProcessing filter 118 of 512 filters in 0s\nProcessing filter 119 of 512 filters in 0s\nProcessing filter 120 of 512 filters in 0s\nProcessing filter 121 of 512 filters in 0s\nProcessing filter 122 of 512 filters in 0s\nProcessing filter 123 of 512 filters in 0s\nProcessing filter 124 of 512 filters in 0s\nProcessing filter 125 of 512 filters in 0s\nProcessing filter 126 of 512 filters in 0s\nProcessing filter 127 of 512 filters in 0s\nProcessing filter 128 of 512 filters in 0s\nProcessing filter 129 of 512 filters in 0s\nProcessing filter 130 of 512 filters in 0s\nProcessing filter 131 of 512 filters in 1s\nProcessing filter 132 of 512 filters in 0s\nProcessing filter 133 of 512 filters in 0s\nProcessing filter 134 of 512 filters in 0s\nProcessing filter 135 of 512 filters in 0s\nProcessing filter 136 of 512 filters in 0s\nProcessing filter 137 of 512 filters in 0s\nProcessing filter 138 of 512 filters in 0s\nProcessing filter 139 of 512 filters in 0s\nProcessing filter 140 of 512 filters in 0s\nProcessing filter 141 of 512 filters in 0s\nProcessing filter 142 of 512 filters in 0s\nProcessing filter 143 of 512 filters in 0s\nProcessing filter 144 of 512 filters in 0s\nProcessing filter 145 of 512 filters in 0s\nProcessing filter 146 of 512 filters in 0s\nProcessing filter 147 of 512 filters in 0s\nProcessing filter 148 of 512 filters in 0s\nProcessing filter 149 of 512 filters in 0s\nProcessing filter 150 of 512 filters in 0s\nProcessing filter 151 of 512 filters in 0s\nProcessing filter 152 of 512 filters in 0s\nProcessing filter 153 of 512 filters in 0s\nProcessing filter 154 of 512 filters in 0s\nProcessing filter 155 of 512 filters in 0s\nProcessing filter 156 of 512 filters in 0s\nProcessing filter 157 of 512 filters in 0s\nProcessing filter 158 of 512 filters in 0s\nProcessing filter 159 of 512 filters in 0s\nProcessing filter 160 of 512 filters in 0s\nProcessing filter 161 of 512 filters in 0s\nProcessing filter 162 of 512 filters in 0s\nProcessing filter 163 of 512 filters in 0s\nProcessing filter 164 of 512 filters in 
0s\nProcessing filter 165 of 512 filters in 0s\nProcessing filter 166 of 512 filters in 0s\nProcessing filter 167 of 512 filters in 0s\nProcessing filter 168 of 512 filters in 0s\nProcessing filter 169 of 512 filters in 1s\nProcessing filter 170 of 512 filters in 1s\nProcessing filter 171 of 512 filters in 0s\nProcessing filter 172 of 512 filters in 0s\nProcessing filter 173 of 512 filters in 1s\nProcessing filter 174 of 512 filters in 0s\nProcessing filter 175 of 512 filters in 0s\nProcessing filter 176 of 512 filters in 1s\nProcessing filter 177 of 512 filters in 0s\nProcessing filter 178 of 512 filters in 0s\nProcessing filter 179 of 512 filters in 0s\nProcessing filter 180 of 512 filters in 1s\nProcessing filter 181 of 512 filters in 1s\nProcessing filter 182 of 512 filters in 1s\nProcessing filter 183 of 512 filters in 0s\nProcessing filter 184 of 512 filters in 1s\nProcessing filter 185 of 512 filters in 0s\nProcessing filter 186 of 512 filters in 0s\nProcessing filter 187 of 512 filters in 0s\nProcessing filter 188 of 512 filters in 1s\nProcessing filter 189 of 512 filters in 1s\nProcessing filter 190 of 512 filters in 0s\nProcessing filter 191 of 512 filters in 0s\nProcessing filter 192 of 512 filters in 0s\nProcessing filter 193 of 512 filters in 1s\n"
],
[
"plot_filter(kept_filters=filter_return)",
"_____no_output_____"
],
[
"print (1)",
"1\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74e11553f718b888d09124c1326b45c91855202 | 109,949 | ipynb | Jupyter Notebook | notebooks/exploratory/01_rh_output.ipynb | rhangelxs/credit_scoring | 4a5ce41aeea285cacf86b85904e89eb4a2e20f46 | [
"MIT"
] | null | null | null | notebooks/exploratory/01_rh_output.ipynb | rhangelxs/credit_scoring | 4a5ce41aeea285cacf86b85904e89eb4a2e20f46 | [
"MIT"
] | null | null | null | notebooks/exploratory/01_rh_output.ipynb | rhangelxs/credit_scoring | 4a5ce41aeea285cacf86b85904e89eb4a2e20f46 | [
"MIT"
] | null | null | null | 473.918103 | 53,388 | 0.942592 | [
[
[
"%run '../00_rh_settings.ipynb'\n%run '../00_rh_load_interim.ipynb'",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\nPopulating the interactive namespace from numpy and matplotlib\n"
],
[
"df.shape",
"_____no_output_____"
],
[
"df[\"bad_flag\"].describe()",
"_____no_output_____"
],
[
"df[\"bad_flag\"].isna().sum()",
"_____no_output_____"
],
[
"sns.countplot(df['bad_flag'])\nplt.xlabel('Target', fontsize=12)\nplt.title(\"Target Histogram\", fontsize=14)",
"/usr/local/lib/python3.6/site-packages/seaborn/categorical.py:1428: FutureWarning: remove_na is deprecated and is a private function. Do not use.\n stat_data = remove_na(group_data)\n"
],
[
"(df[\"bad_flag\"]\n .value_counts(normalize=True)\n .reset_index()\n .pipe((sns.barplot, \"data\"), x=\"bad_flag\", y=\"bad_flag\"))",
"/usr/local/lib/python3.6/site-packages/seaborn/categorical.py:1428: FutureWarning: remove_na is deprecated and is a private function. Do not use.\n stat_data = remove_na(group_data)\n"
]
],
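
Before reading off a conclusion from the plots, it can help to quantify the imbalance explicitly. A minimal sketch, reusing the `df` and `bad_flag` names from the cells above:

```python
# Quantify the class imbalance of the target rather than judging it from the bar plot alone.
counts = df["bad_flag"].value_counts()
imbalance_ratio = counts.max() / float(counts.min())

print(counts)
print("Imbalance ratio (majority : minority) = %.1f : 1" % imbalance_ratio)

# If a classifier is fit later, class_weight='balanced' in scikit-learn estimators
# is one common way to compensate for this kind of skew.
```
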
[
[
"**Output not very well balanced, but don't have missed values**",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e74e1e9ae318a58839b2d7d335da41a248a0f867 | 41,388 | ipynb | Jupyter Notebook | Identify_Customer_Segments.ipynb | OS-Shoaib/Customer-Segments-with-Arvato | 706a0956c6afe6fb3563a13096547cfc85c6dca4 | [
"CC0-1.0"
] | null | null | null | Identify_Customer_Segments.ipynb | OS-Shoaib/Customer-Segments-with-Arvato | 706a0956c6afe6fb3563a13096547cfc85c6dca4 | [
"CC0-1.0"
] | null | null | null | Identify_Customer_Segments.ipynb | OS-Shoaib/Customer-Segments-with-Arvato | 706a0956c6afe6fb3563a13096547cfc85c6dca4 | [
"CC0-1.0"
] | null | null | null | 54.746032 | 1,044 | 0.693897 | [
[
[
"# Project: Identify Customer Segments\n\nIn this project, you will apply unsupervised learning techniques to identify segments of the population that form the core customer base for a mail-order sales company in Germany. These segments can then be used to direct marketing campaigns towards audiences that will have the highest expected rate of returns. The data that you will use has been provided by our partners at Bertelsmann Arto Analytics, and represents a real-life data science task.\n\nThis notebook will help you complete this task by providing a framework within which you will perform your analysis steps. In each step of the project, you will see some text describing the subtask that you will perform, followed by one or more code cells for you to complete your work. **Feel free to add additional code and markdown cells as you go along so that you can explore everything in precise chunks.** The code cells provided in the base template will outline only the major tasks, and will usually not be enough to cover all the minor tasks that comprise it.\n\nIt should be noted that while there will be precise guidelines on how you should handle certain tasks in the project, there will also be places where an exact specification is not provided. **There will be times in the project where you will need to make and justify your own decisions on how to treat the data.** These are places where there may not be only one way to handle the data. In real-life tasks, there may be many valid ways to approach an analysis task. One of the most important things you can do is clearly document your approach so that other scientists can understand the decisions you've made.\n\nAt the end of most sections, there will be a Markdown cell labeled **Discussion**. In these cells, you will report your findings for the completed section, as well as document the decisions that you made in your approach to each subtask. **Your project will be evaluated not just on the code used to complete the tasks outlined, but also your communication about your observations and conclusions at each stage.**",
"_____no_output_____"
]
],
[
[
"# import libraries here; add more as necessary\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# magic word for producing visualizations in notebook\n%matplotlib inline\n\n'''\nImport note: The classroom currently uses sklearn version 0.19.\nIf you need to use an impute, it is available in sklearn.preprocessing.Impute,\ninstead of sklearn.impute as in newer versions of sklearn.\n'''",
"_____no_output_____"
]
],
[
[
"### Step 0: Load the Data\n\nThere are four files associated with this project (not including this one):\n\n- `Udacity_AZDIAS_Subset.csv`: Demographics data for the general population of Germany; 891211 persons (rows) x 85 features (columns).\n- `Udacity_CUSTOMERS_Subset.csv`: Demographics data for customers of a mail-order company; 191652 persons (rows) x 85 features (columns).\n- `Data_Dictionary.md`: Detailed information file about the features in the provided datasets.\n- `AZDIAS_Feature_Summary.csv`: Summary of feature attributes for demographics data; 85 features (rows) x 4 columns\n\nEach row of the demographics files represents a single person, but also includes information outside of individuals, including information about their household, building, and neighborhood. You will use this information to cluster the general population into groups with similar demographic properties. Then, you will see how the people in the customers' dataset fit into those created clusters. The hope here is that certain clusters are over-represented in the customer's data, as compared to the general population; those over-represented clusters will be assumed to be part of the core userbase. This information can then be used for further applications, such as targeting for a marketing campaign.\n\nTo start off with, load in the demographic's data for the general population into a pandas DataFrame, and do the same for the feature attributes summary. Note for all of the `.csv` data files in this project: they're semicolon (`;`) delimited, so you'll need an additional argument in your [`read_csv()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) call to read in the data properly. Also, considering the size of the main dataset, it may take some time for it to load completely.\n\nOnce the dataset is loaded, it's recommended that you take a little of time just browsing the general structure of the dataset and feature summary file. You'll be getting deep into the innards of the cleaning in the first major step of the project, so gaining some general familiarity can help you get your bearings.",
"_____no_output_____"
]
],
[
[
"# Load in the general demographics data.\nazdias = \n\n# Load in the feature summary file.\nfeat_info = ",
"_____no_output_____"
],
[
"# Check the structure of the data after it's loaded (e.g. print the number of\n# rows and columns, print the first few rows).\n\n",
"_____no_output_____"
]
],
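
A possible way to fill in the two loading cells above is sketched below. The file names come from the list in Step 0, but the paths assume the files sit next to the notebook; adjust them as needed.

```python
import pandas as pd

# Both CSV files are semicolon-delimited, so sep=';' is required.
azdias = pd.read_csv('Udacity_AZDIAS_Subset.csv', sep=';')
feat_info = pd.read_csv('AZDIAS_Feature_Summary.csv', sep=';')

# Basic structural checks: row/column counts and a peek at the first rows.
print(azdias.shape)     # roughly 891211 rows x 85 columns per the description above
print(feat_info.shape)  # 85 rows x 4 columns
azdias.head()
```
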
[
[
"> **Tip**: Add additional cells to keep everything in reasonably-sized chunks! Keyboard shortcut `esc --> a` (press escape to enter command mode, then press the 'A' key) adds a new cell before the active cell, and `esc --> b` adds a new cell after the active cell. If you need to convert an active cell to a markdown cell, use `esc --> m` and to convert to a code cell, use `esc --> y`. \n\n## Step 1: Preprocessing\n\n### Step 1.1: Assess Missing Data\n\nThe feature summary file contains a summary of properties for each demographic's data column. You will use this file to help you make cleaning decisions during this stage of the project. First, you should assess the demographic's data in terms of missing data. Pay attention to the following points as you perform your analysis, and take notes on what you observe. Make sure that you fill in the **Discussion** cell with your findings and decisions at the end of each step that has one!\n\n#### Step 1.1.1: Convert Missing Value Codes to NaNs\nThe fourth column of the feature attributes summary (loaded in above as `feat_info`) documents the codes from the data dictionary that indicate missing or unknown data. While the file encodes this as a list (e.g. `[-1,0]`), this will get read in as a string object. You'll need to do a little of parsing to make use of it to identify and clean the data. Convert data that matches a 'missing' or 'unknown' value code into a numpy NaN value. You might want to see how much data takes on a 'missing' or 'unknown' code, and how much data is naturally missing, as a point of interest.\n\n**As one more reminder, you are encouraged to add additional cells to break up your analysis into manageable chunks.**",
"_____no_output_____"
]
],
[
[
"# Identify missing or unknown data values and convert them to NaNs.\n\n",
"_____no_output_____"
]
],
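
One hedged sketch of the conversion step follows. It assumes the first column of `feat_info` holds the attribute name and the fourth column the string-encoded list of missing/unknown codes, as described above; non-numeric codes are kept as strings so they can still be matched against the data.

```python
import numpy as np

def parse_missing_codes(code_string):
    """Turn a string such as '[-1,0]' into a Python list of codes."""
    inner = str(code_string).strip('[]')
    if not inner:
        return []
    codes = []
    for token in inner.split(','):
        try:
            codes.append(int(token))
        except ValueError:
            codes.append(token)  # keep non-numeric codes (e.g. 'X') as strings
    return codes

azdias_nan = azdias.copy()
# Column 0: attribute name, column 3: missing/unknown codes (positions assumed).
for attribute, code_string in zip(feat_info.iloc[:, 0], feat_info.iloc[:, 3]):
    codes = parse_missing_codes(code_string)
    if codes:
        azdias_nan[attribute] = azdias_nan[attribute].replace(codes, np.nan)

# How much data is now flagged as missing?
print(azdias_nan.isnull().sum().sum(), "missing values in total")
```
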
[
[
"#### Step 1.1.2: Assess Missing Data in Each Column\n\nHow much missing data is present in each column? There are a few columns that are outliers in terms of the proportion of values that are missing. You will want to use matplotlib [`hist()`](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.hist.html) function to visualize the distribution of missing value counts to find these columns. Identify and document these columns. While some of these columns might have justifications for keeping or re-encoding the data, for this project you should just remove them from the dataframe. (Feel free to make remarks about these outlier columns in the discussion, however!)\n\nFor the remaining features, are there any patterns in which columns have, or share, missing data?",
"_____no_output_____"
]
],
[
[
"# Perform an assessment of how much missing data there is in each column of the\n# dataset.\n\n",
"_____no_output_____"
],
[
"# Investigate patterns in the amount of missing data in each column.\n\n",
"_____no_output_____"
],
[
"# Remove the outlier columns from the dataset. (You'll perform other data\n# engineering tasks such as re-encoding and imputation later.)\n\n",
"_____no_output_____"
]
],
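
A compact sketch of the column-level assessment; the 30% cut-off below is only an illustrative choice, not a prescribed threshold.

```python
import matplotlib.pyplot as plt

# Fraction of missing values per column.
missing_per_column = azdias_nan.isnull().mean().sort_values(ascending=False)

plt.hist(missing_per_column, bins=50)
plt.xlabel('fraction of values missing')
plt.ylabel('number of columns')
plt.show()

# Drop columns whose missing fraction exceeds the chosen threshold.
outlier_columns = missing_per_column[missing_per_column > 0.30].index.tolist()
print('columns to drop:', outlier_columns)
azdias_trimmed = azdias_nan.drop(outlier_columns, axis=1)
```
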
[
[
"#### Discussion 1.1.2: Assess Missing Data in Each Column\n\n(Double click this cell and replace this text with your own text, reporting your observations regarding the amount of missing data in each column. Are there any patterns in missing values? Which columns were removed from the dataset?)",
"_____no_output_____"
],
[
"#### Step 1.1.3: Assess Missing Data in Each Row\n\nNow, you'll perform a similar assessment for the rows of the dataset. How much data is missing in each row? As with the columns, you should see some groups of points that have a very different numbers of missing values. Divide the data into two subsets: one for data points that are above some threshold for missing values, and a second subset for points below that threshold.\n\nIn order to know what to do with the outlier rows, we should see if the distribution of data values on columns that are not missing data (or are missing very little data) are similar or different between the two groups. Select at least five of these columns and compare the distribution of values.\n- You can use seaborn's [`countplot()`](https://seaborn.pydata.org/generated/seaborn.countplot.html) function to create a bar chart of code frequencies and matplotlib [`subplot()`](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplot.html) function to put bar charts for the two subplots side by side.\n- To reduce repeated code, you might want to write a function that can perform this comparison, taking as one of its arguments a column to be compared.\n\nDepending on what you observe in your comparison, this will have implications on how you approach your conclusions later in the analysis. If the distributions of non-missing features look similar between the data with many missing values and the data with few or no missing values, then we could argue that simply dropping those points from the analysis won't present a major issue. On the other hand, if the data with many missing values looks very different from the data with few or no missing values, then we should make a note on those data as special. We'll revisit these data later on. **Either way, you should continue your analysis for now using just the subset of the data with few or no missing values.**",
"_____no_output_____"
]
],
[
[
"# How much data is missing in each row of the dataset?\n\n",
"_____no_output_____"
],
[
"# Write code to divide the data into two subsets based on the number of missing\n# values in each row.\n\n",
"_____no_output_____"
],
[
"# Compare the distribution of values for at least five columns where there are\n# no or few missing values, between the two subsets.\n\n",
"_____no_output_____"
]
],
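
The row-level split and comparison might be organised as below. The threshold of 10 missing values per row is an assumption to be tuned after looking at the histogram.

```python
import matplotlib.pyplot as plt
import seaborn as sns

missing_per_row = azdias_trimmed.isnull().sum(axis=1)
plt.hist(missing_per_row, bins=50)
plt.xlabel('missing values per row')
plt.show()

row_threshold = 10  # assumed cut-off
few_missing = azdias_trimmed[missing_per_row <= row_threshold]
many_missing = azdias_trimmed[missing_per_row > row_threshold]

def compare_column(column):
    """Plot the value distribution of one column for the two subsets side by side."""
    fig, axes = plt.subplots(1, 2, figsize=(10, 3))
    sns.countplot(few_missing[column].dropna(), ax=axes[0])
    axes[0].set_title('rows with few missing values')
    sns.countplot(many_missing[column].dropna(), ax=axes[1])
    axes[1].set_title('rows with many missing values')
    plt.show()

for column in few_missing.columns[:5]:
    compare_column(column)
```
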
[
[
"#### Discussion 1.1.3: Assess Missing Data in Each Row\n\n(Double-click this cell and replace this text with your own text, reporting your observations regarding missing data in rows. Are the data with lots of missing values are qualitatively different from data with few or no missing values?)",
"_____no_output_____"
],
[
"### Step 1.2: Select and Re-Encode Features\n\nChecking for missing data isn't the only way in which you can prepare a dataset for analysis. Since the unsupervised learning techniques to be used will only work on data that is encoded numerically, you need to make a few encoding changes or additional assumptions to be able to make progress. In addition, while almost all the values in the dataset are encoded using numbers, not all of them represent numeric values. Check the third column of the feature summary (`feat_info`) for a summary of types of measurement.\n- For numeric and interval data, these features can be kept without changes.\n- Most of the variables in the dataset are ordinal in nature. While ordinal values may technically be non-linear in spacing, make the simplifying assumption that the ordinal variables can be treated as being interval in nature (that is, kept without any changes).\n- Special handling may be necessary for the remaining two variable types: categorical, and 'mixed'.\n\nIn the first two parts of this sub-step, you will perform an investigation of the categorical and mixed-type features and make a decision on each of them, whether you will keep, drop, or re-encode each. Then, in the last part, you will create a new data frame with only the selected and engineered columns.\n\nData wrangling is often the trickiest part of the data analysis process, and there's a lot of it to be done here. But stick with it: once you're done with this step, you'll be ready to get to the machine learning parts of the project!",
"_____no_output_____"
]
],
[
[
"# How many features are there of each data type?\n\n",
"_____no_output_____"
]
],
[
[
"#### Step 1.2.1: Re-Encode Categorical Features\n\nFor categorical data, you would ordinarily need to encode the levels as dummy variables. Depending on the number of categories, perform one of the following:\n- For binary (two-level) categorical that take numeric values, you can keep them without needing to do anything.\n- There is one binary variable that takes on non-numeric values. For this one, you need to re-encode the values as numbers or create a dummy variable.\n- For multi-level categorical (three or more values), you can choose to encode the values using multiple dummy variables (e.g. via [OneHotEncoder](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html)), or (to keep things straightforward) just drop them from the analysis. As always, document your choices in the Discussion section.",
"_____no_output_____"
]
],
[
[
"# Assess categorical variables: which are binary, which are multi-level, and\n# which one needs to be re-encoded?\n\n",
"_____no_output_____"
],
[
"# Re-encode categorical variable(s) to be kept in the analysis.\n\n",
"_____no_output_____"
]
],
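
A sketch of how the categorical assessment could be organised. The column positions in `feat_info` and the literal type label `'categorical'` are assumptions to verify against the file, and the commented re-encoding line only illustrates the mechanics for whichever binary column turns out to hold non-numeric values.

```python
# Attribute names assumed in column 0, measurement type in column 2 of feat_info.
cat_mask = feat_info.iloc[:, 2] == 'categorical'
categorical_attributes = [a for a in feat_info.iloc[:, 0][cat_mask]
                          if a in few_missing.columns]

binary, multilevel = [], []
for column in categorical_attributes:
    (binary if few_missing[column].nunique() == 2 else multilevel).append(column)
print('binary:', binary)
print('multi-level:', multilevel)

df_encoded = few_missing.copy()

# Mechanics for the one non-numeric binary column (hypothetical name and values):
# df_encoded['SOME_BINARY_COLUMN'] = df_encoded['SOME_BINARY_COLUMN'].map({'A': 0, 'B': 1})

# Simplest option for multi-level categoricals: drop them
# (one-hot encoding via pd.get_dummies(df_encoded, columns=multilevel) is the alternative).
df_encoded = df_encoded.drop(multilevel, axis=1)
```
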
[
[
"#### Discussion 1.2.1: Re-Encode Categorical Features\n\n(Double-click this cell and replace this text with your own text, reporting your findings and decisions regarding categorical features. Which ones did you keep, which did you drop, and what engineering steps did you perform?)",
"_____no_output_____"
],
[
"#### Step 1.2.2: Engineer Mixed-Type Features\n\nThere are a handful of features that are marked as \"mixed\" in the feature summary that require special treatment in order to be included in the analysis. There are two in particular that deserve attention; the handling of the rest are up to your own choices:\n- \"PRAEGENDE_JUGENDJAHRE\" combines information on three dimensions: generation by decade, movement (mainstream vs. avantgarde), and nation (east vs. west). While there aren't enough levels to disentangle east from west, you should create two new variables to capture the other two dimensions: an interval-type variable for decade, and a binary variable for movement.\n- \"CAMEO_INTL_2015\" combines information on two axes: wealth and life stage. Break up the two-digit codes by their 'tens'-place and 'ones'-place digits into two new ordinal variables (which, for the purposes of this project, is equivalent to just treating them as their raw numeric values).\n- If you decide to keep or engineer new features around the other mixed-type features, make sure you note your steps in the Discussion section.\n\nBe sure to check `Data_Dictionary.md` for the details needed to finish these tasks.",
"_____no_output_____"
]
],
[
[
"# Investigate \"PRAEGENDE_JUGENDJAHRE\" and engineer two new variables.\n\n",
"_____no_output_____"
],
[
"# Investigate \"CAMEO_INTL_2015\" and engineer two new variables.\n\n",
"_____no_output_____"
]
],
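
The mechanics of splitting the two mixed features might look like this. The code-to-decade and code-to-movement dictionaries below are placeholders; the real mappings have to be read off `Data_Dictionary.md`.

```python
import pandas as pd

# Placeholder mappings: replace with the actual values from Data_Dictionary.md.
decade_map = {1: 40, 2: 40, 3: 50, 4: 50}
movement_map = {1: 0, 2: 1, 3: 0, 4: 1}   # e.g. 0 = mainstream, 1 = avantgarde

df_encoded['DECADE'] = df_encoded['PRAEGENDE_JUGENDJAHRE'].map(decade_map)
df_encoded['MOVEMENT'] = df_encoded['PRAEGENDE_JUGENDJAHRE'].map(movement_map)

# CAMEO_INTL_2015: tens digit -> wealth, ones digit -> life stage.
cameo = pd.to_numeric(df_encoded['CAMEO_INTL_2015'], errors='coerce')
df_encoded['WEALTH'] = cameo // 10
df_encoded['LIFE_STAGE'] = cameo % 10

# Drop the original mixed columns so only the engineered versions remain.
df_encoded = df_encoded.drop(['PRAEGENDE_JUGENDJAHRE', 'CAMEO_INTL_2015'], axis=1)
```
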
[
[
"#### Discussion 1.2.2: Engineer Mixed-Type Features\n\n(Double-click this cell and replace this text with your own text, reporting your findings and decisions regarding mixed-value features. Which ones did you keep, which did you drop, and what engineering steps did you perform?)",
"_____no_output_____"
],
[
"#### Step 1.2.3: Complete Feature Selection\n\nIn order to finish this step up, you need to make sure that your data frame now only has the columns that you want to keep. To summarize, the dataframe should consist of the following:\n- All numeric, interval, and ordinal type columns from the original dataset.\n- Binary categorical features (all numerically-encoded).\n- Engineered features from other multi-level categorical features and mixed features.\n\nMake sure that for any new columns that you have engineered, that you've excluded the original columns from the final dataset. Otherwise, their values will interfere with the analysis later on the project. For example, you should not keep \"PRAEGENDE_JUGENDJAHRE\", since its values won't be useful for the algorithm: only the values derived from it in the engineered features you created should be retained. As a reminder, your data should only be from **the subset with few or no missing values**.",
"_____no_output_____"
]
],
[
[
"# If there are other re-engineering tasks you need to perform, make sure you\n# take care of them here. (Dealing with missing data will come in step 2.1.)\n\n",
"_____no_output_____"
],
[
"# Do whatever you need to in order to ensure that the dataframe only contains\n# the columns that should be passed to the algorithm functions.\n\n",
"_____no_output_____"
]
],
[
[
"### Step 1.3: Create a Cleaning Function\n\nEven though you've finished cleaning up the general population demographics data, it's important to look ahead to the future and realize that you'll need to perform the same cleaning steps on the customer demographics data. In this substep, complete the function below to execute the main feature selection, encoding, and re-engineering steps you performed above. Then, when it comes to looking at the customer data in Step 3, you can just run this function on that DataFrame to get the trimmed dataset in a single step.",
"_____no_output_____"
]
],
[
[
"def clean_data(df):\n \"\"\"\n Perform feature trimming, re-encoding, and engineering for demographics\n data\n \n INPUT: Demographics DataFrame\n OUTPUT: Trimmed and cleaned demographics DataFrame\n \"\"\"\n \n # Put in code here to execute all main cleaning steps:\n # convert missing value codes into NaNs, ...\n \n \n # remove selected columns and rows, ...\n\n \n # select, re-encode, and engineer column values.\n\n \n # Return the cleaned dataframe.\n \n ",
"_____no_output_____"
]
],
[
[
"## Step 2: Feature Transformation\n\n### Step 2.1: Apply Feature Scaling\n\nBefore we apply dimensionality reduction techniques to the data, we need to perform feature scaling so that the principal component vectors are not influenced by the natural differences in scale for features. Starting from this part of the project, you'll want to keep an eye on the [API reference page for sklearn](http://scikit-learn.org/stable/modules/classes.html) to help you navigate to all the classes and functions that you'll need. In this substep, you'll need to check the following:\n\n- sklearn requires that data not have missing values in order for its estimators to work properly. So, before applying the scaler to your data, make sure that you've cleaned the DataFrame of the remaining missing values. This can be as simple as just removing all data points with missing data, or applying an [Imputer](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Imputer.html) to replace all missing values. You might also try a more complicated procedure where you temporarily remove missing values in order to compute the scaling parameters before re-introducing those missing values and applying imputation. Think about how much missing data you have and what possible effects each approach might have on your analysis, and justify your decision in the discussion section below.\n- For the actual scaling function, a [StandardScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) instance is suggested, scaling each feature to mean 0 and standard deviation 1.\n- For these classes, you can make use of the `.fit_transform()` method to both fit a procedure to the data and apply the transformation to the data at the same time. Don't forget to keep the fit sklearn objects handy, since you'll be applying them to the customer demographics data towards the end of the project.",
"_____no_output_____"
]
],
[
[
"# If you've not yet cleaned the dataset of all NaN values, then investigate and\n# do that now.\n\n",
"_____no_output_____"
],
[
"# Apply feature scaling to the general population demographics data.\n\n",
"_____no_output_____"
]
],
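
Under the sklearn 0.19 API mentioned in the note above, imputation plus scaling could look like the sketch below (in newer sklearn versions `Imputer` is replaced by `sklearn.impute.SimpleImputer`); the median strategy is just one defensible choice.

```python
from sklearn.preprocessing import Imputer, StandardScaler

# Replace remaining NaNs with a per-column statistic, then standardise.
imputer = Imputer(strategy='median')
imputed = imputer.fit_transform(df_encoded)

scaler = StandardScaler()
azdias_scaled = scaler.fit_transform(imputed)

# Keep the fitted imputer and scaler: they are reused (transform only, never
# re-fit) on the customer demographics data in Step 3.2.
```
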
[
[
"### Discussion 2.1: Apply Feature Scaling\n\n(Double-click this cell and replace this text with your own text, reporting your decisions regarding feature scaling.)",
"_____no_output_____"
],
[
"### Step 2.2: Perform Dimensionality Reduction\n\nOn your scaled data, you are now ready to apply dimensionality reduction techniques.\n\n- Use sklearn's [PCA](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html) class to apply principal component analysis on the data, thus finding the vectors of maximal variance in the data. To start, you should not set any parameters (so all components are computed) or set a number of components that is at least half the number of features (so there's enough features to see the general trend in variability).\n- Check out the ratio of variance explained by each principal component as well as the cumulative variance explained. Try plotting the cumulative or sequential values using matplotlib [`plot()`](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html) function. Based on what you find, select a value for the number of transformed features you'll retain for the clustering part of the project.\n- Once you've made a choice for the number of components to keep, make sure you re-fit a PCA instance to perform the decided-on transformation.",
"_____no_output_____"
]
],
[
[
"# Apply PCA to the data.\n\n",
"_____no_output_____"
],
[
"# Investigate the variance accounted for by each principal component.\n\n",
"_____no_output_____"
],
[
"# Re-apply PCA to the data while selecting for number of components to retain.\n\n",
"_____no_output_____"
]
],
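
A compact PCA sketch; the 60 retained components at the end are a placeholder until the variance curve has actually been inspected.

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

pca_full = PCA()                    # fit all components first
pca_full.fit(azdias_scaled)

plt.plot(np.cumsum(pca_full.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
plt.show()

n_components = 60                   # placeholder choice based on the curve
pca = PCA(n_components=n_components)
azdias_pca = pca.fit_transform(azdias_scaled)
```
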
[
[
"### Discussion 2.2: Perform Dimensionality Reduction\n\n(Double-click this cell and replace this text with your own text, reporting your findings and decisions regarding dimensionality reduction. How many principal components / transformed features are you retaining for the next step of the analysis?)",
"_____no_output_____"
],
[
"### Step 2.3: Interpret Principal Components\n\nNow that we have our transformed principal components, it's a nice idea to check out the weight of each variable on the first few components to see if they can be interpreted in some fashion.\n\nAs a reminder, each principal component is a unit vector that points in the direction of the highest variance (after accounting for the variance captured by earlier principal components). The further a weight is from zero, the more the principal component is in the direction of the corresponding feature. If two features have large weights of the same sign (both positive or both negative), then increases in one tend expect to be associated with increases in the other. To contrast, features with different signs can be expected to show a negative correlation: increases in one variable should result in a decrease in the other.\n\n- To investigate the features, you should map each weight to their corresponding feature name, then sort the features according to weight. The most interesting features for each principal component, then, will be those at the beginning and end of the sorted list. Use the data dictionary document to help you understand these most prominent features, their relationships, and what a positive or negative value on the principal component might indicate.\n- You should investigate and interpret feature associations from the first three principal components in this substep. To help facilitate this, you should write a function that you can call at any time to print the sorted list of feature weights, for the *i*-th principal component. This might come in handy in the next step of the project, when you interpret the tendencies of the discovered clusters.",
"_____no_output_____"
]
],
[
[
"# Map weights for the first principal component to corresponding feature names\n# and then print the linked values, sorted by weight.\n# HINT: Try defining a function here or in a new cell that you can reuse in the\n# other cells.\n\n",
"_____no_output_____"
],
[
"# Map weights for the second principal component to corresponding feature names\n# and then print the linked values, sorted by weight.\n\n",
"_____no_output_____"
],
[
"# Map weights for the third principal component to corresponding feature names\n# and then print the linked values, sorted by weight.\n\n",
"_____no_output_____"
]
],
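
A reusable helper for inspecting component weights might look like this; it assumes `df_encoded.columns` still matches the column order that went into the scaler.

```python
import pandas as pd

def show_component_weights(pca_model, component_index, feature_names, top=10):
    """Print the strongest negative and positive feature weights of one component."""
    weights = pd.Series(pca_model.components_[component_index], index=feature_names)
    weights = weights.sort_values()
    print(weights.head(top))   # strongest negative weights
    print(weights.tail(top))   # strongest positive weights

for i in range(3):
    print('--- principal component %d ---' % i)
    show_component_weights(pca, i, df_encoded.columns)
```
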
[
[
"### Discussion 2.3: Interpret Principal Components\n\n(Double-click this cell and replace this text with your own text, reporting your observations from detailed investigation of the first few principal components generated. Can we interpret positive and negative values from them in a meaningful way?)",
"_____no_output_____"
],
[
"## Step 3: Clustering\n\n### Step 3.1: Apply Clustering to General Population\n\nYou've assessed and cleaned the demographic's data, then scaled and transformed them. Now, it's time to see how the data clusters in the principal components space. In this substep, you will apply k-means clustering to the dataset and use the average within-cluster distances from each point to their assigned cluster's centroid to decide on a number of clusters to keep.\n\n- Use sklearn's [KMeans](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans) class to perform k-means clustering on the PCA-transformed data.\n- Then, compute the average difference from each point to its assigned cluster's center. **Hint**: The KMeans object's `.score()` method might be useful here, but note that in sklearn, scores tend to be defined so that larger is better. Try applying it to a small, toy dataset, or use an internet search to help your understanding.\n- Perform the above two steps for a number of different cluster counts. You can then see how the average distance decreases with an increasing number of clusters. However, each additional cluster provides a smaller net benefit. Use this fact to select a final number of clusters in which to group the data. **Warning**: because of the large size of the dataset, it can take a long time for the algorithm to resolve. The more clusters to fit, the longer the algorithm will take. You should test for cluster counts through at least 10 clusters to get the full picture, but you shouldn't need to test for a number of clusters above about 30.\n- Once you've selected a final number of clusters to use, re-fit a KMeans instance to perform the clustering operation. Make sure that you also obtain the cluster assignments for the general demographics' data, since you'll be using them in the final Step 3.3.",
"_____no_output_____"
]
],
[
[
"# Over a number of different cluster counts...\n\n\n # run k-means clustering on the data and...\n \n \n # compute the average within-cluster distances.\n \n ",
"_____no_output_____"
],
[
"# Investigate the change in within-cluster distance across number of clusters.\n# HINT: Use matplotlib's plot function to visualize this relationship.\n\n",
"_____no_output_____"
],
[
"# Re-fit the k-means model with the selected number of clusters and obtain\n# cluster predictions for the general population demographics data.\n\n",
"_____no_output_____"
]
],
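
One way to run the elbow analysis is sketched below. Note that `KMeans.score()` returns the *negative* within-cluster sum of squared distances, hence the sign flip; the final choice of 10 clusters is a placeholder.

```python
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

cluster_counts = range(2, 21)
avg_distances = []
for k in cluster_counts:
    km = KMeans(n_clusters=k, random_state=0).fit(azdias_pca)
    avg_distances.append(-km.score(azdias_pca) / len(azdias_pca))

plt.plot(list(cluster_counts), avg_distances, marker='o')
plt.xlabel('number of clusters')
plt.ylabel('average within-cluster dissimilarity')
plt.show()

kmeans = KMeans(n_clusters=10, random_state=0).fit(azdias_pca)   # placeholder k
general_clusters = kmeans.predict(azdias_pca)
```
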
[
[
"### Discussion 3.1: Apply Clustering to General Population\n\n(Double-click this cell and replace this text with your own text, reporting your findings and decisions regarding clustering. Into how many clusters have you decided to segment the population?)",
"_____no_output_____"
],
[
"### Step 3.2: Apply All Steps to the Customer Data\n\nNow that you have clusters and cluster centers for the general population, it's time to see how the customer data maps on to those clusters. Take care to not confuse this for re-fitting all the models to the customer data. Instead, you're going to use the fits from the general population to clean, transform, and cluster the customer data. In the last step of the project, you will interpret how the general population fits apply to the customer data.\n\n- Don't forget when loading in the customers' data, that it is semicolon (`;`) delimited.\n- Apply the same feature wrangling, selection, and engineering steps to the customer demographics using the `clean_data()` function you created earlier. (You can assume that the customer demographics data has similar meaning behind missing data patterns as the general demographics' data.)\n- Use the sklearn objects from the general demographics' data, and apply their transformations to the customer's data. That is, you should not be using a `.fit()` or `.fit_transform()` method to re-fit the old objects, nor should you be creating new sklearn objects! Carry the data through the feature scaling, PCA, and clustering steps, obtaining cluster assignments for all the data in the customer demographics data.",
"_____no_output_____"
]
],
[
[
"# Load in the customer demographics data.\ncustomers = ",
"_____no_output_____"
],
[
"# Apply preprocessing, feature transformation, and clustering from the general\n# demographics onto the customer data, obtaining cluster predictions for the\n# customer demographics data.\n\n",
"_____no_output_____"
]
],
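
The customer-side pipeline then only reuses the already-fitted objects. The sketch assumes `clean_data()` returns exactly the same columns, in the same order, as the cleaned general-population frame.

```python
import pandas as pd

customers = pd.read_csv('Udacity_CUSTOMERS_Subset.csv', sep=';')
customers_clean = clean_data(customers)

# Transform only; never re-fit on the customer data.
customers_imputed = imputer.transform(customers_clean)
customers_scaled = scaler.transform(customers_imputed)
customers_pca = pca.transform(customers_scaled)
customer_clusters = kmeans.predict(customers_pca)
```
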
[
[
"### Step 3.3: Compare Customer Data to Demographics Data\n\nAt this point, you have clustered data based on demographics of the general population of Germany, and seen how the customer data for a mail-order sales company maps onto those demographic clusters. In this final substep, you will compare the two cluster distributions to see where the strongest customer base for the company is.\n\nConsider the proportion of persons in each cluster for the general population, and the proportions for the customers. If we think the company's customer base to be universal, then the cluster assignment proportions should be fairly similar between the two. If there are only particular segments of the population that are interested in the company's products, then we should see a mismatch from one to the other. If there is a higher proportion of persons in a cluster for the customer data compared to the general population (e.g. 5% of persons are assigned to a cluster for the general population, but 15% of the customer data is closest to that cluster's centroid) then that suggests the people in that cluster to be a target audience for the company. On the other hand, the proportion of the data in a cluster being larger in the general population than the customer data (e.g. only 2% of customers closest to a population centroid that captures 6% of the data) suggests that group of persons to be outside the target demographics.\n\nTake a look at the following points in this step:\n\n- Compute the proportion of data points in each cluster for the general population and the customer data. Visualizations will be useful here: both for the individual dataset proportions, but also to visualize the ratios in cluster representation between groups. Seaborn's [`countplot()`](https://seaborn.pydata.org/generated/seaborn.countplot.html) or [`barplot()`](https://seaborn.pydata.org/generated/seaborn.barplot.html) function could be handy.\n - Recall the analysis you performed in step 1.1.3 of the project, where you separated out certain data points from the dataset if they had more than a specified threshold of missing values. If you found that this group was qualitatively different from the main bulk of the data, you should treat this as an additional data cluster in this analysis. Make sure that you account for the number of data points in this subset, for both the general population and customer datasets, when making your computations!\n- Which cluster or clusters are overrepresented in the customer dataset compared to the general population? Select at least one such cluster and infer what kind of people might be represented by that cluster. Use the principal component interpretations from step 2.3 or look at additional components to help you make this inference. Alternatively, you can use the `.inverse_transform()` method of the PCA and StandardScaler objects to transform centroids back to the original data space and interpret the retrieved values directly.\n- Perform a similar investigation for the underrepresented clusters. Which cluster or clusters are underrepresented in the customer dataset compared to the general population, and what kinds of people are typified by these clusters?",
"_____no_output_____"
]
],
[
[
"# Compare the proportion of data in each cluster for the customer data to the\n# proportion of data in each cluster for the general population.\n\n",
"_____no_output_____"
],
[
"# What kinds of people are part of a cluster that is overrepresented in the\n# customer data compared to the general population?\n\n",
"_____no_output_____"
],
[
"# What kinds of people are part of a cluster that is underrepresented in the\n# customer data compared to the general population?\n\n",
"_____no_output_____"
]
],
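
Comparing cluster proportions and peeking inside an over-represented cluster could be sketched as follows; remember to also account for the rows set aside in Step 1.1.3 if that subset looked qualitatively different.

```python
import pandas as pd
import matplotlib.pyplot as plt

general_props = pd.Series(general_clusters).value_counts(normalize=True).sort_index()
customer_props = pd.Series(customer_clusters).value_counts(normalize=True).sort_index()

comparison = pd.DataFrame({'general': general_props, 'customers': customer_props})
comparison.plot(kind='bar', figsize=(10, 4))
plt.xlabel('cluster')
plt.ylabel('proportion of data points')
plt.show()

# Inspect the most over-represented cluster by mapping its centroid back to feature space.
over = (comparison['customers'] - comparison['general']).idxmax()
centroid_pca = kmeans.cluster_centers_[over].reshape(1, -1)
centroid = scaler.inverse_transform(pca.inverse_transform(centroid_pca))[0]
print(pd.Series(centroid, index=df_encoded.columns).sort_values(ascending=False).head(15))
```
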
[
[
"### Discussion 3.3: Compare Customer Data to Demographics Data\n\n(Double-click this cell and replace this text with your own text, reporting findings and conclusions from the clustering analysis. Can we describe segments of the population that are relatively popular with the mail-order company, or relatively unpopular with the company?)",
"_____no_output_____"
],
[
"> Congratulations on making it this far in the project! Before you finish, make sure to check through the entire notebook from top to bottom to make sure that your analysis follows a logical flow and all of your findings are documented in **Discussion** cells. Once you've checked over all of your work, you should export the notebook as an HTML document to submit for evaluation. You can do this from the menu, navigating to **File -> Download as -> HTML (.html)**. You will submit both that document and this notebook for your project submission.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e74e347a7febcb58148c0de43bb7b74809f4d1e9 | 5,725 | ipynb | Jupyter Notebook | Task 2/Task 2.ipynb | yangjiada/NLP | 8f52043ff0b8441ef3a3bb4fbfd6af39fbb9b0db | [
"MIT"
] | null | null | null | Task 2/Task 2.ipynb | yangjiada/NLP | 8f52043ff0b8441ef3a3bb4fbfd6af39fbb9b0db | [
"MIT"
] | null | null | null | Task 2/Task 2.ipynb | yangjiada/NLP | 8f52043ff0b8441ef3a3bb4fbfd6af39fbb9b0db | [
"MIT"
] | null | null | null | 18.832237 | 182 | 0.452227 | [
[
[
"## 结巴分词",
"_____no_output_____"
]
],
[
[
"import jieba",
"_____no_output_____"
],
[
"seg_list = jieba.cut(\"我来到北京清华大学\")",
"_____no_output_____"
],
[
"print(' '.join(seg_list))",
"Building prefix dict from the default dictionary ...\nDumping model to file cache C:\\Users\\Jan\\AppData\\Local\\Temp\\jieba.cache\nLoading model cost 0.935 seconds.\nPrefix dict has been built succesfully.\n"
]
],
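
The cell above uses jieba's default (accurate) mode; full mode and search-engine mode are also available and differ only in how aggressively they split, as the short sketch below shows.

```python
# Full mode lists every dictionary word found; search-engine mode further splits
# long words, which is useful when building a search index.
print('/'.join(jieba.cut("我来到北京清华大学", cut_all=True)))
print('/'.join(jieba.cut_for_search("我来到北京清华大学")))
```
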
[
[
"## 自定义词典",
"_____no_output_____"
]
],
[
[
"jieba.load_userdict(\"dict.txt\")\nimport jieba.posseg as pseg",
"_____no_output_____"
],
[
"test_sent = (\n\"李小福是创新办主任也是云计算方面的专家; 什么是八一双鹿\\n\"\n\"例如我输入一个带“韩玉赏鉴”的标题,在自定义词库中也增加了此词为N类\\n\"\n\"「台中」正確應該不會被切開。mac上可分出「石墨烯」;此時又可以分出來凱特琳了。\"\n)",
"_____no_output_____"
],
[
"words = jieba.cut(test_sent)",
"_____no_output_____"
],
[
"' '.join(words)",
"_____no_output_____"
]
],
[
[
"## 基于 TF-IDF 算法的关键词抽取",
"_____no_output_____"
]
],
[
[
"sentence = \"\"\"\n《复仇者联盟4》上映16天,连续16天获得单日票房冠军,《何以为家》以优质的口碑正在冲击3亿票房,但市场大盘又再次回落至4千万元一天的水平,随着影片热度逐渐退却,靠它们“续命”的影院也重回经营窘境。\n\"\"\"",
"_____no_output_____"
],
[
"import jieba.analyse",
"_____no_output_____"
],
[
"jieba.analyse.extract_tags(sentence, topK=20, withWeight=False, allowPOS=())",
"_____no_output_____"
]
],
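
`extract_tags` can also return the TF-IDF weight of each keyword, which makes the ranking easier to interpret:

```python
# withWeight=True returns (keyword, weight) pairs instead of bare keywords.
for keyword, weight in jieba.analyse.extract_tags(sentence, topK=10, withWeight=True):
    print(keyword, round(weight, 3))
```
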
[
[
"## 基于 TextRank 算法的关键词抽取",
"_____no_output_____"
]
],
[
[
"jieba.analyse.textrank(sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v')) ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74e4f246760eea7e6267d0be9e147b217389215 | 390 | ipynb | Jupyter Notebook | pset_challenging_ext/exercises/nb/p55.ipynb | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 5 | 2019-04-08T20:05:37.000Z | 2019-12-04T20:48:45.000Z | pset_challenging_ext/exercises/nb/p55.ipynb | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 8 | 2019-04-15T15:16:05.000Z | 2022-02-12T10:33:32.000Z | pset_challenging_ext/exercises/nb/p55.ipynb | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 2 | 2019-04-10T00:14:42.000Z | 2020-02-26T20:35:21.000Z | 15 | 47 | 0.417949 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e74e52ddb957587d365628c170effd5932e0a053 | 50,367 | ipynb | Jupyter Notebook | Data_Frame/Sortowanie_Dannych.ipynb | MarekKras/Analiza_Dannych_01 | 11554348ab50736817bd2a96671680bb9a820648 | [
"Unlicense"
] | null | null | null | Data_Frame/Sortowanie_Dannych.ipynb | MarekKras/Analiza_Dannych_01 | 11554348ab50736817bd2a96671680bb9a820648 | [
"Unlicense"
] | null | null | null | Data_Frame/Sortowanie_Dannych.ipynb | MarekKras/Analiza_Dannych_01 | 11554348ab50736817bd2a96671680bb9a820648 | [
"Unlicense"
] | null | null | null | 33.578 | 131 | 0.346159 | [
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math as math",
"_____no_output_____"
],
[
"frame = pd.read_csv('./data/mcdonalds.csv',usecols=['Item','Category','Serving Size','Calories','TotalFat'])",
"_____no_output_____"
],
[
"frame.head()",
"_____no_output_____"
],
[
"frame.sort_values(by='Calories').head()",
"_____no_output_____"
],
[
"frame.sort_values(by='Calories',ascending=False).head()",
"_____no_output_____"
],
[
"frame[\"Calories\"]= frame['Calories'].astype('float')\nframe.loc[82,'Calories'] = np.NaN",
"_____no_output_____"
],
[
"frame.head()",
"_____no_output_____"
],
[
"frame.loc[82]",
"_____no_output_____"
],
[
"frame.sort_values(by='Calories').tail()",
"_____no_output_____"
],
[
"frame.sort_values(by='Calories',na_position='first').head()",
"_____no_output_____"
],
[
"frame.sort_values(by=['Category','Item']).head(20)",
"_____no_output_____"
],
[
"frame.sort_values(by=['Category','Item'],ascending=[True,False]).head(20)",
"_____no_output_____"
],
[
"frame = pd.read_csv('./data/mcdonalds.csv',usecols=['Item','Category','Serving Size','Calories','TotalFat'],index_col='Item')",
"_____no_output_____"
],
[
"frame.head()",
"_____no_output_____"
],
[
"frame.sort_index(ascending=True)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74e5d89daa0ff8801007fc91e7b60b30b231c41 | 18,959 | ipynb | Jupyter Notebook | Network Evaluation Examples/.ipynb_checkpoints/test-checkpoint.ipynb | jdtibochab/network_bisb | 7adcab15c2e8ed79123153f8de38d159d103f999 | [
"MIT"
] | null | null | null | Network Evaluation Examples/.ipynb_checkpoints/test-checkpoint.ipynb | jdtibochab/network_bisb | 7adcab15c2e8ed79123153f8de38d159d103f999 | [
"MIT"
] | null | null | null | Network Evaluation Examples/.ipynb_checkpoints/test-checkpoint.ipynb | jdtibochab/network_bisb | 7adcab15c2e8ed79123153f8de38d159d103f999 | [
"MIT"
] | null | null | null | 102.481081 | 6,736 | 0.84208 | [
[
[
"import sys\nsys.path.append('/home/juan/Network_Evaluation_Tools')\n\nimport networkx\nfrom network_evaluation_tools import data_import_tools as dit\nfrom network_evaluation_tools import network_evaluation_functions as nef\nfrom network_evaluation_tools import network_propagation as prop\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# Load network (We choose a smaller network here for the example's sake)\nnetwork1 = dit.load_network_file('/home/juan/Network_Evaluation_Tools/Data/PID_Symbol.sif', verbose=True)\nnetwork2 = dit.load_network_file('/home/juan/Network_Evaluation_Tools/Data/HumanInteractome_Symbol.sif', verbose=True)",
"('Network File Loaded:', '/home/juan/Network_Evaluation_Tools/Data/PID_Symbol.sif')\n('Network File Loaded:', '/home/juan/Network_Evaluation_Tools/Data/HumanInteractome_Symbol.sif')\n"
],
[
"ei_centrality = networkx.eigenvector_centrality(network1)\nei_centrality_pool = []\nfor prot in ei_centrality.keys():\n ei_centrality_pool.append(ei_centrality[prot])",
"_____no_output_____"
],
[
"ei_centrality_bins=plt.hist(ei_centrality_pool, bins=100)",
"_____no_output_____"
],
[
"ei_centrality = networkx.eigenvector_centrality(network2)\nei_centrality_pool = []\nfor prot in ei_centrality.keys():\n ei_centrality_pool.append(ei_centrality[prot])",
"_____no_output_____"
],
[
"ei_centrality_bins=plt.hist(ei_centrality_pool, bins=100)",
"_____no_output_____"
],
[
"network3 = networkx.intersection(network1,network2)",
"_____no_output_____"
],
[
"sys.path",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74e62598e0c34c45558e383c61d1fdabfbedf3b | 19,323 | ipynb | Jupyter Notebook | tensorflow_graphics/projects/point_convolutions/pylib/notebooks/Introduction.ipynb | schellmi42/graphics | 2e705622c2a6b0007347d2db154ccdf5a0eb73d4 | [
"Apache-2.0"
] | null | null | null | tensorflow_graphics/projects/point_convolutions/pylib/notebooks/Introduction.ipynb | schellmi42/graphics | 2e705622c2a6b0007347d2db154ccdf5a0eb73d4 | [
"Apache-2.0"
] | null | null | null | tensorflow_graphics/projects/point_convolutions/pylib/notebooks/Introduction.ipynb | schellmi42/graphics | 2e705622c2a6b0007347d2db154ccdf5a0eb73d4 | [
"Apache-2.0"
] | 1 | 2021-10-11T09:10:56.000Z | 2021-10-11T09:10:56.000Z | 33.605217 | 307 | 0.51788 | [
[
[
"##### Copyright 2020 Google LLC.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Point Clouds for tensorflow_graphics\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/schellmi42/graphics/blob/point_convolutions/tensorflow_graphics/projects/point_convolutions/pylib/notebooks/Introduction.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/schellmi42/graphics/blob/point_convolutions/tensorflow_graphics/projects/point_convolutions/pylib/notebooks/Introduction.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"## Initialization",
"_____no_output_____"
],
[
"\n### Clone repositories, and install requirements and custom_op package",
"_____no_output_____"
]
],
[
[
"# Clone repositories\n!rm -r graphics\n!git clone https://github.com/schellmi42/graphics\n\n# install requirements and load tfg module \n!pip install -r graphics/requirements.txt\n\n# install custom ops\n!pip install graphics/tensorflow_graphics/projects/point_convolutions/custom_ops/pkg_builds/tf_2.2.0/*.whl\n",
"_____no_output_____"
]
],
[
[
"### Load modules",
"_____no_output_____"
]
],
[
[
"import sys\n# (this is equivalent to export PYTHONPATH='$HOME/graphics:/content/graphics:$PYTHONPATH', but adds path to running session)\nsys.path.append(\"/content/graphics\")\n\n# load point cloud module \n# (this is equivalent to export PYTHONPATH='/content/graphics/tensorflow_graphics/projects/point_convolutions:$PYTHONPATH', but adds path to running session)\nsys.path.append(\"/content/graphics/tensorflow_graphics/projects/point_convolutions\")",
"_____no_output_____"
]
],
[
[
"Check if it loads without errors",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nimport tensorflow_graphics as tfg\nimport pylib.pc as pc\nimport numpy as np\n\nprint('TensorFlow version: %s'%tf.__version__)\nprint('TensorFlow-Graphics version: %s'%tfg.__version__)\nprint('Point Cloud Module: ', pc)",
"_____no_output_____"
]
],
[
[
"## Example Code\n",
"_____no_output_____"
],
[
"### 2D square point clouds using segmentation IDs\nHere we create a batch of point clouds with variable number of points per cloud from unordered points with an additional id tensor.\n\nThe `batch_ids` are the segmentation ids, which indicate which point belongs to which point cloud in the batch. For more information on segmentation IDs see: [tf.math#segmentation](https://www.tensorflow.org/api_docs/python/tf/math#Segmentation)\n\nIf the points are ordered by batch id, it is also possible to pass a `sizes` tensor, which has the size of each point cloud in it.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n\ndef square(num_samples, size=1):\n # 2D square in 3D for easier visualization\n points = np.random.rand(num_samples, 2)*2-1\n return points*size\n\nnum_samples=1000\nbatch_size = 10\n\n# create numpy input data consisting of points and segmentation identifiers\npoints = square(num_samples)\nbatch_ids = np.random.randint(0, batch_size, num_samples)\n\n# create tensorflow point cloud\npoint_cloud = pc.PointCloud(points, batch_ids, batch_size)\n\n# print information\nsizes = point_cloud.get_sizes()\nprint('%s point clouds of sizes:'%point_cloud._batch_size)\nprint(sizes.numpy())",
"_____no_output_____"
]
],
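
As noted above, a `sizes` tensor is an alternative to `batch_ids` when the points are ordered by cloud. A small sketch reusing the arrays defined in the previous cell; the sorting step is only needed because the random `batch_ids` are unordered, and the constructor call mirrors the `sizes=` usage shown later in this notebook.

```python
# Order the points by batch id, then describe the batch by per-cloud sizes.
order = np.argsort(batch_ids)
sorted_points = points[order]
sizes_np = np.bincount(batch_ids, minlength=batch_size)

point_cloud_from_sizes = pc.PointCloud(sorted_points, sizes=sizes_np)
print(point_cloud_from_sizes.get_sizes().numpy())
```
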
[
[
"Create a batch of point hierarchies using sequential poisson disk sampling with pooling radii 0.1, 0.4, 2.",
"_____no_output_____"
]
],
[
[
"# numpy input parameters\nsampling_radii = np.array([[0.1], [0.4], [2]])\n\n# create tensorflow point hierarchy\npoint_hierarchy = pc.PointHierarchy(point_cloud,\n sampling_radii,\n 'poisson_disk')",
"_____no_output_____"
],
[
"# print information\nnum_levels = len(sampling_radii) + 1\nprint('%s point clouds of sizes:'%point_cloud._batch_size)\nsizes = point_hierarchy.get_sizes()\nfor i in range(num_levels):\n print('level: ' + str(i))\n print(sizes[i].numpy())",
"_____no_output_____"
]
],
[
[
"assign a shape to the batch and look at the sizes again",
"_____no_output_____"
]
],
[
[
"point_hierarchy.set_batch_shape([2, 5])\nprint('%s point clouds of sizes:'%point_cloud._batch_size)\nsizes = point_hierarchy.get_sizes()\nfor i in range(num_levels):\n print('level: ' + str(i))\n print(sizes[i].numpy())",
"_____no_output_____"
]
],
[
[
"Visualize the levels of one example from the batch.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\n\n# which example from the batch to choose, can be 'int' or relative in [A1,...,An]\nbatch_id = [0,1]\n\ncurr_points = point_hierarchy.get_points(batch_id)\n\n# plotting\nplt.figure(figsize=[num_levels*5,5])\nfor i in range(num_levels):\n plt.subplot(1,num_levels,i+1)\n plt.plot(curr_points[i][:, 0],curr_points[i][:, 1],'bo')\n plt.axis([-1, 1, -1, 1])\n if i==0:\n plt.title('input point cloud')\n else:\n plt.title('poisson sampled points with radius %s'%sampling_radii[i - 1, 0])\n \n",
"_____no_output_____"
]
],
[
[
"### 3D point clouds from input files using arbitrary batch sizes with padding\n\nHere we create point clouds from input files using a zero padded representation of shape `[A1, .., An, V, D]`.\nInternally this is converted to a segmented representation.\n\n",
"_____no_output_____"
],
[
" #### Loading from ASCII .txt files",
"_____no_output_____"
]
],
[
[
"import pylib.io as io\n\n# SHREC15\n\n#### get files ####\ninput_dir = 'graphics/tensorflow_graphics/projects/point_convolutions/test_point_clouds/SHREC15/'\nfilenames = tf.io.gfile.listdir(input_dir)\nbatch_size = len(filenames)\nprint('### batch size ###'); print(batch_size)\n\nfor i in range(batch_size):\n filenames[i] = input_dir + filenames[i]\n\n#### load points #####\nbatch_shape = [5,2]\nprint('### batch shape###'); print(batch_shape)\npoints, normals, sizes = io.load_batch_of_points(filenames, batch_shape = batch_shape)\n\nprint('### data shape ###'); print(points.shape)\nprint('### points per point cloud ###');print(sizes.numpy())\n\n#### build point hierarchy #####\npoint_cloud = pc.PointCloud(points, sizes=sizes)\n\npoint_hierarchy = pc.PointHierarchy(point_cloud,\n [[0.05], [0.1]],\n 'poisson_disk')\n\nsizes = point_hierarchy.get_sizes()\n\nprint('### point per point cloud in hierarchy ###')\nfor level in range(len(sizes)):\n print('level %s'%level)\n print(sizes[level].numpy())\n\n### extract points from last level in original batch shape ###\nhierarchical_points = point_hierarchy.get_points()\nout_points = hierarchical_points[-1]\nprint('### shape of points in last level ###'); print(out_points.shape)",
"_____no_output_____"
]
],
[
[
"#### Loading vertices from mesh files \n",
"_____no_output_____"
]
],
[
[
"# Thingi10k meshes\n\n#### get files ####\ninput_dir = 'graphics/tensorflow_graphics/projects/point_convolutions/test_point_clouds/meshes/'\nfilenames = tf.io.gfile.listdir(input_dir)\nbatch_size = len(filenames)\nprint('### batch size ###'); print(batch_size)\n\nfor i in range(batch_size):\n filenames[i] = input_dir+filenames[i]\n\n#### load points ####\npoints, sizes = io.load_batch_of_meshes(filenames)\n\nprint('### data shape ###'); print(points.shape)\nprint('### points per point cloud ###');print(sizes.numpy())\n\n#### build a point cloud object ####\npoint_cloud = pc.PointCloud(points, sizes=sizes)\n\nprint('### internal shape conversion ###')\nprint('Input (padded): %s elements'%len(tf.reshape(points, [-1, 3])))\nprint('Internal (segmented): %s elements'%len(point_cloud._points))\n\npoint_hierarchy = pc.PointHierarchy(point_cloud,\n [[0.05], [0.1]],\n 'poisson_disk')\n\nsizes = point_hierarchy.get_sizes()\n\nprint('### point per point cloud in hierarchy ###')\nfor level in range(len(sizes)):\n print('level %s'%level)\n print(sizes[level].numpy())",
"_____no_output_____"
]
],
[
[
"### Monte-Carlo Convolutions\n",
"_____no_output_____"
],
[
"Create convolutions for a point hierarchy with MLPs as kernel \n\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\n### create random input data\nnum_pts = 1000\npoint_dim = 3\nfeature_dim = 3\nbatch_size = 10\n\n# create random points\npoints = np.random.rand(num_pts,point_dim)\nbatch_ids = np.random.randint(0,batch_size,num_pts)\nbatch_ids[:batch_size] = np.arange(0,batch_size) # ensure non-empty point clouds\n# create random features\nfeatures = np.random.rand(num_pts,feature_dim)\n\n# build initial point cloud\npoint_cloud = pc.PointCloud(points, batch_ids, batch_size)\n\n# build point hierarchy\nsample_radii = np.array([[0.1],[0.2],[2]])\npoint_hierarchy = pc.PointHierarchy(point_cloud,sample_radii)\n\n### build model\n\n# layer parameters\nconv_radii = 2*sample_radii\nfeature_sizes = [8,16,32]\nkernel_hidden_size = 8 # number of neurons in the hidden layer of the kernel MLP\n\n### initialize layers\nConv1 = pc.layers.MCConv(feature_dim, feature_sizes[0], point_dim,kernel_hidden_size)\nConv2 = pc.layers.MCConv(feature_sizes[0],feature_sizes[1],point_dim,kernel_hidden_size)\nConv3 = pc.layers.MCConv(feature_sizes[1],feature_sizes[2],point_dim,kernel_hidden_size)\n\n### call layers\nconv1_result = Conv1(features,point_hierarchy[0], point_hierarchy[1],conv_radii[0])\nconv2_result = Conv2(conv1_result,point_hierarchy[1], point_hierarchy[2],conv_radii[1])\nconv3_result = Conv3(conv2_result,point_hierarchy[2], point_hierarchy[3],conv_radii[2], return_sorted=True)\n\n### printing \nprint('### point cloud sizes ###')\nsizes = point_hierarchy.get_sizes()\nfor s in sizes:\n print(s.numpy())\n\nprint('\\n### features dimensions flat ###')\nprint('Input: ');print(features.shape)\nprint('Conv1: ');print(conv1_result.shape)\nprint('Conv2: ');print(conv2_result.shape)\nprint('Conv3: ');print(conv3_result.shape)\n\n# again in padded format\npoint_hierarchy.set_batch_shape([5,2])\n\nunflatten = point_hierarchy[0].get_unflatten()\nfeatures_padded = unflatten(features)\n### call layers\nconv1_result_padded = Conv1(features_padded, point_hierarchy[0], point_hierarchy[1],conv_radii[0], return_padded=True)\nconv2_result_padded = Conv2(conv1_result_padded, point_hierarchy[1], point_hierarchy[2],conv_radii[1], return_padded=True)\nconv3_result_padded = Conv3(conv2_result_padded, point_hierarchy[2], point_hierarchy[3],conv_radii[2], return_padded=True)\nprint('\\n### feature dimensions padded ###')\nprint('Input: ');print(features_padded.shape)\nprint('Conv1: ');print(conv1_result_padded.shape)\nprint('Conv2: ');print(conv2_result_padded.shape)\nprint('Conv3: ');print(conv3_result_padded.shape)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e74e6c59391d8e62fb74b83290fbef0b4e447c71 | 40,411 | ipynb | Jupyter Notebook | _build/jupyter_execute/Lab1/Lab1.ipynb | Astro-330/Astro-330.github.io | e7ba5d1db0f369a110419e939d9ed2d29c9d7020 | [
"MIT"
] | 4 | 2021-08-28T23:26:14.000Z | 2022-03-27T14:35:17.000Z | _build/jupyter_execute/Lab1/Lab1.ipynb | mgebran/Astro-330.github.io | e7ba5d1db0f369a110419e939d9ed2d29c9d7020 | [
"MIT"
] | null | null | null | _build/jupyter_execute/Lab1/Lab1.ipynb | mgebran/Astro-330.github.io | e7ba5d1db0f369a110419e939d9ed2d29c9d7020 | [
"MIT"
] | 2 | 2021-12-18T00:53:51.000Z | 2022-03-21T14:53:12.000Z | 44.213348 | 1,563 | 0.631635 | [
[
[
"# Lab 1: Overview, Review, and Environments\n\n### Objectives\nIn this lab, we'll \n- Review the computational infrastructure around our data science environments,\n- Go through the process of ensuring that we have a Python environment set up for this class with the proper installed packages\n- Within our environment, we'll review the basic data science operations in Python, and introduce some tips and tricks. \n\n\n```{admonition} Take a Deep Breath\nDon't freak out if stuff presented here is brand new to you! Ask a friend, google around (esp. stack overflow) and find a solution. All of these examples can be done in a few lines of code.\n```\n\n# Part I: Computational Ecosystem \n\nIn the space below, or in your own assignment, answer the following: \n\n## Question A\n\nDescribe the following terms, and point out the differences between them. Feel free to look things up.\n- python:\n- the terminal:\n- the file system:\n- jupyter:\n- an IDE: \n- a text editor:\n- git: \n- PATH: \n",
"_____no_output_____"
],
[
"I am writing this lab in a notebook. We'll be discussing the pros and cons of notebooks in class. Below, I'm going to check which python installation on my computer is being pointed to within my PATH. As a reminder, you can see your full path by echoing it from the terminal. Within a notebook, that looks like this:",
"_____no_output_____"
]
],
[
[
"!echo $PATH",
"/Users/ipasha/anaconda3/bin:/Users/ipasha/anaconda3/condabin:/anaconda3/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/Users/ipasha/anaconda3/bin:.\n"
]
],
[
[
"```{note}\nThe \"!\" in my notebook allows me to run terminal commands from a notebook; you don't need this symbol when running commands in an actual terminal.\n```",
"_____no_output_____"
],
[
"I can check my python as follows:",
"_____no_output_____"
]
],
[
[
"!which python",
"/Users/ipasha/anaconda3/bin/python\n"
]
],
[
[
"We can see that calls to `python` are triggering the python installed in `anaconda3`, which is what we want (see the installation video for more details). If your call to `which python` in the terminal returns something like `usr/bin/python`, then something has likely gone wrong with your installation. There are some troubleshooting steps suggested in the installation video. \n\n## Setting up an Environment. \n\nWe can think of packages as programs installed on our computer. But what if one of my projects needs Photoshop 14.0, and another needs a feature that was only available in Photoshop 12.5.2? An environment is the system on which your code is being executed. When you fire up a terminal, this is usually your *base* environment, the default one for your *user* on a given computer system. But rather than always installing programs in this base installation, we can create custom environments for each of our projects. We can then install the exact dependencies for those projects within our environments, and we'll know they won't mess with each other. \n\nFor this class, we're going to be doing a lot of package installations. To ensure we are all on the same page and are working with the same tools, we're going to use a `conda environment` to maintain versioning. Note: there are multiple environment creation tools/methods. Conda environments are the predominant standard in astronomy, hence their use here. \n\nIn your terminal, type the following:\n```{note}\nIf you are on WINDOWS, you NEED to use the ANACONDA TERMINAL. NOT YOUR WINDOWS POWER SHELL/COMMAND PROMPT. Search your pc for anaconda and you'll see an anaconda launcher (if you followed the installation video correctly). From there, you should be able to find an anaconda terminal/prompt. That's where you should do anything whenever I say \"from your terminal\".\n```\n\n",
"_____no_output_____"
]
],
[
[
"conda create -n a330 python=3.8 ",
"_____no_output_____"
]
],
[
[
"Once you run this, answer \"y\" to the prompts, and your new environment will be installed.\n\n```{note}\nThe above command may take several minutes to execute.\n```\n\n\nNext, we want to activate this environment (still in our terminal). We do this as follows:",
"_____no_output_____"
]
],
[
[
"conda activate a330",
"_____no_output_____"
]
],
[
[
"When you do so, you should see the left hand edge of your prompt switch from (base) to (a330). \n\nNext, let's make an alias so that getting into our a330 environment is a snap. We're going to access a file called `.bash_profile`, which allows us to set aliases and environment variables. This file is located in your home directory, so I can print mine here:",
"_____no_output_____"
]
],
[
[
"!more ~/.bash_profile",
"# >>> conda init >>>\n# !! Contents within this block are managed by 'conda init' !!\n__conda_setup=\"$(CONDA_REPORT_ERRORS=false '/anaconda3/bin/conda' shell.bash hook 2> /dev/null)\"\nif [ $? -eq 0 ]; then\n \\eval \"$__conda_setup\"\nelse\n if [ -f \"/anaconda3/etc/profile.d/conda.sh\" ]; then\n# . \"/anaconda3/etc/profile.d/conda.sh\" # commented out by conda initialize\n CONDA_CHANGEPS1=false conda activate base\n else\n \\export PATH=\"/anaconda3/bin:$PATH\"\n fi\nfi\nunset __conda_setup\n# <<< conda init <<<\nexport BASH_SILENCE_DEPRECATION_WARNING=1\nexport PATH=$PATH:/Users/ipasha/anaconda3/bin\nexport PYTHONPATH=/Users/ipasha/anaconda3/bin/python3.8\nPATH=$PATH:.\nalias python='python3'\n\n\n\u001b[K\u001b[?1l\u001b>/ipasha/.bash_profile\u001b[m\u001b[K\u0007"
]
],
[
[
"Notice above I use the `~` which is a shorthand for home directory. On my computer, the default home directory for my user is `/Users/ipasha/`. \n\n\n\nThis file has some conda stuff in it at the top, as well as some path and python path exports, as well as an alias. \n\nYours should also have the conda init stuff, if you installed anaconda properly. Using your text editor of choice, add a line to this file that reads `alias a330='conda activate a330'`. \n\n```{sidebar} Using Vi/vim\nVi/vim is a built-in terminal program that allows for the editing of files. It is helpful to learn, especially when working on remote servers. We'll go into it more later, but here is a step by step for performing the above step with vim. \n- First: from the terminal, type `vim ~/.bash_profile` and hit enter. This will open the editor. If 'vim' isn't recognized, try 'vi'. \n- Next: Press the \"I\" key to open insert mode. Move your cursor with the arrow keys to the desired line, then type in the alias command shown to left. \n- Finally: Press `esc` to get out of insert mode, then type `:wq` and hit enter in order to \"write\" then \"quit\". \n```\n\n\n\nNow, from your terminal, source your profile by typing `source ~/.bash_profile`. You're good to go! Test that you can activate your environment by typing `a330` and hitting enter. \n\n```{note}\nTo deactivate, just type `conda deactivate`. \n```\n\n",
"_____no_output_____"
],
[
"## Adding Jupyter\n\nYou'll be using notebooks during this class, and we need to make sure that we can access our new environment from within Jupyter notebook. To ensure this, we're going to do the following:\n\nFirst, make sure your environment is activated. \n\nThen, type:",
"_____no_output_____"
]
],
[
[
"conda install -c anaconda ipykernel",
"_____no_output_____"
]
],
[
[
"This ensures we can select different kernels inside jupyter. A kernel is basically \"the thing that is python\", the root thing being run on your system when you use python. By creating environments, we're creating different unique kernels, and we can now get to them within our notebooks. \n\nNow, run the following:",
"_____no_output_____"
]
],
[
[
"python -m ipykernel install --user --name=a330",
"_____no_output_____"
]
],
[
[
"Once you've done this, you should have the ability to access your new environment from within Jupyter. We can test this as follows: \n- First, open a new terminal window, and activate your environment (if you made the alias, this means typing `a330` in your terminal. \n- Next, type `jupyter lab` to open jupyter lab. If for some reason you don't have jupyter lab yet, you can install it now with `conda install -c conda-forge jupyterlab`. \n- Once you have lab open, there should be a 'launcher' page, with one option being to create a new notebook using python -- you *should* see your environment listed there. \n- If you don't hit refresh on the webpage just in case. \n- You can also click on the option to open a python3 notebook. Inside, in the top right corner, it should say your current environment (probably Python 3). Clicking that, it should give you the option to choose a different environment, and your environment should be listed there. \n\n```{note}\nIf you already had a lab open, you'll have to hit refresh to get it to show up. \n```\n## Installing Packages\n\nNow that we have our environment, we're going to install the set of packages we need for this class. We may need more of them as the semester goes on, but for now, do the following (in your terminal, within your environment). ",
"_____no_output_____"
]
],
[
[
"conda install -n a330 numpy scipy astropy matplotlib ",
"_____no_output_____"
]
],
[
[
"(again, hitting \"y\" when prompted). Again, this step might take a minute or so to run.\n\nCongrats, you now have an environment set up for this class, and can jump in and out of it at will, either in your terminal, or within a Jupyter notebook.\n\n```{admonition} Hot Tip\nIt's highly recommended you do these steps anytime you start a new research project. Up front, you may not know all the dependencies that will arise, but as you go along, if you keep your work to that environment, you'll be able to carefully control which versions of which packages you're accessing at all times.\n```\n\n",
"_____no_output_____"
],
[
"# Part II: Python Review\n\nIn this section, I'll ask you to perform some pythonic operations to get back into the swing of things if it has been a little while. \n\nFor this assignment, please carry out your work in a Jupyter notebook, with the questions labeled and your output shown. You'll submit this notebook via Github, but we will discuss how to perform this step in class.",
"_____no_output_____"
],
[
"## Question 1\nCreate a 2D array of dimensions 1000 x 1000, in which the values in each pixel are random-gaussian distributed about a mean of 10, with a sigma of 2, and then use matplotlib to display this image. Make sure (0,0) is in the lower lefthand corner. ",
"_____no_output_____"
]
],
[
[
"# Your Code ",
"_____no_output_____"
]
],
[
[
"## Question 2\n\nThe distribution of pixels in your above image should not have many outliers beyond 3-sigma from the mean, but there will be some. Find the location of any 3-sigma outliers in the image, and highlight them by circling their location. \nConfirm that the fraction of these out of the total number of pixels agrees with the expectation for a normal distribution.",
"_____no_output_____"
]
],
[
[
"# Your Code",
"_____no_output_____"
]
],
[
[
"## Question 3\n\nWhen dealing with astronomical data, it is sometimes advisable to not include outliers in a calculation being performed on a set of data (in this example, an image). We know, of course, that the data we're plotting ARE coming from a gaussian distribution, so there's no reason to exclude, e.g., 3-sigma outliers, but for this example, let's assume we want to. \n\nCreate a numpy masked array in which all pixels that are > 3$\\sigma$ from the image mean are masked. Then, calculate the mean and sigma of the new masked array. ",
"_____no_output_____"
]
],
[
[
"# Your Code",
"_____no_output_____"
]
],
[
[
"Clipping the outliers of this distribution should not affect the mean in any strong way, but should noticably decrease $\\sigma$. ",
"_____no_output_____"
],
[
"## Question 4:\n\nUsing Array indexing, re-plot the same array from above, but zoom in on the inner 20% of the image, such that the full width is 20% of the total. Note: try not to hard code your indexing. You should be able to flexibly change the percentage. For this one, use a white-to-black color map.\n",
"_____no_output_____"
]
],
[
[
"# Your Code",
"_____no_output_____"
]
],
[
[
"Your image should now be 200 by 200 pixels across. Note that your new image has its own indexing. A common \"gotcha\" when working with arrays like this is to index in, but then try to use indices found (e.g., via `where()`) in the larger array on the cropped in version, which can lead to errors.",
"_____no_output_____"
],
[
"## Question 5\n\nOften, we have an expression to calculate of the form \n\n$$\n\\sum_i \\sum_j a_i b_j\n$$",
"_____no_output_____"
],
[
"Your natural impulse for coding this double sum might look like this:",
"_____no_output_____"
]
],
[
[
"total = 0\nfor i in a:\n for j in b:\n total+= i*j",
"_____no_output_____"
]
],
[
[
"which, mathematically, makes sense! But as it turns out, there's a way we can do this without any loops at all --- and when $\\vec{a}$ and $\\vec{b}$ get long, this becomes hugely important in our code.\n\nThe trick we're going to use here is called [array broadcasting](https://numpy.org/doc/stable/user/basics.broadcasting.html), which you can read about at the link if you're not already familar. I'm going to give you $\\vec{a}$ and $\\vec{b}$ below. For this exercise, calculate the double sum indicated above without the use of a for-loop. Check that your code worked by using the slow double-loop method.\n\n```{hint}\nThe command `np.newaxis` will be useful here, or for a slightly longer solution, try `np.repeat` and `reshape()`. \n```",
"_____no_output_____"
]
],
[
[
"a = np.array([1,5,10,20])\nb = np.array([1,2,4,16])\n# Your Code",
"_____no_output_____"
]
],
[
[
"```{tip}\nIf you're familiar with the jupyter magic command `%%timeit`, try timing your loop vs non-loop solutions with a longer list (say, 5000 random numbers in $\\vec{a}$ and $\\vec{b}$). How much faster is the non-loop?\n```\n",
"_____no_output_____"
],
[
"## Question 6\n\nOften in astronomy we need to work with grids of values. For example, let's say we have a model that describes some data, and the model has 2 parameters, $a$ and $b$.\n\nWe might choose different combinations of $a$ and $b$, and determine a metric for how well models of such combinations fit our data (e.g., $\\chi^2$). \n\nWe may then want to plot this $\\chi^2$ value for each point on our grid -- that is, at each grid position corresponding to some $a_i$ and $b_j$. \n\nBelow, I provide a function, `chi2`, which returns a single number given some singular inputs `a` and `b`. \n\nCreate some arrays of `a` and `b` to test that range between 1 and 25, and have 10 entries evenly spaced between those values. Then, loop over them and find the $\\chi^2$ using my function. \n```{note}\nWe can't get around the double loop in this case, because we are operating under the assumption that the calculation of some single $\\chi^2$ using a unique combination of $a_i$ and $b_j$ cannot be vectorized. If it could, we wouldn't need to do this activity. But often, we can't, because the creation of a model given some inputs is nontrivial.\n```\n\nOnce you've stored the $\\chi^2$ values for each combination of $a$ and $b$, create a plot with $a$ and $b$ as the axes and show using colored circles the $\\chi^2$ value at each location. Add a colorbar to see the values being plotted. \n\nTo create this grid, use the `np.meshgrid()` function. For your plot, make sure the marker size is big enough to see the colors well. \n\n\n\n",
"_____no_output_____"
]
],
[
[
"def chi2(a,b):\n return ((15-a)**2+(12-b)**2)**0.2 #note, this is nonsense, but should return a different value for each input a,b\n\n# Your Code \n",
"_____no_output_____"
]
],
[
[
"## Question 7 \n\nRe-show your final plot above, making the following changes:\n\n- label your colorbar as $\\chi^2$ using latex notation, with a fontsize>13\n- Make your ticks point inward and be longer\n- Make your ticks appear on the top and right hand axes of the plot as well \n- If you didn't already, label the x and y axes appropriately and with a font size > 13 \n- Make sure the numbers along the axes have fontsizes > 13\n",
"_____no_output_____"
]
],
[
[
"# Your Code ",
"_____no_output_____"
]
],
[
[
"## Question 8\n\nSome quick list comprehensions! For any unfamilar, **comprehensions** are pythonic statements that allow you to compress a for-loop (generally) into a single line, and usually runs faster than a full loop (but not by a ton). \n\nTake the for-loop below and write it as a list comprehension.",
"_____no_output_____"
]
],
[
[
"visited_cities = ['San Diego', 'Boston', 'New York City','Atlanta']\nall_cities = ['San Diego', 'Denver', 'Boston', 'Portland', 'New York City', 'San Francisco', 'Atlanta']\n\nnot_visited = []\nfor city in all_cities:\n if city not in visited_cities:\n not_visited.append(city)\n \nprint(not_visited)",
"['Denver', 'Portland', 'San Francisco']\n"
],
[
"# Your Code",
"_____no_output_____"
]
],
[
[
"Next, create an array of integers including 1 through 30, inclusive. Using a comprehension, create a numpy array containing the squared value of only the odd numbers in your original array. (*Hint, remember the modulo operator*)",
"_____no_output_____"
]
],
[
[
"# Your Code",
"_____no_output_____"
]
],
[
[
"In the next example, you have a list of first names and a list of last names. Use a list comprehension to create an array that is a list of full names (with a space between first and last names). ",
"_____no_output_____"
]
],
[
[
"first_names = ['Bob','Samantha','John','Renee']\nlast_names = ['Smith','Bee','Oliver','Carpenter']\n\n# Your Code",
"_____no_output_____"
]
],
[
[
"```{admonition} Challenge Problem (worth Extra Credit) \nI've created new lists that contain strings of the names in the format Lastname,Firstname, with random leading/trailing spaces and terrible capitalizations. Use a list comprehension to make our nice, \"Firstname Lastname\" list again.\n```",
"_____no_output_____"
]
],
[
[
"all_names = ['sMitH,BoB ', ' bee,samanthA',' oLIVER,JOHN ',' caRPENTer,reneE ']\n\n# Your Code \n",
"_____no_output_____"
]
],
[
[
"```{note}\nNote that with this last example, we're entering a degree of single-line length and complexity that it almost doesn't make sense to use a comprehension anymore. Just because something CAN be done in one line doesn't mean is has to be, or should be.\n\n```\n\nYou may be wondering what use case this type of coding has in astronomy -- turns out, quite a lot. Take this example: you read in a data table and the columns have names like \"FLUX HA\", \"FLUX ERR\", etc. \n\nIf you're trying to make a `pandas` `DataFrame` of this table, it is advantageous to rename these columns something like `flux_ha` and `flux_err`. This way, commands like `df.flux_ha` can be used. \n\nBeing able to iterate over the string list of column names and turn caps into lower case, spaces into underscores, etc., is a useful skill that will come in handy when wrangling data. \n\nIn the solutions, I will show how I myself would do the above example in production code:",
"_____no_output_____"
],
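[
"Below is a hedged sketch of what that production-style version could look like; the helper name `clean_name` and its exact behavior are illustrative, not the official solution:\n\n```python\ndef clean_name(raw, sep=','):\n    # hypothetical helper: strip stray whitespace, fix capitalization, return 'First Last'\n    last, first = [part.strip().capitalize() for part in raw.split(sep)]\n    return first + ' ' + last\n\ncleaned = [clean_name(name) for name in all_names]\n```",
"_____no_output_____"
],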
[
"By making the string cleaning steps a function, I could take the time to explain what is going on within the function, as well as control for additional possibilities (like the names being in First,Last formatting). I could make this function more robust and complex, and my final comprehension stays readable and simple, as I loop over the names and run each through my handy functions (with some settings tweaked, potentially). ",
"_____no_output_____"
],
[
"## Question 9 \n\nTake the arrays `XX`, `YY`, and `ZZ` below and create one multidimensional array in which they are the columns. Print to confirm this worked.",
"_____no_output_____"
]
],
[
[
"XX = np.array([1,2,3,4,5,6,7,8,9])\nYY = np.array([5,6,7,8,9,10,11,12,13])\nZZ = np.array([10,11,12,13,14,15,16,17,18])\n\n# Your Code",
"_____no_output_____"
]
],
[
[
"## Question 10 \n\nUnits, units, units. The bane of every scientists' existence... except theorists that set every constant equal to 1. \n\nIn the real world, we measure fluxes or magnitudes in astronomical images, infer temperatures and densities from data and simulations, and ultimately have to deal with units one way or another. \n\nThankfully, our friends at `astropy` know this, and they've come to save the day. This next question serves as an introduction to the `units` module in astropy, which can be both a live saver and a pain in the ass, but at the end of the day is absolutely worth learning.",
"_____no_output_____"
]
],
[
[
"import astropy.units as u",
"_____no_output_____"
]
],
[
[
"The standard import for this library is `u`, so be careful not to name any variables that letter. \n\nTo \"assign\" units to a variable, we multiply by the desired unit as follows. Note that generally the module knows several aliases/common abrreviations for a unit, if it is uniquely identifiable.",
"_____no_output_____"
]
],
[
[
"star_temp = 5000*u.K \nstar_radius = 0.89 * u.Rsun \nstar_mass = 0.6 * u.Msun",
"_____no_output_____"
]
],
[
[
"We can perform trivial conversions using the `.to()` method.",
"_____no_output_____"
]
],
[
[
"star_radius.to(u.km)",
"_____no_output_____"
]
],
[
[
"Once we attach units to something, it is now a `Quantity` object. Quantity objects are great, above, we saw they have built-in methods to facilitate conversion. They can also be annoying -- sometimes another function we've written needs just the raw value or array back out. To get this, we use the `.value` attribute of a quantity object:",
"_____no_output_____"
]
],
[
[
"star_mass.to(u.kg).value",
"_____no_output_____"
]
],
[
[
"This now strips away all `Quantity` stuff and gives us an array or value to use elsewhere in our code. \n\nUnits are great because they help us combine quantities while tracking units and dimensional analysis. A common operation in astronomy is converting a flux to a luminosity given a distance, using \n\n$$\nF = \\frac{L}{4\\pi D^2}\n$$\nwhere $L$ is the luminosity and $D$ is the distance to the source. \n\nWhat if I've made a flux measurement in astronomical units such as erg/s/cm$^2$, and I want to know the luminosity in solar luminosities, and my distance happens to be in Mpc? Regardless of my input units, I can easily do this:",
"_____no_output_____"
]
],
[
[
"L = 4 * np.pi * (3.6*u.Mpc)**2 * (7.5e-14 * u.erg/u.s/u.cm**2)\nL.to(u.Lsun)",
"_____no_output_____"
]
],
[
[
"This conversion worked because the units worked out. If my units of flux weren't correct, I'd get an error:",
"_____no_output_____"
]
],
[
[
"L = 4 * np.pi * (3.6*u.Mpc)**2 * (7.5e-14 * u.erg/u.s/u.cm**2/u.AA)\nL.to(u.Lsun)",
"_____no_output_____"
]
],
[
[
"Here, `units` realized that I was putting in units of flux density, but wanted a luminosity out, and ultimately those units don't resolve out. Thus, it can be a great way to catch errors in your inputs. \n\nNote: just be careful that sometimes, you throw a constant into an equation but the constant has some units. If you're going to use the unit module to do a calculation, ALL inputs that HAVE units must be assigned them correctly as above for it to work.",
"_____no_output_____"
],
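[
"For example, here is a minimal sketch (assuming `numpy` has already been imported as `np`; the specific calculation is just an illustration) of mixing an `astropy` constant, which carries its own units, with `Quantity` inputs:\n\n```python\nfrom astropy import constants as const\n\n# circular orbital speed at 1 AU around a 1 Msun star; const.G carries units of m^3 / (kg s^2)\nv_circ = np.sqrt(const.G * 1 * u.Msun / (1 * u.AU))\nprint(v_circ.to(u.km / u.s))\n```",
"_____no_output_____"
],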
[
"For your exercise, consider the following: \n \nThe virial temperature of a galaxy halo is given roughly by \n\n$$\nT_{\\rm vir} \\simeq 5.6\\times10^4\\;\\textrm{K}\\left(\\frac{\\mu}{0.59}\\right)\\left(\\frac{M_{\\rm halo}}{10^{10}\\; M_{\\odot}}\\right)^{2/3}\\left(\\frac{1+z}{4}\\right)\n$$",
"_____no_output_____"
],
[
"where here, we can assume $\\mu$ is 0.59. \n\nWrite a function that takes as an input a halo mass, redshift, and optionally $\\mu$ (default 0.59), and returns the virial temperature in Kelvin. Your function should take in an astropy quantity with mass units, but should allow for the mass to be input with any appropriate units. ",
"_____no_output_____"
]
],
[
[
"# Your Code",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
e74e6ddc7692b4cce8bf55417f776f957aa82caf | 8,370 | ipynb | Jupyter Notebook | jupyter/load_pytorch_model.ipynb | sreev/djl | 0275f05abb679f325df21aff1d5925ed1fa6417c | [
"Apache-2.0"
] | 1 | 2020-04-30T07:06:27.000Z | 2020-04-30T07:06:27.000Z | jupyter/load_pytorch_model.ipynb | sreev/djl | 0275f05abb679f325df21aff1d5925ed1fa6417c | [
"Apache-2.0"
] | null | null | null | jupyter/load_pytorch_model.ipynb | sreev/djl | 0275f05abb679f325df21aff1d5925ed1fa6417c | [
"Apache-2.0"
] | null | null | null | 33.614458 | 276 | 0.609558 | [
[
[
"\n# Load PyTorch model\n\nIn this tutorial, you learn how to load an existing PyTorch model and use it to run a prediction task.\n\nWe will run the inference in DJL way with [example](https://pytorch.org/hub/pytorch_vision_resnet/) on the pytorch official website.\n\n\n## Preparation\n\nThis tutorial requires the installation of Java Kernel. For more information on installing the Java Kernel, see the [README](https://github.com/awslabs/djl/blob/master/jupyter/README.md).",
"_____no_output_____"
]
],
[
[
"// %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/\n\n%maven ai.djl:api:0.4.0\n%maven ai.djl:repository:0.4.0\n%maven ai.djl.pytorch:pytorch-engine:0.4.0\n%maven org.slf4j:slf4j-api:1.7.26\n%maven org.slf4j:slf4j-simple:1.7.26\n%maven net.java.dev.jna:jna:5.3.0\n \n// See https://github.com/awslabs/djl/blob/master/pytorch/pytorch-engine/README.md\n// for more PyTorch library selection options\n%maven ai.djl.pytorch:pytorch-native-auto:1.4.0",
"_____no_output_____"
],
[
"import java.awt.image.*;\nimport ai.djl.*;\nimport ai.djl.inference.*;\nimport ai.djl.modality.*;\nimport ai.djl.modality.cv.*;\nimport ai.djl.modality.cv.util.*;\nimport ai.djl.modality.cv.transform.*;\nimport ai.djl.modality.cv.translator.*;\nimport ai.djl.repository.zoo.*;\nimport ai.djl.translate.*;\nimport ai.djl.training.util.*;",
"_____no_output_____"
]
],
[
[
"## Step 1: Prepare your model\n\nThis tutorial assumes that you have a TorchScript model.\nDJL only supports the TorchScript format for loading models from PyTorch, so other models will need to be [converted](https://github.com/awslabs/djl/blob/master/docs/pytorch/how_to_convert_your_model_to_torchscript.md).\nA TorchScript model includes the model structure and all of the parameters.\n\nWe will be using a pre-trained `resnet18` model. First, use the `DownloadUtils` to download the model files and save them in the `build/pytorch_models` folder",
"_____no_output_____"
]
],
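[
[
"If you are starting from a regular PyTorch model instead, the Python-side conversion is typically a short tracing step. The snippet below is only a rough sketch of that conversion (it is not part of this Java tutorial):\n\n```python\nimport torch\nimport torchvision\n\n# illustrative only: trace torchvision's resnet18 and save it in TorchScript format\nmodel = torchvision.models.resnet18(pretrained=True).eval()\nexample = torch.rand(1, 3, 224, 224)\ntraced = torch.jit.trace(model, example)\ntraced.save(\"traced_resnet18.pt\")\n```",
"_____no_output_____"
]
],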
[
[
"DownloadUtils.download(\"https://djl-ai.s3.amazonaws.com/mlrepo/model/cv/image_classification/ai/djl/pytorch/resnet/0.0.1/traced_resnet18.pt.gz\", \"build/pytorch_models/resnet18/resnet18.pt\", new ProgressBar());",
"_____no_output_____"
]
],
[
[
"In order to do image classification, you will also need the synset.txt which stores the classification class labels. We will need the synset containing the Imagenet labels with which resnet18 was originally trained.",
"_____no_output_____"
]
],
[
[
"DownloadUtils.download(\"https://djl-ai.s3.amazonaws.com/mlrepo/model/cv/image_classification/ai/djl/pytorch/synset.txt\", \"build/pytorch_models/resnet18/synset.txt\", new ProgressBar());",
"_____no_output_____"
]
],
[
[
"## Step 2: Create a Translator\n\nWe will create a transformation pipeline which maps the transforms shown in the [PyTorch example](https://pytorch.org/hub/pytorch_vision_resnet/).\n```python\n...\npreprocess = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n])\n...\n```\n\nThen, we will use this pipeline to create the [`Translator`](https://javadoc.djl.ai/api/0.4.0/index.html?ai/djl/translate/Translator.html)",
"_____no_output_____"
]
],
[
[
"Pipeline pipeline = new Pipeline();\npipeline.add(new Resize(256))\n .add(new CenterCrop(224, 224))\n .add(new ToTensor())\n .add(new Normalize(\n new float[] {0.485f, 0.456f, 0.406f},\n new float[] {0.229f, 0.224f, 0.225f}));\n\nTranslator<BufferedImage, Classifications> translator = ImageClassificationTranslator.builder()\n .setPipeline(pipeline)\n .setSynsetArtifactName(\"synset.txt\")\n .optApplySoftmax(true)\n .build();",
"_____no_output_____"
]
],
[
[
"## Step 3: Load your model\n\nNext, we will set the model zoo location to the `build/pytorch_models` directory we saved the model to. You can also create your own [`Repository`](https://javadoc.djl.ai/repository/0.4.0/index.html?ai/djl/repository/Repository.html) to avoid manually managing files.\n\nNext, we add some search criteria to find the resnet18 model and load it.",
"_____no_output_____"
]
],
[
[
"// Search for models in the build/pytorch_models folder\nSystem.setProperty(\"ai.djl.repository.zoo.location\", \"build/pytorch_models\");\n\nCriteria<BufferedImage, Classifications> criteria = Criteria.builder()\n .setTypes(BufferedImage.class, Classifications.class)\n // only search the model in local directory\n .optArtifactId(\"ai.djl.localmodelzoo:resnet18\")\n .optTranslator(translator)\n .optProgress(new ProgressBar()).build();\n\nZooModel model = ModelZoo.loadModel(criteria);",
"_____no_output_____"
]
],
[
[
"## Step 4: Load image for classification\n\nWe will use a sample dog image to run our prediction on.",
"_____no_output_____"
]
],
[
[
"var img = BufferedImageUtils.fromUrl(\"https://github.com/pytorch/hub/raw/master/dog.jpg\");\nimg",
"_____no_output_____"
]
],
[
[
"## Step 5: Run inference\n\nLastly, we will need to create a predictor using our model and translator. Once we have a predictor, we simply need to call the predict method on our test image.",
"_____no_output_____"
]
],
[
[
"Predictor<BufferedImage, Classifications> predictor = model.newPredictor();\nClassifications classifications = predictor.predict(img);\n\nclassifications",
"_____no_output_____"
]
],
[
[
"## Summary\n\nNow, you can load any TorchScript model and run inference using it.\n\nYou might also want to check out [load_mxnet_model.ipynb](https://github.com/awslabs/djl/blob/master/jupyter/load_mxnet_model.ipynb) which demonstrates loading a local model directly instead of through the Model Zoo API.\nTo optimize inference performance, you might check out [how_to_optimize_inference_performance](https://github.com/awslabs/djl/blob/master/docs/pytorch/how_to_optimize_inference_performance.md).",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e74e7306edc22478abe5818b1feda50c6a15c00a | 42,554 | ipynb | Jupyter Notebook | Python/Pandas_Practice/3_Occupation_Exercise.ipynb | gurher/TID | 859ac10cd9941b47752f986eb48fe7069f56e42c | [
"MIT"
] | null | null | null | Python/Pandas_Practice/3_Occupation_Exercise.ipynb | gurher/TID | 859ac10cd9941b47752f986eb48fe7069f56e42c | [
"MIT"
] | null | null | null | Python/Pandas_Practice/3_Occupation_Exercise.ipynb | gurher/TID | 859ac10cd9941b47752f986eb48fe7069f56e42c | [
"MIT"
] | null | null | null | 35.343854 | 247 | 0.27478 | [
[
[
"<a href=\"https://colab.research.google.com/github/gurher/Pandas/blob/main/Pandas_Practice/3_Occupation_Exercise.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Occupation",
"_____no_output_____"
],
[
"### Introduction:\n\nSpecial thanks to: https://github.com/justmarkham for sharing the dataset and materials.\n\n### Step 1. Import the necessary libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user). ",
"_____no_output_____"
],
[
"### Step 3. Assign it to a variable called users.",
"_____no_output_____"
]
],
[
[
"url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user'\n\nusers = pd.read_csv(url, sep = '|', index_col = 'user_id',)\n\nusers.head(5)",
"_____no_output_____"
]
],
[
[
"#### Step 3.1) Check the columns\n",
"_____no_output_____"
]
],
[
[
"users.columns.ravel()",
"_____no_output_____"
]
],
[
[
"### Step 4. Discover what is the mean age per occupation",
"_____no_output_____"
]
],
[
[
"users.groupby('occupation')[['age']].mean()",
"_____no_output_____"
]
],
[
[
"### Step 5. Discover the Male ratio per occupation and sort it from the most to the least",
"_____no_output_____"
]
],
[
[
"# users.groupby(['occupation','gender'])[['user_id']].count()\n\n\n\ntemp = users.groupby(['occupation','gender'])['user_id'].count()\n\n(temp.loc[temp.index.get_level_values('gender')=='M'] / users.groupby('occupation')['user_id'].count()).sort_values(ascending=False)",
"_____no_output_____"
]
],
[
[
"### Step 6. For each occupation, calculate the minimum and maximum ages",
"_____no_output_____"
]
],
[
[
"users.groupby('occupation')[['age']].min()\nusers.groupby('occupation')[['age']].max()",
"_____no_output_____"
]
],
[
[
"### Step 7. For each combination of occupation and gender, calculate the mean age",
"_____no_output_____"
]
],
[
[
"users.groupby(['occupation','gender']).age.agg(['mean','max','min'])",
"_____no_output_____"
]
],
[
[
"### Step 8. For each occupation present the percentage of women and men",
"_____no_output_____"
]
],
[
[
"temp = users.groupby(['occupation', 'gender']).agg({'gender':'count'})\ntemp1 = users.groupby('occupation').agg({'gender':'count'})\n\ntemp['ratio'] = temp / temp1\ntemp\n\n\n# # create a data frame and apply count to gender\n# gender_ocup = users.groupby(['occupation', 'gender']).agg({'gender': 'count'})\n\n# # create a DataFrame and apply count for each occupation\n# occup_count = users.groupby(['occupation']).agg('count')\n\n# # divide the gender_ocup per the occup_count and multiply per 100\n# occup_gender = gender_ocup.div(occup_count, level = \"occupation\") * 100\n\n# # present all rows from the 'gender column'\n# occup_gender.loc[: , 'gender']",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74e77128d00a172a592394d103b82d852cb0141 | 162,440 | ipynb | Jupyter Notebook | notebooks/ch-algorithms/deutsch-jozsa.ipynb | kifumi/platypus | 619f0842a207ba19088728895736f02d905d5aa2 | [
"Apache-2.0"
] | 12 | 2022-01-07T17:21:42.000Z | 2022-03-23T12:53:16.000Z | notebooks/ch-algorithms/deutsch-jozsa.ipynb | kifumi/platypus | 619f0842a207ba19088728895736f02d905d5aa2 | [
"Apache-2.0"
] | 477 | 2022-01-06T17:05:03.000Z | 2022-03-31T15:46:10.000Z | notebooks/ch-algorithms/deutsch-jozsa.ipynb | kifumi/platypus | 619f0842a207ba19088728895736f02d905d5aa2 | [
"Apache-2.0"
] | 21 | 2022-01-06T18:59:42.000Z | 2022-03-31T07:44:03.000Z | 149.714286 | 22,991 | 0.64241 | [
[
[
"# Deutsch-Jozsa Algorithm",
"_____no_output_____"
],
[
"In this section, we first introduce the Deutsch-Jozsa problem, and classical and quantum algorithms to solve it. We then implement the quantum algorithm using Qiskit, and run it on a simulator and device.",
"_____no_output_____"
],
[
"## 1. Introduction <a id='introduction'></a>",
"_____no_output_____"
],
[
"The Deutsch-Jozsa algorithm, first introduced in Reference [1], was the first example of a quantum algorithm that performs better than the best classical algorithm. It showed that there can be advantages to using a quantum computer as a computational tool for a specific problem.",
"_____no_output_____"
],
[
"### 1.1 Deutsch-Jozsa Problem <a id='djproblem'> </a>\n\nWe are given a hidden Boolean function $f$, which takes as input a string of bits, and returns either $0$ or $1$, that is:\n\n$$\nf(\\{x_0,x_1,x_2,...\\}) \\rightarrow 0 \\textrm{ or } 1 \\textrm{ , where } x_n \\textrm{ is } 0 \\textrm{ or } 1$$\n\nThe property of the given Boolean function is that it is guaranteed to either be balanced or constant. A constant function returns all $0$'s or all $1$'s for any input, while a balanced function returns $0$'s for exactly half of all inputs and $1$'s for the other half. Our task is to determine whether the given function is balanced or constant. \n\nNote that the Deutsch-Jozsa problem is an $n$-bit extension of the single bit Deutsch problem. \n\n### 1.2 The Classical Solution <a id='classical-solution'> </a>\n\nClassically, in the best case, two queries to the oracle can determine if the hidden Boolean function, $f(x)$, is balanced: \ne.g. if we get both $f(0,0,0,...)\\rightarrow 0$ and $f(1,0,0,...) \\rightarrow 1$, then we know the function is balanced as we have obtained the two different outputs. \n\nIn the worst case, if we continue to see the same output for each input we try, we will have to check exactly half of all possible inputs plus one in order to be certain that $f(x)$ is constant. Since the total number of possible inputs is $2^n$, this implies that we need $2^{n-1}+1$ trial inputs to be certain that $f(x)$ is constant in the worst case. For example, for a $4$-bit string, if we checked $8$ out of the $16$ possible combinations, getting all $0$'s, it is still possible that the $9^\\textrm{th}$ input returns a $1$ and $f(x)$ is balanced. Probabilistically, this is a very unlikely event. In fact, if we get the same result continually in succession, we can express the probability that the function is constant as a function of $k$ inputs as:\n\n\n\n$$ P_\\textrm{constant}(k) = 1 - \\frac{1}{2^{k-1}} \\qquad \\textrm{for } 1 < k \\leq 2^{n-1}$$\n\n\n\nRealistically, we could opt to truncate our classical algorithm early, say if we were over x% confident. But if we want to be 100% confident, we would need to check $2^{n-1}+1$ inputs.",
"_____no_output_____"
],
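[
"As a quick numerical aside (not part of the original text), we can tabulate how this confidence grows with the number $k$ of identical outcomes seen in a row:\n\n```python\n# illustrative helper: P(f is constant) after k identical outputs in a row\ndef p_constant(k):\n    return 1 - 1 / 2 ** (k - 1)\n\nfor k in [2, 4, 8, 10]:\n    print(k, p_constant(k))\n```",
"_____no_output_____"
],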
[
"### 1.3 Quantum Solution <a id='quantum-solution'> </a>\n\nUsing a quantum computer, we can solve this problem with 100% confidence after only one call to the function $f(x)$, provided we have the function $f$ implemented as a quantum oracle, which maps the state $\\vert x\\rangle \\vert y\\rangle $ to $ \\vert x\\rangle \\vert y \\oplus f(x)\\rangle$, where $\\oplus$ is addition modulo $2$. Below is the generic circuit for the Deutsch-Jozsa algorithm.\n\n\n\nNow, let's go through the steps of the algorithm:\n\n<ol>\n <li>\n Prepare two quantum registers. The first is an $n$-qubit register initialized to $|0\\rangle$, and the second is a one-qubit register initialized to $|1\\rangle$:\n \n\n$$\\vert \\psi_0 \\rangle = \\vert0\\rangle^{\\otimes n} \\vert 1\\rangle$$\n\n\n </li>\n \n <li>\n Apply a Hadamard gate to each qubit:\n \n\n$$\\vert \\psi_1 \\rangle = \\frac{1}{\\sqrt{2^{n+1}}}\\sum_{x=0}^{2^n-1} \\vert x\\rangle \\left(|0\\rangle - |1 \\rangle \\right)$$\n\n\n </li>\n \n <li>\n Apply the quantum oracle $\\vert x\\rangle \\vert y\\rangle$ to $\\vert x\\rangle \\vert y \\oplus f(x)\\rangle$:\n $$\n \\begin{aligned}\n \\lvert \\psi_2 \\rangle \n & = \\frac{1}{\\sqrt{2^{n+1}}}\\sum_{x=0}^{2^n-1} \\vert x\\rangle (\\vert f(x)\\rangle - \\vert 1 \\oplus f(x)\\rangle) \\\\ \n & = \\frac{1}{\\sqrt{2^{n+1}}}\\sum_{x=0}^{2^n-1}(-1)^{f(x)}|x\\rangle ( |0\\rangle - |1\\rangle ) \n \\end{aligned}\n $$\n \nsince for each $x,f(x)$ is either $0$ or $1$.\n </li>\n\n <li>\n At this point the second single qubit register may be ignored. Apply a Hadamard gate to each qubit in the first register:\n $$\n \\begin{aligned}\n \\lvert \\psi_3 \\rangle \n & = \\frac{1}{2^n}\\sum_{x=0}^{2^n-1}(-1)^{f(x)}\n \\left[ \\sum_{y=0}^{2^n-1}(-1)^{x \\cdot y} \n \\vert y \\rangle \\right] \\\\\n & = \\frac{1}{2^n}\\sum_{y=0}^{2^n-1}\n \\left[ \\sum_{x=0}^{2^n-1}(-1)^{f(x)}(-1)^{x \\cdot y} \\right]\n \\vert y \\rangle\n \\end{aligned}\n $$\n \nwhere $x \\cdot y = x_0y_0 \\oplus x_1y_1 \\oplus \\ldots \\oplus x_{n-1}y_{n-1}$ is the sum of the bitwise product.\n </li>\n\n <li>\n Measure the first register. Notice that the probability of measuring $\\vert 0 \\rangle ^{\\otimes n} = \\lvert \\frac{1}{2^n}\\sum_{x=0}^{2^n-1}(-1)^{f(x)} \\rvert^2$, which evaluates to $1$ if $f(x)$ is constant and $0$ if $f(x)$ is balanced. \n </li>\n\n</ol>\n\n### 1.4 Why Does This Work? <a id='why-does-this-work'> </a>\n\n- **Constant Oracle**\n\nWhen the oracle is *constant*, it has no effect (up to a global phase) on the input qubits, and the quantum states before and after querying the oracle are the same. Since the H-gate is its own inverse, in Step 4 we reverse Step 2 to obtain the initial quantum state of $|00\\dots 0\\rangle$ in the first register.\n\n$$\nH^{\\otimes n}\\begin{bmatrix} 1 \\\\ 0 \\\\ 0 \\\\ \\vdots \\\\ 0 \\end{bmatrix} \n= \n\\tfrac{1}{\\sqrt{2^n}}\\begin{bmatrix} 1 \\\\ 1 \\\\ 1 \\\\ \\vdots \\\\ 1 \\end{bmatrix}\n\\quad \\xrightarrow{\\text{after } U_f} \\quad\nH^{\\otimes n}\\tfrac{1}{\\sqrt{2^n}}\\begin{bmatrix} 1 \\\\ 1 \\\\ 1 \\\\ \\vdots \\\\ 1 \\end{bmatrix}\n= \n\\begin{bmatrix} 1 \\\\ 0 \\\\ 0 \\\\ \\vdots \\\\ 0 \\end{bmatrix} \n$$\n\n- **Balanced Oracle**\n\nAfter step 2, our input register is an equal superposition of all the states in the computational basis. 
When the oracle is *balanced*, phase kickback adds a negative phase to exactly half these states:\n\n$$\nU_f \\tfrac{1}{\\sqrt{2^n}}\\begin{bmatrix} 1 \\\\ 1 \\\\ 1 \\\\ \\vdots \\\\ 1 \\end{bmatrix} \n= \n\\tfrac{1}{\\sqrt{2^n}}\\begin{bmatrix} -1 \\\\ 1 \\\\ -1 \\\\ \\vdots \\\\ 1 \\end{bmatrix}\n$$\n\n\nThe quantum state after querying the oracle is orthogonal to the quantum state before querying the oracle. Thus, in Step 4, when applying the H-gates, we must end up with a quantum state that is orthogonal to $|00\\dots 0\\rangle$. This means we should never measure the all-zero state. \n",
"_____no_output_____"
],
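[
"As a small added sanity check (this cell is an aside, not part of the original text), we can reproduce the two cases above numerically for $n = 2$, treating the oracle as the diagonal phase operator it induces on the input register:\n\n```python\nimport numpy as np\n\nH = np.array([[1, 1], [1, -1]]) / np.sqrt(2)\nH2 = np.kron(H, H)\n\n# the induced phase oracles on two input qubits\nphase_oracles = {'constant': np.diag([1, 1, 1, 1]),    # f(x) = 0 for every x\n                 'balanced': np.diag([1, -1, -1, 1])}  # f(x) = x0 XOR x1\n\nket00 = np.array([1.0, 0.0, 0.0, 0.0])\nfor name, U in phase_oracles.items():\n    amp00 = (H2 @ U @ H2 @ ket00)[0]\n    print(name, 'P(00) =', round(abs(amp00) ** 2, 10))\n```",
"_____no_output_____"
],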
[
"## 2. Worked Example <a id='example'></a>\n\nLet's go through a specific example for a two bit balanced function: \n\nConsider a two-bit function $f(x_0,x_1)=x_0 \\oplus x_1$ such that \n\n$f(0,0)=0$\n\n$f(0,1)=1$\n\n$f(1,0)=1$\n\n$f(1,1)=0$\n\nThe corresponding phase oracle of this two-bit oralce is $U_f \\lvert x_1, x_0 \\rangle = (-1)^{f(x_1, x_0)}\\lvert x \\rangle$\n\nWe will now check if this oracle works as expected by taking a example state\n$$\\lvert \\psi_0 \\rangle = \\lvert 0 0 \\rangle_{01} \\otimes \\lvert 1 \\rangle_{2} $$\n\n<ol>\n <li> The first register of two qubits is initialized to $|00\\rangle$ and the second register qubit to $|1\\rangle$ \n \n(Note that we are using subscripts 0, 1, and 2 to index the qubits. A subscript of \"01\" indicates the state of the register containing qubits 0 and 1)\n \n\n$$\\lvert \\psi_0 \\rangle = \\lvert 0 0 \\rangle_{01} \\otimes \\lvert 1 \\rangle_{2} $$\n\n \n </li>\n \n <li> Apply Hadamard on all qubits\n \n\n$$\\lvert \\psi_1 \\rangle = \\frac{1}{2} \\left( \\lvert 0 0 \\rangle + \\lvert 0 1 \\rangle + \\lvert 1 0 \\rangle + \\lvert 1 1 \\rangle \\right)_{01} \\otimes \\frac{1}{\\sqrt{2}} \\left( \\lvert 0 \\rangle - \\lvert 1 \\rangle \\right)_{2} $$\n\n \n </li>\n \n <li> The oracle function can be implemented as $\\text{Q}_f = CX_{02}CX_{12}$, \n $$\n \\begin{aligned}\n \\lvert \\psi_2 \\rangle = \\frac{1}{2\\sqrt{2}} \\left[ \\lvert 0 0 \\rangle_{01} \\otimes \\left( \\lvert 0 \\oplus 0 \\oplus 0 \\rangle - \\lvert 1 \\oplus 0 \\oplus 0 \\rangle \\right)_{2} \\\\\n + \\lvert 0 1 \\rangle_{01} \\otimes \\left( \\lvert 0 \\oplus 0 \\oplus 1 \\rangle - \\lvert 1 \\oplus 0 \\oplus 1 \\rangle \\right)_{2} \\\\\n + \\lvert 1 0 \\rangle_{01} \\otimes \\left( \\lvert 0 \\oplus 1 \\oplus 0 \\rangle - \\lvert 1 \\oplus 1 \\oplus 0 \\rangle \\right)_{2} \\\\\n + \\lvert 1 1 \\rangle_{01} \\otimes \\left( \\lvert 0 \\oplus 1 \\oplus 1 \\rangle - \\lvert 1 \\oplus 1 \\oplus 1 \\rangle \\right)_{2} \\right]\n \\end{aligned}\n $$\n </li>\n \n <li>Simplifying this, we get the following: \n $$\n \\begin{aligned}\n \\lvert \\psi_2 \\rangle & = \\frac{1}{2\\sqrt{2}} \\left[ \\lvert 0 0 \\rangle_{01} \\otimes \\left( \\lvert 0 \\rangle - \\lvert 1 \\rangle \\right)_{2} - \\lvert 0 1 \\rangle_{01} \\otimes \\left( \\lvert 0 \\rangle - \\lvert 1 \\rangle \\right)_{2} - \\lvert 1 0 \\rangle_{01} \\otimes \\left( \\lvert 0 \\rangle - \\lvert 1 \\rangle \\right)_{2} + \\lvert 1 1 \\rangle_{01} \\otimes \\left( \\lvert 0 \\rangle - \\lvert 1 \\rangle \\right)_{2} \\right] \\\\\n & = \\frac{1}{2} \\left( \\lvert 0 0 \\rangle - \\lvert 0 1 \\rangle - \\lvert 1 0 \\rangle + \\lvert 1 1 \\rangle \\right)_{01} \\otimes \\frac{1}{\\sqrt{2}} \\left( \\lvert 0 \\rangle - \\lvert 1 \\rangle \\right)_{2} \\\\\n & = \\frac{1}{\\sqrt{2}} \\left( \\lvert 0 \\rangle - \\lvert 1 \\rangle \\right)_{0} \\otimes \\frac{1}{\\sqrt{2}} \\left( \\lvert 0 \\rangle - \\lvert 1 \\rangle \\right)_{1} \\otimes \\frac{1}{\\sqrt{2}} \\left( \\lvert 0 \\rangle - \\lvert 1 \\rangle \\right)_{2}\n \\end{aligned}\n $$\n </li>\n \n <li> Apply Hadamard on the first register\n \n\n$$ \\lvert \\psi_3\\rangle = \\lvert 1 \\rangle_{0} \\otimes \\lvert 1 \\rangle_{1} \\otimes \\left( \\lvert 0 \\rangle - \\lvert 1 \\rangle \\right)_{2} $$\n\n\n </li>\n \n <li> Measuring the first two qubits will give the non-zero $11$, indicating a balanced function.\n </li>\n</ol>\n\nYou can try out similar examples using the widget below. 
Press the buttons to add H-gates and oracles, re-run the cell and/or set `case=\"constant\"` to try out different oracles.",
"_____no_output_____"
]
],
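[
[
"The short check below (an added aside, not part of the original text) builds the same two-qubit example with Qiskit and confirms that measuring the first register can only return `11`:\n\n```python\nfrom qiskit import QuantumCircuit\nfrom qiskit.quantum_info import Statevector\n\nqc = QuantumCircuit(3)\nqc.x(2)\nqc.h([0, 1, 2])     # prepare |psi_1>\nqc.cx(0, 2)\nqc.cx(1, 2)         # oracle for f(x1, x0) = x0 XOR x1\nqc.h([0, 1])        # gives |psi_3> on the first register\n\n# marginal probabilities of qubits 0 and 1: all of the weight is on outcome '11'\nprint(Statevector(qc).probabilities([0, 1]))\n```",
"_____no_output_____"
]
],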
[
[
"from qiskit_textbook.widgets import dj_widget\ndj_widget(size=\"small\", case=\"balanced\")",
"_____no_output_____"
]
],
[
[
"## 3. Creating Quantum Oracles <a id='creating-quantum-oracles'> </a>\n\nLet's see some different ways we can create a quantum oracle. \n\nFor a constant function, it is simple:\n\n$\\qquad$ 1. if f(x) = 0, then apply the $I$ gate to the qubit in register 2. \n$\\qquad$ 2. if f(x) = 1, then apply the $X$ gate to the qubit in register 2.\n\nFor a balanced function, there are many different circuits we can create. One of the ways we can guarantee our circuit is balanced is by performing a CNOT for each qubit in register 1, with the qubit in register 2 as the target. For example:\n\n\n\nIn the image above, the top three qubits form the input register, and the bottom qubit is the output register. We can see which input states give which output in the table below:\n\n| Input states that output 0 | Input States that output 1 |\n|:--------------------------:|:--------------------------:|\n| 000 | 001 |\n| 011 | 100 |\n| 101 | 010 |\n| 110 | 111 |\n\n\nWe can change the results while keeping them balanced by wrapping selected controls in X-gates. For example, see the circuit and its results table below:\n\n\n\n| Input states that output 0 | Input states that output 1 |\n|:--------------------------:|:--------------------------:|\n| 001 | 000 |\n| 010 | 011 |\n| 100 | 101 |\n| 111 | 110 |",
"_____no_output_____"
],
[
"## 4. Qiskit Implementation <a id='implementation'></a>\n\nWe now implement the Deutsch-Jozsa algorithm for the example of a three-bit function, with both constant and balanced oracles. First let's do our imports:",
"_____no_output_____"
]
],
[
[
"# initialization\nimport numpy as np\n\n# importing Qiskit\nfrom qiskit import IBMQ, Aer\nfrom qiskit.providers.ibmq import least_busy\nfrom qiskit import QuantumCircuit, transpile\n\n# import basic plot tools\nfrom qiskit.visualization import plot_histogram",
"_____no_output_____"
]
],
[
[
"Next, we set the size of the input register for our oracle:",
"_____no_output_____"
]
],
[
[
"# set the length of the n-bit input string. \nn = 3",
"_____no_output_____"
]
],
[
[
"### 4.1 Constant Oracle <a id='const_oracle'></a>\nLet's start by creating a constant oracle, in this case the input has no effect on the output so we just randomly set the output qubit to be 0 or 1:",
"_____no_output_____"
]
],
[
[
"# set the length of the n-bit input string. \nn = 3\n\nconst_oracle = QuantumCircuit(n+1)\n\noutput = np.random.randint(2)\nif output == 1:\n const_oracle.x(n)\n\nconst_oracle.draw()",
"_____no_output_____"
]
],
[
[
"### 4.2 Balanced Oracle <a id='balanced_oracle'></a>",
"_____no_output_____"
]
],
[
[
"balanced_oracle = QuantumCircuit(n+1)",
"_____no_output_____"
]
],
[
[
"Next, we create a balanced oracle. As we saw in section 1b, we can create a balanced oracle by performing CNOTs with each input qubit as a control and the output bit as the target. We can vary the input states that give 0 or 1 by wrapping some of the controls in X-gates. Let's first choose a binary string of length `n` that dictates which controls to wrap:",
"_____no_output_____"
]
],
[
[
"b_str = \"101\"",
"_____no_output_____"
]
],
[
[
"Now we have this string, we can use it as a key to place our X-gates. For each qubit in our circuit, we place an X-gate if the corresponding digit in `b_str` is `1`, or do nothing if the digit is `0`.",
"_____no_output_____"
]
],
[
[
"balanced_oracle = QuantumCircuit(n+1)\nb_str = \"101\"\n\n# Place X-gates\nfor qubit in range(len(b_str)):\n if b_str[qubit] == '1':\n balanced_oracle.x(qubit)\nbalanced_oracle.draw()",
"_____no_output_____"
]
],
[
[
"Next, we do our controlled-NOT gates, using each input qubit as a control, and the output qubit as a target:",
"_____no_output_____"
]
],
[
[
"balanced_oracle = QuantumCircuit(n+1)\nb_str = \"101\"\n\n# Place X-gates\nfor qubit in range(len(b_str)):\n if b_str[qubit] == '1':\n balanced_oracle.x(qubit)\n\n# Use barrier as divider\nbalanced_oracle.barrier()\n\n# Controlled-NOT gates\nfor qubit in range(n):\n balanced_oracle.cx(qubit, n)\n\nbalanced_oracle.barrier()\nbalanced_oracle.draw()",
"_____no_output_____"
]
],
[
[
"Finally, we repeat the code from two cells up to finish wrapping the controls in X-gates:",
"_____no_output_____"
]
],
[
[
"balanced_oracle = QuantumCircuit(n+1)\nb_str = \"101\"\n\n# Place X-gates\nfor qubit in range(len(b_str)):\n if b_str[qubit] == '1':\n balanced_oracle.x(qubit)\n\n# Use barrier as divider\nbalanced_oracle.barrier()\n\n# Controlled-NOT gates\nfor qubit in range(n):\n balanced_oracle.cx(qubit, n)\n\nbalanced_oracle.barrier()\n\n# Place X-gates\nfor qubit in range(len(b_str)):\n if b_str[qubit] == '1':\n balanced_oracle.x(qubit)\n\n# Show oracle\nbalanced_oracle.draw()",
"_____no_output_____"
]
],
[
[
"We have just created a balanced oracle! All that's left to do is see if the Deutsch-Jozsa algorithm can solve it.\n\n### 4.3 The Full Algorithm <a id='full_alg'></a>\n\nLet's now put everything together. This first step in the algorithm is to initialize the input qubits in the state $|{+}\\rangle$ and the output qubit in the state $|{-}\\rangle$:",
"_____no_output_____"
]
],
[
[
"dj_circuit = QuantumCircuit(n+1, n)\n\n# Apply H-gates\nfor qubit in range(n):\n dj_circuit.h(qubit)\n\n# Put qubit in state |->\ndj_circuit.x(n)\ndj_circuit.h(n)\ndj_circuit.draw()",
"_____no_output_____"
]
],
[
[
"Next, let's apply the oracle. Here we apply the `balanced_oracle` we created above:",
"_____no_output_____"
]
],
[
[
"dj_circuit = QuantumCircuit(n+1, n)\n\n# Apply H-gates\nfor qubit in range(n):\n dj_circuit.h(qubit)\n\n# Put qubit in state |->\ndj_circuit.x(n)\ndj_circuit.h(n)\n\n# Add oracle\ndj_circuit = dj_circuit.compose(balanced_oracle)\ndj_circuit.draw()",
"_____no_output_____"
]
],
[
[
"Finally, we perform H-gates on the $n$-input qubits, and measure our input register:",
"_____no_output_____"
]
],
[
[
"dj_circuit = QuantumCircuit(n+1, n)\n\n# Apply H-gates\nfor qubit in range(n):\n dj_circuit.h(qubit)\n\n# Put qubit in state |->\ndj_circuit.x(n)\ndj_circuit.h(n)\n\n# Add oracle\ndj_circuit = dj_circuit.compose(balanced_oracle)\n\n# Repeat H-gates\nfor qubit in range(n):\n dj_circuit.h(qubit)\ndj_circuit.barrier()\n\n# Measure\nfor i in range(n):\n dj_circuit.measure(i, i)\n\n# Display circuit\ndj_circuit.draw()",
"_____no_output_____"
]
],
[
[
"Let's see the output:",
"_____no_output_____"
]
],
[
[
"# use local simulator\naer_sim = Aer.get_backend('aer_simulator')\nresults = aer_sim.run(dj_circuit).result()\nanswer = results.get_counts()\n\nplot_histogram(answer)",
"_____no_output_____"
]
],
[
[
"We can see from the results above that we have a 0% chance of measuring `000`. This correctly predicts the function is balanced. \n\n### 4.4 Generalised Circuits <a id='general_circs'></a>\n\nBelow, we provide a generalised function that creates Deutsch-Jozsa oracles and turns them into quantum gates. It takes the `case`, (either `'balanced'` or '`constant`', and `n`, the size of the input register:",
"_____no_output_____"
]
],
[
[
"def dj_oracle(case, n):\n # We need to make a QuantumCircuit object to return\n # This circuit has n+1 qubits: the size of the input,\n # plus one output qubit\n oracle_qc = QuantumCircuit(n+1)\n \n # First, let's deal with the case in which oracle is balanced\n if case == \"balanced\":\n # First generate a random number that tells us which CNOTs to\n # wrap in X-gates:\n b = np.random.randint(1,2**n)\n # Next, format 'b' as a binary string of length 'n', padded with zeros:\n b_str = format(b, '0'+str(n)+'b')\n # Next, we place the first X-gates. Each digit in our binary string \n # corresponds to a qubit, if the digit is 0, we do nothing, if it's 1\n # we apply an X-gate to that qubit:\n for qubit in range(len(b_str)):\n if b_str[qubit] == '1':\n oracle_qc.x(qubit)\n # Do the controlled-NOT gates for each qubit, using the output qubit \n # as the target:\n for qubit in range(n):\n oracle_qc.cx(qubit, n)\n # Next, place the final X-gates\n for qubit in range(len(b_str)):\n if b_str[qubit] == '1':\n oracle_qc.x(qubit)\n\n # Case in which oracle is constant\n if case == \"constant\":\n # First decide what the fixed output of the oracle will be\n # (either always 0 or always 1)\n output = np.random.randint(2)\n if output == 1:\n oracle_qc.x(n)\n \n oracle_gate = oracle_qc.to_gate()\n oracle_gate.name = \"Oracle\" # To show when we display the circuit\n return oracle_gate",
"_____no_output_____"
]
],
[
[
"Let's also create a function that takes this oracle gate and performs the Deutsch-Jozsa algorithm on it:",
"_____no_output_____"
]
],
[
[
"def dj_algorithm(oracle, n):\n dj_circuit = QuantumCircuit(n+1, n)\n # Set up the output qubit:\n dj_circuit.x(n)\n dj_circuit.h(n)\n # And set up the input register:\n for qubit in range(n):\n dj_circuit.h(qubit)\n # Let's append the oracle gate to our circuit:\n dj_circuit.append(oracle, range(n+1))\n # Finally, perform the H-gates again and measure:\n for qubit in range(n):\n dj_circuit.h(qubit)\n \n for i in range(n):\n dj_circuit.measure(i, i)\n \n return dj_circuit",
"_____no_output_____"
]
],
[
[
"Finally, let's use these functions to play around with the algorithm:",
"_____no_output_____"
]
],
[
[
"n = 4\noracle_gate = dj_oracle('balanced', n)\ndj_circuit = dj_algorithm(oracle_gate, n)\ndj_circuit.draw()",
"_____no_output_____"
]
],
[
[
"And see the results of running this circuit:",
"_____no_output_____"
]
],
[
[
"transpiled_dj_circuit = transpile(dj_circuit, aer_sim)\nresults = aer_sim.run(transpiled_dj_circuit).result()\nanswer = results.get_counts()\nplot_histogram(answer)",
"_____no_output_____"
]
],
[
[
"## 5. Experiment with Real Devices <a id='device'></a>\n\nWe can run the circuit on the real device as shown below. We first look for the least-busy device that can handle our circuit.",
"_____no_output_____"
]
],
[
[
"# Load our saved IBMQ accounts and get the least busy backend device with greater than or equal to (n+1) qubits\nIBMQ.load_account()\nprovider = IBMQ.get_provider(hub='ibm-q')\nbackend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= (n+1) and\n not x.configuration().simulator and x.status().operational==True))\nprint(\"least busy backend: \", backend)",
"least busy backend: ibmq_belem\n"
],
[
"# Run our circuit on the least busy backend. Monitor the execution of the job in the queue\nfrom qiskit.tools.monitor import job_monitor\n\ntranspiled_dj_circuit = transpile(dj_circuit, backend, optimization_level=3)\njob = backend.run(transpiled_dj_circuit)\njob_monitor(job, interval=2)",
"Job Status: job has successfully run\n"
],
[
"# Get the results of the computation\nresults = job.result()\nanswer = results.get_counts()\n\nplot_histogram(answer)",
"_____no_output_____"
]
],
[
[
"As we can see, the most likely result is `1111`. The other results are due to errors in the quantum computation. ",
"_____no_output_____"
],
[
"## 6. Problems <a id='problems'></a>\n\n1. Are you able to create a balanced or constant oracle of a different form?\n\n2. The function `dj_problem_oracle` (below) returns a Deutsch-Jozsa oracle for `n = 4` in the form of a gate. The gate takes 5 qubits as input where the final qubit (`q_4`) is the output qubit (as with the example oracles above). You can get different oracles by giving `dj_problem_oracle` different integers between 1 and 5. Use the Deutsch-Jozsa algorithm to decide whether each oracle is balanced or constant (**Note:** It is highly recommended you try this example using the `aer_simulator` instead of a real device).",
"_____no_output_____"
]
],
[
[
"from qiskit_textbook.problems import dj_problem_oracle\noracle = dj_problem_oracle(1)",
"_____no_output_____"
]
],
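[
[
"A possible way to check one of these problem oracles (an illustrative sketch, not part of the original exercise): we can reuse the `dj_algorithm` function and the `aer_sim` backend defined earlier in this notebook, with `n = 4` to match the 5-qubit problem oracle.",
"_____no_output_____"
]
],
[
[
"# Sketch: run the Deutsch-Jozsa circuit for this problem oracle on the simulator.\n# Assumes dj_algorithm, transpile, aer_sim and plot_histogram from the cells above are available.\nn = 4\nproblem_circuit = dj_algorithm(oracle, n)\ntranspiled_problem_circuit = transpile(problem_circuit, aer_sim)\ncounts = aer_sim.run(transpiled_problem_circuit).result().get_counts()\n# measuring '0000' with certainty suggests a constant oracle; anything else suggests balanced\nplot_histogram(counts)",
"_____no_output_____"
]
],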
[
[
"## 7. References <a id='references'></a>\n\n1. David Deutsch and Richard Jozsa (1992). \"Rapid solutions of problems by quantum computation\". Proceedings of the Royal Society of London A. 439: 553–558. [doi:10.1098/rspa.1992.0167](https://doi.org/10.1098%2Frspa.1992.0167).\n2. R. Cleve; A. Ekert; C. Macchiavello; M. Mosca (1998). \"Quantum algorithms revisited\". Proceedings of the Royal Society of London A. 454: 339–354. [doi:10.1098/rspa.1998.0164](https://doi.org/10.1098%2Frspa.1998.0164).",
"_____no_output_____"
]
],
[
[
"import qiskit.tools.jupyter\n%qiskit_version_table",
"/usr/local/anaconda3/envs/terra-unstable/lib/python3.9/site-packages/qiskit/aqua/__init__.py:86: DeprecationWarning: The package qiskit.aqua is deprecated. It was moved/refactored to qiskit-terra For more information see <https://github.com/Qiskit/qiskit-aqua/blob/main/README.md#migration-guide>\n warn_package('aqua', 'qiskit-terra')\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e74e7a23762277f4ec369b5d80a57fc0ac2e981b | 27,271 | ipynb | Jupyter Notebook | APS2-sistemas-de-equacoes-matrizes.ipynb | alcapriles/seven | 1418b3660e343087152f02ba26416b3220a2f10b | [
"MIT"
] | null | null | null | APS2-sistemas-de-equacoes-matrizes.ipynb | alcapriles/seven | 1418b3660e343087152f02ba26416b3220a2f10b | [
"MIT"
] | null | null | null | APS2-sistemas-de-equacoes-matrizes.ipynb | alcapriles/seven | 1418b3660e343087152f02ba26416b3220a2f10b | [
"MIT"
] | null | null | null | 19.761594 | 207 | 0.435224 | [
[
[
"## APS 2 - Sistemas de equações (matrizes).",
"_____no_output_____"
],
[
"Considerar epsilon_s = 0,0001%.",
"_____no_output_____"
],
[
"Explicar como funcionam os comandos de inversão de matriz e multiplicação de matrizes usados pela linguagem de programação Python.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nnp.set_printoptions(formatter={'float': lambda x: \"{0:0.3f}\".format(x)})\n\n%matplotlib inline",
"_____no_output_____"
]
],
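[
[
"As a brief illustration of the matrix inversion and multiplication commands mentioned in the assignment (an added sketch; `A` and `b` below are arbitrary example values, not taken from the exercises): `np.linalg.inv` returns the inverse of a square matrix, and the `@` operator (equivalent to `np.dot`) performs matrix multiplication, so `np.linalg.inv(A) @ b` solves the system `A x = b`. `np.linalg.solve` reaches the same result without forming the inverse explicitly.",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch of matrix inversion and matrix multiplication in numpy.\n# A and b are arbitrary example values, not taken from the exercises below.\nA = np.array([[2.0, 1.0],\n              [1.0, 3.0]])\nb = np.array([3.0, 5.0])\n\nA_inv = np.linalg.inv(A)   # matrix inverse of A\nx = A_inv @ b              # matrix-vector product, same as np.dot(A_inv, b)\n\n# Preferred in practice: solve A x = b directly, without forming the inverse\nx_check = np.linalg.solve(A, b)",
"_____no_output_____"
]
],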
[
[
"### Exercício 12.23 ",
"_____no_output_____"
],
[
"<img src=\"exx23.JPG\">",
"_____no_output_____"
]
],
[
[
"matrix = np.array([ [1,-1,-1,0],\n [-35,0,-5,200],\n [0,-27,5,0]\n ])\n\nmatrix = matrix.astype('float64')",
"_____no_output_____"
]
],
[
[
"### Utilizando a eliminação de Gauss ingênua:",
"_____no_output_____"
]
],
[
[
"temp = matrix[1][0]/matrix[0][0]\nfor i in range(4): \n matrix[1][i] -= temp*matrix[0][i]",
"_____no_output_____"
],
[
"temp = matrix[2][1]/matrix[1][1]\nfor i in range(4): \n matrix[2][i] -= temp*matrix[1][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[1.000 -1.000 -1.000 0.000]\n [0.000 -35.000 -40.000 200.000]\n [0.000 0.000 35.857 -154.286]]\n"
]
],
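[
[
"The same elimination can also be written as a general routine. The sketch below is added for illustration only (`gauss_naive` is a hypothetical helper, not part of the original solution): it performs naive forward elimination followed by back substitution on a copy of the augmented matrix, and applied to the system above it should reproduce the currents computed step by step in the next cells.",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch: naive Gaussian elimination with back substitution (no pivoting).\n# 'aug' is an n x (n+1) augmented matrix [A | b]; the function name is hypothetical.\ndef gauss_naive(aug):\n    a = aug.astype('float64')   # astype returns a copy, so the input is left untouched\n    n = a.shape[0]\n    # Forward elimination\n    for k in range(n - 1):\n        for i in range(k + 1, n):\n            factor = a[i, k] / a[k, k]\n            a[i, k:] -= factor * a[k, k:]\n    # Back substitution\n    x = np.zeros(n)\n    for i in range(n - 1, -1, -1):\n        x[i] = (a[i, n] - a[i, i + 1:n] @ x[i + 1:n]) / a[i, i]\n    return x\n\n# Example (same system as above):\n# gauss_naive(np.array([[1, -1, -1, 0], [-35, 0, -5, 200], [0, -27, 5, 0]]))",
"_____no_output_____"
]
],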
[
[
"### i3:",
"_____no_output_____"
]
],
[
[
"i3 = -154.286/35.857\nprint(i3)",
"-4.302813955434085\n"
]
],
[
[
"### i2:",
"_____no_output_____"
]
],
[
[
"i2 = (40*i3+200)/-35\nprint(i2)",
"-0.7967840509324738\n"
]
],
[
[
"### i1:",
"_____no_output_____"
]
],
[
[
"i1 = i2+i3\nprint(i1)",
"-5.099598006366559\n"
]
],
[
[
"### O sinal negativo da corrente indica que ela tem sinal contrário ao que está no desenho acima.",
"_____no_output_____"
],
[
"### Exercício 12.25\nutilizar eliminação de Gauss com pivotamento.",
"_____no_output_____"
],
[
"<img src=\"exx25.JPG\">",
"_____no_output_____"
],
[
"### Matriz obtida a partir das leis de Kirchoff aplicadas ao circuito elétrico:",
"_____no_output_____"
]
],
[
[
"matrix = np.array([ \n \n [-75,-25, 0, 0,-20, 0,-70], \n [0 ,-25,-5, 0, 0, 0, 0], \n [0 , 0,-5,-10, 25, 0, 0], \n [1 , -1, 0, 0, 0,-1, 0], \n [1 , 0, 0, -1, -1, 0, 0], \n [0 , 1, -1, 0, -1, 0, 0] \n ])\n\nmatrix = matrix.astype('float64')",
"_____no_output_____"
]
],
[
[
"### Matriz pivotada manualmente:",
"_____no_output_____"
]
],
[
[
"matrix = np.array([ \n \n [-75,-25, 0, 0,-20, 0,-70], \n [0 ,-25,-5, 0, 0, 0, 0],\n [0 , 0,-5,-10, 25, 0, 0], \n [1 , 0, 0, -1, -1, 0, 0], \n [0 , 1, -1, 0, -1, 0, 0],\n [1 , -1, 0, 0, 0,-1, 0] \n ])\n\nmatrix = matrix.astype('float64')",
"_____no_output_____"
]
],
[
[
"Primeiro coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[3][0]/matrix[0][0]\nfor i in range(7): \n matrix[3][i] -= temp*matrix[0][i]\n\ntemp = matrix[5][0]/matrix[0][0]\nfor i in range(7): \n matrix[5][i] -= temp*matrix[0][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[-75.000 -25.000 0.000 0.000 -20.000 0.000 -70.000]\n [0.000 -25.000 -5.000 0.000 0.000 0.000 0.000]\n [0.000 0.000 -5.000 -10.000 25.000 0.000 0.000]\n [0.000 -0.333 0.000 -1.000 -1.267 0.000 -0.933]\n [0.000 1.000 -1.000 0.000 -1.000 0.000 0.000]\n [0.000 -1.333 0.000 0.000 -0.267 -1.000 -0.933]]\n"
]
],
[
[
"Segundo coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[3][1]/matrix[1][1]\nfor i in range(7): \n matrix[3][i] -= temp*matrix[1][i]\n\ntemp = matrix[4][1]/matrix[1][1]\nfor i in range(7): \n matrix[4][i] -= temp*matrix[1][i]\n\ntemp = matrix[5][1]/matrix[1][1]\nfor i in range(7): \n matrix[5][i] -= temp*matrix[1][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[-75.000 -25.000 0.000 0.000 -20.000 0.000 -70.000]\n [0.000 -25.000 -5.000 0.000 0.000 0.000 0.000]\n [0.000 0.000 -5.000 -10.000 25.000 0.000 0.000]\n [0.000 0.000 0.067 -1.000 -1.267 0.000 -0.933]\n [0.000 0.000 -1.200 0.000 -1.000 0.000 0.000]\n [0.000 0.000 0.267 0.000 -0.267 -1.000 -0.933]]\n"
]
],
[
[
"Terceiro coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[3][2]/matrix[2][2]\nfor i in range(6): \n matrix[3][i] -= temp*matrix[2][i]\n\ntemp = matrix[4][2]/matrix[2][2]\nfor i in range(6): \n matrix[4][i] -= temp*matrix[2][i]\n\ntemp = matrix[5][2]/matrix[2][2]\nfor i in range(6): \n matrix[5][i] -= temp*matrix[2][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[-75.000 -25.000 0.000 0.000 -20.000 0.000 -70.000]\n [0.000 -25.000 -5.000 0.000 0.000 0.000 0.000]\n [0.000 0.000 -5.000 -10.000 25.000 0.000 0.000]\n [0.000 0.000 0.000 -1.133 -0.933 0.000 -0.933]\n [0.000 0.000 0.000 2.400 -7.000 0.000 0.000]\n [0.000 0.000 0.000 -0.533 1.067 -1.000 -0.933]]\n"
]
],
[
[
"Quarto coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[4][3]/matrix[3][3]\nfor i in range(7): \n matrix[4][i] -= temp*matrix[3][i]\n \ntemp = matrix[5][3]/matrix[3][3]\nfor i in range(7): \n matrix[5][i] -= temp*matrix[3][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[-75.000 -25.000 0.000 0.000 -20.000 0.000 -70.000]\n [0.000 -25.000 -5.000 0.000 0.000 0.000 0.000]\n [0.000 0.000 -5.000 -10.000 25.000 0.000 0.000]\n [0.000 0.000 0.000 -1.133 -0.933 0.000 -0.933]\n [0.000 0.000 0.000 0.000 -8.976 0.000 -1.976]\n [0.000 0.000 0.000 0.000 1.506 -1.000 -0.494]]\n"
]
],
[
[
"Quinto coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[5][4]/matrix[4][4]\nfor i in range(7): \n matrix[5][i] -= temp*matrix[4][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[-75.000 -25.000 0.000 0.000 -20.000 0.000 -70.000]\n [0.000 -25.000 -5.000 0.000 0.000 0.000 0.000]\n [0.000 0.000 -5.000 -10.000 25.000 0.000 0.000]\n [0.000 0.000 0.000 -1.133 -0.933 0.000 -0.933]\n [0.000 0.000 0.000 0.000 -8.976 0.000 -1.976]\n [0.000 0.000 0.000 0.000 0.000 -1.000 -0.826]]\n"
]
],
[
[
"### i6:",
"_____no_output_____"
]
],
[
[
"i6 = -0.826/-1\nprint(i6)",
"0.826\n"
]
],
[
[
"### i5:",
"_____no_output_____"
]
],
[
[
"i5 = -1.976/-8.976\nprint(i5)",
"0.22014260249554365\n"
]
],
[
[
"### i4:",
"_____no_output_____"
]
],
[
[
"i4 = (-0.933 + 0.933*i5)/-1.133\nprint(i4)",
"0.6421950148911366\n"
]
],
[
[
"### i3:",
"_____no_output_____"
]
],
[
[
"i3 = (-25*i5 + 10*i4)/-5\nprint(i3)",
"-0.1836770173045549\n"
]
],
[
[
"### i2:",
"_____no_output_____"
]
],
[
[
"i2 = 5*i3/-25\nprint(i2)",
"0.03673540346091098\n"
]
],
[
[
"### i1:",
"_____no_output_____"
]
],
[
[
"i1 = (-70+20*i5+25*i2)/-75\nprint(i1)",
"0.8623835048475513\n"
]
],
[
[
"### Exercício 12.27",
"_____no_output_____"
],
[
"<img src=\"exx27.JPG\">",
"_____no_output_____"
],
[
"### Utilizando eliminação de Gauss com pivotamento (pivotamento executado manualmente), vem:",
"_____no_output_____"
],
[
"O pivotamento cumpre o propósito de evitar a divisão por zero na eliminação de Gauss, e consiste em reordenar as linhas de forma que o elemento pivô (geralmente o da diagonal principal), não seja zero.",
"_____no_output_____"
],
[
"### Matriz obtida a partir das leis de Kirchoff aplicadas ao circuito elétrico:",
"_____no_output_____"
]
],
[
[
"matrix = np.array([ \n \n \n [0 ,0 ,1 ,1 ,-1, 0],\n [-1,0 ,1 ,0 ,1 , 0], \n [-1,1 ,1 ,0 ,0 , 0], \n [-5,0 ,-15,0 ,0 ,-80], \n [0 ,0 ,-20,25 ,0 , 50] \n ])\n\nmatrix = matrix.astype('float64')",
"_____no_output_____"
]
],
[
[
"### Matriz pivotada:",
"_____no_output_____"
]
],
[
[
"matrix = np.array([ \n \n [-5,0 ,-15,0 ,0 ,-80],\n [-1,1 ,1 ,0 ,0 , 0],\n [-1,0 ,1 ,0 ,1 , 0], \n [0 ,0 ,1 ,1 ,-1, 0],\n [0 ,0 ,-20,25 ,0 , 50] \n ])\n\nmatrix = matrix.astype('float64')",
"_____no_output_____"
]
],
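[
[
"The manual row reordering above can also be automated. The sketch below is added for illustration only (`gauss_partial_pivot` is a hypothetical helper, not part of the original solution): at each elimination step it swaps the row with the largest absolute value in the pivot column into the pivot position, which avoids division by zero and reduces round-off error.",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch: forward elimination with partial pivoting on an augmented matrix [A | b].\ndef gauss_partial_pivot(aug):\n    a = aug.astype('float64')   # work on a copy\n    n = a.shape[0]\n    for k in range(n - 1):\n        # Swap the row with the largest |a[i, k]| (for i >= k) into the pivot position\n        p = k + np.argmax(np.abs(a[k:, k]))\n        if p != k:\n            a[[k, p]] = a[[p, k]]\n        # Eliminate the entries below the pivot\n        for i in range(k + 1, n):\n            factor = a[i, k] / a[k, k]\n            a[i, k:] -= factor * a[k, k:]\n    return a",
"_____no_output_____"
]
],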
[
[
"Primeiro coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[1][0]/matrix[0][0]\nfor i in range(6): \n matrix[1][i] -= temp*matrix[0][i]\n\ntemp = matrix[2][0]/matrix[0][0]\nfor i in range(6): \n matrix[2][i] -= temp*matrix[0][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[-5.000 0.000 -15.000 0.000 0.000 -80.000]\n [0.000 1.000 4.000 0.000 0.000 16.000]\n [0.000 0.000 4.000 0.000 1.000 16.000]\n [0.000 0.000 1.000 1.000 -1.000 0.000]\n [0.000 0.000 -20.000 25.000 0.000 50.000]]\n"
]
],
[
[
"Terceiro coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[3][2]/matrix[2][2]\nfor i in range(6): \n matrix[3][i] -= temp*matrix[2][i]\n\ntemp = matrix[4][2]/matrix[2][2]\nfor i in range(6): \n matrix[4][i] -= temp*matrix[2][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[-5.000 0.000 -15.000 0.000 0.000 -80.000]\n [0.000 1.000 4.000 0.000 0.000 16.000]\n [0.000 0.000 4.000 0.000 1.000 16.000]\n [0.000 0.000 0.000 1.000 -1.250 -4.000]\n [0.000 0.000 0.000 25.000 5.000 130.000]]\n"
]
],
[
[
"Quarto coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[4][3]/matrix[3][3]\nfor i in range(6): \n matrix[4][i] -= temp*matrix[3][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[-5.000 0.000 -15.000 0.000 0.000 -80.000]\n [0.000 1.000 4.000 0.000 0.000 16.000]\n [0.000 0.000 4.000 0.000 1.000 16.000]\n [0.000 0.000 0.000 1.000 -1.250 -4.000]\n [0.000 0.000 0.000 0.000 36.250 230.000]]\n"
]
],
[
[
"### Substituição regressiva:",
"_____no_output_____"
],
[
"### i5:",
"_____no_output_____"
]
],
[
[
"i5 = 230.000/36.250\nprint(i5)",
"6.344827586206897\n"
]
],
[
[
"### i4:",
"_____no_output_____"
]
],
[
[
"i4 = -4 + 1.25*i5\nprint(i4)",
"3.931034482758621\n"
]
],
[
[
"### i3:",
"_____no_output_____"
]
],
[
[
"i3 = (16) -i5 /4\nprint(i3)",
"14.413793103448276\n"
]
],
[
[
"### i2:",
"_____no_output_____"
]
],
[
[
"i2 = 16 - 4*i3\nprint(i2)",
"-41.6551724137931\n"
]
],
[
[
"### i1:",
"_____no_output_____"
]
],
[
[
"i1 = (-80 + 15*i3) / 5\nprint(i1)",
"27.241379310344826\n"
]
],
[
[
"### Exercício 12.28",
"_____no_output_____"
],
[
"<img src=\"exx28.JPG\">",
"_____no_output_____"
],
[
"### Matriz obtida a partir das leis de Kirchoff aplicadas ao circuito elétrico:",
"_____no_output_____"
]
],
[
[
"matrix = np.array([ \n \n [1,-1 ,-1, 0, 0, 0, 0], \n [0, 1 , 0, 0, 1,-1, 0], \n [0, 0 ,-4,-2, 0, 0,-20], \n [0 ,-6, 4, 0, 8, 0, 0], \n [0 ,0 ,-1, 1, 1, 0, 0], \n [0 ,0 , 0,-2, 8, 5, 0] \n ])\n\nmatrix = matrix.astype('float64')",
"_____no_output_____"
]
],
[
[
"### Matriz pivotada manualmente:",
"_____no_output_____"
]
],
[
[
"matrix = np.array([ \n \n [1,-1 ,-1, 0, 0, 0, 0], \n [0, 1 , 0, 0, 1,-1, 0], \n [0, 0 ,-4,-2, 0, 0,-20], \n [0 ,0 ,-1, 1, 1, 0, 0], \n [0 ,-6, 4, 0, 8, 0, 0], \n [0 ,0 , 0,-2, 8, 5, 0] \n ])\n\nmatrix = matrix.astype('float64')",
"_____no_output_____"
]
],
[
[
"Segundo coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[4][1]/matrix[1][1]\nfor i in range(7): \n matrix[4][i] -= temp*matrix[1][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[1.000 -1.000 -1.000 0.000 0.000 0.000 0.000]\n [0.000 1.000 0.000 0.000 1.000 -1.000 0.000]\n [0.000 0.000 -4.000 -2.000 0.000 0.000 -20.000]\n [0.000 0.000 -1.000 1.000 1.000 0.000 0.000]\n [0.000 0.000 4.000 0.000 14.000 -6.000 0.000]\n [0.000 0.000 0.000 -2.000 8.000 5.000 0.000]]\n"
]
],
[
[
"Terceiro coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[3][2]/matrix[2][2]\nfor i in range(7): \n matrix[3][i] -= temp*matrix[2][i]\n\ntemp = matrix[4][2]/matrix[2][2]\nfor i in range(7): \n matrix[4][i] -= temp*matrix[2][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[1.000 -1.000 -1.000 0.000 0.000 0.000 0.000]\n [0.000 1.000 0.000 0.000 1.000 -1.000 0.000]\n [0.000 0.000 -4.000 -2.000 0.000 0.000 -20.000]\n [0.000 0.000 0.000 1.500 1.000 0.000 5.000]\n [0.000 0.000 0.000 -2.000 14.000 -6.000 -20.000]\n [0.000 0.000 0.000 -2.000 8.000 5.000 0.000]]\n"
]
],
[
[
"Quarto coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[4][3]/matrix[3][3]\nfor i in range(7): \n matrix[4][i] -= temp*matrix[3][i]\n \ntemp = matrix[5][3]/matrix[3][3]\nfor i in range(7): \n matrix[5][i] -= temp*matrix[3][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[1.000 -1.000 -1.000 0.000 0.000 0.000 0.000]\n [0.000 1.000 0.000 0.000 1.000 -1.000 0.000]\n [0.000 0.000 -4.000 -2.000 0.000 0.000 -20.000]\n [0.000 0.000 0.000 1.500 1.000 0.000 5.000]\n [0.000 0.000 0.000 0.000 15.333 -6.000 -13.333]\n [0.000 0.000 0.000 0.000 9.333 5.000 6.667]]\n"
]
],
[
[
"Quinto coeficiente:",
"_____no_output_____"
]
],
[
[
"temp = matrix[5][4]/matrix[4][4]\nfor i in range(7): \n matrix[5][i] -= temp*matrix[4][i]",
"_____no_output_____"
],
[
"print(matrix)",
"[[1.000 -1.000 -1.000 0.000 0.000 0.000 0.000]\n [0.000 1.000 0.000 0.000 1.000 -1.000 0.000]\n [0.000 0.000 -4.000 -2.000 0.000 0.000 -20.000]\n [0.000 0.000 0.000 1.500 1.000 0.000 5.000]\n [0.000 0.000 0.000 0.000 15.333 -6.000 -13.333]\n [0.000 0.000 0.000 0.000 0.000 8.652 14.783]]\n"
]
],
[
[
"### i6:",
"_____no_output_____"
]
],
[
[
"i6 = 14.783/8.652\nprint(i6)",
"1.7086222838650025\n"
]
],
[
[
"### i5:",
"_____no_output_____"
]
],
[
[
"i5 = (6.000*i6 - 13.333)/15.333\nprint(i5)",
"-0.20095651841192105\n"
]
],
[
[
"### i4:",
"_____no_output_____"
]
],
[
[
"i4 = (5-i5)/1.5\nprint(i4)",
"3.4673043456079475\n"
]
],
[
[
"### i3:",
"_____no_output_____"
]
],
[
[
"i3 = (-20 + 2*i4)/-4\nprint(i3)",
"3.266347827196026\n"
]
],
[
[
"### i2:",
"_____no_output_____"
]
],
[
[
"i2 = i6-i5\nprint(i2)",
"1.9095788022769236\n"
]
],
[
[
"### i1:",
"_____no_output_____"
]
],
[
[
"i1 = i2+i3\nprint(i1)",
"5.17592662947295\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |