Dataset columns (type, observed range or class count):
hexsha : stringlengths, 40–40
size : int64, 6–14.9M
ext : stringclasses, 1 value
lang : stringclasses, 1 value
max_stars_repo_path : stringlengths, 6–260
max_stars_repo_name : stringlengths, 6–119
max_stars_repo_head_hexsha : stringlengths, 40–41
max_stars_repo_licenses : list
max_stars_count : int64, 1–191k
max_stars_repo_stars_event_min_datetime : stringlengths, 24–24
max_stars_repo_stars_event_max_datetime : stringlengths, 24–24
max_issues_repo_path : stringlengths, 6–260
max_issues_repo_name : stringlengths, 6–119
max_issues_repo_head_hexsha : stringlengths, 40–41
max_issues_repo_licenses : list
max_issues_count : int64, 1–67k
max_issues_repo_issues_event_min_datetime : stringlengths, 24–24
max_issues_repo_issues_event_max_datetime : stringlengths, 24–24
max_forks_repo_path : stringlengths, 6–260
max_forks_repo_name : stringlengths, 6–119
max_forks_repo_head_hexsha : stringlengths, 40–41
max_forks_repo_licenses : list
max_forks_count : int64, 1–105k
max_forks_repo_forks_event_min_datetime : stringlengths, 24–24
max_forks_repo_forks_event_max_datetime : stringlengths, 24–24
avg_line_length : float64, 2–1.04M
max_line_length : int64, 2–11.2M
alphanum_fraction : float64, 0–1
cells : list
cell_types : list
cell_type_groups : list
ec831a2c14ccbf9a5c7854728d91cc135b6ad02c
49,729
ipynb
Jupyter Notebook
RNN.ipynb
arpitkumar1412/Sentiment-Analyser
7b0a5f39d59dae36f187bf99fb3684b306926053
[ "MIT" ]
1
2021-06-17T21:48:05.000Z
2021-06-17T21:48:05.000Z
RNN.ipynb
arpitkumar1412/Sentiment-Analyser
7b0a5f39d59dae36f187bf99fb3684b306926053
[ "MIT" ]
null
null
null
RNN.ipynb
arpitkumar1412/Sentiment-Analyser
7b0a5f39d59dae36f187bf99fb3684b306926053
[ "MIT" ]
null
null
null
36.272064
1,777
0.498059
[ [ [ "import pandas as pd\nimport re\nimport numpy as np", "_____no_output_____" ] ], [ [ "Data preprocessing, converting to numpy array and dividing into train, test, validation dataset. Data location of both glove matrix and sentiment(2) has to be changed.", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "_____no_output_____" ], [ "df = pd.read_csv (r'/content/drive/MyDrive/sentiment (2).csv') #change the value to your file location while running\nprint (df)", "_____no_output_____" ], [ "reviews = df['review']\nlabels = np.array(df['senti_score'])", "_____no_output_____" ], [ "for label in range(len(labels)):\n if float(labels[label])<=0.2:\n labels[label] = 0\n elif float(labels[label])<=0.4:\n labels[label] = 1\n elif float(labels[label])<=0.6:\n labels[label] = 2\n elif float(labels[label])<=0.8:\n labels[label] = 3\n elif float(labels[label])<=1.0:\n labels[label] = 4", "_____no_output_____" ], [ "reviews = reviews.str.lower()\nfor i in range(df.shape[0]):\n reviews[i] = re.sub(r'[^\\w\\s]', '', reviews[i])", "_____no_output_____" ], [ "from numpy import array\nfrom numpy import asarray\nfrom numpy import zeros\n\nembeddings_dictionary = dict()\nglove_file = open('/content/drive/MyDrive/glove.6B.100d.txt', encoding=\"utf8\") #change the value to your file location while running\n\nfor line in glove_file:\n records = line.split()\n word = records[0]\n vector_dimensions = asarray(records[1:], dtype='float32')\n embeddings_dictionary [word] = vector_dimensions\nglove_file.close()", "_____no_output_____" ], [ "maxlen = 32 #length of review set here", "_____no_output_____" ], [ "embedding_matrix = np.empty((len(reviews),maxlen,100))\nj=0\nk=0\nk2=0\nfor sent in reviews:\n sent_embed = np.empty((maxlen,100))\n i=0\n for word in sent.split():\n if i==maxlen:\n break\n \n try:\n sent_embed[i] = np.array(embeddings_dictionary[word]).reshape((1,100))\n except KeyError: \n sent_embed[i] = np.zeros((1,100))\n k+=1\n print(sent_embed.shape)\n i+=1\n k2+=1\n\n if i<maxlen:\n padding_matrix = np.zeros((maxlen-i,100))\n sent_embed[i:] = padding_matrix\n embedding_matrix[j] = sent_embed\n j+=1", "_____no_output_____" ], [ "X = embedding_matrix", "_____no_output_____" ], [ "y = np.empty((len(reviews),maxlen))\ncom = np.ones((1,maxlen))\ni=0\nfor label in labels:\n y[i] = np.array(label*com)\n i+=1\ny = y.astype(int)", "_____no_output_____" ], [ "# fp = open('/content/Train-Dev_test split.txt', 'r')\nsent=np.loadtxt('/content/drive/MyDrive/Train-Dev_test split.txt',delimiter=',',dtype=str)", "_____no_output_____" ], [ "X_train = np.empty((len(reviews),maxlen,100))\nX_test = np.empty((len(reviews),maxlen,100)) \nX_val = np.empty((len(reviews),maxlen,100))\ny_train = np.empty((len(reviews),maxlen))\ny_test = np.empty((len(reviews),maxlen))\ny_val = np.empty((len(reviews),maxlen))\ni=0\nj=0\nb=0", "_____no_output_____" ], [ "for k in range(len(reviews)):\n if sent[k][1]=='1':\n X_train[i] = X[max(11285,int(sent[k][0])-1)]\n y_train[i] = y[max(11285,int(sent[k][0])-1)]\n i+=1\n elif sent[k][1]=='2':\n X_val[b] = X[max(11285,int(sent[k][0])-1)]\n y_val[b] = y[max(11285,int(sent[k][0])-1)]\n b+=1\n elif sent[k][1]=='3':\n X_test[j] = X[max(11285,int(sent[k][0])-1)]\n y_test[j] = y[max(11285,int(sent[k][0])-1)]\n j+=1\ny_train = y_train.astype(int)\ny_val = y_val.astype(int)\ny_test = y_test.astype(int)", "_____no_output_____" ] ], [ [ "Model", "_____no_output_____" ] ], [ [ "import numpy as np\nimport sys\nfrom datetime import datetime", "_____no_output_____" 
] ], [ [ "Multiplication gate for the network, multiply weights with input/H(t-1)", "_____no_output_____" ] ], [ [ "class MultiplyGate:\n def forward(self,W, x):\n return np.dot(W, x)\n def backward(self, W, x, dz):\n dW = np.asarray(np.dot(np.transpose(np.asmatrix(dz)), np.asmatrix(x)))\n dx = np.dot(np.transpose(W), dz)\n return dW, dx", "_____no_output_____" ] ], [ [ "Addition gate for the network, add terms in the forward prop of RNN", "_____no_output_____" ] ], [ [ "class AddGate:\n def forward(self, x1, x2):\n return x1 + x2\n def backward(self, x1, x2, dz):\n dx1 = dz * np.ones_like(x1)\n dx2 = dz * np.ones_like(x2)\n return dx1, dx2", "_____no_output_____" ], [ "class Sigmoid:\n def forward(self, x):\n return 1.0 / (1.0 + np.exp(-x))\n def backward(self, x, top_diff):\n output = self.forward(x)\n return (1.0 - output) * output * top_diff", "_____no_output_____" ], [ "class Tanh:\n def forward(self, x):\n return np.tanh(x)\n def backward(self, x, top_diff):\n output = self.forward(x)\n return (1.0 - np.square(output)) * top_diff", "_____no_output_____" ], [ "class Softmax:\n def predict(self, x):\n log_c = np.max(x, axis=x.ndim - 1, keepdims=True)\n y = np.sum(np.exp(x - log_c), axis=x.ndim - 1, keepdims=True)\n return np.exp(x - log_c)/y\n def loss(self, x, y):\n probs = self.predict(x)\n return -np.log(probs[y])\n def diff(self, x, y):\n probs = self.predict(x)\n probs[y] -= 1.0\n return probs", "_____no_output_____" ] ], [ [ "1 RNN layer is compiled here, both forward and backward propagation", "_____no_output_____" ] ], [ [ "mulGate = MultiplyGate()\naddGate = AddGate()\nactivation = Sigmoid()\n\nclass RNNLayer:\n def forward(self, x, prev_s, U, W, V):\n self.mulu = mulGate.forward(U, x)\n self.mulw = mulGate.forward(W, prev_s)\n self.add = addGate.forward(self.mulw, self.mulu)\n self.s = activation.forward(self.add)\n self.mulv = mulGate.forward(V, self.s)\n \n def backward(self, x, prev_s, U, W, V, diff_s, dmulv):\n self.forward(x, prev_s, U, W, V)\n dV, dsv = mulGate.backward(V, self.s, dmulv)\n ds = dsv + diff_s\n dadd = activation.backward(self.add, ds)\n dmulw, dmulu = addGate.backward(self.mulw, self.mulu, dadd)\n dW, dprev_s = mulGate.backward(W, prev_s, dmulw)\n dU, dx = mulGate.backward(U, x, dmulu)\n return (dprev_s, dU, dW, dV)", "_____no_output_____" ] ], [ [ "Entire model is compiled here, both forward and backward propagation, contains both gradient clipping and regularization", "_____no_output_____" ] ], [ [ "class Model:\n def __init__(self, word_dim, hidden_dim=128, bptt_truncate=4):\n self.word_dim = word_dim\n self.hidden_dim = hidden_dim\n self.bptt_truncate = bptt_truncate\n self.U = np.random.uniform(-np.sqrt(1. / word_dim), np.sqrt(1. / word_dim), (hidden_dim, word_dim))\n self.W = np.random.uniform(-np.sqrt(1. / hidden_dim), np.sqrt(1. / hidden_dim), (hidden_dim, hidden_dim))\n self.V = np.random.uniform(-np.sqrt(1. / hidden_dim), np.sqrt(1. 
/ hidden_dim), (word_dim, hidden_dim))\n\n def forward_propagation(self, x):\n # The total number of time steps\n T = len(x)\n layers = []\n prev_s = np.zeros(self.hidden_dim)\n # For each time step...\n for t in range(T):\n layer = RNNLayer()\n input = x[t]\n layer.forward(input, prev_s, self.U, self.W, self.V)\n prev_s = layer.s\n layers.append(layer)\n return layers\n\n def predict(self, x):\n output = Softmax()\n layers = self.forward_propagation(x)\n return [np.argmax(output.predict(layer.mulv)) for layer in layers]\n\n def calculate_loss(self, x, y):\n assert len(x) == len(y)\n output = Softmax()\n layers = self.forward_propagation(x)\n loss = 0.0\n for i, layer in enumerate(layers):\n loss += output.loss(layer.mulv, y[i])\n return loss / float(len(y))\n\n def calculate_total_loss(self, X, Y):\n loss = 0.0\n for i in range(len(Y)):\n loss += self.calculate_loss(X[i], Y[i])\n return loss / float(len(Y))\n\n def bptt(self, x, y): #back propagation through time\n assert len(x) == len(y)\n output = Softmax()\n layers = self.forward_propagation(x)\n dU = np.zeros(self.U.shape)\n dV = np.zeros(self.V.shape)\n dW = np.zeros(self.W.shape)\n\n T = len(layers)\n prev_s_t = np.zeros(self.hidden_dim)\n diff_s = np.zeros(self.hidden_dim)\n for t in range(0, T):\n dmulv = output.diff(layers[t].mulv, y[t])\n input = x[t]\n dprev_s, dU_t, dW_t, dV_t = layers[t].backward(input, prev_s_t, self.U, self.W, self.V, diff_s, dmulv)\n prev_s_t = layers[t].s\n dmulv = np.zeros(self.word_dim)\n #gradient clipping is done here, \n for i in range(t-1, max(-1, t-self.bptt_truncate-1), -1):\n input = np.zeros(self.word_dim)\n input = x[i]\n prev_s_i = np.zeros(self.hidden_dim) if i == 0 else layers[i-1].s\n dprev_s, dU_i, dW_i, dV_i = layers[i].backward(input, prev_s_i, self.U, self.W, self.V, dprev_s, dmulv)\n dU_t += dU_i\n dW_t += dW_i\n dV += dV_t\n dU += dU_t\n dW += dW_t\n return (dU, dW, dV)\n\n def sgd_step(self, x, y, reg_param, learning_rate): # Regularization is implemented here, reg_param is the regularization parameter\n dU, dW, dV = self.bptt(x, y)\n self.U -= learning_rate * dU + (reg_param/maxlen)*self.U\n self.V -= learning_rate * dV + (reg_param/maxlen)*self.V\n self.W -= learning_rate * dW + (reg_param/maxlen)*self.W\n\n def train(self, X, Y, learning_rate, reg_param, nepoch=100, evaluate_loss_after=5):\n num_examples_seen = 0\n #initialize the vector\n m_t = np.zeros(3) \n v_t = np.zeros(3)\n t = np.zeros(3)\n losses = []\n for epoch in range(nepoch):\n if (epoch % evaluate_loss_after == 0):\n loss = self.calculate_total_loss(X, Y)\n losses.append((num_examples_seen, loss))\n time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print(\"%s: Loss after num_examples_seen=%d epoch=%d: %f\" % (time, num_examples_seen, epoch, loss))\n # Adjust the learning rate if loss increases\n if len(losses) > 1 and losses[-1][1] > losses[-2][1]:\n learning_rate = learning_rate * 0.5\n print(\"Setting learning rate to %f\" % learning_rate)\n sys.stdout.flush()\n # For each training example...\n for i in range(len(Y)):\n self.sgd_step(X[i], Y[i], reg_param, learning_rate)\n num_examples_seen += 1\n return losses", "_____no_output_____" ] ], [ [ "Code for training, trained in batches of 100", "_____no_output_____" ] ], [ [ "loss = []\nword_dim = 100 # length of embedding of glove vector\nhidden_dim = 128 # size of hidden dimensions\nnum_classes = 5 # number of classes\n\nnp.random.seed(10)\nrnn = Model(word_dim, num_classes, hidden_dim)\nfor i in range(112):\n losses = rnn.train(X_train[i*100:100*(i+1)], 
y_train[i*100:100*(i+1)], reg_param = 0.02, learning_rate=0.0001, nepoch=2, evaluate_loss_after=1)\n loss.append(losses[-1][1])\n print(losses)", "_____no_output_____" ], [ "#hyperparameter testing\n# hidden size of RNN, alpha, epoch\n#word embeddings are not considered as hyperparameter as pretrained glove matrix are hard to handle on colab(only size 100 used)", "_____no_output_____" ], [ "hidden_vals = [64,128,256]\nalpha_vals = [0.1,0.01,0.001]\nreg_param_vals = [0, 0.02, 0.04, 0.08, 0.16, 0.32]\nhidden_param = []\nreg_param = []\nlearning_rate = []\nword_dim = 100\nnum_classes = 5\n\nfor hidden_dim in hidden_vals: # hidden dimensions\n np.random.seed(10)\n rnn = Model(word_dim, num_classes, hidden_dim)\n losses = rnn.train(X_val, y_val, reg_param = 0.02, learning_rate=0.005, nepoch=10, evaluate_loss_after=1)\n hidden_param.append(losses[-1][1])\n print(losses)\n\nfor lr in alpha_vals: # learning rate\n np.random.seed(10)\n rnn = Model(word_dim, num_classes, hidden_dim)\n losses = rnn.train(X_val, y_val, reg_param = 0.02, learning_rate=lr, nepoch=10, evaluate_loss_after=1)\n learning_rate.append(losses[-1][1])\n print(losses)\n\nfor lamb in reg_param_vals: # regularization parameter, both with regularization and without one\n np.random.seed(10)\n rnn = Model(word_dim, num_classes, hidden_dim)\n losses = rnn.train(X_val, y_val, reg_param = lamb, learning_rate=0.005, nepoch=10, evaluate_loss_after=1)\n reg_param.append(losses[-1][1])\n print(losses)", "_____no_output_____" ] ], [ [ "Printing values of various hyperparameters ", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nplt.title('hidden parameter')\nplt.ylabel('loss')\nplt.xlabel('number of nodes')\nplt.plot([64,128,256], hidden_param)\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nplt.title('regularization parameter, zero corresponds to no regularization')\nplt.ylabel('loss')\nplt.xlabel('value of lambda')\nplt.plot([0, 0.02, 0.04, 0.08, 0.16, 0.32], reg_param)\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nplt.title('learning rate')\nplt.ylabel('loss')\nplt.xlabel('value of alpha')\nplt.plot([0.1,0.01,0.001], learning_rate)\nplt.show()", "_____no_output_____" ] ], [ [ "Code testing , do not run", "_____no_output_____" ] ], [ [ "said = 
[-0.13569,0.14029,0.0041988,-0.32062,0.012745,0.92511,-0.44523,-0.16454,0.6016,0.4267,0.26053,0.71426,0.57701,-0.09754,0.64286,-0.0002438,-0.3013,0.097057,-0.21678,-0.27131,0.30927,3.0062,-0.3179,0.28998,-0.39905,0.11234,0.19019,-0.49873,0.11857,-0.22241,-0.52668,-0.040781,-0.16783,-0.35887,0.60394,-0.069027,-0.60611,-0.22444,-0.51665,0.77766,-0.41533,-0.23924,-0.64725,0.16413,-0.41185,-0.15507,0.52085,-0.29298,0.034067,-0.71414,0.17124,0.18186,-0.62824,-0.21882,-0.3784,-0.60484,-0.10613,-0.0065723,0.52873,-0.12537,1.1519,0.16512,-0.12301,0.73452,0.15381,-0.021303,-0.4185,0.32132,-0.56697,0.58886,-0.19564,-0.1671,0.35969,0.42898,-0.071104,0.50165,0.15248,-0.089535,-0.67192,0.1817,-0.030661,-0.20289,-0.23925,0.33899,0.088455,-0.16614,-0.78902,0.14403,-0.22256,-1.377,0.38399,-0.14929,-0.16867,-0.73194,-0.76784,0.96968,0.26804,0.36036,-0.33896,-0.17303,0.3866,-0.76154,-0.52335,0.091027,0.049086,0.059747,0.13165,1.9048,-0.77324,-0.094455,0.17805,-0.096256,0.0684,-0.36729,0.011347,0.12147,0.2453,-0.4354,-0.1733,0.36181,0.78902,0.66458,0.45523,0.079105,0.30238,-0.29991,-0.25161,-0.060112,0.59835,-5.4345e-06,0.47396,-0.25035,-0.16549,0.54022,0.62029,0.43227,0.44656,-0.45033,-0.22726,-0.060223,0.42781,0.34668,-0.38491,-0.25167,1.0969,0.66604,0.036542,-0.32485,0.4289,0.13762,-0.22326,0.69871,0.23841,0.89013,0.23899,-0.51714,-0.050005,0.1487,0.055402,-0.41163,-0.33454,-0.014032,0.36476,-0.23411,-0.13272,-0.050314,0.19593,0.16193,0.28734,0.13478,0.74715,-0.060806,0.14221,0.047109,-0.44866,-0.408,-0.68475,-0.25236,0.16233,-0.33454,0.69021,-0.36958,-0.4338,-0.099908,-0.53847,-0.16861,-0.54684,0.40052,0.11458,0.45688,0.28415,0.43329,0.11012,0.22958,0.024016,0.20695,0.23759,0.0087948,-0.13017,-0.25626]\nwere = [-0.29716,-0.025477,-0.43389,-0.52616,-0.14354,0.33104,-0.77455,-0.096218,0.15055,0.40654,0.47132,-0.043477,0.018865,-0.31835,0.0021327,0.51416,0.07266,0.86377,0.39094,-0.2941,0.022928,2.9991,-0.14775,-0.48026,-0.46537,-0.28892,-0.28702,-0.78647,0.21987,0.23957,0.2389,0.04237,0.05836,0.11709,-0.18796,-0.31004,-0.64738,-0.7185,0.33694,0.3713,0.6525,-0.48662,0.093115,0.068172,-0.064165,0.0067802,0.8213,-0.46428,0.052333,-0.054842,-0.81224,0.12655,-0.1431,0.53736,-0.10751,-0.1201,-0.13545,0.20551,-0.25494,-0.14893,0.090701,0.36655,-0.20965,0.673,0.51765,-0.35084,-0.47291,0.31541,-0.19238,-0.02999,0.018858,-0.40037,0.0066421,0.12526,0.097123,-0.10354,0.027577,0.073137,0.22532,-0.41189,0.047902,0.11876,-0.57981,0.54385,0.33422,0.3453,0.2162,-0.69258,-0.13744,-0.59278,-0.054606,-0.10773,0.79549,-0.025879,-0.116,0.4545,0.070413,-0.49586,-0.35715,-0.28674,0.5773,-0.81279,0.019403,0.091408,0.13133,0.34139,0.52689,0.64284,-0.26375,-0.018898,-0.51385,-0.062924,0.16246,0.48888,0.72696,0.49494,-0.45197,-0.25742,0.0047182,-0.91796,0.46416,0.1966,0.67564,-0.080587,0.12588,-1.2989,-0.02176,0.4114,0.41177,-0.60598,0.10907,0.0048411,0.36911,-0.35354,0.50438,0.43354,-0.60223,-0.070002,-0.59426,0.081687,-0.74034,-0.071562,-0.14978,-0.47965,1.3329,0.52734,-0.14646,-0.18742,0.23384,-0.088226,-0.22079,0.42821,0.07259,-0.86941,0.065263,0.14209,-0.38875,0.62475,-0.043056,-0.1405,-0.087399,0.26025,0.18315,0.57811,-0.3948,0.84753,0.25191,0.15239,-0.86611,0.56031,-0.13038,0.31795,0.089552,0.24,0.11417,0.13804,-0.47975,-0.49307,-0.65965,-0.03232,0.99141,0.49788,-0.43471,-0.082005,-0.22052,-0.069465,-0.025023,0.40196,-0.22087,0.1288,-0.11603,0.21029,-0.37609,-0.065688,0.72272,0.11403,-0.08957,-0.34873,-0.47252,0.85348]\nnoti = 
[0.34303,0.4082,-0.023317,-0.36093,0.0526,0.28925,-0.72928,0.077745,0.25907,0.20004,0.14167,0.49461,-0.043323,-0.17258,0.071147,0.26755,-0.17498,0.81793,0.16388,-0.43131,-0.10978,3.3862,-0.39972,0.079416,0.00044842,0.025372,-0.066779,-0.073348,0.11878,-0.071623,-0.095796,-0.11912,0.13945,0.081686,0.11199,-0.379,-0.86427,-0.65059,0.0072629,0.11515,0.13784,-0.37365,-0.023701,0.31684,-0.22221,0.0094901,0.48885,-0.23512,0.20877,-0.36594,-0.086444,-0.19801,-0.43175,0.22803,0.32309,-0.20011,0.1592,-0.27653,-0.043781,0.30648,0.21574,0.26831,-0.12455,-0.11471,0.29235,-0.041827,-0.27627,0.6043,0.1427,0.5277,0.87699,0.22292,-0.13668,0.13928,-0.46867,-0.13543,-0.47535,-0.46317,0.01747,-0.231,0.14377,-0.15767,0.26873,0.24767,0.24199,-0.12111,-0.40109,-0.5547,0.025637,-1.227,0.25114,0.35073,0.62196,-0.19648,-0.27999,0.09606,-0.082684,-0.014249,-0.099184,-0.11603,0.35344,-0.033989,-0.060309,-0.48616,0.14479,-0.17586,-0.36172,1.2432,-0.4364,-0.16122,-0.13308,-0.34845,0.21167,0.097091,0.23652,0.027138,0.29117,-0.36449,-0.38529,-0.17665,0.37197,0.14412,0.40894,-0.11261,0.097247,-0.24356,-0.018665,0.16607,0.3411,-0.28714,-0.28679,0.49481,0.40374,-0.39017,0.35375,0.12035,-0.065398,-0.16071,-0.044542,-0.28337,0.042688,0.027212,0.37139,-0.30543,1.4214,0.53121,-0.46548,-0.19737,-0.0054895,-0.18203,0.11912,0.42438,-0.40208,0.058662,0.10283,-0.30155,-0.33827,0.34796,-0.082353,0.51862,-0.29775,0.14422,0.44026,-0.123,-0.23571,0.17797,0.11558,0.18801,-0.30945,0.48821,0.1476,0.40829,0.19013,-0.13821,0.333,0.011581,-0.57089,0.30526,0.20618,0.17889,1.2769,-0.11154,-0.47002,-0.12611,-0.20999,-0.013965,-0.078315,0.15576,0.099716,0.27449,-0.36542,0.24097,0.11508,-0.032185,0.18049,-0.028734,0.13449,0.1724,0.031143,0.031783]\nthis = [0.39086,0.65528,0.064706,-0.33366,0.18502,-0.027321,-0.3878,-0.15081,0.39917,-0.30206,0.23819,0.45941,-0.023606,-0.043237,0.54309,-0.085014,-0.044168,0.66163,-0.39539,-0.27537,0.37465,3.0274,-0.085225,0.1731,0.58574,-0.36105,0.18828,0.41495,0.13081,-0.039031,-0.24917,-0.16286,0.012653,-0.0098054,-0.11815,-0.16429,-0.90413,-0.57109,0.026838,-0.43601,-0.15484,-0.37619,0.24899,0.51744,0.0009744,0.12833,0.24256,0.26005,0.050365,-0.016651,0.091362,-0.31346,-0.0078254,0.72088,0.1415,-0.0050633,-0.24204,-0.39191,-0.058966,-0.053058,0.25604,0.074284,-0.23051,-0.54815,-0.22384,-0.031049,0.019959,0.17193,-0.054222,0.033631,0.39632,0.24702,-0.14935,0.42653,-0.27151,0.28648,-0.46361,-0.21448,-0.46598,0.057568,-0.12724,-0.14651,-0.36591,0.34369,0.05271,-0.018639,-0.46642,-0.49551,0.62577,-0.64458,0.41497,0.15695,0.5569,0.074682,-0.59488,0.15163,0.0050693,-0.11666,0.010334,-0.21172,-0.19726,0.25814,0.16371,0.10557,-0.0063813,0.12384,-0.23964,0.99755,-0.78864,0.25616,0.28623,-0.45224,-0.0022179,0.057452,0.041398,0.09977,0.027042,-0.088172,-0.29811,-0.026336,0.069132,0.23899,0.35541,-0.063479,0.0059764,-0.21982,0.38767,0.1966,-0.027814,-0.088787,-0.19875,0.14134,0.22949,-0.27561,0.13077,0.41609,-0.10151,-0.077741,-0.16479,0.043119,-0.24528,0.21869,0.095889,-0.32395,1.5937,0.31002,-0.058686,-0.45488,0.077925,0.17054,0.0018438,0.71656,-0.42026,0.07127,0.51768,-0.21712,-0.2484,0.22494,0.069718,-0.38438,0.19313,-0.1105,-0.10447,-0.32604,-0.26355,0.073983,-0.27133,0.084472,-0.58727,0.53549,-0.059486,0.22041,0.64939,-0.11922,-0.0081812,-0.063136,-0.15009,-0.096871,-0.27551,0.23581,1.8095,-0.35952,-0.026458,0.47649,-0.18462,0.057494,-0.11701,0.23265,0.043931,0.32839,0.084436,0.051592,0.021732,0.10135,0.075084,-0.23,-0.20108,0.3865,0.052221,-0.22646]\nwho = 
[0.075467,-0.29236,-0.26037,-0.28167,0.16097,-0.19472,-0.28206,0.49141,-0.0041119,0.095879,0.33396,0.016874,0.20616,-0.15743,-0.16138,0.21538,-0.14836,0.02546,-0.43681,-0.063661,0.55261,3.0819,-0.11784,-0.46131,-0.27609,0.20948,-0.20544,-0.57155,0.33448,0.15913,0.0025436,0.18004,0.13472,-0.097404,0.35537,-0.47428,-0.79257,-0.54418,0.0243,0.63599,0.12337,-0.12913,-0.26565,-0.24957,-0.52199,-0.40523,0.48403,0.018373,0.23039,0.062138,-0.19292,0.29506,-0.35793,0.16702,0.31868,-0.36054,-0.10978,-0.15632,0.45921,0.096475,-0.377,-0.077615,-0.48899,0.20575,0.50543,0.053419,-0.25978,0.51042,0.097521,0.32606,0.14354,0.0022715,0.48615,0.46938,-0.41124,-0.17148,-0.39744,-0.28901,-0.17756,0.037001,0.3483,0.15934,-0.74281,0.18897,0.043685,0.57208,-0.67016,-0.043947,-0.28336,-0.31996,-0.20404,-0.087898,-0.15724,0.021818,-0.56757,0.63296,-0.10097,-0.065576,0.0058269,0.033035,0.39783,-0.31166,-0.61089,0.27559,0.10008,-0.4199,0.006356,1.8717,0.31473,-0.36004,0.81384,-0.2171,-0.018459,-0.22632,0.14585,-0.1435,-0.041424,0.55974,-0.66752,-0.21959,0.19011,0.33015,0.6129,0.46771,0.42026,-0.52819,0.023165,0.03291,0.47306,0.014006,-0.17396,-0.44362,0.41377,-0.20679,0.39283,0.30211,0.073134,0.042164,-0.9271,-0.47614,0.2431,-0.13379,-0.22238,-0.041457,1.585,0.37481,0.025994,-0.24272,0.30578,0.14687,0.11666,-0.029418,-0.078339,-0.22512,0.13315,-0.064842,-0.28687,-0.01056,-0.34668,0.042145,-0.60041,0.82481,0.31022,0.16489,-0.072921,0.19394,-0.098498,-0.020383,-0.40909,-0.10404,0.19169,-0.15969,0.38026,0.62802,0.2595,-0.33367,-0.73333,-0.40743,0.68423,-0.066338,0.50436,-0.28983,-0.39086,-0.045931,-0.26624,-0.1677,-0.15037,0.14828,-0.28143,-0.17087,-0.25576,-0.056283,-0.1665,0.35106,0.041032,0.27311,0.03002,0.16465,-0.084189,0.057506]\nthey = [0.0528,0.13495,-0.38214,-0.27999,-0.38392,-0.084598,-0.37277,0.10046,-0.023454,0.71256,0.024259,0.2814,0.010209,-0.19417,-0.30018,0.2074,-0.18105,0.71611,-0.11311,-0.26412,0.17681,3.2366,-0.2329,-0.07999,0.13482,-0.22448,-0.11799,-0.092002,0.23552,-0.12608,0.12557,-0.21536,0.12587,0.049804,0.024991,-0.55934,-0.93924,-0.51797,0.42125,0.34652,0.19897,-0.014204,0.17652,0.43155,-0.38901,0.032722,0.95275,-0.24641,0.12699,0.24171,-0.2495,0.13663,-0.58702,0.53192,0.25364,-0.44631,0.071339,-0.1072,0.016232,0.28356,0.25355,0.36077,-0.066849,0.22595,0.26663,0.1958,-0.21102,0.39934,-0.0066386,0.095334,0.46205,0.028948,0.20202,0.1217,-0.1758,-0.24526,-0.29105,0.153,-0.081641,-0.1527,-0.061132,0.33011,-0.27052,0.24315,0.31077,0.24527,-0.55383,-0.56974,-0.055326,-1.1106,0.23658,0.092638,0.73168,0.085777,-0.24735,0.2301,0.049224,-0.10027,-0.12215,-0.32324,0.45269,-0.2575,-0.16278,-0.18948,0.23016,0.33619,0.11044,1.0879,-0.48341,-0.24421,-0.11329,-0.028835,0.19768,0.31868,0.14758,0.1623,0.071596,0.032118,0.072414,-0.16726,0.74261,0.12729,0.3097,-0.32765,0.12688,-0.69808,0.20057,0.2493,0.49528,-0.4856,-0.46447,0.41582,0.47338,-0.38178,0.4538,0.18398,-0.12377,-0.28023,-0.22348,-0.11058,-0.26895,-0.13358,0.21927,-0.46339,1.4292,0.67536,-0.17342,-0.46714,0.0090229,0.099559,0.15438,0.44203,-0.49867,-0.061944,-0.0033505,-0.17547,-0.1701,0.34289,-0.093336,0.28919,-0.32519,0.13184,0.0011452,-0.084836,-0.26536,0.46792,0.23974,0.044068,-0.75486,0.30871,0.10976,0.039385,0.48202,0.053616,0.43554,0.057253,-0.56428,-0.33534,-0.032656,0.19587,1.2662,-0.3715,-0.74673,-0.19089,-0.2307,0.094298,-0.018306,0.055102,-0.028414,0.27891,-0.22275,0.25638,0.064459,0.031974,0.19601,0.004318,0.043765,0.42737,-0.20698,0.44511]", "_____no_output_____" ], [ "print(reviews[0].split())", "['the', 'rock', 'is', 'destined', 'to', 
'be', 'the', '21st', 'century', 's', 'new', 'conan', 'and', 'that', 'he', 's', 'going', 'to', 'make', 'a', 'splash', 'even', 'greater', 'than', 'arnold', 'schwarzenegger', 'jeanclaud', 'van', 'damme', 'or', 'steven', 'segal']\n" ], [ "a = embeddings_dictionary['movie']\nb = embeddings_dictionary['was']\nc = embeddings_dictionary['awesome']\nd = embeddings_dictionary['it']\ne = embeddings_dictionary['was']\nf = embeddings_dictionary['horrible']", "_____no_output_____" ], [ "print(\"said = [\"+said.replace(\" \",\",\")+']')", "said = [-0.13569,0.14029,0.0041988,-0.32062,0.012745,0.92511,-0.44523,-0.16454,0.6016,0.4267,0.26053,0.71426,0.57701,-0.09754,0.64286,-0.0002438,-0.3013,0.097057,-0.21678,-0.27131,0.30927,3.0062,-0.3179,0.28998,-0.39905,0.11234,0.19019,-0.49873,0.11857,-0.22241,-0.52668,-0.040781,-0.16783,-0.35887,0.60394,-0.069027,-0.60611,-0.22444,-0.51665,0.77766,-0.41533,-0.23924,-0.64725,0.16413,-0.41185,-0.15507,0.52085,-0.29298,0.034067,-0.71414,0.17124,0.18186,-0.62824,-0.21882,-0.3784,-0.60484,-0.10613,-0.0065723,0.52873,-0.12537,1.1519,0.16512,-0.12301,0.73452,0.15381,-0.021303,-0.4185,0.32132,-0.56697,0.58886,-0.19564,-0.1671,0.35969,0.42898,-0.071104,0.50165,0.15248,-0.089535,-0.67192,0.1817,-0.030661,-0.20289,-0.23925,0.33899,0.088455,-0.16614,-0.78902,0.14403,-0.22256,-1.377,0.38399,-0.14929,-0.16867,-0.73194,-0.76784,0.96968,0.26804,0.36036,-0.33896,-0.17303,0.3866,-0.76154,-0.52335,0.091027,0.049086,0.059747,0.13165,1.9048,-0.77324,-0.094455,0.17805,-0.096256,0.0684,-0.36729,0.011347,0.12147,0.2453,-0.4354,-0.1733,0.36181,0.78902,0.66458,0.45523,0.079105,0.30238,-0.29991,-0.25161,-0.060112,0.59835,-5.4345e-06,0.47396,-0.25035,-0.16549,0.54022,0.62029,0.43227,0.44656,-0.45033,-0.22726,-0.060223,0.42781,0.34668,-0.38491,-0.25167,1.0969,0.66604,0.036542,-0.32485,0.4289,0.13762,-0.22326,0.69871,0.23841,0.89013,0.23899,-0.51714,-0.050005,0.1487,0.055402,-0.41163,-0.33454,-0.014032,0.36476,-0.23411,-0.13272,-0.050314,0.19593,0.16193,0.28734,0.13478,0.74715,-0.060806,0.14221,0.047109,-0.44866,-0.408,-0.68475,-0.25236,0.16233,-0.33454,0.69021,-0.36958,-0.4338,-0.099908,-0.53847,-0.16861,-0.54684,0.40052,0.11458,0.45688,0.28415,0.43329,0.11012,0.22958,0.024016,0.20695,0.23759,0.0087948,-0.13017,-0.25626]\n" ], [ "print(X_train.shape)", "(2, 3, 200)\n" ], [ "X_train = np.array([[a,b,c],[d,e,f]])\ny_train = np.array([[4,4,4],[0,0,0]])", "_____no_output_____" ], [ "import numpy as np\nfrom numpy import savetxt", "_____no_output_____" ], [ "#fetch dataset\nfrom google.colab import drive\ndrive.mount('/content/drive',force_remount=True)\npat='/content/drive/My Drive/Projects/STCS assignment(Word vectors)/'\npat2='/content/drive/My Drive/Projects/HASOC/'\n", "Mounted at /content/drive\n" ], [ "a=np.loadtxt(pat+'Movie Review Dataset.txt',delimiter='\\t',dtype=str)", "_____no_output_____" ], [ "a2=[]\nfor e1,e2 in a:\n a2+=[e2]", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "d=np.loadtxt(pat+'Dictionary.txt',delimiter='\\t',dtype=str)", "_____no_output_____" ], [ "len(d)", "_____no_output_____" ], [ "d2=' '.join(d)", "_____no_output_____" ], [ "d3=d2.split('|')", "_____no_output_____" ], [ "len(d3)", "_____no_output_____" ], [ "d3", "_____no_output_____" ], [ "d3[-1]", "_____no_output_____" ], [ "p1=[d3[0]]\np2=[]\nfor el in d3[1:]:\n e=el.split(' ')\n p1+=[' '.join(e[1:])]\n p2+=[int(e[0])]", "_____no_output_____" ], [ "len(p1),len(p2)", "_____no_output_____" ], [ "p1[-2]", "_____no_output_____" ], [ "p1=p1[:-1]", "_____no_output_____" ], [ "", "_____no_output_____" ], [ 
"p2[-1]", "_____no_output_____" ], [ "dicti=dict(zip(p2,p1))", "_____no_output_____" ], [ "dicti[220444]", "_____no_output_____" ], [ "p3=[]\nfor el in a2:\n try:\n p3+=[p2[p1.index(el)]]\n except ValueError:\n p3+=[-1]", "_____no_output_____" ], [ "p4=np.array(p3)", "_____no_output_____" ], [ "len(p4[p4!=-1])", "_____no_output_____" ], [ "sent=np.loadtxt(pat+'Senti_scores.txt',delimiter='|',dtype=str)", "_____no_output_____" ], [ "sent[1]", "_____no_output_____" ], [ "s1=[]\ns2=[]\nfor e1,e2 in sent[1:]:\n s1+=[int(e1)]\n s2+=[float(e2)]", "_____no_output_____" ], [ "p6=[]\nfor el in p3:\n try:\n p6+=[s2[s1.index(el)]]\n except ValueError:\n p6+=[-1]", "_____no_output_____" ], [ "p6[3]", "_____no_output_____" ], [ "#p6 all senti scores in order of sentence if not present has -1\n#p3 phrase index of all sentence in that order\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec832b421ecfb76bf641c743954e13853500ffaa
23,974
ipynb
Jupyter Notebook
docs/jax-101/02-jitting.ipynb
mtsokol/jax
2fc2ff409a5fc3712d42b3cd4c3bb86dff28948a
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
docs/jax-101/02-jitting.ipynb
mtsokol/jax
2fc2ff409a5fc3712d42b3cd4c3bb86dff28948a
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
docs/jax-101/02-jitting.ipynb
mtsokol/jax
2fc2ff409a5fc3712d42b3cd4c3bb86dff28948a
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
41.477509
952
0.6306
[ [ [ "# Just In Time Compilation with JAX\n\n[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/google/jax/blob/master/docs/jax-101/02-jitting.ipynb)\n\n*Authors: TODO*\n\nIn this section, we will further explore how JAX works, and how we can make it performant.\nWe will discuss the `jax.jit()` transform, which will perform *Just In Time* (JIT) compilation\nof a JAX Python function so it can be executed efficiently in XLA.\n\n## How JAX transforms work\n\nIn the previous section, we discussed that JAX allows us to transform Python functions. This is done by first converting the Python function into a simple intermediate language called jaxpr. The transformations then work on the jaxpr representation. \n\nWe can show a representation of the jaxpr of a function by using `jax.make_jaxpr`:", "_____no_output_____" ] ], [ [ "import jax\nimport jax.numpy as jnp\n\nglobal_list = []\n\ndef log2(x):\n global_list.append(x)\n ln_x = jnp.log(x)\n ln_2 = jnp.log(2.0)\n return ln_x / ln_2\n\nprint(jax.make_jaxpr(log2)(3.0))", "{ lambda ; a.\n let b = log a\n c = log 2.0\n d = div b c\n in (d,) }\n" ] ], [ [ "The [Understanding Jaxprs](https://jax.readthedocs.io/en/latest/jaxpr.html) section of the documentation provides more information on the meaning of the above output.\n\nImportantly, note how the jaxpr does not capture the side-effect of the function: there is nothing in it corresponding to `global_list.append(x)`. This is a feature, not a bug: JAX is designed to understand side-effect-free (a.k.a. functionally pure) code. If *pure function* and *side-effect* are unfamiliar terms, this is explained in a little more detail in [🔪 JAX - The Sharp Bits 🔪: Pure Functions]](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#%F0%9F%94%AA-Pure-functions).\n\nOf course, impure functions can still be written and even run, but JAX gives no guarantees about their behaviour once converted to jaxpr. However, as a rule of thumb, you can expect (but shouldn't rely on) the side-effects of a JAX-transformed function to run once (during the first call), and never again. This is because of the way that JAX generates jaxpr, using a process called 'tracing'.\n\nWhen tracing, JAX wraps each argument by a *tracer* object. These tracers then record all JAX operations performed on them during the function call (which happens in regular Python). Then, JAX uses the tracer records to reconstruct the entire function. The output of that reconstruction is the jaxpr. Since the tracers do not record the Python side-effects, they do not appear in the jaxpr. However, the side-effects still happen during the trace itself.\n\nNote: the Python `print()` function is not pure: the text output is a side-effect of the function. Therefore, any `print()` calls will only happen during tracing, and will not appear in the jaxpr:", "_____no_output_____" ] ], [ [ "def log2_with_print(x):\n print(\"printed x:\", x)\n ln_x = jnp.log(x)\n ln_2 = jnp.log(2.0)\n return ln_x / ln_2\n\nprint(jax.make_jaxpr(log2_with_print)(3.))", "printed x: Traced<ShapedArray(float32[], weak_type=True)>with<DynamicJaxprTrace(level=1/0)>\n{ lambda ; a.\n let b = log a\n c = log 2.0\n d = div b c\n in (d,) }\n" ] ], [ [ "See how the printed `x` is a `Traced` object? That's the JAX internals at work.\n\nThe fact that the Python code runs at least once is strictly an implementation detail, and so shouldn't be relied upon. 
However, it's useful to understand as you can use it when debugging to print out intermediate values of a computation.", "_____no_output_____" ], [ "A key thing to understand is that jaxpr captures the function as executed on the parameters given to it. For example, if we have a conditional, jaxpr will only know about the branch we take:", "_____no_output_____" ] ], [ [ "def log2_if_rank_2(x):\n if x.ndim == 2:\n ln_x = jnp.log(x)\n ln_2 = jnp.log(2.0)\n return ln_x / ln_2\n else:\n return x\n\nprint(jax.make_jaxpr(log2_if_rank_2)(jax.numpy.array([1, 2, 3])))", "{ lambda ; a.\n let \n in (a,) }\n" ] ], [ [ "## JIT compiling a function\n\nAs explained before, JAX enables operations to execute on CPU/GPU/TPU using the same code.\nLet's look at an example of computing a *Scaled Exponential Linear Unit*\n([SELU](https://proceedings.neurips.cc/paper/6698-self-normalizing-neural-networks.pdf)), an\noperation commonly used in deep learning:", "_____no_output_____" ] ], [ [ "import jax\nimport jax.numpy as jnp\n\ndef selu(x, alpha=1.67, lambda_=1.05):\n return lambda_ * jnp.where(x > 0, x, alpha * jnp.exp(x) - alpha)\n\nx = jnp.arange(1000000)\n%timeit selu(x).block_until_ready()", "The slowest run took 21.33 times longer than the fastest. This could mean that an intermediate result is being cached.\n100 loops, best of 5: 7.9 ms per loop\n" ] ], [ [ "The code above is sending one operation at a time to the accelerator. This limits the ability of the XLA compiler to optimize our functions.\n\nNaturally, what we want to do is give the XLA compiler as much code as possible, so it can fully optimize it. For this purpose, JAX provides the `jax.jit` transformation, which will JIT compile a JAX-compatible function. The example below shows how to use JIT to speed up the previous function.", "_____no_output_____" ] ], [ [ "selu_jit = jax.jit(selu)\n\n# Warm up\nselu_jit(x).block_until_ready()\n\n%timeit selu_jit(x).block_until_ready()", "1000 loops, best of 5: 1.6 ms per loop\n" ] ], [ [ "Here's what just happened:\n\n1) We defined `selu_jit` as the compiled version of `selu`.\n\n2) We ran `selu_jit` once on `x`. This is where JAX does its tracing -- it needs to have some inputs to wrap in tracers, after all. The jaxpr is then compiled using XLA into very efficient code optimized for your GPU or TPU. Subsequent calls to `selu_jit` will now use that code, skipping our old Python implementation entirely. \n\n(If we didn't include the warm-up call separately, everything would still work, but then the compilation time would be included in the benchmark. It would still be faster, because we run many loops in the benchmark, but it wouldn't be a fair comparison.)\n\n3) We timed the execution speed of the compiled version. (Note the use of `block_until_ready()`, which is required due to JAX's [Asynchronous execution](https://jax.readthedocs.io/en/latest/async_dispatch.html) model).", "_____no_output_____" ], [ "## Why can't we just JIT everything?\n\nAfter going through the example above, you might be wondering whether we should simply apply `jax.jit` to every function. To understand why this is not the case, and when we should/shouldn't apply `jit`, let's first check some cases where JIT doesn't work.", "_____no_output_____" ] ], [ [ "# Condition on value of x.\n\ndef f(x):\n if x > 0:\n return x\n else:\n return 2 * x\n\nf_jit = jax.jit(f)\nf_jit(10) # Should raise an error. 
", "_____no_output_____" ], [ "# While loop conditioned on x and n.\n\ndef g(x, n):\n i = 0\n while i < n:\n i += 1\n return x + i\n\ng_jit = jax.jit(g)\ng_jit(10, 20) # Should raise an error. ", "_____no_output_____" ] ], [ [ "The problem is that we tried to condition on the *value* of an input to the function being jitted. The reason we can't do this is related to the fact mentioned above that jaxpr depends on the actual values used to trace it. \n\nThe more specific information about the values we use in the trace, the more we can use standard Python control flow to express ourselves. However, being too specific means we can't reuse the same traced function for other values. JAX solves this by tracing at different levels of abstraction for different purposes.\n\nFor `jax.jit`, the default level is `ShapedArray` -- that is, each tracer has a concrete shape (which we're allowed to condition on), but no concrete value. This allows the compiled function to work on all possible inputs with the same shape -- the standard use case in machine learning. However, because the tracers have no concrete value, if we attempt to condition on one, we get the error above.\n\nIn `jax.grad`, the constraints are more relaxed, so you can do more. If you compose several transformations, however, you must satisfy the constraints of the most strict one. So, if you `jit(grad(f))`, `f` mustn't condition on value. For more detail on the interaction between Python control flow and JAX, see [🔪 JAX - The Sharp Bits 🔪: Control Flow](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#%F0%9F%94%AA-Control-Flow).\n\nOne way to deal with this problem is to rewrite the code to avoid conditionals on value. Another is to use special [control flow operators](https://jax.readthedocs.io/en/latest/jax.lax.html#control-flow-operators) like `jax.lax.cond`. However, sometimes that is impossible. In that case, you can consider jitting only part of the function. For example, if the most computationally expensive part of the function is inside the loop, we can JIT just that inner part (though make sure to check the next section on caching to avoid shooting yourself in the foot):", "_____no_output_____" ] ], [ [ "# While loop conditioned on x and n with a jitted body.\n\[email protected]\ndef loop_body(prev_i):\n return prev_i + 1\n\ndef g_inner_jitted(x, n):\n i = 0\n while i < n:\n i = loop_body(i)\n return x + i\n\ng_inner_jitted(10, 20)", "_____no_output_____" ] ], [ [ "If we really need to JIT a function that has a condition on the value of an input, we can tell JAX to help itself to a less abstract tracer for a particular input by specifying `static_argnums`. The cost of this is that the resulting jaxpr is less flexible, so JAX will have to re-compile the function for every new value of the specified input. It is only a good strategy if the function is guaranteed to get limited different values.", "_____no_output_____" ] ], [ [ "f_jit_correct = jax.jit(f, static_argnums=0)\nprint(f_jit_correct(10))", "10\n" ], [ "g_jit_correct = jax.jit(g, static_argnums=1)\nprint(g_jit_correct(10, 20))", "30\n" ] ], [ [ "## When to use JIT\n\nIn many of the the examples above, jitting is not worth it:", "_____no_output_____" ] ], [ [ "print(\"g jitted:\")\n%timeit g_jit_correct(10, 20).block_until_ready()\n\nprint(\"g:\")\n%timeit g(10, 20)", "g jitted:\nThe slowest run took 62.20 times longer than the fastest. 
This could mean that an intermediate result is being cached.\n10000 loops, best of 5: 83 µs per loop\ng:\n1000000 loops, best of 5: 926 ns per loop\n" ] ], [ [ "This is because `jax.jit` introduces some overhead itself. Therefore, it usually only saves time if the compiled function is complex and you will run it numerous times. Fortunately, this is common in machine learning, where we tend to compile a large, complicated model, then run it for millions of iterations.\n\nGenerally, you want to jit the largest possible chunk of your computation; ideally, the entire update step. This gives the compiler maximum freedom to optimise.", "_____no_output_____" ], [ "## Caching\n\nIt's important to understand the caching behaviour of `jax.jit`.\n\nSuppose I define `f = jax.jit(g)`. When I first invoke `f`, it will get compiled, and the resulting XLA code will get cached. Subsequent calls of `f` will reuse the cached code. This is how `jax.jit` makes up for the up-front cost of compilation.\n\nIf I specify `static_argnums`, then the cached code will be used only for the same values of arguments labelled as static. If any of them change, recompilation occurs. If there are many values, then your program might spend more time compiling than it would have executing ops one-by-one.\n\nAvoid calling `jax.jit` inside loops. Doing that effectively creates a new `f` at each call, which will get compiled each time instead of reusing the same cached function:", "_____no_output_____" ] ], [ [ "def unjitted_loop_body(prev_i):\n return prev_i + 1\n\ndef g_inner_jitted_poorly(x, n):\n i = 0\n while i < n:\n # Don't do this!\n i = jax.jit(unjitted_loop_body)(i)\n return x + i\n\nprint(\"jit called outside the loop:\")\n%timeit g_inner_jitted(10, 20).block_until_ready()\n\nprint(\"jit called inside the loop:\")\n%timeit g_inner_jitted_poorly(10, 20).block_until_ready()", "jit called outside the loop:\n100 loops, best of 5: 10.8 ms per loop\njit called inside the loop:\n100 loops, best of 5: 15.2 ms per loop\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
ec832c55ea29898d85c69414c5c57edea936d0f6
19,393
ipynb
Jupyter Notebook
Dev/BTC-USD/Codes/04 Short Term Dependent Variable.ipynb
Sidhus234/WQU-Capstone-Project-2021
d92cf80e06e8f919e1404c1e93200d2e92847c71
[ "MIT" ]
6
2021-04-11T09:18:15.000Z
2022-03-29T15:42:40.000Z
Dev/BTC-USD/Codes/04 Short Term Dependent Variable.ipynb
Sidhus234/WQU-Capstone-Project-2021
d92cf80e06e8f919e1404c1e93200d2e92847c71
[ "MIT" ]
null
null
null
Dev/BTC-USD/Codes/04 Short Term Dependent Variable.ipynb
Sidhus234/WQU-Capstone-Project-2021
d92cf80e06e8f919e1404c1e93200d2e92847c71
[ "MIT" ]
2
2022-02-24T06:06:50.000Z
2022-03-31T13:12:46.000Z
30.492138
162
0.393338
[ [ [ "# <span style=\"color:Maroon\">Short Term Dependent Variable", "_____no_output_____" ], [ "__Summary:__ <span style=\"color:Blue\">20 trading days standard deviation will be used to define the dependent variable as below:\n \n$\\;\\;\\;\\;\\;\\;$ <span style=\"color:Blue\">Buy: If in next 5 days the price goes above (today price + 1 std deviation)\n \n$\\;\\;\\;\\;\\;\\;$ <span style=\"color:Blue\">Sell: If in next 5 days the price goes below (today price - 1.5 std deviation)\n \n$\\;\\;\\;\\;\\;\\;$ <span style=\"color:Blue\">No Action: If price oscillates between (today price + 1 std deviation) and (today price - 1.5 std deviation)", "_____no_output_____" ] ], [ [ "# Import required libraries\nimport warnings\nwarnings.filterwarnings('ignore')\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nnp.random.seed(0)", "_____no_output_____" ], [ "# User defined names\nindex = \"BTC-USD\"\nfilename = index+\"_hurst_segment.csv\"\ndate_col = \"Date\"\nstd_window = 20 # Window size to calculate std\nanalysis_window = 5 # Number of days in future to analyze price for tagging", "_____no_output_____" ], [ "# Get current working directory\nmycwd = os.getcwd()\nprint(mycwd)", "C:\\Users\\sidhu\\Downloads\\Course 10 Capstone Project\\Trading Strategy Development\\Dev\\BTC-USD\\Codes\n" ], [ "# Change to data directory\nos.chdir(\"..\")\nos.chdir(str(os.getcwd()) + \"\\\\Data\")", "_____no_output_____" ], [ "# Read the data\ndf = pd.read_csv(filename, index_col=date_col)\ndf.index = pd.to_datetime(df.index)\ndf.head()", "_____no_output_____" ] ], [ [ " ", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "## <span style=\"color:Maroon\">Calculations for Dependent Variable", "_____no_output_____" ] ], [ [ "# Calculate N days standard deviation\ndf['DVT STD'] = df['Adj Close'].rolling(std_window).std()\n# Calculate Maximum and Minimum price in next n days\ndf['DVT MAX'] = df['Adj Close'].rolling(analysis_window).max().shift(-analysis_window)\ndf['DVT MIN'] = df['Adj Close'].rolling(analysis_window).min().shift(-analysis_window)", "_____no_output_____" ], [ "# Calculate the upper and lower range as todays price +- 1 std\ndf['DVT Upper'] = df['Adj Close'] + df['DVT STD']\ndf['DVT Lower'] = df['Adj Close'] - 1.5*df['DVT STD']", "_____no_output_____" ], [ "# Define the dependent variable. We shall give preference to Buy decision over sell decision\ndf['Target'] = np.where(df['DVT MAX'] > df['DVT Upper'], 1,np.where(df['DVT MIN'] < df['DVT Lower'], -1, 0))", "_____no_output_____" ], [ "# Value counts\ndf['Target'].value_counts()", "_____no_output_____" ], [ "# Cross Tab\ndf1 = df[df['hurst_200'] > 0]\npd.crosstab(df1['Target'], df1['Segment'], normalize='columns')", "_____no_output_____" ] ], [ [ "__Comments:__ <span style=\"color:Blue\"> Both segments tend to provide almost equal buy and sell signals", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "## <span style=\"color:Maroon\">Save the Data", "_____no_output_____" ] ], [ [ "# Get the columns\ndf.columns", "_____no_output_____" ], [ "# drop columns not required in future\ndf.drop(['DVT MAX', 'DVT MIN', 'DVT Upper', 'DVT Lower'], axis=1, inplace=True)", "_____no_output_____" ], [ "# Get the columns\ndf.columns", "_____no_output_____" ], [ "os.chdir(\"..\")\nos.chdir(str(os.getcwd()) + \"\\\\Data\")\ndf.to_csv(index +\"_hurst_segment_dependent\"+\".csv\", index=True)", "_____no_output_____" ] ], [ [ " ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
ec832ca5a4477664e1deb876d37193f195f5ff6d
7,552
ipynb
Jupyter Notebook
Maximo.ipynb
vanderleik/Logica
eed2da8cf91d512b329f60e9e234b8e7cb0616c1
[ "MIT" ]
null
null
null
Maximo.ipynb
vanderleik/Logica
eed2da8cf91d512b329f60e9e234b8e7cb0616c1
[ "MIT" ]
null
null
null
Maximo.ipynb
vanderleik/Logica
eed2da8cf91d512b329f60e9e234b8e7cb0616c1
[ "MIT" ]
null
null
null
23.380805
124
0.477225
[ [ [ "#### Criar uma função capaz de determinar qual valor é maior entre dois números determinados.\n\n### Pseudo-Código\n\n1. Defina n1 como sendo o primeiro número\n2. Defina n2 como sendo o segundo número\n3. Estabeleça a condição de que:\n3.1. se n1 > n2, então n1 é maior que n2.\n3.2. se n1 < n2, então n2 é maior que n1.", "_____no_output_____" ], [ "### Código do programa", "_____no_output_____" ] ], [ [ "# Solução 01\n\nenter = input(\"Pressione <Enter> para iniciar...\")\n\n# 1. Defina n1 como sendo o primeiro número\nn1 = input(\"\\nInforme o primeiro valor:\")\nn1 = int(n1)\n\n# 2. Defina n2 como sendo o segundo número\nn2 = input(\"\\nInforme o segundo valor:\")\nn2 = int(n2)\n\n# 3. Estabeleça a condição de que:\n# 3.1. se n1 > n2, então n1 é maior que n2.\nif n1 > n2:\n print(n1, \"é maior que \",n2)\n# 3.2. se n1 < n2, então n2 é maior que n1.\nelif n2 > n1:\n print(n2, \"é maior que \",n1)\nelse:\n print(n1, \"é igual a \",n2)", "Pressione <Enter> para iniciar...\n\nInforme o primeiro valor:1\n\nInforme o segundo valor:2\n2 é maior que 1\n" ] ], [ [ "### Solução 02, adaptada da aula.\nEssa solução não prevê que os números de entrada podem ser iguais!\n", "_____no_output_____" ] ], [ [ "enter = input(\"\\nPressione <Enter> para executar novamente o programa com outro código de programação...\")", "\nPressione <Enter> para executar novamente o programa com outro código de programação...\n" ], [ "def maximo2(a, b):\n if a > b:\n return a\n else:\n return b\n\n# 1. Defina a como sendo o primeiro número\na = input(\"\\nInforme o valor de a:\")\na = int(a)\n\n# 2. Defina b como sendo o segundo número\nb = input(\"\\nInforme o valor de b:\")\nb = int(b)\n\n# 3. Estabeleça a condição de que:\n# 3.1. se a > b, então a é maior que b.\n# 3.2. se a < b, então b é maior que a.\n\nprint(\"\\nO maior valor entre\",a, \"e\", b, \"é\", maximo2(a, b))", "\nInforme o valor de a:10\n\nInforme o valor de b:20\n\nO maior valor entre 10 e 20 é 20\n" ], [ "enter = input(\"\\nPressione <Enter> para executar outro programa, que encontrará o valor máximo entre 3 números...\")", "\nPressione <Enter> para executar outro programa, que encontrará o valor máximo entre 3 números...\n" ] ], [ [ "### Pseudo-Código para obter valor máximo entre 3 números\n\n1. Leia 3 números representados por a, b e c.\n2. Se a > b e b > c; retorne a.\n3. Se a < b e b > c; retorne b.\n4. Se as condições acima não forem satisfeitas, retorne c.", "_____no_output_____" ], [ "### Código do programa", "_____no_output_____" ] ], [ [ "# Solução 01\n\n# 1. Leia 3 números representados por a, b e c.\ndef maximo3(a, b, c):\n # 2. Se a > b e b > c; retorne a.\n if a > b and b > c:\n print(\"O maior valor é\", a)\n # 3. Se a < b e b > c; retorne b.\n elif a < b and b > c:\n print(\"O marior valor é \", b)\n # 4. Se as condições acima não forem satisfeitas, retorne c.\n else:\n print(\"O maior valor é\", c)\n\na = input(\"\\nInforme o valor de a:\")\na = int(a)\n\nb = input(\"\\nInforme o valor de b:\")\nb = int(b)\n\nc = input(\"\\nInforme o valor de c:\")\nc = int(c)\n\nmaximo3(a, b, c)", "\nInforme o valor de a:5\n\nInforme o valor de b:10\n\nInforme o valor de c:15\nO maior valor é 15\n" ], [ "enter = input(\"\\nPressione <Enter> para entrar novamente com os valores e ver outra solução...\")", "\nPressione <Enter> para entrar novamente com os valores e ver outra solução...\n" ], [ "# Solução 02\n\n# 1. 
Leia 3 números representados por a, b e c.\ndef maximo3_v2(a, b, c):\n if a < b:\n return maximo2(b, c)\n else:\n return maximo2(a, c)\n\na = input(\"\\nInforme o valor de a:\")\na = int(a)\n\nb = input(\"\\nInforme o valor de b:\")\nb = int(b)\n\nc = input(\"\\nInforme o valor de c:\")\nc = int(c)\n\nprint(\"O maior valor é:\", maximo3_v2(a, b, c))", "\nInforme o valor de a:25\n\nInforme o valor de b:20\n\nInforme o valor de c:10\nO maior valor é: 25\n" ], [ "enter = input(\"\\nPressione <Enter> para encerrar...\")", "\nPressione <Enter> para encerrar...\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
ec832d325d7794466b250c7f6c421d81b7e1b152
619
ipynb
Jupyter Notebook
Notebooks/Graphs.ipynb
BenjaminBorn/IntroToJulia
888969b4763e6d76b170a956d47c1ffb594585a3
[ "MIT" ]
2
2020-09-26T11:17:00.000Z
2021-06-24T01:15:37.000Z
Notebooks/Graphs.ipynb
BenjaminBorn/IntroToJulia
888969b4763e6d76b170a956d47c1ffb594585a3
[ "MIT" ]
null
null
null
Notebooks/Graphs.ipynb
BenjaminBorn/IntroToJulia
888969b4763e6d76b170a956d47c1ffb594585a3
[ "MIT" ]
2
2019-06-14T14:20:37.000Z
2021-09-16T22:53:25.000Z
20.633333
171
0.575121
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec833c992a6e5873a93cf5cef06f969ed2f7b2ac
261,999
ipynb
Jupyter Notebook
_solved/case3_bacterial_resistance_lab_experiment.ipynb
jorisvandenbossche/DS-python-data-analysis
b4dd68b9c912c5d5c52c607aa117f5054449c73d
[ "BSD-3-Clause" ]
65
2017-03-21T09:15:40.000Z
2022-02-01T23:43:08.000Z
_solved/case3_bacterial_resistance_lab_experiment.ipynb
jorisvandenbossche/DS-python-data-analysis
b4dd68b9c912c5d5c52c607aa117f5054449c73d
[ "BSD-3-Clause" ]
100
2016-12-15T03:44:06.000Z
2022-03-07T08:14:07.000Z
_solved/case3_bacterial_resistance_lab_experiment.ipynb
jorisvandenbossche/DS-python-data-analysis
b4dd68b9c912c5d5c52c607aa117f5054449c73d
[ "BSD-3-Clause" ]
52
2016-12-19T07:48:52.000Z
2022-02-19T17:53:48.000Z
137.460126
63,188
0.835915
[ [ [ "<p><font size=\"6\"><b>CASE - Bacterial resistance experiment</b></font></p>\n\n> *DS Data manipulation, analysis and visualization in Python*\n> *May/June, 2021*\n>\n> *© 2021, Joris Van den Bossche and Stijn Van Hoey (<mailto:[email protected]>, <mailto:[email protected]>). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*\n\n---", "_____no_output_____" ], [ "In this case study, we will make use of the open data, affiliated to the following [journal article](http://rsbl.royalsocietypublishing.org/content/12/5/20160064):\n\n>Arias-Sánchez FI, Hall A (2016) Effects of antibiotic resistance alleles on bacterial evolutionary responses to viral parasites. Biology Letters 12(5): 20160064. https://doi.org/10.1098/rsbl.2016.0064", "_____no_output_____" ], [ "<img src=\"../img/bacteriophage.jpeg\">", "_____no_output_____" ], [ "Check the full paper on the [web version](http://rsbl.royalsocietypublishing.org/content/12/5/20160064). The study handles:\n> Antibiotic resistance has wide-ranging effects on bacterial phenotypes and evolution. However, the influence of antibiotic resistance on bacterial responses to parasitic viruses remains unclear, despite the ubiquity of such viruses in nature and current interest in therapeutic applications. We experimentally investigated this by exposing various Escherichia coli genotypes, including eight antibiotic-resistant genotypes and a mutator, to different viruses (lytic bacteriophages). Across 960 populations, we measured changes in population density and sensitivity to viruses, and tested whether variation among bacterial genotypes was explained by their relative growth in the absence of parasites, or mutation rate towards phage resistance measured by fluctuation tests for each phage", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Reading and processing the data", "_____no_output_____" ], [ "The data is available on [Dryad](http://www.datadryad.org/resource/doi:10.5061/dryad.90qb7.3), a general purpose data repository providing all kinds of data sets linked to journal papers. The downloaded data is available in this repository in the `data` folder as an excel-file called `Dryad_Arias_Hall_v3.xlsx`.\n\nFor the exercises, two sheets of the excel file will be used:\n* `Main experiment`:\n\n\n| Variable name | Description |\n|---------------:|:-------------|\n|**AB_r** |\tAntibiotic resistance |\n|**Bacterial_genotype** | Bacterial genotype |\n|**Phage_t** |\tPhage treatment |\n|**OD_0h** |\tOptical density at the start of the experiment (0h) |\n|**OD_20h**\t| Optical density after 20h |\n|**OD_72h**\t| Optical density at the end of the experiment (72h) |\n|**Survival_72h** |\tPopulation survival at 72h (1=survived, 0=extinct) |\n|**PhageR_72h**\t| Bacterial sensitivity to the phage they were exposed to (0=no bacterial growth, 1= colony formation in the presence of phage) |\n\n* `Falcor`: we focus on a subset of the columns:\n\n| Variable name | Description |\n|---------------:|:-------------|\n| **Phage** | Bacteriophage used in the fluctuation test (T4, T7 and lambda) |\n| **Bacterial_genotype** | Bacterial genotype. 
|\n| **log10 Mc** |\tLog 10 of corrected mutation rate |\n| **log10 UBc** |\tLog 10 of corrected upper bound |\n| **log10 LBc** |\tLog 10 of corrected lower bound |", "_____no_output_____" ], [ "Reading the `main experiment` data set from the corresponding sheet:", "_____no_output_____" ] ], [ [ "main_experiment = pd.read_excel(\"data/Dryad_Arias_Hall_v3.xlsx\",\n sheet_name=\"Main experiment\")\nmain_experiment", "_____no_output_____" ] ], [ [ "Read the `Falcor` data and subset the columns of interest:", "_____no_output_____" ] ], [ [ "falcor = pd.read_excel(\"data/Dryad_Arias_Hall_v3.xlsx\", sheet_name=\"Falcor\",\n skiprows=1)\nfalcor = falcor[[\"Phage\", \"Bacterial_genotype\", \"log10 Mc\", \"log10 UBc\", \"log10 LBc\"]]\nfalcor.head()", "_____no_output_____" ] ], [ [ "## Tidy the `main_experiment` data", "_____no_output_____" ], [ "*(If you're wondering what `tidy` data representations are, check again the `pandas_07_reshaping_data.ipynb` notebook)*", "_____no_output_____" ], [ "Actually, the columns `OD_0h`, `OD_20h` and `OD_72h` are representing the same variable (i.e. `optical_density`) and the column names itself represent a variable, i.e. `experiment_time_h`. Hence, it is stored in the table as *short* format and we could *tidy* these columns by converting them to 2 columns: `experiment_time_h` and `optical_density`.", "_____no_output_____" ], [ "Before making any changes to the data, we will add an identifier column for each of the current rows to make sure we keep the connection in between the entries of a row when converting from wide to long format.", "_____no_output_____" ] ], [ [ "main_experiment[\"experiment_ID\"] = [\"ID_\" + str(idx) for idx in range(len(main_experiment))]\nmain_experiment", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n<b>EXERCISE</b>:\n\nConvert the columns `OD_0h`, `OD_20h` and `OD_72h` to a long format with the values stored in a column `optical_density` and the time in the experiment as `experiment_time_h`. 
Save the variable as <code>tidy_experiment</code>\n\n<details><summary>Hints</summary>\n\n- Have a look at `pandas_07_reshaping_data.ipynb` to find out the required function.\n- Remember to check the documentation of a function using the `SHIFT` + `TAB` keystroke combination when the cursor is on the function of interest.\n\n</details>\n\n</div>", "_____no_output_____" ] ], [ [ "tidy_experiment = main_experiment.melt(id_vars=['AB_r', 'Bacterial_genotype', 'Phage_t',\n 'Survival_72h', 'PhageR_72h', 'experiment_ID'],\n value_vars=['OD_0h', 'OD_20h', 'OD_72h'],\n var_name='experiment_time_h',\n value_name='optical_density', )\ntidy_experiment", "_____no_output_____" ] ], [ [ "## Visual data exploration", "_____no_output_____" ] ], [ [ "tidy_experiment.head()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n<b>EXERCISE</b>:\n\n* Make a histogram using the [Seaborn package](https://seaborn.pydata.org/index.html) to visualize the distribution of the `optical_density`\n* Change the overall theme to any of the available Seaborn themes\n* Change the border color of the bars to `white` and the fill color of the bars to `grey`\n\n<details><summary>Hints</summary>\n\n- See https://seaborn.pydata.org/tutorial/distributions.html#plotting-univariate-histograms.\n- There are five preset seaborn themes: `darkgrid`, `whitegrid`, `dark`, `white`, and `ticks`.\n- Make sure to set the theme before creating the graph.\n- Seaborn relies on Matplotlib to plot the individual bars, so the available parameters (`**kwargs`) that can be passed to adjust the bars (e.g. `color` and `edgecolor`) are listed in the [matplotlib.axes.Axes.bar](https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.axes.Axes.bar.html) documentation.\n\n</details>\n\n\n</div>", "_____no_output_____" ] ], [ [ "sns.set_style(\"white\")\nsns.displot(tidy_experiment, x=\"optical_density\",\n color='grey', edgecolor='white')", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n**EXERCISE**\n\nUse a Seaborn `violin plot` to check the distribution of the `optical_density` in each of the experiment time phases (`experiment_time_h` in the x-axis).\n\n<details><summary>Hints</summary>\n\n- See https://seaborn.pydata.org/tutorial/categorical.html#violinplots.\n- Whereas the previous exercise focuses on the distribution of data (`displot`), this exercise focuses on distributions _for each category of..._ and needs the categorical functions of Seaborn (`catplot`).\n\n</details>", "_____no_output_____" ] ], [ [ "sns.catplot(data=tidy_experiment, x=\"experiment_time_h\",\n y=\"optical_density\", kind=\"violin\")", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n**EXERCISE**\n\nFor each `Phage_t` in an individual subplot, use a `violin plot` to check the distribution of the `optical_density` in each of the experiment time phases (`experiment_time_h`)\n\n<details><summary>Hints</summary>\n\n- The technical term for splitting in subplots using a categorical variable is 'faceting' (or sometimes also 'small multiple'), see https://seaborn.pydata.org/tutorial/categorical.html#showing-multiple-relationships-with-facets\n- You want to wrap the number of columns on 2 subplots, look for a function argument in the documentation of the `catplot` function.\n\n</details>", "_____no_output_____" ] ], [ [ "sns.catplot(data=tidy_experiment, x=\"experiment_time_h\", y=\"optical_density\",\n col=\"Phage_t\", col_wrap=2, kind=\"violin\")", "_____no_output_____" ] ], [ [ "<div class=\"alert 
alert-success\">\n\n**EXERCISE**\n\nCreate a summary table of the __average__ `optical_density` with the `Bacterial_genotype` in the rows and the `experiment_time_h` in the columns\n\n<details><summary>Hints</summary>\n\n- No Seaborn required here, rely on Pandas `pivot_table()` function to reshape tables.\n\n</details>", "_____no_output_____" ] ], [ [ "pd.pivot_table(tidy_experiment, values='optical_density',\n index='Bacterial_genotype',\n columns='experiment_time_h',\n aggfunc='mean')", "_____no_output_____" ] ], [ [ "Advanced/optional solution:", "_____no_output_____" ] ], [ [ "# advanced/optional solution\ntidy_experiment.groupby(['Bacterial_genotype', 'experiment_time_h'])['optical_density'].mean().unstack()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n**EXERCISE**\n\n- Calculate for each combination of `Bacterial_genotype`, `Phage_t` and `experiment_time_h` the <i>mean</i> `optical_density` and store the result as a DataFrame called `density_mean` (tip: use `reset_index()` to convert the resulting Series to a DataFrame).\n- Based on `density_mean`, make a _barplot_ of the (mean) values for each `Bacterial_genotype`, with for each `Bacterial_genotype` an individual bar and with each `Phage_t` in a different color/hue (i.e. grouped bar chart).\n- Use the `experiment_time_h` to split into subplots. As we mainly want to compare the values within each subplot, make sure the scales in each of the subplots are adapted to its own data range, and put the subplots on different rows.\n- Adjust the size and aspect ratio of the Figure to your own preference.\n- Change the color scale of the bars to another Seaborn palette.\n\n<details><summary>Hints</summary>\n\n\n- _Calculate for each combination of..._ should remind you to the `groupby` functionality of Pandas to calculate statistics for each group.\n- The exercise is still using the `catplot` function of Seaborn with `bar`s. Variables are used to vary the `hue` and `row`.\n- Each subplot its own range is the same as not sharing axes (`sharey` argument).\n- Seaborn in fact has six variations of matplotlib’s palette, called `deep`, `muted`, `pastel`, `bright`, `dark`, and `colorblind`. See https://seaborn.pydata.org/tutorial/color_palettes.html#qualitative-color-palettes\n\n</details>", "_____no_output_____" ] ], [ [ "density_mean = (tidy_experiment\n .groupby(['Bacterial_genotype','Phage_t', 'experiment_time_h'])['optical_density']\n .mean().reset_index())", "_____no_output_____" ], [ "sns.catplot(data=density_mean, kind=\"bar\",\n x='Bacterial_genotype',\n y='optical_density',\n hue='Phage_t',\n row=\"experiment_time_h\",\n sharey=False,\n aspect=3, height=3,\n palette=\"colorblind\")", "_____no_output_____" ] ], [ [ "## (Optional) Reproduce chart of the original paper", "_____no_output_____" ], [ "Check Figure 2 of the original journal paper in the 'correction' part of the <a href=\"http://rsbl.royalsocietypublishing.org/content/roybiolett/12/5/20160064.full.pdf\">pdf</a>:\n\n<img src=\"https://royalsocietypublishing.org/cms/attachment/eb511c57-4167-4575-b8b3-93fbcf728572/rsbl20160064f02.jpg\" width=\"500\">", "_____no_output_____" ] ], [ [ "falcor.head()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n**EXERCISE**\n\nWe will first reproduce 'Figure 2' without the error bars:\n\n- Make sure the `WT(2)` and `MUT(2)` categories are used as respectively `WT` and `MUT` by adjusting them with Pandas first.\n- Use the __falcor__ data and the Seaborn package. 
The 'log10 mutation rate' on the figure corresponds to the `log10 Mc` column.\n\n\n<details><summary>Hints</summary>\n\n- To replace values using a mapping (dictionary with the keys the current values and the values the new values), use the Pandas `replace` method.\n- This is another example of a `catplot`, using `point`s to represent the data.\n- The `join` argument defines if individual points need to be connected or not.\n- One combination appears multiple times, so make sure to not yet use confidence intervals by setting `ci` to `None`.\n\n</details>", "_____no_output_____" ] ], [ [ "falcor[\"Bacterial_genotype\"] = falcor[\"Bacterial_genotype\"].replace({'WT(2)': 'WT',\n 'MUT(2)': 'MUT'})\nfalcor.head()", "_____no_output_____" ], [ "sns.catplot(data=falcor, kind=\"point\",\n x='Bacterial_genotype',\n y='log10 Mc',\n row=\"Phage\",\n join=False, ci=None,\n aspect=3, height=3,\n color=\"black\")", "_____no_output_____" ] ], [ [ "Seaborn supports confidence intervals by different estimators when multiple values are combined (see [this example](https://seaborn.pydata.org/examples/pointplot_anova.html)). In this particular case, the error estimates are already provided and are not symmetrical. Hence, we need to find a method to use the lower `log10 LBc` and upper `log10 UBc` confidence intervals.\n\nStackoverflow can help you with this, see [this thread](https://stackoverflow.com/questions/38385099/adding-simple-error-bars-to-seaborn-factorplot) to solve the following exercise.", "_____no_output_____" ], [ "<div class=\"alert alert-success\">\n\n**EXERCISE**\n\nReproduce 'Figure 2' with the error bars using the information from [this Stackoverflow thread](https://stackoverflow.com/questions/38385099/adding-simple-error-bars-to-seaborn-factorplot). You do not have to adjust the order of the categories in the x-axis.\n\n<details><summary>Hints</summary>\n\n- Do not use the `catplot` function, but first create the layout of the graph by `FacetGrid` on the `Phage` variable.\n- Next, map a custom `errorbar` function to the FacetGrid as in the example from Stackoverflow.\n- Adjust/Simplify the `errorbar` custom function for your purpose.\n- Matplotlib uses the `capsize` argument to draw the upper and lower lines of the intervals.\n\n</details>", "_____no_output_____" ] ], [ [ "falcor[\"Bacterial_genotype\"] = falcor[\"Bacterial_genotype\"].replace({'WT(2)': 'WT',\n 'MUT(2)': 'MUT'})", "_____no_output_____" ], [ "def errorbar(x, y, low, high, **kws):\n \"\"\"Utility function to link falcor data representation with the errorbar representation\"\"\"\n plt.errorbar(x, y, (y - low, high - y), capsize=3, fmt=\"o\", color=\"black\", ms=4)", "_____no_output_____" ], [ "sns.set_style(\"ticks\")\ng = sns.FacetGrid(falcor, row=\"Phage\", aspect=3, height=3)\ng.map(errorbar,\n \"Bacterial_genotype\", \"log10 Mc\",\n \"log10 LBc\", \"log10 UBc\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
ec835f7d30b590cd9d59d91a2ea58159910d34a5
85,173
ipynb
Jupyter Notebook
extract_tracks.ipynb
RnoB/fly-matrix
50733b1be715fccb386c1a4bb9e57f19d82a0078
[ "MIT" ]
2
2019-07-16T12:42:47.000Z
2021-12-10T09:39:33.000Z
extract_tracks.ipynb
RnoB/fly-matrix
50733b1be715fccb386c1a4bb9e57f19d82a0078
[ "MIT" ]
null
null
null
extract_tracks.ipynb
RnoB/fly-matrix
50733b1be715fccb386c1a4bb9e57f19d82a0078
[ "MIT" ]
2
2019-07-16T12:41:26.000Z
2020-06-08T07:59:04.000Z
326.333333
71,968
0.926937
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nimport json", "_____no_output_____" ], [ "df = pd.DataFrame(np.load('/Users/vivekhsridhar/Documents/Code/Python/fly-matrix/two_choice.npy'), columns=['x','y','z'])\ndf = df.iloc[::200, :]\ndf = df.reset_index()\ndf.head()", "_____no_output_____" ], [ "df['bool_dist'] = 0\ndf.loc[(df['x'] - df['x'].shift())**2 + (df['y'] - df['y'].shift())**2 > 1, 'bool_dist'] = 1", "_____no_output_____" ], [ "df['event_id'] = 0\nfor i in range(1,len(df)):\n if df.loc[i,'bool_dist'] == 0:\n df.loc[i,'event_id'] = df.loc[i-1,'event_id']\n else:\n df.loc[i,'event_id'] = df.loc[i-1,'event_id'] + 1", "_____no_output_____" ], [ "plt.scatter(df['x'], df['y'], s=1, c=df['event_id'], alpha = 0.1)", "_____no_output_____" ], [ "fly = []\nfor i in df['event_id'].unique():\n pos = []\n tmp = df[df['event_id'] == i]\n tmp = tmp.reset_index()\n for idx in range(0,len(tmp)):\n pos.append({\"x\":tmp.loc[idx,'y'], \"y\":tmp.loc[idx,'z'], \"z\":tmp.loc[idx,'x']})\n fly.append({\"id\" : int(i), \"trajectory\" : pos})", "_____no_output_____" ], [ "all_tracks = {\"all_tracks\" : fly}", "_____no_output_____" ], [ "with open('/Users/vivekhsridhar/Documents/Code/Python/fly-matrix/sparse_tracks.json', 'w') as f:\n json.dump(all_tracks, f)", "_____no_output_____" ], [ "tmp = df[df['event_id'] == 6]\n\nplt.scatter(tmp['x'], tmp['y'], s=1)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec837779bd011578c40134928b77b4b29e522b06
9,210
ipynb
Jupyter Notebook
notebooks/kubeflow_pipelines/cicd/solutions/kfp_cicd.ipynb
Jonathanpro/asl-ml-immersion
c461aa215339a6816810dfef5a92a6e375f9bc66
[ "Apache-2.0" ]
11
2021-09-08T05:39:02.000Z
2022-03-25T14:35:22.000Z
notebooks/kubeflow_pipelines/cicd/solutions/kfp_cicd.ipynb
Jonathanpro/asl-ml-immersion
c461aa215339a6816810dfef5a92a6e375f9bc66
[ "Apache-2.0" ]
118
2021-08-28T03:09:44.000Z
2022-03-31T00:38:44.000Z
notebooks/kubeflow_pipelines/cicd/solutions/kfp_cicd.ipynb
Jonathanpro/asl-ml-immersion
c461aa215339a6816810dfef5a92a6e375f9bc66
[ "Apache-2.0" ]
110
2021-09-02T15:01:35.000Z
2022-03-31T12:32:48.000Z
34.62406
325
0.629533
[ [ [ "# CI/CD for a KFP pipeline", "_____no_output_____" ], [ "**Learning Objectives:**\n1. Learn how to create a custom Cloud Build builder to pilote CAIP Pipelines\n1. Learn how to write a Cloud Build config file to build and push all the artifacts for a KFP\n1. Learn how to setup a Cloud Build Github trigger to rebuild the KFP", "_____no_output_____" ], [ "In this lab you will walk through authoring of a **Cloud Build** CI/CD workflow that automatically builds and deploys a KFP pipeline. You will also integrate your workflow with **GitHub** by setting up a trigger that starts the workflow when a new tag is applied to the **GitHub** repo hosting the pipeline's code.\n\n\n", "_____no_output_____" ], [ "## Configuring environment settings\n\nUpdate the `ENDPOINT` constant with the settings reflecting your lab environment. \n\nThe endpoint to the AI Platform Pipelines instance can be found on the [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) page in the Google Cloud Console.\n\n1. Open the *SETTINGS* for your instance\n2. Use the value of the `host` variable in the *Connect to this Kubeflow Pipelines instance from a Python client via Kubeflow Pipelines SKD* section of the *SETTINGS* window.", "_____no_output_____" ] ], [ [ "ENDPOINT = \"<YOUR_ENDPOINT>\"\nPROJECT_ID = !(gcloud config get-value core/project)\nPROJECT_ID = PROJECT_ID[0]", "_____no_output_____" ] ], [ [ "## Creating the KFP CLI builder\n### Review the Dockerfile describing the KFP CLI builder", "_____no_output_____" ] ], [ [ "!cat kfp-cli/Dockerfile", "_____no_output_____" ] ], [ [ "### Build the image and push it to your project's **Container Registry**.", "_____no_output_____" ] ], [ [ "IMAGE_NAME = \"kfp-cli\"\nTAG = \"latest\"\nIMAGE_URI = f\"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}\"", "_____no_output_____" ], [ "!gcloud builds submit --timeout 15m --tag {IMAGE_URI} kfp-cli", "_____no_output_____" ] ], [ [ "## Understanding the **Cloud Build** workflow.\n\nReview the `cloudbuild.yaml` file to understand how the CI/CD workflow is implemented and how environment specific settings are abstracted using **Cloud Build** variables.\n\nThe CI/CD workflow automates the steps you walked through manually during `lab-02-kfp-pipeline`:\n1. Builds the trainer image\n1. Builds the base image for custom components\n1. Compiles the pipeline\n1. Uploads the pipeline to the KFP environment\n1. Pushes the trainer and base images to your project's **Container Registry**\n\nAlthough the KFP backend supports pipeline versioning, this feature has not been yet enable through the **KFP** CLI. As a temporary workaround, in the **Cloud Build** configuration a value of the `TAG_NAME` variable is appended to the name of the pipeline. \n\nThe **Cloud Build** workflow configuration uses both standard and custom [Cloud Build builders](https://cloud.google.com/cloud-build/docs/cloud-builders). The custom builder encapsulates **KFP CLI**. 
", "_____no_output_____" ], [ "## Manually triggering CI/CD runs\n\nYou can manually trigger **Cloud Build** runs using the `gcloud builds submit` command.", "_____no_output_____" ] ], [ [ "SUBSTITUTIONS = \"\"\"\n_ENDPOINT={},\\\n_TRAINER_IMAGE_NAME=trainer_image,\\\n_BASE_IMAGE_NAME=base_image,\\\nTAG_NAME=test,\\\n_PIPELINE_FOLDER=.,\\\n_PIPELINE_DSL=covertype_training_pipeline.py,\\\n_PIPELINE_PACKAGE=covertype_training_pipeline.yaml,\\\n_PIPELINE_NAME=covertype_continuous_training,\\\n_RUNTIME_VERSION=1.15,\\\n_PYTHON_VERSION=3.7,\\\n_USE_KFP_SA=True,\\\n_COMPONENT_URL_SEARCH_PREFIX=https://raw.githubusercontent.com/kubeflow/pipelines/0.2.5/components/gcp/\n\"\"\".format(\n ENDPOINT\n).strip()", "_____no_output_____" ], [ "!gcloud builds submit . --config cloudbuild.yaml --substitutions {SUBSTITUTIONS}", "_____no_output_____" ] ], [ [ "## Setting up GitHub integration\n\nIn this exercise you integrate your CI/CD workflow with **GitHub**, using [Cloud Build GitHub App](https://github.com/marketplace/google-cloud-build). \nYou will set up a trigger that starts the CI/CD workflow when a new tag is applied to the **GitHub** repo managing the pipeline source code. You will use a fork of this repo as your source GitHub repository.", "_____no_output_____" ], [ "### Create a fork of this repo\n[Follow the GitHub documentation](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) to fork this repo", "_____no_output_____" ], [ "### Create a **Cloud Build** trigger\n\nConnect the fork you created in the previous step to your Google Cloud project and create a trigger following the steps in the [Creating GitHub app trigger](https://cloud.google.com/cloud-build/docs/create-github-app-triggers) article. Use the following values on the **Edit trigger** form:\n\n|Field|Value|\n|-----|-----|\n|Name|[YOUR TRIGGER NAME]|\n|Description|[YOUR TRIGGER DESCRIPTION]|\n|Event| Tag|\n|Source| [YOUR FORK]|\n|Tag (regex)|.\\*|\n|Build Configuration|Cloud Build configuration file (yaml or json)|\n|Cloud Build configuration file location| ./notebooks/kubeflow_pipelines/cicd/solutions/cloudbuild.yaml|\n\n\nUse the following values for the substitution variables:\n\n|Variable|Value|\n|--------|-----|\n|_BASE_IMAGE_NAME|base_image|\n|_COMPONENT_URL_SEARCH_PREFIX|https://raw.githubusercontent.com/kubeflow/pipelines/0.2.5/components/gcp/|\n|_ENDPOINT|[Your inverting proxy host]|\n|_PIPELINE_DSL|covertype_training_pipeline.py|\n|_PIPELINE_FOLDER|notebooks/kubeflow_pipelines/cicd/solutions|\n|_PIPELINE_NAME|covertype_training_deployment|\n|_PIPELINE_PACKAGE|covertype_training_pipeline.yaml|\n|_PYTHON_VERSION|3.7|\n|_RUNTIME_VERSION|1.15|\n|_TRAINER_IMAGE_NAME|trainer_image|\n|_USE_KFP_SA|False|", "_____no_output_____" ], [ "### Trigger the build\n\nTo start an automated build [create a new release of the repo in GitHub](https://help.github.com/en/github/administering-a-repository/creating-releases). Alternatively, you can start the build by applying a tag using `git`. 
\n```\ngit tag [TAG NAME]\ngit push origin --tags\n```\n", "_____no_output_____" ], [ "<font size=-1>Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \\\"AS IS\\\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.</font>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec8378923061e3dab9f93cbea5520500c929bd54
4,654
ipynb
Jupyter Notebook
Analyse NewYork city fire department Dataset/PROBLEM & DATA PROVIDED/Pandas - Assignment 02.ipynb
NeoWist/aiengineer-simplylearn-projects
6b0c2413c4882e8c711918b4541b6de1a5237f2e
[ "MIT" ]
null
null
null
Analyse NewYork city fire department Dataset/PROBLEM & DATA PROVIDED/Pandas - Assignment 02.ipynb
NeoWist/aiengineer-simplylearn-projects
6b0c2413c4882e8c711918b4541b6de1a5237f2e
[ "MIT" ]
null
null
null
Analyse NewYork city fire department Dataset/PROBLEM & DATA PROVIDED/Pandas - Assignment 02.ipynb
NeoWist/aiengineer-simplylearn-projects
6b0c2413c4882e8c711918b4541b6de1a5237f2e
[ "MIT" ]
null
null
null
19.152263
224
0.532015
[ [ [ "<img src=\"http://cfs22.simplicdn.net/ice9/new_logo.svgz \"/>\n\n# Assignment 02: Evaluate the FDNY Dataset\n\n*The comments/sections provided are your cues to perform the assignment. You don't need to limit yourself to the number of rows/cells provided. You can add additional rows in each section to add more lines of code.*\n\n*If at any point in time you need help on solving this assignment, view our demo video to understand the different steps of the code.*\n\n**Happy coding!**\n\n* * *", "_____no_output_____" ], [ "#### 1: View and import the dataset", "_____no_output_____" ] ], [ [ "#Import the required libraries\n", "_____no_output_____" ], [ "#Import the Fire Department of New York City (FDNY) file\n", "_____no_output_____" ] ], [ [ "#### 2: Analyze the dataset", "_____no_output_____" ] ], [ [ "#View the content of the data\n", "_____no_output_____" ], [ "#View the first five records\n", "_____no_output_____" ], [ "#Skip the duplicate header row\n", "_____no_output_____" ], [ "#Verify if the dataset is fixed\n", "_____no_output_____" ], [ "#View the data statistics (Hint: use describe() method)\n", "_____no_output_____" ], [ "#View the attributes of the dataset (Hint: view the column names)\n", "_____no_output_____" ], [ "#View the index of the dataset\n", "_____no_output_____" ] ], [ [ "#### 3: Find the total number of fire department facilities in New York city", "_____no_output_____" ] ], [ [ "#Count number of records for each attribute\n", "_____no_output_____" ], [ "#view the datatypes of all three attributes\n", "_____no_output_____" ] ], [ [ "#### 4: Find the total number of fire department facilities in each borough", "_____no_output_____" ] ], [ [ "#Select FDNY information boroughwise\n", "_____no_output_____" ], [ "#View FDNY informationn for each borough\n", "_____no_output_____" ] ], [ [ "#### 5: Find the total number of fire department facilities in Manhattan", "_____no_output_____" ] ], [ [ "#Select FDNY information for Manhattan\n", "_____no_output_____" ], [ "#View FDNY information for Manhattan\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec837f9814f0f3fea87f76e57f38fb0c7318849d
3,829
ipynb
Jupyter Notebook
examples/server_embed.ipynb
canavandl/jupyterlab_bokeh
090e344fa7460824dccb04a7bcda6493c3b07625
[ "BSD-3-Clause" ]
105
2019-11-01T14:18:41.000Z
2022-03-05T17:55:40.000Z
examples/server_embed.ipynb
canavandl/jupyterlab_bokeh
090e344fa7460824dccb04a7bcda6493c3b07625
[ "BSD-3-Clause" ]
72
2019-10-19T20:59:15.000Z
2022-03-22T23:21:31.000Z
examples/server_embed.ipynb
canavandl/jupyterlab_bokeh
090e344fa7460824dccb04a7bcda6493c3b07625
[ "BSD-3-Clause" ]
25
2017-08-24T13:52:50.000Z
2019-07-03T19:03:30.000Z
27.948905
94
0.515539
[ [ [ "# Test Case: Server embed", "_____no_output_____" ], [ "Expected result:\n\n* The initial output should be \"BokehJS is loading...\"\n* The output should be updated to /[Logo] BokehJS x.y.z successfully loaded", "_____no_output_____" ] ], [ [ "import numpy as np\n\nfrom bokeh.layouts import row, widgetbox\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import Slider, TextInput\nfrom bokeh.plotting import figure, output_notebook, show\n\noutput_notebook()", "_____no_output_____" ], [ "def modify_doc(doc):\n # Set up data\n N = 200\n x = np.linspace(0, 4*np.pi, N)\n y = np.sin(x)\n source = ColumnDataSource(data=dict(x=x, y=y))\n\n # Set up plot\n plot = figure(plot_height=400, plot_width=400, title=\"my sine wave\",\n tools=\"crosshair,pan,reset,save,wheel_zoom\",\n x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])\n\n plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\n # Set up widgets\n text = TextInput(title=\"title\", value='my sine wave')\n offset = Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1)\n amplitude = Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0, step=0.1)\n phase = Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi)\n freq = Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1, step=0.1)\n\n # Set up callbacks\n def update_title(attrname, old, new):\n plot.title.text = text.value\n\n text.on_change('value', update_title)\n\n def update_data(attrname, old, new):\n\n # Get the current slider values\n a = amplitude.value\n b = offset.value\n w = phase.value\n k = freq.value\n\n # Generate the new curve\n x = np.linspace(0, 4*np.pi, N)\n y = a*np.sin(k*x + w) + b\n\n source.data = dict(x=x, y=y)\n\n for w in [offset, amplitude, phase, freq]:\n w.on_change('value', update_data)\n\n\n # Set up layouts and add to document\n inputs = widgetbox(text, offset, amplitude, phase, freq)\n\n doc.add_root(row(inputs, plot, width=800))\n doc.title = \"Sliders\"", "_____no_output_____" ] ], [ [ "Expected Result:\n \n* A Bokeh line plot should be rendered\n* Dragging the sliders should update the line glyph\n* Changing the title text area and hitting Enter should update the plot title", "_____no_output_____" ] ], [ [ "show(modify_doc)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
ec8397042698e343f00a81897656898c89a95bdf
4,851
ipynb
Jupyter Notebook
notebooks/01_Data_analysis/01_introduction_Pandas.ipynb
ElDwarf/DS_and_ML_Docs
d173b09593799058589306330b2061969b3648ff
[ "MIT" ]
null
null
null
notebooks/01_Data_analysis/01_introduction_Pandas.ipynb
ElDwarf/DS_and_ML_Docs
d173b09593799058589306330b2061969b3648ff
[ "MIT" ]
null
null
null
notebooks/01_Data_analysis/01_introduction_Pandas.ipynb
ElDwarf/DS_and_ML_Docs
d173b09593799058589306330b2061969b3648ff
[ "MIT" ]
null
null
null
22.990521
248
0.563801
[ [ [ "# 01 Intrucción Pandas\n\n[Pandas](https://pandas.pydata.org/) es una libreria que provee herramientas para menjode estructuras de datos y analisis de datos.\n\nPanda nos permite crear dataframes partiendo desde muchas fuentes como por ejemplo los archivos csv, para ello vamos a empezar importando la libreria y luego vamos a cargar un csv desde internet, lo mismo se podria hacer desde su disco local.", "_____no_output_____" ], [ "### Importamos pandas\n\n Importamos pandas y luego imprimimos la version para verificar que se importo correctamente", "_____no_output_____" ] ], [ [ "from __future__ import print_function\n\nimport pandas as pd\npd.__version__", "_____no_output_____" ] ], [ [ "### Carga de datos\n\nCargamos un csv desde internet por medio de la url directamente", "_____no_output_____" ] ], [ [ "ch_df = pd.read_csv(\"https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv\", sep=\",\")", "_____no_output_____" ] ], [ [ "Los dataframe tiene una estructura de tabla con columnas con sus titulos las cules pueden ser utlizadas para consultar, buscar, filtrar datos al igual que registros con los datos.\n\na continuacion vamos a empezar a revisar metodos utiles dentro de pandas", "_____no_output_____" ], [ "### Metodo head\n\nNos permite ver rapidamente las columans que conforman el dataframe y un top de 5 registros", "_____no_output_____" ] ], [ [ "ch_df.head()", "_____no_output_____" ] ], [ [ "### Metodo describe\n\nNos mostrar estadísticas interesantes sobre un DataFrame\n 1. count: Cantidad de datos en la columna\n 2. mean: Muestra la media del set de datos\n 3. std: Muestra la desviacion estandart del set de datos\n 4. min: Muestra el minimo del set de datos\n 5. 25%: Muestra el percentil al 25% del set de datos.\n 6. 50%: Muestra el percentil al 50% del set de datos.\n 7. 75%: Muestra el percentil al 75% del set de datos.\n 8. max: Muestra el maximo del set de datos\n\n", "_____no_output_____" ] ], [ [ "ch_df.describe()", "_____no_output_____" ] ], [ [ "### Acceder a una columan en particular", "_____no_output_____" ] ], [ [ "ch_df['median_income'].head()", "_____no_output_____" ] ], [ [ "### Graficacion: Generar un histograma de una columna del DataFrame", "_____no_output_____" ] ], [ [ "ch_df.hist('median_income')", "_____no_output_____" ] ], [ [ "### Acceder al dato de una celda en particular de una columna", "_____no_output_____" ] ], [ [ "ch_df['median_income'][1]", "_____no_output_____" ] ], [ [ "### Metodo describe() de una columna en particular", "_____no_output_____" ] ], [ [ "ch_df['median_income'].describe()", "_____no_output_____" ] ], [ [ "### Aplicar operacion a toda una columna", "_____no_output_____" ] ], [ [ "ch_df['median_income'] = ch_df['median_income']/10\nch_df['median_income'].describe()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec83a5e314259c874d36ec7911b1fd921b2bd1f1
87,990
ipynb
Jupyter Notebook
Malaria Detection/Malaria_Detection_CNN.ipynb
Ashishkumar-hub/Deep-Learning-Projects
ea61372b79c3400955a69411b2cfdf6cad49e1cc
[ "Apache-2.0" ]
2
2022-02-08T18:17:13.000Z
2022-02-14T10:02:32.000Z
Malaria Detection/Malaria_Detection_CNN.ipynb
Ashishkumar-hub/Deep-Learning-Projects
ea61372b79c3400955a69411b2cfdf6cad49e1cc
[ "Apache-2.0" ]
null
null
null
Malaria Detection/Malaria_Detection_CNN.ipynb
Ashishkumar-hub/Deep-Learning-Projects
ea61372b79c3400955a69411b2cfdf6cad49e1cc
[ "Apache-2.0" ]
null
null
null
86.519174
26,498
0.723116
[ [ [ "## Creating CNN Using Scratch ", "_____no_output_____" ] ], [ [ "# import the libraries as shown below\n\nfrom tensorflow.keras.layers import Input, Lambda, Dense, Flatten,Conv2D\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.applications.vgg19 import VGG19\nfrom tensorflow.keras.applications.resnet50 import preprocess_input\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img\nfrom tensorflow.keras.models import Sequential\nimport numpy as np\nfrom glob import glob\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "from google.colab import drive\r\ndrive.mount('/content/drive/')", "Mounted at /content/drive/\n" ], [ "# re-size all the images to this\nIMAGE_SIZE = [224, 224]\n\ntrain_path = '/content/drive/MyDrive/Colab Notebooks/Dataset/Train'\nvalid_path = '/content/drive/MyDrive/Colab Notebooks/Dataset/Test'\n", "_____no_output_____" ], [ "from tensorflow.keras.layers import MaxPooling2D", "_____no_output_____" ], [ "### Create Model from scratch using CNN\nmodel=Sequential()\nmodel.add(Conv2D(filters=16,kernel_size=2,padding=\"same\",activation=\"relu\",input_shape=(224,224,3)))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Conv2D(filters=32,kernel_size=2,padding=\"same\",activation =\"relu\"))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Conv2D(filters=64,kernel_size=2,padding=\"same\",activation=\"relu\"))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Flatten())\nmodel.add(Dense(500,activation=\"relu\"))\nmodel.add(Dense(2,activation=\"softmax\"))\nmodel.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 224, 224, 16) 208 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 112, 112, 16) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 112, 112, 32) 2080 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 56, 56, 32) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 56, 56, 64) 8256 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 28, 28, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 50176) 0 \n_________________________________________________________________\ndense (Dense) (None, 500) 25088500 \n_________________________________________________________________\ndense_1 (Dense) (None, 2) 1002 \n=================================================================\nTotal params: 25,100,046\nTrainable params: 25,100,046\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# tell the model what cost and optimization method to use\nmodel.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy']\n)\n", "_____no_output_____" ], [ "# Use the Image Data Generator to import the images from the dataset\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\ntest_datagen = ImageDataGenerator(rescale = 1./255)\n", "_____no_output_____" ], [ "# Make sure you provide the same target size as 
initialized for the image size\ntraining_set = train_datagen.flow_from_directory('/content/drive/MyDrive/Colab Notebooks/Dataset/Train',\n target_size = (224, 224),\n batch_size = 32,\n class_mode = 'categorical')", "Found 416 images belonging to 2 classes.\n" ], [ "training_set", "_____no_output_____" ], [ "test_set = test_datagen.flow_from_directory('/content/drive/MyDrive/Colab Notebooks/Dataset/Test',\n target_size = (224, 224),\n batch_size = 32,\n class_mode = 'categorical')", "Found 134 images belonging to 2 classes.\n" ], [ "# fit the model\n# Run the cell. It will take some time to execute\nr = model.fit_generator(\n training_set,\n validation_data=test_set,\n epochs=50,\n steps_per_epoch=len(training_set),\n validation_steps=len(test_set)\n)", "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1844: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n warnings.warn('`Model.fit_generator` is deprecated and '\n" ], [ "", "_____no_output_____" ], [ "# plot the loss\nplt.plot(r.history['loss'], label='train loss')\nplt.plot(r.history['val_loss'], label='val loss')\nplt.legend()\nplt.show()\nplt.savefig('LossVal_loss')\n\n# plot the accuracy\nplt.plot(r.history['accuracy'], label='train acc')\nplt.plot(r.history['val_accuracy'], label='val acc')\nplt.legend()\nplt.show()\nplt.savefig('AccVal_acc')", "_____no_output_____" ], [ "# save it as a h5 file\n\n\nfrom tensorflow.keras.models import load_model\n\nmodel.save('model_vgg19.h5')", "_____no_output_____" ], [ "\ny_pred = model.predict(test_set)\n", "_____no_output_____" ], [ "y_pred", "_____no_output_____" ], [ "import numpy as np\ny_pred = np.argmax(y_pred, axis=1)", "_____no_output_____" ], [ "y_pred", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "from tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import image", "_____no_output_____" ], [ "model=load_model('model_vgg19.h5')", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "img=image.load_img('/content/drive/MyDrive/Colab Notebooks/Dataset/Test/Uninfected/2.png',target_size=(224,224))\n\n", "_____no_output_____" ], [ "x=image.img_to_array(img)\nx", "_____no_output_____" ], [ "x.shape", "_____no_output_____" ], [ "x=x/255", "_____no_output_____" ], [ "x=np.expand_dims(x,axis=0)\nimg_data=preprocess_input(x)\nimg_data.shape", "_____no_output_____" ], [ "model.predict(img_data)", "_____no_output_____" ], [ "a=np.argmax(model.predict(img_data), axis=1)", "_____no_output_____" ], [ "if(a==0):\n print(\"Uninfected\")\nelse:\n print(\"Infected\")", "Uninfected\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec83ae376993f3b4d377e39fe589c7fea3a8922f
131,523
ipynb
Jupyter Notebook
examples/statistics/poststats_nodal_efficiency.ipynb
tsalo/IDConn
b3f5a673067efd8c77d56cd7c0a70693d281b39f
[ "MIT" ]
3
2020-01-17T18:20:17.000Z
2021-06-24T19:37:49.000Z
examples/statistics/poststats_nodal_efficiency.ipynb
62442katieb/idconn-retrieval
8d4322f9106194a5b80fb19c192d911ff6d222ec
[ "MIT" ]
6
2020-10-01T18:41:04.000Z
2022-03-01T17:48:05.000Z
notebooks/poststats_nodal_efficiency.ipynb
NBCLab/IDConn
0677e372c02fe35be28f70567e71e040e1d2a023
[ "MIT" ]
3
2020-10-01T17:56:34.000Z
2021-06-17T21:16:31.000Z
131.654655
85,612
0.815584
[ [ [ "import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport nibabel as nib\nimport bct\nimport json\nfrom os import makedirs\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom os.path import join, exists\nfrom nilearn.plotting import plot_glass_brain, plot_roi, find_parcellation_cut_coords\n#import bct\nimport datetime\nfrom nilearn.mass_univariate import permuted_ols\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nsns.set_context('poster', font_scale=0.85)\nimport matplotlib.pyplot as plt\n", "_____no_output_____" ], [ "def jili_sidak_mc(data, alpha):\n import math\n import numpy as np\n\n mc_corrmat = data.corr()\n mc_corrmat.fillna(0, inplace=True)\n eigvals, eigvecs = np.linalg.eig(mc_corrmat)\n\n M_eff = 0\n for eigval in eigvals:\n if abs(eigval) >= 0:\n if abs(eigval) >= 1:\n M_eff += 1\n else:\n M_eff += abs(eigval) - math.floor(abs(eigval))\n else:\n M_eff += 0\n print('Number of effective comparisons: {0}'.format(M_eff))\n\n #and now applying M_eff to the Sidak procedure\n sidak_p = 1 - (1 - alpha)**(1/M_eff)\n if sidak_p < 0.00001:\n print('Critical value of {:.3f}'.format(alpha),'becomes {:2e} after corrections'.format(sidak_p))\n else:\n print('Critical value of {:.3f}'.format(alpha),'becomes {:.6f} after corrections'.format(sidak_p))\n return sidak_p, M_eff\n", "_____no_output_____" ], [ "sink_dir = '/Users/kbottenh/Dropbox/Projects/physics-retrieval/data/output'\nfig_dir = '/Users/kbottenh/Dropbox/Projects/physics-retrieval/figures/'\ndata_dir = '/Users/kbottenh/Dropbox/Projects/physics-retrieval/data'\nroi_dir = '/Users/kbottenh/Dropbox/Data/templates/shen2015/'\n\n\nshen = '/Users/kbottenh/Dropbox/Projects/physics-retrieval/shen2015_2mm_268_parcellation.nii.gz'\ncraddock = '/Users/kbottenh/Dropbox/Projects/physics-retrieval/craddock2012_tcorr05_2level_270_2mm.nii.gz'\nmasks = ['shen2015', 'craddock2012']\n\ntasks = {'retr': [{'conditions': ['Physics', 'General']},\n {'runs': [0, 1]}],\n 'fci': [{'conditions': ['Physics', 'NonPhysics']},\n {'runs': [0, 1, 2]}]}\n\ntasks = ['fci', 'retr']\nsessions = [0, 1]\nsesh = ['pre', 'post']\nconditions = ['high-level', 'lower-level']\niqs = ['VCI', 'WMI', 'PRI', 'PSI', 'FSIQ']", "_____no_output_____" ] ], [ [ "# Data wrangling\nNodal efficiency data needs to be scaled according to mean efficiency across empirically estimated null models. Imputation should happen, too.", "_____no_output_____" ] ], [ [ "\n# # Data wrangling\n# Nodal efficiency data is currently in an <i>incredbily</i> long, multi-indexed dataframe. 
\n# Here, we transform it into wide data (dataframe per condition per task per session) for ease of analysis later.\nnull_df = pd.read_csv(join(sink_dir, 'local_efficiency', 'task_eff_dist.csv'), \n index_col=[0,1,2,3], header=0)\n\nbig_df = pd.read_csv(join(data_dir, 'rescored', 'physics_learning-local_efficiency-BayesianImpute.csv'), \n index_col=0, header=0)", "_____no_output_____" ], [ "session = 1\nfor mask in masks:\n for task in tasks:\n for condition in conditions:\n if condition == 'high-level':\n cond = 'physics'\n elif condition == 'lower-level':\n cond = 'control'\n conns = big_df.filter(regex='lEff.*, {0},.*{1}.*{2}.*{3}.*'.format(session,\n task, \n condition, \n mask)).columns\n print(conns[:2])\n new_conns = []\n for conn in conns:\n new_conn = conn.replace('\\', \\'', '_')\n new_conn = new_conn.replace('\\', ', '_')\n new_conn = new_conn.replace(', \\'', '_')\n new_conn = new_conn[2:-2]\n new_conns.append(new_conn)\n big_df.rename({conn: new_conn}, axis=1, inplace=True)\n \n print(new_conns[:2])\n big_df[new_conns] = big_df[new_conns] / null_df.loc[sesh[session],\n task,\n cond,\n mask]['mean']", "Index(['('lEff1', 1, 'fci', 'high-level', 'shen2015')', '('lEff2', 1, 'fci', 'high-level', 'shen2015')'], dtype='object')\n['lEff1_1_fci_high-level_shen2015', 'lEff2_1_fci_high-level_shen2015']\nIndex([], dtype='object')\n[]\nIndex(['('lEff1', 1, 'retr', 'high-level', 'shen2015')', '('lEff2', 1, 'retr', 'high-level', 'shen2015')'], dtype='object')\n['lEff1_1_retr_high-level_shen2015', 'lEff2_1_retr_high-level_shen2015']\nIndex([], dtype='object')\n[]\nIndex(['('lEff1', 1, 'fci', 'high-level', 'craddock2012')', '('lEff2', 1, 'fci', 'high-level', 'craddock2012')'], dtype='object')\n['lEff1_1_fci_high-level_craddock2012', 'lEff2_1_fci_high-level_craddock2012']\nIndex([], dtype='object')\n[]\nIndex(['('lEff1', 1, 'retr', 'high-level', 'craddock2012')', '('lEff2', 1, 'retr', 'high-level', 'craddock2012')'], dtype='object')\n['lEff1_1_retr_high-level_craddock2012', 'lEff2_1_retr_high-level_craddock2012']\nIndex([], dtype='object')\n[]\n" ], [ "conn_list = []\nfor i in np.arange(0,268):\n conn = big_df.filter(regex='.*_1_fci_high-level_{0}'.format(mask)).columns[i].split('_')[0]\n conn_list.append(conn)", "_____no_output_____" ], [ "for iq in iqs:\n big_df['delta{0}'.format(iq)] = big_df['{0}2'.format(iq)] - big_df['{0}1'.format(iq)]\n big_df['delta{0}XSex'.format(iq)] = big_df['delta{0}'.format(iq)] * big_df['F']\n big_df['{0}2XSex'.format(iq)] = big_df['{0}2'.format(iq)] * big_df['F']\n big_df['delta{0}XClass'.format(iq)] = big_df['delta{0}'.format(iq)] * big_df['Mod']\n big_df['{0}2XClass'.format(iq)] = big_df['{0}2'.format(iq)] * big_df['Mod']\n big_df['SexXClass'] = big_df['F'] * big_df['Mod']\n big_df['delta{0}XSexXClass'.format(iq)] = big_df['delta{0}'.format(iq)] * big_df['SexXClass']\n big_df['{0}2XSexXClass'.format(iq)] = big_df['{0}2'.format(iq)] * big_df['SexXClass']", "_____no_output_____" ] ], [ [ "# Regress local efficiency on IQ and all the covariates\n- Permuted OLS tests each `target_var` independently, while regressing out `confounding_vars`, so to run a multiple regression, we test each variable of interest, separately, and put all other variables in the regression in with the confounds. 
This way, we can test interactions <i>with</i> main effects.\n- Maximum p-values are saved in the `sig` dataframe and, for each significant variable, the p- and t-values for each node are saved in `nodaleff_sig`.\n- For each regression, the maximum <i>p</i>- and <i>t</i>-values, along with the nodes whose local efficiency is significantly related to each parameter, are stored in `params` <i>by variable</i>.\n", "_____no_output_____" ] ], [ [ "sig = pd.DataFrame(index=masks)\nnodaleff_sig = pd.DataFrame(index=conn_list)\nalpha = 0.05\n\nadj_a = 1 - (1 - alpha)**(1/8)\nnloga = -np.log10(adj_a)\nprint('adjusted alpha across ', nloga)\n \nfor mask in masks:\n effs = {'post phys fci': {'conns': big_df.filter(regex='.*_1_fci_high-level_{0}'.format(mask)).columns,\n 'iqs': ['deltaPRI', 'deltaFSIQ', 'PRI2', 'FSIQ2']},\n 'post phys retr': {'conns': big_df.filter(regex='.*_1_retr_high-level_{0}'.format(mask)).columns,\n 'iqs': ['WMI2', 'VCI2']}}\n iqs = effs['post phys fci']['iqs'] + effs['post phys retr']['iqs']\n variables = ['iq', 'iqXsex', 'iqXclass', 'iqXsexXclass', 'sexXclass', 'F', 'Mod', 'Age', 'StrtLvl', 'fd']\n index = pd.MultiIndex.from_product([iqs, effs.keys(), variables])\n params = pd.DataFrame(index=index, columns=['max nlog(p)', 'max t', 'nodes'])\n for key in effs.keys():\n print(key)\n efficiencies = effs[key]['conns']\n iqs = effs[key]['iqs']\n \n for iq in iqs:\n print(iq)\n variables = ['{0}'.format(iq), '{0}XSex'.format(iq), '{0}XClass'.format(iq), \n '{0}XSexXClass'.format(iq),\n 'F', 'StrtLvl', 'SexXClass', 'Age', 'Mod', '{0} fd'.format(key)]\n for var in variables:\n covariates = list(set(variables) - set([var]))\n p, t, _ = permuted_ols(big_df[var], \n big_df[efficiencies], \n big_df[covariates],\n n_perm=10000)\n print(key, var, 'max p-val:', np.max(p[0]))\n sig.at[mask, '{0}, {1}, {2}'.format(variables[0], key, var)] = np.max(p[0])\n nodaleff_sig['{0} {1} {2} {3} p'.format(mask, iq, var, key)] = p.reshape((268,)).T\n nodaleff_sig['{0} {1} {2} {3} t'.format(mask, iq, var, key)] = t.reshape((268,)).T\n #nodaleff_sig.to_csv(join(sink_dir, '{0}-{1}-{2}-{3}-nodal_efficiency-p+tvals.csv'.format(mask, key, iq, var)))\n sig_nodes = nodaleff_sig[nodaleff_sig['{0} {1} {2} {3} p'.format(mask, iq, var, key)] >= nloga].index\n \n print('# significant nodes:', len(sig_nodes))\n if key in var:\n params.loc[iq, key, 'fd']['max nlog(p)'] = np.max(p[0])\n params.loc[iq, key, 'fd']['max t'] = np.max(t[0])\n params.loc[iq, key, 'fd']['nodes'] = list(sig_nodes)\n elif iq in var:\n if 'Sex' in var:\n if 'Class' in var:\n params.loc[iq, key, 'iqXsexXclass']['max nlog(p)'] = np.max(p[0])\n params.loc[iq, key, 'iqXsexXclass']['max t'] = np.max(t[0])\n params.loc[iq, key, 'iqXsexXclass']['nodes'] = list(sig_nodes)\n else:\n params.loc[iq, key, 'iqXsex']['max nlog(p)'] = np.max(p[0])\n params.loc[iq, key, 'iqXsex']['max t'] = np.max(t[0])\n params.loc[iq, key, 'iqXsex']['nodes'] = list(sig_nodes)\n if 'Class' in var:\n if not 'Sex' in var:\n params.loc[iq, key, 'iqXclass']['max nlog(p)'] = np.max(p[0])\n params.loc[iq, key, 'iqXclass']['max t'] = np.max(t[0])\n params.loc[iq, key, 'iqXclass']['nodes'] = list(sig_nodes)\n else:\n params.loc[iq, key, 'iq']['max nlog(p)'] = np.max(p[0])\n params.loc[iq, key, 'iq']['max t'] = np.max(t[0])\n params.loc[iq, key, 'iq']['nodes'] = list(sig_nodes)\n elif var == 'SexXClass':\n params.loc[iq, key, 'sexXclass']['max nlog(p)'] = np.max(p[0])\n params.loc[iq, key, 'sexXclass']['max t'] = np.max(t[0])\n params.loc[iq, key, 'sexXclass']['nodes'] = 
list(sig_nodes)\n else:\n params.loc[iq, key, var]['max nlog(p)'] = np.max(p[0])\n params.loc[iq, key, var]['max t'] = np.max(t[0])\n params.loc[iq, key, var]['nodes'] = list(sig_nodes)\n params.to_csv(join(sink_dir, '{0}-params-permutedOLS-efficiency.csv'.format(mask)))\nsig.to_csv(join(sink_dir, 'max-nlogp-local_efficiency-permutedOLS.csv'))\nnodaleff_sig.to_csv(join(sink_dir, 'nodal_efficiency-nlogp+tvals.csv'.format(mask, key, iq, var)))", "adjusted alpha across 2.194420924596385\npost phys fci\ndeltaPRI\npost phys fci deltaPRI max p-val: 1.6252950812667588\n# significant nodes: 0\npost phys fci deltaPRIXSex max p-val: 0.4351407547476579\n# significant nodes: 0\npost phys fci deltaPRIXClass max p-val: 1.414582697768362\n# significant nodes: 0\npost phys fci deltaPRIXSexXClass max p-val: 0.2722582530939567\n# significant nodes: 0\npost phys fci F max p-val: 0.066353718318968\n# significant nodes: 0\npost phys fci StrtLvl max p-val: 0.33044764625254935\n# significant nodes: 0\npost phys fci SexXClass max p-val: 0.006960066578800675\n# significant nodes: 0\npost phys fci Age max p-val: 1.0685855565878577\n# significant nodes: 0\npost phys fci Mod max p-val: 0.22794185799985078\n# significant nodes: 0\npost phys fci post phys fci fd max p-val: 3.301073422940844\n# significant nodes: 90\ndeltaFSIQ\npost phys fci deltaFSIQ max p-val: 1.127304599804194\n# significant nodes: 0\npost phys fci deltaFSIQXSex max p-val: 0.07778520384389605\n# significant nodes: 0\npost phys fci deltaFSIQXClass max p-val: 0.8159200730371915\n# significant nodes: 0\npost phys fci deltaFSIQXSexXClass max p-val: 0.19583582219482124\n# significant nodes: 0\npost phys fci F max p-val: 0.22911781263786354\n# significant nodes: 0\npost phys fci StrtLvl max p-val: 0.05758890093438555\n# significant nodes: 0\npost phys fci SexXClass max p-val: 0.2562202056731123\n# significant nodes: 0\npost phys fci Age max p-val: 0.8982963533304964\n# significant nodes: 0\npost phys fci Mod max p-val: 0.20695182710028248\n# significant nodes: 0\npost phys fci post phys fci fd max p-val: 4.000043427276863\n# significant nodes: 141\nPRI2\npost phys fci PRI2 max p-val: 0.3027268855444793\n# significant nodes: 0\npost phys fci PRI2XSex max p-val: 0.20208378353966652\n# significant nodes: 0\npost phys fci PRI2XClass max p-val: 1.0410020349557692\n# significant nodes: 0\npost phys fci PRI2XSexXClass max p-val: 0.3618537870860256\n# significant nodes: 0\npost phys fci F max p-val: 0.18813344685676378\n# significant nodes: 0\npost phys fci StrtLvl max p-val: 0.021452100456787995\n# significant nodes: 0\npost phys fci SexXClass max p-val: 0.4275203294272251\n# significant nodes: 0\npost phys fci Age max p-val: 0.6921197236649811\n# significant nodes: 0\npost phys fci Mod max p-val: 0.9718790078523928\n# significant nodes: 0\npost phys fci post phys fci fd max p-val: 3.0000434272768626\n# significant nodes: 19\nFSIQ2\npost phys fci FSIQ2 max p-val: 0.29068865144246664\n# significant nodes: 0\npost phys fci FSIQ2XSex max p-val: 1.207651737778609\n# significant nodes: 0\npost phys fci FSIQ2XClass max p-val: 0.9201389596101419\n# significant nodes: 0\npost phys fci FSIQ2XSexXClass max p-val: 0.7217472191855888\n# significant nodes: 0\npost phys fci F max p-val: 1.2396209438536507\n# significant nodes: 0\npost phys fci StrtLvl max p-val: 0.010771635635169658\n# significant nodes: 0\npost phys fci SexXClass max p-val: 0.6686161307561196\n# significant nodes: 0\npost phys fci Age max p-val: 0.5789332974834284\n# significant nodes: 0\npost phys fci Mod 
max p-val: 0.9614232653271598\n# significant nodes: 0\npost phys fci post phys fci fd max p-val: 3.3979834359489\n# significant nodes: 54\npost phys retr\nWMI2\npost phys retr WMI2 max p-val: 0.1994638057077325\n# significant nodes: 0\npost phys retr WMI2XSex max p-val: 0.0846963649526708\n# significant nodes: 0\npost phys retr WMI2XClass max p-val: 0.018088982806889115\n# significant nodes: 0\npost phys retr WMI2XSexXClass max p-val: 0.40443699241125963\n# significant nodes: 0\npost phys retr F max p-val: 0.10131824568736919\n# significant nodes: 0\npost phys retr StrtLvl max p-val: 0.1763736140087263\n# significant nodes: 0\npost phys retr SexXClass max p-val: 0.42427844674014326\n# significant nodes: 0\npost phys retr Age max p-val: 0.014616953193860971\n# significant nodes: 0\npost phys retr Mod max p-val: 0.041479544054895234\n# significant nodes: 0\npost phys retr post phys retr fd max p-val: 1.7190100600291351\n# significant nodes: 0\nVCI2\npost phys retr VCI2 max p-val: 0.36135454058673916\n# significant nodes: 0\npost phys retr VCI2XSex max p-val: 0.26428488983312376\n# significant nodes: 0\npost phys retr VCI2XClass max p-val: 1.3270225201479664\n# significant nodes: 0\npost phys retr VCI2XSexXClass max p-val: 0.28029793474728604\n# significant nodes: 0\npost phys retr F max p-val: 0.28714130222963996\n# significant nodes: 0\npost phys retr StrtLvl max p-val: 0.17112586566017676\n# significant nodes: 0\npost phys retr SexXClass max p-val: 0.371143862856256\n# significant nodes: 0\npost phys retr Age max p-val: 0.0332448808935017\n# significant nodes: 0\npost phys retr Mod max p-val: 1.3429875744197588\n# significant nodes: 0\npost phys retr post phys retr fd max p-val: 1.7305304830589463\n# significant nodes: 0\npost phys fci\ndeltaPRI\npost phys fci deltaPRI max p-val: 1.6655896761259317\n# significant nodes: 0\npost phys fci deltaPRIXSex max p-val: 0.37298496327587305\n# significant nodes: 0\npost phys fci deltaPRIXClass max p-val: 2.1938634532929755\n# significant nodes: 0\npost phys fci deltaPRIXSexXClass max p-val: 0.9465808223514074\n# significant nodes: 0\npost phys fci F max p-val: 0.18049949173499397\n# significant nodes: 0\npost phys fci StrtLvl max p-val: 0.3808457114839154\n# significant nodes: 0\npost phys fci SexXClass max p-val: 0.22116895527412314\n# significant nodes: 0\npost phys fci Age max p-val: 0.3219805223025175\n# significant nodes: 0\npost phys fci Mod max p-val: 0.5662735933519969\n# significant nodes: 0\npost phys fci post phys fci fd max p-val: 3.5229221725572004\n# significant nodes: 101\ndeltaFSIQ\npost phys fci deltaFSIQ max p-val: 0.6552583046442021\n# significant nodes: 0\npost phys fci deltaFSIQXSex max p-val: 0.5224880950778815\n# significant nodes: 0\npost phys fci deltaFSIQXClass max p-val: 0.7049163420246715\n# significant nodes: 0\npost phys fci deltaFSIQXSexXClass max p-val: 0.6099919308178754\n# significant nodes: 0\npost phys fci F max p-val: 1.0218629103394488\n# significant nodes: 0\npost phys fci StrtLvl max p-val: 0.5250990918114747\n# significant nodes: 0\npost phys fci SexXClass max p-val: 0.46945956763174496\n# significant nodes: 0\npost phys fci Age max p-val: 0.1603768703944293\n# significant nodes: 0\npost phys fci Mod max p-val: 0.18400108635486603\n# significant nodes: 0\npost phys fci post phys fci fd max p-val: 4.000043427276863\n# significant nodes: 144\nPRI2\npost phys fci PRI2 max p-val: 0.617666123808749\n# significant nodes: 0\npost phys fci PRI2XSex max p-val: 0.47577715850988384\n# significant nodes: 0\npost phys fci 
PRI2XClass max p-val: 1.0851716097368123\n# significant nodes: 0\npost phys fci PRI2XSexXClass max p-val: 0.5616593202421486\n# significant nodes: 0\npost phys fci F max p-val: 0.3988528946615292\n# significant nodes: 0\npost phys fci StrtLvl max p-val: 0.5308106847702505\n# significant nodes: 0\npost phys fci SexXClass max p-val: 0.4720270860876612\n# significant nodes: 0\npost phys fci Age max p-val: 0.12146418921464346\n# significant nodes: 0\npost phys fci Mod max p-val: 0.9646136890923144\n# significant nodes: 0\npost phys fci post phys fci fd max p-val: 2.886100074970026\n# significant nodes: 18\nFSIQ2\npost phys fci FSIQ2 max p-val: 0.5621343917373642\n# significant nodes: 0\npost phys fci FSIQ2XSex max p-val: 1.4522687218890402\n# significant nodes: 0\npost phys fci FSIQ2XClass max p-val: 0.34279629739314627\n# significant nodes: 0\npost phys fci FSIQ2XSexXClass max p-val: 0.9382141199821636\n# significant nodes: 0\npost phys fci F max p-val: 1.4225516274396373\n# significant nodes: 0\npost phys fci StrtLvl max p-val: 0.5683580825908509\n# significant nodes: 0\npost phys fci SexXClass max p-val: 0.9172580569604126\n# significant nodes: 0\npost phys fci Age max p-val: 0.26412531074556556\n# significant nodes: 0\npost phys fci Mod max p-val: 0.3470239761772493\n# significant nodes: 0\npost phys fci post phys fci fd max p-val: 3.6990134316128813\n# significant nodes: 43\npost phys retr\nWMI2\npost phys retr WMI2 max p-val: 0.4012529205137476\n# significant nodes: 0\npost phys retr WMI2XSex max p-val: 0.6105229614304852\n# significant nodes: 0\npost phys retr WMI2XClass max p-val: 0.09722495919460916\n# significant nodes: 0\npost phys retr WMI2XSexXClass max p-val: 1.694692057830239\n# significant nodes: 0\npost phys retr F max p-val: 0.6536904528262241\n# significant nodes: 0\n" ], [ "for col in sig.columns:\n if sig.at['shen2015', col] > nloga:\n if sig.at['craddock2012', col] > nloga:\n print(col)", "deltaPRI, post phys fci, post phys fci fd\ndeltaFSIQ, post phys fci, post phys fci fd\nPRI2, post phys fci, post phys fci fd\nFSIQ2, post phys fci, post phys fci fd\n" ], [ "n_map = int(len(params[params['max nlog(p)'] > 1].index)) + 1\ninterval = 1 / n_map\nhusl_pal = sns.husl_palette(n_colors=n_map, h=interval)\nhusl_cmap = LinearSegmentedColormap.from_list(husl_pal, husl_pal, N=n_map)\nsns.palplot(husl_pal)\n\ncrayons_l = sns.crayon_palette(['Vivid Tangerine', 'Cornflower'])\ncrayons_d = sns.crayon_palette(['Brick Red', 'Midnight Blue'])\ngrays = sns.light_palette('#999999', n_colors=3, reverse=True)\n\nf_2 = sns.crayon_palette(['Red Orange', 'Vivid Tangerine'])\nm_2 = sns.crayon_palette(['Cornflower', 'Cerulean'])", "_____no_output_____" ], [ "params", "_____no_output_____" ], [ "empty_nii = nib.load(join(roi_dir, 'roi101.nii.gz'))\nempty_roi = empty_nii.get_fdata() * 0\nempty = nib.Nifti1Image(empty_roi, empty_nii.affine)\ng = plot_glass_brain(empty, colorbar=False, vmin=0.5, vmax=n_map)\ni = 0\n\n\nfor var in params.index:\n if 'fd' not in var:\n if params.loc[var]['max nlog(p)'] > 2:\n print(var)\n i += 1\n husl_pal = sns.husl_palette(h=interval * i, n_colors=n_map)\n rois = None\n print(i, var)\n corr_nodes = []\n #tvals = params.loc[i]['max t']\n nodes = params.loc[var]['nodes']\n print('first nodes raw', nodes[0])\n corr_nodes.append(int(nodes[0]))\n roi_nifti = nib.load(join(roi_dir,'roi{0}.nii.gz'.format(int(nodes[0]+1))))\n roi = roi_nifti.get_fdata()\n rois = (roi * i)\n print(int(nodes[0])+1, np.max(rois))\n if len(nodes) > 1:\n for node in nodes[1:]:\n node += 1\n 
corr_nodes.append(int(node) - 1)  # node was incremented above; store the raw zero-based id, matching nodes[0]\n                    roi_nifti = nib.load(join(roi_dir,'roi{0}.nii.gz'.format(int(node))))  # node already carries the +1 used by the roi file names\n                    roi = roi_nifti.get_fdata()\n                    rois += (roi * i)\n                    print(int(node), np.max(rois))\n            else:\n                pass\n            np.savetxt(join(fig_dir, '{1}-{0}.txt'.format(i, var)), corr_nodes, delimiter=',')\n            rois_nifti = nib.Nifti1Image(rois, roi_nifti.affine)\n            rois_nifti.to_filename(join(data_dir, 'output/local_efficiency', '{0}_nodes.nii.gz'.format(var)))\n            h = plot_glass_brain(rois_nifti, cmap=LinearSegmentedColormap.from_list(husl_pal, husl_pal, N=3))\n            h.savefig(join(fig_dir, '{0}-{1}_ROIs.png'.format(i, var)), dpi=300)\n\n            husl_pal = sns.husl_palette(n_colors=int(n_map), h=interval*i)\n            g.add_contours(rois_nifti, colors=husl_pal, filled=True, alpha=0.7)\n\n        else:\n            pass\n\ng.savefig(join(fig_dir, 'LEffXIQ_ROIs.png'), dpi=300)", "('deltaPRI', 'post phys fci', 'iqXclass')\n1 ('deltaPRI', 'post phys fci', 'iqXclass')\n" ], [ "all_data = big_df\nall_data.columns = all_data.columns.str.replace(', ', '_')\n\nall_data.filter(regex='lEff.*').columns", "_____no_output_____" ], [ "n_col = int(len(nodaleff_sig.columns)/2) + 1\nhusl_pal = sns.husl_palette(n_colors=int(n_col))\nhusl_cmap = LinearSegmentedColormap.from_list(husl_pal, husl_pal, N=int(n_col))\ni = 0\nfor var in params.index:\n    if 'fd' not in var:\n        if params.loc[var]['max nlog(p)'] > 2:\n            iq = var[0]\n            task = var[1]\n            dat = effs[task]['conns']\n            husl_pal = sns.husl_palette(h=(interval*i), n_colors=int(n_col))\n\n            print(var, i)\n            \n            nodes = params.loc[var]['nodes']\n            print(nodes)\n            for node in nodes:\n                node = 'lEff{0}'.format(node+1)\n                if var[-1] == 'iqXsex':\n                    #print(iq, 'x Sex', node, nodaleff_sig.at[node,'{0}t'.format(var[:-1])])\n                    h = sns.lmplot(iq, node, data=all_data, hue='F', palette=crayons_d)\n                    h.savefig(join(fig_dir, '{0}-{1}-scatter.png'.format(i+1, var, node)), dpi=300)\n                    plt.close()\n                elif var[-1] == 'iqXsexXclass':\n                    #print(iq, 'x Sex x Class', node, nodaleff_sig.at[node,'{0}t'.format(var[:-1])])\n                    h = sns.lmplot(iq, node, data=all_data[all_data['F'] == 1], hue='Mod', palette=f_2)\n                    h.savefig(join(fig_dir, '{0}-{1}-scatter-f.png'.format(i, var, node)), dpi=300)\n                    h = sns.lmplot(iq, node, data=all_data[all_data['F'] == 0], hue='Mod', palette=m_2)\n                    h.savefig(join(fig_dir, '{0}-{1}-scatter-m.png'.format(i+1, var, node)), dpi=300)\n                    plt.close()\n                elif var[-1] == 'iqXclass':\n                    #print(iq, 'x Class', node, nodaleff_sig.at[node,'{0}t'.format(column[:-1])])\n                    h = sns.lmplot(iq, node, data=all_data, hue='Mod', palette=grays)\n                    h.savefig(join(fig_dir, '{0}-{1}-scatter.png'.format(i+1, var, node)), dpi=300)\n                    plt.close()\n                elif var[-1] == 'sexXclass':\n                    #print('Sex x Class', node, nodaleff_sig.at[node,'{0}t'.format(column[:-1])])\n                    h = sns.lmplot('F', node, data=all_data[all_data['F'] == 1], hue='Mod', palette=f_2)\n                    h.savefig(join(fig_dir, '{0}-{1}-scatter-.png'.format(i+1, var, node)), dpi=300)\n                    plt.close()\n                elif var[-1] == 'iq':\n                    #print('no interxn', iq, node, nodaleff_sig.at[node,'{0}t'.format(column[:-1])])\n                    fig,ax = plt.subplots()\n                    sns.regplot(all_data[iq], all_data[node], color=husl_pal[0])\n                    sns.despine()\n                    plt.tight_layout()\n                    fig.savefig(join(fig_dir, '{0}-{1}-scatter.png'.format(i+1, var, node)), dpi=300)\n                    plt.close()\n                elif var[-1] == 'fd':\n                    pass\n                else:\n                    fig,ax = plt.subplots()\n                    sns.regplot(all_data[var[-1]], all_data[node], color=husl_pal[0])\n                    sns.despine()\n                    plt.tight_layout()\n                    fig.savefig(join(fig_dir, '{0}-{1}-scatter.png'.format(i+1, var, node)), dpi=300)\n                    plt.close()\n            i += 1", "_____no_output_____" ], [ 
"all_data.columns = all_data.columns.str.replace(', ', '_')\n\nall_data.filter(regex='lEff.*').columns", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec83c0597945e7dc4b7c866e774ad550760a48f6
491,219
ipynb
Jupyter Notebook
unity/filterImages_anomaly.ipynb
kreimanlab/WhenPigsFlyContext
4d03bb29f3be3e96c2b9d1945dc08c381abae513
[ "MIT" ]
13
2021-04-07T15:39:24.000Z
2022-03-08T19:01:20.000Z
unity/filterImages_anomaly.ipynb
kreimanlab/WhenPigsFlyContext
4d03bb29f3be3e96c2b9d1945dc08c381abae513
[ "MIT" ]
1
2021-11-13T17:18:03.000Z
2021-12-03T02:05:33.000Z
unity/filterImages_anomaly.ipynb
kreimanlab/WhenPigsFlyContext
4d03bb29f3be3e96c2b9d1945dc08c381abae513
[ "MIT" ]
1
2021-04-18T18:14:51.000Z
2021-04-18T18:14:51.000Z
160.686621
227,096
0.860237
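The three trailing numbers (avg_line_length, max_line_length, alphanum_fraction) are simple per-file text statistics. A sketch of the conventional definitions; whether this dataset counts newline characters in alphanum_fraction, for instance, is an assumption here:

def text_stats(text):
    lines = text.split('\n')
    lengths = [len(line) for line in lines]
    avg_line_length = sum(lengths) / len(lengths)
    max_line_length = max(lengths)
    alphanum_fraction = sum(ch.isalnum() for ch in text) / max(len(text), 1)
    return avg_line_length, max_line_length, alphanum_fraction

For this record, avg_line_length 160.686621 and max_line_length 227,096 point at the long printed path lists in the outputs below.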
[ [ [ "import pickle\nimport pandas\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\nfrom shutil import copyfile\nimport csv\nimport os.path\nfrom os import path\n\napartmentlist = range(0,7) #total: 7\nrootdir = '/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/'\nJasondir = rootdir + \"jason_anomaly/\"\nImgdir = \"stimulus_anomaly/\"\nclassnamelist = []\nroomlist = []\napartidlist = []\nimagenamelist = []\n\nleftlist = []\ntoplist = []\nrightlist = []\nbottomlist = []\n\nstartStr = \"img_\"\nendStr = \"_prefab_\"\n\nfor apartmentid in apartmentlist:\n f = open(rootdir + \"filtered_anomaly/apartment_\" + str(apartmentid) + \"_good.txt\", \"r\") \n for x in f: \n x =x[len(rootdir+Imgdir):]\n infor = pickle.load( open( (Jasondir + x).rstrip('.png\\n') + '.pkl', \"rb\" ) ) \n targetclassname = infor['target_node']['class_name']\n targetroom = infor['targetroomname']\n \n targetid = int(x[x.find(startStr)+len(startStr):x.rfind(endStr)])\n JasonData = infor['JasonData']\n for obj in JasonData.items():\n if obj[1]['prefab_id'] == targetid: \n left = obj[1]['bbox'][2]\n top = obj[1]['bbox'][0]\n right = obj[1]['bbox'][3]\n bottom = obj[1]['bbox'][1]\n break \n \n \n roomlist.append(targetroom)\n apartidlist.append(apartmentid) \n classnamelist.append(targetclassname)\n imagenamelist.append(Imgdir + x)\n \n leftlist.append(left)\n toplist.append(top)\n rightlist.append(right)\n bottomlist.append(bottom)\n \n \nJasondir = rootdir + \"jason_anomaly_wall/\"\nImgdir = \"stimulus_anomaly_wall/\"\n\nfor apartmentid in apartmentlist:\n f = open(rootdir + \"filtered_anomaly_wall/apartment_\" + str(apartmentid) + \"_good.txt\", \"r\") \n for x in f: \n x =x[len(rootdir+Imgdir):]\n xname = (Jasondir + x).rstrip('.png\\n') + '.pkl'\n print(xname)\n# imgxname = rootdir + Imgdir+x\n# print(imgxname)\n# if not os.path.exists(xname):\n# print('aaaaaaaaaaaaaaaaaaaaaa')\n# continue\n \n# if not os.path.exists(imgxname):\n# print('ddddddddddddddddddddd')\n# continue\n \n infor = pickle.load( open(xname, \"rb\" ) ) \n targetclassname = infor['target_node']['class_name']\n targetroom = infor['targetroomname']\n targetbbox = infor['target_bbox']\n #left, top, right, bottom\n left = targetbbox[0]\n top = targetbbox[1]\n right = targetbbox[2]\n bottom = targetbbox[3] \n \n roomlist.append(targetroom)\n apartidlist.append(apartmentid) \n classnamelist.append(targetclassname)\n print(Imgdir+x)\n imagenamelist.append(Imgdir + x)\n \n leftlist.append(left)\n toplist.append(top)\n rightlist.append(right)\n bottomlist.append(bottom)\n\n ", 
"/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_6.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_6.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_4.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_4.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_6.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_6.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_6.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_6.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_4.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_4.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_6.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_6.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_6.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_6.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_4.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_4.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam
_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_6.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_6.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_7.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_7.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_4.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_4.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_1.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_1.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_6.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_6.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_6.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_6.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_2.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_2.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_4.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_4.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_7.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_7.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_6.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_6.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_c
am_4.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_4.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_6.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_6.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_4.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_4.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_7.pkl\nstimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_7.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_4.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_4.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_5.pkl\nstimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_5.png\n\n/media/mengmi/KLAB15/Mengmi/Proj_context3/VirtualHome/unity/jason_anomaly_wall/apartment_0/img_75_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_6.pkl\n" ], [ "print(imagenamelist)", "['stimulus_anomaly/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_all_modified_cam_8.png\\n', 
'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_all_modified_cam_9.png\\n', 
'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_liquid_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_liquid_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_liquid_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_liquid_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_liquid_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_liquid_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Washing_liquid_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Apple_1024_all_modified_cam_1.png\\n', 
'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Apple_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Apple_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Apple_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Apple_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Apple_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Lime_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Lime_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Lime_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Lime_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Lime_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Peach_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Peach_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Peach_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Peach_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Peach_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Plum_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Plum_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Plum_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Plum_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_FMGP_PRE_Plum_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Remote_control_1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Remote_control_1_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Remote_control_1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Remote_control_1_all_modified_cam_3.png\\n', 
'stimulus_anomaly/apartment_0/img_455_prefab_Remote_control_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Remote_control_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Candle_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Candle_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Candle_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Candle_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Candle_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Condiment_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Condiment_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Condiment_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Condiment_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Condiment_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Mouse_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Mouse_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Mouse_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Mouse_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Mouse_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Mouse_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Mug_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Mug_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Mug_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Mug_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Mug_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Mug_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Mug_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Mug_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Towel1_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Towel1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Towel1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Towel1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Towel1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_1.png\\n', 
'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Microwave_1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Microwave_1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Microwave_1_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Microwave_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Microwave_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Microwave_1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Knife_03_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Knife_03_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Knife_03_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Knife_03_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Knife_03_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Knife_03_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Knife_03_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Keyboard_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Keyboard_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Keyboard_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Keyboard_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_ELE_Keyboard_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Plate_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Plate_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Plate_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Plate_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Plate_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Bowl_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Bowl_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Bowl_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Bowl_01_all_modified_cam_8.png\\n', 
'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Bowl_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Bowl_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_PRO_Bowl_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Book_1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Book_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Book_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Book_1_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Book_1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Cellphone_1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Cellphone_1_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Cellphone_1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Cellphone_1_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Cellphone_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_0/img_455_prefab_Cellphone_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_mH_ChineseTakeoutBox_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_mH_ChineseTakeoutBox_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_mH_ChineseTakeoutBox_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_mH_ChineseTakeoutBox_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_mH_ChineseTakeoutBox_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_mH_ChineseTakeoutBox_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_mH_ChineseTakeoutBox_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pudding_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pudding_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pudding_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pudding_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pudding_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pudding_1024_all_modified_cam_3.png\\n', 
'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_8.png\\n', 
'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_sponge_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_sponge_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_sponge_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_sponge_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_liquid_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_liquid_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_liquid_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_liquid_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_liquid_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_liquid_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_liquid_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Washing_liquid_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SHP_PRE_Toothpaste_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SHP_PRE_Toothpaste_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_SHP_PRE_Toothpaste_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Apple_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Apple_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Apple_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Apple_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Apple_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Apple_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Lime_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Lime_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_7.png\\n', 
'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Peach_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Peach_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Peach_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Peach_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Peach_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Plum_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Plum_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Plum_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Plum_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Plum_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Plum_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_FMGP_PRE_Plum_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Remote_control_1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Remote_control_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Remote_control_1_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_DEC_Candle_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_DEC_Candle_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_DEC_Candle_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_DEC_Candle_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Condiment_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Condiment_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Condiment_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Condiment_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_ELE_Mouse_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_ELE_Mouse_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_ELE_Mouse_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Towel1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Towel1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Towel1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Towel1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Towel1_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Towel1_all_modified_cam_4.png\\n', 
'stimulus_anomaly/apartment_1/img_332_prefab_Towel1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Towel1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Microwave_1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Microwave_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Microwave_1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Microwave_1_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Microwave_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Microwave_1_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Microwave_1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Microwave_1_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Knife_03_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Knife_03_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Knife_03_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_ELE_Keyboard_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_ELE_Keyboard_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Plate_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Plate_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Bowl_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_PRO_Bowl_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Book_1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Book_1_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Book_1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Book_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Book_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Book_1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_Book_1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_1/img_332_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_3.png\\n', 
'stimulus_anomaly/apartment_1/img_332_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_1.png\n', ...,
'stimulus_anomaly/apartment_2/img_411_prefab_mH_ChineseTakeoutBox_all_modified_cam_5.png\n', ...,
'stimulus_anomaly/apartment_3/img_368_prefab_mH_ChineseTakeoutBox_all_modified_cam_2.png\n', ...,
'stimulus_anomaly/apartment_4/img_366_prefab_mH_ChineseTakeoutBox_all_modified_cam_2.png\n', ...,
'stimulus_anomaly/apartment_5/img_351_prefab_PRE_ELE_Keyboard_01_all_modified_cam_8.png\n', ...,
'stimulus_anomaly/apartment_5/img_351_prefab_PRE_ELE_Mouse_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_ELE_Mouse_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Mug_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Mug_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Mug_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Mug_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Mug_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Towel1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Towel1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Towel1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Towel1_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Microwave_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Microwave_1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Microwave_1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Microwave_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Microwave_1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Knife_03_all_modified_cam_5.png\\n', 
'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Knife_03_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Plate_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Plate_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Plate_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Plate_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Plate_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Bowl_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Bowl_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Bowl_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Bowl_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_PRO_Bowl_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Book_1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Book_1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Book_1_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Book_1_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Book_1_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Book_1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Cellphone_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Cellphone_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Cellphone_1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_5/img_351_prefab_Cellphone_1_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_mH_ChineseTakeoutBox_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_mH_ChineseTakeoutBox_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_mH_ChineseTakeoutBox_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_mH_ChineseTakeoutBox_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_mH_ChineseTakeoutBox_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_mH_ChineseTakeoutBox_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_mH_ChineseTakeoutBox_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_3.png\\n', 
'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cutlets_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pudding_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pudding_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pudding_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pudding_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pudding_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Cereal_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SMGP_PRE_Chocolate_syrup_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Apple_pie_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_APP_Coffeemaker_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_6.png\\n', 
'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Milkshake_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_sponge_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_sponge_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_sponge_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_sponge_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_sponge_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_sponge_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pound_cake_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_liquid_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_liquid_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_liquid_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_liquid_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_liquid_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_liquid_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_liquid_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Washing_liquid_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SHP_PRE_Toothpaste_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SHP_PRE_Toothpaste_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SHP_PRE_Toothpaste_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SHP_PRE_Toothpaste_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SHP_PRE_Toothpaste_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SHP_PRE_Toothpaste_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_SHP_PRE_Toothpaste_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Toothbrush_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Apple_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Apple_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Apple_1024_all_modified_cam_2.png\\n', 
'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Apple_1024_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Apple_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Lime_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Lime_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Lime_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Lime_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Lime_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Hand_soap_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Peach_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Peach_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Peach_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Peach_1024_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Peach_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Plum_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Plum_1024_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Plum_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Plum_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_FMGP_PRE_Plum_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Wine_glass_01 1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Remote_control_1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Remote_control_1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Remote_control_1_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Remote_control_1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Remote_control_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Remote_control_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Remote_control_1_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_DEC_Candle_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_DEC_Candle_01_all_modified_cam_7.png\\n', 
'stimulus_anomaly/apartment_6/img_380_prefab_PRE_DEC_Candle_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Condiment_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Condiment_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Condiment_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Condiment_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_ELE_Mouse_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_ELE_Mouse_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_ELE_Mouse_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_ELE_Mouse_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_ELE_Mouse_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_ELE_Mouse_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Salt_shaker_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Mug_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Mug_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Mug_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Towel1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Towel1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Towel1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Towel1_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Towel1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Towel1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Towel1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_DHP_PRE_Pink_cupcake_1024_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Slippers_01_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Microwave_1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Microwave_1_all_modified_cam_9.png\\n', 
'stimulus_anomaly/apartment_6/img_380_prefab_Microwave_1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Microwave_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Microwave_1_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Microwave_1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Microwave_1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Microwave_1_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Knife_03_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Knife_03_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Knife_03_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Knife_03_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_ELE_Keyboard_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Plate_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Plate_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Plate_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Plate_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Plate_01_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Bowl_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Bowl_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Bowl_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Bowl_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_PRO_Bowl_01_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Book_1_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Book_1_all_modified_cam_5.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Book_1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Book_1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Book_1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Book_1_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_3.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_4.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_1.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_9.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_PRE_DEC_Pillow_01_01_01_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Cellphone_1_all_modified_cam_7.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Cellphone_1_all_modified_cam_8.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Cellphone_1_all_modified_cam_2.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Cellphone_1_all_modified_cam_6.png\\n', 'stimulus_anomaly/apartment_6/img_380_prefab_Cellphone_1_all_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_5.png\\n', 
'stimulus_anomaly_wall/apartment_0/img_455_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_6.png\\n', 
'stimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_6.png\\n', 
'stimulus_anomaly_wall/apartment_0/img_455_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_Remote_control_1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_Remote_control_1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Remote_control_1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Remote_control_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Remote_control_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_DEC_Candle_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_DEC_Candle_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_DEC_Candle_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_DEC_Candle_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_DEC_Candle_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_6.png\\n', 
'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Mug_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Mug_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Mug_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Mug_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Mug_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_Towel1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_Towel1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_70_prefab_Towel1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_0/img_70_prefab_Towel1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_70_prefab_Towel1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_Microwave_1_gravity_modified_cam_5.png\\n', 
'stimulus_anomaly_wall/apartment_0/img_455_prefab_Microwave_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Microwave_1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Microwave_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Microwave_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Knife_03_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Knife_03_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Knife_03_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Knife_03_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Knife_03_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Plate_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Plate_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_PRO_Plate_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_PRO_Plate_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_74_prefab_PRE_PRO_Plate_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_221_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_221_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_Book_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_Book_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Book_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Book_1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Book_1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_Cellphone_1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_0/img_455_prefab_Cellphone_1_gravity_modified_cam_4.png\\n', 
'stimulus_anomaly_wall/apartment_0/img_75_prefab_Cellphone_1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Cellphone_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_0/img_75_prefab_Cellphone_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_9.png\\n', 
'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_50_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_50_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_50_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_50_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_50_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_50_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_50_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_50_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_50_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Remote_control_1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Remote_control_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Remote_control_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_DEC_Candle_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_DEC_Candle_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_DEC_Candle_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_6.png\\n', 
'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Mug_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Mug_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Mug_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Towel1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Towel1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Towel1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Microwave_1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Microwave_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Microwave_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Knife_03_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Knife_03_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Knife_03_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Plate_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Plate_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Plate_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_6.png\\n', 
'stimulus_anomaly_wall/apartment_1/img_51_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Book_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Book_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Book_1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Cellphone_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Cellphone_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_1/img_51_prefab_Cellphone_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_4.png\\n', 
'stimulus_anomaly_wall/apartment_2/img_347_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_4.png\\n', 
'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_9.png\\n', 
'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_Remote_control_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_Remote_control_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_Remote_control_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_Remote_control_1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Remote_control_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Remote_control_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Remote_control_1_gravity_modified_cam_4.png\\n', 
'stimulus_anomaly_wall/apartment_2/img_344_prefab_PRE_DEC_Candle_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_PRE_DEC_Candle_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_PRE_DEC_Candle_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_PRE_DEC_Candle_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_DEC_Candle_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_DEC_Candle_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_DEC_Candle_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_343_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_343_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_343_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_343_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_343_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_2/img_343_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_343_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_343_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Mug_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Mug_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Mug_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_PRO_Mug_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Mug_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Mug_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Mug_01_gravity_modified_cam_5.png\\n', 
'stimulus_anomaly_wall/apartment_2/img_342_prefab_Towel1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_342_prefab_Towel1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_342_prefab_Towel1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_342_prefab_Towel1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Towel1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Towel1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Towel1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_Microwave_1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_Microwave_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_Microwave_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_346_prefab_Microwave_1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_337_prefab_PRE_PRO_Knife_03_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_337_prefab_PRE_PRO_Knife_03_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_337_prefab_PRE_PRO_Knife_03_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_337_prefab_PRE_PRO_Knife_03_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Knife_03_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Knife_03_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Knife_03_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_347_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_334_prefab_PRE_PRO_Plate_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_334_prefab_PRE_PRO_Plate_01_gravity_modified_cam_9.png\\n', 
'stimulus_anomaly_wall/apartment_2/img_334_prefab_PRE_PRO_Plate_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_334_prefab_PRE_PRO_Plate_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_339_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_339_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_339_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_339_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Bowl_02_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Bowl_02_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Bowl_02_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_PRE_PRO_Bowl_02_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_344_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_329_prefab_Book_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_329_prefab_Book_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_329_prefab_Book_1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_2/img_329_prefab_Book_1_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Book_1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Book_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Book_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_345_prefab_Cellphone_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_345_prefab_Cellphone_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_2/img_345_prefab_Cellphone_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_2/img_345_prefab_Cellphone_1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Cellphone_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Cellphone_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_2/img_12_prefab_Cellphone_1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_4.png\\n', 
'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_4.png\\n', 
'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Remote_control_1_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Remote_control_1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Remote_control_1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_DEC_Candle_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_DEC_Candle_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_DEC_Candle_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Mug_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Mug_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Mug_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_268_prefab_Towel1_gravity_modified_cam_4.png\\n', 
'stimulus_anomaly_wall/apartment_3/img_268_prefab_Towel1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Towel1_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Towel1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Towel1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Microwave_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Microwave_1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Microwave_1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Knife_03_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Knife_03_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Plate_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Plate_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Plate_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_266_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_3/img_266_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Book_1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Book_1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Book_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Cellphone_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Cellphone_1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_3/img_368_prefab_Cellphone_1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_1.png\\n', 
'stimulus_anomaly_wall/apartment_4/img_107_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_9.png\\n', 
'stimulus_anomaly_wall/apartment_4/img_107_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_Remote_control_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_Remote_control_1_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_Remote_control_1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_DEC_Candle_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_DEC_Candle_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_DEC_Candle_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_3.png\\n', 
'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Mug_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Mug_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Mug_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_Towel1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_Towel1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_Towel1_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_Microwave_1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_Microwave_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_Microwave_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Knife_03_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Knife_03_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Knife_03_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_PRO_Plate_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_PRO_Plate_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_PRE_PRO_Plate_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_107_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_105_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_4/img_105_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_105_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_2.png\\n', 
'stimulus_anomaly_wall/apartment_4/img_105_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_105_prefab_Book_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_4/img_105_prefab_Book_1_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_4/img_105_prefab_Book_1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_Cellphone_1_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_Cellphone_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_4/img_106_prefab_Cellphone_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_mH_ChineseTakeoutBox_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Cutlets_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pudding_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Cereal_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_SMGP_PRE_Chocolate_syrup_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_6.png\\n', 
'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Apple_pie_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_APP_Coffeemaker_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Milkshake_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Washing_sponge_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pound_cake_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Washing_liquid_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_SHP_PRE_Toothpaste_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_4.png\\n', 
'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Toothbrush_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Apple_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Lime_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Hand_soap_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Peach_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_FMGP_PRE_Plum_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Wine_glass_01 1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_Remote_control_1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_Remote_control_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Remote_control_1_gravity_modified_cam_2.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Remote_control_1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Remote_control_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Remote_control_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_DEC_Candle_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_DEC_Candle_01_gravity_modified_cam_9.png\\n', 
'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_DEC_Candle_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_DEC_Candle_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Condiment_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_ELE_Mouse_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_5.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Salt_shaker_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Mug_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Mug_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Mug_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Mug_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_165_prefab_Towel1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_5/img_165_prefab_Towel1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_68_prefab_Towel1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_68_prefab_Towel1_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_5/img_68_prefab_Towel1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_68_prefab_Towel1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_DHP_PRE_Pink_cupcake_1024_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Slippers_01_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_Microwave_1_gravity_modified_cam_9.png\\n', 
'stimulus_anomaly_wall/apartment_5/img_171_prefab_Microwave_1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Microwave_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Microwave_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Microwave_1_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Microwave_1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_PRO_Knife_03_gravity_modified_cam_1.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_PRO_Knife_03_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Knife_03_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Knife_03_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Knife_03_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Knife_03_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_170_prefab_PRE_PRO_Plate_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_170_prefab_PRE_PRO_Plate_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Plate_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Plate_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Plate_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_73_prefab_PRE_PRO_Plate_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_171_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_PRE_PRO_Bowl_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_169_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_5/img_169_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_351_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_3.png\\n', 'stimulus_anomaly_wall/apartment_5/img_351_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_351_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_72_prefab_Book_1_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_5/img_72_prefab_Book_1_gravity_modified_cam_3.png\\n', 
'stimulus_anomaly_wall/apartment_5/img_72_prefab_Book_1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_72_prefab_Book_1_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Cellphone_1_gravity_modified_cam_8.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Cellphone_1_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Cellphone_1_gravity_modified_cam_6.png\\n', 'stimulus_anomaly_wall/apartment_5/img_74_prefab_Cellphone_1_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_6/img_180_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_9.png\\n', 'stimulus_anomaly_wall/apartment_6/img_180_prefab_PRE_ELE_Keyboard_01_gravity_modified_cam_7.png\\n', 'stimulus_anomaly_wall/apartment_6/img_293_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_4.png\\n', 'stimulus_anomaly_wall/apartment_6/img_293_prefab_PRE_DEC_Pillow_01_01_01_gravity_modified_cam_3.png\\n']\n" ], [ "newfilePath = '/home/mengmi/Projects/Proj_context3/VirtualHome/matlab/Mat/jason_anomaly_combined.csv'\n\nwith open(newfilePath, \"w\") as f:\n fieldnames = ['classname', 'apartmentid','room','imagename','left','top','right','bottom']\n #writer = csv.writer(f)\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n #writer.writerow({'classname': classnamelist, 'apartmentid': apartidlist})\n for i, content in enumerate(classnamelist):\n writer.writerow({'classname': classnamelist[i],\n 'apartmentid': apartidlist[i],\n 'left': leftlist[i],\n 'top': toplist[i],\n 'right': rightlist[i],\n 'bottom': bottomlist[i],\n 'room': roomlist[i],\n 'imagename':imagenamelist[i]})", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
ec83c4a4266978357f3a32771e08187b366f5d41
4,637
ipynb
Jupyter Notebook
locale/examples/02-plot/topo-map.ipynb
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
4
2020-08-07T08:19:19.000Z
2020-12-04T09:51:11.000Z
locale/examples/02-plot/topo-map.ipynb
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
19
2020-08-06T00:24:30.000Z
2022-03-30T19:22:24.000Z
locale/examples/02-plot/topo-map.ipynb
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
1
2021-03-09T07:50:40.000Z
2021-03-09T07:50:40.000Z
32.201389
436
0.563511
[ [ [ "%matplotlib inline\nfrom pyvista import set_plot_theme\nset_plot_theme('document')", "_____no_output_____" ] ], [ [ "Topographic Map {#ref_topo_map_example}\n===============\n\nThis is very similar to the `ref_texture_example`{.interpreted-text\nrole=\"ref\"} example except it is focused on plotting aerial imagery from\na GeoTIFF on top of some topography mesh.\n", "_____no_output_____" ] ], [ [ "# sphinx_gallery_thumbnail_number = 4\n\nimport pyvista as pv\nfrom pyvista import examples\n\n# Load the elevation data as a surface\nelevation = examples.download_crater_topo().warp_by_scalar()\n# Load the topographic map from a GeoTiff\ntopo_map = examples.download_crater_imagery()\n\nelevation", "_____no_output_____" ] ], [ [ "Let\\'s inspect the imagery that we just loaded\n", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rcParams['figure.dpi'] = 500\n\nplt.imshow(topo_map.to_array())", "_____no_output_____" ] ], [ [ "Once you have a topography mesh loaded as a surface mesh (we use a\n`pyvista.StructuredGrid`{.interpreted-text role=\"class\"} here) and an\nimage loaded as a `pyvista.Texture`{.interpreted-text role=\"class\"}\nobject using the `pyvista.read_texture`{.interpreted-text role=\"func\"}\nmethod, then you can map that imagery to the surface mesh as follows:\n", "_____no_output_____" ] ], [ [ "# Bounds of the aerial imagery - given to us\nbounds = (1818000, 1824500, 5645000, 5652500, 0, 3000)\n# Clip the elevation dataset to the map's extent\nlocal = elevation.clip_box(bounds, invert=False)\n# Apply texturing coordinates to associate the image to the surface\nlocal.texture_map_to_plane(use_bounds=True, inplace=True)", "_____no_output_____" ] ], [ [ "Now display it! Note that the imagery is aligned as we expect.\n", "_____no_output_____" ] ], [ [ "local.plot(texture=topo_map, cpos=\"xy\")", "_____no_output_____" ] ], [ [ "And here is a 3D perspective!\n", "_____no_output_____" ] ], [ [ "local.plot(texture=topo_map)", "_____no_output_____" ] ], [ [ "We could also display the entire region by extracting the surrounding\nregion and plotting the texture mapped local topography and the outside\narea\n", "_____no_output_____" ] ], [ [ "# Extract surrounding region from elevation data\nsurrounding = elevation.clip_box(bounds, invert=True)\n\n# Display with a shading technique\np = pv.Plotter()\np.add_mesh(local, texture=topo_map)\np.add_mesh(surrounding, color=\"white\")\np.enable_eye_dome_lighting()\np.camera_position = [(1831100., 5642142., 8168.),\n (1820841., 5648745., 1104.),\n (-0.435, 0.248, 0.865)]\np.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec83dd70950d2f87e6fc2ab06c2e482ef1b6bf5c
143,043
ipynb
Jupyter Notebook
notebooks/Examples.ipynb
shapiromatron/wafflemaker
44b844cea18f141e33f656f1c23adfb19a2ff7a4
[ "MIT" ]
null
null
null
notebooks/Examples.ipynb
shapiromatron/wafflemaker
44b844cea18f141e33f656f1c23adfb19a2ff7a4
[ "MIT" ]
null
null
null
notebooks/Examples.ipynb
shapiromatron/wafflemaker
44b844cea18f141e33f656f1c23adfb19a2ff7a4
[ "MIT" ]
null
null
null
448.410658
53,408
0.927553
[ [ [ "# Wafflemaker\n\nA series of examples from the waffle library shamelessly attempting to recreate examples from popular literature or similar libraries.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport wafflemaker", "_____no_output_____" ] ], [ [ "## A simple example", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(dict(\n values=np.random.normal(1e6, 1e5, 5),\n categories=[f'category #{i}' for i in range(1, 6)],\n))\n\nwafflemaker.waffle(\n nrows=7,\n ncols=5,\n values='values',\n labels='categories',\n scale_to_dims=True,\n data=df,\n)", "_____no_output_____" ] ], [ [ "## Scaling the data manually\n\nIf you choose not to autoscale data, you manually adjust rows/columns to get the appropriate shape. Inspiration from recreating a re-creation in the R based waffle library: https://github.com/hrbrmstr/waffle", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(dict(\n values=[84911, 14414, 10062, 8565],\n categories=[\n 'Mortgage ($85k)', \n 'Auto and tuition loans ($14k)', \n 'Home equity loans ($10k)', \n 'Credit cards ($9k)'\n ],\n hues=[\"#c7d4b6\", \"#a3aabd\", \"#a0d0de\", \"#97b5cf\"]\n))\n\ndf['scaled_values'] = df['values'] / 500.\n\nwafflemaker.waffle(\n nrows=7,\n values='scaled_values',\n labels='categories',\n hue='hues',\n scale_to_dims=False,\n data=df,\n grid_options=dict(linewidth=2),\n figure_options=dict(figsize=(14, 5)),\n title='Average Household Debt'\n)", "_____no_output_____" ] ], [ [ "## Using icons instead of a grid", "_____no_output_____" ] ], [ [ "import matplotlib.font_manager as fm\nimport requests\nimport tempfile", "_____no_output_____" ], [ "# get font and save to temp file\nfont_awesome_url = 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/fonts/fontawesome-webfont.ttf'\ntf = tempfile.NamedTemporaryFile()\nr = requests.get(font_awesome_url)\nwith open(tf.name, 'wb') as f:\n f.write(r.content)\n \n# use font in waffle plot\nprop = fm.FontProperties(fname=tf.name)\n\n# build plot\nwafflemaker.waffle(\n nrows=7, ncols=15,\n values='values', labels='categories', data=df,\n icon='\\uf015',\n background_color='#efefef',\n icon_options=dict(fontproperties=prop, size=30),\n icon_legend_options=dict(size=15),\n grid_options=dict(color='#efefef', linewidth=15),\n figure_options=dict(figsize=(14, 5)),\n title='icon: custom icon'\n )\n\n# delete temporary font\ntf.close()", "_____no_output_____" ] ], [ [ "## Building a series of panels \n\n\nBased on two excellent exmaples from the economist and flowing data.\n\n- https://www.economist.com/blogs/graphicdetail/2015/08/daily-chart-6\n- http://flowingdata.com/2017/08/03/working-on-tips/ ", "_____no_output_____" ] ], [ [ "fractions = np.array([.80, .74, .08, .61])\npositives = fractions * 120\nnegatives = 120 - positives\ntitles = [\n '80% don\\'t go out\\nafter dark',\n '74% do no activites\\nother than school',\n '8% engage in political\\ndiscussion and social\\nmovements, including offline',\n '61% would like to do\\nmore activities but are\\nprevented by safety concerns',\n]\n\nfig, axes = plt.subplots(1, 4, figsize=(16, 4))\nfig.suptitle(\"Subdued; of 120 survyed Syrian teenagers:\", y=1.2, fontsize=16)\nfor i, ax in enumerate(axes):\n wafflemaker.waffle(nrows=12, \n ncols=10,\n values=[positives[i], negatives[i]],\n title=titles[i],\n hue=['#f7931e', '#cddee8'],\n fill_direction=wafflemaker.CellFillDirection.ByRow,\n ax=ax)\n", "_____no_output_____" ], [ "fractions = np.array([\n 0.62, 0.59, 0.53, 0.52,\n 0.44, 
0.34, 0.16, 0.12,\n 0.12, 0.08, 0.07, 0.06,\n])\npositives = fractions * 100\nnegatives = 100 - positives\ntitles = [\n 'Waiter/Waitress', 'Bartender', 'Food Server', 'Banquet\\nCaptain',\n 'Bar Manager', 'Busser', 'Restaurant\\nHost/Hostess', 'Asst. Rest.\\nManager',\n 'Barista', 'Pizza Cook', 'Head Chef', 'Line Cook',\n]\n\nfig, axes = plt.subplots(3, 4, figsize=(16, 12))\nfig.suptitle(\"Percentage of wage from tips\", y=.95, fontsize=16)\nfor i, axlist in enumerate(axes):\n for j, ax in enumerate(axlist):\n n = i*4 + j\n wafflemaker.waffle(nrows=10, \n ncols=10,\n values=[positives[n], negatives[n]],\n title=titles[n],\n hue=['#03a502', '#f4f1f4'],\n fill_direction=wafflemaker.CellFillDirection.ByRow,\n ax=ax)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec83de07a3d430bc14e5e4733d1c0d0de7a4b393
10,475
ipynb
Jupyter Notebook
Pandas/Installing Jupyter Extensions.ipynb
CaptSolo/LU_PySem_2020_1
e29e1fab88a0278b3a8f8dbed21f7100d2c57ea4
[ "MIT" ]
1
2020-02-21T12:36:53.000Z
2020-02-21T12:36:53.000Z
Pandas/Installing Jupyter Extensions.ipynb
CaptSolo/LU_PySem_2020_1
e29e1fab88a0278b3a8f8dbed21f7100d2c57ea4
[ "MIT" ]
null
null
null
Pandas/Installing Jupyter Extensions.ipynb
CaptSolo/LU_PySem_2020_1
e29e1fab88a0278b3a8f8dbed21f7100d2c57ea4
[ "MIT" ]
1
2020-05-15T08:19:37.000Z
2020-05-15T08:19:37.000Z
87.291667
212
0.727064
[ [ [ "# https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/install.html", "_____no_output_____" ], [ "!pip install jupyter_contrib_nbextensions\n", "Collecting jupyter_contrib_nbextensions\n Downloading https://files.pythonhosted.org/packages/33/f0/6e2c00afda860f655fbf0f795f7310bdbf12122846344dfdc803fc7455d5/jupyter_contrib_nbextensions-0.5.1-py2.py3-none-any.whl (20.9MB)\nCollecting jupyter-nbextensions-configurator>=0.4.0 (from jupyter_contrib_nbextensions)\n Downloading https://files.pythonhosted.org/packages/51/a3/d72d5f2dc10c5ccf5a6f4c79f636bf071a5ce462dedd07af2f70384db6cb/jupyter_nbextensions_configurator-0.4.1.tar.gz (479kB)\nRequirement already satisfied: nbconvert>=4.2 in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter_contrib_nbextensions) (5.4.1)\nRequirement already satisfied: pyyaml in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter_contrib_nbextensions) (5.1)\nRequirement already satisfied: lxml in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter_contrib_nbextensions) (4.3.2)\nRequirement already satisfied: ipython-genutils in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter_contrib_nbextensions) (0.2.0)\nRequirement already satisfied: notebook>=4.0 in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter_contrib_nbextensions) (5.7.8)\nRequirement already satisfied: tornado in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter_contrib_nbextensions) (6.0.2)\nRequirement already satisfied: traitlets>=4.1 in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter_contrib_nbextensions) (4.3.2)\nCollecting jupyter-highlight-selected-word>=0.1.1 (from jupyter_contrib_nbextensions)\n Downloading https://files.pythonhosted.org/packages/50/d7/19ab7cfd60bf268d2abbacc52d4295a40f52d74dfc0d938e4761ee5e598b/jupyter_highlight_selected_word-0.2.0-py2.py3-none-any.whl\nCollecting jupyter-latex-envs>=1.3.8 (from jupyter_contrib_nbextensions)\n Downloading https://files.pythonhosted.org/packages/0e/15/55805de080d5542f76920364635e96e64d3b37f678befdfe3b16aa154205/jupyter_latex_envs-1.4.6.tar.gz (861kB)\nCollecting jupyter-contrib-core>=0.3.3 (from jupyter_contrib_nbextensions)\n Downloading https://files.pythonhosted.org/packages/e6/8f/04a752a8b66a66e7092c035e5d87d2502ac7ec07f9fb6059059b6c0dc272/jupyter_contrib_core-0.3.3-py2.py3-none-any.whl\nRequirement already satisfied: jupyter-core in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter_contrib_nbextensions) (4.4.0)\nRequirement already satisfied: mistune>=0.8.1 in c:\\programdata\\anaconda3\\lib\\site-packages (from nbconvert>=4.2->jupyter_contrib_nbextensions) (0.8.4)\nRequirement already satisfied: jinja2 in c:\\programdata\\anaconda3\\lib\\site-packages (from nbconvert>=4.2->jupyter_contrib_nbextensions) (2.10)\nRequirement already satisfied: pygments in c:\\programdata\\anaconda3\\lib\\site-packages (from nbconvert>=4.2->jupyter_contrib_nbextensions) (2.3.1)\nRequirement already satisfied: nbformat>=4.4 in c:\\programdata\\anaconda3\\lib\\site-packages (from nbconvert>=4.2->jupyter_contrib_nbextensions) (4.4.0)\nRequirement already satisfied: entrypoints>=0.2.2 in c:\\programdata\\anaconda3\\lib\\site-packages (from nbconvert>=4.2->jupyter_contrib_nbextensions) (0.3)\nRequirement already satisfied: bleach in c:\\programdata\\anaconda3\\lib\\site-packages (from nbconvert>=4.2->jupyter_contrib_nbextensions) (3.1.0)\nRequirement already satisfied: pandocfilters>=1.4.1 in c:\\programdata\\anaconda3\\lib\\site-packages (from 
nbconvert>=4.2->jupyter_contrib_nbextensions) (1.4.2)\nRequirement already satisfied: testpath in c:\\programdata\\anaconda3\\lib\\site-packages (from nbconvert>=4.2->jupyter_contrib_nbextensions) (0.4.2)\nRequirement already satisfied: defusedxml in c:\\programdata\\anaconda3\\lib\\site-packages (from nbconvert>=4.2->jupyter_contrib_nbextensions) (0.5.0)\nRequirement already satisfied: prometheus-client in c:\\programdata\\anaconda3\\lib\\site-packages (from notebook>=4.0->jupyter_contrib_nbextensions) (0.6.0)\nRequirement already satisfied: jupyter-client>=5.2.0 in c:\\programdata\\anaconda3\\lib\\site-packages (from notebook>=4.0->jupyter_contrib_nbextensions) (5.2.4)\nRequirement already satisfied: pyzmq>=17 in c:\\programdata\\anaconda3\\lib\\site-packages (from notebook>=4.0->jupyter_contrib_nbextensions) (18.0.0)\nRequirement already satisfied: ipykernel in c:\\programdata\\anaconda3\\lib\\site-packages (from notebook>=4.0->jupyter_contrib_nbextensions) (5.1.0)\nRequirement already satisfied: terminado>=0.8.1 in c:\\programdata\\anaconda3\\lib\\site-packages (from notebook>=4.0->jupyter_contrib_nbextensions) (0.8.1)\nRequirement already satisfied: Send2Trash in c:\\programdata\\anaconda3\\lib\\site-packages (from notebook>=4.0->jupyter_contrib_nbextensions) (1.5.0)\nRequirement already satisfied: decorator in c:\\programdata\\anaconda3\\lib\\site-packages (from traitlets>=4.1->jupyter_contrib_nbextensions) (4.4.0)\nRequirement already satisfied: six in c:\\programdata\\anaconda3\\lib\\site-packages (from traitlets>=4.1->jupyter_contrib_nbextensions) (1.12.0)\nRequirement already satisfied: ipython in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter-latex-envs>=1.3.8->jupyter_contrib_nbextensions) (7.4.0)\nRequirement already satisfied: setuptools in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter-contrib-core>=0.3.3->jupyter_contrib_nbextensions) (41.0.1)\nRequirement already satisfied: MarkupSafe>=0.23 in c:\\programdata\\anaconda3\\lib\\site-packages (from jinja2->nbconvert>=4.2->jupyter_contrib_nbextensions) (1.1.1)\nRequirement already satisfied: jsonschema!=2.5.0,>=2.4 in c:\\programdata\\anaconda3\\lib\\site-packages (from nbformat>=4.4->nbconvert>=4.2->jupyter_contrib_nbextensions) (3.0.1)\nRequirement already satisfied: webencodings in c:\\programdata\\anaconda3\\lib\\site-packages (from bleach->nbconvert>=4.2->jupyter_contrib_nbextensions) (0.5.1)\nRequirement already satisfied: python-dateutil>=2.1 in c:\\programdata\\anaconda3\\lib\\site-packages (from jupyter-client>=5.2.0->notebook>=4.0->jupyter_contrib_nbextensions) (2.8.0)\nRequirement already satisfied: pickleshare in c:\\programdata\\anaconda3\\lib\\site-packages (from ipython->jupyter-latex-envs>=1.3.8->jupyter_contrib_nbextensions) (0.7.5)\nRequirement already satisfied: backcall in c:\\programdata\\anaconda3\\lib\\site-packages (from ipython->jupyter-latex-envs>=1.3.8->jupyter_contrib_nbextensions) (0.1.0)\nRequirement already satisfied: prompt-toolkit<2.1.0,>=2.0.0 in c:\\programdata\\anaconda3\\lib\\site-packages (from ipython->jupyter-latex-envs>=1.3.8->jupyter_contrib_nbextensions) (2.0.9)\nRequirement already satisfied: jedi>=0.10 in c:\\programdata\\anaconda3\\lib\\site-packages (from ipython->jupyter-latex-envs>=1.3.8->jupyter_contrib_nbextensions) (0.13.3)\nRequirement already satisfied: colorama; sys_platform == \"win32\" in c:\\programdata\\anaconda3\\lib\\site-packages (from ipython->jupyter-latex-envs>=1.3.8->jupyter_contrib_nbextensions) (0.4.1)\nRequirement already 
satisfied: attrs>=17.4.0 in c:\\programdata\\anaconda3\\lib\\site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.4->nbconvert>=4.2->jupyter_contrib_nbextensions) (19.1.0)\nRequirement already satisfied: pyrsistent>=0.14.0 in c:\\programdata\\anaconda3\\lib\\site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.4->nbconvert>=4.2->jupyter_contrib_nbextensions) (0.14.11)\nRequirement already satisfied: wcwidth in c:\\programdata\\anaconda3\\lib\\site-packages (from prompt-toolkit<2.1.0,>=2.0.0->ipython->jupyter-latex-envs>=1.3.8->jupyter_contrib_nbextensions) (0.1.7)\nRequirement already satisfied: parso>=0.3.0 in c:\\programdata\\anaconda3\\lib\\site-packages (from jedi>=0.10->ipython->jupyter-latex-envs>=1.3.8->jupyter_contrib_nbextensions) (0.3.4)\nBuilding wheels for collected packages: jupyter-nbextensions-configurator, jupyter-latex-envs\n Building wheel for jupyter-nbextensions-configurator (setup.py): started\n Building wheel for jupyter-nbextensions-configurator (setup.py): finished with status 'done'\n Stored in directory: C:\\Users\\val-p1\\AppData\\Local\\pip\\Cache\\wheels\\15\\df\\fe\\2a74fe34709e7fdc5ae153a768675d9fda93cc7d5133ed1fb0\n Building wheel for jupyter-latex-envs (setup.py): started\n Building wheel for jupyter-latex-envs (setup.py): finished with status 'done'\n Stored in directory: C:\\Users\\val-p1\\AppData\\Local\\pip\\Cache\\wheels\\0d\\71\\2a\\164491997299b9f2479a251e254323fe35d946779e18f27956\nSuccessfully built jupyter-nbextensions-configurator jupyter-latex-envs\nInstalling collected packages: jupyter-contrib-core, jupyter-nbextensions-configurator, jupyter-highlight-selected-word, jupyter-latex-envs, jupyter-contrib-nbextensions\nSuccessfully installed jupyter-contrib-core-0.3.3 jupyter-contrib-nbextensions-0.5.1 jupyter-highlight-selected-word-0.2.0 jupyter-latex-envs-1.4.6 jupyter-nbextensions-configurator-0.4.1\n" ], [ "!jupyter contrib nbextension install --user", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
ec83e627c60e79cba16ba1d195d908b405e6d85a
13,550
ipynb
Jupyter Notebook
lectures/ml/clustering/Clustering Code Along.ipynb
xtianhb/spark
277a52f988112ec9ff021fe1550937311a2abab5
[ "MIT" ]
null
null
null
lectures/ml/clustering/Clustering Code Along.ipynb
xtianhb/spark
277a52f988112ec9ff021fe1550937311a2abab5
[ "MIT" ]
null
null
null
lectures/ml/clustering/Clustering Code Along.ipynb
xtianhb/spark
277a52f988112ec9ff021fe1550937311a2abab5
[ "MIT" ]
null
null
null
29.845815
510
0.531365
[ [ [ "# Clustering Code Along\n\nWe'll be working with a real data set about seeds, from UCI repository: https://archive.ics.uci.edu/ml/datasets/seeds.", "_____no_output_____" ], [ "The examined group comprised kernels belonging to three different varieties of wheat: Kama, Rosa and Canadian, 70 elements each, randomly selected for \nthe experiment. High quality visualization of the internal kernel structure was detected using a soft X-ray technique. It is non-destructive and considerably cheaper than other more sophisticated imaging techniques like scanning microscopy or laser technology. The images were recorded on 13x18 cm X-ray KODAK plates. Studies were conducted using combine harvested wheat grain originating from experimental fields, explored at the Institute of Agrophysics of the Polish Academy of Sciences in Lublin. \n\nThe data set can be used for the tasks of classification and cluster analysis.\n\n\nAttribute Information:\n\nTo construct the data, seven geometric parameters of wheat kernels were measured: \n1. area A, \n2. perimeter P, \n3. compactness C = 4*pi*A/P^2, \n4. length of kernel, \n5. width of kernel, \n6. asymmetry coefficient \n7. length of kernel groove. \nAll of these parameters were real-valued continuous.\n\nLet's see if we can cluster them in to 3 groups with K-means!", "_____no_output_____" ] ], [ [ "from pyspark.sql import SparkSession\nspark = SparkSession.builder.appName('cluster').getOrCreate()", "21/11/12 13:40:14 WARN Utils: Your hostname, xtian-pc resolves to a loopback address: 127.0.1.1; using 192.168.1.39 instead (on interface enp5s0)\n21/11/12 13:40:14 WARN Utils: Set SPARK_LOCAL_IP if you need to bind to another address\nWARNING: An illegal reflective access operation has occurred\nWARNING: Illegal reflective access by org.apache.spark.unsafe.Platform (file:/home/xtian/local/spark-3.1.2-bin-hadoop3.2/jars/spark-unsafe_2.12-3.1.2.jar) to constructor java.nio.DirectByteBuffer(long,int)\nWARNING: Please consider reporting this to the maintainers of org.apache.spark.unsafe.Platform\nWARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations\nWARNING: All illegal access operations will be denied in a future release\n21/11/12 13:40:15 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\nUsing Spark's default log4j profile: org/apache/spark/log4j-defaults.properties\nSetting default log level to \"WARN\".\nTo adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n21/11/12 13:40:15 WARN Utils: Service 'SparkUI' could not bind on port 4040. Attempting port 4041.\n21/11/12 13:40:15 WARN Utils: Service 'SparkUI' could not bind on port 4041. Attempting port 4042.\n21/11/12 13:40:15 WARN Utils: Service 'SparkUI' could not bind on port 4042. Attempting port 4043.\n21/11/12 13:40:15 WARN Utils: Service 'SparkUI' could not bind on port 4043. 
Attempting port 4044.\n" ], [ "from pyspark.ml.clustering import KMeans\n\n# Loads data.\ndataset = spark.read.csv(\"seeds_dataset.csv\",header=True,inferSchema=True)", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ], [ "dataset.describe().show()", "+-------+------------------+------------------+--------------------+-------------------+------------------+---------------------+-------------------+\n|summary| area| perimeter| compactness| length_of_kernel| width_of_kernel|asymmetry_coefficient| length_of_groove|\n+-------+------------------+------------------+--------------------+-------------------+------------------+---------------------+-------------------+\n| count| 210| 210| 210| 210| 210| 210| 210|\n| mean|14.847523809523816|14.559285714285718| 0.8709985714285714| 5.628533333333335| 3.258604761904762| 3.7001999999999997| 5.408071428571429|\n| stddev|2.9096994306873647|1.3059587265640225|0.023629416583846364|0.44306347772644983|0.3777144449065867| 1.5035589702547392|0.49148049910240543|\n| min| 10.59| 12.41| 0.8081| 4.899| 2.63| 0.765| 4.519|\n| max| 21.18| 17.25| 0.9183| 6.675| 4.033| 8.456| 6.55|\n+-------+------------------+------------------+--------------------+-------------------+------------------+---------------------+-------------------+\n\n" ], [ "dataset.columns", "_____no_output_____" ], [ "dataset.printSchema()", "root\n |-- area: double (nullable = true)\n |-- perimeter: double (nullable = true)\n |-- compactness: double (nullable = true)\n |-- length_of_kernel: double (nullable = true)\n |-- width_of_kernel: double (nullable = true)\n |-- asymmetry_coefficient: double (nullable = true)\n |-- length_of_groove: double (nullable = true)\n\n" ] ], [ [ "## Format the Data", "_____no_output_____" ] ], [ [ "from pyspark.ml.linalg import Vectors\nfrom pyspark.ml.feature import VectorAssembler", "_____no_output_____" ], [ "dataset.columns", "_____no_output_____" ], [ "vec_assembler = VectorAssembler(inputCols = dataset.columns, outputCol='features')", "_____no_output_____" ], [ "final_data = vec_assembler.transform(dataset)", "_____no_output_____" ] ], [ [ "## Scale the Data\nIt is a good idea to scale our data to deal with the curse of dimensionality: https://en.wikipedia.org/wiki/Curse_of_dimensionality", "_____no_output_____" ] ], [ [ "from pyspark.ml.feature import StandardScaler", "_____no_output_____" ], [ "scaler = StandardScaler(inputCol=\"features\", outputCol=\"scaledFeatures\", withStd=True, withMean=False)", "_____no_output_____" ], [ "# Compute summary statistics by fitting the StandardScaler\nscalerModel = scaler.fit(final_data)", "_____no_output_____" ], [ "# Normalize each feature to have unit standard deviation.\nfinal_data = scalerModel.transform(final_data)", "_____no_output_____" ], [ "final_data.head()", "_____no_output_____" ] ], [ [ "## Train the Model and Evaluate", "_____no_output_____" ] ], [ [ "# Trains a k-means model.\nkmeans = KMeans(featuresCol='scaledFeatures',k=3)\nmodel = kmeans.fit(final_data)", "21/11/12 13:49:51 WARN BLAS: Failed to load implementation from: com.github.fommil.netlib.NativeSystemBLAS\n21/11/12 13:49:51 WARN BLAS: Failed to load implementation from: com.github.fommil.netlib.NativeRefBLAS\n" ], [ "# Evaluate clustering by computing Within Set Sum of Squared Errors.\n# Obsolete\n#wssse = model.computeCost(final_data)\nwssse = 428.6082011872446\nprint(\"Within Set Sum of Squared Errors = \" + str(wssse))", "Within Set Sum of Squared Errors = 428.6082011872446\n" ], [ "# Shows the result.\ncenters = 
model.clusterCenters()\nprint(\"Cluster Centers: \")\nfor center in centers:\n print(center)", "Cluster Centers: \n[ 6.35645488 12.40730852 37.41990178 13.93860446 9.7892399 2.41585013\n 12.29286107]\n[ 4.07497225 10.14410142 35.89816849 11.80812742 7.54416916 3.15410901\n 10.38031464]\n[ 4.96198582 10.97871333 37.30930808 12.44647267 8.62880781 1.80061978\n 10.41913733]\n" ], [ "model.transform(final_data).select('prediction').show()", "+----------+\n|prediction|\n+----------+\n| 2|\n| 2|\n| 2|\n| 2|\n| 2|\n| 2|\n| 2|\n| 2|\n| 0|\n| 2|\n| 2|\n| 2|\n| 2|\n| 2|\n| 2|\n| 2|\n| 2|\n| 2|\n| 2|\n| 1|\n+----------+\nonly showing top 20 rows\n\n" ] ], [ [ "Now you are ready for your consulting Project!\n# Great Job!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
ec83f58d828c8d5f721b26b06e3f848bed41ddee
20,120
ipynb
Jupyter Notebook
downloaded_kernels/house_sales/kernel_55.ipynb
josepablocam/common-code-extraction
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
[ "MIT" ]
null
null
null
downloaded_kernels/house_sales/kernel_55.ipynb
josepablocam/common-code-extraction
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
[ "MIT" ]
null
null
null
downloaded_kernels/house_sales/kernel_55.ipynb
josepablocam/common-code-extraction
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
[ "MIT" ]
2
2021-07-12T00:48:08.000Z
2021-08-11T12:53:05.000Z
20,120
20,120
0.696869
[ [ [ "# House Price Prediction in King County Using Keras", "_____no_output_____" ], [ "By *ironfrown*\n\nThis is a deep learning version of King County house price prediction using Keras deep learning package with Tensorflow backend. Running with GPU support is preferable. Without any major feature engineering, this approach gives MAE of around $77K.\n", "_____no_output_____" ], [ "## Preparation", "_____no_output_____" ], [ "*Load some standard Python libraries.*", "_____no_output_____" ] ], [ [ "from __future__ import print_function\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "*Load Keras libraries used in this example.*", "_____no_output_____" ] ], [ [ "import keras\nfrom keras import metrics\nfrom keras import regularizers\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Activation\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.optimizers import Adam, RMSprop\nfrom keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint\nfrom keras.utils import plot_model\nfrom keras.models import load_model", "_____no_output_____" ] ], [ [ "## Load all data", "_____no_output_____" ], [ "*Load data from CSV file and define the label column.*", "_____no_output_____" ] ], [ [ "kc_data_org = pd.read_csv(\"../input/kc_house_data.csv\")", "_____no_output_____" ] ], [ [ "*Transform dates into year, month and day and select columns.*", "_____no_output_____" ] ], [ [ "kc_data_org['sale_yr'] = pd.to_numeric(kc_data_org.date.str.slice(0, 4))\nkc_data_org['sale_month'] = pd.to_numeric(kc_data_org.date.str.slice(4, 6))\nkc_data_org['sale_day'] = pd.to_numeric(kc_data_org.date.str.slice(6, 8))\n\nkc_data = pd.DataFrame(kc_data_org, columns=[\n 'sale_yr','sale_month','sale_day',\n 'bedrooms','bathrooms','sqft_living','sqft_lot','floors',\n 'condition','grade','sqft_above','sqft_basement','yr_built',\n 'zipcode','lat','long','sqft_living15','sqft_lot15','price'])\nlabel_col = 'price'\n\nprint(kc_data.describe())", "_____no_output_____" ] ], [ [ "## Split data for training and validation", "_____no_output_____" ], [ "*Function to split a range of data frame / array indeces into three sub-ranges.*", "_____no_output_____" ] ], [ [ "def train_validate_test_split(df, train_part=.6, validate_part=.2, test_part=.2, seed=None):\n np.random.seed(seed)\n total_size = train_part + validate_part + test_part\n train_percent = train_part / total_size\n validate_percent = validate_part / total_size\n test_percent = test_part / total_size\n perm = np.random.permutation(df.index)\n m = len(df)\n train_end = int(train_percent * m)\n validate_end = int(validate_percent * m) + train_end\n train = perm[:train_end]\n validate = perm[train_end:validate_end]\n test = perm[validate_end:]\n return train, validate, test", "_____no_output_____" ] ], [ [ "*Split index ranges into three parts, however, ignore the third.*", "_____no_output_____" ] ], [ [ "train_size, valid_size, test_size = (70, 30, 0)\nkc_train, kc_valid, kc_test = train_validate_test_split(kc_data, \n train_part=train_size, \n validate_part=valid_size,\n test_part=test_size,\n seed=2017)", "_____no_output_____" ] ], [ [ "*Extract data for training and validation into x and y vectors.*", "_____no_output_____" ] ], [ [ "kc_y_train = kc_data.loc[kc_train, [label_col]]\nkc_x_train = kc_data.loc[kc_train, :].drop(label_col, axis=1)\nkc_y_valid = kc_data.loc[kc_valid, [label_col]]\nkc_x_valid = kc_data.loc[kc_valid, :].drop(label_col, 
axis=1)\n\nprint('Size of training set: ', len(kc_x_train))\nprint('Size of validation set: ', len(kc_x_valid))\nprint('Size of test set: ', len(kc_test), '(not converted)')", "_____no_output_____" ] ], [ [ "## Prepare data for training and validation of the Keras model", "_____no_output_____" ], [ "*Function to get statistics about a data frame.*", "_____no_output_____" ] ], [ [ "def norm_stats(df1, df2):\n dfs = df1.append(df2)\n minimum = np.min(dfs)\n maximum = np.max(dfs)\n mu = np.mean(dfs)\n sigma = np.std(dfs)\n return (minimum, maximum, mu, sigma)", "_____no_output_____" ] ], [ [ "*Function to Z-normalise the entire data frame - note stats for Z transform passed in.*", "_____no_output_____" ] ], [ [ "def z_score(col, stats):\n m, M, mu, s = stats\n df = pd.DataFrame()\n for c in col.columns:\n df[c] = (col[c]-mu[c])/s[c]\n return df", "_____no_output_____" ] ], [ [ "*Normalise training and validation predictors using the stats from training data only (to ensure the same transformation applies to both training and validation data), and then convert them into numpy arrays to be used by Keras.*", "_____no_output_____" ] ], [ [ "stats = norm_stats(kc_x_train, kc_x_valid)\narr_x_train = np.array(z_score(kc_x_train, stats))\narr_y_train = np.array(kc_y_train)\narr_x_valid = np.array(z_score(kc_x_valid, stats))\narr_y_valid = np.array(kc_y_valid)\n\nprint('Training shape:', arr_x_train.shape)\nprint('Training samples: ', arr_x_train.shape[0])\nprint('Validation samples: ', arr_x_valid.shape[0])", "_____no_output_____" ] ], [ [ "## Create Keras model", "_____no_output_____" ], [ "***Three functions to define alternative Keras models***\n\n*The first is very simple, consisting of three layers and Adam optimizer.*", "_____no_output_____" ] ], [ [ "def basic_model_1(x_size, y_size):\n t_model = Sequential()\n t_model.add(Dense(100, activation=\"tanh\", input_shape=(x_size,)))\n t_model.add(Dense(50, activation=\"relu\"))\n t_model.add(Dense(y_size))\n print(t_model.summary())\n t_model.compile(loss='mean_squared_error',\n optimizer=Adam(),\n metrics=[metrics.mae])\n return(t_model)", "_____no_output_____" ] ], [ [ "*The second with Adam optimizer consists of 4 layers and the first uses 10% dropouts.*", "_____no_output_____" ] ], [ [ "def basic_model_2(x_size, y_size):\n t_model = Sequential()\n t_model.add(Dense(100, activation=\"tanh\", input_shape=(x_size,)))\n t_model.add(Dropout(0.1))\n t_model.add(Dense(50, activation=\"relu\"))\n t_model.add(Dense(20, activation=\"relu\"))\n t_model.add(Dense(y_size))\n print(t_model.summary())\n t_model.compile(loss='mean_squared_error',\n optimizer=Adam(),\n metrics=[metrics.mae])\n return(t_model)", "_____no_output_____" ] ], [ [ "*The third is the most complex, it extends the previous model with Nadam optimizer, dropouts and L1/L2 regularisers.*", "_____no_output_____" ] ], [ [ "def basic_model_3(x_size, y_size):\n t_model = Sequential()\n t_model.add(Dense(80, activation=\"tanh\", kernel_initializer='normal', input_shape=(x_size,)))\n t_model.add(Dropout(0.2))\n t_model.add(Dense(120, activation=\"relu\", kernel_initializer='normal', \n kernel_regularizer=regularizers.l1(0.01), bias_regularizer=regularizers.l1(0.01)))\n t_model.add(Dropout(0.1))\n t_model.add(Dense(20, activation=\"relu\", kernel_initializer='normal', \n kernel_regularizer=regularizers.l1_l2(0.01), bias_regularizer=regularizers.l1_l2(0.01)))\n t_model.add(Dropout(0.1))\n t_model.add(Dense(10, activation=\"relu\", kernel_initializer='normal'))\n t_model.add(Dropout(0.0))\n 
t_model.add(Dense(y_size))\n t_model.compile(\n loss='mean_squared_error',\n optimizer='nadam',\n metrics=[metrics.mae])\n return(t_model)", "_____no_output_____" ] ], [ [ "*Now we create the model - use one of the above functions.*", "_____no_output_____" ] ], [ [ "model = basic_model_3(arr_x_train.shape[1], arr_y_train.shape[1])\nmodel.summary()", "_____no_output_____" ] ], [ [ "## Fit/Train Keras model", "_____no_output_____" ], [ "*Define how many epochs of training should be done and what is the batch size.*", "_____no_output_____" ] ], [ [ "epochs = 500\nbatch_size = 128\n\nprint('Epochs: ', epochs)\nprint('Batch size: ', batch_size)", "_____no_output_____" ] ], [ [ "*Specify Keras callbacks which allow additional functionality while the model is being fitted.*\n- ***ModelCheckpoint*** *allows to save the models as they are being built or improved.*\n- ***TensorBoard*** *interacts with TensorFlow interactive reporting system.*\n- ***EarlyStopping*** *watches one of the model measurements and stops fitting when no improvement.*", "_____no_output_____" ] ], [ [ "keras_callbacks = [\n # ModelCheckpoint('/tmp/keras_checkpoints/model.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=True, verbose=2)\n # ModelCheckpoint('/tmp/keras_checkpoints/model.{epoch:02d}.hdf5', monitor='val_loss', save_best_only=True, verbose=0)\n # TensorBoard(log_dir='/tmp/keras_logs/model_3', histogram_freq=0, write_graph=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None),\n EarlyStopping(monitor='val_mean_absolute_error', patience=20, verbose=0)\n]", "_____no_output_____" ] ], [ [ "*Fit the model and record the history of training and validation.*<br/>\n*As we specified EarlyStopping with patience=20, with luck the training will stop in less than 200 epochs.*<br/>\n***Be patient, the fitting process takes time, use verbose=2 for visual feedback.***", "_____no_output_____" ] ], [ [ "history = model.fit(arr_x_train, arr_y_train,\n batch_size=batch_size,\n epochs=epochs,\n shuffle=True,\n verbose=0, # Change it to 2, if wished to observe execution\n validation_data=(arr_x_valid, arr_y_valid),\n callbacks=keras_callbacks)", "_____no_output_____" ] ], [ [ "## Evaluate and report performance of the trained model", "_____no_output_____" ] ], [ [ "train_score = model.evaluate(arr_x_train, arr_y_train, verbose=0)\nvalid_score = model.evaluate(arr_x_valid, arr_y_valid, verbose=0)\n\nprint('Train MAE: ', round(train_score[1], 4), ', Train Loss: ', round(train_score[0], 4)) \nprint('Val MAE: ', round(valid_score[1], 4), ', Val Loss: ', round(valid_score[0], 4))", "_____no_output_____" ] ], [ [ "*This function allows plotting of the training history*", "_____no_output_____" ] ], [ [ "def plot_hist(h, xsize=6, ysize=10):\n # Prepare plotting\n fig_size = plt.rcParams[\"figure.figsize\"]\n plt.rcParams[\"figure.figsize\"] = [xsize, ysize]\n fig, axes = plt.subplots(nrows=4, ncols=4, sharex=True)\n \n # summarize history for MAE\n plt.subplot(211)\n plt.plot(h['mean_absolute_error'])\n plt.plot(h['val_mean_absolute_error'])\n plt.title('Training vs Validation MAE')\n plt.ylabel('MAE')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'], loc='upper left')\n \n # summarize history for loss\n plt.subplot(212)\n plt.plot(h['loss'])\n plt.plot(h['val_loss'])\n plt.title('Training vs Validation Loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'], loc='upper left')\n \n # Plot it all in IPython (non-interactive)\n plt.draw()\n 
plt.show()\n\n return", "_____no_output_____" ] ], [ [ "*Now plot the training history, i.e. the Mean Absolute Error and Loss (Mean Squared Error), which were both defined at the time of model compilation. Note that the plot shows validation error as less than training error, which is quite deceptive. The reason for this is that training error is calculated for the entire epoch (and at its beginning it was much worse than at the end), whereas the validation error is taken from the last batch (after the model improved). See the above evaluation statistics to confirm that the evaluation puts these errors in the correct order at the very end.*", "_____no_output_____" ] ], [ [ "plot_hist(history.history, xsize=8, ysize=12)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec83fa04d1f3245bc774c9a909b524a11ee3cd3f
172,049
ipynb
Jupyter Notebook
CGMPortfolio/Code/Python/Habit_grFac_by_MateoMethod.ipynb
HsinYiHung/HARK_HY
086c46af5bd037fe1ced6906c6ea917ed58b134f
[ "Apache-2.0" ]
null
null
null
CGMPortfolio/Code/Python/Habit_grFac_by_MateoMethod.ipynb
HsinYiHung/HARK_HY
086c46af5bd037fe1ced6906c6ea917ed58b134f
[ "Apache-2.0" ]
null
null
null
CGMPortfolio/Code/Python/Habit_grFac_by_MateoMethod.ipynb
HsinYiHung/HARK_HY
086c46af5bd037fe1ced6906c6ea917ed58b134f
[ "Apache-2.0" ]
null
null
null
264.284178
34,708
0.913554
[ [ [ "'''\nExample implementations of HARK.ConsumptionSaving.ConsPortfolioModel\n'''\nfrom HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio\nfrom HARK.ConsumptionSaving.ConsIndShockModel import init_lifecycle\nfrom HARK.utilities import plotFuncs\nfrom copy import copy\nfrom time import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd \n# If the ipython process contains 'terminal' assume not in a notebook\ndef in_ipynb():\n try:\n if 'terminal' in str(type(get_ipython())):\n return False\n else:\n return True\n except NameError:\n return False\n \n# Determine whether to make the figures inline (for spyder or jupyter)\n# vs whatever is the automatic setting that will apply if run from the terminal\nif in_ipynb():\n # %matplotlib inline generates a syntax error when run from the shell\n # so do this instead\n get_ipython().run_line_magic('matplotlib', 'inline')\nelse:\n get_ipython().run_line_magic('matplotlib', 'auto')\n", "_____no_output_____" ], [ "\n# Relative risk aversion\nCRRA = 10\n# Discount factor\nDiscFac = 0.96\n\ntime_params = {'Age_born': 20, 'Age_retire': 65, 'Age_death': 100}\nt_start = time_params['Age_born']\nt_ret = time_params['Age_retire'] # We are currently interpreting this as the last period of work\nt_end = time_params['Age_death']\n\n# Survival probabilities from the author's Fortran code\nn = 80\nsurvprob = np.zeros(n+1)\nsurvprob[1] = 0.99845\nsurvprob[2] = 0.99839\nsurvprob[3] = 0.99833\nsurvprob[4] = 0.9983\nsurvprob[5] = 0.99827\nsurvprob[6] = 0.99826\nsurvprob[7] = 0.99824\nsurvprob[8] = 0.9982\nsurvprob[9] = 0.99813\nsurvprob[10] = 0.99804\nsurvprob[11] = 0.99795\nsurvprob[12] = 0.99785\nsurvprob[13] = 0.99776\nsurvprob[14] = 0.99766\nsurvprob[15] = 0.99755\nsurvprob[16] = 0.99743\nsurvprob[17] = 0.9973\nsurvprob[18] = 0.99718\nsurvprob[19] = 0.99707\nsurvprob[20] = 0.99696\nsurvprob[21] = 0.99685\nsurvprob[22] = 0.99672\nsurvprob[23] = 0.99656\nsurvprob[24] = 0.99635\nsurvprob[25] = 0.9961\nsurvprob[26] = 0.99579\nsurvprob[27] = 0.99543\nsurvprob[28] = 0.99504\nsurvprob[29] = 0.99463\nsurvprob[30] = 0.9942\nsurvprob[31] = 0.9937\nsurvprob[32] = 0.99311\nsurvprob[33] = 0.99245\nsurvprob[34] = 0.99172\nsurvprob[35] = 0.99091\nsurvprob[36] = 0.99005\nsurvprob[37] = 0.98911\nsurvprob[38] = 0.98803\nsurvprob[39] = 0.9868\nsurvprob[40] = 0.98545\nsurvprob[41] = 0.98409\nsurvprob[42] = 0.9827\nsurvprob[43] = 0.98123\nsurvprob[44] = 0.97961\nsurvprob[45] = 0.97786\nsurvprob[46] = 0.97603\nsurvprob[47] = 0.97414\nsurvprob[48] = 0.97207\nsurvprob[49] = 0.9697\nsurvprob[50] = 0.96699\nsurvprob[51] = 0.96393\nsurvprob[52] = 0.96055\nsurvprob[53] = 0.9569\nsurvprob[54] = 0.9531\nsurvprob[55] = 0.94921\nsurvprob[56] = 0.94508\nsurvprob[57] = 0.94057\nsurvprob[58] = 0.9357\nsurvprob[59] = 0.93031\nsurvprob[60] = 0.92424\nsurvprob[61] = 0.91717\nsurvprob[62] = 0.90922\nsurvprob[63] = 0.90089\nsurvprob[64] = 0.89282\nsurvprob[65] = 0.88503\nsurvprob[66] = 0.87622\nsurvprob[67] = 0.86576\nsurvprob[68] = 0.8544\nsurvprob[69] = 0.8423\nsurvprob[70] = 0.82942\nsurvprob[71] = 0.8154\nsurvprob[72] = 0.80002\nsurvprob[73] = 0.78404\nsurvprob[74] = 0.76842\nsurvprob[75] = 0.75382\nsurvprob[76] = 0.73996\nsurvprob[77] = 0.72464\nsurvprob[78] = 0.71057\nsurvprob[79] = 0.6961\nsurvprob[80] = 0.6809\n\n# Fix indexing problem (fortran starts at 1, python at 0)\nsurvprob = np.delete(survprob, [0])\n\n# Labor income\n\n# They assume its a polinomial of age. 
Here are the coefficients\na=-2.170042+2.700381\nb1=0.16818\nb2=-0.0323371/10\nb3=0.0019704/100\n\ntime_params = {'Age_born': 20, 'Age_retire': 65, 'Age_death': 100}\nt_start = time_params['Age_born']\nt_ret = time_params['Age_retire'] # We are currently interpreting this as the last period of work\nt_end = time_params['Age_death']\n\n# They assume retirement income is a fraction of labor income in the\n# last working period\nrepl_fac = 0.68212\n\n# Compute average income at each point in (working) life\nf = np.arange(t_start, t_ret+1,1)\nf = a + b1*f + b2*(f**2) + b3*(f**3)\ndet_work_inc = np.exp(f)\n\n# Retirement income\ndet_ret_inc = repl_fac*det_work_inc[-1]*np.ones(t_end - t_ret)\n\n# Get a full vector of the deterministic part of income\ndet_income = np.concatenate((det_work_inc, det_ret_inc))\n\n# ln Gamma_t+1 = ln f_t+1 - ln f_t\ngr_fac = np.exp(np.diff(np.log(det_income)))\n\n# Now we have growth factors for T_end-1 periods.\n\n# Finally define the normalization factor used by CGM, for plots.\n# ### IMPORTANT ###\n# We adjust this normalization factor for what we believe is a typo in the\n# original article. See the REMARK jupyter notebook for details.\nnorm_factor_0 = det_income * np.exp(0)\n\n# %% Shocks\n\n# Transitory and permanent shock variance from the paper\nstd_tran_shock = np.sqrt(0.0738)\nstd_perm_shock = np.sqrt(0.0106)\n\n# Vectorize. (HARK turns off these shocks after T_retirement)\nstd_tran_vec = np.array([std_tran_shock]*(t_end-t_start))\nstd_perm_vec = np.array([std_perm_shock]*(t_end-t_start))\n\n# %% Financial instruments\n\n# Risk-free factor\nRfree = 1.02\n\n# Creation of risky asset return distributions\n\nMu = 0.06 # Equity premium\nStd = 0.157 # standard deviation of rate-of-return shocks\n\nRiskyAvg = Mu + Rfree\nRiskyStd = Std\n# Make a dictionary to specify the rest of params\ndict_portfolio={}\ndict_portfolio['CRRA'] = CRRA\ndict_portfolio['Rfree'] = Rfree\ndict_portfolio['DiscFac'] = DiscFac\ndict_portfolio['T_age'] = t_end-t_start+1\ndict_portfolio['T_cycle'] = t_end-t_start\ndict_portfolio['T_retire'] = t_ret-t_start+1\ndict_portfolio['cycles'] = 1\ndict_portfolio['PermShkStd'] = std_perm_vec\ndict_portfolio['PermShkCount'] = 3\ndict_portfolio['TranShkStd'] = std_tran_vec\ndict_portfolio['RiskyAvg'] = RiskyAvg\ndict_portfolio['RiskyStd'] = RiskyStd\ndict_portfolio['RiskyShareCount'] = 30\ndict_portfolio['LivPrb'] = survprob.tolist()\ndict_portfolio['PermGroFac'] = gr_fac.tolist()\ndict_portfolio['TranShkCount'] = 3\ndict_portfolio['UnempPrb']= 0\ndict_portfolio['UnempPrbRet']= 0\ndict_portfolio['IncUnemp']= 0\ndict_portfolio['IncUnempRet']= 0\ndict_portfolio['BoroCnstArt']= 0\ndict_portfolio['tax_rate']= 0.0\ndict_portfolio['RiskyCount']=3\ndict_portfolio['RiskyShareCount']= 30\ndict_portfolio['vFuncBool']=False\ndict_portfolio['CubicBool']= False\n# Simulation params\ndict_portfolio['AgentCount']= 10\ndict_portfolio['pLvlInitMean']= np.log(det_income[0]) # Mean of log initial permanent income (only matters for simulation)\ndict_portfolio['pLvlInitStd']= std_perm_shock # Standard deviation of log initial permanent income (only matters for simulation)\ndict_portfolio['T_sim']= (t_end - t_start+1)*50\n# Unused params required for simulation\ndict_portfolio['PermGroFacAgg']= 1\ndict_portfolio['aNrmInitMean']= -50.0 # Agents start with 0 assets (this is log-mean)\ndict_portfolio['aNrmInitStd' ]= 0.0\nage_plot_params = [20, 30, 55, 75]", "_____no_output_____" ], [ "import os\n\n\n# Create a grid of market resources for the plots\n \nmMin = 0 # Minimum 
ratio of assets to income to plot\nmMax = 10 # Maximum ratio of assets to income to plot\nmPts = 100 # Number of points to plot \n\neevalgrid = np.linspace(0,mMax,mPts) # range of values of assets for the plot\n\n### Habit\n# Plot different habit states- Figure 1\nplt.figure()\nfor H0 in [2,4,6]:\n dict_portfolio['PermGroFac'] = gr_fac.tolist()\n Gamma = 0.8\n Lambda = 0.5\n #H0=0.5\n #H = np.ones(81)*H0\n #for i in range(len(H)):\n # H[i] = H[i]/Lambda**(i+1)\n #H = H / Lambda\n #H = H ** Gamma\n \n dict_portfolio['PermGroFac'] = [i * Lambda for i in dict_portfolio['PermGroFac']]\n agent = PortfolioConsumerType(**dict_portfolio)\n agent.solve()\n\n norm_factor = norm_factor_0\n \n # In the last period of life you consume everything\n # so portfolio choice is irrelevant\n\n # Ages\n ages = [25]\n age_born = time_params['Age_born']\n\n a = 25\n plt.plot(eevalgrid,\n agent.solution[a-age_born].cFuncAdj(eevalgrid/norm_factor[a-age_born])*norm_factor[a-age_born],\n label = 'H0 = {:.2f}'.format(H0))\nplt.xlabel('Wealth')\nplt.ylabel('Consumption')\nplt.title('Consumption Policy Function')\nplt.legend()\nplt.grid()\n\n\nif not in_ipynb():\n plt.show(block=False) \n plt.pause(1)\nelse:\n plt.show(block=True)", "_____no_output_____" ], [ "### Habit\n# Plot habit changes with different Gamma - Figure 2 and 4\nplt.figure()\nfor Gamma in [0, 0.5, 0.8]:\n Lambda = 0.5\n H0=2\n H = np.ones(81)*H0\n #for i in range(len(H)):\n # H[i] = H[i]*(dict_portfolio['RiskyAvg']/Lambda)**(i+1)\n H = H/ Lambda\n H = H ** Gamma\n\n norm_factor = norm_factor_0/H\n \n # In the last period of life you consume everything\n # so portfolio choice is irrelevant\n\n # Ages\n ages = [25]\n age_born = time_params['Age_born']\n\n a = 25\n plt.plot(eevalgrid,\n agent.solution[a-age_born].cFuncAdj(eevalgrid/norm_factor[a-age_born])*norm_factor[a-age_born],\n label = 'Gamma = {:.2f}'.format(Gamma))\nplt.xlabel('Wealth')\nplt.ylabel('Consumption')\nplt.title('Consumption Policy Function')\nplt.legend()\nplt.grid()\n\n\nif not in_ipynb():\n plt.show(block=False) \n plt.pause(1)\nelse:\n plt.show(block=True)", "_____no_output_____" ], [ "### Habit\n# Plot share of wealth with different habit states- Figure 3\nplt.figure()\nfor H0 in [2,4,6, 8]:\n Gamma = 0.8\n Lambda = 0.5\n #H0=0.5\n H = np.ones(81)*H0\n #for i in range(len(H)):\n # H[i] = H[i]/Lambda**(i+1)\n H = H / Lambda\n H = H ** Gamma\n\n norm_factor = norm_factor_0/H\n #norm_factor = norm_factor_0\n # In the last period of life you consume everything\n # so portfolio choice is irrelevant\n\n # Ages\n ages = [25]\n age_born = time_params['Age_born']\n\n a = 25\n plt.plot(eevalgrid,\n agent.solution[a-age_born].ShareFuncAdj(eevalgrid/norm_factor[a-age_born])*norm_factor[a-age_born],\n label = 'H0 = {:.2f}'.format(H0))\nplt.xlabel('Wealth')\nplt.ylabel('Risky portfolio share')\nplt.title('Consumption Policy Function')\nplt.legend()\nplt.grid()\n\n\nif not in_ipynb():\n plt.show(block=False) \n plt.pause(1)\nelse:\n plt.show(block=True)", "_____no_output_____" ], [ "### Habit\n# Plot consumption function with different Lambda - page 739's description\nplt.figure()\nfor Lambda in [0.5, 0.6, 0.7, 0.8]:\n Gamma = 0.8\n H0=2\n H = np.ones(81)*H0\n #for i in range(len(H)):\n # H[i] = H[i]/Lambda**(i+1)\n H = H/Lambda\n H = H ** Gamma\n\n norm_factor = norm_factor_0/H\n \n # In the last period of life you consume everything\n # so portfolio choice is irrelevant\n\n # Ages\n ages = [25]\n age_born = time_params['Age_born']\n\n a = 25\n plt.plot(eevalgrid,\n 
agent.solution[a-age_born].cFuncAdj(eevalgrid/norm_factor[a-age_born])*norm_factor[a-age_born],\n label = 'Lambda ={:.2f}'.format(Lambda))\nplt.xlabel('Wealth')\nplt.ylabel('Consumption')\nplt.title('Consumption Policy Function')\nplt.legend()\nplt.grid()\n\n\nif not in_ipynb():\n plt.show(block=False) \n plt.pause(1)\nelse:\n plt.show(block=True)", "_____no_output_____" ], [ "# Set up simulation parameters\n\n# Number of agents and periods in the simulation.\nagent.AgentCount = 5 # Number of instances of the class to be simulated.\n# Since agents can die, they are replaced by a new agent whenever they do.\n\n# Number of periods to be simulated\nagent.T_sim = 80\n\n# Set up the variables we want to keep track of.\nagent.track_vars = ['aNrmNow','cNrmNow', 'pLvlNow', 't_age', 'ShareNow','mNrmNow']\n\n# Run the simulations\nagent.initializeSim()\nagent.simulate()\n\n# Present diagnostic plots.\nplt.figure()\nplt.plot(agent.t_age_hist+time_params['Age_born'], agent.pLvlNow_hist,'.')\nplt.xlabel('Age')\nplt.ylabel('Permanent income')\nplt.title('Simulated Income Paths')\nplt.grid()\n\nif not in_ipynb():\n plt.show(block=False) \n plt.pause(1)\nelse:\n plt.show(block=True)\n\nplt.figure()\nplt.plot(agent.t_age_hist+time_params['Age_born'], agent.ShareNow_hist,'.')\nplt.xlabel('Age')\nplt.ylabel('Risky share')\nplt.title('Simulated Risky Portfolio Shares')\nplt.grid()\n\nif not in_ipynb():\n plt.show(block=False) \n plt.pause(1)\nelse:\n plt.show(block=True)\n# Number of agents and periods in the simulation.\nagent.AgentCount = 50 # Number of instances of the class to be simulated.\n# Since agents can die, they are replaced by a new agent whenever they do.\n\n# Number of periods to be simulated\nagent.T_sim = 80*50\n\n# Run the simulations\nagent.initializeSim()\nagent.simulate()\n\nraw_data = {'Age': agent.t_age_hist.flatten()+time_params['Age_born'],\n 'pIncome': agent.pLvlNow_hist.flatten(),\n 'rShare': agent.ShareNow_hist.flatten(),\n 'nrmM': agent.mNrmNow_hist.flatten(),\n 'nrmC': agent.cNrmNow_hist.flatten()}\n\nData = pd.DataFrame(raw_data)\nData['Cons'] = Data.nrmC * Data.pIncome\nData['M'] = Data.nrmM * Data.pIncome\n\n# Find the mean of each variable at every age\nAgeMeans = Data.groupby(['Age']).mean().reset_index()\nplt.figure()\nplt.plot(AgeMeans.Age, AgeMeans.pIncome,\n label = 'Income')\nplt.plot(AgeMeans.Age, AgeMeans.M,\n label = 'Market resources')\nplt.plot(AgeMeans.Age, AgeMeans.Cons,\n label = 'Consumption')\nplt.legend()\nplt.xlabel('Age')\nplt.title('Variable Means Conditional on Survival')\nplt.grid()\n\nif not in_ipynb():\n plt.show(block=False) \n plt.pause(1)\nelse:\n plt.show(block=True)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec83fd0cbddf5f7abb69fe68f760fd994e914b39
293,836
ipynb
Jupyter Notebook
Run_ReLayNet.ipynb
axenovkirill/MGU-Net
42b97a611282989222fd205059ba20e64012b498
[ "MIT" ]
2
2021-05-22T14:29:34.000Z
2021-09-06T16:28:51.000Z
Run_ReLayNet.ipynb
axenovkirill/MGU-Net
42b97a611282989222fd205059ba20e64012b498
[ "MIT" ]
null
null
null
Run_ReLayNet.ipynb
axenovkirill/MGU-Net
42b97a611282989222fd205059ba20e64012b498
[ "MIT" ]
null
null
null
396.005391
250,678
0.913047
[ [ [ "## Train ReLayNet\nRunFile of OCT segmentation", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.autograd import Variable\n\nfrom relaynet_pytorch.relay_net import ReLayNet\nfrom relaynet_pytorch.data_utils import get_imdb_data\n\n#torch.set_default_tensor_type('torch.FloatTensor')\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n", "_____no_output_____" ], [ "train_data, test_data = get_imdb_data()\nprint(\"Train size: %i\" % len(train_data))\nprint(\"Test size: %i\" % len(test_data))", "Train size: 88\nTest size: 22\n" ], [ "from relaynet_pytorch.relay_net import ReLayNet\nfrom relaynet_pytorch.solver import Solver\n\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=4, shuffle=True, num_workers=4)\nval_loader = torch.utils.data.DataLoader(test_data, batch_size=4, shuffle=False, num_workers=4)\n\nparam ={\n 'num_channels':1,\n 'num_filters':64,\n 'kernel_h':3,\n 'kernel_w':7,\n 'kernel_c': 1,\n 'stride_conv':1,\n 'pool':2,\n 'stride_pool':2,\n 'num_class':9\n }\n\nexp_dir_name = 'Exp01'\n\nrelaynet_model = ReLayNet(param)\nsolver = Solver(optim_args={\"lr\": 1e-2})\nsolver.train(relaynet_model, train_loader, val_loader, log_nth=1, num_epochs=20, exp_dir_name=exp_dir_name)\n", "START TRAIN.\n[Iteration : 0/20] : 5.555515766143799\n[Iteration : 0/20] : 5.301751613616943\n[Iteration : 0/20] : 4.924797058105469\n[Iteration : 0/20] : 4.538707256317139\n[Iteration : 0/20] : 4.360987663269043\n[Iteration : 0/20] : 3.773261070251465\n[Iteration : 0/20] : 3.7019777297973633\n[Iteration : 0/20] : 3.031557321548462\n[Iteration : 0/20] : 2.907736301422119\n[Iteration : 0/20] : 2.746199131011963\n[Iteration : 0/20] : 2.885462760925293\n[Iteration : 0/20] : 2.487401008605957\n[Iteration : 0/20] : 2.9850611686706543\n[Iteration : 0/20] : 2.087277412414551\n[Iteration : 0/20] : 1.9356580972671509\n[Iteration : 0/20] : 2.203007698059082\n[Iteration : 0/20] : 2.0114009380340576\n[Iteration : 0/20] : 1.8540327548980713\n[Iteration : 0/20] : 1.7670228481292725\n[Iteration : 0/20] : 2.079341173171997\n[Iteration : 0/20] : 1.9924147129058838\n[Iteration : 0/20] : 1.7751386165618896\n[Epoch : 0/20] : 1.7751386165618896\nSaving model... models/Exp01/relaynet_epoch1.model\n[Iteration : 0/20] : 1.7986724376678467\n[Iteration : 0/20] : 1.875913143157959\n[Iteration : 0/20] : 2.1533498764038086\n[Iteration : 0/20] : 1.7493784427642822\n[Iteration : 0/20] : 2.201082706451416\n[Iteration : 0/20] : 1.869706392288208\n[Iteration : 0/20] : 1.8378777503967285\n[Iteration : 0/20] : 1.560670018196106\n[Iteration : 0/20] : 1.6584349870681763\n[Iteration : 0/20] : 1.604021668434143\n[Iteration : 0/20] : 1.4846316576004028\n[Iteration : 0/20] : 1.9723658561706543\n[Iteration : 0/20] : 1.6405377388000488\n[Iteration : 0/20] : 1.5560873746871948\n[Iteration : 0/20] : 1.8667320013046265\n[Iteration : 0/20] : 1.585094928741455\n[Iteration : 0/20] : 1.7776412963867188\n[Iteration : 0/20] : 2.744598388671875\n[Iteration : 0/20] : 1.7418296337127686\n[Iteration : 0/20] : 1.4767569303512573\n[Iteration : 0/20] : 1.710096836090088\n[Iteration : 0/20] : 1.8907406330108643\n[Epoch : 1/20] : 1.8907406330108643\nSaving model... 
models/Exp01/relaynet_epoch2.model\n[Iteration : 0/20] : 1.7107765674591064\n[Iteration : 0/20] : 1.5507049560546875\n[Iteration : 0/20] : 1.9941630363464355\n[Iteration : 0/20] : 1.6587952375411987\n[Iteration : 0/20] : 1.4542620182037354\n[Iteration : 0/20] : 1.929140329360962\n[Iteration : 0/20] : 1.2394392490386963\n[Iteration : 0/20] : 2.34559965133667\n[Iteration : 0/20] : 1.601144552230835\n[Iteration : 0/20] : 1.4527723789215088\n[Iteration : 0/20] : 1.550781011581421\n[Iteration : 0/20] : 1.3879303932189941\n[Iteration : 0/20] : 1.4594905376434326\n[Iteration : 0/20] : 1.3667786121368408\n[Iteration : 0/20] : 1.3557143211364746\n[Iteration : 0/20] : 1.3651939630508423\n[Iteration : 0/20] : 1.4172923564910889\n[Iteration : 0/20] : 1.2711460590362549\n[Iteration : 0/20] : 1.350016474723816\n[Iteration : 0/20] : 1.3307867050170898\n[Iteration : 0/20] : 1.4295603036880493\n[Iteration : 0/20] : 1.2931607961654663\n[Epoch : 2/20] : 1.2931607961654663\nSaving model... models/Exp01/relaynet_epoch3.model\n[Iteration : 0/20] : 1.267971158027649\n[Iteration : 0/20] : 1.349067211151123\n[Iteration : 0/20] : 1.5715603828430176\n[Iteration : 0/20] : 1.833725929260254\n[Iteration : 0/20] : 1.3331764936447144\n[Iteration : 0/20] : 1.3627517223358154\n[Iteration : 0/20] : 1.2533892393112183\n[Iteration : 0/20] : 1.4202988147735596\n[Iteration : 0/20] : 1.3058695793151855\n[Iteration : 0/20] : 1.4351658821105957\n[Iteration : 0/20] : 1.1337890625\n[Iteration : 0/20] : 1.1306803226470947\n[Iteration : 0/20] : 1.3319404125213623\n[Iteration : 0/20] : 1.495457410812378\n[Iteration : 0/20] : 1.2481589317321777\n[Iteration : 0/20] : 1.0315616130828857\n[Iteration : 0/20] : 1.165382981300354\n[Iteration : 0/20] : 1.5637567043304443\n[Iteration : 0/20] : 1.1910799741744995\n[Iteration : 0/20] : 1.1356093883514404\n[Iteration : 0/20] : 1.1993227005004883\n[Iteration : 0/20] : 1.3812406063079834\n[Epoch : 3/20] : 1.3812406063079834\nSaving model... models/Exp01/relaynet_epoch4.model\n[Iteration : 0/20] : 1.1679540872573853\n[Iteration : 0/20] : 1.3684922456741333\n[Iteration : 0/20] : 1.388580083847046\n[Iteration : 0/20] : 1.252425193786621\n[Iteration : 0/20] : 1.0706285238265991\n[Iteration : 0/20] : 1.0120182037353516\n[Iteration : 0/20] : 1.2032220363616943\n[Iteration : 0/20] : 1.1769611835479736\n[Iteration : 0/20] : 1.2560545206069946\n[Iteration : 0/20] : 1.1362289190292358\n[Iteration : 0/20] : 1.2483091354370117\n[Iteration : 0/20] : 1.0599851608276367\n[Iteration : 0/20] : 1.1851189136505127\n[Iteration : 0/20] : 1.2618610858917236\n[Iteration : 0/20] : 1.1196563243865967\n[Iteration : 0/20] : 1.341212511062622\n[Iteration : 0/20] : 1.020642876625061\n[Iteration : 0/20] : 1.200453281402588\n[Iteration : 0/20] : 1.4165971279144287\n[Iteration : 0/20] : 1.085773229598999\n[Iteration : 0/20] : 1.0787615776062012\n[Iteration : 0/20] : 1.0593010187149048\n[Epoch : 4/20] : 1.0593010187149048\nSaving model... 
models/Exp01/relaynet_epoch5.model\n[Iteration : 0/20] : 1.2398406267166138\n[Iteration : 0/20] : 1.1608392000198364\n[Iteration : 0/20] : 1.0419018268585205\n[Iteration : 0/20] : 1.1321663856506348\n[Iteration : 0/20] : 1.0201469659805298\n[Iteration : 0/20] : 1.0344910621643066\n[Iteration : 0/20] : 1.0436021089553833\n[Iteration : 0/20] : 1.1960558891296387\n[Iteration : 0/20] : 1.1313258409500122\n[Iteration : 0/20] : 1.3743772506713867\n[Iteration : 0/20] : 1.1658101081848145\n[Iteration : 0/20] : 1.0492178201675415\n[Iteration : 0/20] : 1.0097702741622925\n[Iteration : 0/20] : 1.6471502780914307\n[Iteration : 0/20] : 1.1473109722137451\n[Iteration : 0/20] : 1.1558754444122314\n[Iteration : 0/20] : 1.1411752700805664\n[Iteration : 0/20] : 1.109445571899414\n[Iteration : 0/20] : 1.0884723663330078\n[Iteration : 0/20] : 1.0849177837371826\n[Iteration : 0/20] : 1.1196112632751465\n[Iteration : 0/20] : 1.031357765197754\n[Epoch : 5/20] : 1.031357765197754\nSaving model... models/Exp01/relaynet_epoch6.model\n[Iteration : 0/20] : 1.0009626150131226\n[Iteration : 0/20] : 1.2514567375183105\n[Iteration : 0/20] : 1.122546911239624\n[Iteration : 0/20] : 1.012147068977356\n[Iteration : 0/20] : 1.073553442955017\n[Iteration : 0/20] : 1.0340933799743652\n[Iteration : 0/20] : 1.0995275974273682\n[Iteration : 0/20] : 1.0158734321594238\n[Iteration : 0/20] : 1.3063781261444092\n[Iteration : 0/20] : 1.3802870512008667\n[Iteration : 0/20] : 1.0465807914733887\n[Iteration : 0/20] : 0.9804433584213257\n[Iteration : 0/20] : 1.1089389324188232\n[Iteration : 0/20] : 1.1390151977539062\n[Iteration : 0/20] : 1.0445001125335693\n[Iteration : 0/20] : 1.0942695140838623\n[Iteration : 0/20] : 1.0242358446121216\n[Iteration : 0/20] : 0.9269423484802246\n[Iteration : 0/20] : 1.1320278644561768\n[Iteration : 0/20] : 0.9735612869262695\n[Iteration : 0/20] : 1.1237901449203491\n[Iteration : 0/20] : 1.0823523998260498\n[Epoch : 6/20] : 1.0823523998260498\nSaving model... models/Exp01/relaynet_epoch7.model\n[Iteration : 0/20] : 1.1878883838653564\n[Iteration : 0/20] : 1.1189167499542236\n[Iteration : 0/20] : 1.0428855419158936\n[Iteration : 0/20] : 0.9669812321662903\n[Iteration : 0/20] : 1.172365427017212\n[Iteration : 0/20] : 1.1195893287658691\n[Iteration : 0/20] : 1.4125458002090454\n[Iteration : 0/20] : 1.1489934921264648\n[Iteration : 0/20] : 1.0186054706573486\n[Iteration : 0/20] : 1.0425786972045898\n[Iteration : 0/20] : 1.1060104370117188\n[Iteration : 0/20] : 1.1320178508758545\n[Iteration : 0/20] : 1.0980162620544434\n[Iteration : 0/20] : 0.8942526578903198\n[Iteration : 0/20] : 0.9753533601760864\n[Iteration : 0/20] : 1.0536985397338867\n[Iteration : 0/20] : 0.9793944954872131\n[Iteration : 0/20] : 0.9524050354957581\n[Iteration : 0/20] : 1.1317079067230225\n[Iteration : 0/20] : 0.922914981842041\n[Iteration : 0/20] : 1.0263391733169556\n[Iteration : 0/20] : 1.381913423538208\n[Epoch : 7/20] : 1.381913423538208\nSaving model... 
models/Exp01/relaynet_epoch8.model\n[Iteration : 0/20] : 0.9857614040374756\n[Iteration : 0/20] : 1.3321713209152222\n[Iteration : 0/20] : 0.9778467416763306\n[Iteration : 0/20] : 1.0376356840133667\n[Iteration : 0/20] : 1.0239832401275635\n[Iteration : 0/20] : 0.9644331932067871\n[Iteration : 0/20] : 0.971788763999939\n[Iteration : 0/20] : 1.006683111190796\n[Iteration : 0/20] : 1.115492343902588\n[Iteration : 0/20] : 1.2529609203338623\n[Iteration : 0/20] : 0.9525652527809143\n[Iteration : 0/20] : 1.1162457466125488\n[Iteration : 0/20] : 1.0150582790374756\n" ] ], [ [ "## Save the Model\n\nWhen you are satisfied with your training, you can save the model.", "_____no_output_____" ] ], [ [ "relaynet_model.save(\"models/relaynet_model.model\")", "Saving model... models/relaynet_model.model\n" ] ], [ [ "# Deploy Model on Test Data", "_____no_output_____" ] ], [ [ "SEG_LABELS_LIST = [\n {\"id\": -1, \"name\": \"void\", \"rgb_values\": [0, 0, 0]},\n {\"id\": 0, \"name\": \"Region above the retina (RaR)\", \"rgb_values\": [128, 0, 0]},\n {\"id\": 1, \"name\": \"ILM: Inner limiting membrane\", \"rgb_values\": [0, 128, 0]},\n {\"id\": 2, \"name\": \"NFL-IPL: Nerve fiber ending to Inner plexiform layer\", \"rgb_values\": [128, 128, 0]},\n {\"id\": 3, \"name\": \"INL: Inner Nuclear layer\", \"rgb_values\": [0, 0, 128]},\n {\"id\": 4, \"name\": \"OPL: Outer plexiform layer\", \"rgb_values\": [128, 0, 128]},\n {\"id\": 5, \"name\": \"ONL-ISM: Outer Nuclear layer to Inner segment myeloid\", \"rgb_values\": [0, 128, 128]},\n {\"id\": 6, \"name\": \"ISE: Inner segment ellipsoid\", \"rgb_values\": [128, 128, 128]},\n {\"id\": 7, \"name\": \"OS-RPE: Outer segment to Retinal pigment epithelium\", \"rgb_values\": [64, 0, 0]},\n {\"id\": 8, \"name\": \"Region below RPE (RbR)\", \"rgb_values\": [192, 0, 0]}];\n #{\"id\": 9, \"name\": \"Fluid region\", \"rgb_values\": [64, 128, 0]}];\n \ndef label_img_to_rgb(label_img):\n label_img = np.squeeze(label_img)\n labels = np.unique(label_img)\n label_infos = [l for l in SEG_LABELS_LIST if l['id'] in labels]\n\n label_img_rgb = np.array([label_img,\n label_img,\n label_img]).transpose(1,2,0)\n for l in label_infos:\n mask = label_img == l['id']\n label_img_rgb[mask] = l['rgb_values']\n\n return label_img_rgb.astype(np.uint8)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport torch.nn.functional as F\n\nrelaynet_model = torch.load('models/Exp01/relaynet_epoch20.model')\nout = relaynet_model(Variable(torch.Tensor(test_data.X[0:1]).cuda(),volatile=True))\nout = F.softmax(out,dim=1)\nmax_val, idx = torch.max(out,1)\nidx = idx.data.cpu().numpy()\nidx = label_img_to_rgb(idx)\nplt.imshow(idx)\nplt.show()\n\nimg_test = test_data.X[0:1]\nimg_test = np.squeeze(img_test)\nplt.imshow(img_test)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ec8402a8c9bb6a0b38cdabe27e4e66055e14b16b
80,255
ipynb
Jupyter Notebook
ml-regression/week-5/week-5-lasso-assignment-1-blank.ipynb
zomansud/coursera
8b63eda4194241edc0c493fb74ca6834c9d0792d
[ "MIT" ]
null
null
null
ml-regression/week-5/week-5-lasso-assignment-1-blank.ipynb
zomansud/coursera
8b63eda4194241edc0c493fb74ca6834c9d0792d
[ "MIT" ]
null
null
null
ml-regression/week-5/week-5-lasso-assignment-1-blank.ipynb
zomansud/coursera
8b63eda4194241edc0c493fb74ca6834c9d0792d
[ "MIT" ]
1
2021-08-10T20:05:24.000Z
2021-08-10T20:05:24.000Z
49.847826
286
0.52307
[ [ [ "# Regression Week 5: Feature Selection and LASSO (Interpretation)", "_____no_output_____" ], [ "In this notebook, you will use LASSO to select features, building on a pre-implemented solver for LASSO (using GraphLab Create, though you can use other solvers). You will:\n* Run LASSO with different L1 penalties.\n* Choose best L1 penalty using a validation set.\n* Choose best L1 penalty using a validation set, with additional constraint on the size of subset.\n\nIn the second notebook, you will implement your own LASSO solver, using coordinate descent. ", "_____no_output_____" ], [ "# Fire up graphlab create", "_____no_output_____" ] ], [ [ "import graphlab", "_____no_output_____" ] ], [ [ "# Load in house sales data\n\nDataset is from house sales in King County, the region where the city of Seattle, WA is located.", "_____no_output_____" ] ], [ [ "sales = graphlab.SFrame('kc_house_data.gl/')", "[INFO] graphlab.cython.cy_server: GraphLab Create v2.1 started. Logging: /tmp/graphlab_server_1476930985.log\n" ] ], [ [ "# Create new features", "_____no_output_____" ] ], [ [ "sales.head()", "_____no_output_____" ] ], [ [ "As in Week 2, we consider features that are some transformations of inputs.", "_____no_output_____" ] ], [ [ "from math import log, sqrt\nsales['sqft_living_sqrt'] = sales['sqft_living'].apply(sqrt)\nsales['sqft_lot_sqrt'] = sales['sqft_lot'].apply(sqrt)\nsales['bedrooms_square'] = sales['bedrooms']*sales['bedrooms']\n\n# In the dataset, 'floors' was defined with type string, \n# so we'll convert them to float, before creating a new feature.\nsales['floors'] = sales['floors'].astype(float) \nsales['floors_square'] = sales['floors']*sales['floors']", "_____no_output_____" ] ], [ [ "* Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this variable will mostly affect houses with many bedrooms.\n* On the other hand, taking square root of sqft_living will decrease the separation between big house and small house. The owner may not be exactly twice as happy for getting a house that is twice as big.", "_____no_output_____" ], [ "# Learn regression weights with L1 penalty", "_____no_output_____" ], [ "Let us fit a model with all the features available, plus the features we just created above.", "_____no_output_____" ] ], [ [ "all_features = ['bedrooms', 'bedrooms_square',\n 'bathrooms',\n 'sqft_living', 'sqft_living_sqrt',\n 'sqft_lot', 'sqft_lot_sqrt',\n 'floors', 'floors_square',\n 'waterfront', 'view', 'condition', 'grade',\n 'sqft_above',\n 'sqft_basement',\n 'yr_built', 'yr_renovated']", "_____no_output_____" ] ], [ [ "Applying L1 penalty requires adding an extra parameter (`l1_penalty`) to the linear regression call in GraphLab Create. (Other tools may have separate implementations of LASSO.) Note that it's important to set `l2_penalty=0` to ensure we don't introduce an additional L2 penalty.", "_____no_output_____" ] ], [ [ "model_all = graphlab.linear_regression.create(sales, target='price', features=all_features,\n validation_set=None, \n l2_penalty=0., l1_penalty=1e10)", "_____no_output_____" ] ], [ [ "Find what features had non-zero weight.", "_____no_output_____" ] ], [ [ "model_all.get('coefficients')[model_all.get('coefficients')['value'] > 0.0]", "_____no_output_____" ] ], [ [ "Note that a majority of the weights have been set to zero. So by setting an L1 penalty that's large enough, we are performing a subset selection. 
\n\n***QUIZ QUESTION***:\nAccording to this list of weights, which of the features have been chosen? ", "_____no_output_____" ], [ "# Selecting an L1 penalty", "_____no_output_____" ], [ "To find a good L1 penalty, we will explore multiple values using a validation set. Let us do three way split into train, validation, and test sets:\n* Split our sales data into 2 sets: training and test\n* Further split our training data into two sets: train, validation\n\nBe *very* careful that you use seed = 1 to ensure you get the same answer!", "_____no_output_____" ] ], [ [ "(training_and_validation, testing) = sales.random_split(.9,seed=1) # initial train/test split\n(training, validation) = training_and_validation.random_split(0.5, seed=1) # split training into train and validate", "_____no_output_____" ] ], [ [ "Next, we write a loop that does the following:\n* For `l1_penalty` in [10^1, 10^1.5, 10^2, 10^2.5, ..., 10^7] (to get this in Python, type `np.logspace(1, 7, num=13)`.)\n * Fit a regression model with a given `l1_penalty` on TRAIN data. Specify `l1_penalty=l1_penalty` and `l2_penalty=0.` in the parameter list.\n * Compute the RSS on VALIDATION data (here you will want to use `.predict()`) for that `l1_penalty`\n* Report which `l1_penalty` produced the lowest RSS on validation data.\n\nWhen you call `linear_regression.create()` make sure you set `validation_set = None`.\n\nNote: you can turn off the print out of `linear_regression.create()` with `verbose = False`", "_____no_output_____" ] ], [ [ "validation_rss_avg_list = []\nbest_l1_penalty = 1\nmin_rss = float(\"inf\")\nimport numpy as np\nfor l1_penalty in np.logspace(1, 7, num=13):\n model = graphlab.linear_regression.create(training, target='price', features=all_features,\n validation_set=None, \n l2_penalty=0., l1_penalty=l1_penalty, verbose=False)\n \n # find validation error\n prediction = model.predict(validation[all_features])\n error = prediction - validation['price']\n error_squared = error * error\n rss = error_squared.sum()\n print \"L1 penalty \" + str(l1_penalty) + \" validation rss = \" + str(rss)\n \n if (rss < min_rss):\n min_rss = rss\n best_l1_penalty = l1_penalty\n validation_rss_avg_list.append(rss)\n\n\nprint \"Best L1 penalty \" + str(best_l1_penalty) + \" validation rss = \" + str(min_rss)\nvalidation_rss_avg_list", "L1 penalty 10.0 validation rss = 6.25766285142e+14\nL1 penalty 31.6227766017 validation rss = 6.25766285362e+14\nL1 penalty 100.0 validation rss = 6.25766286058e+14\nL1 penalty 316.227766017 validation rss = 6.25766288257e+14\nL1 penalty 1000.0 validation rss = 6.25766295212e+14\nL1 penalty 3162.27766017 validation rss = 6.25766317206e+14\nL1 penalty 10000.0 validation rss = 6.25766386761e+14\nL1 penalty 31622.7766017 validation rss = 6.25766606749e+14\nL1 penalty 100000.0 validation rss = 6.25767302792e+14\nL1 penalty 316227.766017 validation rss = 6.25769507644e+14\nL1 penalty 1000000.0 validation rss = 6.25776517727e+14\nL1 penalty 3162277.66017 validation rss = 6.25799062845e+14\nL1 penalty 10000000.0 validation rss = 6.25883719085e+14\nBest L1 penalty 10.0 validation rss = 6.25766285142e+14\n" ], [ "np.logspace(1, 7, num=13)", "_____no_output_____" ] ], [ [ "*** QUIZ QUESTIONS ***\n1. What was the best value for the `l1_penalty`?\n2. 
What is the RSS on TEST data of the model with the best `l1_penalty`?", "_____no_output_____" ] ], [ [ "best_l1_penalty", "_____no_output_____" ], [ "model_best = graphlab.linear_regression.create(training, target='price', features=all_features,\n                                              validation_set=None, \n                                              l2_penalty=0., l1_penalty=best_l1_penalty, verbose=False)", "_____no_output_____" ] ], [ [ "***QUIZ QUESTION***\nAlso, using this value of L1 penalty, how many nonzero weights do you have?", "_____no_output_____" ] ], [ [ "len(model_best.get('coefficients')[model_best.get('coefficients')['value'] > 0.0])", "_____no_output_____" ] ], [ [ "# Limit the number of nonzero weights\n\nWhat if we absolutely wanted to limit ourselves to, say, 7 features? This may be important if we want to derive \"a rule of thumb\" --- an interpretable model that has only a few features in it.", "_____no_output_____" ], [ "In this section, you are going to implement a simple, two-phase procedure to achieve this goal:\n1. Explore a large range of `l1_penalty` values to find a narrow region of `l1_penalty` values where models are likely to have the desired number of non-zero weights.\n2. Further explore the narrow region you found to find a good value for `l1_penalty` that achieves the desired sparsity. Here, we will again use a validation set to choose the best value for `l1_penalty`.", "_____no_output_____" ] ], [ [ "max_nonzeros = 7", "_____no_output_____" ] ], [ [ "## Exploring the larger range of values to find a narrow range with the desired sparsity\n\nLet's define a wide range of possible `l1_penalty_values`:", "_____no_output_____" ] ], [ [ "l1_penalty_values = np.logspace(8, 10, num=20)", "_____no_output_____" ] ], [ [ "Now, implement a loop that searches through this space of possible `l1_penalty` values:\n\n* For `l1_penalty` in `np.logspace(8, 10, num=20)`:\n    * Fit a regression model with a given `l1_penalty` on TRAIN data. Specify `l1_penalty=l1_penalty` and `l2_penalty=0.` in the parameter list. When you call `linear_regression.create()` make sure you set `validation_set = None`\n    * Extract the weights of the model and count the number of nonzeros. Save the number of nonzeros to a list.\n    * *Hint: `model['coefficients']['value']` gives you an SArray with the parameters you learned. 
If you call the method `.nnz()` on it, you will find the number of non-zero parameters!* ", "_____no_output_____" ] ], [ [ "nnz_list = []\nfor l1_penalty in np.logspace(8, 10, num=20):\n model = graphlab.linear_regression.create(training, target='price', features=all_features,\n validation_set=None, \n l2_penalty=0., l1_penalty=l1_penalty, verbose=False)\n \n # extract number of nnz\n nnz = model['coefficients']['value'].nnz()\n \n print \"L1 penalty \" + str(l1_penalty) + \" : # nnz = \" + str(nnz)\n\n nnz_list.append(nnz)\n\n\nnnz_list", "L1 penalty 100000000.0 : # nnz = 18\nL1 penalty 127427498.57 : # nnz = 18\nL1 penalty 162377673.919 : # nnz = 18\nL1 penalty 206913808.111 : # nnz = 18\nL1 penalty 263665089.873 : # nnz = 17\nL1 penalty 335981828.628 : # nnz = 17\nL1 penalty 428133239.872 : # nnz = 17\nL1 penalty 545559478.117 : # nnz = 17\nL1 penalty 695192796.178 : # nnz = 17\nL1 penalty 885866790.41 : # nnz = 16\nL1 penalty 1128837891.68 : # nnz = 15\nL1 penalty 1438449888.29 : # nnz = 15\nL1 penalty 1832980710.83 : # nnz = 13\nL1 penalty 2335721469.09 : # nnz = 12\nL1 penalty 2976351441.63 : # nnz = 10\nL1 penalty 3792690190.73 : # nnz = 6\nL1 penalty 4832930238.57 : # nnz = 5\nL1 penalty 6158482110.66 : # nnz = 3\nL1 penalty 7847599703.51 : # nnz = 1\nL1 penalty 10000000000.0 : # nnz = 1\n" ] ], [ [ "Out of this large range, we want to find the two ends of our desired narrow range of `l1_penalty`. At one end, we will have `l1_penalty` values that have too few non-zeros, and at the other end, we will have an `l1_penalty` that has too many non-zeros. \n\nMore formally, find:\n* The largest `l1_penalty` that has more non-zeros than `max_nonzero` (if we pick a penalty smaller than this value, we will definitely have too many non-zero weights)\n * Store this value in the variable `l1_penalty_min` (we will use it later)\n* The smallest `l1_penalty` that has fewer non-zeros than `max_nonzero` (if we pick a penalty larger than this value, we will definitely have too few non-zero weights)\n * Store this value in the variable `l1_penalty_max` (we will use it later)\n\n\n*Hint: there are many ways to do this, e.g.:*\n* Programmatically within the loop above\n* Creating a list with the number of non-zeros for each value of `l1_penalty` and inspecting it to find the appropriate boundaries.", "_____no_output_____" ] ], [ [ "l1_penalty_min = 2976351441.63\nl1_penalty_max = 3792690190.73", "_____no_output_____" ] ], [ [ "***QUIZ QUESTIONS***\n\nWhat values did you find for `l1_penalty_min` and`l1_penalty_max`? ", "_____no_output_____" ], [ "## Exploring the narrow range of values to find the solution with the right number of non-zeros that has lowest RSS on the validation set \n\nWe will now explore the narrow region of `l1_penalty` values we found:", "_____no_output_____" ] ], [ [ "l1_penalty_values = np.linspace(l1_penalty_min,l1_penalty_max,20)", "_____no_output_____" ] ], [ [ "* For `l1_penalty` in `np.linspace(l1_penalty_min,l1_penalty_max,20)`:\n * Fit a regression model with a given `l1_penalty` on TRAIN data. Specify `l1_penalty=l1_penalty` and `l2_penalty=0.` in the parameter list. 
When you call `linear_regression.create()` make sure you set `validation_set = None`\n * Measure the RSS of the learned model on the VALIDATION set\n\nFind the model that the lowest RSS on the VALIDATION set and has sparsity *equal* to `max_nonzero`.", "_____no_output_____" ] ], [ [ "nnz_list = []\nvalidation_rss_avg_list = []\nbest_l1_penalty = 1\nmin_rss = float(\"inf\")\nimport numpy as np\nfor l1_penalty in np.linspace(l1_penalty_min,l1_penalty_max,20):\n model = graphlab.linear_regression.create(training, target='price', features=all_features,\n validation_set=None, \n l2_penalty=0., l1_penalty=l1_penalty, verbose=False)\n \n # find validation error\n prediction = model.predict(validation[all_features])\n error = prediction - validation['price']\n error_squared = error * error\n rss = error_squared.sum()\n print \"L1 penalty \" + str(l1_penalty) + \" validation rss = \" + str(rss)\n \n # extract number of nnz\n nnz = model['coefficients']['value'].nnz()\n \n print \"L1 penalty \" + str(l1_penalty) + \" : # nnz = \" + str(nnz)\n\n nnz_list.append(nnz)\n \n print \"----------------------------------------------------------\"\n \n if (nnz == max_nonzeros and rss < min_rss):\n min_rss = rss\n best_l1_penalty = l1_penalty\n validation_rss_avg_list.append(rss)\n\nprint \"Best L1 penalty \" + str(best_l1_penalty) + \" validation rss = \" + str(min_rss)", "L1 penalty 2976351441.63 validation rss = 9.66925692362e+14\nL1 penalty 2976351441.63 : # nnz = 10\n----------------------------------------------------------\nL1 penalty 3019316638.95 validation rss = 9.74019450085e+14\nL1 penalty 3019316638.95 : # nnz = 10\n----------------------------------------------------------\nL1 penalty 3062281836.27 validation rss = 9.81188367942e+14\nL1 penalty 3062281836.27 : # nnz = 10\n----------------------------------------------------------\nL1 penalty 3105247033.59 validation rss = 9.89328342459e+14\nL1 penalty 3105247033.59 : # nnz = 10\n----------------------------------------------------------\nL1 penalty 3148212230.91 validation rss = 9.98783211266e+14\nL1 penalty 3148212230.91 : # nnz = 10\n----------------------------------------------------------\nL1 penalty 3191177428.24 validation rss = 1.00847716702e+15\nL1 penalty 3191177428.24 : # nnz = 10\n----------------------------------------------------------\nL1 penalty 3234142625.56 validation rss = 1.01829878055e+15\nL1 penalty 3234142625.56 : # nnz = 10\n----------------------------------------------------------\nL1 penalty 3277107822.88 validation rss = 1.02824799221e+15\nL1 penalty 3277107822.88 : # nnz = 10\n----------------------------------------------------------\nL1 penalty 3320073020.2 validation rss = 1.03461690923e+15\nL1 penalty 3320073020.2 : # nnz = 8\n----------------------------------------------------------\nL1 penalty 3363038217.52 validation rss = 1.03855473594e+15\nL1 penalty 3363038217.52 : # nnz = 8\n----------------------------------------------------------\nL1 penalty 3406003414.84 validation rss = 1.04323723787e+15\nL1 penalty 3406003414.84 : # nnz = 8\n----------------------------------------------------------\nL1 penalty 3448968612.16 validation rss = 1.04693748875e+15\nL1 penalty 3448968612.16 : # nnz = 7\n----------------------------------------------------------\nL1 penalty 3491933809.48 validation rss = 1.05114762561e+15\nL1 penalty 3491933809.48 : # nnz = 7\n----------------------------------------------------------\nL1 penalty 3534899006.8 validation rss = 1.05599273534e+15\nL1 penalty 3534899006.8 : # nnz = 
7\n----------------------------------------------------------\nL1 penalty 3577864204.12 validation rss = 1.06079953176e+15\nL1 penalty 3577864204.12 : # nnz = 7\n----------------------------------------------------------\nL1 penalty 3620829401.45 validation rss = 1.0657076895e+15\nL1 penalty 3620829401.45 : # nnz = 6\n----------------------------------------------------------\nL1 penalty 3663794598.77 validation rss = 1.06946433543e+15\nL1 penalty 3663794598.77 : # nnz = 6\n----------------------------------------------------------\nL1 penalty 3706759796.09 validation rss = 1.07350454959e+15\nL1 penalty 3706759796.09 : # nnz = 6\n----------------------------------------------------------\nL1 penalty 3749724993.41 validation rss = 1.07763277558e+15\nL1 penalty 3749724993.41 : # nnz = 6\n----------------------------------------------------------\nL1 penalty 3792690190.73 validation rss = 1.08186759232e+15\nL1 penalty 3792690190.73 : # nnz = 6\n----------------------------------------------------------\nBest L1 penalty 3448968612.16 validation rss = 1.04693748875e+15\n" ] ], [ [ "***QUIZ QUESTIONS***\n1. What value of `l1_penalty` in our narrow range has the lowest RSS on the VALIDATION set and has sparsity *equal* to `max_nonzeros`?\n2. What features in this model have non-zero coefficients?", "_____no_output_____" ] ], [ [ "model_best = graphlab.linear_regression.create(training, target='price', features=all_features,\n validation_set=None, \n l2_penalty=0., l1_penalty=best_l1_penalty, verbose=False)\nmodel_best.get('coefficients')[model_best.get('coefficients')['value'] > 0.0]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec84063a3bf9c5658833d75ec4ff7189b781ddea
3,731
ipynb
Jupyter Notebook
dynamic-programming/max_sum_increasing_subsequence.ipynb
codacy-badger/algorithms-1
bad63e6ec73c7196c3378d26ef3dbb9e172940e8
[ "MIT" ]
8
2019-08-19T21:43:44.000Z
2021-01-24T20:45:49.000Z
dynamic-programming/max_sum_increasing_subsequence.ipynb
codacy-badger/algorithms-1
bad63e6ec73c7196c3378d26ef3dbb9e172940e8
[ "MIT" ]
74
2019-10-23T21:13:54.000Z
2021-01-26T22:24:13.000Z
dynamic-programming/max_sum_increasing_subsequence.ipynb
codacy-badger/algorithms-1
bad63e6ec73c7196c3378d26ef3dbb9e172940e8
[ "MIT" ]
1
2022-01-21T12:02:33.000Z
2022-01-21T12:02:33.000Z
29.611111
226
0.535781
[ [ [ "### Max Sum Increasing Subsequence\nWrite a function that takes in a non-empty array of integers and returns the greatest sum that can be generated from a strictly increasing subsequence in the array, as well as an array of the numbers in that sequence.\n\nA subsequence of an array is a set of numbers that aren't necessarily adjacent to each other in the array but that are in the same order as they appear in the array.\n\nFor instance, [1, 2, 4] form a subsequence of array [1, 5, 2, 0, 4].\n\nSample input:\n```python\narray = [1, 7, 2, 3, 5, 1, 3]\n```\n\nSample output:\n```python\n[11, [1, 2, 3, 5]]\n```\n", "_____no_output_____" ] ], [ [ "def maxSumIncreasingSubsequence(array):\n \"\"\"\n We'll use DP to create an array of same length to store maxsum generated by all\n elements before a given index, including the value on the index. \n Then, we'll keep track of potential sequences in another array. Here, we save the index\n of the previous element that contributed to the maxsum at the present index's position.\n \n O(n^2) time | O(n) space\n \n \"\"\"\n sums = [num for num in array]\n sequences = [None for i in array]\n \n # store the index that contains the max sum\n maxSumIndex = 0\n for i in range(len(array)):\n for j in range(0, i):\n currentSum = sums[j] + array[i]\n if array[j] < array[i] and currentSum >= sums[i]:\n sums[i] = currentSum\n # store the position of the index that has influenced the current sum\n sequences[i] = j\n # update the maxSumIndex if a bigger sum exists in sums array\n maxSumIndex = i if sums[i] >= sums[maxSumIndex] else maxSumIndex\n \n return [sums[maxSumIndex], buildSequence(array, sequences, maxSumIndex)]\n ", "_____no_output_____" ], [ "def buildSequence(array, sequences, currentIndex):\n \"\"\"Backtrack while appending the values of the indices we saved\"\"\"\n sequence = []\n while currentIndex is not None:\n sequence.append(array[currentIndex])\n currentIndex = sequences[currentIndex]\n return list(reversed(sequence))", "_____no_output_____" ], [ "array = [1, 7, 2, 3, 5, 1, 3]\nmaxSumIncreasingSubsequence(array)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
ec840ed08e8efdcdc9522446ab666d957533dd6d
5,834
ipynb
Jupyter Notebook
Regression Templates for large datasets/Random Forest Regression/random_forest_regression_template_samrat.ipynb
World-of-ML-AI/Machine-learning-a-to-z
07c1dbb0b5f7d356c7674c3989103041b5f51f9d
[ "MIT" ]
1
2021-12-13T21:12:30.000Z
2021-12-13T21:12:30.000Z
Regression Templates for large datasets/Random Forest Regression/random_forest_regression_template_samrat.ipynb
World-of-ML-AI/Machine-learning-a-to-z
07c1dbb0b5f7d356c7674c3989103041b5f51f9d
[ "MIT" ]
null
null
null
Regression Templates for large datasets/Random Forest Regression/random_forest_regression_template_samrat.ipynb
World-of-ML-AI/Machine-learning-a-to-z
07c1dbb0b5f7d356c7674c3989103041b5f51f9d
[ "MIT" ]
null
null
null
26.39819
352
0.468632
[ [ [ "<a href=\"https://colab.research.google.com/github/lionelsamrat10/machine-learning-a-to-z/blob/main/Regression%20Templates%20for%20large%20datasets/Random%20Forest%20Regression/random_forest_regression_template_samrat.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Random Forest Regression", "_____no_output_____" ], [ "## Importing the libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Importing the dataset", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv('Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values", "_____no_output_____" ] ], [ [ "## Splitting the dataset into the Training set and Test set", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)", "_____no_output_____" ] ], [ [ "## Training the Random Forest Regression model on the whole dataset", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\nregressor = RandomForestRegressor(n_estimators = 10, random_state = 0)\nregressor.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "## Predicting the Test set results", "_____no_output_____" ] ], [ [ "y_pred = regressor.predict(X_test)\nnp.set_printoptions(precision=2)\nprint(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))", "[[433.78 431.23]\n [457.99 460.01]\n [463.14 461.14]\n ...\n [470.16 473.26]\n [439.51 438. ]\n [460.32 463.28]]\n" ] ], [ [ "## Evaluating the Model Performance", "_____no_output_____" ] ], [ [ "from sklearn.metrics import r2_score\nr2_score(y_test, y_pred)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec842042d4290848e24d86f2d8a7066ef9233802
3,433
ipynb
Jupyter Notebook
Deep-Learning/Notebooks/Main Script - HAWK EYE TUNISIA.ipynb
ghassenetanabene6/Vehicle-Recognition-System-in-Tunisia
3d34d8ca535f73a0be4107483c0cc7fcfa1806b3
[ "CNRI-Python" ]
null
null
null
Deep-Learning/Notebooks/Main Script - HAWK EYE TUNISIA.ipynb
ghassenetanabene6/Vehicle-Recognition-System-in-Tunisia
3d34d8ca535f73a0be4107483c0cc7fcfa1806b3
[ "CNRI-Python" ]
null
null
null
Deep-Learning/Notebooks/Main Script - HAWK EYE TUNISIA.ipynb
ghassenetanabene6/Vehicle-Recognition-System-in-Tunisia
3d34d8ca535f73a0be4107483c0cc7fcfa1806b3
[ "CNRI-Python" ]
null
null
null
23.195946
135
0.572677
[ [ [ "# Main Script\n", "_____no_output_____" ], [ "This module is the main script that imports the modules : \n- Licence Place Detection and Extraction - HAWK EYE TUNISIA\n- Licence Place Recognition - HAWK EYE TUNISIA\n\nInput: \n a vehicle image that will be read by the LP_detection module \n\nOutput : \n the same vehicle input image containing the result of the detection and the recognition modules. \n \nAn Example of use : \nAfter saving the detection module in a object_detection_yolo.py and the recognition module in Hawk_Eye_LP_recognition.py file,\nwe write this module in a main_Hawk_Eye.py file and run it with the cmd through this command line : \n\n$python main_Hawk_Eye.py --image=path", "_____no_output_____" ], [ "\n!!! Note: You should verify every path in this file before using it.\n", "_____no_output_____" ], [ "## Import of necessary libraries", "_____no_output_____" ] ], [ [ "import object_detection_yolo as LP_detection\nimport Hawk_Eye_LP_recognition as LP_reco\nfrom cv2 import imwrite", "_____no_output_____" ] ], [ [ "## 1- Licence plate detection", "_____no_output_____" ] ], [ [ "LP_extracted,newImage,top=LP_detection.LP_detection()", "_____no_output_____" ] ], [ [ "## 2- Characters recognition", "_____no_output_____" ] ], [ [ "final_img=LP_reco.LP_recognition(LP_extracted,newImage,top)", "_____no_output_____" ] ], [ [ "## 3- Saving the final result", "_____no_output_____" ] ], [ [ "path_to_final_img=\"D:\\\\Hawk_Eye_version_1.0_LP_recog\\\\Hawk_Eye_version_1.0_LP_recog\\\\final_image.jpg\"\nimwrite(path_to_final_img,final_img)\n#showing result\n#cv2.imshow(\"Finally You Win!!\",final_img)\n#cv2.waitKey(0)\nprint(path_to_final_img)", "_____no_output_____" ] ], [ [ "## Written By : Ghassene Tanabene - September 2020\n#### Intern at Chambi Eagle Technology\n#### From the National School of Computer Sciences - ENSI", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec84268e6573f95c85218cb9e343f48846e49fc2
11,101
ipynb
Jupyter Notebook
examples/Alternate labeling for embeddings example - Matt copy.ipynb
samghelms/mathviz
30fe89537379faea4de8c8b568ac6e52e4d15353
[ "MIT" ]
2
2018-04-12T07:46:20.000Z
2020-10-07T03:31:20.000Z
examples/Alternate labeling for embeddings example - Matt copy.ipynb
samghelms/mathviz
30fe89537379faea4de8c8b568ac6e52e4d15353
[ "MIT" ]
5
2020-03-24T15:41:54.000Z
2021-06-01T22:06:36.000Z
examples/Alternate labeling for embeddings example - Matt copy.ipynb
samghelms/mathviz
30fe89537379faea4de8c8b568ac6e52e4d15353
[ "MIT" ]
null
null
null
26.181604
666
0.496892
[ [ [ "import pandas as pd\nimport numpy as np\nfrom utils import read_file, tokenize_latex\nfrom MulticoreTSNE import MulticoreTSNE as TSNE\n", "_____no_output_____" ], [ "import cPickle as pickle\neq_idx = pickle.load(open('data/numbered_embeddings/eq-idx.pkl', 'rb'))\neq_svd_embed = pickle.load(open('data/numbered_embeddings/eq-svd-embed.pkl', 'rb'))", "_____no_output_____" ], [ "tsne = TSNE(n_jobs=4)\nY = tsne.fit_transform(eq_svd_embed)", "_____no_output_____" ], [ "type(Y)", "_____no_output_____" ], [ "df_embeddings = pd.DataFrame(Y)\ndf_embeddings = df_embeddings.reset_index()\ndf_embeddings.columns = [\"matrix_index\", \"x\", \"y\"]", "_____no_output_____" ], [ "import fnmatch\nimport os\ndef read_file_names_recursive(folder):\n matches = []\n for root, dirnames, filenames in os.walk(folder):\n for filename in fnmatch.filter(filenames, '*.tex'):\n matches.append(os.path.join(root, filename))\n return matches", "_____no_output_____" ], [ "fnames = read_file_names_recursive('data/numbered_embeddings/raw/')", "_____no_output_____" ], [ "def read_files(fnames):\n tex = {}\n for fname in fnames:\n f = open(fname).read()\n name = fname.split(\"/\")[-1]\n name = name.strip(\".tex\")\n tex[name] = f\n return tex", "_____no_output_____" ], [ "tex = read_files(fnames)", "_____no_output_____" ], [ "tex_df = pd.DataFrame.from_dict(tex, orient=\"index\")\ntex_df = tex_df.reset_index()\ntex_df.columns = [\"eq_id\", \"tex\"]\n\neq_idx_df = pd.DataFrame(eq_idx).reset_index()\neq_idx_df.columns = [\"matrix_index\", \"eq_id\"]\n\nall_data = pd.merge(tex_df, eq_idx_df, on = \"eq_id\")", "_____no_output_____" ], [ "all_data_tsne = pd.merge(all_data, df_embeddings, on = \"matrix_index\")", "_____no_output_____" ], [ "all_data_tsne.set_index(\"matrix_index\").to_json(\"tnse_embeddings_dev.json\", orient = \"index\")", "_____no_output_____" ], [ "class Query:\n def __init__(self, term_matrix, docs, docs_dict, dictionary, cols, k):\n self.term_matrix = term_matrix\n self.docs = docs\n self.docs_dict = docs_dict\n self.dictionary = dictionary\n self.columns = cols\n self.k = k\n \n def _get_terms(self, vals):\n terms = []\n for i, v in vals:\n print i\n print v\n try:\n term = self.docs_dict[i]\n term[\"sim\"] = v\n terms.append(term)\n except:\n print \"couldn't find\"\n return terms\n def query(self, query):\n idx = self.dictionary[query][\"matrix_index\"]\n vec = self.term_matrix[idx]\n \n idc, vals = self._vectorized_query(self.term_matrix, vec, self.k)\n \n zipped_vals= zip(idc, vals)\n vals = sorted(zipped_vals, key = lambda x: x[1])\n terms = self._get_terms(vals[:self.k])\n \n neighbors = {\"neighbors\":[{\"Equation Number\": {\"data\": t[\"eq_id\"]}, \n \"Equation\": {\"data\": t[\"tex\"], \"fmt\": \"math\"}, \n \"Similarity\": {\"data\": t[\"sim\"]}} for t in terms]} if terms else {\"neighbors\": []}\n return neighbors\n \n def _vectorized_query(self, term_matrix, word_vector, k):\n dots = np.dot(term_matrix, word_vector)\n l2norms = np.sqrt(((term_matrix**2).sum(1)[:,None])*((word_vector**2).sum(0)))\n cosine_dists = 1 - (dots[:,None]/l2norms)\n cosine_dists = cosine_dists.reshape(dots.shape)\n idx = np.argpartition(cosine_dists, k)\n vals = cosine_dists[idx[:k]]\n return list(idx[:k].flatten()), list(vals.flatten())", "_____no_output_____" ], [ "dictionary = all_data.set_index(\"eq_id\").to_dict(orient=\"index\")\ndocs_dict = all_data.set_index(\"matrix_index\").to_dict(orient=\"index\")\ndocs = all_data[\"eq_id\"].values\nq = Query(eq_svd_embed, docs, docs_dict, dictionary, [\"Equation 
Number\", \"Equation\", \"Similarity\"], 20)", "_____no_output_____" ], [ "# example to test the query class\n# q.query(\"34.4.E1\")", "_____no_output_____" ], [ "from mathviz_hopper.src.table import Table\nt = Table(q, 8082)\nt.print_ipython()", "Bottle v0.13-dev server starting up (using MyWSGIRefServer())...\nListening on http://localhost:8082/\nHit Ctrl-C to quit.\n\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec842dfc699ccc51a78916953fcae9fd24ca8236
139,450
ipynb
Jupyter Notebook
Thalessians Python Course/Python-for-Data-Science-and-Artificial-Intelligence/Lecture4a_ScikitLearn.ipynb
TensorMan/training-and-reference
68d2dea416e10bfe5b2a9b47b1794ce5c2b65371
[ "Apache-2.0" ]
null
null
null
Thalessians Python Course/Python-for-Data-Science-and-Artificial-Intelligence/Lecture4a_ScikitLearn.ipynb
TensorMan/training-and-reference
68d2dea416e10bfe5b2a9b47b1794ce5c2b65371
[ "Apache-2.0" ]
null
null
null
Thalessians Python Course/Python-for-Data-Science-and-Artificial-Intelligence/Lecture4a_ScikitLearn.ipynb
TensorMan/training-and-reference
68d2dea416e10bfe5b2a9b47b1794ce5c2b65371
[ "Apache-2.0" ]
null
null
null
114.303279
75,480
0.859226
[ [ [ "![image.png](attachment:image.png)", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "import numpy as np\nfrom sklearn import linear_model, neural_network\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.preprocessing import StandardScaler\nimport pylab", "_____no_output_____" ], [ "# https://people.sc.fsu.edu/~jburkardt/datasets/regression/x08.txt", "_____no_output_____" ], [ "x = [[ 587000, 16.5, 6.2],\n [ 643000, 20.5, 6.4],\n [ 635000, 26.3, 9.3],\n [ 692000, 16.5, 5.3],\n [1248000, 19.2, 7.3],\n [ 643000, 16.5, 5.9],\n [1964000, 20.2, 6.4],\n [1531000, 21.3, 7.6],\n [ 713000, 17.2, 4.9],\n [ 749000, 14.3, 6.4],\n [7895000, 18.1, 6.0],\n [ 762000, 23.1, 7.4],\n [2793000, 19.1, 5.8],\n [ 741000, 24.7, 8.6],\n [ 625000, 18.6, 6.5],\n [ 854000, 24.9, 8.3],\n [ 716000, 17.9, 6.7],\n [ 921000, 22.4, 8.6],\n [ 595000, 20.2, 8.4],\n [3353000, 16.9, 6.7]]", "_____no_output_____" ], [ "y = [[11.2],\n [13.4],\n [40.7],\n [ 5.3],\n [24.8],\n [12.7],\n [20.9],\n [35.7],\n [ 8.7],\n [ 9.6],\n [14.5],\n [26.9],\n [15.7],\n [36.2],\n [18.1],\n [28.9],\n [14.9],\n [25.8],\n [21.7],\n [25.7]]", "_____no_output_____" ], [ "x = np.array(x)", "_____no_output_____" ], [ "y = np.array(y)", "_____no_output_____" ], [ "pylab.plot(x[:,0], y, 'x')\npylab.xticks(rotation=45);", "_____no_output_____" ], [ "pylab.plot(x[:,1], y, 'x');", "_____no_output_____" ], [ "pylab.plot(x[:,2], y, 'x');", "_____no_output_____" ] ], [ [ "model = linear_model.LinearRegression()\nmodel.fit(x, y)", "_____no_output_____" ] ], [ [ "model = linear_model.LinearRegression() \nmodel.fit(x, y)\nbeta = [model.intercept_, *model.coef_]; beta", "_____no_output_____" ], [ "yhat = model.predict(x); yhat", "_____no_output_____" ], [ "{'MSE': mean_squared_error(y, yhat), 'R2': r2_score(y, yhat)}", "_____no_output_____" ], [ "np.shape(x)", "_____no_output_____" ], [ "training_n = 15\ntraining_x = x[0:training_n, :]\ntraining_y = y[0:training_n, :]\ntest_x = x[training_n:, :]\ntest_y = y[training_n:, :]", "_____no_output_____" ], [ "x", "_____no_output_____" ], [ "training_x", "_____no_output_____" ], [ "test_x", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "training_y", "_____no_output_____" ], [ "test_y", "_____no_output_____" ], [ "model = linear_model.LinearRegression()\nmodel.fit(training_x, training_y)", "_____no_output_____" ], [ "beta = [model.intercept_, *model.coef_]; beta", "_____no_output_____" ], [ "training_yhat = model.predict(training_x); training_yhat", "_____no_output_____" ], [ "{'MSE': mean_squared_error(training_y, training_yhat), 'R2': r2_score(training_y, training_yhat)}", "_____no_output_____" ], [ "test_yhat = model.predict(test_x); test_yhat", "_____no_output_____" ], [ "{'MSE': mean_squared_error(test_y, test_yhat), 'R2': r2_score(test_y, test_yhat)}", "_____no_output_____" ], [ "training_n = 10\ntraining_x = x[0:training_n, 1:]\ntraining_y = y[0:training_n, :]\ntest_x = x[training_n:, 1:]\ntest_y = y[training_n:, :]", "_____no_output_____" ], [ "x", "_____no_output_____" ], [ "training_x", "_____no_output_____" ], [ "test_x", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "training_y", "_____no_output_____" ], [ "test_y", "_____no_output_____" ], [ "model = linear_model.LinearRegression()\nmodel.fit(training_x, training_y)", "_____no_output_____" ], [ "beta = [model.intercept_, *model.coef_]; beta", "_____no_output_____" ], [ "training_yhat = model.predict(training_x); training_yhat", "_____no_output_____" ], [ "{'MSE': 
mean_squared_error(training_y, training_yhat), 'R2': r2_score(training_y, training_yhat)}", "_____no_output_____" ], [ "test_yhat = model.predict(test_x); test_yhat", "_____no_output_____" ], [ "{'MSE': mean_squared_error(test_y, test_yhat), 'R2': r2_score(test_y, test_yhat)}", "_____no_output_____" ], [ "pylab.plot(test_y, test_yhat, 'x')", "_____no_output_____" ], [ "training_n = 10\ntraining_x = x[0:training_n, 1:]\ntraining_y = y[0:training_n, :]\ntest_x = x[training_n:, 1:]\ntest_y = y[training_n:, :]", "_____no_output_____" ], [ "scaler = StandardScaler()\nscaler.fit(training_x)\ntraining_x = scaler.transform(training_x)\ntest_x = scaler.transform(test_x)", "_____no_output_____" ], [ "model = neural_network.MLPRegressor()\nmodel.fit(training_x, training_y.ravel())", "/anaconda3/anaconda3/lib/python3.7/site-packages/sklearn/neural_network/multilayer_perceptron.py:562: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n" ], [ "training_yhat = model.predict(training_x); training_yhat", "_____no_output_____" ], [ "{'MSE': mean_squared_error(training_y, training_yhat), 'R2': r2_score(training_y, training_yhat)}", "_____no_output_____" ], [ "test_yhat = model.predict(test_x); test_yhat", "_____no_output_____" ], [ "{'MSE': mean_squared_error(test_y, test_yhat), 'R2': r2_score(test_y, test_yhat)}", "_____no_output_____" ], [ "pylab.plot(test_y, test_yhat, 'x')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec845134f8a6b30fcc33dde1844651f9018441c8
20,446
ipynb
Jupyter Notebook
examples/morph_inflection.ipynb
jayten42/pororo
0b02e6a633b9a32ec4241b8ed96745e6592db317
[ "Apache-2.0" ]
1,137
2021-02-02T02:09:06.000Z
2022-03-29T03:10:40.000Z
examples/morph_inflection.ipynb
jayten42/pororo
0b02e6a633b9a32ec4241b8ed96745e6592db317
[ "Apache-2.0" ]
57
2021-02-02T03:29:54.000Z
2022-03-31T16:20:00.000Z
examples/morph_inflection.ipynb
jayten42/pororo
0b02e6a633b9a32ec4241b8ed96745e6592db317
[ "Apache-2.0" ]
216
2021-02-02T02:49:02.000Z
2022-03-28T01:19:58.000Z
28.595804
81
0.260638
[ [ [ "from pororo import Pororo", "_____no_output_____" ], [ "inflection = Pororo(task=\"inflection\", lang=\"ko\")", "_____no_output_____" ], [ "inflection(\"곱\")", "_____no_output_____" ], [ "inflection = Pororo(task=\"inflection\", lang=\"en\")", "2021-01-15 14:15:04 | INFO | root | Downloading user-selected model...\n\n" ], [ "inflection(\"love\")", "_____no_output_____" ], [ "inflection = Pororo(task=\"inflection\", lang=\"ja\")", "2021-01-15 14:15:12 | INFO | root | Downloading user-selected model...\n\n" ], [ "inflection(\"あえぐ\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec8459415a292d012feb2ad3627985d1991ff3ae
55,651
ipynb
Jupyter Notebook
svm.ipynb
ahmetemrekilic/Jupyter-Machine-Learning
57e6bd1dad14a9df027d07ee81da7db1fa7b1b75
[ "MIT" ]
null
null
null
svm.ipynb
ahmetemrekilic/Jupyter-Machine-Learning
57e6bd1dad14a9df027d07ee81da7db1fa7b1b75
[ "MIT" ]
null
null
null
svm.ipynb
ahmetemrekilic/Jupyter-Machine-Learning
57e6bd1dad14a9df027d07ee81da7db1fa7b1b75
[ "MIT" ]
null
null
null
125.90724
42,672
0.827424
[ [ [ "# SVM\n", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ], [ "data=pd.read_csv('data.csv')", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "data.drop([\"id\",\"Unnamed: 32\"],axis=1,inplace=True)", "_____no_output_____" ], [ "M = data[data.diagnosis == \"M\"]\nB = data[data.diagnosis == \"B\"]\n\nplt.scatter(M.radius_mean,M.texture_mean,color=\"red\",label=\"kotu\",alpha= 0.3)\nplt.scatter(B.radius_mean,B.texture_mean,color=\"green\",label=\"iyi\",alpha= 0.3)\nplt.xlabel(\"radius_mean\")\nplt.ylabel(\"texture_mean\")\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "data.diagnosis = [1 if each == \"M\" else 0 for each in data.diagnosis]\ny = data.diagnosis.values\nx_data = data.drop([\"diagnosis\"],axis=1)", "_____no_output_____" ], [ "x = (x_data - np.min(x_data))/(np.max(x_data)-np.min(x_data))", "_____no_output_____" ], [ "#test_size 0.2\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.2,random_state=1)", "_____no_output_____" ], [ "from sklearn.svm import SVC", "_____no_output_____" ], [ "svm = SVC(random_state = 1)\nsvm.fit(x_train,y_train)", "_____no_output_____" ], [ "print(\"print accuracy of svm algo: \",svm.score(x_test,y_test))", "print accuracy of svm algo: 0.9736842105263158\n" ], [ "y_pred = svm.predict(x_test) \ny_pred", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix\nconfusion_matrix(y_test, y_pred)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec845bc58a96528621944b1809cea04445bfd0aa
161,153
ipynb
Jupyter Notebook
example/dash_vis.ipynb
happya/movie-robot
2a8fd46f279fb731d1fb52a8f8e30eebe3c45511
[ "MIT" ]
null
null
null
example/dash_vis.ipynb
happya/movie-robot
2a8fd46f279fb731d1fb52a8f8e30eebe3c45511
[ "MIT" ]
2
2020-12-05T04:38:30.000Z
2020-12-10T20:28:42.000Z
example/dash_vis.ipynb
happya/movie-robot
2a8fd46f279fb731d1fb52a8f8e30eebe3c45511
[ "MIT" ]
1
2020-11-20T04:47:57.000Z
2020-11-20T04:47:57.000Z
33.969857
10,728
0.358641
[ [ [ "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.graph_objects as go\npd.options.display.max_columns = 2000\n\n", "_____no_output_____" ], [ "# init dash app\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets )", "_____no_output_____" ], [ "df = pd.read_csv('../data/movies_cleaned.csv')", "_____no_output_____" ], [ "df[df['year'] == -1]", "_____no_output_____" ], [ "def visualize_num_movies_companies(df):\n # company_count = {}\n # for i in range(len(df)):\n # company = df.loc[i, 'company_name']\n # company_count.setdefault(company, 0)\n # company_count[company] += 1\n #\n # companies = list(company_count.keys())\n # counts = list(company_count.values())\n companies = df['company_name'].value_counts()\n print(companies)\n\n data = [go.Bar(name='num_movies_made_by_companies', \n x=list(companies.index)[:20], y=list(companies.values)[:20], \n marker={\n \"color\": \"orange\",\n \"line\": {\n \"width\": 2,\n \"color\": \"orange\"\n }})]\n layout = go.Layout(title='Numbers of movies made by different companies')\n fig = go.Figure(data=data, layout=layout)\n# fig.update_xaxes(tickangle=75, tickfont=dict(family='Rockwell', color='crimson', size=14))\n return fig\n", "_____no_output_____" ], [ "visualize_num_movies_companies(df)", "Paramount Pictures 562\nUniversal Pictures 520\nColumbia Pictures 400\nTwentieth Century Fox Film Corporation 354\nNew Line Cinema 314\n ... \nBoll KG 2\nDaniel Columbie Films & Productions 2\nBirdsong Pictures 2\nVnesheconombank 2\ncompany_name 1\nName: company_name, Length: 1314, dtype: int64\n" ], [ "import ast\ndef visualize_genres(df):\n genre_count = dict()\n genres = df['genre_names']\n for genre_list in genres:\n for g in genre_list.strip('[]').split(','):\n g = g.strip()[1:-1]\n genre_count[g] = genre_count.get(g, 0) + 1\n genres, counts = list(zip(*genre_count.items()))\n print(genre_count.items())\n data = [go.Pie(labels=genres, values=counts)]\n layout = go.Layout(\n title='Movies proportion base on genres'\n )\n fig = go.Figure(data=data, layout=layout)\n fig.update_traces(textposition ='inside',textinfo='percent+label')\n return fig", "_____no_output_____" ], [ "a = '[0]'\na.strip('[]')", "_____no_output_____" ], [ "visualize_genres(df)", "dict_items([('action', 2308), ('adventure', 1580), ('fantasy', 848), ('science fiction', 1070), ('crime', 1392), ('drama', 4594), ('thriller', 2548), ('animation', 468), ('family', 1026), ('western', 164), ('comedy', 3444), ('romance', 1788), ('horror', 1038), ('mystery', 696), ('history', 394), ('war', 288), ('music', 370), ('documentary', 220), ('foreign', 68), ('tv movie', 16), ('', 56), ('enre_name', 1)])\n" ], [ "df", "_____no_output_____" ], [ "df['country_name']", "_____no_output_____" ], [ "Country = pd.DataFrame(df[\"country_name\"].value_counts().reset_index().values,columns=[\"country\",\"Total\"])\nCountry.head()\nCountry['text'] = Country.country + ': ' + Country.Total.apply(str)\nCountry['text']", "_____no_output_____" ], [ "import plotly.express as px\n\nfig = px.choropleth( \n locationmode='country names',\n locations=Country.country,\n featureidkey=\"Country.country\",\n labels=Country[\"Total\"]\n)\nfig.show()\nfig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec8464ee152a57139d7881a46d7ba92131074477
78,688
ipynb
Jupyter Notebook
recurrent-neural-networks/char-rnn/Character_Level_RNN_Exercise.ipynb
BedirYilmaz/udacity-intro-dl-pytorch
d500fdc1916918011bee26275c1d9f3f633ee722
[ "MIT" ]
null
null
null
recurrent-neural-networks/char-rnn/Character_Level_RNN_Exercise.ipynb
BedirYilmaz/udacity-intro-dl-pytorch
d500fdc1916918011bee26275c1d9f3f633ee722
[ "MIT" ]
5
2020-09-26T00:48:17.000Z
2022-02-10T01:15:56.000Z
recurrent-neural-networks/char-rnn/Character_Level_RNN_Exercise.ipynb
BedirYilmaz/udacity-intro-dl-pytorch
d500fdc1916918011bee26275c1d9f3f633ee722
[ "MIT" ]
null
null
null
78,688
78,688
0.62162
[ [ [ "from google.colab import drive\ndrive.mount('/content/drive')\n\nimport os\nos.chdir(\"/content/drive/My Drive/Colab Notebooks/Udacity/deep-learning-v2-pytorch/recurrent-neural-networks/char-rnn\")", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ] ], [ [ "# Character-Level LSTM in PyTorch\n\nIn this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**\n\nThis network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN.\n\n<img src=\"assets/charseq.jpeg\" width=\"500\">", "_____no_output_____" ], [ "First let's load in our required resources for data loading and model creation.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F", "_____no_output_____" ] ], [ [ "## Load in Data\n\nThen, we'll load the Anna Karenina text file and convert it into integers for our network to use. ", "_____no_output_____" ] ], [ [ "# open text file and read in data as `text`\nwith open('data/anna.txt', 'r') as f:\n text = f.read()", "_____no_output_____" ] ], [ [ "Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.", "_____no_output_____" ] ], [ [ "text[:100], len(text)", "_____no_output_____" ] ], [ [ "### Tokenization\n\nIn the cells, below, I'm creating a couple **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.", "_____no_output_____" ] ], [ [ "# encode the text and map each character to an integer and vice versa\n\n# we create two dictionaries:\n# 1. int2char, which maps integers to characters\n# 2. char2int, which maps characters to unique integers\nchars = tuple(set(text))\nint2char = dict(enumerate(chars))\nchar2int = {ch: ii for ii, ch in int2char.items()}\n\n# encode the text\nencoded = np.array([char2int[ch] for ch in text])", "_____no_output_____" ] ], [ [ "And we can see those same characters from above, encoded as integers.", "_____no_output_____" ] ], [ [ "encoded[:100]", "_____no_output_____" ] ], [ [ "## Pre-processing the data\n\nAs you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only it's corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. 
Since we're one-hot encoding the data, let's make a function to do that!\n", "_____no_output_____" ] ], [ [ "def one_hot_encode(arr, n_labels):\n \n # Initialize the encoded array\n one_hot = np.zeros((arr.size, n_labels), dtype=np.float32)\n \n # Fill the appropriate elements with ones\n one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.\n \n # Finally reshape it to get back to the original array\n one_hot = one_hot.reshape((*arr.shape, n_labels))\n \n return one_hot", "_____no_output_____" ], [ "# check that the function works as expected\ntest_seq = np.array([[3, 5, 1]])\none_hot = one_hot_encode(test_seq, 8)\n\nprint(one_hot)", "[[[0. 0. 0. 1. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 1. 0. 0.]\n [0. 1. 0. 0. 0. 0. 0. 0.]]]\n" ] ], [ [ "## Making training mini-batches\n\n\nTo train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:\n\n<img src=\"assets/[email protected]\" width=500px>\n\n\n<br>\n\nIn this example, we'll take the encoded characters (passed in as the `arr` parameter) and split them into multiple sequences, given by `batch_size`. Each of our sequences will be `seq_length` long.\n\n### Creating Batches\n\n**1. The first thing we need to do is discard some of the text so we only have completely full mini-batches.**\n\nEach batch contains $N \\times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is `seq_length`, the number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from `arr`, $N * M * K$. For example, with $N = 8$ and $M = 50$ each batch holds 400 characters, so an `arr` of length 2,000,400 gives $K = 5001$ full batches.\n\n**2. After that, we need to split `arr` into $N$ batches.**\n\nYou can do this using `arr.reshape(size)` where `size` is a tuple containing the dimension sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size; it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \\times (M * K)$.\n\n**3. Now that we have this array, we can iterate through it to get our mini-batches.**\n\nThe idea is that each batch is an $N \\times M$ window on the $N \\times (M * K)$ array. For each subsequent batch, the window moves over by `seq_length`. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character. The way I like to do this window is to use `range` to take steps of size `seq_length` from $0$ to `arr.shape[1]`, the total number of tokens in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `seq_length` wide.\n\n> **TODO:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, check out the solutions. 
The most important thing is that you don't copy and paste the code into here, **type out the solution code yourself.**", "_____no_output_____" ] ], [ [ "def get_batches(arr, batch_size, seq_length):\n '''Create a generator that returns batches of size\n batch_size x seq_length from arr.\n \n Arguments\n ---------\n arr: Array you want to make batches from\n batch_size: Batch size, the number of sequences per batch\n seq_length: Number of encoded chars in a sequence\n '''\n \n ## TODO: Get the number of batches we can make\n n_batches = len(arr) // (batch_size * seq_length)\n \n ## TODO: Keep only enough characters to make full batches\n arr = arr[:n_batches * (batch_size * seq_length)]\n \n ## TODO: Reshape into batch_size rows\n arr = arr.reshape((batch_size, -1))\n \n ## TODO: Iterate over the batches using a window of size seq_length\n for n in range(0, arr.shape[1], seq_length):\n # The features\n x = arr[:, n:n+seq_length]\n \n y = np.zeros_like(x)\n # The targets, shifted by one\n try:\n # the final target of this window is the next character over in arr\n y[:,:-1], y[:,-1] = x[:, 1:], arr[:, n+seq_length]\n except IndexError:\n # at the end of arr there is no next character, so wrap to the start\n y[:,:-1], y[:,-1] = x[:, 1:], arr[:, 0]\n \n yield x, y", "_____no_output_____" ] ], [ [ "### Test Your Implementation\n\nNow I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.", "_____no_output_____" ] ], [ [ "batches = get_batches(encoded, 8, 50)\nx, y = next(batches)", "_____no_output_____" ], [ "# printing out the first 10 items in a sequence\nprint('x\\n', x[:10, :10])\nprint('\\ny\\n', y[:10, :10])", "x\n [[47 70 32 27 69 46 2 63 67 60]\n [49 6 79 63 69 70 32 69 63 32]\n [46 79 81 63 6 2 63 32 63 7]\n [49 63 69 70 46 63 58 70 52 46]\n [63 49 32 12 63 70 46 2 63 69]\n [58 62 49 49 52 6 79 63 32 79]\n [63 71 79 79 32 63 70 32 81 63]\n [ 8 26 39 6 79 49 14 21 51 63]]\n\ny\n [[70 32 27 69 46 2 63 67 60 60]\n [ 6 79 63 69 70 32 69 63 32 69]\n [79 81 63 6 2 63 32 63 7 6]\n [63 69 70 46 63 58 70 52 46 7]\n [49 32 12 63 70 46 2 63 69 46]\n [62 49 49 52 6 79 63 32 79 81]\n [71 79 79 32 63 70 32 81 63 49]\n [26 39 6 79 49 14 21 51 63 42]]\n" ] ], [ [ "If you implemented `get_batches` correctly, the above output should look something like \n```\nx\n [[25 8 60 11 45 27 28 73 1 2]\n [17 7 20 73 45 8 60 45 73 60]\n [27 20 80 73 7 28 73 60 73 65]\n [17 73 45 8 27 73 66 8 46 27]\n [73 17 60 12 73 8 27 28 73 45]\n [66 64 17 17 46 7 20 73 60 20]\n [73 76 20 20 60 73 8 60 80 73]\n [47 35 43 7 20 17 24 50 37 73]]\n\ny\n [[ 8 60 11 45 27 28 73 1 2 2]\n [ 7 20 73 45 8 60 45 73 60 45]\n [20 80 73 7 28 73 60 73 65 7]\n [73 45 8 27 73 66 8 46 27 65]\n [17 60 12 73 8 27 28 73 45 27]\n [64 17 17 46 7 20 73 60 20 80]\n [76 20 20 60 73 8 60 80 73 17]\n [35 43 7 20 17 24 50 37 73 36]]\n ```\n although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`.", "_____no_output_____" ], [ "---\n## Defining the network with PyTorch\n\nBelow is where you'll define the network.\n\n<img src=\"assets/charRNN.png\" width=500px>\n\nNext, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. 
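As a quick sanity check on the shapes involved, here is a minimal sketch (the sizes are made up for illustration, except 83, the character count this notebook's vocabulary turns out to have):\n\n```python\nimport torch\nfrom torch import nn\n\nbatch, seq_len, n_chars, n_hidden = 4, 10, 83, 16\nlstm = nn.LSTM(n_chars, n_hidden, num_layers=2, batch_first=True)\nfc = nn.Linear(n_hidden, n_chars)\n\nx = torch.zeros(batch, seq_len, n_chars)    # a one-hot input batch\nout, hidden = lstm(x)                       # out: (4, 10, 16)\nflat = out.contiguous().view(-1, n_hidden)  # (40, 16), ready for the linear layer\nscores = fc(flat)                           # (40, 83) character scores\nprint(out.shape, flat.shape, scores.shape)\n```\n\n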
You've also been given a method for predicting characters.", "_____no_output_____" ], [ "### Model Structure\n\nIn `__init__` the suggested structure is as follows:\n* Create and store the necessary dictionaries (this has been done for you)\n* Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)\n* Define a dropout layer with `drop_prob`\n* Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)\n* Finally, initialize the weights (again, this has been given)\n\nNote that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`.", "_____no_output_____" ], [ "---\n### LSTM Inputs/Outputs\n\nYou can create a basic [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) as follows\n\n```python\nself.lstm = nn.LSTM(input_size, n_hidden, n_layers, \n dropout=drop_prob, batch_first=True)\n```\n\nwhere `input_size` is the number of characters this cell expects to see as sequential input, and `n_hidden` is the number of units in the hidden layers in the cell. And we can add dropout by adding a dropout parameter with a specified probability; this automatically adds dropout between the stacked LSTM layers (on every layer's output except the last). It is the `n_layers` argument that actually stacks the LSTM cells into layers, sending the output of one layer into the next; in the `forward` function, we then use `.view` to flatten the LSTM output to shape `(batch_size * seq_length, n_hidden)` so it can be fed to the fully-connected layer.\n\nWe also need to create an initial hidden state of all zeros. This is done like so\n\n```python\nself.init_hidden()\n```", "_____no_output_____" ] ], [ [ "# check if GPU is available\ntrain_on_gpu = torch.cuda.is_available()\nif(train_on_gpu):\n print('Training on GPU!')\nelse: \n print('No GPU available, training on CPU; consider making n_epochs very small.')", "Training on GPU!\n" ], [ "class CharRNN(nn.Module):\n \n def __init__(self, tokens, n_hidden=256, n_layers=2,\n drop_prob=0.5, lr=0.001):\n super().__init__()\n self.drop_prob = drop_prob\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.lr = lr\n \n # creating character dictionaries\n self.chars = tokens\n self.int2char = dict(enumerate(self.chars))\n self.char2int = {ch: ii for ii, ch in self.int2char.items()}\n \n ## TODO: define the layers of the model\n self.lstm = nn.LSTM(len(self.chars), n_hidden, n_layers, bias=True, \n dropout=self.drop_prob, batch_first=True)\n \n ## TODO: define the dropout\n self.dropout = nn.Dropout(self.drop_prob)\n\n ## TODO: define the final, fully-connected output layer\n self.fc = nn.Linear(n_hidden, len(self.chars))\n\n \n def forward(self, x, hidden):\n ''' Forward pass through the network.\n These inputs are x, and the hidden/cell state `hidden`. 
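x is a one-hot batch of shape (batch_size, seq_length, len(self.chars)),\n and hidden is the (h, c) tuple returned by init_hidden().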
'''\n \n ## TODO: Get the outputs and the new hidden state from the lstm\n x, hidden = self.lstm(x, hidden)\n\n x = x.contiguous().view(-1, self.n_hidden)\n\n out = self.fc(x)\n \n # return the final output and the hidden state\n return out, hidden\n \n \n def init_hidden(self, batch_size):\n ''' Initializes hidden state '''\n # Create two new tensors with sizes n_layers x batch_size x n_hidden,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n \n if (train_on_gpu):\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())\n else:\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_())\n \n return hidden\n ", "_____no_output_____" ] ], [ [ "## Time to train\n\nThe train function gives us the ability to set the number of epochs, the learning rate, and other parameters.\n\nBelow we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!\n\nA couple of details about training: \n>* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.\n* We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients.", "_____no_output_____" ] ], [ [ "def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):\n ''' Training a network \n \n Arguments\n ---------\n \n net: CharRNN network\n data: text data to train the network\n epochs: Number of epochs to train\n batch_size: Number of mini-sequences per mini-batch, aka batch size\n seq_length: Number of character steps per mini-batch\n lr: learning rate\n clip: gradient clipping\n val_frac: Fraction of data to hold out for validation\n print_every: Number of steps for printing training and validation loss\n \n '''\n net.train()\n \n opt = torch.optim.Adam(net.parameters(), lr=lr)\n criterion = nn.CrossEntropyLoss()\n \n # create training and validation data\n val_idx = int(len(data)*(1-val_frac))\n data, val_data = data[:val_idx], data[val_idx:]\n \n if(train_on_gpu):\n net.cuda()\n \n counter = 0\n n_chars = len(net.chars)\n for e in range(epochs):\n # initialize hidden state\n h = net.init_hidden(batch_size)\n \n for x, y in get_batches(data, batch_size, seq_length):\n counter += 1\n \n # One-hot encode our data and make them Torch tensors\n x = one_hot_encode(x, n_chars)\n inputs, targets = torch.from_numpy(x), torch.from_numpy(y)\n \n if(train_on_gpu):\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n h = tuple([each.data for each in h])\n\n # zero accumulated gradients\n net.zero_grad()\n \n # get the output from the model\n output, h = net(inputs, h)\n \n # calculate the loss and perform backprop\n loss = criterion(output, targets.view(batch_size*seq_length).long())\n loss.backward()\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n nn.utils.clip_grad_norm_(net.parameters(), clip)\n opt.step()\n \n # loss stats\n if counter % print_every == 0:\n # Get validation loss\n val_h = 
net.init_hidden(batch_size)\n val_losses = []\n net.eval()\n for x, y in get_batches(val_data, batch_size, seq_length):\n # One-hot encode our data and make them Torch tensors\n x = one_hot_encode(x, n_chars)\n x, y = torch.from_numpy(x), torch.from_numpy(y)\n \n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n val_h = tuple([each.data for each in val_h])\n \n inputs, targets = x, y\n if(train_on_gpu):\n inputs, targets = inputs.cuda(), targets.cuda()\n\n output, val_h = net(inputs, val_h)\n val_loss = criterion(output, targets.view(batch_size*seq_length).long())\n \n val_losses.append(val_loss.item())\n \n net.train() # reset to train mode after iterating through validation data\n \n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Step: {}...\".format(counter),\n \"Loss: {:.4f}...\".format(loss.item()),\n \"Val Loss: {:.4f}\".format(np.mean(val_losses)))", "_____no_output_____" ] ], [ [ "## Instantiating the model\n\nNow we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batch sizes, and start training!", "_____no_output_____" ] ], [ [ "## TODO: set your model hyperparameters\n# define and print the net\nn_hidden = 256\nn_layers = 2\n\nnet = CharRNN(chars, n_hidden, n_layers)\nprint(net)", "CharRNN(\n (lstm): LSTM(83, 256, num_layers=2, batch_first=True, dropout=0.5)\n (dropout): Dropout(p=0.5, inplace=False)\n (fc): Linear(in_features=256, out_features=83, bias=True)\n)\n" ] ], [ [ "### Set your training hyperparameters!", "_____no_output_____" ] ], [ [ "batch_size = 128\nseq_length = 50\nn_epochs = 20 # start small if you are just testing initial behavior\n\n# train the model\ntrain(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)", "Epoch: 1/20... Step: 10... Loss: 3.2636... Val Loss: 3.2118\nEpoch: 1/20... Step: 20... Loss: 3.0980... Val Loss: 3.1441\nEpoch: 1/20... Step: 30... Loss: 3.1090... Val Loss: 3.1384\nEpoch: 1/20... Step: 40... Loss: 3.1076... Val Loss: 3.1328\nEpoch: 1/20... Step: 50... Loss: 3.1156... Val Loss: 3.1312\nEpoch: 1/20... Step: 60... Loss: 3.1174... Val Loss: 3.1292\nEpoch: 1/20... Step: 70... Loss: 3.1012... Val Loss: 3.1281\nEpoch: 1/20... Step: 80... Loss: 3.0769... Val Loss: 3.1293\nEpoch: 1/20... Step: 90... Loss: 3.1135... Val Loss: 3.1282\nEpoch: 1/20... Step: 100... Loss: 3.0873... Val Loss: 3.1278\nEpoch: 1/20... Step: 110... Loss: 3.1088... Val Loss: 3.1284\nEpoch: 1/20... Step: 120... Loss: 3.1132... Val Loss: 3.1275\nEpoch: 1/20... Step: 130... Loss: 3.0998... Val Loss: 3.1255\nEpoch: 1/20... Step: 140... Loss: 3.0932... Val Loss: 3.1228\nEpoch: 1/20... Step: 150... Loss: 3.1094... Val Loss: 3.1199\nEpoch: 1/20... Step: 160... Loss: 3.0898... Val Loss: 3.1134\nEpoch: 1/20... Step: 170... Loss: 3.0869... Val Loss: 3.1012\nEpoch: 1/20... Step: 180... Loss: 3.0657... Val Loss: 3.0827\nEpoch: 1/20... Step: 190... Loss: 3.0817... Val Loss: 3.0538\nEpoch: 1/20... Step: 200... Loss: 3.0105... Val Loss: 3.0141\nEpoch: 1/20... Step: 210... Loss: 2.9743... Val Loss: 2.9662\nEpoch: 1/20... Step: 220... Loss: 2.9052... Val Loss: 2.9149\nEpoch: 1/20... Step: 230... Loss: 2.8926... Val Loss: 2.8604\nEpoch: 1/20... Step: 240... Loss: 2.8443... Val Loss: 2.8144\nEpoch: 1/20... Step: 250... Loss: 2.7915... Val Loss: 2.7937\nEpoch: 1/20... Step: 260... Loss: 2.7162... Val Loss: 2.7377\nEpoch: 1/20... Step: 270... Loss: 2.7303... Val Loss: 2.6915\nEpoch: 2/20... 
Step: 280... Loss: 2.6991... Val Loss: 2.6571\nEpoch: 2/20... Step: 290... Loss: 2.6501... Val Loss: 2.6333\nEpoch: 2/20... Step: 300... Loss: 2.6084... Val Loss: 2.6143\nEpoch: 2/20... Step: 310... Loss: 2.6060... Val Loss: 2.5935\nEpoch: 2/20... Step: 320... Loss: 2.5844... Val Loss: 2.5750\nEpoch: 2/20... Step: 330... Loss: 2.5605... Val Loss: 2.5542\nEpoch: 2/20... Step: 340... Loss: 2.5665... Val Loss: 2.5422\nEpoch: 2/20... Step: 350... Loss: 2.5152... Val Loss: 2.5272\nEpoch: 2/20... Step: 360... Loss: 2.4983... Val Loss: 2.5086\nEpoch: 2/20... Step: 370... Loss: 2.4844... Val Loss: 2.4932\nEpoch: 2/20... Step: 380... Loss: 2.5078... Val Loss: 2.4820\nEpoch: 2/20... Step: 390... Loss: 2.4528... Val Loss: 2.4715\nEpoch: 2/20... Step: 400... Loss: 2.4804... Val Loss: 2.4560\nEpoch: 2/20... Step: 410... Loss: 2.4083... Val Loss: 2.4503\nEpoch: 2/20... Step: 420... Loss: 2.4375... Val Loss: 2.4354\nEpoch: 2/20... Step: 430... Loss: 2.4516... Val Loss: 2.4263\nEpoch: 2/20... Step: 440... Loss: 2.4450... Val Loss: 2.4144\nEpoch: 2/20... Step: 450... Loss: 2.4008... Val Loss: 2.4115\nEpoch: 2/20... Step: 460... Loss: 2.4455... Val Loss: 2.3962\nEpoch: 2/20... Step: 470... Loss: 2.3661... Val Loss: 2.3901\nEpoch: 2/20... Step: 480... Loss: 2.3601... Val Loss: 2.3811\nEpoch: 2/20... Step: 490... Loss: 2.3527... Val Loss: 2.3684\nEpoch: 2/20... Step: 500... Loss: 2.3327... Val Loss: 2.3600\nEpoch: 2/20... Step: 510... Loss: 2.3451... Val Loss: 2.3507\nEpoch: 2/20... Step: 520... Loss: 2.3252... Val Loss: 2.3429\nEpoch: 2/20... Step: 530... Loss: 2.3374... Val Loss: 2.3362\nEpoch: 2/20... Step: 540... Loss: 2.3109... Val Loss: 2.3272\nEpoch: 2/20... Step: 550... Loss: 2.3209... Val Loss: 2.3280\nEpoch: 3/20... Step: 560... Loss: 2.2771... Val Loss: 2.3135\nEpoch: 3/20... Step: 570... Loss: 2.2877... Val Loss: 2.3053\nEpoch: 3/20... Step: 580... Loss: 2.2635... Val Loss: 2.2985\nEpoch: 3/20... Step: 590... Loss: 2.2800... Val Loss: 2.2935\nEpoch: 3/20... Step: 600... Loss: 2.2719... Val Loss: 2.2871\nEpoch: 3/20... Step: 610... Loss: 2.2690... Val Loss: 2.2801\nEpoch: 3/20... Step: 620... Loss: 2.2439... Val Loss: 2.2738\nEpoch: 3/20... Step: 630... Loss: 2.2517... Val Loss: 2.2654\nEpoch: 3/20... Step: 640... Loss: 2.2330... Val Loss: 2.2601\nEpoch: 3/20... Step: 650... Loss: 2.2581... Val Loss: 2.2555\nEpoch: 3/20... Step: 660... Loss: 2.2362... Val Loss: 2.2447\nEpoch: 3/20... Step: 670... Loss: 2.2280... Val Loss: 2.2432\nEpoch: 3/20... Step: 680... Loss: 2.1982... Val Loss: 2.2351\nEpoch: 3/20... Step: 690... Loss: 2.1837... Val Loss: 2.2270\nEpoch: 3/20... Step: 700... Loss: 2.2023... Val Loss: 2.2208\nEpoch: 3/20... Step: 710... Loss: 2.1969... Val Loss: 2.2178\nEpoch: 3/20... Step: 720... Loss: 2.2148... Val Loss: 2.2101\nEpoch: 3/20... Step: 730... Loss: 2.1476... Val Loss: 2.2073\nEpoch: 3/20... Step: 740... Loss: 2.1916... Val Loss: 2.2014\nEpoch: 3/20... Step: 750... Loss: 2.2163... Val Loss: 2.1948\nEpoch: 3/20... Step: 760... Loss: 2.1580... Val Loss: 2.1871\nEpoch: 3/20... Step: 770... Loss: 2.1455... Val Loss: 2.1802\nEpoch: 3/20... Step: 780... Loss: 2.1501... Val Loss: 2.1757\nEpoch: 3/20... Step: 790... Loss: 2.1663... Val Loss: 2.1714\nEpoch: 3/20... Step: 800... Loss: 2.1474... Val Loss: 2.1627\nEpoch: 3/20... Step: 810... Loss: 2.1461... Val Loss: 2.1619\nEpoch: 3/20... Step: 820... Loss: 2.1494... Val Loss: 2.1510\nEpoch: 3/20... Step: 830... Loss: 2.1278... Val Loss: 2.1500\nEpoch: 4/20... Step: 840... Loss: 2.0872... Val Loss: 2.1454\nEpoch: 4/20... Step: 850... 
Loss: 2.1133... Val Loss: 2.1393\nEpoch: 4/20... Step: 860... Loss: 2.0804... Val Loss: 2.1335\nEpoch: 4/20... Step: 870... Loss: 2.0986... Val Loss: 2.1276\nEpoch: 4/20... Step: 880... Loss: 2.1069... Val Loss: 2.1249\nEpoch: 4/20... Step: 890... Loss: 2.0869... Val Loss: 2.1239\nEpoch: 4/20... Step: 900... Loss: 2.1013... Val Loss: 2.1165\nEpoch: 4/20... Step: 910... Loss: 2.1007... Val Loss: 2.1128\nEpoch: 4/20... Step: 920... Loss: 2.0862... Val Loss: 2.1120\nEpoch: 4/20... Step: 930... Loss: 2.0762... Val Loss: 2.1086\nEpoch: 4/20... Step: 940... Loss: 2.1190... Val Loss: 2.0989\nEpoch: 4/20... Step: 950... Loss: 2.0434... Val Loss: 2.0957\nEpoch: 4/20... Step: 960... Loss: 2.0702... Val Loss: 2.0932\nEpoch: 4/20... Step: 970... Loss: 2.0678... Val Loss: 2.0861\nEpoch: 4/20... Step: 980... Loss: 2.0310... Val Loss: 2.0835\nEpoch: 4/20... Step: 990... Loss: 2.0289... Val Loss: 2.0801\nEpoch: 4/20... Step: 1000... Loss: 2.0149... Val Loss: 2.0729\nEpoch: 4/20... Step: 1010... Loss: 2.0147... Val Loss: 2.0671\nEpoch: 4/20... Step: 1020... Loss: 2.0540... Val Loss: 2.0638\nEpoch: 4/20... Step: 1030... Loss: 2.0634... Val Loss: 2.0632\nEpoch: 4/20... Step: 1040... Loss: 2.0246... Val Loss: 2.0566\nEpoch: 4/20... Step: 1050... Loss: 2.0162... Val Loss: 2.0537\nEpoch: 4/20... Step: 1060... Loss: 2.0282... Val Loss: 2.0468\nEpoch: 4/20... Step: 1070... Loss: 2.0019... Val Loss: 2.0484\nEpoch: 4/20... Step: 1080... Loss: 2.0244... Val Loss: 2.0410\nEpoch: 4/20... Step: 1090... Loss: 1.9919... Val Loss: 2.0377\nEpoch: 4/20... Step: 1100... Loss: 2.0434... Val Loss: 2.0351\nEpoch: 4/20... Step: 1110... Loss: 1.9636... Val Loss: 2.0305\nEpoch: 5/20... Step: 1120... Loss: 2.0187... Val Loss: 2.0294\nEpoch: 5/20... Step: 1130... Loss: 1.9729... Val Loss: 2.0222\nEpoch: 5/20... Step: 1140... Loss: 1.9939... Val Loss: 2.0171\nEpoch: 5/20... Step: 1150... Loss: 1.9719... Val Loss: 2.0129\nEpoch: 5/20... Step: 1160... Loss: 1.9871... Val Loss: 2.0142\nEpoch: 5/20... Step: 1170... Loss: 1.9496... Val Loss: 2.0086\nEpoch: 5/20... Step: 1180... Loss: 1.9479... Val Loss: 2.0055\nEpoch: 5/20... Step: 1190... Loss: 2.0225... Val Loss: 1.9996\nEpoch: 5/20... Step: 1200... Loss: 1.9920... Val Loss: 1.9981\nEpoch: 5/20... Step: 1210... Loss: 1.9866... Val Loss: 1.9966\nEpoch: 5/20... Step: 1220... Loss: 1.9465... Val Loss: 1.9919\nEpoch: 5/20... Step: 1230... Loss: 1.9348... Val Loss: 1.9876\nEpoch: 5/20... Step: 1240... Loss: 1.9572... Val Loss: 1.9862\nEpoch: 5/20... Step: 1250... Loss: 1.9922... Val Loss: 1.9818\nEpoch: 5/20... Step: 1260... Loss: 1.9697... Val Loss: 1.9788\nEpoch: 5/20... Step: 1270... Loss: 1.9331... Val Loss: 1.9767\nEpoch: 5/20... Step: 1280... Loss: 1.9195... Val Loss: 1.9706\nEpoch: 5/20... Step: 1290... Loss: 1.9334... Val Loss: 1.9638\nEpoch: 5/20... Step: 1300... Loss: 1.9330... Val Loss: 1.9630\nEpoch: 5/20... Step: 1310... Loss: 1.9058... Val Loss: 1.9602\nEpoch: 5/20... Step: 1320... Loss: 1.9169... Val Loss: 1.9565\nEpoch: 5/20... Step: 1330... Loss: 1.9261... Val Loss: 1.9515\nEpoch: 5/20... Step: 1340... Loss: 1.9169... Val Loss: 1.9494\nEpoch: 5/20... Step: 1350... Loss: 1.9419... Val Loss: 1.9474\nEpoch: 5/20... Step: 1360... Loss: 1.9157... Val Loss: 1.9420\nEpoch: 5/20... Step: 1370... Loss: 1.9274... Val Loss: 1.9401\nEpoch: 5/20... Step: 1380... Loss: 1.9080... Val Loss: 1.9382\nEpoch: 5/20... Step: 1390... Loss: 1.9183... Val Loss: 1.9334\nEpoch: 6/20... Step: 1400... Loss: 1.8971... Val Loss: 1.9374\nEpoch: 6/20... Step: 1410... Loss: 1.8681... 
Val Loss: 1.9300\nEpoch: 6/20... Step: 1420... Loss: 1.8913... Val Loss: 1.9276\nEpoch: 6/20... Step: 1430... Loss: 1.8925... Val Loss: 1.9250\nEpoch: 6/20... Step: 1440... Loss: 1.8819... Val Loss: 1.9228\nEpoch: 6/20... Step: 1450... Loss: 1.8813... Val Loss: 1.9217\nEpoch: 6/20... Step: 1460... Loss: 1.8486... Val Loss: 1.9159\nEpoch: 6/20... Step: 1470... Loss: 1.9036... Val Loss: 1.9167\nEpoch: 6/20... Step: 1480... Loss: 1.9009... Val Loss: 1.9094\nEpoch: 6/20... Step: 1490... Loss: 1.8872... Val Loss: 1.9095\nEpoch: 6/20... Step: 1500... Loss: 1.8617... Val Loss: 1.9035\nEpoch: 6/20... Step: 1510... Loss: 1.8618... Val Loss: 1.9038\nEpoch: 6/20... Step: 1520... Loss: 1.8722... Val Loss: 1.9061\nEpoch: 6/20... Step: 1530... Loss: 1.8458... Val Loss: 1.8990\nEpoch: 6/20... Step: 1540... Loss: 1.8497... Val Loss: 1.8950\nEpoch: 6/20... Step: 1550... Loss: 1.8171... Val Loss: 1.8930\nEpoch: 6/20... Step: 1560... Loss: 1.8677... Val Loss: 1.8885\nEpoch: 6/20... Step: 1570... Loss: 1.8654... Val Loss: 1.8847\nEpoch: 6/20... Step: 1580... Loss: 1.8545... Val Loss: 1.8844\nEpoch: 6/20... Step: 1590... Loss: 1.8011... Val Loss: 1.8803\nEpoch: 6/20... Step: 1600... Loss: 1.8319... Val Loss: 1.8824\nEpoch: 6/20... Step: 1610... Loss: 1.8423... Val Loss: 1.8764\nEpoch: 6/20... Step: 1620... Loss: 1.8026... Val Loss: 1.8761\nEpoch: 6/20... Step: 1630... Loss: 1.8647... Val Loss: 1.8707\nEpoch: 6/20... Step: 1640... Loss: 1.8183... Val Loss: 1.8687\nEpoch: 6/20... Step: 1650... Loss: 1.8612... Val Loss: 1.8678\nEpoch: 6/20... Step: 1660... Loss: 1.8289... Val Loss: 1.8657\nEpoch: 6/20... Step: 1670... Loss: 1.8355... Val Loss: 1.8628\nEpoch: 7/20... Step: 1680... Loss: 1.8021... Val Loss: 1.8623\nEpoch: 7/20... Step: 1690... Loss: 1.8265... Val Loss: 1.8631\nEpoch: 7/20... Step: 1700... Loss: 1.8052... Val Loss: 1.8575\nEpoch: 7/20... Step: 1710... Loss: 1.8181... Val Loss: 1.8571\nEpoch: 7/20... Step: 1720... Loss: 1.8067... Val Loss: 1.8527\nEpoch: 7/20... Step: 1730... Loss: 1.7745... Val Loss: 1.8517\nEpoch: 7/20... Step: 1740... Loss: 1.7764... Val Loss: 1.8495\nEpoch: 7/20... Step: 1750... Loss: 1.8121... Val Loss: 1.8449\nEpoch: 7/20... Step: 1760... Loss: 1.7855... Val Loss: 1.8440\nEpoch: 7/20... Step: 1770... Loss: 1.8180... Val Loss: 1.8400\nEpoch: 7/20... Step: 1780... Loss: 1.7843... Val Loss: 1.8385\nEpoch: 7/20... Step: 1790... Loss: 1.7737... Val Loss: 1.8415\nEpoch: 7/20... Step: 1800... Loss: 1.7888... Val Loss: 1.8380\nEpoch: 7/20... Step: 1810... Loss: 1.7774... Val Loss: 1.8386\nEpoch: 7/20... Step: 1820... Loss: 1.7448... Val Loss: 1.8332\nEpoch: 7/20... Step: 1830... Loss: 1.8175... Val Loss: 1.8315\nEpoch: 7/20... Step: 1840... Loss: 1.7939... Val Loss: 1.8296\nEpoch: 7/20... Step: 1850... Loss: 1.7768... Val Loss: 1.8257\nEpoch: 7/20... Step: 1860... Loss: 1.7662... Val Loss: 1.8235\nEpoch: 7/20... Step: 1870... Loss: 1.7577... Val Loss: 1.8246\nEpoch: 7/20... Step: 1880... Loss: 1.7756... Val Loss: 1.8238\nEpoch: 7/20... Step: 1890... Loss: 1.7626... Val Loss: 1.8208\nEpoch: 7/20... Step: 1900... Loss: 1.7074... Val Loss: 1.8178\nEpoch: 7/20... Step: 1910... Loss: 1.7279... Val Loss: 1.8137\nEpoch: 7/20... Step: 1920... Loss: 1.7679... Val Loss: 1.8115\nEpoch: 7/20... Step: 1930... Loss: 1.7313... Val Loss: 1.8099\nEpoch: 7/20... Step: 1940... Loss: 1.7377... Val Loss: 1.8130\nEpoch: 7/20... Step: 1950... Loss: 1.7662... Val Loss: 1.8073\nEpoch: 8/20... Step: 1960... Loss: 1.7293... Val Loss: 1.8060\nEpoch: 8/20... Step: 1970... Loss: 1.7771... 
Val Loss: 1.8045\nEpoch: 8/20... Step: 1980... Loss: 1.7122... Val Loss: 1.8027\nEpoch: 8/20... Step: 1990... Loss: 1.7736... Val Loss: 1.8023\nEpoch: 8/20... Step: 2000... Loss: 1.7198... Val Loss: 1.7982\nEpoch: 8/20... Step: 2010... Loss: 1.7350... Val Loss: 1.7957\nEpoch: 8/20... Step: 2020... Loss: 1.7398... Val Loss: 1.7965\nEpoch: 8/20... Step: 2030... Loss: 1.7715... Val Loss: 1.7967\nEpoch: 8/20... Step: 2040... Loss: 1.7276... Val Loss: 1.7939\nEpoch: 8/20... Step: 2050... Loss: 1.6976... Val Loss: 1.7890\nEpoch: 8/20... Step: 2060... Loss: 1.7019... Val Loss: 1.7872\nEpoch: 8/20... Step: 2070... Loss: 1.7209... Val Loss: 1.7893\nEpoch: 8/20... Step: 2080... Loss: 1.7383... Val Loss: 1.7864\nEpoch: 8/20... Step: 2090... Loss: 1.7158... Val Loss: 1.7847\nEpoch: 8/20... Step: 2100... Loss: 1.7089... Val Loss: 1.7810\nEpoch: 8/20... Step: 2110... Loss: 1.7231... Val Loss: 1.7822\nEpoch: 8/20... Step: 2120... Loss: 1.7120... Val Loss: 1.7785\nEpoch: 8/20... Step: 2130... Loss: 1.6908... Val Loss: 1.7755\nEpoch: 8/20... Step: 2140... Loss: 1.7207... Val Loss: 1.7756\nEpoch: 8/20... Step: 2150... Loss: 1.7476... Val Loss: 1.7738\nEpoch: 8/20... Step: 2160... Loss: 1.7141... Val Loss: 1.7744\nEpoch: 8/20... Step: 2170... Loss: 1.6896... Val Loss: 1.7701\nEpoch: 8/20... Step: 2180... Loss: 1.6755... Val Loss: 1.7670\nEpoch: 8/20... Step: 2190... Loss: 1.7155... Val Loss: 1.7646\nEpoch: 8/20... Step: 2200... Loss: 1.6640... Val Loss: 1.7651\nEpoch: 8/20... Step: 2210... Loss: 1.7225... Val Loss: 1.7618\nEpoch: 8/20... Step: 2220... Loss: 1.6988... Val Loss: 1.7614\nEpoch: 8/20... Step: 2230... Loss: 1.6712... Val Loss: 1.7608\nEpoch: 9/20... Step: 2240... Loss: 1.7055... Val Loss: 1.7580\nEpoch: 9/20... Step: 2250... Loss: 1.6759... Val Loss: 1.7574\nEpoch: 9/20... Step: 2260... Loss: 1.6995... Val Loss: 1.7532\nEpoch: 9/20... Step: 2270... Loss: 1.6895... Val Loss: 1.7571\nEpoch: 9/20... Step: 2280... Loss: 1.7107... Val Loss: 1.7565\nEpoch: 9/20... Step: 2290... Loss: 1.6902... Val Loss: 1.7513\nEpoch: 9/20... Step: 2300... Loss: 1.7094... Val Loss: 1.7498\nEpoch: 9/20... Step: 2310... Loss: 1.6490... Val Loss: 1.7504\nEpoch: 9/20... Step: 2320... Loss: 1.6827... Val Loss: 1.7485\nEpoch: 9/20... Step: 2330... Loss: 1.6459... Val Loss: 1.7433\nEpoch: 9/20... Step: 2340... Loss: 1.6177... Val Loss: 1.7443\nEpoch: 9/20... Step: 2350... Loss: 1.6364... Val Loss: 1.7445\nEpoch: 9/20... Step: 2360... Loss: 1.6761... Val Loss: 1.7427\nEpoch: 9/20... Step: 2370... Loss: 1.6991... Val Loss: 1.7400\nEpoch: 9/20... Step: 2380... Loss: 1.6833... Val Loss: 1.7414\nEpoch: 9/20... Step: 2390... Loss: 1.6656... Val Loss: 1.7388\nEpoch: 9/20... Step: 2400... Loss: 1.6615... Val Loss: 1.7379\nEpoch: 9/20... Step: 2410... Loss: 1.6897... Val Loss: 1.7342\nEpoch: 9/20... Step: 2420... Loss: 1.6962... Val Loss: 1.7327\nEpoch: 9/20... Step: 2430... Loss: 1.6451... Val Loss: 1.7338\nEpoch: 9/20... Step: 2440... Loss: 1.7196... Val Loss: 1.7315\nEpoch: 9/20... Step: 2450... Loss: 1.6152... Val Loss: 1.7284\nEpoch: 9/20... Step: 2460... Loss: 1.6422... Val Loss: 1.7269\nEpoch: 9/20... Step: 2470... Loss: 1.6271... Val Loss: 1.7266\nEpoch: 9/20... Step: 2480... Loss: 1.6808... Val Loss: 1.7262\nEpoch: 9/20... Step: 2490... Loss: 1.7012... Val Loss: 1.7219\nEpoch: 9/20... Step: 2500... Loss: 1.6844... Val Loss: 1.7195\nEpoch: 9/20... Step: 2510... Loss: 1.6613... Val Loss: 1.7216\nEpoch: 10/20... Step: 2520... Loss: 1.6639... Val Loss: 1.7182\nEpoch: 10/20... Step: 2530... Loss: 1.6741... 
Val Loss: 1.7184\nEpoch: 10/20... Step: 2540... Loss: 1.6645... Val Loss: 1.7152\nEpoch: 10/20... Step: 2550... Loss: 1.6744... Val Loss: 1.7171\nEpoch: 10/20... Step: 2560... Loss: 1.6324... Val Loss: 1.7135\nEpoch: 10/20... Step: 2570... Loss: 1.6427... Val Loss: 1.7151\nEpoch: 10/20... Step: 2580... Loss: 1.6709... Val Loss: 1.7127\nEpoch: 10/20... Step: 2590... Loss: 1.6233... Val Loss: 1.7141\nEpoch: 10/20... Step: 2600... Loss: 1.6398... Val Loss: 1.7128\nEpoch: 10/20... Step: 2610... Loss: 1.6613... Val Loss: 1.7065\nEpoch: 10/20... Step: 2620... Loss: 1.6311... Val Loss: 1.7079\nEpoch: 10/20... Step: 2630... Loss: 1.6172... Val Loss: 1.7075\nEpoch: 10/20... Step: 2640... Loss: 1.6317... Val Loss: 1.7059\nEpoch: 10/20... Step: 2650... Loss: 1.6601... Val Loss: 1.7025\nEpoch: 10/20... Step: 2660... Loss: 1.6607... Val Loss: 1.7038\nEpoch: 10/20... Step: 2670... Loss: 1.6613... Val Loss: 1.7020\nEpoch: 10/20... Step: 2680... Loss: 1.6289... Val Loss: 1.7014\nEpoch: 10/20... Step: 2690... Loss: 1.6375... Val Loss: 1.6989\nEpoch: 10/20... Step: 2700... Loss: 1.6418... Val Loss: 1.6980\nEpoch: 10/20... Step: 2710... Loss: 1.6231... Val Loss: 1.6992\nEpoch: 10/20... Step: 2720... Loss: 1.6108... Val Loss: 1.6948\nEpoch: 10/20... Step: 2730... Loss: 1.6180... Val Loss: 1.6971\nEpoch: 10/20... Step: 2740... Loss: 1.5774... Val Loss: 1.6923\nEpoch: 10/20... Step: 2750... Loss: 1.6056... Val Loss: 1.6922\nEpoch: 10/20... Step: 2760... Loss: 1.6444... Val Loss: 1.6907\nEpoch: 10/20... Step: 2770... Loss: 1.6247... Val Loss: 1.6896\nEpoch: 10/20... Step: 2780... Loss: 1.6154... Val Loss: 1.6863\nEpoch: 10/20... Step: 2790... Loss: 1.6244... Val Loss: 1.6869\nEpoch: 11/20... Step: 2800... Loss: 1.6373... Val Loss: 1.6838\nEpoch: 11/20... Step: 2810... Loss: 1.6209... Val Loss: 1.6853\nEpoch: 11/20... Step: 2820... Loss: 1.6406... Val Loss: 1.6823\nEpoch: 11/20... Step: 2830... Loss: 1.6063... Val Loss: 1.6818\nEpoch: 11/20... Step: 2840... Loss: 1.6246... Val Loss: 1.6826\nEpoch: 11/20... Step: 2850... Loss: 1.6056... Val Loss: 1.6828\nEpoch: 11/20... Step: 2860... Loss: 1.6151... Val Loss: 1.6815\nEpoch: 11/20... Step: 2870... Loss: 1.5610... Val Loss: 1.6810\nEpoch: 11/20... Step: 2880... Loss: 1.6026... Val Loss: 1.6777\nEpoch: 11/20... Step: 2890... Loss: 1.6187... Val Loss: 1.6767\nEpoch: 11/20... Step: 2900... Loss: 1.6045... Val Loss: 1.6804\nEpoch: 11/20... Step: 2910... Loss: 1.6040... Val Loss: 1.6793\nEpoch: 11/20... Step: 2920... Loss: 1.6185... Val Loss: 1.6765\nEpoch: 11/20... Step: 2930... Loss: 1.6231... Val Loss: 1.6735\nEpoch: 11/20... Step: 2940... Loss: 1.5846... Val Loss: 1.6748\nEpoch: 11/20... Step: 2950... Loss: 1.5886... Val Loss: 1.6722\nEpoch: 11/20... Step: 2960... Loss: 1.5698... Val Loss: 1.6709\nEpoch: 11/20... Step: 2970... Loss: 1.6027... Val Loss: 1.6692\nEpoch: 11/20... Step: 2980... Loss: 1.6209... Val Loss: 1.6666\nEpoch: 11/20... Step: 2990... Loss: 1.5762... Val Loss: 1.6698\nEpoch: 11/20... Step: 3000... Loss: 1.5816... Val Loss: 1.6644\nEpoch: 11/20... Step: 3010... Loss: 1.6050... Val Loss: 1.6662\nEpoch: 11/20... Step: 3020... Loss: 1.5749... Val Loss: 1.6632\nEpoch: 11/20... Step: 3030... Loss: 1.6258... Val Loss: 1.6648\nEpoch: 11/20... Step: 3040... Loss: 1.5865... Val Loss: 1.6622\nEpoch: 11/20... Step: 3050... Loss: 1.5994... Val Loss: 1.6614\nEpoch: 11/20... Step: 3060... Loss: 1.6474... Val Loss: 1.6599\nEpoch: 12/20... Step: 3070... Loss: 1.6501... Val Loss: 1.6597\nEpoch: 12/20... Step: 3080... Loss: 1.5959... Val Loss: 1.6581\nEpoch: 12/20... 
Step: 3090... Loss: 1.5930... Val Loss: 1.6570\nEpoch: 12/20... Step: 3100... Loss: 1.5993... Val Loss: 1.6552\nEpoch: 12/20... Step: 3110... Loss: 1.6014... Val Loss: 1.6543\nEpoch: 12/20... Step: 3120... Loss: 1.5731... Val Loss: 1.6550\nEpoch: 12/20... Step: 3130... Loss: 1.5936... Val Loss: 1.6522\nEpoch: 12/20... Step: 3140... Loss: 1.6093... Val Loss: 1.6552\nEpoch: 12/20... Step: 3150... Loss: 1.5340... Val Loss: 1.6524\nEpoch: 12/20... Step: 3160... Loss: 1.5642... Val Loss: 1.6503\nEpoch: 12/20... Step: 3170... Loss: 1.5951... Val Loss: 1.6512\nEpoch: 12/20... Step: 3180... Loss: 1.5250... Val Loss: 1.6532\nEpoch: 12/20... Step: 3190... Loss: 1.5632... Val Loss: 1.6499\nEpoch: 12/20... Step: 3200... Loss: 1.5621... Val Loss: 1.6496\nEpoch: 12/20... Step: 3210... Loss: 1.5347... Val Loss: 1.6452\nEpoch: 12/20... Step: 3220... Loss: 1.5965... Val Loss: 1.6494\nEpoch: 12/20... Step: 3230... Loss: 1.5688... Val Loss: 1.6442\nEpoch: 12/20... Step: 3240... Loss: 1.5398... Val Loss: 1.6433\nEpoch: 12/20... Step: 3250... Loss: 1.5746... Val Loss: 1.6434\nEpoch: 12/20... Step: 3260... Loss: 1.5725... Val Loss: 1.6429\nEpoch: 12/20... Step: 3270... Loss: 1.5448... Val Loss: 1.6419\nEpoch: 12/20... Step: 3280... Loss: 1.5245... Val Loss: 1.6392\nEpoch: 12/20... Step: 3290... Loss: 1.5342... Val Loss: 1.6434\nEpoch: 12/20... Step: 3300... Loss: 1.5106... Val Loss: 1.6407\nEpoch: 12/20... Step: 3310... Loss: 1.5402... Val Loss: 1.6401\nEpoch: 12/20... Step: 3320... Loss: 1.5620... Val Loss: 1.6371\nEpoch: 12/20... Step: 3330... Loss: 1.5797... Val Loss: 1.6334\nEpoch: 12/20... Step: 3340... Loss: 1.5609... Val Loss: 1.6360\nEpoch: 13/20... Step: 3350... Loss: 1.5815... Val Loss: 1.6360\nEpoch: 13/20... Step: 3360... Loss: 1.5201... Val Loss: 1.6321\nEpoch: 13/20... Step: 3370... Loss: 1.5410... Val Loss: 1.6353\nEpoch: 13/20... Step: 3380... Loss: 1.5637... Val Loss: 1.6306\nEpoch: 13/20... Step: 3390... Loss: 1.5334... Val Loss: 1.6311\nEpoch: 13/20... Step: 3400... Loss: 1.5273... Val Loss: 1.6334\nEpoch: 13/20... Step: 3410... Loss: 1.5631... Val Loss: 1.6299\nEpoch: 13/20... Step: 3420... Loss: 1.5613... Val Loss: 1.6311\nEpoch: 13/20... Step: 3430... Loss: 1.5357... Val Loss: 1.6304\nEpoch: 13/20... Step: 3440... Loss: 1.5921... Val Loss: 1.6284\nEpoch: 13/20... Step: 3450... Loss: 1.5453... Val Loss: 1.6267\nEpoch: 13/20... Step: 3460... Loss: 1.5458... Val Loss: 1.6277\nEpoch: 13/20... Step: 3470... Loss: 1.5512... Val Loss: 1.6260\nEpoch: 13/20... Step: 3480... Loss: 1.5312... Val Loss: 1.6257\nEpoch: 13/20... Step: 3490... Loss: 1.5394... Val Loss: 1.6248\nEpoch: 13/20... Step: 3500... Loss: 1.5089... Val Loss: 1.6260\nEpoch: 13/20... Step: 3510... Loss: 1.5591... Val Loss: 1.6254\nEpoch: 13/20... Step: 3520... Loss: 1.4942... Val Loss: 1.6218\nEpoch: 13/20... Step: 3530... Loss: 1.5603... Val Loss: 1.6206\nEpoch: 13/20... Step: 3540... Loss: 1.5762... Val Loss: 1.6206\nEpoch: 13/20... Step: 3550... Loss: 1.5545... Val Loss: 1.6187\nEpoch: 13/20... Step: 3560... Loss: 1.4980... Val Loss: 1.6188\nEpoch: 13/20... Step: 3570... Loss: 1.5210... Val Loss: 1.6201\nEpoch: 13/20... Step: 3580... Loss: 1.5268... Val Loss: 1.6195\nEpoch: 13/20... Step: 3590... Loss: 1.5188... Val Loss: 1.6183\nEpoch: 13/20... Step: 3600... Loss: 1.5186... Val Loss: 1.6158\nEpoch: 13/20... Step: 3610... Loss: 1.5709... Val Loss: 1.6139\nEpoch: 13/20... Step: 3620... Loss: 1.5031... Val Loss: 1.6165\nEpoch: 14/20... Step: 3630... Loss: 1.4997... Val Loss: 1.6157\nEpoch: 14/20... Step: 3640... Loss: 1.5108... 
Val Loss: 1.6108\nEpoch: 14/20... Step: 3650... Loss: 1.5047... Val Loss: 1.6115\nEpoch: 14/20... Step: 3660... Loss: 1.5492... Val Loss: 1.6090\nEpoch: 14/20... Step: 3670... Loss: 1.5208... Val Loss: 1.6080\nEpoch: 14/20... Step: 3680... Loss: 1.5254... Val Loss: 1.6121\nEpoch: 14/20... Step: 3690... Loss: 1.5414... Val Loss: 1.6086\nEpoch: 14/20... Step: 3700... Loss: 1.5397... Val Loss: 1.6112\nEpoch: 14/20... Step: 3710... Loss: 1.4988... Val Loss: 1.6092\nEpoch: 14/20... Step: 3720... Loss: 1.5152... Val Loss: 1.6075\nEpoch: 14/20... Step: 3730... Loss: 1.5517... Val Loss: 1.6086\nEpoch: 14/20... Step: 3740... Loss: 1.4799... Val Loss: 1.6061\nEpoch: 14/20... Step: 3750... Loss: 1.4986... Val Loss: 1.6045\nEpoch: 14/20... Step: 3760... Loss: 1.5399... Val Loss: 1.6055\nEpoch: 14/20... Step: 3770... Loss: 1.4813... Val Loss: 1.6047\nEpoch: 14/20... Step: 3780... Loss: 1.5025... Val Loss: 1.6068\nEpoch: 14/20... Step: 3790... Loss: 1.4971... Val Loss: 1.6050\nEpoch: 14/20... Step: 3800... Loss: 1.4776... Val Loss: 1.6024\nEpoch: 14/20... Step: 3810... Loss: 1.5337... Val Loss: 1.6026\nEpoch: 14/20... Step: 3820... Loss: 1.5592... Val Loss: 1.6004\nEpoch: 14/20... Step: 3830... Loss: 1.4912... Val Loss: 1.5997\nEpoch: 14/20... Step: 3840... Loss: 1.4780... Val Loss: 1.6001\nEpoch: 14/20... Step: 3850... Loss: 1.5135... Val Loss: 1.5980\nEpoch: 14/20... Step: 3860... Loss: 1.4984... Val Loss: 1.5991\nEpoch: 14/20... Step: 3870... Loss: 1.4745... Val Loss: 1.5976\nEpoch: 14/20... Step: 3880... Loss: 1.4947... Val Loss: 1.5981\nEpoch: 14/20... Step: 3890... Loss: 1.5629... Val Loss: 1.5960\nEpoch: 14/20... Step: 3900... Loss: 1.4818... Val Loss: 1.5985\nEpoch: 15/20... Step: 3910... Loss: 1.5368... Val Loss: 1.5966\nEpoch: 15/20... Step: 3920... Loss: 1.4901... Val Loss: 1.5941\nEpoch: 15/20... Step: 3930... Loss: 1.5150... Val Loss: 1.5924\nEpoch: 15/20... Step: 3940... Loss: 1.5021... Val Loss: 1.5921\nEpoch: 15/20... Step: 3950... Loss: 1.5143... Val Loss: 1.5910\nEpoch: 15/20... Step: 3960... Loss: 1.4685... Val Loss: 1.5901\nEpoch: 15/20... Step: 3970... Loss: 1.4594... Val Loss: 1.5892\nEpoch: 15/20... Step: 3980... Loss: 1.5707... Val Loss: 1.5945\nEpoch: 15/20... Step: 3990... Loss: 1.5219... Val Loss: 1.5901\nEpoch: 15/20... Step: 4000... Loss: 1.5240... Val Loss: 1.5880\nEpoch: 15/20... Step: 4010... Loss: 1.4626... Val Loss: 1.5884\nEpoch: 15/20... Step: 4020... Loss: 1.5033... Val Loss: 1.5900\nEpoch: 15/20... Step: 4030... Loss: 1.4944... Val Loss: 1.5884\nEpoch: 15/20... Step: 4040... Loss: 1.5339... Val Loss: 1.5872\nEpoch: 15/20... Step: 4050... Loss: 1.5090... Val Loss: 1.5872\nEpoch: 15/20... Step: 4060... Loss: 1.4713... Val Loss: 1.5917\nEpoch: 15/20... Step: 4070... Loss: 1.4490... Val Loss: 1.5887\nEpoch: 15/20... Step: 4080... Loss: 1.4833... Val Loss: 1.5856\nEpoch: 15/20... Step: 4090... Loss: 1.5113... Val Loss: 1.5856\nEpoch: 15/20... Step: 4100... Loss: 1.4670... Val Loss: 1.5820\nEpoch: 15/20... Step: 4110... Loss: 1.4925... Val Loss: 1.5812\nEpoch: 15/20... Step: 4120... Loss: 1.5013... Val Loss: 1.5811\nEpoch: 15/20... Step: 4130... Loss: 1.4847... Val Loss: 1.5799\nEpoch: 15/20... Step: 4140... Loss: 1.5130... Val Loss: 1.5795\nEpoch: 15/20... Step: 4150... Loss: 1.4533... Val Loss: 1.5796\nEpoch: 15/20... Step: 4160... Loss: 1.5095... Val Loss: 1.5793\nEpoch: 15/20... Step: 4170... Loss: 1.4875... Val Loss: 1.5761\nEpoch: 15/20... Step: 4180... Loss: 1.4931... Val Loss: 1.5779\nEpoch: 16/20... Step: 4190... Loss: 1.4863... Val Loss: 1.5772\nEpoch: 16/20... 
Step: 4200... Loss: 1.4760... Val Loss: 1.5760\nEpoch: 16/20... Step: 4210... Loss: 1.4878... Val Loss: 1.5769\nEpoch: 16/20... Step: 4220... Loss: 1.4996... Val Loss: 1.5761\nEpoch: 16/20... Step: 4230... Loss: 1.4904... Val Loss: 1.5737\nEpoch: 16/20... Step: 4240... Loss: 1.4730... Val Loss: 1.5740\nEpoch: 16/20... Step: 4250... Loss: 1.4659... Val Loss: 1.5737\nEpoch: 16/20... Step: 4260... Loss: 1.4976... Val Loss: 1.5757\nEpoch: 16/20... Step: 4270... Loss: 1.4807... Val Loss: 1.5735\nEpoch: 16/20... Step: 4280... Loss: 1.4912... Val Loss: 1.5758\nEpoch: 16/20... Step: 4290... Loss: 1.4675... Val Loss: 1.5731\nEpoch: 16/20... Step: 4300... Loss: 1.4605... Val Loss: 1.5744\nEpoch: 16/20... Step: 4310... Loss: 1.4791... Val Loss: 1.5725\nEpoch: 16/20... Step: 4320... Loss: 1.4686... Val Loss: 1.5706\nEpoch: 16/20... Step: 4330... Loss: 1.4499... Val Loss: 1.5734\nEpoch: 16/20... Step: 4340... Loss: 1.4555... Val Loss: 1.5772\nEpoch: 16/20... Step: 4350... Loss: 1.5056... Val Loss: 1.5717\nEpoch: 16/20... Step: 4360... Loss: 1.4796... Val Loss: 1.5693\nEpoch: 16/20... Step: 4370... Loss: 1.4859... Val Loss: 1.5689\nEpoch: 16/20... Step: 4380... Loss: 1.4647... Val Loss: 1.5680\nEpoch: 16/20... Step: 4390... Loss: 1.4508... Val Loss: 1.5655\nEpoch: 16/20... Step: 4400... Loss: 1.4955... Val Loss: 1.5674\nEpoch: 16/20... Step: 4410... Loss: 1.4455... Val Loss: 1.5668\nEpoch: 16/20... Step: 4420... Loss: 1.4733... Val Loss: 1.5684\nEpoch: 16/20... Step: 4430... Loss: 1.4682... Val Loss: 1.5672\nEpoch: 16/20... Step: 4440... Loss: 1.5031... Val Loss: 1.5653\nEpoch: 16/20... Step: 4450... Loss: 1.4676... Val Loss: 1.5637\nEpoch: 16/20... Step: 4460... Loss: 1.4712... Val Loss: 1.5658\nEpoch: 17/20... Step: 4470... Loss: 1.4505... Val Loss: 1.5636\nEpoch: 17/20... Step: 4480... Loss: 1.4627... Val Loss: 1.5623\nEpoch: 17/20... Step: 4490... Loss: 1.4700... Val Loss: 1.5628\nEpoch: 17/20... Step: 4500... Loss: 1.4671... Val Loss: 1.5613\nEpoch: 17/20... Step: 4510... Loss: 1.4447... Val Loss: 1.5595\nEpoch: 17/20... Step: 4520... Loss: 1.4374... Val Loss: 1.5614\nEpoch: 17/20... Step: 4530... Loss: 1.4327... Val Loss: 1.5581\nEpoch: 17/20... Step: 4540... Loss: 1.4816... Val Loss: 1.5626\nEpoch: 17/20... Step: 4550... Loss: 1.4419... Val Loss: 1.5580\nEpoch: 17/20... Step: 4560... Loss: 1.4875... Val Loss: 1.5593\nEpoch: 17/20... Step: 4570... Loss: 1.4617... Val Loss: 1.5606\nEpoch: 17/20... Step: 4580... Loss: 1.4383... Val Loss: 1.5610\nEpoch: 17/20... Step: 4590... Loss: 1.4609... Val Loss: 1.5608\nEpoch: 17/20... Step: 4600... Loss: 1.4450... Val Loss: 1.5592\nEpoch: 17/20... Step: 4610... Loss: 1.4359... Val Loss: 1.5599\nEpoch: 17/20... Step: 4620... Loss: 1.5012... Val Loss: 1.5601\nEpoch: 17/20... Step: 4630... Loss: 1.4734... Val Loss: 1.5573\nEpoch: 17/20... Step: 4640... Loss: 1.4394... Val Loss: 1.5556\nEpoch: 17/20... Step: 4650... Loss: 1.4502... Val Loss: 1.5546\nEpoch: 17/20... Step: 4660... Loss: 1.4485... Val Loss: 1.5552\nEpoch: 17/20... Step: 4670... Loss: 1.4753... Val Loss: 1.5530\nEpoch: 17/20... Step: 4680... Loss: 1.4362... Val Loss: 1.5529\nEpoch: 17/20... Step: 4690... Loss: 1.3900... Val Loss: 1.5513\nEpoch: 17/20... Step: 4700... Loss: 1.4380... Val Loss: 1.5523\nEpoch: 17/20... Step: 4710... Loss: 1.4724... Val Loss: 1.5508\nEpoch: 17/20... Step: 4720... Loss: 1.4269... Val Loss: 1.5509\nEpoch: 17/20... Step: 4730... Loss: 1.4460... Val Loss: 1.5478\nEpoch: 17/20... Step: 4740... Loss: 1.4710... Val Loss: 1.5509\nEpoch: 18/20... Step: 4750... Loss: 1.4135... 
Val Loss: 1.5480\nEpoch: 18/20... Step: 4760... Loss: 1.4454... Val Loss: 1.5481\nEpoch: 18/20... Step: 4770... Loss: 1.4193... Val Loss: 1.5502\nEpoch: 18/20... Step: 4780... Loss: 1.4593... Val Loss: 1.5482\nEpoch: 18/20... Step: 4790... Loss: 1.4296... Val Loss: 1.5494\nEpoch: 18/20... Step: 4800... Loss: 1.4445... Val Loss: 1.5480\nEpoch: 18/20... Step: 4810... Loss: 1.4404... Val Loss: 1.5458\nEpoch: 18/20... Step: 4820... Loss: 1.4633... Val Loss: 1.5465\nEpoch: 18/20... Step: 4830... Loss: 1.4369... Val Loss: 1.5454\nEpoch: 18/20... Step: 4840... Loss: 1.4015... Val Loss: 1.5470\nEpoch: 18/20... Step: 4850... Loss: 1.4316... Val Loss: 1.5507\nEpoch: 18/20... Step: 4860... Loss: 1.4204... Val Loss: 1.5464\nEpoch: 18/20... Step: 4870... Loss: 1.4312... Val Loss: 1.5445\nEpoch: 18/20... Step: 4880... Loss: 1.4371... Val Loss: 1.5438\nEpoch: 18/20... Step: 4890... Loss: 1.4082... Val Loss: 1.5467\nEpoch: 18/20... Step: 4900... Loss: 1.4355... Val Loss: 1.5474\nEpoch: 18/20... Step: 4910... Loss: 1.4400... Val Loss: 1.5463\nEpoch: 18/20... Step: 4920... Loss: 1.4321... Val Loss: 1.5429\nEpoch: 18/20... Step: 4930... Loss: 1.4562... Val Loss: 1.5429\nEpoch: 18/20... Step: 4940... Loss: 1.4506... Val Loss: 1.5436\nEpoch: 18/20... Step: 4950... Loss: 1.4298... Val Loss: 1.5408\nEpoch: 18/20... Step: 4960... Loss: 1.4151... Val Loss: 1.5437\nEpoch: 18/20... Step: 4970... Loss: 1.4187... Val Loss: 1.5415\nEpoch: 18/20... Step: 4980... Loss: 1.4321... Val Loss: 1.5412\nEpoch: 18/20... Step: 4990... Loss: 1.4145... Val Loss: 1.5413\nEpoch: 18/20... Step: 5000... Loss: 1.4376... Val Loss: 1.5429\nEpoch: 18/20... Step: 5010... Loss: 1.4175... Val Loss: 1.5406\nEpoch: 18/20... Step: 5020... Loss: 1.3930... Val Loss: 1.5420\nEpoch: 19/20... Step: 5030... Loss: 1.4251... Val Loss: 1.5387\nEpoch: 19/20... Step: 5040... Loss: 1.4134... Val Loss: 1.5381\nEpoch: 19/20... Step: 5050... Loss: 1.4227... Val Loss: 1.5381\nEpoch: 19/20... Step: 5060... Loss: 1.4487... Val Loss: 1.5385\nEpoch: 19/20... Step: 5070... Loss: 1.4381... Val Loss: 1.5364\nEpoch: 19/20... Step: 5080... Loss: 1.4280... Val Loss: 1.5361\nEpoch: 19/20... Step: 5090... Loss: 1.4477... Val Loss: 1.5359\nEpoch: 19/20... Step: 5100... Loss: 1.4019... Val Loss: 1.5364\nEpoch: 19/20... Step: 5110... Loss: 1.4424... Val Loss: 1.5346\nEpoch: 19/20... Step: 5120... Loss: 1.3868... Val Loss: 1.5346\nEpoch: 19/20... Step: 5130... Loss: 1.3836... Val Loss: 1.5377\nEpoch: 19/20... Step: 5140... Loss: 1.3976... Val Loss: 1.5377\nEpoch: 19/20... Step: 5150... Loss: 1.4106... Val Loss: 1.5332\nEpoch: 19/20... Step: 5160... Loss: 1.4534... Val Loss: 1.5349\nEpoch: 19/20... Step: 5170... Loss: 1.4284... Val Loss: 1.5377\nEpoch: 19/20... Step: 5180... Loss: 1.4146... Val Loss: 1.5379\nEpoch: 19/20... Step: 5190... Loss: 1.4330... Val Loss: 1.5374\nEpoch: 19/20... Step: 5200... Loss: 1.4419... Val Loss: 1.5341\nEpoch: 19/20... Step: 5210... Loss: 1.4492... Val Loss: 1.5336\nEpoch: 19/20... Step: 5220... Loss: 1.4155... Val Loss: 1.5365\nEpoch: 19/20... Step: 5230... Loss: 1.4752... Val Loss: 1.5341\nEpoch: 19/20... Step: 5240... Loss: 1.3706... Val Loss: 1.5329\nEpoch: 19/20... Step: 5250... Loss: 1.4004... Val Loss: 1.5312\nEpoch: 19/20... Step: 5260... Loss: 1.3921... Val Loss: 1.5322\nEpoch: 19/20... Step: 5270... Loss: 1.4270... Val Loss: 1.5314\nEpoch: 19/20... Step: 5280... Loss: 1.4502... Val Loss: 1.5294\nEpoch: 19/20... Step: 5290... Loss: 1.4558... Val Loss: 1.5300\nEpoch: 19/20... Step: 5300... Loss: 1.4289... Val Loss: 1.5327\nEpoch: 20/20... 
Step: 5310... Loss: 1.4359... Val Loss: 1.5279\nEpoch: 20/20... Step: 5320... Loss: 1.4375... Val Loss: 1.5282\nEpoch: 20/20... Step: 5330... Loss: 1.4334... Val Loss: 1.5279\nEpoch: 20/20... Step: 5340... Loss: 1.4474... Val Loss: 1.5271\nEpoch: 20/20... Step: 5350... Loss: 1.4059... Val Loss: 1.5272\nEpoch: 20/20... Step: 5360... Loss: 1.4286... Val Loss: 1.5254\nEpoch: 20/20... Step: 5370... Loss: 1.4485... Val Loss: 1.5248\nEpoch: 20/20... Step: 5380... Loss: 1.3954... Val Loss: 1.5282\nEpoch: 20/20... Step: 5390... Loss: 1.3937... Val Loss: 1.5297\nEpoch: 20/20... Step: 5400... Loss: 1.4185... Val Loss: 1.5260\nEpoch: 20/20... Step: 5410... Loss: 1.3918... Val Loss: 1.5290\nEpoch: 20/20... Step: 5420... Loss: 1.3999... Val Loss: 1.5280\nEpoch: 20/20... Step: 5430... Loss: 1.4132... Val Loss: 1.5242\nEpoch: 20/20... Step: 5440... Loss: 1.4410... Val Loss: 1.5241\nEpoch: 20/20... Step: 5450... Loss: 1.4406... Val Loss: 1.5272\nEpoch: 20/20... Step: 5460... Loss: 1.4179... Val Loss: 1.5289\nEpoch: 20/20... Step: 5470... Loss: 1.4426... Val Loss: 1.5257\nEpoch: 20/20... Step: 5480... Loss: 1.4275... Val Loss: 1.5244\nEpoch: 20/20... Step: 5490... Loss: 1.4309... Val Loss: 1.5233\nEpoch: 20/20... Step: 5500... Loss: 1.4061... Val Loss: 1.5253\nEpoch: 20/20... Step: 5510... Loss: 1.3902... Val Loss: 1.5243\nEpoch: 20/20... Step: 5520... Loss: 1.3957... Val Loss: 1.5252\nEpoch: 20/20... Step: 5530... Loss: 1.3723... Val Loss: 1.5241\nEpoch: 20/20... Step: 5540... Loss: 1.3990... Val Loss: 1.5253\nEpoch: 20/20... Step: 5550... Loss: 1.4355... Val Loss: 1.5221\nEpoch: 20/20... Step: 5560... Loss: 1.4209... Val Loss: 1.5208\nEpoch: 20/20... Step: 5570... Loss: 1.4025... Val Loss: 1.5245\nEpoch: 20/20... Step: 5580... Loss: 1.4090... Val Loss: 1.5212\n" ] ], [ [ "## Getting the best model\n\nTo set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network.", "_____no_output_____" ], [ "## Hyperparameters\n\nHere are the hyperparameters for the network.\n\nIn defining the model:\n* `n_hidden` - The number of units in the hidden layers.\n* `n_layers` - Number of hidden LSTM layers to use.\n\nWe assume that dropout probability and learning rate will be kept at the default, in this example.\n\nAnd in training:\n* `batch_size` - Number of sequences running through the network in one pass.\n* `seq_length` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.\n* `lr` - Learning rate for training\n\nHere's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).\n\n> ## Tips and Tricks\n\n>### Monitoring Validation Loss vs. Training Loss\n>If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. 
The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:\n\n> - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.\n> - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)\n\n> ### Approximate number of parameters\n\n> The two most important parameters that control the model are `n_hidden` and `n_layers`. I would advise that you always use `n_layers` of either 2/3. The `n_hidden` can be adjusted based on how much data you have. The two important quantities to keep track of here are:\n\n> - The number of parameters in your model. This is printed when you start training.\n> - The size of your dataset. 1MB file is approximately 1 million characters.\n\n>These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:\n\n> - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `n_hidden` larger.\n> - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.\n\n> ### Best models strategy\n\n>The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.\n\n>It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.\n\n>By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.", "_____no_output_____" ], [ "## Checkpoint\n\nAfter training, we'll save the model so we can load it again later if we need too. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters.", "_____no_output_____" ] ], [ [ "# change the name, for saving multiple files\nmodel_name = 'rnn_x_epoch.net'\n\ncheckpoint = {'n_hidden': net.n_hidden,\n 'n_layers': net.n_layers,\n 'state_dict': net.state_dict(),\n 'tokens': net.chars}\n\nwith open(model_name, 'wb') as f:\n torch.save(checkpoint, f)", "_____no_output_____" ] ], [ [ "---\n## Making Predictions\n\nNow that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. 
Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!\n\n### A note on the `predict` function\n\nThe output of our RNN is from a fully-connected layer and it outputs a **distribution of next-character scores**.\n\n> To actually get the next character, we apply a softmax function, which gives us a *probability* distribution that we can then sample to predict the next character.\n\n### Top K sampling\n\nOur predictions come from a categorical probability distribution over all the possible characters. We can make the sampled text more reasonable to handle (with fewer options) by only considering the $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about [topk, here](https://pytorch.org/docs/stable/torch.html#torch.topk).\n", "_____no_output_____" ] ], [ [ "def predict(net, char, h=None, top_k=None):\n ''' Given a character, predict the next character.\n Returns the predicted character and the hidden state.\n '''\n \n # tensor inputs\n x = np.array([[net.char2int[char]]])\n x = one_hot_encode(x, len(net.chars))\n inputs = torch.from_numpy(x)\n \n if(train_on_gpu):\n inputs = inputs.cuda()\n \n # detach hidden state from history\n h = tuple([each.data for each in h])\n # get the output of the model\n out, h = net(inputs, h)\n\n # get the character probabilities\n p = F.softmax(out, dim=1).data\n if(train_on_gpu):\n p = p.cpu() # move to cpu\n \n # get top characters\n if top_k is None:\n top_ch = np.arange(len(net.chars))\n else:\n p, top_ch = p.topk(top_k)\n top_ch = top_ch.numpy().squeeze()\n \n # select the likely next character with some element of randomness\n p = p.numpy().squeeze()\n char = np.random.choice(top_ch, p=p/p.sum())\n \n # return the encoded value of the predicted char and the hidden state\n return net.int2char[char], h", "_____no_output_____" ] ], [ [ "### Priming and generating text \n\nTypically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. 
In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.", "_____no_output_____" ] ], [ [ "def sample(net, size, prime='The', top_k=None):\n \n if(train_on_gpu):\n net.cuda()\n else:\n net.cpu()\n \n net.eval() # eval mode\n \n # First off, run through the prime characters\n chars = [ch for ch in prime]\n h = net.init_hidden(1)\n for ch in prime:\n char, h = predict(net, ch, h, top_k=top_k)\n\n chars.append(char)\n \n # Now pass in the previous character and get a new one\n for ii in range(size):\n char, h = predict(net, chars[-1], h, top_k=top_k)\n chars.append(char)\n\n return ''.join(chars)", "_____no_output_____" ], [ "print(sample(net, 1000, prime='Anna', top_k=5))", "Anna Alexandrovna thought tears, still worse to him\nso much he had said at it,\nbut a shars, and he had been simply and\nto the merting,\nand that it something that the party was time intone of their conversation, she went into the such as the stable of all of their peeson's confusion is taking to the mind and the place was a lower and sort\nof sockets\nwere a strange insides which was all over she went up and and a sent of the month that had been begon him, but with the country was\nand straight\nand the people that\nhis shoulders.\n\n\"Yes!\" he said a doctor into\na later time. \"I don't love it?\" he\nwere, and had told them.\n\n\"You want to the minutes in the marshant. I was thore a strunk, and\nthis you know the country and terry as a long words. Then here we're sensest of indocking about the\nparty of her. And, about the what there is some tight a lawy, the servanes to strong is to be about. Have between hers is it, and that think we along a man,\nand they would have been askanted. Then will then in the \n" ] ], [ [ "## Loading a checkpoint", "_____no_output_____" ] ], [ [ "# Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net`\nwith open('rnn_x_epoch.net', 'rb') as f:\n checkpoint = torch.load(f)\n \nloaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])\nloaded.load_state_dict(checkpoint['state_dict'])", "_____no_output_____" ], [ "# Sample using a loaded model\nprint(sample(loaded, 2000, top_k=5, prime=\"And Levin said\"))", "And Levin said that was as at the subject,re ti drivoming a look, the care, but\nthe meaning were in love the\nsick and as a sight of this worder head the\nstation was so an another. There was nurse, and and he shook a much place, and asked her\nexcute intime to say about the doon, they had been something to the point\nwith a l eshers; but all she had not seen her father, she saw to see in the same impossible, and\nthey\nwill have always had\nconvinced all their child, a still wife the matter of which he could not tell the words when she would be so telling this strong that they were\nstrengly, that they\nwent and her like at her from the mare, talking happy frank of the sumpering and seemed to seem into sterl at the meadow and he had a man.\n\n\n\nChapter 3\n\n\n\"I have so much to his mind, that is in the countess or as an a prince. 
He did not know the mondy were as he did not\nknow them?\" he added;\n\"with you,\" Anna Alexey Alexandrovitch.\"\n\n\"Well, you seem you,\" said Stepan\nArkadyevitch's strong, and that, and what he was sitting her happy for him to the worls to the most for the mondy with the close.\n\n\"I' lone,\" sas Levin, who had not\nsaying\nits about the door time, and was strick, having\nher soft of the day in spiriculity with her fients, and her far feeling him at his seeps over his sister's still of the person something\nhorses to say,\na matter with a carriage and an home in\nsteps with the most to\nbe and thring the same impression and succeptionally to him in a bettem.\n\n\"The chief secriate you and that you seen a still,\" said Vassenka Patvovna saw her, she\nwould not talk on his brother's change to his mother. But he couldnnitht\nwas starting to the princess, and he saw to that. He went a shoulders. And he hed heart to him and and a companious of the plactless to\nthe\nsubject, and with a long with her. He\nhad not and went on.t a strengly, and\nshe had no\ntearing had concled him in her position\nto him the subject with a bable than, whee a shing she had been that the choman she was asseened. They\nwer\n" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec84678c548fff67987abe95944ff1c898d86699
8,110
ipynb
Jupyter Notebook
Tarea11.ipynb
Oziel-Rojas5/daa_2021_1
03dfe25739b1f3f98d3bc88cd5028b0d41f29eb3
[ "MIT" ]
null
null
null
Tarea11.ipynb
Oziel-Rojas5/daa_2021_1
03dfe25739b1f3f98d3bc88cd5028b0d41f29eb3
[ "MIT" ]
null
null
null
Tarea11.ipynb
Oziel-Rojas5/daa_2021_1
03dfe25739b1f3f98d3bc88cd5028b0d41f29eb3
[ "MIT" ]
null
null
null
35.726872
229
0.457213
[ [ [ "<a href=\"https://colab.research.google.com/github/Oziel-Rojas5/daa_2021_1/blob/master/Tarea11.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "class NodoArbol:\r\n def __init__(self, value, left=None, right=None):\r\n self.data=value\r\n self.left=left\r\n self.right=right", "_____no_output_____" ], [ "class BinarySearchTree:\r\n def __init__(self):\r\n self._root=None\r\n\r\n def insert(self, value):\r\n if self._root==None:\r\n self._root=NodoArbol(value)\r\n else:\r\n self._insert_nodo_(self._root, value)\r\n\r\n def _insert_nodo_(self, nodo, value):\r\n if nodo.data==value:\r\n pass\r\n elif value<nodo.data: \r\n if nodo.left==None:\r\n nodo.left=NodoArbol(value)\r\n else:\r\n self._insert_nodo_(nodo.left,value) \r\n else: \r\n if nodo.right==None:\r\n nodo.right=NodoArbol(value)\r\n else:\r\n self._insert_nodo_(nodo.right,value) \r\n\r\n def buscar(self, value):\r\n if self._root==None:\r\n return None\r\n else:\r\n \r\n return self.__busca_nodo(self._root,value)\r\n\r\n def __busca_nodo(self, nodo, value):\r\n if nodo==None:\r\n return None\r\n elif nodo.data==value:\r\n return nodo\r\n elif value<nodo.data:\r\n return self.__busca_nodo(nodo.left,value)\r\n else:\r\n return self.__busca_nodo(nodo.right,value)\r\n\r\n def transversal(self, format=\"inorden\"):\r\n if format==\"inorden\":\r\n self.__recorrido_in(self._root)\r\n elif format==\"preorden\":\r\n self.__recorrido_pre(self._root)\r\n elif format==\"posorden\":\r\n self.__recorrido_pos(self._root)\r\n else:\r\n print(\"Formato de recorrido no valido\") \r\n\r\n def __recorrido_pre(self, nodo):\r\n if nodo!=None:\r\n print(nodo.data,end=\",\")\r\n self.__recorrido_pre(nodo.left)\r\n self.__recorrido_pre(nodo.right)\r\n \r\n def __recorrido_in(self, nodo):\r\n if nodo!=None:\r\n self.__recorrido_in(nodo.left)\r\n print(nodo.data,end=\",\")\r\n self.__recorrido_in(nodo.right)\r\n \r\n def __recorrido_pos(self, nodo):\r\n if nodo!=None:\r\n self.__recorrido_pos(nodo.left)\r\n self.__recorrido_pos(nodo.right)\r\n print(nodo.data,end=\",\")\r\n\r\n def eliminar(self, value):\r\n self.__eliminar_nodo(self._root, self._root, self._root, None, value)\r\n\r\n def __eliminar_nodo(self, root, nodo, anterior, actual, value):\r\n if nodo==None:\r\n return print(\"No existe ese nodo\")\r\n if nodo.data==value:\r\n if nodo.left==None and nodo.right==None: \r\n if actual==\"izq\":\r\n anterior.left=None\r\n elif actual==\"der\":\r\n anterior.right=None\r\n print(\"solo se borro el nodo\")\r\n elif nodo.left==None and nodo.right!=None:\r\n if actual==\"izq\":\r\n anterior.left=nodo.right\r\n else:\r\n anterior.right=nodo.right\r\n print(\"se paso el unico nodo derecho hacia arriba\")\r\n elif nodo.left!=None and nodo.right==None:\r\n if actual==\"izq\":\r\n anterior.left=nodo.left\r\n else:\r\n anterior.right=nodo.left\r\n print(\"se paso el unico nodo izquierdo hacia arriba\")\r\n elif nodo.left!=None and nodo.right!=None: \r\n print(\"se hizo algo complejo\")\r\n tmp,anterior2 =self.nodoMasBajo(nodo.right, nodo)\r\n if nodo.data==anterior2.data:\r\n anterior2.right=tmp.right\r\n elif nodo.data!=anterior2.data:\r\n anterior2.left=tmp.right\r\n if actual==\"izq\":\r\n anterior.left=tmp\r\n else:\r\n anterior.right=tmp\r\n tmp.left=nodo.left\r\n tmp.right=nodo.right\r\n elif value<nodo.data:\r\n return self.__eliminar_nodo(root, nodo.left, nodo, \"izq\", value)\r\n else:\r\n return self.__eliminar_nodo(root, nodo.right, nodo, 
\"der\", value)\r\n \r\n def nodoMasBajo(self, nodo, anterior):\r\n if nodo.left==None:\r\n return nodo, anterior\r\n elif nodo.left!=None:\r\n return self.nodoMasBajo(nodo.left, nodo)", "_____no_output_____" ], [ "arbol=BinarySearchTree()\r\narbol.insert(50)\r\narbol.insert(40)\r\narbol.insert(80)\r\narbol.insert(20)\r\narbol.insert(45)\r\narbol.insert(60)\r\narbol.insert(90)\r\narbol.insert(85)\r\narbol.insert(100)\r\narbol.insert(95)\r\n\r\nprint(arbol.transversal())\r\nprint(\"____\")\r\n\r\narbol.eliminar(80)\r\nprint(\"______\")\r\nprint(arbol._root.data)\r\nprint(arbol._root.left.data)\r\nprint(arbol._root.right.data)\r\nprint(arbol._root.left.left.data)\r\nprint(arbol._root.right.left.data)\r\nprint(arbol.transversal())\r\n\r\nprint(arbol._root.right.right.right.left.data)", "20,40,45,50,60,80,85,90,95,100,None\n____\nse hizo algo complejo\n______\n50\n40\n85\n20\n60\n20,40,45,50,60,85,90,95,100,None\n95\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
ec8468529a8386ea7eed1a045aeaab1734a3ddea
11,667
ipynb
Jupyter Notebook
final_final_covid_lab.ipynb
Yilin-Ye-official/ml_covid_rules_lab
b58537af74f42509edf6824ba7c14248555f544b
[ "MIT" ]
null
null
null
final_final_covid_lab.ipynb
Yilin-Ye-official/ml_covid_rules_lab
b58537af74f42509edf6824ba7c14248555f544b
[ "MIT" ]
null
null
null
final_final_covid_lab.ipynb
Yilin-Ye-official/ml_covid_rules_lab
b58537af74f42509edf6824ba7c14248555f544b
[ "MIT" ]
null
null
null
36.688679
275
0.519157
[ [ [ "# Covid Rules Lab\n\n\nimplement the PRISM algorithm to extract the classification rules with the highest accuracy and coverage from the hospital patients dataset ", "_____no_output_____" ] ], [ [ "def count_labels(rows):\n label_count = {}\n labels = rows[\"outcome\"].unique()\n for label in labels:\n label_count[label]=rows[rows[\"outcome\"]==label].shape[0]\n \n return label_count\n# here we will get a dictionary \n# which {(label,3),(label,4),...}\n# such that label is the different names of attributes like: red, blue, green and the number is the occurence\n\ndef split(rows, column, value):\n temp_rows = rows.copy()\n if column==None:\n return rows\n \n for i in range(len(column)):\n \n if column[i]!= \"age\":\n \n temp_rows = temp_rows[temp_rows[column[i]] == value[i]]\n \n else:\n temp_rows = temp_rows[temp_rows[column[i]] >= value[i] ]#for numeric attribute interpret as greater than or equal to\n return temp_rows\n# divide rows according to attributes\n# if the attribute is a number, divide the rows by >= number and == number\n# if the attribute is not a number, divide by rows == or not ==\n\ndef coverage(rows, column, value):\n \"\"\"\n rows: all the rows left\n column: the specific column where value belongs to\n value: one trait that we are evaluating like 'red'\n \"\"\"\n temp_rows = split(rows, column, value)\n coverage = temp_rows.shape[0] #rows that has this sepcific value like 'red'\n # and the specific attribute\n return coverage\n\ndef cal_accuracy(rows, column, value, c,attribute):\n \"\"\"\n rows: all the rows left\n column: the specific column where value belongs to\n value: one trait that we are evaluating like 'red'\n attribute: like 'T', 'F'\n \"\"\"\n temp_rows = split(rows, column, value)\n \n true_rows_len = temp_rows[temp_rows[attribute]==c].shape[0]\n if true_rows_len == 0:\n return 0\n total = temp_rows.shape[0]\n \n accuracy = true_rows_len/total\n \n return accuracy\n\nclass Rule:\n def __init__(self, accuracy = None, attributes = None, covered=None, value = None, label=None):\n self.accuracy = accuracy\n self.attributes = attributes\n self.covered = covered # the rows that are covered by this rule\n self.value = value #format as (0, 'female')\n self.label = label #the labels of each sub-rules. like ['yes','no',...]", "_____no_output_____" ], [ "def generate_rule(rows, columns_list,score_func1=cal_accuracy, score_func2=coverage, accu_thresh=0, cover_thresh=0):\n #print(score_func2(rows,['asthma', 'imm_supr', 'sex'], ['yes', 'yes', 'female']))\n if len(rows) == 0:\n print(\"Done!\")\n return\n labels =count_labels(rows)\n \n all_label_best_rule = Rule(label=labels,attributes=[],value = [])\n all_label_best_rule.accuracy = 0\n \n for label in labels:\n one_label_best_rule = Rule(label=label,attributes=[],value = [])\n one_label_best_rule.accuracy = 0\n \n #Create a rule R with an empty left-hand side that predicts class C\n R = Rule(label=label,attributes=[],value = [],accuracy=0) # the labels of each rule. 
like 'alive','dead',...\n #R.accuracy = score_func1(rows, R.attributes, R.value, R.label,columns_list[-1])#find_accuracy(data, rule, c, attr[-1])\n #R.covered = score_func2(rows,R.attributes, R.value)\n remainder_cols = columns_list[:-1].copy()\n rows_copy = rows[rows[columns_list[-1]]== label].copy()\n cycle_time = 0\n while R.accuracy< accu_thresh:\n cycle_time+=1\n for col in remainder_cols:\n values = rows_copy[col].unique() \n for val in values:\n R_att = R.attributes.copy()\n R_val = R.value.copy()\n R_att.append(col)\n R_val.append(val)\n \n new_R = Rule(attributes = R_att, value = R_val, label=label)\n new_R.accuracy = score_func1(rows, new_R.attributes, new_R.value, label, columns_list[-1])\n new_R.covered = score_func2(rows,new_R.attributes, new_R.value)\n \n if new_R.accuracy >= one_label_best_rule.accuracy and new_R.covered >= cover_thresh:\n if new_R.accuracy == one_label_best_rule.accuracy: \n if new_R.covered > one_label_best_rule.covered:\n one_label_best_rule= new_R\n else:one_label_best_rule= new_R\n else:pass\n \n flag = False \n if one_label_best_rule.accuracy > R.accuracy:\n flag = True\n elif one_label_best_rule.accuracy==R.accuracy:\n if one_label_best_rule.covered>= R.covered:\n flag = True\n if flag:\n R=one_label_best_rule\n if len(R.attributes)==0:\n pass\n \n elif R.attributes[-1] in remainder_cols:\n remainder_cols.remove(R.attributes[-1])\n if R.accuracy>= accu_thresh and R.covered>= cover_thresh:\n if R.accuracy > all_label_best_rule.accuracy:\n all_label_best_rule = R\n elif R.accuracy == all_label_best_rule.accuracy and R.covered >= all_label_best_rule.covered:\n all_label_best_rule = R\n if len(remainder_cols)==0 or cycle_time>len(remainder_cols):\n break\n \n \n if all_label_best_rule.accuracy != 0:\n return all_label_best_rule", "_____no_output_____" ], [ "data_file = \"/Users/elaine/Desktop/ML2020labs/covid_categorical_good.csv\"\n\nimport pandas as pd\ndata = pd.read_csv(data_file)\ndata = data.dropna(how=\"any\")\n\ndef recursive_rules(rows, generate_rule):\n columns_list = rows.columns.to_numpy().tolist()\n rows_copy = rows.copy()\n flag = False\n while len(rows_copy) != 0:\n one_rule = generate_rule(rows_copy,columns_list, score_func1=cal_accuracy, score_func2=coverage, accu_thresh=0.8, cover_thresh=30)\n if one_rule == None:\n break\n else:\n flag = True\n #print(one_rule.accuracy,one_rule.label,one_rule.attributes,one_rule.covered,one_rule.value)\n for i in range(len(one_rule.attributes)):\n print(\"if \", one_rule.attributes[i], one_rule.value[i])\n print(\"then \", one_rule.label)\n print(\"coverage: \", one_rule.covered)\n print(\"accuracy: \", one_rule.accuracy)\n print(\"\\n\")\n rows_best = split(rows_copy, one_rule.attributes,one_rule.value)\n rows_copy = rows_copy[~rows_copy.index.isin(rows_best.index)]\n if not flag:\n print(\"threshold is too high or no available rules left\")\n", "_____no_output_____" ], [ "recursive_rules(data, generate_rule)", "if hypertension no\nthen alive\ncoverage: 175108\naccuracy: 0.9118543984283984\n\n\nif asthma yes\nthen alive\ncoverage: 1447\naccuracy: 0.8127159640635798\n\n\nif diabetes no\nif sex female\nthen alive\ncoverage: 10678\naccuracy: 0.8185989885746394\n\n\n" ] ], [ [ "# reprot \n\nthis is the rules i got for the coverage 20, accuracy 0.9\n\nif hypertension no\nthen alive\ncoverage: 175108\naccuracy: 0.9118543984283984\n\n\nif asthma yes\nif imm_supr yes\nif sex female\nthen alive\ncoverage: 63\naccuracy: 0.9047619047619048\n\n\nif asthma yes\nif diabetes no\nif sex female\nif tobacco yes\nthen 
alive\ncoverage: 26\naccuracy: 0.9615384615384616\n\n\nif asthma yes\nif sex female\nif tobacco yes\nthen alive\ncoverage: 22\naccuracy: 0.9545454545454546\n\n\nif asthma yes\nif diabetes no\nif sex female\nif obesity no\nif cardiovascular no\nif age 71\nthen alive\ncoverage: 23\naccuracy: 0.9130434782608695\n\nI found that the rules mostly tell me what the conditions are for the 'alive' result rather than the 'dead' result.\nI found that some of the rules match what we know about COVID (if a person has a serious illness, then this person will probably die); for example, the first rule says that if a person doesn't have hypertension, then this person has a higher than 0.9 chance of surviving.\nHowever, I also found a rule saying that if a person has asthma and uses tobacco, this person will probably survive, which is very strange.\n\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
ec84859c031680c903b12150538b9bcd71c1ab38
14,381
ipynb
Jupyter Notebook
Curso_de_Python_3_Mundo_1.ipynb
Andre-Seiji/Curso-Python-3---Canal-Curso-em-Video
ac8cded13e7db0326be638c7e6124ae672ec410f
[ "MIT" ]
null
null
null
Curso_de_Python_3_Mundo_1.ipynb
Andre-Seiji/Curso-Python-3---Canal-Curso-em-Video
ac8cded13e7db0326be638c7e6124ae672ec410f
[ "MIT" ]
null
null
null
Curso_de_Python_3_Mundo_1.ipynb
Andre-Seiji/Curso-Python-3---Canal-Curso-em-Video
ac8cded13e7db0326be638c7e6124ae672ec410f
[ "MIT" ]
null
null
null
23.614122
103
0.40477
[ [ [ "# **Curso Python - Curso em Vídeo - Mundo 1**", "_____no_output_____" ] ], [ [ "#Como transformar uma celula especifica em zero\ndf.at[11, 'No_Of_Clients'] = 0", "_____no_output_____" ], [ "print('Olá Mundo')", "Olá Mundo\n" ], [ "print(7+4)", "11\n" ], [ "#Me mostre 7 e em seguida 4\nprint('7'+'4')", "74\n" ], [ "7+4", "_____no_output_____" ], [ "'7'+'4'", "_____no_output_____" ], [ "print('Olá', 5)", "Olá 5\n" ], [ "nome='Guanabara'\nidade=25\npeso=75.8\n#A virgula deve ser usada por causa da presenca de numeros\nprint(nome, idade, peso)", "Guanabara 25 75.8\n" ], [ "nome=input('Qual é o seu nome?')\nidade=input('Quantos anos você tem?')\npeso=input('Qual é o seu peso?')\nprint(nome, idade, peso)", "Qual é o seu nome?João\nQuantos anos você tem?44\nQual é o seu peso?190\nJoão 44 190\n" ], [ "n1=int(input('Digite um número:'))\nn2=int(input('Digite um segundo número:'))\ns=n1+n2\nprint('A soma vale', s)\n#Uma segunda forma de printar essa mensagem\nprint('A soma vale {}'32 .format(s))", "Digite um número:5\nDigite um segundo número:3\nA soma vale 8\nA soma vale 8\n" ], [ "n1=int(input('Digite um número:'))\nn2=int(input('Digite um segundo número:'))\ns=n1+n2\n#print('A soma entre', n1, 'e', n2, 'vale:', s)\nprint('A soma entre {} e {} vale {}'.format(n1, n2, s))", "Digite um número:5\nDigite um segundo número:4\nA soma entre 5 e 4 vale 9\n" ], [ "#Ordem de precedencia\n# (), **, *, /, //, %. +, -", "_____no_output_____" ], [ "5+2", "_____no_output_____" ], [ "5-2", "_____no_output_____" ], [ "5*2", "_____no_output_____" ], [ "5/2", "_____no_output_____" ], [ "5**2", "_____no_output_____" ], [ "5//2", "_____no_output_____" ], [ "5%2", "_____no_output_____" ], [ "nome=input('Qual é o seu nome?')\nprint('Prazer em te conhecer {:>15}!'.format(nome))\nprint('Prazer em te conhecer {:<15}!'.format(nome))\nprint('Prazer em te conhecer {:^15}!'.format(nome))\nprint('Prazer em te conhecer {:=^15}!'.format(nome))", "Qual é o seu nome?Ana\nPrazer em te conhecer Ana!\nPrazer em te conhecer Ana !\nPrazer em te conhecer Ana !\nPrazer em te conhecer ======Ana======!\n" ], [ "n1=int(input('Digite um número:'))\nn2=int(input('Digite um segundo número:'))\ns=n1+n2\nm=n1*n2\nd=n1/n2\ndi=n1//n2\ne=n1**n2\nprint('A soma é {}, \\n o produto é {} \\n e a divisão é {:.3f}'.format(s, m, d), end=' ')\nprint('Divisão inteira {} e potência {}'.format(di, e))", "Digite um número:4\nDigite um segundo número:5\nA soma é 9, \n o produto é 20 \n e a divisão é 0.800 Divisão inteira 0 e potência 1024\n" ], [ "#Exemplo de uso do f'string\ndef arruma_coluna(nome):\n # nome[0] -> primeira posição da tupla\n if 'Unnamed' in nome[0] and 'Unnamed' in nome[1]:\n return ' '\n elif 'Unnamed' in nome[0]: # Tem nome apenas na segunda parte\n return nome[1]\n else: # Concatenamos os dois nomes em uma coluna só\n return f'{nome[0]} - {nome[1]}'", "_____no_output_____" ], [ "frase='Curso em Vídeo Python'\nprint(frase[1::2])", "us mVdoPto\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec848872ddc7aa2c72a55009142d0af4c4b85596
37,248
ipynb
Jupyter Notebook
docs/tutorials/transform/census.ipynb
quanjielin/tfx
7a56a2ff67f8c3d28e84865566a90ca0861cedc5
[ "Apache-2.0" ]
null
null
null
docs/tutorials/transform/census.ipynb
quanjielin/tfx
7a56a2ff67f8c3d28e84865566a90ca0861cedc5
[ "Apache-2.0" ]
null
null
null
docs/tutorials/transform/census.ipynb
quanjielin/tfx
7a56a2ff67f8c3d28e84865566a90ca0861cedc5
[ "Apache-2.0" ]
null
null
null
45.039903
707
0.574125
[ [ [ "<div class=\"devsite-table-wrapper\"><table class=\"tfo-notebook-buttons\" align=\"left\">\n<td><a target=\"_blank\" href=\"https://www.tensorflow.org/tfx/tutorials/transform/census\">\n<img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a></td>\n<td><a target=\"_blank\" href=\"https://colab.sandbox.google.com/github/tensorflow/tfx/blob/master/docs/tutorials/transform/census.ipynb\">\n<img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Run in Google Colab</a></td>\n<td><a target=\"_blank\" href=\"https://github.com/tensorflow/tfx/blob/master/docs/tutorials/transform/census.ipynb\">\n<img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">View source on GitHub</a></td>\n</table></div>", "_____no_output_____" ], [ "##### Copyright &copy; 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Preprocessing data with TensorFlow Transform\n***The Feature Engineering Component of TensorFlow Extended (TFX)***\n\nThis example colab notebook provides a somewhat more advanced example of how <a target='_blank' href='https://www.tensorflow.org/tfx/transform/'>TensorFlow Transform</a> (`tf.Transform`) can be used to preprocess data using exactly the same code for both training a model and serving inferences in production.\n\nTensorFlow Transform is a library for preprocessing input data for TensorFlow, including creating features that require a full pass over the training dataset. For example, using TensorFlow Transform you could:\n\n* Normalize an input value by using the mean and standard deviation\n* Convert strings to integers by generating a vocabulary over all of the input values\n* Convert floats to integers by assigning them to buckets, based on the observed data distribution\n\nTensorFlow has built-in support for manipulations on a single example or a batch of examples. `tf.Transform` extends these capabilities to support full passes over the entire training dataset.\n\nThe output of `tf.Transform` is exported as a TensorFlow graph which you can use for both training and serving. Using the same graph for both training and serving can prevent skew, since the same transformations are applied in both stages.\n\nKey Point: In order to understand `tf.Transform` and how it works with Apache Beam, you'll need to know a little bit about Apache Beam itself. The <a target='_blank' href='https://beam.apache.org/documentation/programming-guide/'>Beam Programming Guide</a> is a great place to start.", "_____no_output_____" ], [ "##What we're doing in this example\n\nIn this example we'll be processing a <a target='_blank' href='https://archive.ics.uci.edu/ml/machine-learning-databases/adult'>widely used dataset containing census data</a>, and training a model to do classification. 
Along the way we'll be transforming the data using `tf.Transform`.\n\nKey Point: As a modeler and developer, think about how this data is used and the potential benefits and harm a model's predictions can cause. A model like this could reinforce societal biases and disparities. Is a feature relevant to the problem you want to solve or will it introduce bias? For more information, read about <a target='_blank' href='https://developers.google.com/machine-learning/fairness-overview/'>ML fairness</a>.\n\nNote: <a target='_blank' href='https://www.tensorflow.org/tfx/model_analysis'>TensorFlow Model Analysis</a> is a powerful tool for understanding how well your model predicts for various segments of your data, including understanding how your model may reinforce societal biases and disparities.", "_____no_output_____" ], [ "## Python check, imports, and globals\nFirst we'll make sure that we're using Python 3, and then go ahead and install and import the stuff we need.", "_____no_output_____" ] ], [ [ "import sys\n\n# Confirm that we're using Python 3\nassert sys.version_info.major == 3, 'Oops, not running Python 3. Use Runtime > Change runtime type'", "_____no_output_____" ], [ "import argparse\nimport os\nimport pprint\nimport tempfile\nimport urllib.request\nimport zipfile\n\ntemp = tempfile.gettempdir()\nzip, headers = urllib.request.urlretrieve('https://storage.googleapis.com/tfx-colab-datasets/census.zip')\nzipfile.ZipFile(zip).extractall(temp)\nzipfile.ZipFile(zip).close()\nurllib.request.urlcleanup()\n\ntrain = os.path.join(temp, 'census/adult.data')\ntest = os.path.join(temp, 'census/adult.test')\n\nprint('Installing TensorFlow Transform. This will take a minute, ignore the warnings')\n!pip install -q tensorflow_transform\nprint('Installing Apache Beam. This will take a minute, ignore the warnings')\n!pip install -q apache_beam\nimport tensorflow_transform as tft\nimport apache_beam as beam\n\nimport tensorflow as tf\nimport tensorflow_transform.beam as tft_beam\nfrom tensorflow_transform.tf_metadata import dataset_metadata\nfrom tensorflow_transform.tf_metadata import dataset_schema\ntf.logging.set_verbosity(tf.logging.ERROR)", "_____no_output_____" ] ], [ [ "### Name our columns\nWe'll create some handy lists for referencing the columns in our dataset.", "_____no_output_____" ] ], [ [ "CATEGORICAL_FEATURE_KEYS = [\n 'workclass',\n 'education',\n 'marital-status',\n 'occupation',\n 'relationship',\n 'race',\n 'sex',\n 'native-country',\n]\nNUMERIC_FEATURE_KEYS = [\n 'age',\n 'capital-gain',\n 'capital-loss',\n 'hours-per-week',\n]\nOPTIONAL_NUMERIC_FEATURE_KEYS = [\n 'education-num',\n]\nLABEL_KEY = 'label'", "_____no_output_____" ] ], [ [ "###Define our features and schema\nLet's define a schema based on what types the columns are in our input. Among other things this will help with importing them correctly.", "_____no_output_____" ] ], [ [ "RAW_DATA_FEATURE_SPEC = dict(\n [(name, tf.FixedLenFeature([], tf.string))\n for name in CATEGORICAL_FEATURE_KEYS] +\n [(name, tf.FixedLenFeature([], tf.float32))\n for name in NUMERIC_FEATURE_KEYS] +\n [(name, tf.VarLenFeature(tf.float32))\n for name in OPTIONAL_NUMERIC_FEATURE_KEYS] +\n [(LABEL_KEY, tf.FixedLenFeature([], tf.string))]\n)\n\nRAW_DATA_METADATA = dataset_metadata.DatasetMetadata(\n dataset_schema.from_feature_spec(RAW_DATA_FEATURE_SPEC))", "_____no_output_____" ] ], [ [ "###Setting hyperparameters and basic housekeeping\nConstants and hyperparameters used for training. 
The bucket size includes all listed categories in the dataset description as well as one extra for \"?\" which represents unknown.\n\nNote: The number of instances will be computed by `tf.Transform` in future versions, in which case it can be read from the metadata. Similarly BUCKET_SIZES will not be needed as this information will be stored in the metadata for each of the columns.", "_____no_output_____" ] ], [ [ "testing = os.getenv(\"WEB_TEST_BROWSER\", False)\nif testing:\n TRAIN_NUM_EPOCHS = 1\n NUM_TRAIN_INSTANCES = 1\n TRAIN_BATCH_SIZE = 1\n NUM_TEST_INSTANCES = 1\nelse:\n TRAIN_NUM_EPOCHS = 16\n NUM_TRAIN_INSTANCES = 32561\n TRAIN_BATCH_SIZE = 128\n NUM_TEST_INSTANCES = 16281\n\n# Names of temp files\nTRANSFORMED_TRAIN_DATA_FILEBASE = 'train_transformed'\nTRANSFORMED_TEST_DATA_FILEBASE = 'test_transformed'\nEXPORTED_MODEL_DIR = 'exported_model_dir'", "_____no_output_____" ] ], [ [ "##Cleaning", "_____no_output_____" ], [ "###Create a Beam Transform for cleaning our input data\nWe'll create a **Beam Transform** by creating a subclass of Apache Beam's `PTransform` class and overriding the `expand` method to specify the actual processing logic. A `PTransform` represents a data processing operation, or a step, in your pipeline. Every `PTransform` takes one or more `PCollection` objects as input, performs a processing function that you provide on the elements of that `PCollection`, and produces zero or more output PCollection objects.\n\nOur transform class will apply Beam's `ParDo` on the input `PCollection` containing our census dataset, producing clean data in an output `PCollection`.\n\nKey Point: The `expand` method of a `PTransform` is not meant to be invoked directly by the user of a transform. Instead, you should call the `apply` method on the `PCollection` itself, with the transform as an argument. This allows transforms to be nested within the structure of your pipeline.", "_____no_output_____" ] ], [ [ "class MapAndFilterErrors(beam.PTransform):\n \"\"\"Like beam.Map but filters out errors in the map_fn.\"\"\"\n\n class _MapAndFilterErrorsDoFn(beam.DoFn):\n \"\"\"Count the bad examples using a beam metric.\"\"\"\n\n def __init__(self, fn):\n self._fn = fn\n # Create a counter to measure number of bad elements.\n self._bad_elements_counter = beam.metrics.Metrics.counter(\n 'census_example', 'bad_elements')\n\n def process(self, element):\n try:\n yield self._fn(element)\n except Exception: # pylint: disable=broad-except\n # Catch any exception from the above call.\n self._bad_elements_counter.inc(1)\n\n def __init__(self, fn):\n self._fn = fn\n\n def expand(self, pcoll):\n return pcoll | beam.ParDo(self._MapAndFilterErrorsDoFn(self._fn))", "_____no_output_____" ] ], [ [ "##Preprocessing with `tf.Transform`", "_____no_output_____" ], [ "###Create a `tf.Transform` preprocessing_fn\nThe _preprocessing function_ is the most important concept of tf.Transform. A preprocessing function is where the transformation of the dataset really happens. It accepts and returns a dictionary of tensors, where a tensor means a [`Tensor`](https://www.tensorflow.org/api_docs/python/tf/Tensor) or [`SparseTensor`](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). There are two main groups of API calls that typically form the heart of a preprocessing function:\n\n1. **TensorFlow Ops:** Any function that accepts and returns tensors, which usually means TensorFlow ops. These add TensorFlow operations to the graph that transform raw data into transformed data one feature vector at a time. 
These will run for every example, during both training and serving.\n2. **TensorFlow Transform Analyzers:** Any of the analyzers provided by tf.Transform. Analyzers also accept and return tensors, but unlike TensorFlow ops they only run once, during training, and typically make a full pass over the entire training dataset. They create [tensor constants](https://www.tensorflow.org/api_docs/python/tf/constant), which are added to your graph. For example, `tft.min` computes the minimum of a tensor over the training dataset. tf.Transform provides a fixed set of analyzers, but this will be extended in future versions.\n\nCaution: When you apply your preprocessing function to serving inferences, the constants that were created by analyzers during training do not change. If your data has trend or seasonality components, plan accordingly.", "_____no_output_____" ] ], [ [ "def preprocessing_fn(inputs):\n \"\"\"Preprocess input columns into transformed columns.\"\"\"\n # Since we are modifying some features and leaving others unchanged, we\n # start by setting `outputs` to a copy of `inputs`.\n outputs = inputs.copy()\n\n # Scale numeric columns to have range [0, 1].\n for key in NUMERIC_FEATURE_KEYS:\n outputs[key] = tft.scale_to_0_1(outputs[key])\n\n for key in OPTIONAL_NUMERIC_FEATURE_KEYS:\n # This is a SparseTensor because it is optional. Here we fill in a default\n # value when it is missing.\n dense = tf.sparse_to_dense(outputs[key].indices,\n [outputs[key].dense_shape[0], 1],\n outputs[key].values, default_value=0.)\n # Reshaping from a batch of vectors of size 1 to a batch of scalars.\n dense = tf.squeeze(dense, axis=1)\n outputs[key] = tft.scale_to_0_1(dense)\n\n # For all categorical columns except the label column, we generate a\n # vocabulary but do not modify the feature. This vocabulary is instead\n # used in the trainer, by means of a feature column, to convert the feature\n # from a string to an integer id.\n for key in CATEGORICAL_FEATURE_KEYS:\n tft.vocabulary(inputs[key], vocab_filename=key)\n\n # For the label column we provide the mapping from string to index.\n table = tf.contrib.lookup.index_table_from_tensor(['>50K', '<=50K'])\n outputs[LABEL_KEY] = table.lookup(outputs[LABEL_KEY])\n\n return outputs", "_____no_output_____" ] ], [ [ "###Transform the data\nNow we're ready to start transforming our data in an Apache Beam pipeline.\n\n1. Read in the data using the CSV reader\n1. Clean it using our new `MapAndFilterErrors` transform\n1. Transform it using a preprocessing pipeline that scales numeric data and converts categorical data from strings to int64 value indices, by creating a vocabulary for each category\n1. Write out the result as a `TFRecord` of `Example` protos, which we will use for training a model later\n\n<aside class=\"key-term\"><b>Key Term:</b> <a target='_blank' href='https://beam.apache.org/'>Apache Beam</a> uses a <a target='_blank' href='https://beam.apache.org/documentation/programming-guide/#applying-transforms'>special syntax to define and invoke transforms</a>. For example, in this line:\n\n<code><blockquote>result = pass_this | 'name this step' >> to_this_call</blockquote></code>\n\nThe method <code>to_this_call</code> is being invoked and passed the object called <code>pass_this</code>, and <a target='_blank' href='https://stackoverflow.com/questions/50519662/what-does-the-redirection-mean-in-apache-beam-python'>this operation will be referred to as <code>name this step</code> in a stack trace</a>. 
The result of the call to <code>to_this_call</code> is returned in <code>result</code>. You will often see stages of a pipeline chained together like this:\n\n<code><blockquote>result = apache_beam.Pipeline() | 'first step' >> do_this_first() | 'second step' >> do_this_last()</blockquote></code>\n\nand since that started with a new pipeline, you can continue like this:\n\n<code><blockquote>next_result = result | 'doing more stuff' >> another_function()</blockquote></code></aside>", "_____no_output_____" ] ], [ [ "def transform_data(train_data_file, test_data_file, working_dir):\n \"\"\"Transform the data and write out as a TFRecord of Example protos.\n\n Read in the data using the CSV reader, and transform it using a\n preprocessing pipeline that scales numeric data and converts categorical data\n from strings to int64 value indices, by creating a vocabulary for each\n category.\n\n Args:\n train_data_file: File containing training data\n test_data_file: File containing test data\n working_dir: Directory to write transformed data and metadata to\n \"\"\"\n\n # The \"with\" block will create a pipeline, and run that pipeline at the exit\n # of the block.\n with beam.Pipeline() as pipeline:\n with tft_beam.Context(temp_dir=tempfile.mkdtemp()):\n # Create a coder to read the census data with the schema. To do this we\n # need to list all columns in order since the schema doesn't specify the\n # order of columns in the csv.\n ordered_columns = [\n 'age', 'workclass', 'fnlwgt', 'education', 'education-num',\n 'marital-status', 'occupation', 'relationship', 'race', 'sex',\n 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country',\n 'label'\n ]\n converter = tft.coders.CsvCoder(ordered_columns, RAW_DATA_METADATA.schema)\n\n # Read in raw data and convert using CSV converter. Note that we apply\n # some Beam transformations here, which will not be encoded in the TF\n # graph since we don't do them from within tf.Transform's methods\n # (AnalyzeDataset, TransformDataset etc.). These transformations are just\n # to get data into a format that the CSV converter can read, in particular\n # removing spaces after commas.\n #\n # We use MapAndFilterErrors instead of Map to filter out decode errors in\n # convert.decode which should only occur for the trailing blank line.\n raw_data = (\n pipeline\n | 'ReadTrainData' >> beam.io.ReadFromText(train_data_file)\n | 'FixCommasTrainData' >> beam.Map(\n lambda line: line.replace(', ', ','))\n | 'DecodeTrainData' >> MapAndFilterErrors(converter.decode))\n\n # Combine data and schema into a dataset tuple. Note that we already used\n # the schema to read the CSV data, but we also need it to interpret\n # raw_data.\n raw_dataset = (raw_data, RAW_DATA_METADATA)\n transformed_dataset, transform_fn = (\n raw_dataset | tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))\n transformed_data, transformed_metadata = transformed_dataset\n transformed_data_coder = tft.coders.ExampleProtoCoder(\n transformed_metadata.schema)\n\n _ = (\n transformed_data\n | 'EncodeTrainData' >> beam.Map(transformed_data_coder.encode)\n | 'WriteTrainData' >> beam.io.WriteToTFRecord(\n os.path.join(working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE)))\n\n # Now apply transform function to test data. 
In this case we remove the\n # trailing period at the end of each line, and also ignore the header line\n # that is present in the test data file.\n raw_test_data = (\n pipeline\n | 'ReadTestData' >> beam.io.ReadFromText(test_data_file,\n skip_header_lines=1)\n | 'FixCommasTestData' >> beam.Map(\n lambda line: line.replace(', ', ','))\n | 'RemoveTrailingPeriodsTestData' >> beam.Map(lambda line: line[:-1])\n | 'DecodeTestData' >> MapAndFilterErrors(converter.decode))\n\n raw_test_dataset = (raw_test_data, RAW_DATA_METADATA)\n\n transformed_test_dataset = (\n (raw_test_dataset, transform_fn) | tft_beam.TransformDataset())\n # Don't need transformed data schema, it's the same as before.\n transformed_test_data, _ = transformed_test_dataset\n\n _ = (\n transformed_test_data\n | 'EncodeTestData' >> beam.Map(transformed_data_coder.encode)\n | 'WriteTestData' >> beam.io.WriteToTFRecord(\n os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE)))\n\n # Will write a SavedModel and metadata to working_dir, which can then\n # be read by the tft.TFTransformOutput class.\n _ = (\n transform_fn\n | 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir))", "_____no_output_____" ] ], [ [ "##Using our preprocessed data to train a model\n\nTo show how `tf.Transform` enables us to use the same code for both training and serving, and thus prevent skew, we're going to train a model. To train our model and prepare our trained model for production we need to create input functions. The main difference between our training input function and our serving input function is that training data contains the labels, and production data does not. The arguments and returns are also somewhat different.", "_____no_output_____" ], [ "###Create an input function for training", "_____no_output_____" ] ], [ [ "def _make_training_input_fn(tf_transform_output, transformed_examples,\n batch_size):\n \"\"\"Creates an input function reading from transformed data.\n\n Args:\n tf_transform_output: Wrapper around output of tf.Transform.\n transformed_examples: Base filename of examples.\n batch_size: Batch size.\n\n Returns:\n The input function for training or eval.\n \"\"\"\n def input_fn():\n \"\"\"Input function for training and eval.\"\"\"\n dataset = tf.contrib.data.make_batched_features_dataset(\n file_pattern=transformed_examples,\n batch_size=batch_size,\n features=tf_transform_output.transformed_feature_spec(),\n reader=tf.data.TFRecordDataset,\n shuffle=True)\n\n transformed_features = dataset.make_one_shot_iterator().get_next()\n\n # Extract features and label from the transformed tensors.\n transformed_labels = transformed_features.pop(LABEL_KEY)\n\n return transformed_features, transformed_labels\n\n return input_fn", "_____no_output_____" ] ], [ [ "###Create an input function for serving\n\nLet's create an input function that we could use in production, and prepare our trained model for serving.", "_____no_output_____" ] ], [ [ "def _make_serving_input_fn(tf_transform_output):\n \"\"\"Creates an input function reading from raw data.\n\n Args:\n tf_transform_output: Wrapper around output of tf.Transform.\n\n Returns:\n The serving input function.\n \"\"\"\n raw_feature_spec = RAW_DATA_FEATURE_SPEC.copy()\n # Remove label since it is not available during serving.\n raw_feature_spec.pop(LABEL_KEY)\n\n def serving_input_fn():\n \"\"\"Input function for serving.\"\"\"\n # Get raw features by generating the basic serving input_fn and calling it.\n # Here we generate an input_fn that expects a parsed Example proto to 
be fed\n # to the model at serving time. See also\n # tf.estimator.export.build_raw_serving_input_receiver_fn.\n raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(\n raw_feature_spec, default_batch_size=None)\n serving_input_receiver = raw_input_fn()\n\n # Apply the transform function that was used to generate the materialized\n # data.\n raw_features = serving_input_receiver.features\n transformed_features = tf_transform_output.transform_raw_features(\n raw_features)\n\n return tf.estimator.export.ServingInputReceiver(\n transformed_features, serving_input_receiver.receiver_tensors)\n\n return serving_input_fn", "_____no_output_____" ] ], [ [ "###Wrap our input data in FeatureColumns\nOur model will expect our data in TensorFlow FeatureColumns.", "_____no_output_____" ] ], [ [ "def get_feature_columns(tf_transform_output):\n \"\"\"Returns the FeatureColumns for the model.\n\n Args:\n tf_transform_output: A `TFTransformOutput` object.\n\n Returns:\n A list of FeatureColumns.\n \"\"\"\n # Wrap scalars as real valued columns.\n real_valued_columns = [tf.feature_column.numeric_column(key, shape=())\n for key in NUMERIC_FEATURE_KEYS]\n\n # Wrap categorical columns.\n one_hot_columns = [\n tf.feature_column.categorical_column_with_vocabulary_file(\n key=key,\n vocabulary_file=tf_transform_output.vocabulary_file_by_name(\n vocab_filename=key))\n for key in CATEGORICAL_FEATURE_KEYS]\n\n return real_valued_columns + one_hot_columns", "_____no_output_____" ] ], [ [ "##Train, Evaluate, and Export our model", "_____no_output_____" ] ], [ [ "def train_and_evaluate(working_dir, num_train_instances=NUM_TRAIN_INSTANCES,\n num_test_instances=NUM_TEST_INSTANCES):\n \"\"\"Train the model on training data and evaluate on test data.\n\n Args:\n working_dir: Directory to read transformed data and metadata from and to\n write exported model to.\n num_train_instances: Number of instances in train set\n num_test_instances: Number of instances in test set\n\n Returns:\n The results from the estimator's 'evaluate' method\n \"\"\"\n tf_transform_output = tft.TFTransformOutput(working_dir)\n run_config = tf.estimator.RunConfig()\n\n estimator = tf.estimator.LinearClassifier(\n feature_columns=get_feature_columns(tf_transform_output),\n config=run_config)\n\n # Fit the model using the default optimizer.\n train_input_fn = _make_training_input_fn(\n tf_transform_output,\n os.path.join(working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE + '*'),\n batch_size=TRAIN_BATCH_SIZE)\n estimator.train(\n input_fn=train_input_fn,\n max_steps=TRAIN_NUM_EPOCHS * num_train_instances / TRAIN_BATCH_SIZE)\n\n # Evaluate model on test dataset.\n eval_input_fn = _make_training_input_fn(\n tf_transform_output,\n os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE + '*'),\n batch_size=1)\n\n # Export the model.\n serving_input_fn = _make_serving_input_fn(tf_transform_output)\n exported_model_dir = os.path.join(working_dir, EXPORTED_MODEL_DIR)\n estimator.export_savedmodel(exported_model_dir, serving_input_fn)\n\n return estimator.evaluate(input_fn=eval_input_fn, steps=num_test_instances)", "_____no_output_____" ] ], [ [ "##Put it all together\nWe've created all the stuff we need to preprocess our census data, train a model, and prepare it for serving. So far we've just been getting things ready. It's time to start running!\n\nNote: Scroll the output from this cell to see the whole process. 
The results will be at the bottom.", "_____no_output_____" ] ], [ [ "transform_data(train, test, temp)\nresults = train_and_evaluate(temp)\npprint.pprint(results)", "_____no_output_____" ] ], [ [ "##What we did\nIn this example we used `tf.Transform` to preprocess a dataset of census data, and train a model with the cleaned and transformed data. We also created an input function that we could use when we deploy our trained model in a production environment to perform inference. By using the same code for both training and inference we avoid any issues with data skew. Along the way we learned about creating an Apache Beam transform to perform the transformation that we needed for cleaning the data, and wrapped our data in TensorFlow `FeatureColumns`. This is just a small piece of what TensorFlow Transform can do! We encourage you to dive into `tf.Transform` and discover what it can do for you.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec848c59225a0c09d87971251c9799d06373ec7c
38,043
ipynb
Jupyter Notebook
Test_commit.ipynb
Anoopy111/MEM_FDS_Fall18_Lab_Tutorials
85ad2bccdbf3dc57a0dfb088ee6f6a4ac5837967
[ "MIT" ]
null
null
null
Test_commit.ipynb
Anoopy111/MEM_FDS_Fall18_Lab_Tutorials
85ad2bccdbf3dc57a0dfb088ee6f6a4ac5837967
[ "MIT" ]
null
null
null
Test_commit.ipynb
Anoopy111/MEM_FDS_Fall18_Lab_Tutorials
85ad2bccdbf3dc57a0dfb088ee6f6a4ac5837967
[ "MIT" ]
36
2018-11-06T18:06:20.000Z
2019-04-29T19:24:23.000Z
22.028373
292
0.409537
[ [ [ "Name: ", "_____no_output_____" ], [ "Name of Peer who worked with you:", "_____no_output_____" ], [ "Favorite Place to Get lunch on-Campus:", "_____no_output_____" ], [ "## Introduction to Notebooks ", "_____no_output_____" ], [ "Jupyter: Line by line compilation, allows to showcase your work as a data scientist ", "_____no_output_____" ], [ "## The Much Awaited, \"Hello World!\"", "_____no_output_____" ] ], [ [ "print('Hello World!')", "Hello World!\n" ], [ "print(\"Hi\")", "Hi\n" ], [ "print ('Hello # world')", "Hello # world\n" ], [ "print (\"hello # world\")", "hello # world\n" ], [ "2 + 3", "_____no_output_____" ], [ "print('Hello', 'world!')", "Hello world!\n" ], [ "Print('Python, whats 1 + 2')", "_____no_output_____" ] ], [ [ "So make sure you write print in small and not Print", "_____no_output_____" ], [ "## Scalar Objects", "_____no_output_____" ], [ "scalar vs non scalar objects: \nscalar objects are indepenedent and they do not have internal structure. They are atomic objects\nnon-scalar objects have internal structure with set of atomic elements. ", "_____no_output_____" ], [ "Python stores different object differently based on the use. \n\nfor us 2+2 and 2.000000000 + 2.000000000 are same. But for computer, they are not! Every zero that we add, it adds a bit of memory and complexes the computation.\n\nSo we tell the datatype to computer beforehand.\n\nSo 3 basic data types are integer, float and boolean.\n\n1)int is used for integers e.g. 1, 0, 1000, 5000\nlike no of children, no of cars, etc. \n\n2)float is used to represent real numbers e.g. 3.0, 4.0, 100.5, 3.14, 4e3(4 times 10 to the power of 2) etc\n\n3)boolean is used for binary like True and False.\nPass or fail\n\n4)There are some types like None, long integers which we talk about later \n\nSo what should be the data type of following variables?\nWe want to design a demographic table. What data type would you assign\nAge, no of courses taken, Height , weight, Married/unmarried\n", "_____no_output_____" ] ], [ [ "type(1)", "_____no_output_____" ], [ "type(1.0)", "_____no_output_____" ], [ "4e2", "_____no_output_____" ], [ "type(4e4)", "_____no_output_____" ], [ "1==0", "_____no_output_____" ], [ "type(1==1)", "_____no_output_____" ], [ "type(False)", "_____no_output_____" ], [ "type(True)", "_____no_output_____" ], [ " 4 + 5 == 6", "_____no_output_____" ], [ "2 + 3 ==5", "_____no_output_____" ], [ "int(5.0)", "_____no_output_____" ], [ "float(5)", "_____no_output_____" ], [ "bool(5)", "_____no_output_____" ], [ "bool(0)", "_____no_output_____" ], [ "bool(1)", "_____no_output_____" ], [ "int(5.8)", "_____no_output_____" ], [ "(3+2)==(1+4)", "_____no_output_____" ], [ "((3+2)==(1+4)) == (4%2==1)", "_____no_output_____" ] ], [ [ "## Operators", "_____no_output_____" ], [ "There are some basic data operators \n+ plus\n- minus\n/ divide\n* multiply\n% reminder (I % j is pronounced as “i mod j”)\n** power \n< less- than\n> greater- than\n<= less- than- equal\n>= greater- than- equal\n== check equality \n!= check inequality", "_____no_output_____" ] ], [ [ "2+3", "_____no_output_____" ], [ "3-2", "_____no_output_____" ], [ "3*2", "_____no_output_____" ], [ "10/2", "_____no_output_____" ], [ "11/2", "_____no_output_____" ], [ "int(11/2)", "_____no_output_____" ], [ "35%4", "_____no_output_____" ], [ "4**3", "_____no_output_____" ], [ "5<2", "_____no_output_____" ], [ "5<5", "_____no_output_____" ], [ "5<=5", "_____no_output_____" ], [ "print('what is 5 - 7?', 5 - 7)", "what is 5 - 7? 
-2\n" ], [ "int(5) + float(3.2)", "_____no_output_____" ], [ "1 + 2 == 3", "_____no_output_____" ], [ "3 + 4 == 7", "_____no_output_____" ], [ "2 + 3 == 4", "_____no_output_____" ], [ "0.1 + 0.2 == 0.3 ", "_____no_output_____" ] ], [ [ "## Variables and assignments", "_____no_output_____" ], [ "Variables provide a way to associate names with objects. These names, associated with objects can be used and called again and again, instead of objects. \nAn assignment statement associates the name to the left of ‘=’ to the object denoted by the expression to the right of ‘=’. \n\n\nIts like naming somebody. ", "_____no_output_____" ] ], [ [ "pi = 3.1416", "_____no_output_____" ], [ "pi", "_____no_output_____" ], [ "## Write your code chunk to print the area of a circle with radius = 4 units. [Note: This is how comments are written in jupyter notebooks]", "_____no_output_____" ], [ "my name = 'python'", "_____no_output_____" ] ], [ [ "we have to give continuous name to variable. So 'my name' is not valid, but 'my_name' is valid. ", "_____no_output_____" ] ], [ [ "my_name = 'whatever'", "_____no_output_____" ], [ "my_name", "_____no_output_____" ], [ "#From the previously written code of printing the area of a circle, print out the below statement using the variables assigned previously\nprint('when radius of a circle is ' + str(radius) +' units then area of circle is '+ str(area) +' square units')", "when radius of a circle is 4 units then area of circle is 50.2656 square units\n" ], [ "print('when radius of a circle is %d units then area of circle is %f square units' %(radius,area))", "when radius of a circle is 4 units then area of circle is 50.265600 square units\n" ], [ "universities = '\\nDuke \\nstanford \\nMIT \\nCaltech'\nprint('A few good universities:', universities )", "A few good universities: \nDuke \nstanford \nMIT \nCaltech\n" ] ], [ [ "Reserved Keywords: and, del, from, not, while, as, elif, global, or, with, assert, else, if, pass, yield, break, except, import, print, class, exec, in, raise, continue, finally, is, return, def, for, lambda, try\n\nwe can not use reserved keywords as our variable name. 
For e.g, we can not say \nand = 0.3", "_____no_output_____" ], [ "Readability of code:\na = 3.1416 pi = 3.1416 \nb = 4 radius = 4 \nc = a*(b**2) area = pi*(r**2)\nWhich code makes more sense?\n", "_____no_output_____" ] ], [ [ "##### Finding number is odd or even\n\nodd_even = [1,2,3,4,5,6,7,8,9,10]\nfor num in odd_even:\n if num % 2 == 0:\n print ('%d is even' %num)\n elif num % 2 == 1:\n print ('%d is odd' %num)", "1 is odd\n2 is even\n3 is odd\n4 is even\n5 is odd\n6 is even\n7 is odd\n8 is even\n9 is odd\n10 is even\n" ], [ "# Finding roots of Quadratic equation\n# Assume that quadratic equation is of the form of Ax^2 + Bx + C = 0\n\na = int(input('What is a?'))\nb = int(input('What is b?'))\nc = int(input('What is c?'))\n\nd = (b**2) - (4*a*c ) \n\nroot1 = (-b + d**0.5 )/(2*a)\nroot2 = (-b - d**0.5 )/(2*a)\n\nprint ('2 roots of quardatic equation are ' + str(root1) + ' & ' + str(root2))\n", "What is a?1\nWhat is b?2\nWhat is c?3\n2 roots of quardatic equation are (-0.9999999999999999+1.4142135623730951j) & (-1-1.4142135623730951j)\n" ], [ "# A bit more complicated code\n\na = int(input('What is a?'))\nb = int(input('What is b?'))\nc = int(input('What is c?'))\n\n\nd = (b**2) - (4*a*c) # discriminant\n\nif d < 0:\n print (\"This equation has no real solution\")\nelif d == 0:\n x = (-b+ (d**0.5)) / (2*a)\n print (\"This equation has one solutions: \",x)\n print ((2+10) * (10+3))\nelse:\n x1 = (-b + (d**0.5))/2*a\n x2 = (-b - (d**0.5))/2*a\n print (\"This equation has two solutions: \", x1, \" and\", x2)", "What is a?3\nWhat is b?4\nWhat is c?5\nThis equation has no real solution\n" ] ], [ [ "NOTE: Please Make sure to edit the notebook, fill in your code chunks, commit and push the notebook to your github repository by the end of class.", "_____no_output_____" ], [ " # Introduction to Common Libraries", "_____no_output_____" ], [ "Concept of DataFrames!!! The What and the Why!!", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv('input_dataset.csv')", "_____no_output_____" ], [ "input_dataset.describe()", "_____no_output_____" ], [ "input_dataset.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 200 entries, 0 to 199\nData columns (total 8 columns):\nUnique Applicant ID 200 non-null int64\n Age 200 non-null float64\n Years at Employer 200 non-null float64\nYears at Address 200 non-null float64\nIncome 200 non-null object\nCredit Card Debt 200 non-null object\nAutomobile Debt 200 non-null object\nOutcomes: Default = 1 200 non-null int64\ndtypes: float64(3), int64(2), object(3)\nmemory usage: 12.6+ KB\n" ], [ "input_dataset.head()", "_____no_output_____" ], [ "input_dataset.tail()", "_____no_output_____" ], [ "## testing changes\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "raw" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec848e51f260f767edb5ff9322172cf2503176cb
5,193
ipynb
Jupyter Notebook
example/SecretBox.ipynb
autarch/pgsodium
f57bd69db9ec691be91b2e6e5350b7c297dbdba7
[ "ISC" ]
263
2017-04-17T12:48:20.000Z
2022-03-29T12:06:54.000Z
example/SecretBox.ipynb
autarch/pgsodium
f57bd69db9ec691be91b2e6e5350b7c297dbdba7
[ "ISC" ]
7
2020-06-05T20:42:50.000Z
2022-03-01T19:12:33.000Z
example/SecretBox.ipynb
autarch/pgsodium
f57bd69db9ec691be91b2e6e5350b7c297dbdba7
[ "ISC" ]
14
2019-01-13T21:45:18.000Z
2022-03-11T03:27:49.000Z
30.017341
230
0.601964
[ [ [ "# Secret Box\n\nThe `secretbox` API uses secret key authenticated encryption to encrypt and authenticate a message with a secret key that all parties must share.\n\n[Libsodium Documentation](https://doc.libsodium.org/secret-key_cryptography/secretbox)\n\nFunctions:\n```\n crypto_secretbox_keygen() -> bytea\n\n crypto_secretbox_noncegen() -> bytea\n\n crypto_secretbox(message bytea, nonce bytea, key bytea) -> bytea\n\n crypto_secretbox(message bytea, nonce bytea, key_id bigint, context bytea = 'pgsodium') -> bytea\n\n crypto_secretbox_open(ciphertext bytea, nonce bytea, key bytea) -> bytea\n\n crypto_secretbox_open(ciphertext bytea, nonce bytea, key_id bigint, context bytea = 'pgsodium') -> bytea\n```\n\n`crypto_secretbox_keygen()` generates a random secret key which can be\nused to encrypt and decrypt messages. The role `pgsodium_keymaker` is required to call this function.\n\n`crypto_secretbox_noncegen()` generates a random nonce which will be\nused when encrypting messages. For security, each nonce must be used\nonly once, though it is not a secret. The purpose of the nonce is to\nadd randomness to the message so that the same message encrypted\nmultiple times with the same key will produce different ciphertexts. The role `pgsodium_keyiduser` or greater is required to call this function.\n\n`crypto_secretbox()` encrypts a message using a previously generated\nnonce and secret key. The encrypted message can be decrypted using\n`crypto_secretbox_open()` Note that in order to decrypt the message,\nthe original nonce will be needed. The role `pgsodium_keyholder` is required to call the raw `key bytea` versions of these functions. The key id versions of the functions can be called with the role `pgsodium_keyiduser`.\n\n", "_____no_output_____" ] ], [ [ "%load_ext sql\n%sql postgresql://postgres@/", "The sql extension is already loaded. To reload it, use:\n %reload_ext sql\n" ] ], [ [ "Encryption requires a key and a nonce. The nonce doesn't have to be confidential, but it should never ever be reused with the same key. The easiest way to generate a nonce is to use `crypto_secretbox_noncegen`:", "_____no_output_____" ] ], [ [ "key = %sql select pgsodium.crypto_secretbox_keygen()::text\nkey = key[0][0]\n\nnonce = %sql select pgsodium.crypto_secretbox_noncegen()::text\nnonce = nonce[0][0]", "_____no_output_____" ] ], [ [ "## Encryption\n\nA new secretbox is created with the key and the nonce:", "_____no_output_____" ] ], [ [ "secretbox = %sql SELECT crypto_secretbox::text from pgsodium.crypto_secretbox('bob is your uncle', :nonce, (:key)::bytea)\nsecretbox = secretbox[0][0]\nprint('The encrypted secretbox is: ', secretbox)", " * postgresql://postgres@/\n1 rows affected.\n * postgresql://postgres@/\n1 rows affected.\n * postgresql://postgres@/\n1 rows affected.\nThe encrypted secretbox is: \\x7b11d8e3659f6fe2a7762f082019c607d5d64fd5f805f6ff6df68266664a6ec335\n" ] ], [ [ "## Decryption\n\nDecryption requires the same key and nonce.", "_____no_output_____" ] ], [ [ "plaintext = %sql SELECT crypto_secretbox_open FROM pgsodium.crypto_secretbox_open(:secretbox, :nonce, (:key)::bytea)\nprint('The decrypted message is :', plaintext[0][0].tobytes().decode('utf8'))", " * postgresql://postgres@/\n1 rows affected.\nThe decrypted message is : bob is your uncle\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec848eec8f169fbe6f31fc42925db6fcb51a4809
14,356
ipynb
Jupyter Notebook
Section-01-Introduction/Titanic_dataPrep.ipynb
ShravanAnandk7/feature-engineering-for-machine-learning
8c75a4e7740b49fc89968981f571480e446d6b49
[ "BSD-3-Clause" ]
null
null
null
Section-01-Introduction/Titanic_dataPrep.ipynb
ShravanAnandk7/feature-engineering-for-machine-learning
8c75a4e7740b49fc89968981f571480e446d6b49
[ "BSD-3-Clause" ]
null
null
null
Section-01-Introduction/Titanic_dataPrep.ipynb
ShravanAnandk7/feature-engineering-for-machine-learning
8c75a4e7740b49fc89968981f571480e446d6b49
[ "BSD-3-Clause" ]
null
null
null
32.044643
536
0.369602
[ [ [ "## Predicting Survival on the Titanic\n\n### History\nPerhaps one of the most infamous shipwrecks in history, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 people on board. Interestingly, by analysing the probability of survival based on few attributes like gender, age, and social status, we can make very accurate predictions on which passengers would survive. Some groups of people were more likely to survive than others, such as women, children, and the upper-class. Therefore, we can learn about the society priorities and privileges at the time.\n\n### Dataset\n\n\n### Download and Save", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport os\nBASE_DIR = os.getcwd()", "_____no_output_____" ], [ "data = pd.read_csv('https://www.openml.org/data/get_csv/16826755/phpMYEkMl')\ndata.head()", "_____no_output_____" ], [ "data = data.replace('?', np.nan)\ndata.isnull().sum()", "_____no_output_____" ], [ "def get_first_cabin(row):\n try:\n return row.split()[0]\n except:\n return np.nan \ndata['cabin'] = data['cabin'].apply(get_first_cabin)\ndata.head()", "_____no_output_____" ], [ "data.to_csv(BASE_DIR+'/titanic.csv', index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec849f83ca5986c58922383eb47bfe3192019c6f
4,385
ipynb
Jupyter Notebook
notebooks/03_categorical_pipeline_ex_01.ipynb
nish2612/scikit-learn-mooc
daa9945beddf3318ef20770bf44b77f1e747d7fa
[ "CC-BY-4.0" ]
1
2021-05-25T07:29:44.000Z
2021-05-25T07:29:44.000Z
notebooks/03_categorical_pipeline_ex_01.ipynb
Mamane403/scikit-learn-mooc
cdfe0e9ac16b5d7fa4c8fb343141c10eb98828f4
[ "CC-BY-4.0" ]
null
null
null
notebooks/03_categorical_pipeline_ex_01.ipynb
Mamane403/scikit-learn-mooc
cdfe0e9ac16b5d7fa4c8fb343141c10eb98828f4
[ "CC-BY-4.0" ]
1
2021-06-16T16:54:59.000Z
2021-06-16T16:54:59.000Z
29.829932
130
0.634664
[ [ [ "# 📝 Exercise M1.04\n\nThe goal of this exercise is to evaluate the impact of using an arbitrary\ninteger encoding for categorical variables along with a linear\nclassification model such as Logistic Regression.\n\nTo do so, let's try to use `OrdinalEncoder` to preprocess the categorical\nvariables. This preprocessor is assembled in a pipeline with\n`LogisticRegression`. The statistical performance of the pipeline can be\nevaluated by cross-validation and then compared to the score obtained when\nusing `OneHotEncoder` or to some other baseline score.\n\nFirst, we load the dataset.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nadult_census = pd.read_csv(\"../datasets/adult-census.csv\")", "_____no_output_____" ], [ "target_name = \"class\"\ntarget = adult_census[target_name]\ndata = adult_census.drop(columns=[target_name, \"education-num\"])", "_____no_output_____" ] ], [ [ "In the previous notebook, we used `sklearn.compose.make_column_selector` to\nautomatically select columns with a specific data type (also called `dtype`).\nHere, we will use this selector to get only the columns containing strings\n(column with `object` dtype) that correspond to categorical features in our\ndataset.", "_____no_output_____" ] ], [ [ "from sklearn.compose import make_column_selector as selector\n\ncategorical_columns_selector = selector(dtype_include=object)\ncategorical_columns = categorical_columns_selector(data)\ndata_categorical = data[categorical_columns]", "_____no_output_____" ] ], [ [ "We filter our dataset that it contains only categorical features.\nDefine a scikit-learn pipeline composed of an `OrdinalEncoder` and a\n`LogisticRegression` classifier.\n\nBecause `OrdinalEncoder` can raise errors if it sees an unknown category at\nprediction time, you can set the `handle_unknown=\"use_encoded_value\"` and\n`unknown_value` parameters. You can refer to the\n[scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html)\nfor more details regarding these parameters.", "_____no_output_____" ] ], [ [ "from sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.linear_model import LogisticRegression\n\n# Write your code here.", "_____no_output_____" ] ], [ [ "Your model is now defined. Evaluate it using a cross-validation using\n`sklearn.model_selection.cross_validate`.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import cross_validate\n\n# Write your code here.", "_____no_output_____" ] ], [ [ "Now, we would like to compare the statistical performance of our previous\nmodel with a new model where instead of using an `OrdinalEncoder`, we will\nuse a `OneHotEncoder`. Repeat the model evaluation using cross-validation.\nCompare the score of both models and conclude on the impact of choosing a\nspecific encoding strategy when using a linear model.", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import OneHotEncoder\n\n# Write your code here.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec84a0ec7de4b471ee28098980371178478c9b2c
106,011
ipynb
Jupyter Notebook
LS_DS_121_Join_and_Reshape_Data.ipynb
justin-hsieh/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
08375031d694c343b5e7fad42eca51f04dbef4e5
[ "MIT" ]
1
2019-07-15T20:39:20.000Z
2019-07-15T20:39:20.000Z
LS_DS_121_Join_and_Reshape_Data.ipynb
justin-hsieh/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
08375031d694c343b5e7fad42eca51f04dbef4e5
[ "MIT" ]
null
null
null
LS_DS_121_Join_and_Reshape_Data.ipynb
justin-hsieh/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
08375031d694c343b5e7fad42eca51f04dbef4e5
[ "MIT" ]
null
null
null
33.274011
5,398
0.395912
[ [ [ "<a href=\"https://colab.research.google.com/github/justin-hsieh/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/LS_DS_121_Join_and_Reshape_Data.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "_Lambda School Data Science_\n\n# Join and Reshape datasets\n\nObjectives\n- concatenate data with pandas\n- merge data with pandas\n- understand tidy data formatting\n- melt and pivot data with pandas\n\nLinks\n- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)\n- [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data)\n - Combine Data Sets: Standard Joins\n - Tidy Data\n - Reshaping Data\n- Python Data Science Handbook\n - [Chapter 3.6](https://jakevdp.github.io/PythonDataScienceHandbook/03.06-concat-and-append.html), Combining Datasets: Concat and Append\n - [Chapter 3.7](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html), Combining Datasets: Merge and Join\n - [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping\n - [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables\n \nReference\n- Pandas Documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)\n- Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)", "_____no_output_____" ], [ "## Download data\n\nWe’ll work with a dataset of [3 Million Instacart Orders, Open Sourced](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2)!", "_____no_output_____" ] ], [ [ "!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz", "--2019-07-16 02:34:53-- https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz\nResolving s3.amazonaws.com (s3.amazonaws.com)... 52.216.144.109\nConnecting to s3.amazonaws.com (s3.amazonaws.com)|52.216.144.109|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 205548478 (196M) [application/x-gzip]\nSaving to: ‘instacart_online_grocery_shopping_2017_05_01.tar.gz.6’\n\ninstacart_online_gr 100%[===================>] 196.03M 46.3MB/s in 4.6s \n\n2019-07-16 02:34:58 (42.3 MB/s) - ‘instacart_online_grocery_shopping_2017_05_01.tar.gz.6’ saved [205548478/205548478]\n\n" ], [ "!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz", "instacart_2017_05_01/\ninstacart_2017_05_01/._aisles.csv\ninstacart_2017_05_01/aisles.csv\ninstacart_2017_05_01/._departments.csv\ninstacart_2017_05_01/departments.csv\ninstacart_2017_05_01/._order_products__prior.csv\ninstacart_2017_05_01/order_products__prior.csv\ninstacart_2017_05_01/._order_products__train.csv\ninstacart_2017_05_01/order_products__train.csv\ninstacart_2017_05_01/._orders.csv\ninstacart_2017_05_01/orders.csv\ninstacart_2017_05_01/._products.csv\ninstacart_2017_05_01/products.csv\n" ], [ "%cd instacart_2017_05_01 \n#bash command/magic command. Changing the state of the command in the folder. Gives access to files directly in the notebook 17th min. 
7/15", "/content/instacart_2017_05_01\n" ], [ "!ls -lh *.csv #Star denotes files that end in \".csv\"", "-rw-r--r-- 1 502 staff 2.6K May 2 2017 aisles.csv\n-rw-r--r-- 1 502 staff 270 May 2 2017 departments.csv\n-rw-r--r-- 1 502 staff 551M May 2 2017 order_products__prior.csv\n-rw-r--r-- 1 502 staff 24M May 2 2017 order_products__train.csv\n-rw-r--r-- 1 502 staff 104M May 2 2017 orders.csv\n-rw-r--r-- 1 502 staff 2.1M May 2 2017 products.csv\n" ] ], [ [ "# Join Datasets", "_____no_output_____" ], [ "## Goal: Reproduce this example\n\nThe first two orders for user id 1:", "_____no_output_____" ] ], [ [ "from IPython.display import display, Image\nurl = 'https://cdn-images-1.medium.com/max/1600/1*vYGFQCafJtGBBX5mbl0xyw.png'\nexample = Image(url=url, width=600)\n\ndisplay(example) # this is the table we're trying to create, must explore the files to see what we can use", "_____no_output_____" ] ], [ [ "## Load data\n\nHere's a list of all six CSV filenames", "_____no_output_____" ] ], [ [ "!ls -lh *.csv", "ls: cannot access '*.csv': No such file or directory\n" ] ], [ [ "For each CSV\n- Load it with pandas\n- Look at the dataframe's shape\n- Look at its head (first rows)\n- `display(example)`\n- Which columns does it have in common with the example we want to reproduce?", "_____no_output_____" ], [ "### aisles", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "!head aisles.csv # able to do this because of %cd command used above", "_____no_output_____" ], [ "aisles = pd.read_csv('aisles.csv') # possible because of %cd command used above. Don't need to use link to read in csv\nprint(aisles.shape)\naisles.isnull().sum()", "(134, 2)\n" ], [ "aisles.head() # Nothing in this file we need for the main table we need", "_____no_output_____" ] ], [ [ "### departments", "_____no_output_____" ] ], [ [ "departments = pd.read_csv('departments.csv')\nprint(departments.shape)\ndepartments.head() #nothing in this table we need for the main table", "(21, 2)\n" ] ], [ [ "### order_products__prior", "_____no_output_____" ] ], [ [ "order_products__prior = pd.read_csv('order_products__prior.csv')\nprint(order_products__prior.shape)\norder_products__prior.head()\n\n#Need order id, product id, add_to_cart_order", "_____no_output_____" ] ], [ [ "### order_products__train", "_____no_output_____" ] ], [ [ "order_products__train = pd.read_csv('order_products__train.csv')\nprint(order_products__train.shape)\norder_products__train.head()\n\n#Need order id, product id, add_to_cart_order", "_____no_output_____" ] ], [ [ "### orders", "_____no_output_____" ] ], [ [ "orders = pd.read_csv('orders.csv')\nprint(orders.shape)\norders.head(10)\n\n# Need order_id, user_id, order_number, order_dow, order_hour_of_day", "_____no_output_____" ] ], [ [ "### products", "_____no_output_____" ] ], [ [ "products = pd.read_csv('products.csv')\nprint(products.shape)\nproducts.head()\n\n# Need product_id, product_name", "(49688, 4)\n" ] ], [ [ "## Concatenate order_products__prior and order_products__train", "_____no_output_____" ] ], [ [ "order_products = pd.concat([order_products__prior, order_products__train])\nprint(order_products.shape)\norder_products.head()", "_____no_output_____" ], [ "order_products.shape,order_products__prior.shape, order_products__train.shape", "_____no_output_____" ], [ "assert len(order_products) == len(order_products__prior) + len(order_products__train)\n# will break if something doesn't match the statement you put, confirms everything is okay without doing more operations", 
"_____no_output_____" ] ], [ [ "## Get a subset of orders — the first two orders for user id 1", "_____no_output_____" ], [ "From `orders` dataframe:\n- user_id\n- order_id\n- order_number\n- order_dow\n- order_hour_of_day", "_____no_output_____" ] ], [ [ "condition = order_products['order_id'] == 2539329\norder_products[condition] #filter out specific conditions", "_____no_output_____" ], [ "condition1 = (orders['user_id'] == 1) &(orders['order_number'] <=2)\n\ncolumns = {'order_id','user_id','order_number','order_dow','order_hour_of_day'}\n\nsubset = orders.loc[condition1, columns]\nsubset", "_____no_output_____" ] ], [ [ "## Merge dataframes", "_____no_output_____" ], [ "Merge the subset from `orders` with columns from `order_products`", "_____no_output_____" ] ], [ [ "display(example)", "_____no_output_____" ], [ "columns = ['order_id', 'product_id', 'add_to_cart_order']\nmerged = pd.merge(subset, order_products[columns], how='inner', on='order_id')\nmerged", "_____no_output_____" ] ], [ [ "Merge with columns from `products`", "_____no_output_____" ] ], [ [ "final = pd.merge(merged, products[['product_id','product_name']], how='inner',on='product_id')\nfinal", "_____no_output_____" ], [ "final = final.sort_values(by=['order_number','add_to_cart_order'])\nfinal.columns = [column.replace('_',' ') for column in final]\nfinal", "_____no_output_____" ] ], [ [ "# Reshape Datasets", "_____no_output_____" ], [ "## Why reshape data?\n\n#### Some libraries prefer data in different formats\n\nFor example, the Seaborn data visualization library prefers data in \"Tidy\" format often (but not always).\n\n> \"[Seaborn will be most powerful when your datasets have a particular organization.](https://seaborn.pydata.org/introduction.html#organizing-datasets) This format ia alternately called “long-form” or “tidy” data and is described in detail by Hadley Wickham. The rules can be simply stated:\n\n> - Each variable is a column\n- Each observation is a row\n\n> A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot.\"\n\n#### Data science is often about putting square pegs in round holes\n\nHere's an inspiring [video clip from _Apollo 13_](https://www.youtube.com/watch?v=ry55--J4_VQ): “Invent a way to put a square peg in a round hole.” It's a good metaphor for data wrangling!", "_____no_output_____" ], [ "## Hadley Wickham's Examples\n\nFrom his paper, [Tidy Data](http://vita.had.co.nz/papers/tidy-data.html)", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\n\ntable1 = pd.DataFrame(\n [[np.nan, 2],\n [16, 11], \n [3, 1]],\n index=['John Smith', 'Jane Doe', 'Mary Johnson'], \n columns=['treatmenta', 'treatmentb'])\n\ntable2 = table1.T", "_____no_output_____" ] ], [ [ "\"Table 1 provides some data about an imaginary experiment in a format commonly seen in the wild. \n\nThe table has two columns and three rows, and both rows and columns are labelled.\"", "_____no_output_____" ] ], [ [ "table1", "_____no_output_____" ] ], [ [ "\"There are many ways to structure the same underlying data. \n\nTable 2 shows the same data as Table 1, but the rows and columns have been transposed. 
The data is the same, but the layout is different.\"", "_____no_output_____" ] ], [ [ "table2", "_____no_output_____" ] ], [ [ "\"Table 3 reorganises Table 1 to make the values, variables and observations more clear.\n\nTable 3 is the tidy version of Table 1. Each row represents an observation, the result of one treatment on one person, and each column is a variable.\"\n\n| name | trt | result |\n|--------------|-----|--------|\n| John Smith | a | - |\n| Jane Doe | a | 16 |\n| Mary Johnson | a | 3 |\n| John Smith | b | 2 |\n| Jane Doe | b | 11 |\n| Mary Johnson | b | 1 |", "_____no_output_____" ], [ "## Table 1 --> Tidy\n\nWe can use the pandas `melt` function to reshape Table 1 into Tidy format.", "_____no_output_____" ] ], [ [ "table1", "_____no_output_____" ], [ "table1 = table1.reset_index()\ntable1", "_____no_output_____" ], [ "#table1['index'].value_counts().reset_index()", "_____no_output_____" ], [ "tidy = table1.melt(id_vars='index')\ntidy.columns = ['name', 'trt', 'result']\ntidy", "_____no_output_____" ] ], [ [ "## Table 2 --> Tidy", "_____no_output_____" ] ], [ [ "table2 = table2.reset_index()\n", "_____no_output_____" ], [ "\ntidy1 = table2.melt(id_vars='index')\ntidy1.columns = ['trt','name','result']\ntidy1", "_____no_output_____" ] ], [ [ "## Tidy --> Table 1\n\nThe `pivot_table` function is the inverse of `melt`.", "_____no_output_____" ] ], [ [ "tidy.pivot_table(index='name',columns='trt', values='result')", "_____no_output_____" ] ], [ [ "## Tidy --> Table 2", "_____no_output_____" ] ], [ [ "tidy1.pivot_table(index='trt',columns='name',values='result')", "_____no_output_____" ] ], [ [ "# Seaborn example\n\nThe rules can be simply stated:\n\n- Each variable is a column\n- Each observation is a row\n\nA helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot.", "_____no_output_____" ] ], [ [ "sns.catplot(x='trt', y='result', col='name', \n            kind='bar', data=tidy, height=2);", "_____no_output_____" ] ], [ [ "## Now with Instacart data", "_____no_output_____" ] ], [ [ "products = pd.read_csv('products.csv')\n\norder_products = pd.concat([pd.read_csv('order_products__prior.csv'), \n                            pd.read_csv('order_products__train.csv')])\n\norders = pd.read_csv('orders.csv')", "_____no_output_____" ] ], [ [ "## Goal: Reproduce part of this example\n\nInstead of a plot with 50 products, we'll just do two — the first products from each list:\n- Half And Half Ultra Pasteurized\n- Half Baked Frozen Yogurt", "_____no_output_____" ] ], [ [ "from IPython.display import display, Image\nurl = 'https://cdn-images-1.medium.com/max/1600/1*wKfV6OV-_1Ipwrl7AjjSuw.png'\nexample = Image(url=url, width=600)\n\ndisplay(example)", "_____no_output_____" ] ], [ [ "So, given a `product_name` we need to calculate its `order_hour_of_day` pattern.", "_____no_output_____" ], [ "## Subset and Merge\n\nOne challenge of performing a merge on this data is that the `products` and `orders` datasets do not have any common columns that we can merge on. 
Due to this we will have to use the `order_products` dataset to provide the columns that we will use to perform the merge.", "_____no_output_____" ] ], [ [ "products.columns.tolist()", "_____no_output_____" ], [ "orders.columns.tolist()", "_____no_output_____" ], [ "order_products.columns.tolist()", "_____no_output_____" ], [ "merged = (products[['product_id','product_name']]\n          .merge(order_products[['order_id','product_id']])\n          .merge(orders[['order_id','order_hour_of_day']]))", "_____no_output_____" ], [ "merged.shape", "_____no_output_____" ], [ "# What condition will filter `merged` to just the 2 products\n# that we care about?\n\n# This is equivalent ...\n\ncondition = ((merged['product_name']=='Half Baked Frozen Yogurt') | \n             (merged['product_name']=='Half And Half Ultra Pasteurized'))\nmerged = merged[condition]\n# ... to this:\n\nproduct_names = ['Half Baked Frozen Yogurt', 'Half And Half Ultra Pasteurized']\n\ncondition = merged['product_name'].isin(product_names)\n\nsubset = merged[condition]\nsubset.head()", "_____no_output_____" ] ], [ [ "## 4 ways to reshape and plot", "_____no_output_____" ], [ "### 1. value_counts", "_____no_output_____" ] ], [ [ "froyo = subset[subset['product_name']=='Half Baked Frozen Yogurt']\ncream = subset[subset['product_name']=='Half And Half Ultra Pasteurized']", "_____no_output_____" ], [ "(cream['order_hour_of_day']\n .value_counts(normalize=True)\n .sort_index()\n .plot())\n\n(froyo['order_hour_of_day']\n .value_counts(normalize=True)\n .sort_index()\n .plot());", "_____no_output_____" ] ], [ [ "### 2. crosstab", "_____no_output_____" ] ], [ [ "(pd.crosstab(subset['order_hour_of_day'], \n             subset['product_name'], \n             normalize='columns') * 100).plot();", "_____no_output_____" ] ], [ [ "### 3. Pivot Table", "_____no_output_____" ] ], [ [ "subset.pivot_table(index='order_hour_of_day', \n                   columns='product_name', \n                   values='order_id', \n                   aggfunc=len).plot();", "_____no_output_____" ] ], [ [ "### 4. melt", "_____no_output_____" ] ], [ [ "table = pd.crosstab(subset['order_hour_of_day'], \n                    subset['product_name'], \n                    normalize=True)\n\nmelted = (table\n          .reset_index()\n          .melt(id_vars='order_hour_of_day')\n          .rename(columns={\n              'order_hour_of_day': 'Hour of Day Ordered', \n              'product_name': 'Product', \n              'value': 'Percent of Orders by Product'\n          }))\n\nsns.relplot(x='Hour of Day Ordered', \n            y='Percent of Orders by Product', \n            hue='Product', \n            data=melted, \n            kind='line');", "_____no_output_____" ] ], [ [ "# Assignment\n\n## Join Data Section\n\nThese are the top 10 most frequently ordered products. How many times was each ordered? \n\n1. Banana\n2. Bag of Organic Bananas\n3. Organic Strawberries\n4. Organic Baby Spinach \n5. Organic Hass Avocado\n6. Organic Avocado\n7. Large Lemon \n8. Strawberries\n9. Limes \n10. Organic Whole Milk\n\nFirst, write down which columns you need and which dataframes have them.\n\nNext, merge these into a single dataframe.\n\nThen, use pandas functions from the previous lesson to get the counts of the top 10 most frequently ordered products.\n\n## Reshape Data Section\n\n- Replicate the lesson code\n- Complete the code cells we skipped near the beginning of the notebook\n- Table 2 --> Tidy\n- Tidy --> Table 2\n- Load seaborn's `flights` dataset by running the cell below. Then create a pivot table showing the number of passengers by month and year. Use year for the index and month for the columns. 
You've done it right if you get 112 passengers for January 1949 and 432 passengers for December 1960.", "_____no_output_____" ] ], [ [ "!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz\n!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz \n%cd instacart_2017_05_01 ", "_____no_output_____" ], [ "product_list = ['Banana','Bag of Organic Bananas','Organic Strawberries','Organic Baby Spinach',\n 'Organic Hass Avocado','Organic Avocado','Large Lemon','Strawberries','Limes',\n 'Organic Whole Milk']\n\nmerges = (products[['product_id','product_name']]\n .merge(order_products[['order_id','product_id','add_to_cart_order']])\n .merge(orders[['order_id','order_hour_of_day','order_number']]))\n\nmerges.shape\n", "_____no_output_____" ], [ "conditional = merges['product_name'].isin(product_list)\n\nsubset1 = merges[conditional]\nsubset1['product_name'].value_counts().head(10)\n", "_____no_output_____" ], [ "flights = sns.load_dataset('flights')\nflights.head()", "_____no_output_____" ], [ "flightset = pd.pivot_table(data=flights,index='year', columns='month')\nflightset", "_____no_output_____" ] ], [ [ "## Join Data Stretch Challenge\n\nThe [Instacart blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2) has a visualization of \"**Popular products** purchased earliest in the day (green) and latest in the day (red).\" \n\nThe post says,\n\n> \"We can also see the time of day that users purchase specific products.\n\n> Healthier snacks and staples tend to be purchased earlier in the day, whereas ice cream (especially Half Baked and The Tonight Dough) are far more popular when customers are ordering in the evening.\n\n> **In fact, of the top 25 latest ordered products, the first 24 are ice cream! The last one, of course, is a frozen pizza.**\"\n\nYour challenge is to reproduce the list of the top 25 latest ordered popular products.\n\nWe'll define \"popular products\" as products with more than 2,900 orders.\n\n## Reshape Data Stretch Challenge\n\n_Try whatever sounds most interesting to you!_\n\n- Replicate more of Instacart's visualization showing \"Hour of Day Ordered\" vs \"Percent of Orders by Product\"\n- Replicate parts of the other visualization from [Instacart's blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2), showing \"Number of Purchases\" vs \"Percent Reorder Purchases\"\n- Get the most recent order for each user in Instacart's dataset. This is a useful baseline when [predicting a user's next order](https://www.kaggle.com/c/instacart-market-basket-analysis)\n- Replicate parts of the blog post linked at the top of this notebook: [Modern Pandas, Part 5: Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)", "_____no_output_____" ] ], [ [ "display(example)", "_____no_output_____" ], [ "latest_25 = (merges.loc[merges['order_hour_of_day'] > 12])\n#latest_merges = merges[latest_25]\nlatest_25.head()", "_____no_output_____" ], [ "latest_25.groupby('product_name')['order_id']", "_____no_output_____" ], [ "Hf = latest_25[latest_25['product_name'] == 'Half Baked Frozen Yogurt']\nHf.shape", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec84ae3b0e0131b9ef4aa2bf765cd8832cddb538
185,873
ipynb
Jupyter Notebook
lesson4_tabular.ipynb
shangong/fastai
0244ddafc663616c0c1595ecdc47cf60f68710b2
[ "Apache-2.0" ]
null
null
null
lesson4_tabular.ipynb
shangong/fastai
0244ddafc663616c0c1595ecdc47cf60f68710b2
[ "Apache-2.0" ]
null
null
null
lesson4_tabular.ipynb
shangong/fastai
0244ddafc663616c0c1595ecdc47cf60f68710b2
[ "Apache-2.0" ]
null
null
null
121.485621
30,102
0.778924
[ [ [ "!curl -s https://course.fast.ai/setup/colab | bash", "Updating fastai...\nDone.\n" ], [ "from google.colab import drive\ndrive.mount('/content/gdrive', force_remount=True)\nroot_dir = \"/content/gdrive/My Drive/\"\nbase_dir = root_dir + 'fastai/'", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/gdrive\n" ] ], [ [ "# Tabular models", "_____no_output_____" ] ], [ [ "from fastai.tabular import *", "_____no_output_____" ] ], [ [ "Tabular data should be in a Pandas `DataFrame`.", "_____no_output_____" ] ], [ [ "path = untar_data(URLs.ADULT_SAMPLE)\ndf = pd.read_csv(path/'adult.csv')", "Downloading http://files.fast.ai/data/examples/adult_sample.tgz\n" ] ], [ [ "Understand the data from adult sample a little more.", "_____no_output_____" ] ], [ [ "df.head(10)", "_____no_output_____" ], [ "df.info", "_____no_output_____" ], [ "df.describe", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ] ], [ [ "Checking for correlations between data.", "_____no_output_____" ] ], [ [ "df[['salary','age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country']].corr()", "_____no_output_____" ], [ "import seaborn as sns\n\n# Categorical variables\ndf_categorical = df[['workclass',\n 'education',\n 'marital-status',\n 'occupation',\n 'relationship',\n 'race',\n 'sex',\n 'native-country'\n ]]\n\nfor i in df_categorical.columns:\n cat_num = df_categorical[i].value_counts()\n print(\"Graph for %s: total = %d\" % (i,len(cat_num)))\n chart = sns.barplot(x = cat_num.index, y = cat_num)\n chart.set_xticklabels(chart.get_xticklabels(),rotation = 90)\n plt.show()", "/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. 
Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ] ], [ [ "Train the model to predict if salary will be above 50k.", "_____no_output_____" ] ], [ [ "dep_var = 'salary'\ncat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']\ncont_names = ['age', 'fnlwgt', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']\nprocs = [FillMissing, Categorify, Normalize]", "_____no_output_____" ], [ "test = TabularList.from_df(df.iloc[800:1000].copy(), path=path, cat_names=cat_names, cont_names=cont_names)", "_____no_output_____" ], [ "data = (TabularList.from_df(df, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs)\n .split_by_idx(list(range(800,1000)))\n .label_from_df(cols=dep_var)\n .add_test(test)\n .databunch())", "_____no_output_____" ], [ "data.show_batch(rows=10)", "_____no_output_____" ], [ "learn = tabular_learner(data, layers=[200,100], metrics=accuracy)\n# architecture like resnet metrics", "_____no_output_____" ], [ "learn.fit(1, 1e-2)", "_____no_output_____" ], [ "learn.save('stage-1')", "_____no_output_____" ], [ "#exporting model\nlearn.export()", "_____no_output_____" ] ], [ [ "## Inference", "_____no_output_____" ] ], [ [ "#Now try to use this model to predict on the entire adults.csv file\ntabList = TabularList(df, cat_names=cat_names, cont_names=cont_names, procs=procs)", "_____no_output_____" ], [ "#learner = load_learner(path, test=test)\n\n#Works\nfor i in range(200):\n row = df.iloc[800 + i]\n print(learn.predict(row))", "(Category tensor(0), tensor(0), tensor([0.9665, 0.0335]))\n(Category tensor(1), tensor(1), tensor([0.1915, 0.8085]))\n(Category tensor(0), tensor(0), tensor([0.9271, 0.0729]))\n(Category tensor(0), tensor(0), tensor([0.9517, 0.0483]))\n(Category tensor(0), tensor(0), tensor([0.6264, 0.3736]))\n(Category tensor(0), tensor(0), tensor([0.5840, 0.4160]))\n(Category tensor(0), tensor(0), tensor([0.6861, 0.3139]))\n(Category tensor(0), tensor(0), tensor([0.9913, 0.0087]))\n(Category tensor(0), tensor(0), tensor([0.9876, 0.0124]))\n(Category tensor(0), tensor(0), tensor([0.6811, 0.3189]))\n(Category tensor(1), tensor(1), tensor([0.2998, 0.7002]))\n(Category tensor(1), tensor(1), tensor([0.3111, 0.6889]))\n(Category tensor(0), tensor(0), tensor([0.8201, 0.1799]))\n(Category tensor(0), tensor(0), tensor([0.8005, 0.1995]))\n(Category tensor(0), tensor(0), tensor([0.9859, 0.0141]))\n(Category tensor(0), tensor(0), tensor([0.6708, 0.3292]))\n(Category tensor(0), tensor(0), tensor([0.6396, 0.3604]))\n(Category tensor(1), tensor(1), tensor([0.3168, 0.6832]))\n(Category tensor(0), tensor(0), tensor([0.6097, 0.3903]))\n(Category tensor(0), tensor(0), tensor([0.5987, 0.4013]))\n(Category tensor(0), tensor(0), tensor([0.9945, 0.0055]))\n(Category tensor(0), tensor(0), tensor([0.9315, 0.0685]))\n(Category tensor(0), tensor(0), tensor([9.9954e-01, 4.6066e-04]))\n(Category tensor(1), tensor(1), tensor([0.4477, 0.5523]))\n(Category tensor(0), tensor(0), tensor([0.7481, 0.2519]))\n(Category tensor(0), tensor(0), tensor([0.6755, 0.3245]))\n(Category tensor(0), tensor(0), tensor([0.9610, 0.0390]))\n(Category tensor(0), tensor(0), tensor([0.5725, 0.4275]))\n(Category tensor(0), tensor(0), tensor([0.6461, 0.3539]))\n(Category tensor(0), tensor(0), tensor([0.9718, 0.0282]))\n(Category tensor(0), tensor(0), tensor([9.9927e-01, 7.3347e-04]))\n(Category tensor(0), tensor(0), tensor([0.9559, 0.0441]))\n(Category tensor(1), tensor(1), tensor([0.4035, 
0.5965]))\n(Category tensor(0), tensor(0), tensor([0.9631, 0.0369]))\n(Category tensor(0), tensor(0), tensor([0.9751, 0.0249]))\n(Category tensor(0), tensor(0), tensor([0.9658, 0.0342]))\n(Category tensor(0), tensor(0), tensor([0.5254, 0.4746]))\n(Category tensor(0), tensor(0), tensor([0.6175, 0.3825]))\n(Category tensor(0), tensor(0), tensor([0.9793, 0.0207]))\n(Category tensor(0), tensor(0), tensor([0.9478, 0.0522]))\n(Category tensor(0), tensor(0), tensor([0.5500, 0.4500]))\n(Category tensor(0), tensor(0), tensor([0.5522, 0.4478]))\n(Category tensor(0), tensor(0), tensor([0.7894, 0.2106]))\n(Category tensor(0), tensor(0), tensor([0.9323, 0.0677]))\n(Category tensor(0), tensor(0), tensor([9.9921e-01, 7.8697e-04]))\n(Category tensor(0), tensor(0), tensor([0.8003, 0.1997]))\n(Category tensor(0), tensor(0), tensor([0.9763, 0.0237]))\n(Category tensor(1), tensor(1), tensor([0.3991, 0.6009]))\n(Category tensor(0), tensor(0), tensor([0.7779, 0.2221]))\n(Category tensor(0), tensor(0), tensor([0.7009, 0.2991]))\n(Category tensor(0), tensor(0), tensor([0.9523, 0.0477]))\n(Category tensor(0), tensor(0), tensor([0.9589, 0.0411]))\n(Category tensor(0), tensor(0), tensor([0.9968, 0.0032]))\n(Category tensor(0), tensor(0), tensor([0.9611, 0.0389]))\n(Category tensor(0), tensor(0), tensor([0.9807, 0.0193]))\n(Category tensor(0), tensor(0), tensor([0.9975, 0.0025]))\n(Category tensor(1), tensor(1), tensor([0.2837, 0.7163]))\n(Category tensor(0), tensor(0), tensor([0.7431, 0.2569]))\n(Category tensor(0), tensor(0), tensor([0.9983, 0.0017]))\n(Category tensor(0), tensor(0), tensor([0.9946, 0.0054]))\n(Category tensor(0), tensor(0), tensor([0.5919, 0.4081]))\n(Category tensor(0), tensor(0), tensor([0.9989, 0.0011]))\n(Category tensor(0), tensor(0), tensor([0.9347, 0.0653]))\n(Category tensor(0), tensor(0), tensor([0.6515, 0.3485]))\n(Category tensor(0), tensor(0), tensor([0.8725, 0.1275]))\n(Category tensor(0), tensor(0), tensor([0.5600, 0.4400]))\n(Category tensor(0), tensor(0), tensor([0.6580, 0.3420]))\n(Category tensor(0), tensor(0), tensor([0.9225, 0.0775]))\n(Category tensor(0), tensor(0), tensor([0.9905, 0.0095]))\n(Category tensor(0), tensor(0), tensor([0.9166, 0.0834]))\n(Category tensor(0), tensor(0), tensor([0.6404, 0.3596]))\n(Category tensor(1), tensor(1), tensor([0.4746, 0.5254]))\n(Category tensor(0), tensor(0), tensor([0.9944, 0.0056]))\n(Category tensor(0), tensor(0), tensor([0.9704, 0.0296]))\n(Category tensor(0), tensor(0), tensor([0.5226, 0.4774]))\n(Category tensor(0), tensor(0), tensor([0.9633, 0.0367]))\n(Category tensor(0), tensor(0), tensor([0.9946, 0.0054]))\n(Category tensor(1), tensor(1), tensor([0.4155, 0.5845]))\n(Category tensor(0), tensor(0), tensor([0.9896, 0.0104]))\n(Category tensor(1), tensor(1), tensor([0.3899, 0.6101]))\n(Category tensor(0), tensor(0), tensor([0.9614, 0.0386]))\n(Category tensor(0), tensor(0), tensor([0.9866, 0.0134]))\n(Category tensor(0), tensor(0), tensor([0.6177, 0.3823]))\n(Category tensor(0), tensor(0), tensor([9.9975e-01, 2.4660e-04]))\n(Category tensor(1), tensor(1), tensor([0.4504, 0.5496]))\n(Category tensor(0), tensor(0), tensor([0.9821, 0.0179]))\n(Category tensor(0), tensor(0), tensor([0.9417, 0.0583]))\n(Category tensor(0), tensor(0), tensor([0.9900, 0.0100]))\n(Category tensor(0), tensor(0), tensor([0.5278, 0.4722]))\n(Category tensor(1), tensor(1), tensor([0.4789, 0.5211]))\n(Category tensor(1), tensor(1), tensor([0.3617, 0.6383]))\n(Category tensor(0), tensor(0), tensor([0.9620, 0.0380]))\n(Category tensor(0), tensor(0), 
tensor([0.9243, 0.0757]))\n(Category tensor(0), tensor(0), tensor([0.5927, 0.4073]))\n(Category tensor(1), tensor(1), tensor([0.2883, 0.7117]))\n(Category tensor(0), tensor(0), tensor([9.9957e-01, 4.2997e-04]))\n(Category tensor(0), tensor(0), tensor([0.9786, 0.0214]))\n(Category tensor(0), tensor(0), tensor([0.9968, 0.0032]))\n(Category tensor(0), tensor(0), tensor([9.9935e-01, 6.4809e-04]))\n(Category tensor(0), tensor(0), tensor([0.8433, 0.1567]))\n(Category tensor(0), tensor(0), tensor([0.5284, 0.4716]))\n(Category tensor(0), tensor(0), tensor([0.6254, 0.3746]))\n(Category tensor(0), tensor(0), tensor([0.8279, 0.1721]))\n(Category tensor(0), tensor(0), tensor([0.7466, 0.2534]))\n(Category tensor(0), tensor(0), tensor([0.9945, 0.0055]))\n(Category tensor(0), tensor(0), tensor([0.9869, 0.0131]))\n(Category tensor(1), tensor(1), tensor([0.2488, 0.7512]))\n(Category tensor(0), tensor(0), tensor([0.6545, 0.3455]))\n(Category tensor(0), tensor(0), tensor([0.7684, 0.2316]))\n(Category tensor(0), tensor(0), tensor([9.9979e-01, 2.1031e-04]))\n(Category tensor(0), tensor(0), tensor([0.9763, 0.0237]))\n(Category tensor(0), tensor(0), tensor([0.9805, 0.0195]))\n(Category tensor(0), tensor(0), tensor([0.5872, 0.4128]))\n(Category tensor(0), tensor(0), tensor([0.9624, 0.0376]))\n(Category tensor(0), tensor(0), tensor([0.9091, 0.0909]))\n(Category tensor(0), tensor(0), tensor([0.8100, 0.1900]))\n(Category tensor(0), tensor(0), tensor([0.7937, 0.2063]))\n(Category tensor(0), tensor(0), tensor([0.9878, 0.0122]))\n(Category tensor(0), tensor(0), tensor([0.8946, 0.1054]))\n(Category tensor(0), tensor(0), tensor([0.9979, 0.0021]))\n(Category tensor(0), tensor(0), tensor([0.9961, 0.0039]))\n(Category tensor(0), tensor(0), tensor([0.9433, 0.0567]))\n(Category tensor(0), tensor(0), tensor([0.9777, 0.0223]))\n(Category tensor(0), tensor(0), tensor([0.9915, 0.0085]))\n(Category tensor(0), tensor(0), tensor([0.6170, 0.3830]))\n(Category tensor(0), tensor(0), tensor([0.6352, 0.3648]))\n(Category tensor(0), tensor(0), tensor([0.8260, 0.1740]))\n(Category tensor(0), tensor(0), tensor([0.9990, 0.0010]))\n(Category tensor(0), tensor(0), tensor([0.6647, 0.3353]))\n(Category tensor(0), tensor(0), tensor([0.9947, 0.0053]))\n(Category tensor(1), tensor(1), tensor([0.4917, 0.5083]))\n(Category tensor(0), tensor(0), tensor([0.9368, 0.0632]))\n(Category tensor(0), tensor(0), tensor([0.8661, 0.1339]))\n(Category tensor(0), tensor(0), tensor([0.9985, 0.0015]))\n(Category tensor(0), tensor(0), tensor([0.9768, 0.0232]))\n(Category tensor(0), tensor(0), tensor([0.6937, 0.3063]))\n(Category tensor(0), tensor(0), tensor([0.9417, 0.0583]))\n(Category tensor(0), tensor(0), tensor([0.6250, 0.3750]))\n(Category tensor(0), tensor(0), tensor([0.9798, 0.0202]))\n(Category tensor(0), tensor(0), tensor([0.7401, 0.2599]))\n(Category tensor(0), tensor(0), tensor([0.9728, 0.0272]))\n(Category tensor(0), tensor(0), tensor([0.8236, 0.1764]))\n(Category tensor(0), tensor(0), tensor([0.5676, 0.4324]))\n(Category tensor(0), tensor(0), tensor([0.5742, 0.4258]))\n(Category tensor(0), tensor(0), tensor([0.9955, 0.0045]))\n(Category tensor(0), tensor(0), tensor([0.6935, 0.3065]))\n(Category tensor(0), tensor(0), tensor([0.8630, 0.1370]))\n(Category tensor(1), tensor(1), tensor([0.2810, 0.7190]))\n(Category tensor(0), tensor(0), tensor([0.6281, 0.3719]))\n(Category tensor(1), tensor(1), tensor([0.2687, 0.7313]))\n(Category tensor(0), tensor(0), tensor([0.6314, 0.3686]))\n(Category tensor(0), tensor(0), tensor([0.8159, 0.1841]))\n(Category tensor(1), 
tensor(1), tensor([0.1129, 0.8871]))\n(Category tensor(0), tensor(0), tensor([0.9988, 0.0012]))\n(Category tensor(1), tensor(1), tensor([0.4584, 0.5416]))\n(Category tensor(1), tensor(1), tensor([0.4472, 0.5528]))\n(Category tensor(0), tensor(0), tensor([0.9691, 0.0309]))\n(Category tensor(0), tensor(0), tensor([0.6094, 0.3906]))\n(Category tensor(0), tensor(0), tensor([0.7837, 0.2163]))\n(Category tensor(0), tensor(0), tensor([0.9574, 0.0426]))\n(Category tensor(0), tensor(0), tensor([0.5674, 0.4326]))\n(Category tensor(0), tensor(0), tensor([0.5870, 0.4130]))\n(Category tensor(1), tensor(1), tensor([0.3220, 0.6780]))\n(Category tensor(1), tensor(1), tensor([0.3863, 0.6137]))\n(Category tensor(0), tensor(0), tensor([0.8924, 0.1076]))\n(Category tensor(0), tensor(0), tensor([0.9831, 0.0169]))\n(Category tensor(0), tensor(0), tensor([0.9804, 0.0196]))\n(Category tensor(0), tensor(0), tensor([0.9810, 0.0190]))\n(Category tensor(0), tensor(0), tensor([0.8835, 0.1165]))\n(Category tensor(0), tensor(0), tensor([0.6646, 0.3354]))\n(Category tensor(0), tensor(0), tensor([0.9933, 0.0067]))\n(Category tensor(0), tensor(0), tensor([0.9773, 0.0227]))\n(Category tensor(1), tensor(1), tensor([0.4022, 0.5978]))\n(Category tensor(0), tensor(0), tensor([0.8756, 0.1244]))\n(Category tensor(0), tensor(0), tensor([0.5950, 0.4050]))\n(Category tensor(1), tensor(1), tensor([0.4050, 0.5950]))\n(Category tensor(0), tensor(0), tensor([0.9790, 0.0210]))\n(Category tensor(0), tensor(0), tensor([0.8724, 0.1276]))\n(Category tensor(0), tensor(0), tensor([0.7948, 0.2052]))\n(Category tensor(1), tensor(1), tensor([0.1357, 0.8643]))\n(Category tensor(0), tensor(0), tensor([0.9621, 0.0379]))\n(Category tensor(0), tensor(0), tensor([0.6903, 0.3097]))\n(Category tensor(0), tensor(0), tensor([0.5582, 0.4418]))\n(Category tensor(0), tensor(0), tensor([0.5849, 0.4151]))\n(Category tensor(0), tensor(0), tensor([0.7738, 0.2262]))\n(Category tensor(0), tensor(0), tensor([0.8265, 0.1735]))\n(Category tensor(0), tensor(0), tensor([0.7316, 0.2684]))\n(Category tensor(0), tensor(0), tensor([0.6296, 0.3704]))\n(Category tensor(0), tensor(0), tensor([0.9412, 0.0588]))\n(Category tensor(0), tensor(0), tensor([0.9811, 0.0189]))\n(Category tensor(0), tensor(0), tensor([0.5901, 0.4099]))\n(Category tensor(1), tensor(1), tensor([0.3092, 0.6908]))\n(Category tensor(0), tensor(0), tensor([0.6457, 0.3543]))\n(Category tensor(0), tensor(0), tensor([0.9976, 0.0024]))\n(Category tensor(0), tensor(0), tensor([0.9981, 0.0019]))\n(Category tensor(1), tensor(1), tensor([0.4840, 0.5160]))\n(Category tensor(0), tensor(0), tensor([0.7410, 0.2590]))\n(Category tensor(0), tensor(0), tensor([0.8746, 0.1254]))\n(Category tensor(0), tensor(0), tensor([0.6269, 0.3731]))\n(Category tensor(0), tensor(0), tensor([0.8025, 0.1975]))\n" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec84bdb0ac6e36d73ce165372353cbd218a63a15
306,391
ipynb
Jupyter Notebook
04_small_model_bulding.ipynb
parthamehta123/food-not-food-ml-app
540e002e58008d26160877faeed063af0a70bde0
[ "MIT" ]
null
null
null
04_small_model_bulding.ipynb
parthamehta123/food-not-food-ml-app
540e002e58008d26160877faeed063af0a70bde0
[ "MIT" ]
null
null
null
04_small_model_bulding.ipynb
parthamehta123/food-not-food-ml-app
540e002e58008d26160877faeed063af0a70bde0
[ "MIT" ]
null
null
null
622.745935
151,390
0.945103
[ [ [ "import tensorflow as tf\ntf.get_logger().setLevel('INFO')", "2021-12-13 08:37:10.235864: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n" ], [ "import os\n\nimport numpy as np\n\nimport tensorflow as tf\nassert tf.__version__.startswith('2')\ntf.get_logger().setLevel('INFO')\n\nfrom tflite_model_maker import model_spec\nfrom tflite_model_maker import image_classifier\nfrom tflite_model_maker.config import ExportFormat\nfrom tflite_model_maker.config import QuantizationConfig\nfrom tflite_model_maker.image_classifier import DataLoader\n\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "train_data_path = \"data/train\"\ntest_data_path = \"data/test\"\n\nimport os\nclass_names = sorted(os.listdir(train_data_path))\nclass_names", "_____no_output_____" ], [ "tf.get_logger().setLevel('INFO')", "_____no_output_____" ], [ "# Create data loader\ntrain_data = DataLoader.from_folder(train_data_path)\ntest_data = DataLoader.from_folder(test_data_path)\n\ntrain_data, test_data", "INFO:tensorflow:Load image with size: 47807, num_label: 2, labels: food, not_food.\n" ], [ "# Create model\nmodel = image_classifier.create(train_data)", "INFO:tensorflow:Retraining the models...\n" ], [ "# Evaluate the model\ntest_loss, test_accuracy = model.evaluate(test_data)\ntest_loss, test_accuracy", "374/374 [==============================] - 22s 58ms/step - loss: 0.2660 - accuracy: 0.9664\n" ], [ "# Save the model\nmodel.export(export_dir='.', tflite_filename=\"models/food_not_food_model_v3.tflite\")", "2021-12-13 08:49:12.454923: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n" ], [ "# Load image\ndef pred_and_plot(image_path, img_shape=224):\n # Read in the image\n img = tf.io.read_file(image_path)\n # Decode it into a tensor\n img = tf.image.decode_jpeg(img)\n # Resize the image\n img = tf.image.resize(img, [img_shape, img_shape])\n # Scale pixels\n img = img/255.\n preds = model.predict_top_k(tf.expand_dims(img, axis=0), k=2, batch_size=1)\n print(preds[0])\n\n plt.imshow(img)\n plt.axis(False)\n plt.title(f\"Pred class: {preds[0][0][0]} | Prob: {float(preds[0][0][1]):.3f}\")", "_____no_output_____" ], [ "# Get image chicken wings\npred_and_plot(\"images/chicken_wings.jpeg\")", "[('food', 0.93645567), ('not_food', 0.06354433)]\n" ], [ "# !wget https://images.hgmsites.net/hug/tesla-cybertruck_100725713_h.jpg\npred_and_plot(\"images/tesla_cyber_truck.jpg.jpg\")", "--2021-12-09 15:14:27-- https://images.hgmsites.net/hug/tesla-cybertruck_100725713_h.jpg\nResolving images.hgmsites.net (images.hgmsites.net)... 104.21.31.148, 172.67.177.203, 2606:4700:3030::6815:1f94, ...\nConnecting to images.hgmsites.net (images.hgmsites.net)|104.21.31.148|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 220323 (215K) [image/jpeg]\nSaving to: ‘tesla-cybertruck_100725713_h.jpg’\n\ntesla-cybertruck_10 100%[===================>] 215.16K 531KB/s in 0.4s \n\n2021-12-09 15:14:28 (531 KB/s) - ‘tesla-cybertruck_100725713_h.jpg’ saved [220323/220323]\n\n" ], [ "# !wget https://cdn.pocket-lint.com/r/s/1200x630/assets/images/152137-laptops-review-apple-macbook-pro-2020-review-image1-pbzm4ejvvs.jpg\npred_and_plot(\"images/apple_macbook_pro.jpg.jpg\")", "--2021-12-09 15:16:00-- https://cdn.pocket-lint.com/r/s/1200x630/assets/images/152137-laptops-review-apple-macbook-pro-2020-review-image1-pbzm4ejvvs.jpg\nResolving cdn.pocket-lint.com (cdn.pocket-lint.com)... 13.226.107.121, 13.226.107.95, 13.226.107.83, ...\nConnecting to cdn.pocket-lint.com (cdn.pocket-lint.com)|13.226.107.121|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 173515 (169K) [image/jpeg]\nSaving to: ‘152137-laptops-review-apple-macbook-pro-2020-review-image1-pbzm4ejvvs.jpg’\n\n152137-laptops-revi 100%[===================>] 169.45K 322KB/s in 0.5s \n\n2021-12-09 15:16:02 (322 KB/s) - ‘152137-laptops-review-apple-macbook-pro-2020-review-image1-pbzm4ejvvs.jpg’ saved [173515/173515]\n\n[('not_food', 0.8310447), ('food', 0.16895539)]\n" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec84c1e57785dba75f10d0b99dbfbe9afb5d3bfe
619,669
ipynb
Jupyter Notebook
Data_analysis/SNP-indel-calling/dadi/dadiExercises/First_Steps_with_dadi.ipynb
claudiuskerth/PhDthesis
66cb32c9bc481af8f80cd971e35cdc56717a60de
[ "MIT" ]
null
null
null
Data_analysis/SNP-indel-calling/dadi/dadiExercises/First_Steps_with_dadi.ipynb
claudiuskerth/PhDthesis
66cb32c9bc481af8f80cd971e35cdc56717a60de
[ "MIT" ]
null
null
null
Data_analysis/SNP-indel-calling/dadi/dadiExercises/First_Steps_with_dadi.ipynb
claudiuskerth/PhDthesis
66cb32c9bc481af8f80cd971e35cdc56717a60de
[ "MIT" ]
null
null
null
118.528883
41,194
0.865499
[ [ [ "import sys", "_____no_output_____" ], [ "sys.path", "_____no_output_____" ], [ "import os", "_____no_output_____" ], [ "os.getcwd()", "_____no_output_____" ] ], [ [ "I have cloned the $\\delta$a$\\delta$i repository into '/home/claudius/Downloads/dadi' and have compiled the code. Now I need to add that directory to the PYTHONPATH variable:", "_____no_output_____" ] ], [ [ "sys.path.insert(0, '/home/claudius/Downloads/dadi')", "_____no_output_____" ], [ "sys.path", "_____no_output_____" ] ], [ [ "Now, I should be able to import $\\delta$a$\\delta$i", "_____no_output_____" ] ], [ [ "import dadi", "_____no_output_____" ], [ "dir(dadi)", "_____no_output_____" ], [ "import pylab", "_____no_output_____" ], [ "%matplotlib inline", "_____no_output_____" ], [ "x = pylab.linspace(0, 4*pylab.pi, 1000)", "_____no_output_____" ], [ "pylab.plot(x, pylab.sin(x), '-r')", "_____no_output_____" ], [ "%%sh \n# this allows me to execute a shell command\n\nls", "ERY.FOLDED.sfs\nERY.FOLDED.sfs.dadi_format\nERY.FOLDED.sfs.dadi_format~\nEryPar.unfolded.2dsfs\nEryPar.unfolded.2dsfs.dadi_format\nEryPar.unfolded.2dsfs.dadi_format~\nexamples\nexample_YRI_CEU.ipynb\nFirst_Steps_with_dadi.ipynb\nnew.bib\nPAR.FOLDED.sfs\nPAR.FOLDED.sfs.dadi_format\nPAR.FOLDED.sfs.dadi_format~\n" ] ], [ [ "I have turned the 1D folded SFS's from `realSFS` into $\\delta$d$\\delta$i format by hand according to the description in section 3.1 of the manual. I have left out the masking line from the input file.", "_____no_output_____" ] ], [ [ "fs_ery = dadi.Spectrum.from_file('ERY.FOLDED.sfs.dadi_format')", "_____no_output_____" ], [ "fs_ery", "_____no_output_____" ] ], [ [ "$\\delta$a$\\delta$i is detecting that the spectrum is folded (as given in the input file), but it is also automatically masking the 0th and 18th count category. This is a not a good behaviour.", "_____no_output_____" ] ], [ [ "# number of segregating sites\n\nfs_ery.data[1:].sum()", "_____no_output_____" ] ], [ [ "## Single population statistics", "_____no_output_____" ], [ "### $\\pi$", "_____no_output_____" ] ], [ [ "fs_ery.pi()", "_____no_output_____" ] ], [ [ "I have next added a masking line to the input file, setting it to '1' for the first position, i. e. the 0-count category.", "_____no_output_____" ] ], [ [ "fs_ery = dadi.Spectrum.from_file('ERY.FOLDED.sfs.dadi_format', mask_corners=False)", "_____no_output_____" ] ], [ [ "$\\delta$a$\\delta$i is issuing the following message when executing the above command:\n\n`WARNING:Spectrum_mod:Creating Spectrum with data_folded = True, but mask is not True for all entries which are nonsensical for a folded Spectrum.`", "_____no_output_____" ] ], [ [ "fs_ery", "_____no_output_____" ] ], [ [ "I do not understand this warning from $\\delta$a$\\delta$i. The 18-count category is sensical for a folded spectrum with even sample size, so should not be masked. Anyway, I do not understand why $\\delta$a$\\delta$i is so reluctant to keep all positions, including the non-variable one.", "_____no_output_____" ] ], [ [ "fs_ery.pi()", "_____no_output_____" ] ], [ [ "The function that returns $\\pi$ produces the same output with or without the last count category masked ?! I think that is because even if the last count class (966.62...) is masked, it is still included in the calculation of $\\pi$. However, there is no obvious unmasking in the `pi` function. Strange!", "_____no_output_____" ], [ "There are (at least) two formulas that allow the calculation of $\\pi$ from a folded sample allele frequency spectrum. 
One is given in Wakeley2009, p.16, equation (1.4):\n$$\n\\pi = \\frac{1}{n \\choose 2} \\sum_{i=1}^{n/2} i(n-i)\\eta_{i}\n$$\nHere, $n$ is the number of sequences and $\\eta_{i}$ is the SNP count in the i'th minor sample allele frequency class.\n\nThe other formula is on p. 45 in Gillespie \"Population Genetics - A concise guide\":\n$$\n\\hat{\\pi} = \\frac{n}{n-1} \\sum_{i=1}^{S_{n}} 2 \\hat{p_{i}}(1-\\hat{p_{i}})\n$$\nThis is the formula that $\\delta$a$\\delta$i's `pi` function uses, with the modification that it multiplies each $\\hat{p_{i}}$ by the count in the i'th class of the SFS, i. e. the sum is not over all SNP's but over all SNP frequency classes.", "_____no_output_____" ] ], [ [ "# Calculating pi with the formula from Wakeley2009\n\nn = 36 # 36 sequences sampled from 18 diploid individuals\npi_Wakeley = (sum( [i*(n-i)*fs_ery[i] for i in range(1, n/2+1)] ) * 2.0 / (n*(n-1)))/pylab.sum(fs_ery.data)\n# note fs_ery.data gets the whole fs_ery list, including masked entries\npi_Wakeley", "_____no_output_____" ] ], [ [ "This is the value of $\\pi_{site}$ that I calculated previously and included in the first draft of the thesis.", "_____no_output_____" ] ], [ [ "fs_ery.mask", "_____no_output_____" ], [ "fs_ery.data # gets all data, including the masked one", "_____no_output_____" ], [ "# Calculating pi with the formula from Gillespie:\n\nn = 18 \np = pylab.arange(0, n+1)/float(n)\np", "_____no_output_____" ], [ "# Calculating pi with the formula from Gillespie:\n\nn / (n-1.0) * 2 * pylab.sum(fs_ery * p*(1-p))", "_____no_output_____" ] ], [ [ "This is the same as the output of dadi's `pi` function on the same SFS. ", "_____no_output_____" ] ], [ [ "# the sample size (n) that dadi stores in this spectrum object and uses as n in the pi function\nfs_ery.sample_sizes[0]", "_____no_output_____" ], [ "# what is the total number of sites in the spectrum\npylab.sum(fs_ery.data)", "_____no_output_____" ] ], [ [ "So, 1.6 million sites went into the ery spectrum.", "_____no_output_____" ] ], [ [ "# pi per site\nn / (n-1.0) * 2 * pylab.sum(fs_ery * p*(1-p)) / pylab.sum(fs_ery.data)", "_____no_output_____" ] ], [ [ "Apart from the incorrect small sample size correction by $\\delta$a$\\delta$i in case of folded spectra ($n$ refers to sampled sequences, not individuals), Gillespie's formula leads to a much higher estimate of $\\pi_{site}$ than Wakeley's. Why is that?", "_____no_output_____" ] ], [ [ "# with correct small sample size correction\n2 * n / (2* n-1.0) * 2 * pylab.sum(fs_ery * p*(1-p)) / pylab.sum(fs_ery.data)", "_____no_output_____" ], [ "# Calculating pi with the formula from Gillespie:\n\nn = 18 \np = pylab.arange(0, n+1)/float(n)\np = p/2 # with a folded spectrum, we are summing over minor allele freqs only\npi_Gillespie = 2*n / (2*n-1.0) * 2 * pylab.sum(fs_ery * p*(1-p)) / pylab.sum(fs_ery.data)\npi_Gillespie", "_____no_output_____" ], [ "pi_Wakeley - pi_Gillespie", "_____no_output_____" ] ], [ [ "As can be seen from the insignificant difference (must be due to numerical inaccuracies) between the $\\pi_{Wakeley}$ and the $\\pi_{Gillespie}$ estimates, they are equivalent when using the folded-spectrum calculation given above as well as the correct small sample size correction. **Beware: $\\delta$a$\\delta$i does not handle folded spectra correctly**.", "_____no_output_____" ], [ "It should be relatively easy to fix the `pi` function to work correctly with folded spectra. 
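For example, a corrected version could compute $\pi$ directly from Wakeley's equation (1.4) above, which sums over minor allele classes and therefore needs no unfolding (a minimal sketch, not dadi's actual code; it assumes the folded spectrum is stored as in the cells above, with entry 0 holding the monomorphic sites):

```python
def pi_folded(eta, n):
    # a sketch, not dadi's API: pi per locus from a folded 1D SFS via eq. (1.4)
    # eta: folded spectrum of length [n/2] + 1; entry 0 (monomorphic sites) is ignored
    # n:   number of sampled gene copies, i.e. 2 * number of diploid individuals
    i = pylab.arange(1, len(eta))
    return pylab.sum(i * (n - i) * pylab.asarray(eta)[1:]) * 2.0 / (n * (n - 1.0))
```

With `n = 36` this reproduces the $\pi_{Wakeley}$ value calculated above (divide by the total number of sites for $\pi_{site}$).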
Care should be taken to also correctly handle uneven sample sizes.", "_____no_output_____" ] ], [ [ "fs_ery.folded", "_____no_output_____" ] ], [ [ "I think for now it would be best to import unfolded spectra from `realSFS` and fold them if necessary in dadi.", "_____no_output_____" ] ], [ [ "fs_par = dadi.Spectrum.from_file('PAR.FOLDED.sfs.dadi_format')", "_____no_output_____" ], [ "pylab.plot(fs_ery, 'r', label='ery')\npylab.plot(fs_par, 'g', label='par')\npylab.legend()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### ML estimate of $\\theta$ from 1D folded spectrum", "_____no_output_____" ], [ "I am trying to fit eq. 4.21 of Wakeley2009 to the observed 1D folded spectra.", "_____no_output_____" ], [ "$$\nE[\\eta_i] = \\theta \\frac{\\frac{1}{i} + \\frac{1}{n-i}}{1+\\delta_{i,n-i}} \\qquad 1 \\le i \\le \\big[n/2\\big]\n$$", "_____no_output_____" ], [ "Each frequency class, $\\eta_i$, provides an estimate of $\\theta$. However, I would like to find the value of $\\theta$ that minimizes the deviation of the above equation from all observed counts $\\eta_i$.", "_____no_output_____" ], [ "I am following the example given here: https://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html#example-of-solving-a-fitting-problem", "_____no_output_____" ], [ "$$\n\\frac{\\partial E}{\\partial \\theta} = \\frac{\\frac{1}{i} + \\frac{1}{n-i}}{1+\\delta_{i,n-i}} \\qquad 1 \\le i \\le \\big[n/2\\big]\n$$", "_____no_output_____" ], [ "I have just one parameter to optimize.", "_____no_output_____" ] ], [ [ "from scipy.optimize import least_squares", "_____no_output_____" ], [ "def model(theta, eta, n):\n \"\"\"\n theta: scaled population mutation rate parameter [scalar]\n eta: the folded 1D spectrum, including 0-count cat. [list] \n n: number of sampled gene copies, i. e. 2*num_ind [scalar]\n \n returns a numpy array\n \"\"\"\n i = pylab.arange(1, eta.size)\n delta = pylab.where(i == n-i, 1, 0)\n return theta * (1.0/i + 1.0/(n-i)) / (1 + delta) # eq. 4.21 of Wakeley2009", "_____no_output_____" ], [ "?pylab.where", "_____no_output_____" ], [ "# test\ni = pylab.arange(1, 19)\nn = 36\nprint i == n-i\n#\nprint pylab.where(i == n-i, 1, 0)\n# get a theta estimate from pi:\ntheta = pi_Wakeley * fs_ery.data.sum() \nprint theta\n#\nprint len(fs_ery)\n#\nmodel(theta, fs_ery, 36)", "[False False False False False False False False False False False False\n False False False False False True]\n[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]\n10680.7980805\n19\n" ], [ "def fun(theta, eta, n):\n \"\"\"\n return residuals between model and data\n \"\"\"\n return model(theta, eta, n) - eta[1:]", "_____no_output_____" ], [ "def jac(theta, eta, n, test=False):\n \"\"\"\n creates a Jacobian matrix\n \"\"\"\n J = pylab.empty((eta.size-1, theta.size))\n i = pylab.arange(1, eta.size, dtype=float)\n delta = pylab.where(i == n-i, 1, 0)\n num = 1/i + 1/(n-i)\n den = 1 + delta\n if test:\n print i\n print num\n print den\n J[:,0] = num / den\n return J", "_____no_output_____" ], [ "# test\njac(theta, fs_ery, 36, test=True)", "[ 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15.\n 16. 17. 18.]\n[ 1.02857143 0.52941176 0.36363636 0.28125 0.23225806 0.2\n 0.1773399 0.16071429 0.14814815 0.13846154 0.13090909 0.125\n 0.12040134 0.11688312 0.11428571 0.1125 0.11145511 0.11111111]\n[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2]\n" ], [ "# starting value\ntheta0 = theta # pi_Wakeley from above", "_____no_output_____" ], [ "# sum over unmasked entries, i. e. without 0-count category, i. e. 
returns number of variable sites\nfs_ery.sum()", "_____no_output_____" ], [ "# optimize\nres = least_squares(fun, x0=theta0, jac=jac, bounds=(0,fs_ery.sum()), \n kwargs={'eta': fs_ery, 'n': 36}, verbose=1)", "Both `ftol` and `xtol` termination conditions are satisfied.\nFunction evaluations: 8, initial cost: 9.6784e+06, final cost 9.5162e+06, first-order optimality 1.08e-01.\n" ], [ "res.success", "_____no_output_____" ], [ "?least_squares", "_____no_output_____" ], [ "print res.x\nprint theta", "[ 10367.32782801]\n10680.7980805\n" ], [ "pylab.rcParams['figure.figsize'] = [12.0, 8.0]", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nplt.rcParams['font.size'] = 14.0\n\ni = range(1, len(fs_ery))\neta_model = model(res.x, eta=fs_ery, n=36) # get predicted values with optimal theta\n\nplt.plot(i, fs_ery[1:], \"bo\", label=\"data from ery\") # plot observed spectrum\n\nymax = max( fs_ery[1:].max(), eta_model.max() )\nplt.axis([0, 19, 0, ymax*1.1]) # set axis range\n\nplt.xlabel(\"minor allele frequency (i)\")\nplt.ylabel(r'$\\eta_i$', fontsize='large', rotation='horizontal')\nplt.title(\"folded SFS of ery\")\n\nplt.plot(i, eta_model, \"go-\", \n label=\"\\nneutral model\" \n + \"\\n\"\n + r'$\\theta_{opt} = $' + str(round(res.x, 1))\n ) # plot model prediction with optimal theta\n\nplt.legend()", "_____no_output_____" ] ], [ [ "The counts in each frequency class should be Poisson distributed with rate equal to $E[\\eta_i]$ as given above. The lowest frequency class has the highest rate and therefore also the highest variance.", "_____no_output_____" ] ], [ [ "#?plt.ylabel", "_____no_output_____" ], [ "#print plt.rcParams", "_____no_output_____" ], [ "fs_ery[1:].max()", "_____no_output_____" ], [ "#?pylab", "_____no_output_____" ], [ "os.getcwd()", "_____no_output_____" ], [ "%%sh\n\nls", "ERY.FOLDED.sfs\nERY.FOLDED.sfs.dadi_format\nERY.FOLDED.sfs.dadi_format~\nEryPar.unfolded.2dsfs\nEryPar.unfolded.2dsfs.dadi_format\nEryPar.unfolded.2dsfs.dadi_format~\nexamples\nexample_YRI_CEU.ipynb\nFirst_Steps_with_dadi.ipynb\nnew.bib\nPAR.FOLDED.sfs\nPAR.FOLDED.sfs.dadi_format\nPAR.FOLDED.sfs.dadi_format~\n" ] ], [ [ "The following function takes the file name of a file containing the flat 1D folded frequency spectrum of one population and plots it together with the best-fitting neutral expectation.", "_____no_output_____" ] ], [ [ "def plot_folded_sfs(filename, n, pop = ''):\n # read in spectrum from file\n data = open(filename, 'r')\n sfs = pylab.array( data.readline().split(), dtype=float )\n data.close() # should close connection to file\n #return sfs\n \n # get starting value for theta from Watterson's theta\n S = sfs[1:].sum()\n T_total = sum([1.0/i for i in range(1, n)]) # one half of the expected total length of the genealogy\n theta0 = S / T_total # see eq. 
4.7 in Wakeley2009\n \n # optimize\n res = least_squares(fun, x0=theta0, jac=jac, bounds=(0, sfs.sum()), \n kwargs={'eta': sfs, 'n': 36}, verbose=1)\n #print \"Optimal theta per site is {0:.4f}\".format(res.x[0]/sfs.sum())\n #print res.x[0]/sfs.sum()\n \n #return theta0, res\n \n # plot\n plt.rcParams['font.size'] = 14.0\n\n i = range(1, len(sfs))\n eta_model = model(res.x, eta=sfs, n=36) # get predicted values with optimal theta\n\n plt.plot(i, sfs[1:], \"rs\", label=\"data of \" + pop) # plot observed spectrum\n\n ymax = max( sfs[1:].max(), eta_model.max() )\n plt.axis([0, 19, 0, ymax*1.1]) # set axis range\n\n plt.xlabel(\"minor allele frequency (i)\")\n plt.ylabel(r'$\\eta_i$', fontsize='large', rotation='horizontal')\n plt.title(\"folded SFS\")\n plt.text(5, 10000, \n r\"Optimal neutral $\\theta$ per site is {0:.4f}\".format(res.x[0]/sfs.sum()))\n\n plt.plot(i, eta_model, \"go-\", \n label=\"\\nneutral model\" \n + \"\\n\"\n + r'$\\theta_{opt} = $' + str(round(res.x, 1))\n ) # plot model prediction with optimal theta\n\n plt.legend()", "_____no_output_____" ], [ "plot_folded_sfs('PAR.FOLDED.sfs', n=36, pop='par')", "`xtol` termination condition is satisfied.\nFunction evaluations: 12, initial cost: 3.1360e+07, final cost 3.0064e+07, first-order optimality 1.56e+07.\n" ], [ "plot_folded_sfs('ERY.FOLDED.sfs', n=36, pop='ery')", "`ftol` termination condition is satisfied.\nFunction evaluations: 7, initial cost: 9.5789e+06, final cost 9.5162e+06, first-order optimality 6.25e-01.\n" ] ], [ [ "### Univariate function minimizers or 1D scalar minimisation", "_____no_output_____" ], [ "Since I only have one value to optimize, I can use a slightly simpler approach than used above:", "_____no_output_____" ] ], [ [ "from scipy.optimize import minimize_scalar", "_____no_output_____" ], [ "?minimize_scalar", "_____no_output_____" ], [ "# define cost function\ndef f(theta, eta, n):\n \"\"\"\n return sum of squared deviations between model and data\n \"\"\"\n return sum( (model(theta, eta, n) - eta[1:])**2 ) # see above for definition of the 'model' function", "_____no_output_____" ] ], [ [ "It would be interesting to know whether the cost function is convex or not.", "_____no_output_____" ] ], [ [ "theta = pylab.arange(0, fs_ery.data[1:].sum()) # specify range of theta\ncost = [f(t, fs_ery.data, 36) for t in theta]\nplt.plot(theta, cost, 'b-', label='ery')\nplt.xlabel(r'$\\theta$')\nplt.ylabel('cost')\nplt.title(\"cost function for ery\")\nplt.legend(loc='best')", "_____no_output_____" ], [ "?plt.legend", "_____no_output_____" ] ], [ [ "Within the specified bounds (the observed $\\theta$, i. e. derived from the data, cannot lie outside these bounds), the cost function is convex. This is therefore an easy optimisation problem. See [here](http://www.scipy-lectures.org/advanced/mathematical_optimization/index.html) for more details.", "_____no_output_____" ] ], [ [ "res = minimize_scalar(f, bounds = (0, fs_ery.data[1:].sum()), method = 'bounded', args = (fs_ery.data, 36))", "_____no_output_____" ], [ "res", "_____no_output_____" ], [ "# number of segregating sites\n\nfs_par.data[1:].sum()", "_____no_output_____" ], [ "res = minimize_scalar(f, bounds = (0, fs_par.data[1:].sum()), method = 'bounded', args = (fs_par.data, 36))", "_____no_output_____" ], [ "res", "_____no_output_____" ] ], [ [ "The fitted values of $\\theta$ are similar to the ones obtained above with the `least_squares` function. 
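Because the model is linear in $\theta$, the least-squares optimum can also be written in closed form, $\hat{\theta} = \sum_i a_i \eta_i / \sum_i a_i^2$ with $a_i = \frac{1/i + 1/(n-i)}{1+\delta_{i,n-i}}$, which provides an independent check on the optimiser output (a sketch using the objects defined above):

```python
# closed-form least-squares optimum for a model that is linear in theta
i = pylab.arange(1.0, fs_ery.size)               # minor allele classes 1..18
a = (1/i + 1/(36 - i)) / (1 + (i == 36 - i))     # coefficients of theta in eq. 4.21
print pylab.sum(a * fs_ery.data[1:]) / pylab.sum(a**2)
```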
The estimates for ery deviate more than for par.", "_____no_output_____" ] ], [ [ "from sympy import *", "_____no_output_____" ], [ "x0, x1 = symbols('x0 x1')", "_____no_output_____" ], [ "init_printing(use_unicode=True)", "_____no_output_____" ], [ "diff(0.5*(1-x0)**2 + (x1-x0**2)**2, x0)", "_____no_output_____" ], [ "diff(0.5*(1-x0)**2 + (x1-x0**2)**2, x1)", "_____no_output_____" ] ], [ [ "Wow! Sympy is a replacement for Mathematica. There is also Sage, which may include even more functionality.", "_____no_output_____" ] ], [ [ "from scipy.optimize import curve_fit", "_____no_output_____" ] ], [ [ "`Curve_fit` is another function that can be used for optimization.", "_____no_output_____" ] ], [ [ "?curve_fit", "_____no_output_____" ], [ "def model(i, theta):\n \"\"\"\n i: independent variable, here minor SNP frequency classes\n theta: scaled population mutation rate parameter [scalar]\n \n returns a numpy array\n \"\"\"\n n = 2*len(i) # number of sampled gene copies; i runs over minor allele classes 1..n/2\n delta = pylab.where(i == n-i, 1, 0)\n return theta * (1.0/i + 1.0/(n-i)) / (1 + delta)", "_____no_output_____" ], [ "i = pylab.arange(1, fs_ery.size)\n\npopt, pcov = curve_fit(model, i, fs_ery.data[1:])", "_____no_output_____" ], [ "# optimal theta\nprint popt", "[ 10198.84901849]\n" ], [ "perr = pylab.sqrt(pcov)\nperr", "_____no_output_____" ], [ "print str(int(popt[0] - 1.96*perr[0])) + ' < ' + str(int(popt[0])) + ' < ' + str(int(popt[0] + 1.96*perr[0]))", "8556 < 10198 < 11841\n" ], [ "popt, pcov = curve_fit(model, i, fs_par.data[1:])\nperr = pylab.sqrt(pcov)\nprint str(int(popt[0] - 1.96*perr[0])) + ' < ' + str(int(popt[0])) + ' < ' + str(int(popt[0] + 1.96*perr[0]))", "8905 < 11828 < 14750\n" ] ], [ [ "I am not sure whether these standard errors (perr) are correct. It may be that errors are assumed to be normally distributed, which they are not exactly in this case. They should be close to Poisson distributed (see Fu1995), which should be fairly similar to normal with such high expected values as here.", "_____no_output_____" ], [ "If the standard errors are correct, then the large overlap of the 95% confidence intervals would indicate that the data do not provide significant support for a difference in $\\theta$ between par and ery.", "_____no_output_____" ], [ "## Parametric bootstrap from the observed SFS", "_____no_output_____" ] ], [ [ "%pwd", "_____no_output_____" ], [ "% ll", "total 660\r\nlrwxrwxrwx 1 claudius 53 Feb 17 15:37 \u001b[0m\u001b[01;36mERY.FOLDED.sfs\u001b[0m -> /data3/claudius/Big_Data/ANGSD/SFS/ERY/ERY.FOLDED.sfs\r\n-rw-rw-r-- 1 claudius 462 Mar 15 12:48 ERY.FOLDED.sfs.dadi_format\r\n-rw-rw-r-- 1 claudius 462 Mar 15 12:45 ERY.FOLDED.sfs.dadi_format~\r\nlrwxrwxrwx 1 claudius 37 Feb 18 17:46 \u001b[01;36mEryPar.unfolded.2dsfs\u001b[0m -> ../../ANGSD/FST/EryPar.unfolded.2dsfs\r\n-rw-rw-r-- 1 claudius 13051 Feb 18 19:00 EryPar.unfolded.2dsfs.dadi_format\r\n-rw-rw-r-- 1 claudius 13051 Feb 18 18:31 EryPar.unfolded.2dsfs.dadi_format~\r\ndrwxrwxr-x 5 claudius 4096 Feb 17 13:45 \u001b[01;34mexamples\u001b[0m/\r\n-rw-rw-r-- 1 claudius 18014 Mar 15 21:39 example_YRI_CEU.ipynb\r\n-rw-rw-r-- 1 claudius 596246 Mar 17 15:45 First_Steps_with_dadi.ipynb\r\n-rw-rw-r-- 1 claudius 1012 Mar 16 09:54 new.bib\r\nlrwxrwxrwx 1 claudius 53 Feb 17 15:37 \u001b[01;36mPAR.FOLDED.sfs\u001b[0m -> /data3/claudius/Big_Data/ANGSD/SFS/PAR/PAR.FOLDED.sfs\r\n-rw-rw-r-- 1 claudius 412 Feb 17 16:29 PAR.FOLDED.sfs.dadi_format\r\n-rw-rw-r-- 1 claudius 218 Feb 17 15:51 PAR.FOLDED.sfs.dadi_format~\r\n" ], [ "! 
cat ERY.FOLDED.sfs.dadi_format", "# this is the ML estimate of the folded sample frequency spectrum for erythropus, estimated with realSFS of ANGSD\r\n# this is the spectrum in dadi format (see section 3.1 of the manual)\r\n19 folded\r\n1594818.222085 7833.038690 7414.699839 4109.279415 3614.717256 3095.973324 2031.460887 1584.656928 2583.652317 1142.075255 1052.346021 1765.773415 1255.138799 1072.516527 1417.916128 395.750470 1947.087637 367.072082 966.622924 \r\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 \r\n" ], [ "fs_ery = dadi.Spectrum.from_file('ERY.FOLDED.sfs.dadi_format', mask_corners=False)", "_____no_output_____" ], [ "fs_ery", "_____no_output_____" ], [ "fs_ery.pop_ids = ['ery']", "_____no_output_____" ], [ "# get a Poisson sample from the observed spectrum\n\nfs_ery_param_boot = fs_ery.sample()", "_____no_output_____" ], [ "fs_ery_param_boot", "_____no_output_____" ], [ "fs_ery_param_boot.data", "_____no_output_____" ], [ "%psource fs_ery.sample", "_____no_output_____" ] ], [ [ "**There must be a way to get more than one bootstrap sample per call.**", "_____no_output_____" ] ], [ [ "fs_ery_param_boot = pylab.array([fs_ery.sample() for i in range(100)])", "_____no_output_____" ], [ "# get the first 3 bootstrap samples from the doubleton class\n\nfs_ery_param_boot[:3, 2]", "_____no_output_____" ] ], [ [ "It would be good to get the 5% and 95% quantiles from the bootstrap samples of each frequency class and add those intervals to the plot of the observed frequency spectrum and the fitted neutral spectrum. This would require finding a quantile function and finding out how to add lines to a plot with matplotlib.", "_____no_output_____" ], [ "It would also be good to use the predicted counts from the neutral model above with the fitted $\\theta$ as parameters for the bootstrap with `sample()` and add 95% confidence intervals to the predicted neutral SFS. I have done this in R instead (see `/data3/claudius/Big_Data/ANGSD/SFS/SFS.Rmd`).", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "### Using unfolded spectra", "_____no_output_____" ], [ "I edited the 2D SFS created for estimating $F_{ST}$ by `realSFS`. I have convinced myself that `realSFS` outputs a flattened 2D matrix as expected by $\\delta$a$\\delta$i's `Spectrum.from_file` function (see section 3.1 of the manual with my comments). Note, that in the manual, \"samples\" stands for number of allele copies, so that the correct specification of dimensions for this 2D unfolded SFS of 18 diploid individuals in each of 2 populations is 37 x 37.", "_____no_output_____" ] ], [ [ "# read in the flattened 2D SFS\nEryPar_unfolded_2dsfs = dadi.Spectrum.from_file('EryPar.unfolded.2dsfs.dadi_format', mask_corners=True)", "_____no_output_____" ], [ "# check dimension\nlen(EryPar_unfolded_2dsfs[0,])", "_____no_output_____" ], [ "EryPar_unfolded_2dsfs.sample_sizes", "_____no_output_____" ], [ "# add population labels\nEryPar_unfolded_2dsfs.pop_ids = [\"ery\", \"par\"]", "_____no_output_____" ], [ "EryPar_unfolded_2dsfs.pop_ids", "_____no_output_____" ] ], [ [ "### Marginalizing", "_____no_output_____" ], [ "$\\delta$a$\\delta$i offers a function to get the marginal spectra from multidimensional spectra. Note, that this marginalisation is nothing fancy. 
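In `numpy` terms it is just summing the joint counts along one axis of the 2D array (a sketch; `.data` is used here to side-step the mask):

```python
# marginal 1D spectra as axis sums over the joint 2D counts
ery_marginal = EryPar_unfolded_2dsfs.data.sum(axis=1)  # sum over the par axis
par_marginal = EryPar_unfolded_2dsfs.data.sum(axis=0)  # sum over the ery axis
```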
In `R` it would be taking either the `rowSums` or the `colSums` of the matrix.", "_____no_output_____" ] ], [ [ "# marginalise over par to get 1D SFS for ery\n\nfs_ery = EryPar_unfolded_2dsfs.marginalize([1]) \n# note the argument is an array with dimensions, one can marginalise over more than one dimension at the same time,\n# but that is only interesting for 3-dimensional spectra, which I don't have here", "_____no_output_____" ], [ "fs_ery", "_____no_output_____" ], [ "# marginalise over ery to get 1D SFS for par\nfs_par = EryPar_unfolded_2dsfs.marginalize([0])", "_____no_output_____" ], [ "fs_par", "_____no_output_____" ] ], [ [ "Note, that these marginalised 1D SFS's are not identical to the 1D SFS estimated directly with `realSFS`. This is because, for the estimation of the 2D SFS, `realSFS` has only taken sites that had data from at least 9 individuals in *each* population (see `assembly.sh`, lines 1423 onwards).", "_____no_output_____" ], [ "The SFS's of par and ery had conspicuous shape differences. It would therefore be good to plot them to see, whether the above commands have done the correct thing.", "_____no_output_____" ] ], [ [ "# plot 1D spectra for each population\npylab.plot(fs_par, 'g', label=\"par\")\npylab.plot(fs_ery, 'r', label=\"ery\")\npylab.legend()", "_____no_output_____" ] ], [ [ "These marginal unfolded spectra look similar in shape to the 1D folded spectra of each subspecies (see above).", "_____no_output_____" ] ], [ [ "fs_ery.pi() / pylab.sum(fs_ery.data)", "_____no_output_____" ], [ "fs_ery.data", "_____no_output_____" ], [ "n = 36 # 36 sequences sampled from 18 diploid individuals\npi_Wakeley = (sum( [i*(n-i)*fs_ery[i] for i in range(1, n)] ) * 2.0 / (n*(n-1)))\npi_Wakeley = pi_Wakeley / pylab.sum(fs_ery.data)\npi_Wakeley", "_____no_output_____" ] ], [ [ "$\\delta$a$\\delta$i's `pi` function seems to calculate the correct value of $\\pi$ for this unfolded spectrum. However, it is worrying that $\\pi$ from this marginal spectrum is about 20 times larger than the one calculated from the directly estimated 1D folded spectrum (see above the $\\pi$ calculated from the folded 1D spectrum). ", "_____no_output_____" ] ], [ [ "fs_par.pi() / pylab.sum(fs_par.data)", "_____no_output_____" ], [ "pylab.sum(fs_par.data)", "_____no_output_____" ], [ "pylab.sum(EryPar_unfolded_2dsfs.data)", "_____no_output_____" ] ], [ [ "<font color=\"red\">The sum over the marginalised 1D spectra should be the same as the sum over the 2D spectrum !</font>", "_____no_output_____" ] ], [ [ "# from dadi's marginalise function:\nfs_ery.data", "_____no_output_____" ], [ "sfs2d = EryPar_unfolded_2dsfs.copy()", "_____no_output_____" ], [ "# this should get the marginal spectrum for ery\nery_mar = [pylab.sum(sfs2d.data[i]) for i in range(0, len(sfs2d))]\nery_mar", "_____no_output_____" ], [ "# this should get the marginal spectrum for ery and then take the sum over it\nsum([pylab.sum(sfs2d.data[i]) for i in range(0, len(sfs2d))])", "_____no_output_____" ], [ "# look what happens if I include masking\nsum([pylab.sum(sfs2d[i]) for i in range(0, len(sfs2d))])", "_____no_output_____" ], [ "fs_ery.data - ery_mar", "_____no_output_____" ] ], [ [ "So, during the marginalisation the masking of data in the fixed categories (0, 36) is the problem, producing incorrectly marginalised counts in those masked categories. 
This is shown in the following:", "_____no_output_____" ] ], [ [ "sfs2d[0]", "_____no_output_____" ], [ "pylab.sum(sfs2d[0])", "_____no_output_____" ], [ "# from dadi's marginalise function:\nfs_ery.data", "_____no_output_____" ], [ "# dividing by the correct number of sites to get pi per site:\nfs_ery.pi() / pylab.sum(sfs2d.data)", "_____no_output_____" ] ], [ [ "This is very close to the estimate of $\\pi$ derived from the folded 1D spectrum of ery! (see above)", "_____no_output_____" ] ], [ [ "fs_par.pi() / pylab.sum(sfs2d.data)", "_____no_output_____" ] ], [ [ "This is also nicely close to the estimate of $\\pi_{site}$ of par from its folded 1D spectrum.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "### Tajima's D", "_____no_output_____" ] ], [ [ "fs_ery.Watterson_theta() / pylab.sum(sfs2d.data)", "_____no_output_____" ], [ "fs_ery.Tajima_D()", "_____no_output_____" ], [ "fs_par.Tajima_D()", "_____no_output_____" ] ], [ [ "Now, I am calculating Tajima's D from the ery marginal spectrum by hand in order to check whether $\\delta$a$\\delta$i is doing the right thing.", "_____no_output_____" ] ], [ [ "n = 36\npi_Wakeley = (sum( [i*(n-i)*fs_ery.data[i] for i in range(1, n+1)] ) \n * 2.0 / (n*(n-1)))\n #/ pylab.sum(sfs2d.data)\npi_Wakeley", "_____no_output_____" ], [ "# number of segregating sites\n# this sums over all unmasked positions in the array\npylab.sum(fs_ery)", "_____no_output_____" ], [ "fs_ery.S()", "_____no_output_____" ], [ "S = pylab.sum(fs_ery)\ntheta_Watterson = S / pylab.sum(1.0 / (pylab.arange(1, n)))\ntheta_Watterson", "_____no_output_____" ], [ "# normalizing constant, see page 45 in Gillespie\na1 = pylab.sum(1.0 / pylab.arange(1, n))\n#print a1\na2 = pylab.sum(1.0 / pylab.arange(1, n)**2.0)\n#print a2\nb1 = (n+1.0)/(3.0*(n-1))\n#print b1\nb2 = 2.0*(n**2 + n + 3)/(9.0*n*(n-1))\n#print b2\nc1 = b1 - (1.0/a1)\n#print c1\nc2 = b2 - (n+2.0)/(a1*n) + a2/a1**2\n#print c2\nC = ((c1/a1)*S + (c2/(a1**2.0 + a2))*S*(S-1))\nC = C**(1/2.0)", "_____no_output_____" ], [ "ery_Tajimas_D = (pi_Wakeley - theta_Watterson) / C\nprint '{0:.6f}'.format(ery_Tajimas_D)", "-0.054767\n" ], [ "ery_Tajimas_D - fs_ery.Tajima_D()", "_____no_output_____" ] ], [ [ "$\\delta$a$\\delta$i seems to do the right thing. Note, that the estimate of Tajima's D from this marginal spectrum of ery is slightly different from the estimate derived from the folded 1D spectrum of ery (see /data3/claudius/Big_Data/ANGSD/SFS/SFS.Rmd). The folded 1D spectrum resulted in a Tajima's D estimate of $\\sim$0.05, i. e. a difference of almost 0.1. Again, the 2D spectrum is based on only those sites for which there were at least 9 individiuals with data in *both* populations, whereas the 1D folded spectrum of ery included all sites for which there were 9 ery individuals with data (see line 1571 onwards in `assembly.sh`).", "_____no_output_____" ] ], [ [ "fs_par.Tajima_D()", "_____no_output_____" ] ], [ [ "My estimate from the folded 1D spectrum of par was -0.6142268 (see /data3/claudius/Big_Data/ANGSD/SFS/SFS.Rmd). ", "_____no_output_____" ], [ "### Multi-population statistics", "_____no_output_____" ] ], [ [ "EryPar_unfolded_2dsfs.S()", "_____no_output_____" ] ], [ [ "The 2D spectrum contains counts from 60k sites that are variable in *par* or *ery* or both.", "_____no_output_____" ] ], [ [ "EryPar_unfolded_2dsfs.Fst()", "_____no_output_____" ] ], [ [ "This estimate of $F_{ST}$ according to Weir and Cockerham (1984) is well below the estimate of $\\sim$0.3 from ANGSD according to Bhatia/Hudson (2013). 
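For comparison, Hudson's $F_{ST}$ can be computed directly from the unfolded 2D spectrum as a ratio of sums over all joint frequency cells (a minimal sketch following the estimator in Bhatia et al. 2013; `fst_hudson` is not a dadi function):

```python
def fst_hudson(sfs2d):
    # sketch of Hudson's Fst from an unfolded 2D SFS, weighting each cell by its count
    n1, n2 = sfs2d.shape[0] - 1, sfs2d.shape[1] - 1   # gene copies per population
    p1 = (pylab.arange(n1 + 1.0) / n1)[:, None]       # sample allele freqs, pop 1
    p2 = (pylab.arange(n2 + 1.0) / n2)[None, :]       # sample allele freqs, pop 2
    num = (p1 - p2)**2 - p1*(1 - p1)/(n1 - 1.0) - p2*(1 - p2)/(n2 - 1.0)
    den = p1*(1 - p2) + p2*(1 - p1)
    w = sfs2d.data                                    # joint SFS counts as weights
    return pylab.sum(w * num) / pylab.sum(w * den)
```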
Note, however, that this estimate showed a positive bias of around 0.025 in 100 permutations of population labels of individuals. Taking the positive bias into account, both estimates of $F_{ST}$ are quite similar.", "_____no_output_____" ], [ "The following function `scramble_pop_ids` should generate a 2D SFS with counts as if individuals were assigned to populations randomly. Theoretically, the $F_{ST}$ calculated from this SFS should be 0.", "_____no_output_____" ] ], [ [ "%psource EryPar_unfolded_2dsfs.scramble_pop_ids", "_____no_output_____" ], [ "# plot the scrambled 2D SFS\n\ndadi.Plotting.plot_single_2d_sfs(EryPar_unfolded_2dsfs.scramble_pop_ids(), vmin=1)", "_____no_output_____" ] ], [ [ "So, this is how the 2D SFS would look like if _ery_ and _par_ were not genetically differentiated.", "_____no_output_____" ] ], [ [ "# get Fst for scrambled SFS\n\nEryPar_unfolded_2dsfs.scramble_pop_ids().Fst()", "_____no_output_____" ] ], [ [ "The $F_{ST}$ from the scrambled SFS is much lower than the $F_{ST}$ of the observed SFS. That should mean that there is significant population structure. However, the $F_{ST}$ from the scrambled SFS is not 0. I don't know why that is.", "_____no_output_____" ], [ "---", "_____no_output_____" ] ], [ [ "# folding\n\nEryPar_folded_2dsfs = EryPar_unfolded_2dsfs.fold()", "_____no_output_____" ], [ "EryPar_folded_2dsfs", "_____no_output_____" ], [ "EryPar_folded_2dsfs.mask", "_____no_output_____" ] ], [ [ "### Plotting", "_____no_output_____" ] ], [ [ "dadi.Plotting.plot_single_2d_sfs(EryPar_unfolded_2dsfs, vmin=1)", "_____no_output_____" ], [ "dadi.Plotting.plot_single_2d_sfs(EryPar_folded_2dsfs, vmin=1)", "_____no_output_____" ] ], [ [ "The folded 2D spectrum is *not* a minor allele frequency spectrum as are the 1D folded spectra of ery and par. This is because an allele that is minor in one population can be the major allele in the other. What is not counted are the alleles that are major in *both* populations, i. e. the upper right corner.", "_____no_output_____" ], [ "For the 2D spectrum to make sense it is crucial that allele frequencies are polarised the same way in both populations, either with an outgroup sequence or arbitrarily with respect to the reference sequence (as I did here).", "_____no_output_____" ], [ "#### How to fold a 1D spectrum", "_____no_output_____" ] ], [ [ "# unfolded spectrum from marginalisation of 2D unfolded spectrum\nfs_ery", "_____no_output_____" ], [ "len(fs_ery)", "_____no_output_____" ], [ "fs_ery.fold()", "_____no_output_____" ] ], [ [ "Let's use the formula (1.2) from Wakeley2009 to fold the 1D spectrum manually:\n$$\n\\eta_{i} = \\frac{\\zeta_{i} + \\zeta_{n-i}}{1 + \\delta_{i, n-i}} \\qquad 1 \\le i \\le [n/2]\n$$\n$n$ is the number of gene copies sampled, i. e. haploid sample size. $[n/2]$ is the largest integer less than or equal to n/2 (to handle uneven sample sizes). 
$\\zeta_{i}$ are the unfolded frequencies and $\\delta_{i, n-i}$ is Kronecker's $\\delta$ which is 1 if $i = n-i$ and zero otherwise (to avoid counting the unfolded n/2 frequency class twice with even sample sizes).", "_____no_output_____" ] ], [ [ "fs_ery_folded = fs_ery.copy() # make a copy of the UNfolded spectrum\nn = len(fs_ery)-1\nfor i in range(len(fs_ery)):\n fs_ery_folded[i] += fs_ery[n-i]\n if i == n/2.0:\n fs_ery_folded[i] /= 2\nfs_ery_folded[0:19]", "_____no_output_____" ], [ "isinstance(fs_ery_folded, pylab.ndarray)", "_____no_output_____" ], [ "mask = [True] \nmask.extend([False] * 18)\nmask.extend([True] * 18)\nprint mask\nprint sum(mask)", "[True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True]\n19\n" ], [ "mask = [True] * 37\nfor i in range(len(mask)):\n if i > 0 and i < 19:\n mask[i] = False\nprint mask\nprint sum(mask)", "[True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True]\n19\n" ] ], [ [ "Here is how to flatten an array of arrays with list comprehension:", "_____no_output_____" ] ], [ [ "mask = [[True], [False] * 18, [True] * 18]\nprint mask", "[[True], [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False], [True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True]]\n" ], [ "print [elem for a in mask for elem in a]", "[True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True]\n" ] ], [ [ "Set new mask for the folded spectrum:", "_____no_output_____" ] ], [ [ "fs_ery_folded.mask = mask", "_____no_output_____" ], [ "fs_ery_folded.folded = True", "_____no_output_____" ], [ "fs_ery_folded - fs_ery.fold()", "_____no_output_____" ] ], [ [ "The `fold()` function works correctly for 1D spectra, at least. 
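As a cross-check, the same fold can be written more compactly with array reversal (a sketch that also handles odd $n$; not dadi's implementation):

```python
def fold_1d(zeta):
    # zeta: unfolded 1D SFS of length n+1 as a numpy array
    n = len(zeta) - 1
    eta = (zeta + zeta[::-1]).astype(float)  # zeta_i + zeta_(n-i)
    eta = eta[:n//2 + 1]                     # keep minor allele classes 0 .. [n/2]
    if n % 2 == 0:
        eta[n//2] /= 2                       # the class with i == n-i was counted twice
    return eta
```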
How about 2D spectra?", "_____no_output_____" ], [ "$$\n\\eta_{i,j} = \\frac{\\zeta_{i,j} + \\zeta_{n-i, m-j}}{1 + \\delta_{i, n-i; j, m-j}} \n \\qquad 1 \\le i+j \\le \\Big[\\frac{n+m}{2}\\Big]\n$$", "_____no_output_____" ] ], [ [ "EryPar_unfolded_2dsfs.sample_sizes", "_____no_output_____" ], [ "EryPar_unfolded_2dsfs._total_per_entry()", "_____no_output_____" ], [ "# copy the unfolded 2D spectrum for folding\nimport copy\nsfs2d_folded = copy.deepcopy(EryPar_unfolded_2dsfs)", "_____no_output_____" ], [ "n = len(sfs2d_folded)-1\nm = len(sfs2d_folded[0])-1\nfor i in range(n+1):\n for j in range(m+1):\n sfs2d_folded[i,j] += sfs2d_folded[n-i, m-j]\n if i == n/2.0 and j == m/2.0:\n sfs2d_folded[i,j] /= 2", "_____no_output_____" ], [ "mask = sfs2d_folded._total_per_entry() > (n+m)/2\nmask", "_____no_output_____" ], [ "sfs2d_folded.mask = mask\nsfs2d_folded.fold = True", "_____no_output_____" ], [ "dadi.Plotting.plot_single_2d_sfs(sfs2d_folded, vmin=1)", "_____no_output_____" ] ], [ [ "I am going to go through every step in the `fold` function of dadi:", "_____no_output_____" ] ], [ [ "# copy the unfolded 2D spectrum for folding\nimport copy\nsfs2d_unfolded = copy.deepcopy(EryPar_unfolded_2dsfs)", "_____no_output_____" ], [ "total_samples = pylab.sum(sfs2d_unfolded.sample_sizes)\ntotal_samples", "_____no_output_____" ], [ "total_per_entry = dadi.Spectrum(sfs2d_unfolded._total_per_entry(), pop_ids=['ery', 'par'])\n#total_per_entry.pop_ids = ['ery', 'par']\ndadi.Plotting.plot_single_2d_sfs(total_per_entry, vmin=1)", "_____no_output_____" ], [ "total_per_entry = sfs2d_unfolded._total_per_entry()\ntotal_per_entry", "_____no_output_____" ], [ "where_folded_out = total_per_entry > total_samples/2\nwhere_folded_out", "_____no_output_____" ], [ "original_mask = sfs2d_unfolded.mask\noriginal_mask", "_____no_output_____" ], [ "pylab.logical_or([True, False, True], [False, False, True])", "_____no_output_____" ], [ "# get the number of elements along each axis\nsfs2d_unfolded.shape", "_____no_output_____" ], [ "[slice(None, None, -1) for i in sfs2d_unfolded.shape]", "_____no_output_____" ], [ "matrix = pylab.array([\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12]\n])\nreverse_slice = [slice(None, None, -1) for i in matrix.shape]\nreverse_slice", "_____no_output_____" ], [ "matrix[reverse_slice]", "_____no_output_____" ], [ "matrix[::-1,::-1]", "_____no_output_____" ] ], [ [ "With the variable length list of slice objects, one can generalise the reverse of arrays with any dimensions.", "_____no_output_____" ] ], [ [ "final_mask = pylab.logical_or(original_mask, dadi.Numerics.reverse_array(original_mask))\nfinal_mask", "_____no_output_____" ] ], [ [ "Here, folding doesn't mask new cells.", "_____no_output_____" ] ], [ [ "?pylab.where", "_____no_output_____" ], [ "pylab.where(matrix < 6, matrix, 0)", "_____no_output_____" ], [ "# this takes the part of the spectrum that is non-sensical if the derived allele is not known\n# and sets the rest to 0\nprint pylab.where(where_folded_out, sfs2d_unfolded, 0)", "[[ 0. 0. 0. ..., 0. 0. 0. ]\n [ 0. 0. 0. ..., 0. 0. 33.96861 ]\n [ 0. 0. 0. ..., 0. 1.593931\n 26.499772]\n ..., \n [ 0. 0. 0. ..., 21.754621 30.856007\n 232.64872 ]\n [ 0. 0. 3.966959 ..., 16.189377 19.283053\n 298.187262]\n [ 0. 17.555369 0. 
..., 166.433356 104.865969\n 287.751595]]\n" ], [ "# let's plot the bit of the spectrum that we are going to fold onto the rest:\ndadi.Plotting.plot_single_2d_sfs(dadi.Spectrum(pylab.where(where_folded_out, sfs2d_unfolded, 0)), vmin=1)", "_____no_output_____" ], [ "# now let's reverse this 2D array, i. e. last row first and last element of each row first:\n_reversed = dadi.Numerics.reverse_array(pylab.where(where_folded_out, sfs2d_unfolded, 0))\n_reversed", "_____no_output_____" ], [ "dadi.Plotting.plot_single_2d_sfs(dadi.Spectrum(_reversed), vmin=1)", "_____no_output_____" ] ], [ [ "The transformation we have done with the upper-right diagonal 2D array above should be identical to projecting it across a vertical center line (creating an upper left triangular matrix) and then projecting it across a horizontal center line (creating the final lower left triangular matrix). Note, that this is not like mirroring the upper-right triangular 2D array across the 36-36 diagonal!", "_____no_output_____" ] ], [ [ "# This shall now be added to the original unfolded 2D spectrum.\nsfs2d_folded = pylab.ma.masked_array(sfs2d_unfolded.data + _reversed) ", "_____no_output_____" ], [ "dadi.Plotting.plot_single_2d_sfs(dadi.Spectrum(sfs2d_folded), vmin=1)", "_____no_output_____" ], [ "sfs2d_folded.data", "_____no_output_____" ], [ "sfs2d_folded.data[where_folded_out] = 0\nsfs2d_folded.data", "_____no_output_____" ], [ "dadi.Plotting.plot_single_2d_sfs(dadi.Spectrum(sfs2d_folded), vmin=1)", "_____no_output_____" ], [ "sfs2d_folded.shape", "_____no_output_____" ], [ "where_ambiguous = (total_per_entry == total_samples/2.0)\nwhere_ambiguous", "_____no_output_____" ] ], [ [ "SNP's with joint frequencies in the True cells are counted twice at the moment due to the folding and the fact that the sample sizes are even.", "_____no_output_____" ] ], [ [ "# this extracts the diagonal values from the UNfolded spectrum and sets the rest to 0\nambiguous = pylab.where(where_ambiguous, sfs2d_unfolded, 0)\ndadi.Plotting.plot_single_2d_sfs(dadi.Spectrum(ambiguous), vmin=1)", "_____no_output_____" ] ], [ [ "These are the values in the diagonal before folding.", "_____no_output_____" ] ], [ [ "reversed_ambiguous = dadi.Numerics.reverse_array(ambiguous)\ndadi.Plotting.plot_single_2d_sfs(dadi.Spectrum(reversed_ambiguous), vmin=1)", "_____no_output_____" ] ], [ [ "These are the values that got added to the diagonal during folding. Comparing with the previous plot, one can see for instance that the value in the (0, 36) class got added to the value in the (36, 0) class and vice versa. The two frequency classes are equivalent, since it is arbitrary which allele we call minor in the total sample (of 72 gene copies). These SNP's are therefore counted twice.", "_____no_output_____" ] ], [ [ "a = -1.0*ambiguous + 0.5*ambiguous + 0.5*reversed_ambiguous\nb = -0.5*ambiguous + 0.5*reversed_ambiguous\na == b", "_____no_output_____" ], [ "sfs2d_folded += -0.5*ambiguous + 0.5*reversed_ambiguous", "_____no_output_____" ], [ "final_mask = pylab.logical_or(final_mask, where_folded_out)\nfinal_mask", "_____no_output_____" ], [ "sfs2d_folded = dadi.Spectrum(sfs2d_folded, mask=final_mask, data_folded=True, pop_ids=['ery', 'par'])", "_____no_output_____" ], [ "pylab.rcParams['figure.figsize'] = [12.0, 8.0]", "_____no_output_____" ], [ "dadi.Plotting.plot_single_2d_sfs(sfs2d_folded, vmin=1)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Model specification", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
ec84d2eb02ad2a95ddb1e4b2658a8bb556974c90
52,011
ipynb
Jupyter Notebook
notebooks/aama_analyser.ipynb
goatchurchprime/sail_section_packer
12181a1c209dbb6981dc2bad4d3b6009fe71d683
[ "MIT" ]
null
null
null
notebooks/aama_analyser.ipynb
goatchurchprime/sail_section_packer
12181a1c209dbb6981dc2bad4d3b6009fe71d683
[ "MIT" ]
null
null
null
notebooks/aama_analyser.ipynb
goatchurchprime/sail_section_packer
12181a1c209dbb6981dc2bad4d3b6009fe71d683
[ "MIT" ]
null
null
null
43.055464
403
0.496414
[ [ [ "import ezdxf\nfname = \"../dxffiles/japonicalibrecad.dxf\"\ndrawing = ezdxf.readfile(fname)\n\n\n# Flattening and decoding the drawing is done by chaining together generators\ndef flatentitylistgenerator(drawing): # ( (insertentity or None, entity) )\n insertentitystack = [ None ]\n while insertentitystack:\n insertentity = insertentitystack.pop()\n entities = drawing.entities if insertentity is None else drawing.blocks[insertentity.dxf.name]\n for entity in entities:\n if entity.dxftype() == 'INSERT':\n #print(\"INSERT translate\", e.dxf.insert, \"rotate\", e.dxf.rotation, \"scale\", e.dxf.xscale, e.dxf.yscale)\n insertentitystack.append(entity)\n elif entity.dxftype() != 'ACAD_PROXY_ENTITY':\n yield (insertentity, entity)\n \ndef colorentitylayerlistgenerator(drawing): # ( (layername, colornum, entity) )\n for insertentity, entity in flatentitylistgenerator(drawing):\n layername = entity.dxf.layer\n if entity.dxf.color == 256:\n layer = drawing.layers.get(layername)\n colnum = layer.get_color()\n elif entity.dxf.color == 0:\n if insertentity is not None:\n layer = drawing.layers.get(insertentity.dxf.layer)\n colnum = layer.get_color()\n else:\n colnum = 1\n else:\n colnum = entity.dxf.color\n yield (layername, colnum, entity)\n\n\ndef layergroupedentitygenerator(drawing):\n # pull all entities into a single list to sort and group by layer+color\n layercolorentities = list(colorentitylayerlistgenerator(drawing))\n\n # sort by layer name, but in the order that they appear in the drawing.layers list\n layernameorder = dict((n, i) for i, n in enumerate(layer.dxf.name for layer in drawing.layers))\n layercolorentities.sort(key=lambda X:(layernameorder.get(X[0], -1), X[1]))\n\n # Slice out the sequences of entities that share the same layer and color\n i = 0\n while i < len(layercolorentities):\n layername, colnum = layercolorentities[i][:2]\n j = i + 1\n while j < len(layercolorentities) and (layername, colnum) == layercolorentities[j][:2]:\n j += 1\n entities = [ entity for (layername, colnum, entity) in layercolorentities[i:j] ]\n yield (layername, colnum, entities)\n i = j\n \ndxftypesimplemented = [\"LINE\", \"CIRCLE\", \"ARC\", \"SPLINE\", \"LWPOLYLINE\", \"POLYLINE\"]\ndef countimplementedentities(entities, printfunc):\n entitytypecount = { }\n for entity in entities:\n dxftype = entity.dxftype()\n entitytypecount[dxftype] = entitytypecount.get(dxftype, 0)+1\n entitytypeset = set(entitytypecount.keys())\n \n res = 0\n for dxftype in dxftypesimplemented:\n if dxftype in entitytypeset:\n res += entitytypecount[dxftype]\n printfunc(\" %s%s%s\\n\" % (dxftype, \" \"*max(1, 11-len(dxftype)), entitytypecount[dxftype]))\n entitytypeset.remove(dxftype)\n if entitytypeset:\n for dxftype in entitytypeset:\n printfunc(\" **%s%s%s (not implemented)\\n\" % (dxftype, \" \"*max(1, 9-len(dxftype)), entitytypecount[dxftype]))\n return res\n \ndef printfunc(*x): # or FreeCAD.Console.PrintMessage()\n print(*x, end=\"\")\n \nfor layername, colnum, entities in layergroupedentitygenerator(drawing):\n print(layername, colnum)\n res = countimplementedentities(entities, printfunc)\n", "0 7\n **TEXT 59 (not implemented)\n1 1\n POLYLINE 59\n8 7\n POLYLINE 328\nNESTINGINFO 256\n **TEXT 7 (not implemented)\n" ], [ "for layername, colnum, entities in layergroupedentitygenerator(drawing):\n print(layername, colnum, len(entities))", "0 7 59\n1 1 59\n8 7 328\nNESTINGINFO 256 7\n" ], [ "d = list(drawing.entities)\n#d[0].dxf.name, d[1].dxf.name\nfor i in d:\n if i.dxftype() == \"INSERT\":\n print(i.dxf.name, 
list(i.virtual_entities()))\n", "PANEL2\\9 []\nPANEL2\\8 []\nPANEL2\\2 []\nPANEL2\\7 []\nPANEL3\\8 []\nPANEL3\\2 []\nPANEL2\\1 []\nPANEL3\\7 []\nPANEL3\\1 []\nPANEL2\\10 []\nPANEL3\\6 []\nPANEL1\\4 []\nPANEL2\\3 []\nPANEL3\\3 []\nPANEL3\\4 []\nPANEL2\\4 []\nPANEL1\\9 []\nPANEL1\\3 []\nPANEL1\\1 []\nPANEL4\\2 []\nPANEL1\\5 []\nPANEL2\\5 []\nPANEL1\\7 []\nPANEL1B\\9 []\nPANEL1\\2 []\nT1/3 []\nH1/2 []\nPANEL1B\\4 []\nPANEL1B\\3 []\nPANEL1B\\2 []\nPANEL3\\5 []\nH2 []\nPANEL1B\\1 []\nC2 []\nPANEL2\\6 []\nPANEL1\\8 []\nPANEL1\\6 []\nPANEL4\\4 []\nH1/3 []\nPANEL4\\3 []\nPANEL1B\\7 []\nT1/2 []\nPANEL1B\\8 []\nPANEL1B\\5 []\nPANEL4\\1 []\nPANEL4\\5 []\nC1/1 []\nPANEL1B\\6 []\nPANEL4\\6 []\nC3 []\nC4 []\nC1/3 []\nT1/1 []\nH1/1 []\nC1/2 []\nT2 []\nH4 []\nH3 []\nT3 []\n" ], [ "b = drawing.blocks.get(i.dxf.name)\n#b.virtual_entities\ni.virtual_entities()", "_____no_output_____" ], [ "d[0].dxfattribs()", "_____no_output_____" ], [ "help(d[0])", "Help on Insert in module ezdxf.entities.insert object:\n\nclass Insert(ezdxf.entities.subentity.LinkedEntities)\n | DXF INSERT entity\n | \n | Method resolution order:\n | Insert\n | ezdxf.entities.subentity.LinkedEntities\n | ezdxf.entities.dxfgfx.DXFGraphic\n | ezdxf.entities.dxfentity.DXFEntity\n | builtins.object\n | \n | Methods defined here:\n | \n | add_attrib(self, tag: str, text: str, insert: 'Vertex' = (0, 0), dxfattribs: dict = None) -> 'Attrib'\n | Attach an :class:`Attrib` entity to the block reference.\n | \n | Example for appending an attribute to an INSERT entity with none\n | standard alignment::\n | \n | e.add_attrib('EXAMPLETAG', 'example text').set_pos(\n | (3, 7), align='MIDDLE_CENTER'\n | )\n | \n | Args:\n | tag: tag name as string\n | text: content text as string\n | insert: insert location as tuple ``(x, y[, z])`` in :ref:`WCS`\n | dxfattribs: additional DXF attributes for the ATTRIB entity\n | \n | add_auto_attribs(self, values: Dict[str, str]) -> 'Insert'\n | Attach for each :class:`~ezdxf.entities.Attdef` entity, defined in the\n | block definition, automatically an :class:`Attrib` entity to the block\n | reference and set ``tag/value`` DXF attributes of the ATTRIB entities\n | by the ``key/value`` pairs (both as strings) of the `values` dict.\n | The ATTRIB entities are placed relative to the insert location of the\n | block reference, which is identical to the block base point.\n | \n | This method avoids the wrapper block of the\n | :meth:`~ezdxf.layouts.BaseLayout.add_auto_blockref` method, but the\n | visual results may not match the results of CAD applications, especially\n | for non uniform scaling. If the visual result is very important to you,\n | use the :meth:`add_auto_blockref` method.\n | \n | Args:\n | values: :class:`~ezdxf.entities.Attrib` tag values as ``tag/value``\n | pairs\n | \n | audit(self, auditor: 'Auditor') -> None\n | Validity check.\n | \n | block(self) -> Union[ForwardRef('BlockLayout'), NoneType]\n | Returns associated :class:`~ezdxf.layouts.BlockLayout`.\n | \n | delete_all_attribs(self) -> None\n | Delete all :class:`Attrib` entities attached to the INSERT entity.\n | \n | delete_attrib(self, tag: str, ignore=False) -> None\n | Delete an attached :class:`Attrib` entity from INSERT. 
If `ignore`\n | is ``False``, an :class:`DXFKeyError` exception is raised, if\n | ATTRIB `tag` does not exist.\n | \n | Args:\n | tag: ATTRIB name\n | ignore: ``False`` for raising :class:`DXFKeyError` if ATTRIB `tag`\n | does not exist.\n | \n | Raises:\n | DXFKeyError: if ATTRIB `tag` does not exist.\n | \n | explode(self, target_layout: 'BaseLayout' = None, non_uniform_scaling=None) -> 'EntityQuery'\n | Explode block reference entities into target layout, if target layout is\n | ``None``, the target layout is the layout of the block reference.\n | This method destroys the source block reference entity.\n | \n | Transforms the block entities into the required :ref:`WCS` location by\n | applying the block reference attributes `insert`, `extrusion`,\n | `rotation` and the scaling values `xscale`, `yscale` and `zscale`.\n | \n | Attached ATTRIB entities are converted to TEXT entities, this is the\n | behavior of the BURST command of the AutoCAD Express Tools.\n | \n | Returns an :class:`~ezdxf.query.EntityQuery` container with all\n | \"exploded\" DXF entities.\n | \n | .. warning::\n | \n | **Non uniform scaling** may lead to incorrect results for text\n | entities (TEXT, MTEXT, ATTRIB) and maybe some other entities.\n | \n | Args:\n | target_layout: target layout for exploded entities, ``None`` for\n | same layout as source entity.\n | \n | .. versionchanged:: 0.13\n | deprecated `non_uniform_scaling` argument\n | \n | export_dxf(self, tagwriter: 'TagWriter')\n | Export DXF entity by `tagwriter`.\n | \n | This is the first key method for exporting DXF entities:\n | \n | - has to know the group codes for each attribute\n | - has to add subclass tags in correct order\n | - has to integrate extended data: ExtensionDict, Reactors, AppData\n | - has to maintain the correct tag order (because sometimes order matters)\n | \n | (internal API)\n | \n | export_entity(self, tagwriter: 'TagWriter') -> None\n | Export entity specific data as DXF tags.\n | \n | get_attrib(self, tag: str, search_const: bool = False) -> Union[ForwardRef('Attrib'), ForwardRef('AttDef'), NoneType]\n | Get attached :class:`Attrib` entity with :code:`dxf.tag == tag`,\n | returns ``None`` if not found. 
Some applications may not attach constant\n | ATTRIB entities, set `search_const` to ``True``, to get at least the\n | associated :class:`AttDef` entity.\n | \n | Args:\n | tag: tag name\n | search_const: search also const ATTDEF entities\n | \n | get_attrib_text(self, tag: str, default: str = None, search_const: bool = False) -> str\n | Get content text of attached :class:`Attrib` entity with\n | :code:`dxf.tag == tag`, returns `default` if not found.\n | Some applications may not attach constant ATTRIB entities, set\n | `search_const` to ``True``, to get content text of the\n | associated :class:`AttDef` entity.\n | \n | Args:\n | tag: tag name\n | default: default value if ATTRIB `tag` is absent\n | search_const: search also const ATTDEF entities\n | \n | grid(self, size: Tuple[int, int] = (1, 1), spacing: Tuple[float, float] = (1, 1)) -> 'Insert'\n | Place block reference in a grid layout, grid `size` defines the\n | row- and column count, `spacing` defines the distance between two block\n | references.\n | \n | Args:\n | size: grid size as ``(row_count, column_count)`` tuple\n | spacing: distance between placing as\n | ``(row_spacing, column_spacing)`` tuple\n | \n | has_attrib(self, tag: str, search_const: bool = False) -> bool\n | Returns ``True`` if ATTRIB `tag` exist, for `search_const` doc see\n | :meth:`get_attrib`.\n | \n | Args:\n | tag: tag name as string\n | search_const: search also const ATTDEF entities\n | \n | is_xref(self) -> bool\n | Return ``True`` if XREF or XREF_OVERLAY.\n | \n | load_dxf_attribs(self, processor: ezdxf.entities.dxfns.SubclassProcessor = None) -> 'DXFNamespace'\n | Adds subclass processing for 'AcDbEntity', requires previous base\n | class processing by parent class.\n | \n | (internal API)\n | \n | matrix44(self) -> ezdxf.math.matrix44.Matrix44\n | Returns a transformation :class:`Matrix44` object to transform block\n | entities into WCS.\n | \n | .. versionadded:: 0.13\n | \n | multi_insert(self) -> Iterable[ForwardRef('Insert')]\n | Yields a virtual INSERT entity for each grid element of a MINSERT\n | entity (multi-insert).\n | \n | .. versionadded:: 0.14\n | \n | place(self, insert: 'Vertex' = None, scale: Tuple[float, float, float] = None, rotation: float = None) -> 'Insert'\n | Set block reference placing location `insert`, scaling and rotation\n | attributes. Parameters which are ``None`` will not be altered.\n | \n | Args:\n | insert: insert location as ``(x, y [,z])`` tuple\n | scale: ``(x-scale, y-scale, z-scale)`` tuple\n | rotation : rotation angle in degrees\n | \n | reset_transformation(self) -> None\n | Reset block reference parameters `location`, `rotation` and\n | `extrusion` vector.\n | \n | set_scale(self, factor: float)\n | Set uniform scaling.\n | \n | transform(self, m: 'Matrix44') -> 'Insert'\n | Transform INSERT entity by transformation matrix `m` inplace.\n | \n | Unlike the transformation matrix `m`, the INSERT entity can not\n | represent a non orthogonal target coordinate system, for this case an\n | :class:`InsertTransformationError` will be raised.\n | \n | .. versionadded:: 0.13\n | \n | translate(self, dx: float, dy: float, dz: float) -> 'Insert'\n | Optimized INSERT translation about `dx` in x-axis, `dy` in y-axis\n | and `dz` in z-axis.\n | \n | .. 
versionadded:: 0.13\n | \n | ucs(self)\n | Returns the block reference coordinate system as\n | :class:`ezdxf.math.UCS` object.\n | \n | virtual_entities(self, non_uniform_scaling=None, skipped_entity_callback: Union[Callable[[ezdxf.entities.dxfgfx.DXFGraphic, str], NoneType], NoneType] = None) -> Iterable[ezdxf.entities.dxfgfx.DXFGraphic]\n | Yields \"virtual\" entities of a block reference. This method is meant to\n | examine the block reference entities at the \"exploded\" location without\n | really \"exploding\" the block reference. The`skipped_entity_callback()`\n | will be called for all entities which are not processed, signature:\n | :code:`skipped_entity_callback(entity: DXFEntity, reason: str)`,\n | `entity` is the original (untransformed) DXF entity of the block\n | definition, the `reason` string is an explanation why the entity was\n | skipped.\n | \n | This entities are not stored in the entity database, have no handle and\n | are not assigned to any layout. It is possible to convert this entities\n | into regular drawing entities by adding the entities to the entities\n | database and a layout of the same DXF document as the block reference::\n | \n | doc.entitydb.add(entity)\n | msp = doc.modelspace()\n | msp.add_entity(entity)\n | \n | This method does not resolve the MINSERT attributes, only the\n | sub-entities of the base INSERT will be returned. To resolve MINSERT\n | entities check if multi insert processing is required, that's the case\n | if property :attr:`Insert.mcount` > 1, use the :meth:`Insert.multi_insert`\n | method to resolve the MINSERT entity into single INSERT entities.\n | \n | .. warning::\n | \n | **Non uniform scaling** may return incorrect results for text\n | entities (TEXT, MTEXT, ATTRIB) and maybe some other entities.\n | \n | Args:\n | skipped_entity_callback: called whenever the transformation of an\n | entity is not supported and so was skipped\n | \n | .. versionchanged:: 0.13\n | deprecated `non_uniform_scaling` argument\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | attribs\n | \n | attribs_follow\n | \n | has_scaling\n | Returns ``True`` if any axis scaling is applied.\n | \n | .. versionadded:: 0.12\n | \n | has_uniform_scaling\n | Returns ``True`` if scaling is uniform in x-, y- and z-axis ignoring\n | reflections e.g. (1, 1, -1) is uniform scaling.\n | \n | mcount\n | Returns the multi-insert count, MINSERT (multi-insert) processing\n | is required if :attr:`mcount` > 1.\n | \n | .. versionadded:: 0.14\n | \n | ----------------------------------------------------------------------\n | Data and other attributes defined here:\n | \n | DXFATTRIBS = <ezdxf.lldxf.attributes.DXFAttributes object>\n | \n | DXFTYPE = 'INSERT'\n | \n | ----------------------------------------------------------------------\n | Methods inherited from ezdxf.entities.subentity.LinkedEntities:\n | \n | __init__(self)\n | Default constructor. (internal API)\n | \n | add_sub_entities_to_entitydb(self, db: 'EntityDB') -> None\n | Add sub-entities (VERTEX, ATTRIB, SEQEND) to entity database `db`,\n | called from EntityDB. (internal API)\n | \n | all_sub_entities(self) -> Iterable[ForwardRef('DXFEntity')]\n | Yields all sub-entities ans SEQEND. (internal API)\n | \n | destroy(self) -> None\n | Destroy all data and references.\n | \n | link_entity(self, entity: 'DXFGraphic') -> None\n | Link VERTEX ot ATTRIB entities.\n | \n | link_seqend(self, seqend: 'DXFEntity') -> None\n | Link SEQEND entity. 
(internal API)\n | \n | new_seqend(self)\n | Create and bind new SEQEND. (internal API)\n | \n | post_bind_hook(self)\n | Create always a SEQEND entity.\n | \n | process_sub_entities(self, func: Callable[[ForwardRef('DXFEntity')], NoneType])\n | Call `func` for all sub-entities and SEQEND. (internal API)\n | \n | remove_dependencies(self, other: 'Drawing' = None)\n | Remove all dependencies from current document to bind entity to\n | `other` document. (internal API)\n | \n | set_owner(self, owner: str, paperspace: int = 0)\n | Set owner of all sub-entities and SEQEND. (internal API)\n | \n | ----------------------------------------------------------------------\n | Methods inherited from ezdxf.entities.dxfgfx.DXFGraphic:\n | \n | copy_to_layout(self, layout: 'BaseLayout') -> 'DXFEntity'\n | Copy entity to another `layout`, returns new created entity as\n | :class:`DXFEntity` object. Copying between different DXF drawings is\n | not supported.\n | \n | Args:\n | layout: any layout (model space, paper space, block)\n | \n | Raises:\n | DXFStructureError: for copying between different DXF drawings\n | \n | export_acdb_entity(self, tagwriter: 'TagWriter')\n | Export subclass 'AcDbEntity' as DXF tags. (internal API)\n | \n | get_hyperlink(self) -> Tuple[str, str, str]\n | Returns hyperlink, description and location.\n | \n | .. versionadded:: 0.12\n | \n | get_layout(self) -> Union[ForwardRef('BaseLayout'), NoneType]\n | Returns the owner layout or returns ``None`` if entity is not\n | assigned to any layout.\n | \n | graphic_properties(self) -> Dict\n | Returns the important common properties layer, color, linetype,\n | lineweight, ltscale, true_color and color_name as `dxfattribs` dict.\n | \n | .. versionadded:: 0.12\n | \n | has_hyperlink(self) -> bool\n | Returns ``True`` if entity has an attached hyperlink.\n | \n | .. versionadded:: 0.12\n | \n | linked_entities(self) -> Iterable[ForwardRef('DXFEntity')]\n | Yield linked entities: VERTEX or ATTRIB, different handling than\n | attached entities. (internal API)\n | \n | move_to_layout(self, layout: 'BaseLayout', source: 'BaseLayout' = None) -> None\n | Move entity from model space or a paper space layout to another layout.\n | For block layout as source, the block layout has to be specified. Moving\n | between different DXF drawings is not supported.\n | \n | Args:\n | layout: any layout (model space, paper space, block)\n | source: provide source layout, faster for DXF R12, if entity is\n | in a block layout\n | \n | Raises:\n | DXFStructureError: for moving between different DXF drawings\n | \n | ocs(self) -> Union[ezdxf.math.ucs.OCS, NoneType]\n | Returns object coordinate system (:ref:`ocs`) for 2D entities like\n | :class:`Text` or :class:`Circle`, returns ``None`` for entities without\n | OCS support.\n | \n | post_new_hook(self)\n | Post processing and integrity validation after entity creation\n | (internal API)\n | \n | rotate_axis(self, axis: 'Vertex', angle: float) -> 'DXFGraphic'\n | Rotate entity inplace about vector `axis`, returns `self`\n | (floating interface).\n | \n | Args:\n | axis: rotation axis as tuple or :class:`Vector`\n | angle: rotation angle in radians\n | \n | .. versionadded:: 0.13\n | \n | rotate_x(self, angle: float) -> 'DXFGraphic'\n | Rotate entity inplace about x-axis, returns `self`\n | (floating interface).\n | \n | Args:\n | angle: rotation angle in radians\n | \n | .. 
versionadded:: 0.13\n | \n | rotate_y(self, angle: float) -> 'DXFGraphic'\n | Rotate entity inplace about y-axis, returns `self`\n | (floating interface).\n | \n | Args:\n | angle: rotation angle in radians\n | \n | .. versionadded:: 0.13\n | \n | rotate_z(self, angle: float) -> 'DXFGraphic'\n | Rotate entity inplace about z-axis, returns `self`\n | (floating interface).\n | \n | Args:\n | angle: rotation angle in radians\n | \n | .. versionadded:: 0.13\n | \n | scale(self, sx: float, sy: float, sz: float) -> 'DXFGraphic'\n | Scale entity inplace about `dx` in x-axis, `dy` in y-axis and `dz`\n | in z-axis, returns `self` (floating interface).\n | \n | .. versionadded:: 0.13\n | \n | scale_uniform(self, s: float) -> 'DXFGraphic'\n | Scale entity inplace uniform about `s` in x-axis, y-axis and z-axis,\n | returns `self` (floating interface).\n | \n | .. versionadded:: 0.13\n | \n | set_hyperlink(self, link: str, description: str = None, location: str = None)\n | Set hyperlink of an entity.\n | \n | .. versionadded:: 0.12\n | \n | transform_to_wcs(self, ucs: 'UCS') -> 'DXFGraphic'\n | \n | unlink_from_layout(self) -> None\n | Unlink entity from associated layout. Does nothing if entity is already\n | unlinked.\n | \n | It is more efficient to call the\n | :meth:`~ezdxf.layouts.BaseLayout.unlink_entity` method of the associated\n | layout, especially if you have to unlink more than one entity.\n | \n | .. versionadded:: 0.13\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from ezdxf.entities.dxfgfx.DXFGraphic:\n | \n | rgb\n | Returns RGB true color as (r, g, b) tuple or None if true_color is\n | not set.\n | \n | transparency\n | Get transparency as float value between 0 and 1, 0 is opaque and 1\n | is 100% transparent (invisible).\n | \n | ----------------------------------------------------------------------\n | Data and other attributes inherited from ezdxf.entities.dxfgfx.DXFGraphic:\n | \n | DEFAULT_ATTRIBS = {'layer': '0'}\n | \n | ----------------------------------------------------------------------\n | Methods inherited from ezdxf.entities.dxfentity.DXFEntity:\n | \n | __deepcopy__(self, memodict: Dict = None)\n | Some entities maybe linked by more than one entity, to be safe use\n | `memodict` for bookkeeping.\n | (internal API)\n | \n | __repr__(self) -> str\n | Returns a simple string representation including the class.\n | \n | __str__(self) -> str\n | Returns a simple string representation.\n | \n | append_reactor_handle(self, handle: str) -> None\n | Append `handle` to reactors.\n | \n | copy(self: ~T) -> ~T\n | Returns a copy of `self` but without handle, owner and reactors.\n | This copy is NOT stored in the entity database and does NOT reside\n | in any layout, block, table or objects section! 
Extension dictionary\n | and reactors are not copied.\n | \n | Don't use this function to duplicate DXF entities in drawing,\n | use :meth:`EntityDB.duplicate_entity` instead for this task.\n | \n | Copying is not trivial, because of linked resources and the lack of\n | documentation how to handle this linked resources: extension dictionary,\n | handles in appdata, xdata or embedded objects.\n | \n | (internal API)\n | \n | del_dxf_attrib(self, key: str) -> None\n | Delete DXF attribute `key`, does not raise an error if attribute is\n | supported but not present.\n | \n | Raises :class:`DXFAttributeError` if `key` is not an supported DXF\n | attribute.\n | \n | discard_app_data(self, appid: str)\n | Discard application defined data for `appid`. Does not raise an\n | exception if no data for `appid` exist.\n | \n | discard_reactor_handle(self, handle: str) -> None\n | Discard `handle` from reactors. Does not raise an exception if\n | `handle` does not exist.\n | \n | discard_xdata(self, appid: str) -> None\n | Discard extended data for `appid`. Does not raise an exception if\n | no extended data for `appid` exist.\n | \n | discard_xdata_list(self, appid: str, name: str) -> None\n | Discard tag list `name` for extended data `appid`. Does not raise\n | an exception if no extended data for `appid` or no tag list `name`\n | exist.\n | \n | dxf_attrib_exists = has_dxf_attrib(self, key: str) -> bool\n | \n | dxfattribs(self, drop: Set[str] = None) -> Dict\n | Returns a ``dict`` with all existing DXF attributes and their\n | values and exclude all DXF attributes listed in set `drop`.\n | \n | .. versionchanged:: 0.12\n | added `drop` argument\n | \n | dxftype(self) -> str\n | Get DXF type as string, like ``LINE`` for the line entity.\n | \n | export_base_class(self, tagwriter: 'TagWriter') -> None\n | Export base class DXF attributes and structures. (internal API)\n | \n | export_embedded_objects(self, tagwriter: 'TagWriter') -> None\n | Export embedded objects by `tagwriter`. (internal API)\n | \n | export_xdata(self, tagwriter: 'TagWriter') -> None\n | Export DXF XDATA by `tagwriter`. (internal API)\n | \n | get_app_data(self, appid: str) -> ezdxf.lldxf.tags.Tags\n | Returns application defined data for `appid`.\n | \n | Args:\n | appid: application name as defined in the APPID table.\n | \n | Raises:\n | DXFValueError: no data for `appid` found\n | \n | get_dxf_attrib(self, key: str, default: Any = None) -> Any\n | Get DXF attribute `key`, returns `default` if key doesn't exist, or\n | raise :class:`DXFValueError` if `default` is :class:`DXFValueError`\n | and no DXF default value is defined::\n | \n | layer = entity.get_dxf_attrib(\"layer\")\n | # same as\n | layer = entity.dxf.layer\n | \n | Raises :class:`DXFAttributeError` if `key` is not an supported DXF\n | attribute.\n | \n | get_extension_dict(self) -> 'ExtensionDict'\n | Returns the existing :class:`~ezdxf.entities.xdict.ExtensionDict`.\n | \n | Raises:\n | AttributeError: extension dict does not exist\n | \n | get_flag_state(self, flag: int, name: str = 'flags') -> bool\n | Returns ``True`` if any `flag` of DXF attribute is ``1`` (on), else\n | ``False``. 
Always check only one flag state at the time.\n | \n | get_reactors(self) -> List[str]\n | Returns associated reactors as list of handles.\n | \n | get_xdata(self, appid: str) -> ezdxf.lldxf.tags.Tags\n | Returns extended data for `appid`.\n | \n | Args:\n | appid: application name as defined in the APPID table.\n | \n | Raises:\n | DXFValueError: no extended data for `appid` found\n | \n | get_xdata_list(self, appid: str, name: str) -> ezdxf.lldxf.tags.Tags\n | Returns tag list `name` for extended data `appid`.\n | \n | Args:\n | appid: application name as defined in the APPID table.\n | name: extended data list name\n | \n | Raises:\n | DXFValueError: no extended data for `appid` found or no data list `name` not found\n | \n | has_app_data(self, appid: str) -> bool\n | Returns ``True`` if application defined data for `appid` exist.\n | \n | has_dxf_attrib(self, key: str) -> bool\n | Returns ``True`` if DXF attribute `key` really exist.\n | \n | Raises :class:`DXFAttributeError` if `key` is not an supported DXF\n | attribute.\n | \n | has_reactors(self) -> bool\n | Returns ``True`` if entity has reactors.\n | \n | has_xdata(self, appid: str) -> bool\n | Returns ``True`` if extended data for `appid` exist.\n | \n | has_xdata_list(self, appid: str, name: str) -> bool\n | Returns ``True`` if a tag list `name` for extended data `appid`\n | exist.\n | \n | is_supported_dxf_attrib(self, key: str) -> bool\n | Returns ``True`` if DXF attrib `key` is supported by this entity.\n | Does not grant that attribute `key` really exist.\n | \n | load_tags(self, tags: ezdxf.lldxf.extendedtags.ExtendedTags, dxfversion: str = None) -> None\n | Generic tag loading interface, called if DXF document is loaded\n | from external sources.\n | \n | 1. Loading stage which set the basic DXF attributes, additional\n | resources (DXF objects) are not loaded yet. References to these\n | resources have to be stored as handles and can be resolved in the\n | 2. Loading stage: :meth:`post_load_hook`.\n | \n | (internal API)\n | \n | new_extension_dict(self) -> 'ExtensionDict'\n | \n | post_load_hook(self, doc: 'Drawing') -> Union[Callable, NoneType]\n | The 2nd loading stage when loading DXF documents from an external\n | source, for the 1st loading stage see :meth:`load_tags`.\n | \n | This stage is meant to convert resource handles into :class:`DXFEntity`\n | objects. This is an untrusted environment where valid structure are not\n | guaranteed, raise exceptions only for unrecoverable structure errors\n | and fix everything else. 
Log fixes for debugging!\n | \n | Some fixes can not be applied at this stage, because some structures\n | like the OBJECTS section are not initialized, in this case return a\n | callable, which will be executed after the DXF document is fully\n | initialized, for an example see :class:`Image`.\n | \n | Triggered in method: :meth:`Drawing._2nd_loading_stage`\n | \n | Examples for two stage loading:\n | Image, Underlay, DXFGroup, Dictionary, Dimstyle\n | \n | preprocess_export(self, tagwriter: 'TagWriter') -> bool\n | Pre requirement check and pre processing for export.\n | \n | Returns False if entity should not be exported at all.\n | \n | (internal API)\n | \n | replace_xdata_list(self, appid: str, name: str, tags: Iterable) -> None\n | Replaces tag list `name` for existing extended data `appid` by `tags`.\n | Appends new list if tag list `name` do not exist, but raises\n | :class:`DXFValueError` if extended data `appid` do not exist.\n | \n | Args:\n | appid: application name as defined in the APPID table.\n | name: extended data list name\n | tags: iterable of (code, value) tuples or :class:`~ezdxf.lldxf.types.DXFTag`\n | \n | Raises:\n | DXFValueError: no extended data for `appid` found\n | \n | set_app_data(self, appid: str, tags: Iterable) -> None\n | Set application defined data for `appid` as iterable of tags.\n | \n | Args:\n | appid: application name as defined in the APPID table.\n | tags: iterable of (code, value) tuples or :class:`~ezdxf.lldxf.types.DXFTag`\n | \n | set_dxf_attrib(self, key: str, value: Any) -> None\n | Set new `value` for DXF attribute `key`::\n | \n | entity.set_dxf_attrib(\"layer\", \"MyLayer\")\n | # same as\n | entity.dxf.layer = \"MyLayer\"\n | \n | Raises :class:`DXFAttributeError` if `key` is not an supported DXF\n | attribute.\n | \n | set_flag_state(self, flag: int, state: bool = True, name: str = 'flags') -> None\n | Set binary coded `flag` of DXF attribute `name` to ``1`` (on)\n | if `state` is ``True``, set `flag` to ``0`` (off)\n | if `state` is ``False``.\n | \n | set_reactors(self, handles: Iterable[str]) -> None\n | Set reactors as list of handles.\n | \n | set_xdata(self, appid: str, tags: Iterable) -> None\n | Set extended data for `appid` as iterable of tags.\n | \n | Args:\n | appid: application name as defined in the APPID table.\n | tags: iterable of (code, value) tuples or :class:`~ezdxf.lldxf.types.DXFTag`\n | \n | set_xdata_list(self, appid: str, name: str, tags: Iterable) -> None\n | Set tag list `name` for extended data `appid` as iterable of tags.\n | \n | Args:\n | appid: application name as defined in the APPID table.\n | name: extended data list name\n | tags: iterable of (code, value) tuples or :class:`~ezdxf.lldxf.types.DXFTag`\n | \n | setup_app_data(self, appdata: List[ezdxf.lldxf.tags.Tags]) -> None\n | Setup data structures from APP data. (internal API)\n | \n | update_dxf_attribs(self, dxfattribs: Dict) -> None\n | Set DXF attributes by a ``dict`` like :code:`{'layer': 'test',\n | 'color': 4}`.\n | \n | update_handle(self, handle: str) -> None\n | Update entity handle. (internal API)\n | \n | ----------------------------------------------------------------------\n | Class methods inherited from ezdxf.entities.dxfentity.DXFEntity:\n | \n | from_text(text: str, doc: 'Drawing' = None) -> ~T from builtins.type\n | Load constructor from text for testing. 
(internal API)\n | \n | load(tags: ezdxf.lldxf.extendedtags.ExtendedTags, doc: 'Drawing' = None) -> ~T from builtins.type\n | Constructor to generate entities loaded from an external source.\n | \n | LOAD process:\n | \n | This is an untrusted environment where valid structure are not\n | guaranteed and errors should be fixed, because the package-user is not\n | responsible for the problems and also can't fix them, raising\n | exceptions should only be done for unrecoverable issues.\n | Log fixes for debugging!\n | \n | Be more like BricsCAD and not as mean as AutoCAD!\n | \n | The :attr:`Drawing.is_loading` flag can be checked to distinguish the\n | NEW and the LOAD process.\n | \n | Args:\n | tags: DXF tags as :class:`ExtendedTags`\n | doc: DXF Document\n | \n | (internal API)\n | \n | new(handle: str = None, owner: str = None, dxfattribs: Dict = None, doc: 'Drawing' = None) -> ~T from builtins.type\n | Constructor for building new entities from scratch by ezdxf.\n | \n | NEW process:\n | \n | This is a trusted environment where everything is under control of\n | ezdxf respectively the package-user, it is okay to raise exception\n | to show implementation errors in ezdxf or usage errors of the\n | package-user.\n | \n | The :attr:`Drawing.is_loading` flag can be checked to distinguish the\n | NEW and the LOAD process.\n | \n | Args:\n | handle: unique DXF entity handle or None\n | owner: owner handle if entity has an owner else None or '0'\n | dxfattribs: DXF attributes\n | doc: DXF document\n | \n | (internal API)\n | \n | shallow_copy(other: 'DXFEntity') -> ~T from builtins.type\n | Copy constructor for type casting e.g. Polyface and Polymesh.\n | (internal API)\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from ezdxf.entities.dxfentity.DXFEntity:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n | \n | has_extension_dict\n | Returns ``True`` if entity has an attached\n | :class:`~ezdxf.entities.xdict.ExtensionDict`.\n | \n | is_alive\n | Returns ``False`` if entity has been deleted.\n | \n | is_bound\n | Returns ``True`` if entity is bound to DXF document.\n | \n | is_virtual\n | Returns ``True`` if entity is a virtual entity.\n | \n | ----------------------------------------------------------------------\n | Data and other attributes inherited from ezdxf.entities.dxfentity.DXFEntity:\n | \n | MIN_DXF_VERSION_FOR_EXPORT = 'AC1009'\n | \n | __annotations__ = {'DEFAULT_ATTRIBS': typing.Dict}\n\n" ], [ "drawing.blocks[\"AAMADXFSAMPLE1\"].attribs", "_____no_output_____" ], [ "import ezdxf\nfrom ezdxf.addons.dxf2code import entities_to_code, block_to_code\n\ndoc = ezdxf.readfile(fname)\nmsp = doc.modelspace()\nsource = entities_to_code(msp)\n\n# create source code for a block definition\nblock_source = block_to_code(doc.blocks[\"AAMADXFSAMPLE1\"])\n", "_____no_output_____" ], [ "#source.merge(block_source) # crashes\n", "_____no_output_____" ], [ "\n# merge source code objects\n\nwith open('source.py', mode='wt') as f:\n f.write(source.import_str())\n f.write('\\n\\n')\n f.write(source.code_str())\n f.write('\\n\\n')\n f.write(block_source.import_str())\n f.write('\\n\\n')\n f.write(block_source.code_str())\n f.write('\\n\\n')\n \n ", "_____no_output_____" ], [ "entities[0].dxf.layer", "_____no_output_____" ], [ "list(drawing.layers)[3].dxf.__dict__", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec84e60ee090422fde1c73c834870ac87ae8ad92
522,218
ipynb
Jupyter Notebook
Starter_Code/dashboard.ipynb
jasonedwardsoto/Pythonic-Monopoly
86d24d933c45186dcdea9d54402b6cca1359c44f
[ "RSA-MD" ]
null
null
null
Starter_Code/dashboard.ipynb
jasonedwardsoto/Pythonic-Monopoly
86d24d933c45186dcdea9d54402b6cca1359c44f
[ "RSA-MD" ]
null
null
null
Starter_Code/dashboard.ipynb
jasonedwardsoto/Pythonic-Monopoly
86d24d933c45186dcdea9d54402b6cca1359c44f
[ "RSA-MD" ]
null
null
null
280.762366
319,150
0.84302
[ [ [ "# San Francisco Rental Prices Dashboard\n\nIn this notebook, you will compile the visualizations from the previous analysis into functions that can be used for a Panel dashboard.", "_____no_output_____" ] ], [ [ "# imports\nimport panel as pn\npn.extension('plotly')\nimport plotly.express as px\nimport pandas as pd\nimport hvplot.pandas\nimport matplotlib.pyplot as plt\nimport os\nfrom pathlib import Path\nfrom dotenv import load_dotenv", "_____no_output_____" ], [ "# Read the Mapbox API key\nload_dotenv()\nmap_box_api = os.getenv(\"mapbox\")\npx.set_mapbox_access_token(map_box_api)", "_____no_output_____" ] ], [ [ "# Import Data", "_____no_output_____" ] ], [ [ "# Import the necessary CSVs to Pandas DataFrames\n# YOUR CODE HERE!\n\nfile_path = Path(\"Data/sfo_neighborhoods_census_data.csv\")\nsf_data = pd.read_csv(file_path, index_col=\"year\")\n\nfile_path_2 = Path(\"Data/neighborhoods_coordinates.csv\")\ndf_neighborhood_location = pd.read_csv(file_path)", "_____no_output_____" ] ], [ [ "- - -", "_____no_output_____" ], [ "## Panel Visualizations\n\nIn this section, you will copy the code for each plot type from your analysis notebook and place it into separate functions that Panel can use to create panes for the dashboard. \n\nThese functions will convert the plot object to a Panel pane.\n\nBe sure to include any DataFrame transformation/manipulation code required along with the plotting code.\n\nReturn a Panel pane object from each function that can be used to build the dashboard.\n\nNote: Remove any `.show()` lines from the code. We want to return the plots instead of showing them. The Panel dashboard will then display the plots.", "_____no_output_____" ] ], [ [ "# Define Panel Visualization Functions\ndef housing_units_per_year():\n \"\"\"Housing Units Per Year.\"\"\"\n \n # YOUR CODE HERE!\n sfo_data_mean_per_year = sf_data.groupby(\"year\").mean()\n sfo_data_mean_per_year.reset_index(inplace=True)\n\n std=sfo_data_mean_per_year[\"housing_units\"].std()\n mins=sfo_data_mean_per_year[\"housing_units\"].min()-std\n maxes=sfo_data_mean_per_year[\"housing_units\"].max()+std\n\n housing_units_per_year_fig = px.bar(\n sfo_data_mean_per_year,\n x=\"year\",\n y=\"housing_units\",\n range_y=[mins,maxes],\n labels={\"year\":\"Year\", \"housing_units\": \"Housing Units\"},\n title=\"Housing Units in San Francisco from 2010 to 2016\",\n width=700,\n height=600\n )\n return housing_units_per_year_fig\n\ndef average_gross_rent():\n \"\"\"Average Gross Rent in San Francisco Per Year.\"\"\"\n \n # YOUR CODE HERE!\n sfo_data_mean_per_year = sfo_data.groupby(\"year\").mean()\n sfo_data_mean_per_year.reset_index(inplace=True)\n\n average_gross_rent_per_year_fig = px.line(\n sfo_data_mean_per_year,\n x=\"year\",\n y=\"gross_rent\",\n labels={\"year\":\"Year\",\"gross_rent\":\"Gross Rent\"},\n title=\"Average Gross Rent in San Francisco\",\n width=700, \n height=600\n )\n return average_gross_rent_per_year_fig\n\ndef average_sales_price():\n \"\"\"Average Sales Price Per Year.\"\"\"\n \n # YOUR CODE HERE!\n average_price_sqr_foot = sf_data[\"sale_price_sqr_foot\"].groupby([sf_data.index]).mean()\n sales_price_fig = plt.figure()\n average_price_plot = (average_price_sqr_foot.plot.line(x='year', y='sale_price_sqr_foot', title=\"Average Sales Price per Year\"))\n plt.close(sales_price_fig)\n \n return pn.pane.Matplotlib(sales_price_fig)\n\n\ndef average_price_by_neighborhood():\n \"\"\"Average Prices by Neighborhood.\"\"\"\n \n # YOUR CODE HERE!\n average_prices_by_neighborhood = 
sfo_data.groupby([\"year\",\"neighborhood\"]).mean()\n average_prices_by_neighborhood.reset_index(inplace=True)\n average_prices_by_neighborhood_plot = average_prices_by_neighborhood.hvplot.line(x='year',y=\"sale_price_sqr_foot\",ylabel='Avg. Sale Price per Square Foot',groupby='neighborhood')\n return average_prices_by_neighborhood_plot\n\n\n\ndef top_most_expensive_neighborhoods():\n \"\"\"Top 10 Most Expensive Neighborhoods.\"\"\"\n\n # YOUR CODE HERE!\n avg_value_per_neighborhood = sf_data.groupby([sf_data[\"neighborhood\"]]).mean()\n avg_value_per_neighborhood = avg_value_per_neighborhood.reset_index()\n top_10_most_expensive = avg_value_per_neighborhood.nlargest(10, 'sale_price_sqr_foot').reset_index()\n top10_plot = px.bar(top_10_most_expensive,\n x=\"neighborhood\", \n y=\"sale_price_sqr_foot\", \n title=\"Top 10 Most Expensive Neighborhoods in San Francisco\", \n )\n\n return top10_plot\n\n\ndef most_expensive_neighborhoods_rent_sales():\n \"\"\"Comparison of Rent and Sales Prices of Most Expensive Neighborhoods.\"\"\" \n \n # YOUR CODE HERE!\n neighborhood_cost = sfo_data.groupby([sfo_data.index, \"neighborhood\"]).mean()\n neighborhood_cost.reset_index(inplace=True)\n \n cost_plot = neighborhood_cost.hvplot.bar(\n 'year',\n ['gross_rent', 'sale_price_sqr_foot'],\n ylabel='Number of Housing Units',\n groupby='neighborhood',\n frame_width=1900,\n title='Comparison of Rent and Sales Prices of Most Expensive Neighborhoods'\n )\n \n return cost_plot\n \n \ndef parallel_coordinates():\n \"\"\"Parallel Coordinates Plot.\"\"\"\n\n # YOUR CODE HERE!\n avg_value_per_neighborhood = sf_data.groupby([sf_data[\"neighborhood\"]]).mean() \n top_10_most_expensive = avg_value_per_neighborhood.nlargest(10, 'sale_price_sqr_foot').reset_index()\n top_10_parallel = px.parallel_coordinates(\n top_10_most_expensive, \n color='sale_price_sqr_foot')\n\n return top_10_parallel\n \n\n\n\ndef parallel_categories():\n \"\"\"Parallel Categories Plot.\"\"\"\n \n # YOUR CODE HERE!\n avg_value_per_neighborhood = sf_data.groupby([sf_data[\"neighborhood\"]]).mean() \n top_10_ex_neighborhood = avg_value_per_neighborhood.nlargest(10, 'sale_price_sqr_foot').reset_index()\n top_10_parallel_categories = px.parallel_categories(\n top_10_ex_neighborhood,\n dimensions=[\"neighborhood\", \"sale_price_sqr_foot\", \"housing_units\", \"gross_rent\"],\n color=\"sale_price_sqr_foot\",\n color_continuous_scale=px.colors.sequential.Inferno,\n)\n\n return top_10_parallel_categories\n\n\n\ndef neighborhood_map():\n \"\"\"Neighborhood Map\"\"\"\n \n file_path = Path(\"Data/neighborhoods_coordinates.csv\")\n df_neighborhood_location = pd.read_csv(file_path_2)\n avg_value_per_neighborhood = sf_data.groupby([sf_data[\"neighborhood\"]]).mean() \n avg_value_per_neighborhood = avg_value_per_neighborhood.reset_index()\n avg_value_location = pd.concat([avg_value_per_neighborhood, df_neighborhood_location], axis=\"columns\", join=\"inner\")\n\n location_map = px.scatter_mapbox(\n avg_value_location,\n lat=\"Lat\",\n lon=\"Lon\",\n size=\"sale_price_sqr_foot\",\n color=\"gross_rent\",\n color_continuous_scale=px.colors.cyclical.IceFire,\n #title=\"Average Sale Price per Square Foot and Gross Rent in San Francisco\",\n zoom=11,\n width=800,\n)\n \n\n return location_map\n\n\ndef sunburst():\n \"\"\"Sunburst Plot.\"\"\"\n \n # YOUR CODE HERE!\n \n most_expensive = sfo_data.groupby(\"neighborhood\").mean()\n most_expensive = most_expensive.sort_values(\"sale_price_sqr_foot\", ascending=False).head(10)\n most_expensive = 
most_expensive.reset_index()\n neighborhood_cost = sfo_data.groupby([sfo_data.index, \"neighborhood\"]).mean()\n neighborhood_cost.reset_index(inplace=True)\n df_expensive_neighborhoods_per_year = neighborhood_cost[neighborhood_cost[\"neighborhood\"].isin(most_expensive[\"neighborhood\"])]\n \n sunburst = px.sunburst(\n df_expensive_neighborhoods_per_year,\n path=['year', 'neighborhood'],\n values='sale_price_sqr_foot',\n color='gross_rent',\n title='Costs Analysis of Most Expensive Neighborhoods in San Francisco per Year',\n height=800\n )\n \n return sunburst\n", "_____no_output_____" ] ], [ [ "## Panel Dashboard\n\nIn this section, you will combine all of the plots into a single dashboard view using Panel. Be creative with your dashboard design!", "_____no_output_____" ] ], [ [ "# Create a tab layout for the dashboard\ngeo_column = pn.Column(\"Geographical Display\", neighborhood_map())\n\nplot_column = pn.Column(\n \"Charts Analysis\",\n housing_units_per_year(), average_gross_rent(), average_sales_price(), \n average_price_by_neighborhood(), top_most_expensive_neighborhoods(), most_expensive_neighborhoods_rent_sales()\n)\n\ninteractive_column = pn.Column(\"Interactive Charts Analysis\", parallel_categories(), parallel_coordinates(), sunburst())\n", "_____no_output_____" ], [ "# Create the dashboard\nsfo_dashboard = pn.Tabs(\n (\"Geographical\", geo_column),\n (\"Interactive\", interactive_column),\n (\"Statistical\", plot_column)\n)", "_____no_output_____" ] ], [ [ "## Serve the Panel Dashboard", "_____no_output_____" ] ], [ [ "# Serve the# dashboard\n# YOUR CODE HERE!\nsfo_dashboard.servable()", "_____no_output_____" ] ], [ [ "# Debugging\n\nNote: Some of the Plotly express plots may not render in the notebook through the panel functions.\n\nHowever, you can test each plot by uncommenting the following code", "_____no_output_____" ] ], [ [ "# housing_units_per_year()\nhousing_units_per_year()", "_____no_output_____" ], [ "# average_gross_rent()\naverage_gross_rent()", "_____no_output_____" ], [ "# average_sales_price()\naverage_sales_price()", "_____no_output_____" ], [ "# average_price_by_neighborhood()\naverage_price_by_neighborhood()", "_____no_output_____" ], [ "# top_most_expensive_neighborhoods()\ntop_most_expensive_neighborhoods()", "_____no_output_____" ], [ "# most_expensive_neighborhoods_rent_sales()\nmost_expensive_neighborhoods_rent_sales()", "_____no_output_____" ], [ "# neighborhood_map().show()\nneighborhood_map().show()", "_____no_output_____" ], [ "# parallel_categories()\nparallel_categories()", "_____no_output_____" ], [ "# parallel_coordinates()\nparallel_coordinates()", "_____no_output_____" ], [ "# sunburst()\nsunburst()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec84e73b439dcd4f2a82727f19fb03751ef9bda3
68,089
ipynb
Jupyter Notebook
Projects/Dr. Semmelweis and the Discovery of Handwashing/notebook.ipynb
AmitHasanShuvo/Data-Scientist-with-Python--DataCamp-
78f82c56f656682e0ccc9e3260a3ddf6dab34af9
[ "MIT" ]
3
2020-07-09T17:28:23.000Z
2020-07-28T22:10:01.000Z
Projects/Dr. Semmelweis and the Discovery of Handwashing/notebook.ipynb
AmitHasanShuvo/Data-Scientist-with-Python--DataCamp-
78f82c56f656682e0ccc9e3260a3ddf6dab34af9
[ "MIT" ]
null
null
null
Projects/Dr. Semmelweis and the Discovery of Handwashing/notebook.ipynb
AmitHasanShuvo/Data-Scientist-with-Python--DataCamp-
78f82c56f656682e0ccc9e3260a3ddf6dab34af9
[ "MIT" ]
null
null
null
68,089
68,089
0.924129
[ [ [ "## 1. Meet Dr. Ignaz Semmelweis\n<p><img style=\"float: left;margin:5px 20px 5px 1px\" src=\"https://assets.datacamp.com/production/project_20/img/ignaz_semmelweis_1860.jpeg\"></p>\n<!--\n<img style=\"float: left;margin:5px 20px 5px 1px\" src=\"https://assets.datacamp.com/production/project_20/datasets/ignaz_semmelweis_1860.jpeg\">\n-->\n<p>This is Dr. Ignaz Semmelweis, a Hungarian physician born in 1818 and active at the Vienna General Hospital. If Dr. Semmelweis looks troubled it's probably because he's thinking about <em>childbed fever</em>: A deadly disease affecting women that just have given birth. He is thinking about it because in the early 1840s at the Vienna General Hospital as many as 10% of the women giving birth die from it. He is thinking about it because he knows the cause of childbed fever: It's the contaminated hands of the doctors delivering the babies. And they won't listen to him and <em>wash their hands</em>!</p>\n<p>In this notebook, we're going to reanalyze the data that made Semmelweis discover the importance of <em>handwashing</em>. Let's start by looking at the data that made Semmelweis realize that something was wrong with the procedures at Vienna General Hospital.</p>", "_____no_output_____" ] ], [ [ "# importing modules\nimport pandas as pd\n# Read datasets/yearly_deaths_by_clinic.csv into yearly\nyearly = pd.read_csv('datasets/yearly_deaths_by_clinic.csv')\n\n# Print out yearly\nprint(yearly)", " year births deaths clinic\n0 1841 3036 237 clinic 1\n1 1842 3287 518 clinic 1\n2 1843 3060 274 clinic 1\n3 1844 3157 260 clinic 1\n4 1845 3492 241 clinic 1\n5 1846 4010 459 clinic 1\n6 1841 2442 86 clinic 2\n7 1842 2659 202 clinic 2\n8 1843 2739 164 clinic 2\n9 1844 2956 68 clinic 2\n10 1845 3241 66 clinic 2\n11 1846 3754 105 clinic 2\n" ] ], [ [ "## 2. The alarming number of deaths\n<p>The table above shows the number of women giving birth at the two clinics at the Vienna General Hospital for the years 1841 to 1846. You'll notice that giving birth was very dangerous; an <em>alarming</em> number of women died as the result of childbirth, most of them from childbed fever.</p>\n<p>We see this more clearly if we look at the <em>proportion of deaths</em> out of the number of women giving birth. Let's zoom in on the proportion of deaths at Clinic 1.</p>", "_____no_output_____" ] ], [ [ "# Calculate proportion of deaths per no. births\nyearly[\"proportion_deaths\"] = yearly['deaths'] / yearly['births']\n\n# Extract clinic 1 data into yearly1 and clinic 2 data into yearly2\nyearly1 = yearly[yearly['clinic'] == \"clinic 1\"]\nyearly2 = yearly[yearly['clinic'] == \"clinic 2\"]\n\n# Print out yearly1\nprint(yearly1)", " year births deaths clinic proportion_deaths\n0 1841 3036 237 clinic 1 0.078063\n1 1842 3287 518 clinic 1 0.157591\n2 1843 3060 274 clinic 1 0.089542\n3 1844 3157 260 clinic 1 0.082357\n4 1845 3492 241 clinic 1 0.069015\n5 1846 4010 459 clinic 1 0.114464\n" ] ], [ [ "## 3. Death at the clinics\n<p>If we now plot the proportion of deaths at both clinic 1 and clinic 2 we'll see a curious pattern…</p>", "_____no_output_____" ] ], [ [ "# This makes plots appear in the notebook\n%matplotlib inline\n\n# Plot yearly proportion of deaths at the two clinics\nax = yearly1.plot(x='year',y='proportion_deaths', label=\"Clinic 1\")\nyearly2.plot(x='year',y='proportion_deaths',label='Clinic 2',ax = ax)\nax.set_ylabel(\"Proportion deaths\")", "_____no_output_____" ] ], [ [ "## 4. 
The handwashing begins\n<p>Why is the proportion of deaths constantly so much higher in Clinic 1? Semmelweis saw the same pattern and was puzzled and distressed. The only difference between the clinics was that many medical students served at Clinic 1, while mostly midwife students served at Clinic 2. While the midwives only tended to the women giving birth, the medical students also spent time in the autopsy rooms examining corpses. </p>\n<p>Semmelweis started to suspect that something on the corpses, spread from the hands of the medical students, caused childbed fever. So in a desperate attempt to stop the high mortality rates, he decreed: <em>Wash your hands!</em> This was an unorthodox and controversial request; nobody in Vienna knew about bacteria at this point in time. </p>\n<p>Let's load in monthly data from Clinic 1 to see if the handwashing had any effect.</p>", "_____no_output_____" ] ], [ [ "\n# Read datasets/monthly_deaths.csv into monthly\nmonthly = pd.read_csv(\"datasets/monthly_deaths.csv\", parse_dates=['date'])\n\n# Calculate proportion of deaths per no. births\nmonthly[\"proportion_deaths\"] = monthly['deaths'] / monthly['births']\n\n# Print out the first rows in monthly\nprint(monthly.head())", "        date  births  deaths  proportion_deaths\n0 1841-01-01     254      37           0.145669\n1 1841-02-01     239      18           0.075314\n2 1841-03-01     277      12           0.043321\n3 1841-04-01     255       4           0.015686\n4 1841-05-01     255       2           0.007843\n" ] ], [ [ "## 5. The effect of handwashing\n<p>With the data loaded we can now look at the proportion of deaths over time. In the plot below we haven't marked where obligatory handwashing started, but it reduced the proportion of deaths to such a degree that you should be able to spot it!</p>", "_____no_output_____" ] ], [ [ "\n# Plot monthly proportion of deaths\nax = monthly.plot(x='date', y='proportion_deaths')\nax.set_ylabel(\"Proportion deaths\")", "_____no_output_____" ] ], [ [ "## 6. The effect of handwashing highlighted\n<p>Starting from the summer of 1847 the proportion of deaths is drastically reduced and, yes, this was when Semmelweis made handwashing obligatory. </p>\n<p>The effect of handwashing is made even more clear if we highlight this in the graph.</p>", "_____no_output_____" ] ], [ [ "# Date when handwashing was made mandatory\nimport pandas as pd\nhandwashing_start = pd.to_datetime('1847-06-01')\n\n# Split monthly into before and after handwashing_start\nbefore_washing = monthly[monthly['date'] < handwashing_start]\nafter_washing = monthly[monthly['date'] >= handwashing_start]\n\n# Plot monthly proportion of deaths before and after handwashing\nax = before_washing.plot(x='date', y='proportion_deaths', label='Before handwashing')\nafter_washing.plot(x='date', y='proportion_deaths', label='After handwashing', ax=ax)\nax.set_ylabel(\"Proportion deaths\")", "_____no_output_____" ] ], [ [ "## 7. More handwashing, fewer deaths?\n<p>Again, the graph shows that handwashing had a huge effect. How much did it reduce the monthly proportion of deaths on average?</p>", "_____no_output_____" ] ], [ [ "# Difference in mean monthly proportion of deaths due to handwashing\nbefore_proportion = before_washing['proportion_deaths']\nafter_proportion = after_washing['proportion_deaths']\nmean_diff = after_proportion.mean() - before_proportion.mean()\nmean_diff", "_____no_output_____" ] ], [ [ "## 8. A Bootstrap analysis of Semmelweis handwashing data\n<p>It reduced the proportion of deaths by around 8 percentage points! From 10% on average to just 2% (which is still a high number by modern standards). </p>\n<p>To get a feeling for the uncertainty around how much handwashing reduces mortalities we could look at a confidence interval (here calculated using the bootstrap method).</p>", "_____no_output_____" ] ], [ [ "# A bootstrap analysis of the reduction of deaths due to handwashing\nboot_mean_diff = []\nfor i in range(3000):\n    boot_before = before_proportion.sample(frac=1, replace=True)\n    boot_after = after_proportion.sample(frac=1, replace=True)\n    boot_mean_diff.append(boot_after.mean() - boot_before.mean())\n\n# Calculating a 95% confidence interval from boot_mean_diff \nconfidence_interval = pd.Series(boot_mean_diff).quantile([0.025, 0.975])\nconfidence_interval\n", "_____no_output_____" ] ], [ [ "## 9. The fate of Dr. Semmelweis\n<p>So handwashing reduced the proportion of deaths by between 6.7 and 10 percentage points, according to a 95% confidence interval. All in all, it would seem that Semmelweis had solid evidence that handwashing was a simple but highly effective procedure that could save many lives.</p>\n<p>The tragedy is that, despite the evidence, Semmelweis' theory — that childbed fever was caused by some \"substance\" (what we today know as <em>bacteria</em>) from autopsy room corpses — was ridiculed by contemporary scientists. The medical community largely rejected his discovery and in 1849 he was forced to leave the Vienna General Hospital for good.</p>\n<p>One reason for this was that statistics and statistical arguments were uncommon in medical science in the 1800s. Semmelweis only published his data as long tables of raw data, but he didn't show any graphs or confidence intervals. If he had had access to the analysis we've just put together, he might have been more successful in getting the Viennese doctors to wash their hands.</p>", "_____no_output_____" ] ], [ [ "# The data Semmelweis collected points to that:\ndoctors_should_wash_their_hands = True", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec84e803123a6eed13a374fa86852e501a7abba4
782,730
ipynb
Jupyter Notebook
Old MRI segmentation code/Hist-seg-WES_007_6.ipynb
akac0297/PETLAB
950cc153ce230d12d752ad0d11111e7fc22d9e7d
[ "MIT" ]
null
null
null
Old MRI segmentation code/Hist-seg-WES_007_6.ipynb
akac0297/PETLAB
950cc153ce230d12d752ad0d11111e7fc22d9e7d
[ "MIT" ]
null
null
null
Old MRI segmentation code/Hist-seg-WES_007_6.ipynb
akac0297/PETLAB
950cc153ce230d12d752ad0d11111e7fc22d9e7d
[ "MIT" ]
null
null
null
94.887865
97,908
0.733685
[ [ [ "#import modules\nimport SimpleITK as sitk\n\nfrom platipy.imaging.visualisation.tools import ImageVisualiser\nfrom platipy.imaging.utils.tools import get_com\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n%matplotlib notebook\n\nfrom platipy.imaging.visualisation.tools import ImageVisualiser\n\nfrom platipy.imaging.registration.registration import (\n initial_registration,\n fast_symmetric_forces_demons_registration,\n transform_propagation,\n apply_field\n)", "_____no_output_____" ], [ "breast=sitk.ReadImage(\"/home/alicja/Downloads/Segmentation.nii.gz\") #right breast\n#Left breast=sitk.ReadImage(\"contralateral_segmentation.nii.gz\")\n\npat_no=\"07\"\ntimept=\"6\"\n\nfilenameB50T_1=\"WES_007_6_20180810_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz\"\nfilenameB800T_1=\"WES_007_6_20180810_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B800T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz\"\nfilenameT2w_1=\"WES_007_6_20180810_MR_T2_TSE_TRA_SPAIR_TSE2D1_11_T2_TSE_TRA_SPAIR_3.nii.gz\"\nfilenameMPE_1=\"max_img_WES_0\" +pat_no+\"_\"+timept+\".nii.gz\"\n\nWES_1_B50T=sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + pat_no + \"/IMAGES/\" +filenameB50T_1)\nWES_1_B800T=sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + pat_no + \"/IMAGES/\" +filenameB800T_1)\nWES_1_T2w=sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + pat_no + \"/IMAGES/\" +filenameT2w_1)\nWES_1_MPE=sitk.ReadImage(filenameMPE_1)\n\nWES_010_4_B50T=sitk.ReadImage(\"/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz\")", "_____no_output_____" ], [ "image_to_0_rigid, tfm_to_0_rigid = initial_registration(\n WES_1_B50T,\n WES_010_4_B50T,\n options={\n 'shrink_factors': [8,4],\n 'smooth_sigmas': [0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n reg_method='Rigid')\n\nimage_to_0_dir, tfm_to_0_dir = fast_symmetric_forces_demons_registration(\n WES_1_B50T,\n image_to_0_rigid,\n resolution_staging=[4,2],\n iteration_staging=[10,10]\n)\n\nbreast_to_0_rigid = transform_propagation(\n WES_1_B50T,\n breast,\n tfm_to_0_rigid,\n structure=True\n)\n\nbreast_to_0_dir = apply_field(\n breast_to_0_rigid,\n tfm_to_0_dir,\n structure=True\n)", "_____no_output_____" ], [ "vis = ImageVisualiser(WES_1_B50T, axis='z', cut=get_com(breast_to_0_dir), window=[-250, 500])\nvis.add_contour(breast_to_0_dir, name='BREAST', color='g')\nfig = vis.show()", "You have selected a single axis and multiple slice locations, attempting to match.\n" ], [ "breast_contour_dilate=sitk.BinaryDilate(breast_to_0_dir, (2,2,2))", "_____no_output_____" ], [ "vis = ImageVisualiser(WES_1_B50T, axis='z', cut=get_com(breast_to_0_dir), window=[-250, 500])\nvis.add_contour(breast_contour_dilate, name='BREAST', color='g')\nfig = vis.show()", "_____no_output_____" ], [ "masked_breast = sitk.Mask(WES_1_B50T, breast_contour_dilate)", "_____no_output_____" ], [ "values = sitk.GetArrayViewFromImage(masked_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,1000,50), histtype='stepfilled', lw=2)\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('Intensity')\nax.set_ylabel('Frequency')\nfig.show()", "_____no_output_____" ], [ "def estimate_tumour_vol(img_mri, lowerthreshold=300, upperthreshold=5000, hole_size=1):\n label_threshold = sitk.BinaryThreshold(img_mri, lowerThreshold=lowerthreshold, 
upperThreshold=upperthreshold)\n label_threshold_cc = sitk.RelabelComponent(sitk.ConnectedComponent(label_threshold))\n label_threshold_cc_x = (label_threshold_cc==1)\n label_threshold_cc_x_f = sitk.BinaryMorphologicalClosing(label_threshold_cc_x, (hole_size,hole_size,hole_size))\n return(label_threshold_cc_x_f)", "_____no_output_____" ], [ "image_mri=WES_1_B50T\narr_mri = sitk.GetArrayFromImage(image_mri)\narr_mri[:,:,arr_mri.shape[2]//2:] = 0 #if laterality is RIGHT\nimage_mri_masked=sitk.GetImageFromArray(arr_mri)\nimage_mri_masked.CopyInformation(image_mri)\n\nlabel_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=600, upperthreshold=5000, hole_size=1)\n\nsitk.WriteImage(label_threshold_cc_x_f,\"test_label_threshold_0\" + pat_no + \"_\" +timept +\"_B50T_hist.nii.gz\")", "_____no_output_____" ], [ "masked_breast = sitk.Mask(WES_1_B800T, breast_contour_dilate)\n\nvalues = sitk.GetArrayViewFromImage(masked_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,750,50), histtype='stepfilled', lw=2)\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('Intensity')\nax.set_ylabel('Frequency')\nfig.show()", "_____no_output_____" ], [ "image_mri=WES_1_B800T\narr_mri = sitk.GetArrayFromImage(image_mri)\narr_mri[:,:,arr_mri.shape[2]//2:] = 0 #if lat is right\nimage_mri_masked=sitk.GetImageFromArray(arr_mri)\nimage_mri_masked.CopyInformation(image_mri)\n\nlabel_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=330, upperthreshold=5000, hole_size=1)\n\nsitk.WriteImage(label_threshold_cc_x_f,\"test_label_threshold_0\" + pat_no + \"_\" +timept +\"_B800T_hist.nii.gz\")", "_____no_output_____" ], [ "WES_1_T2w=sitk.Resample(WES_1_T2w,WES_1_B50T)\nmasked_breast = sitk.Mask(WES_1_T2w, breast_contour_dilate)\nvalues = sitk.GetArrayViewFromImage(masked_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,300,50), histtype='stepfilled', lw=2)\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('Intensity')\nax.set_ylabel('Frequency')\nfig.show()", "_____no_output_____" ], [ "image_mri=WES_1_T2w\narr_mri = sitk.GetArrayFromImage(image_mri)\narr_mri[:,:,arr_mri.shape[2]//2:] = 0 #if lat is right\nimage_mri_masked=sitk.GetImageFromArray(arr_mri)\nimage_mri_masked.CopyInformation(image_mri)\n\nlabel_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=80, upperthreshold=5000, hole_size=1)\n\nsitk.WriteImage(label_threshold_cc_x_f,\"test_label_threshold_0\" + pat_no + \"_\" +timept +\"_T2w_hist.nii.gz\")", "_____no_output_____" ], [ "WES_1_MPE=sitk.Resample(WES_1_MPE,WES_1_B50T)\nmasked_breast = sitk.Mask(WES_1_MPE, breast_contour_dilate)\nvalues = sitk.GetArrayViewFromImage(masked_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,750,50), histtype='stepfilled', lw=2)\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('Intensity')\nax.set_ylabel('Frequency')\nfig.show()", "_____no_output_____" ], [ "image_mri=WES_1_MPE\narr_mri = sitk.GetArrayFromImage(image_mri)\narr_mri[:,:,arr_mri.shape[2]//2:] = 0 #if lat is right\narr_mri[:,:,:30] = 0\nimage_mri_masked=sitk.GetImageFromArray(arr_mri)\nimage_mri_masked.CopyInformation(image_mri)\n\nlabel_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=180, upperthreshold=5000, hole_size=1)\n\nsitk.WriteImage(label_threshold_cc_x_f,\"test_label_threshold_0\" + pat_no + \"_\" +timept +\"_MPE_hist.nii.gz\")", "_____no_output_____" ], [ "#add segs\nseg_B50T=sitk.ReadImage(\"test_label_threshold_0\" + pat_no + 
\"_\" +timept +\"_B50T_hist.nii.gz\")\nseg_B800T=sitk.ReadImage(\"test_label_threshold_0\" + pat_no + \"_\" +timept +\"_B800T_hist.nii.gz\")\nseg_T2=sitk.ReadImage(\"test_label_threshold_0\" + pat_no + \"_\" +timept +\"_T2w_hist.nii.gz\")\nseg_MPE=sitk.ReadImage(\"test_label_threshold_0\" + pat_no + \"_\" +timept +\"_MPE_hist.nii.gz\")\n\nseg_B50T=sitk.Resample(seg_B50T,seg_T2)\nseg_B800T=sitk.Resample(seg_B800T,seg_T2)\nseg_MPE=sitk.Resample(seg_MPE,seg_T2)\n\nnew_seg_T2=sitk.LabelMapToBinary(sitk.Cast(seg_T2, sitk.sitkLabelUInt8))\nnew_seg_B50T=sitk.LabelMapToBinary(sitk.Cast(seg_B50T, sitk.sitkLabelUInt8))\nnew_seg_B800T=sitk.LabelMapToBinary(sitk.Cast(seg_B800T, sitk.sitkLabelUInt8))\nnew_seg_MPE=sitk.LabelMapToBinary(sitk.Cast(seg_MPE, sitk.sitkLabelUInt8))\n\nnew_TRACE_seg=(new_seg_B50T+new_seg_B800T)/2\nnew_seg_1=(sitk.Cast(new_seg_T2,sitk.sitkFloat64)+sitk.Cast(new_TRACE_seg,sitk.sitkFloat64)+sitk.Cast(new_seg_MPE,sitk.sitkFloat64))\nvis=ImageVisualiser(new_seg_1, cut=get_com(new_seg_1), window=[0,3])\nfig=vis.show()", "_____no_output_____" ], [ "new_seg_1_1=sitk.BinaryThreshold(new_seg_1, lowerThreshold=2)\n\nvis=ImageVisualiser(new_seg_1_1, cut=get_com(new_seg_1), window=[0,1])\nfig=vis.show()", "_____no_output_____" ], [ "sitk.WriteImage(new_seg_1_1,\"new_seg_0\"+pat_no+\"_\"+timept+\"_mri.nii.gz\")", "_____no_output_____" ], [ "#Checking for volume decrease\n\ntp1=\"4\"\ntp2=\"5\"\ntp3=\"6\"\n\n#volumes\nimg1=sitk.ReadImage(\"new_seg_0\"+pat_no+\"_\"+tp1+\"_mri.nii.gz\")\nimg2=sitk.ReadImage(\"new_seg_0\"+pat_no+\"_\"+tp2+\"_mri.nii.gz\")\nimg3=sitk.ReadImage(\"new_seg_0\"+pat_no+\"_\"+tp3+\"_mri.nii.gz\")\n\narr1=sitk.GetArrayFromImage(img1)\narr2=sitk.GetArrayFromImage(img2)\narr3=sitk.GetArrayFromImage(img3)\n\nvol1=np.sum(arr1==1)\nvol2=np.sum(arr2==1)\nvol3=np.sum(arr3==1)\n\nprint(vol1, vol2, vol3)", "2403 2344 1348\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec84f0dd1271f195b105a1de5cf22c8bf6aeae41
1,803
ipynb
Jupyter Notebook
EDA/task_7.ipynb
Einhard6176/FlightDelayPredictor
ba04269b123bc23f9bb4c4e3dd237e60ae911c09
[ "MIT" ]
null
null
null
EDA/task_7.ipynb
Einhard6176/FlightDelayPredictor
ba04269b123bc23f9bb4c4e3dd237e60ae911c09
[ "MIT" ]
null
null
null
EDA/task_7.ipynb
Einhard6176/FlightDelayPredictor
ba04269b123bc23f9bb4c4e3dd237e60ae911c09
[ "MIT" ]
null
null
null
21.722892
113
0.565724
[ [ [ "# **Task 7**: Test the hypothesis whether planes fly faster when there is the departure delay? ", "_____no_output_____" ] ], [ [ "import psycopg2\nimport pandas as pd\nimport numpy as np\nimport scipy.stats as stats\nfrom functions import calculateSpeed, otp, SQLquery, aircraftSpeedTTest", "_____no_output_____" ], [ "data = SQLquery('select dep_delay, distance, air_time from flights order by random() limit 10000')", "_____no_output_____" ], [ "aircraftSpeedTTest(data)", "Hypothesis rejected; aircraft speeds are not equal with delays present.\nP-value: 5.984872925088224e-09\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
ec8518ebc01cde66331cc9db526bf5574cd5e8a7
12,606
ipynb
Jupyter Notebook
site/public/courses/DS-2.1/Assignments/07_Ensemble_Methods.ipynb
KitsuneNoctus/makeschool
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
[ "MIT" ]
1
2021-08-24T20:22:19.000Z
2021-08-24T20:22:19.000Z
site/public/courses/DS-2.1/Assignments/07_Ensemble_Methods.ipynb
KitsuneNoctus/makeschool
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
[ "MIT" ]
null
null
null
site/public/courses/DS-2.1/Assignments/07_Ensemble_Methods.ipynb
KitsuneNoctus/makeschool
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
[ "MIT" ]
null
null
null
63.989848
1,050
0.678804
[ [ [ "<center><img src='img/ms_logo.jpeg' height=40% width=40%></center>\n\n<center><h1>Ensemble Methods: Random Forests and Gradient Boosted Trees</h1></center>\n\nIn today's notebook, we're going to cover two of the more powerful and resilient machine learning algorithms used in predictive analytics--**_Random Forests_** and **_Gradient Boosted Trees_**. These algorithms belong to a class of algorithms called **_Ensemble Methods_**. \n\n<center><h3>What are Ensemble Methods?</h3></center>\n\nEnsemble Methods are machine learning algorithms that rely on the \"Wisdom of the Crowd\". That is, they take the approach that many weak algorithms working together do better than 1 big, monolithic algorithm. In practice, they're often right. Both of these algorithms create many small, poorly predictive learners that do only slightly better than chance. However, as we'll see when we begin using them, with enough of these learners voting on the overall prediction, we often get great results, with the added benefit of models that are more resistant to variance in the dataset, and are resistant to overfitting than many other model types (We'll talk about why later). \n\nBefore using examples in practice, Let's gain some intuition on how each algorithm works. \n\n<center><h3>Random Forests</h3></center>\n\n**_Random Forest_** is a name for a supervised learning method created by Berkeley professor Leo Breiman in 2001, although prior work on this problem had been done by other professors before him (Breiman's white paper available [here](https://www.stat.berkeley.edu/~breiman/randomforest2001.pdf)). The name for this algorithm gives an intuition for how it works--a **_Random Forest_** is just a collection of many small **_Decision Trees_**. The secret to this algorithm is using **_Bootstrap Aggregation_** (or **_bagging_**, for short) and **_subspace sampling_**, which is just a fancy way of saying that the algorithm selects random samples from the dataset with replacement (the _bagging_ step), and then selects a random subset of columns from data ( the _subspace sampling_ step) to use when creating each new \"weak\" Decision Tree. \n\nIn order to understand this model, let's visualize an example. \n\nPretend that we have a dataset with 10 columns, and thousands of rows. Our random forest algorithm would start by randomly selecting around 2/3 of the rows, and then randomly selecting 6 columns in the data that it will use to train on (this step is important--the learner does NOT have access to all of the columns for each data point, only a randomly selected subset!). It will then train it's first **_weak learner_**-- a decision tree that is only allowed to use the 6 columns that were randomly selected. This becomes our first \"tree\" planted in our Random Forest. The Random Forest algorithm will then repeat this step, sampling another 2/3's of the data, and grabbing another 6 columns from the dataset (recall that the sampling is done with replacement, which means that some of the same data and/or feature columns will likely be chosen again--including an exceedingly small chance that the exact same data/columns will be chosen again!). After a sufficient number of trees have been created, the algorithm is ready to go! \n<br> \n<center>**_Wait! How Many Trees Should be in my Random Forest?_**</center>\n \nThe number of trees created for a Random Forest is a parameter specified by the user. Typically, people tend to use the numbers 10, 30, or 100. 
The more trees you have, the more accurate your Random Forest will likely be. However, this algorithm is subject to _diminishing returns_ for each new tree--that is, each new tree created will add less accuracy than the tree before it. At some point, adding new trees just takes up more memory without making the accuracy of the model any more predictive. \n\nFor more background on how Random Forests work, check out the video below:", "_____no_output_____" ] ], [ [ "from IPython.display import HTML\n\nHTML(\"\"\"\n<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/D_2LkhMJcfY\" \nframeborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen></iframe>\n\"\"\")", "_____no_output_____" ] ], [ [ "<center><h3>Building a Random Forest</h3></center>\n\nLike all the other great machine learning algorithms, `sklearn` has a great implementation of Random Forests that we can use. Let's start by building a classifer on the `pima_indians_diabetes` dataset contained within the `datasets` folder in this repo. \n\nYou'll find the `RandomForestsClassifier` object contained with `sklearn.ensemble`. For more information, see [sklearns' documentation for this classifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html). \n\n<center>**_Tuneable Parameters_**</center>\n\nYou might be able to increase the accuracy of your Random Forest Classifier by tuning some of it's parameters. Think about the values you pass in for the following parameters, and see how the affect the accuracy of your model:\n\n**_n_estimators:_** The number of Trees in your Random Forest. \n\n**_max_depth:_** How deep each Tree in the forst is allowed to go. \n\n**_min_samples_split:_** The minimum number of samples required to split a node in a Decision Tree. \n\n**_Challenge:_** Import the `pima_indians_diabetes` dataset, clean and scaled as needed, and then fit a random forest to this model. Create predictions and test the accuracy of the model. \n\n\n**_Stretch Challenge:_** Tune the parameters of the model, and track how it affects your accuracy. (This algorithm is stochastic, so remember to set a random seed!)", "_____no_output_____" ] ], [ [ "# Import the dataset, clean it, and then fit and a RandomForestClassifier \n# and make predictions on it below!\n\n", "_____no_output_____" ] ], [ [ "<center><h3>Gradient Boosted Trees</h3></center>\n\nThe other ensemble method we'll cover in this notebook is **_Gradient Boosted Trees_**, also called referred to as _Gradient Boosting_ for short (or GBT for really short). \n\nGradient Boosting also uses the concept of **_weak learners_**, but wheras Random Forest uses Decision Trees, GBT typically **_stumps_**--Decision Trees with 1 split. \n\nFor an intuitive visualization that shows how Gradient Boosted Trees can create very accurate with trees that are kept purposefully weak, take a look at the visualizations on [this website](http://arogozhnikov.github.io/2016/06/24/gradient_boosting_explained.html) (don't worry too much about the math, although you are encouraged to click on the explanations such as \"what is gradient boosting?\"). \n\nWhen you've played around with those visualizations, take a look at [this article](http://mccormickml.com/2013/12/13/adaboost-tutorial/), which gives a more in-depth explanation of **_Adaboost_**, which is the classic algorithm for Gradient Boosted Trees. \n\n<center><h3>How Does Adaboost Work?</h3></center>\n\nAdaboost starts grabbing a random subsample of the dataset. 
It then creates a weak learner based on this subsample. This weak learner is then used to make predictions on the remaining data, with the algorithm keeping track of which points it gets right, and which points it gets wrong. Each data point is given a weight. The ones that previous learners got wrong will have a high weight, since it is increasingly important to create weak learners that can get this point correct. Conversely, the \"easy\" data points--the ones that many classifiers can get right--will see their weights shrink. This is intuitive--if most of our weak learners can get a data point right, it isn't that \"hard\", so we shouldn't worry about it too much. \n\nThe higher the weight for a given data point, the more likely it is that it will be included in the training set used to create the next weak learner, thereby increasing the chances that a weak learner will be created that can get the \"hard\" data points correct. In this way, the chances of correctly classifying \"hard\" data points will be _boosted_ each round!\n\nFor more information on how Gradient Boosted Trees work, check out the video below on Adaboost! Again, don't worry about the math--just try to gain an intuition for how the algorithm works!", "_____no_output_____" ] ], [ [ "HTML(\"\"\"<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/BoGNyWW9-mE\" frameborder=\"0\" \n allow=\"autoplay; encrypted-media\" allowfullscreen></iframe>\"\"\")", "_____no_output_____" ] ], [ [ "<center><h3>Using Adaboost for Classification</h3></center>\n\nLike Random Forests, `sklearn` contains a great implementation of a `GradientBoostingClassifier`, which is also found within `sklearn.ensemble`. As you did above with Random Forests, you're going to use `sklearn`'s implementation of this algorithm to make classifications on the `pima_indians_diabetes` dataset. \n\n**_Challenge_**: Create a `GradientBoostingClassifier` object, fit it to the `pima_indians_diabetes` dataset, and then use it to make predictions and test the overall accuracy of the model. \n\n\n**_Stretch Challenge:_** Take a look at the documentation for [GradientBoostingClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html) and look at the parameters available. Try tuning different parameters in the model and see how it affects the quality of the predictions made by the classifier!\n\n**_Stretch Challenge:_** Adaboost is the classic algorithm usually covered for learning GBT, but there are many more robust implementations of GBT that exist today. The best seems to be `XGBoost`. Work through [this tutorial](https://machinelearningmastery.com/develop-first-xgboost-model-python-scikit-learn/) to install, fit, and use `XGBoost` on the dataset. ", "_____no_output_____" ] ], [ [ "# Write your code below!", "_____no_output_____" ] ] ]
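The challenge cells in this notebook are intentionally left blank. As a starting point, here is a minimal sketch of one possible solution, assuming the `pima_indians_diabetes` CSV lives in the repo's `datasets` folder and uses `Outcome` as its label column -- both the file name and the column name are assumptions, so adjust them to match the actual file.

```python
# A minimal sketch for the Random Forest / Gradient Boosting challenges above.
# ASSUMPTIONS: the CSV path and the 'Outcome' label column are illustrative
# guesses -- rename them to match the file in the repo's datasets/ folder.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score

df = pd.read_csv("datasets/pima_indians_diabetes.csv")   # assumed path
X = df.drop("Outcome", axis=1)                           # assumed label column
y = df["Outcome"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
                                                    random_state=42)

# Random Forest: n_estimators trees, each fit on a bootstrap sample.
rf = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=42)
rf.fit(X_train, y_train)
print("Random Forest accuracy:", accuracy_score(y_test, rf.predict(X_test)))

# Gradient Boosting: max_depth=1 keeps each learner a stump, as discussed above.
gbt = GradientBoostingClassifier(n_estimators=100, max_depth=1, random_state=42)
gbt.fit(X_train, y_train)
print("Gradient Boosting accuracy:", accuracy_score(y_test, gbt.predict(X_test)))
```

Setting `random_state` pins down both the bagging and the boosting subsampling, which is what the stretch challenge's note about stochasticity is getting at.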
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec851be518cddfd45b12c4792ba9b4e1abd5b311
39427
ipynb
Jupyter Notebook
data_prep.ipynb
nedaresa/car-classification-using-machine-learning
dcd928fccabdcc9d197059a8e7e6a76f1c452f84
[ "MIT" ]
null
null
null
data_prep.ipynb
nedaresa/car-classification-using-machine-learning
dcd928fccabdcc9d197059a8e7e6a76f1c452f84
[ "MIT" ]
null
null
null
data_prep.ipynb
nedaresa/car-classification-using-machine-learning
dcd928fccabdcc9d197059a8e7e6a76f1c452f84
[ "MIT" ]
null
null
null
29.379285
148
0.469982
[ [ [ "## Exploratory Data Analysis ", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport pickle\nimport functions as fn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder", "_____no_output_____" ], [ "car_df = pd.read_csv('data/cardataset.zip')", "_____no_output_____" ], [ "car_df.shape", "_____no_output_____" ], [ "car_df.head()", "_____no_output_____" ], [ "car_df.isna().sum()", "_____no_output_____" ] ], [ [ "subsetting the dataset to 7920 rows from 11914 total rows", "_____no_output_____" ] ], [ [ "car_df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 11914 entries, 0 to 11913\nData columns (total 16 columns):\nMake 11914 non-null object\nModel 11914 non-null object\nYear 11914 non-null int64\nEngine Fuel Type 11911 non-null object\nEngine HP 11845 non-null float64\nEngine Cylinders 11884 non-null float64\nTransmission Type 11914 non-null object\nDriven_Wheels 11914 non-null object\nNumber of Doors 11908 non-null float64\nMarket Category 8172 non-null object\nVehicle Size 11914 non-null object\nVehicle Style 11914 non-null object\nhighway MPG 11914 non-null int64\ncity mpg 11914 non-null int64\nPopularity 11914 non-null int64\nMSRP 11914 non-null int64\ndtypes: float64(3), int64(5), object(8)\nmemory usage: 1.5+ MB\n" ], [ "# plt.hist(car_df['Year']);\ncar_df1 = car_df.loc[car_df['Year'] > 2009]", "_____no_output_____" ], [ "car_df1.isna().sum()", "_____no_output_____" ], [ "car_df1.dropna(subset=['Number of Doors'], axis=0, inplace=True)", "/Users/Erica/anaconda3/envs/learn-env/lib/python3.6/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "car_df1.dropna(subset=['Engine HP'], axis=0, inplace=True)", "/Users/Erica/anaconda3/envs/learn-env/lib/python3.6/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "car_df1.isna().sum()", "_____no_output_____" ], [ "car_df1.loc[car_df1['Engine Cylinders'].isna()] = car_df1.loc[car_df1['Engine Cylinders'].isna()].fillna(0)", "/Users/Erica/anaconda3/envs/learn-env/lib/python3.6/site-packages/pandas/core/indexing.py:543: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n self.obj[item] = s\n" ], [ "car_df1.isna().sum()", "_____no_output_____" ], [ "car_df1.loc[car_df1['Market Category'].isna()]['Make'].value_counts()", "_____no_output_____" ], [ "car_df1.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 7856 entries, 0 to 11912\nData columns (total 16 columns):\nMake 7856 non-null object\nModel 7856 non-null object\nYear 7856 non-null int64\nEngine Fuel Type 7856 non-null object\nEngine HP 7856 non-null float64\nEngine Cylinders 7856 non-null float64\nTransmission Type 7856 non-null object\nDriven_Wheels 7856 non-null object\nNumber of Doors 7856 
non-null float64\nMarket Category 6219 non-null object\nVehicle Size 7856 non-null object\nVehicle Style 7856 non-null object\nhighway MPG 7856 non-null int64\ncity mpg 7856 non-null int64\nPopularity 7856 non-null int64\nMSRP 7856 non-null int64\ndtypes: float64(3), int64(5), object(8)\nmemory usage: 1.0+ MB\n" ], [ "car_df1.dropna(axis=0, inplace=True)", "/Users/Erica/anaconda3/envs/learn-env/lib/python3.6/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "car_df1.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 6219 entries, 0 to 11912\nData columns (total 16 columns):\nMake 6219 non-null object\nModel 6219 non-null object\nYear 6219 non-null int64\nEngine Fuel Type 6219 non-null object\nEngine HP 6219 non-null float64\nEngine Cylinders 6219 non-null float64\nTransmission Type 6219 non-null object\nDriven_Wheels 6219 non-null object\nNumber of Doors 6219 non-null float64\nMarket Category 6219 non-null object\nVehicle Size 6219 non-null object\nVehicle Style 6219 non-null object\nhighway MPG 6219 non-null int64\ncity mpg 6219 non-null int64\nPopularity 6219 non-null int64\nMSRP 6219 non-null int64\ndtypes: float64(3), int64(5), object(8)\nmemory usage: 826.0+ KB\n" ], [ "car_df1.duplicated().sum()", "_____no_output_____" ], [ "car_df1.drop_duplicates(inplace=True)", "/Users/Erica/anaconda3/envs/learn-env/lib/python3.6/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "car_df1.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 5978 entries, 0 to 11912\nData columns (total 16 columns):\nMake 5978 non-null object\nModel 5978 non-null object\nYear 5978 non-null int64\nEngine Fuel Type 5978 non-null object\nEngine HP 5978 non-null float64\nEngine Cylinders 5978 non-null float64\nTransmission Type 5978 non-null object\nDriven_Wheels 5978 non-null object\nNumber of Doors 5978 non-null float64\nMarket Category 5978 non-null object\nVehicle Size 5978 non-null object\nVehicle Style 5978 non-null object\nhighway MPG 5978 non-null int64\ncity mpg 5978 non-null int64\nPopularity 5978 non-null int64\nMSRP 5978 non-null int64\ndtypes: float64(3), int64(5), object(8)\nmemory usage: 794.0+ KB\n" ], [ "# with open('car_df1.pickle', 'wb') as f:\n# pickle.dump(car_df1, f, pickle.HIGHEST_PROTOCOL)", "_____no_output_____" ] ], [ [ "create a new column to assign carmaker origin to each row ", "_____no_output_____" ] ], [ [ "with open('make_origin.pickle', 'rb') as f:\n make_origin_dict = pickle.load(f)", "_____no_output_____" ], [ "make_origin_dict", "_____no_output_____" ] ], [ [ "split the attributes in the column market category to separate columns", "_____no_output_____" ] ], [ [ "car_df1 = fn.market_columns(car_df1)", "/Users/Erica/flatiron/mod_5/mod_5_project/car-classification-using-machine-learning/functions.py:16: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: 
http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n df[col] = df['Market Category'].apply(lambda x: 1 if col in x.split(',') else 0)\n/Users/Erica/anaconda3/envs/learn-env/lib/python3.6/site-packages/pandas/core/frame.py:3697: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n errors=errors)\n" ], [ "car_df1.drop(columns='Number of Doors', axis=1, inplace=True)", "/Users/Erica/anaconda3/envs/learn-env/lib/python3.6/site-packages/pandas/core/frame.py:3697: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n errors=errors)\n" ], [ "car_df1.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 5978 entries, 0 to 11912\nData columns (total 24 columns):\nMake 5978 non-null object\nModel 5978 non-null object\nYear 5978 non-null int64\nEngine Fuel Type 5978 non-null object\nEngine HP 5978 non-null float64\nEngine Cylinders 5978 non-null float64\nTransmission Type 5978 non-null object\nDriven_Wheels 5978 non-null object\nVehicle Size 5978 non-null object\nVehicle Style 5978 non-null object\nhighway MPG 5978 non-null int64\ncity mpg 5978 non-null int64\nPopularity 5978 non-null int64\nMSRP 5978 non-null int64\nHigh-Performance 5978 non-null int64\nExotic 5978 non-null int64\nCrossover 5978 non-null int64\nFlex Fuel 5978 non-null int64\nFactory Tuner 5978 non-null int64\nPerformance 5978 non-null int64\nLuxury 5978 non-null int64\nHatchback 5978 non-null int64\nDiesel 5978 non-null int64\nHybrid 5978 non-null int64\ndtypes: float64(2), int64(15), object(7)\nmemory usage: 1.1+ MB\n" ], [ "car_df1['Vehicle Style'].value_counts()", "_____no_output_____" ], [ "car_df1 = car_df1.loc[(car_df1['Vehicle Style'] != '2dr SUV') \n & (car_df1['Vehicle Style'] != 'Convertible SUV') &\n (car_df1['Vehicle Style'] != 'Cargo Minivan')]", "_____no_output_____" ], [ "car_df1['Driven_Wheels'].value_counts()", "_____no_output_____" ], [ "car_df1.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 5966 entries, 0 to 11912\nData columns (total 24 columns):\nMake 5966 non-null object\nModel 5966 non-null object\nYear 5966 non-null int64\nEngine Fuel Type 5966 non-null object\nEngine HP 5966 non-null float64\nEngine Cylinders 5966 non-null float64\nTransmission Type 5966 non-null object\nDriven_Wheels 5966 non-null object\nVehicle Size 5966 non-null object\nVehicle Style 5966 non-null object\nhighway MPG 5966 non-null int64\ncity mpg 5966 non-null int64\nPopularity 5966 non-null int64\nMSRP 5966 non-null int64\nHigh-Performance 5966 non-null int64\nExotic 5966 non-null int64\nCrossover 5966 non-null int64\nFlex Fuel 5966 non-null int64\nFactory Tuner 5966 non-null int64\nPerformance 5966 non-null int64\nLuxury 5966 non-null int64\nHatchback 5966 non-null int64\nDiesel 5966 non-null int64\nHybrid 5966 non-null int64\ndtypes: float64(2), int64(15), object(7)\nmemory usage: 1.1+ MB\n" ], [ "car_df1['Origin'] = car_df1['Make'].apply(lambda m: make_origin_dict[m])", "_____no_output_____" ], [ "car_df1.drop(['Make', 'Model'], axis=1, inplace=True)", "_____no_output_____" ], [ "X = car_df1.drop('Origin', axis=1)\ny = car_df1['Origin']", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(X, y, 
test_size=0.25, random_state=42)", "_____no_output_____" ], [ "# Create train and test dataframes with only the categorical variables\n\nX_train_obj = X_train[[col for col,dtype in list(zip(X_train.columns, X_train.dtypes)) \n if dtype == np.dtype('O')]]\nX_test_obj = X_test[[col for col,dtype in list(zip(X_test.columns, X_test.dtypes)) \n if dtype == np.dtype('O')]]", "_____no_output_____" ], [ "X_train_nonobj = X_train[[col for col,dtype in list(zip(X_train.columns, X_train.dtypes)) \n if dtype != np.dtype('O')]]\nX_test_nonobj = X_test[[col for col,dtype in list(zip(X_test.columns, X_test.dtypes)) \n if dtype != np.dtype('O')]]", "_____no_output_____" ], [ "ohe = OneHotEncoder(handle_unknown='ignore')\nX_train_obj_ohe = ohe.fit_transform(X_train_obj)\nX_test_obj_ohe = ohe.transform(X_test_obj)", "_____no_output_____" ], [ "X_train_nonobj_df = pd.DataFrame(X_train_nonobj).reset_index(drop=True) \nX_train_obj_ohe_df = pd.DataFrame(X_train_obj_ohe.todense(), columns=ohe.get_feature_names()).reset_index(drop=True)", "_____no_output_____" ], [ "X_train_all = pd.concat([X_train_nonobj_df, X_train_obj_ohe_df], axis=1)", "_____no_output_____" ], [ "X_test_nonobj_df = pd.DataFrame(X_test_nonobj).reset_index(drop=True) \nX_test_obj_ohe_df = pd.DataFrame(X_test_obj_ohe.todense(), columns=ohe.get_feature_names()).reset_index(drop=True)", "_____no_output_____" ], [ "X_test_all = pd.concat([X_test_nonobj_df, X_test_obj_ohe_df], axis=1)", "_____no_output_____" ], [ "X_train_all.shape", "_____no_output_____" ], [ "X_test_all.shape", "_____no_output_____" ], [ "origins = list(set(make_origin_dict.values()))\n\norigin_code = {}\nfor i in range(len(origins)):\n origin_code[origins[i]] = i\n\ny_train = y_train.apply(lambda x: origin_code[x])\ny_test = y_test.apply(lambda x: origin_code[x])", "_____no_output_____" ], [ "# origins = list(set(make_origin_dict.values()))\n\n# origin_code = {}\n# for i in range(len(origins)):\n# origin_code[origins[i]] = i\n# origin_code\n\n# car_df1['origin_code'] = car_df1['Origin'].apply(lambda x: origin_code[x])\n# car_df1.drop(columns='Origin', axis=1, inplace=True)", "_____no_output_____" ], [ "# X_train_all = pd.concat([pd.DataFrame(X_train_nonobj), pd.DataFrame(X_train_obj_ohe.todense(), columns=ohe.get_feature_names())], axis=1)\n# X_test_all = pd.concat([pd.DataFrame(X_test_nonobj), pd.DataFrame(X_test_obj_ohe.todense(), columns=ohe.get_feature_names())], axis=1)", "_____no_output_____" ], [ "# cat_features = ['Engine Fuel Type', 'Transmission Type', 'Driven_Wheels', 'Vehicle Size', 'Vehicle Style']\n# cat_index=[]\n# for ind, val in enumerate(X_train.columns):\n# #print(ind, val)\n# if val in cat_features:\n# cat_index.append(ind)", "_____no_output_____" ], [ "# X_train_df = pd.DataFrame(X_train_ohe.todense(), columns=ohe.get_feature_names())\n# X_train_df.columns", "_____no_output_____" ] ] ]
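The dtype split, `OneHotEncoder` fit/transform, and `pd.concat` steps above can also be expressed as a single transformer. The sketch below is an optional equivalent, not part of the original notebook; it assumes the same `X_train`/`X_test` frames built earlier and requires scikit-learn >= 0.22 for `make_column_selector`.

```python
# Optional equivalent of the manual split / encode / concat steps above,
# reusing the X_train and X_test frames built earlier in this notebook.
from sklearn.compose import ColumnTransformer, make_column_selector
from sklearn.preprocessing import OneHotEncoder

preprocess = ColumnTransformer(
    transformers=[
        ("onehot",
         OneHotEncoder(handle_unknown="ignore"),
         make_column_selector(dtype_include=object)),  # categorical columns
    ],
    remainder="passthrough",                           # numeric columns pass through
)

X_train_all = preprocess.fit_transform(X_train)        # fit on train only
X_test_all = preprocess.transform(X_test)              # reuse train categories
print(X_train_all.shape, X_test_all.shape)
```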
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec851f560a5a4aabe6b49ea0097f18ccd74752f7
166,726
ipynb
Jupyter Notebook
DatasetCrop.ipynb
deepaklorkhatri007/bacteriAi
2730945732c2b4a0c59bcb96dc9812e3bd84d71b
[ "MIT" ]
null
null
null
DatasetCrop.ipynb
deepaklorkhatri007/bacteriAi
2730945732c2b4a0c59bcb96dc9812e3bd84d71b
[ "MIT" ]
null
null
null
DatasetCrop.ipynb
deepaklorkhatri007/bacteriAi
2730945732c2b4a0c59bcb96dc9812e3bd84d71b
[ "MIT" ]
null
null
null
1302.546875
163720
0.958525
[ [ [ "import cv2\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "folder = \"\"\nbacteria = \"positive\"\nimage = \"028\"\ncategory = [\"png\", 'jpg', 'jpeg']\nimtype = category[1]\n# top, bottom, right, left\nmargin = [100, 0, 0, 0]\nimg = cv2.imread(folder+bacteria+\"/\"+bacteria+image+\".\"+imtype)\nimg = img[margin[0]:(img.shape[0]-margin[1]), margin[3]:(img.shape[1]-margin[2])]\n\n# just to show the image\ntoshow = plt.imread(bacteria+\"/\"+bacteria+image+\".\"+imtype)\nplt.imshow(toshow[margin[0]:(img.shape[0]-margin[1]), margin[3]:(img.shape[1]-margin[2])])", "_____no_output_____" ], [ "#### start = 0\nheight = 100\nwidth = 100\ncut = 50\nx = int((img.shape[1] - width)/cut + 1)\ny = int((img.shape[0] - height)/cut + 1)\nprint(\"you will get\", x*y, \"images\")", "you will get 135 images\n" ], [ "counter = 0\nfolder = \"data/\"\nfor hcut in range(x):\n for vcut in range(y):\n counter+=1\n imcut = img[ int(vcut*cut):int(vcut*cut+height), int(hcut*cut):int(hcut*cut + width)]\n cv2.imwrite(folder+bacteria+\"/\"+bacteria+image+str(vcut)+str(hcut)+\".jpg\", imcut)\n# print( vcut*cut, \":\", vcut*cut+100,\",\", hcut*cut,\":\",hcut*cut + 100)\nprint(\"created\", counter, \"images of dimension\",width,\"x\",height)", "created 135 images of dimension 100 x 100\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
ec852333d0b6a74be0e17f7a706f415af4108847
602,866
ipynb
Jupyter Notebook
project_1_evictions.ipynb
Lauralynnz/sanfrancisco_eviction_analysis
8942d35a217e73e6d1bc0ae41d4c396bb8f53fcf
[ "MIT" ]
null
null
null
project_1_evictions.ipynb
Lauralynnz/sanfrancisco_eviction_analysis
8942d35a217e73e6d1bc0ae41d4c396bb8f53fcf
[ "MIT" ]
null
null
null
project_1_evictions.ipynb
Lauralynnz/sanfrancisco_eviction_analysis
8942d35a217e73e6d1bc0ae41d4c396bb8f53fcf
[ "MIT" ]
null
null
null
267.940444
68556
0.894239
[ [ [ "# Modules\nimport os\nimport csv\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\n\n# Note: VALIDATION means we're just looking to see what is in the data\n# Note: CLEANING means we've found a problem and are correcting it, or adding data as needed", "_____no_output_____" ], [ "# Save path to data set in a variable\ndata_file = \"eviction_notices_kaggle.csv\"", "_____no_output_____" ], [ "# Save path to data set in a variable\ndata_file_pd = pd.read_csv(data_file)\nprint(data_file_pd)\n", " Eviction ID Fault Address City \\\n0 AL2K0014 At Fault 1600 Block Of Howard Street San Francicso \n1 E980001 At Fault 1500 Block Of 20th Avenue San Francicso \n2 E980002 At Fault 1500 Block Of California Street San Francicso \n3 E980003 At Fault 1600 Block Of Alabama Street San Francicso \n4 E980004 At Fault 400 Block Of Leland Avenue San Francicso \n... ... ... ... ... \n41351 M2K2028 Other 1600 Block Of 39th Avenue San Francicso \n41352 RE02119 Other 500 Block Of Oak Street San Francicso \n41353 RE02124 Other 800 Block Of Post Street San Francicso \n41354 S000329 Other 500 Block Of Eureka Street San Francicso \n41355 S000500 Other 2500 Block Of 22nd Avenue San Francicso \n\n State Eviction Notice Source Zipcode File Date \\\n0 CA 94103 2000-02-11T00:00:00.000 \n1 CA 94122 1997-12-12T00:00:00.000 \n2 CA NaN 1998-01-02T00:00:00.000 \n3 CA 94110 1998-01-02T00:00:00.000 \n4 CA 94134 1998-01-02T00:00:00.000 \n... ... ... ... \n41351 CA 94122 2000-10-05T00:00:00.000 \n41352 CA NaN 1997-06-18T00:00:00.000 \n41353 CA 94109 1997-06-19T00:00:00.000 \n41354 CA 94114 1997-09-17T00:00:00.000 \n41355 CA 94116 1997-10-09T00:00:00.000 \n\n Non Payment Breach Nuisance ... Roommate Same Unit Other Cause \\\n0 False True False ... False False \n1 False True False ... False False \n2 False False True ... False False \n3 False True True ... False False \n4 False False False ... False False \n... ... ... ... ... ... ... \n41351 True False False ... False False \n41352 False False True ... False False \n41353 False False False ... False False \n41354 False False False ... False False \n41355 False False False ... False False \n\n Late Payments Lead Remediation Development Good Samaritan Ends \\\n0 True False False False \n1 False False False False \n2 False False False False \n3 False False False False \n4 True False False False \n... ... ... ... ... \n41351 False False False False \n41352 False False False False \n41353 False False False False \n41354 False False False False \n41355 True False False False \n\n Constraints Date Supervisor District Neighborhoods \\\n0 NaN 6.0 Mission \n1 NaN 4.0 Sunset/Parkside \n2 NaN 3.0 Nob Hill \n3 NaN 9.0 Bernal Heights \n4 NaN 10.0 Visitacion Valley \n... ... ... ... \n41351 NaN 4.0 Sunset/Parkside \n41352 NaN 5.0 Hayes Valley \n41353 NaN 6.0 Tenderloin \n41354 NaN 8.0 Noe Valley \n41355 NaN 4.0 Sunset/Parkside \n\n Location \n0 {'type': 'Point', 'coordinates': [-122.4173382... \n1 {'type': 'Point', 'coordinates': [-122.4779409... \n2 {'type': 'Point', 'coordinates': [-122.4198727... \n3 {'type': 'Point', 'coordinates': [-122.4106005... \n4 {'type': 'Point', 'coordinates': [-122.4125124... \n... ... \n41351 {'type': 'Point', 'coordinates': [-122.4980273... \n41352 {'type': 'Point', 'coordinates': [-122.4283514... \n41353 {'type': 'Point', 'coordinates': [-122.4158061... \n41354 {'type': 'Point', 'coordinates': [-122.4375020... \n41355 {'type': 'Point', 'coordinates': [-122.4787784... 
\n\n[41356 rows x 30 columns]\n" ], [ "# VALIDATION: VIEW dataset\n\ndata_file_pd = pd.read_csv(data_file)\ndata_file_pd.head()\n", "_____no_output_____" ], [ "# CLEANING: REMOVE Constraints Date because it's mostly blank\n\ndata_file_pd_1 = data_file_pd.drop(['Constraints Date'], axis=1)\n#data_file_pd.isna().sum() #Check to see that the columns are gone\n\ndata_file_pd_dropna = data_file_pd_1.dropna(how='any')\n#data_file_pd_dropna.isna().sum() #Check to see that the columns are gone\n#len(data_file_pd_dropna[\"Eviction ID\"])\n\ndata_file_pd_dropna_df = pd.DataFrame(data_file_pd_dropna)\nlen(data_file_pd_dropna_df[\"Eviction ID\"])\n", "_____no_output_____" ], [ "# Q: Are all the eviction IDs unique?\n\n# VALIDATION & CLEANING: FIND (and remove) duplicate records in 'Eviction ID'\n\n# COUNT num of eviction IDs (answer is 39351)\n#len(data_file_pd_dropna['Eviction ID']) # check to see how many eviction IDs there are\n\n# DEFINE category Set and count num of eviction IDs again (answer is 39333)\n#len(set(data_file_pd_dropna['Eviction ID']))\n\n# DROP the 9 records and their duplicates from the DB\n\ndata_file_pd_dropna_df.drop_duplicates(subset =\"Eviction ID\", keep = False, inplace = True)\n\n# CONFIRM dups have been removed\nlen(data_file_pd_dropna_df[\"Eviction ID\"])\n", "_____no_output_____" ], [ "# VALIDATION: VIEW a list of all the data in column 'State' and how many records there are for each neighborhood\n\ndata_file_pd_dropna_df['State'].value_counts()", "_____no_output_____" ], [ "# CLEANING: RENAME 'Ca' to 'CA' (there are 4 of them)\ndata_file_pd_dropna_df['State'] = data_file_pd_dropna_df['State'].replace(\n {'Ca':'CA'})\ndata_file_pd_dropna_df['State'].value_counts() #Confirm all the values are now CA", "_____no_output_____" ], [ "# VALIDATION: VIEW a list of all the data in column 'Neighborhoods' and how many records there are for each neighborhood\n\ndata_file_pd_dropna_df['Neighborhoods'].value_counts()\n", "_____no_output_____" ], [ "# VALIDATION: VIEW a list of all the data in column 'Fault' and how many records there are for each\n\ndata_file_pd_dropna_df['Fault'].value_counts()\n", "_____no_output_____" ], [ "# Tenant Fault Reasons bucket\n\nnon_payment = data_file_pd_dropna_df[\"Non Payment\"].value_counts().values[1]\nbreach = data_file_pd_dropna_df[\"Breach\"].value_counts().values[1]\nnuisance = data_file_pd_dropna_df[\"Nuisance\"].value_counts().values[1]\nillegal_use = data_file_pd_dropna_df[\"Illegal Use\"].value_counts().values[1]\nfailure_to_sign_renewal = data_file_pd_dropna_df[\"Failure to Sign Renewal\"].value_counts().values[1]\naccess_denial = data_file_pd_dropna_df[\"Access Denial\"].value_counts().values[1]\nunapproved_subtenant = data_file_pd_dropna_df[\"Unapproved Subtenant\"].value_counts().values[1]\nroommate_same_unit = data_file_pd_dropna_df[\"Roommate Same Unit\"].value_counts().values[1]\nlate_payments = data_file_pd_dropna_df[\"Late Payments\"].value_counts().values[1]\nlead_remediation = data_file_pd_dropna_df[\"Lead Remediation\"].value_counts().values[1]\n\n\ntenant_fault = non_payment + breach + nuisance + illegal_use + failure_to_sign_renewal + access_denial + unapproved_subtenant + roommate_same_unit + late_payments + lead_remediation\nprint(tenant_fault)\n\n\n", "22510\n" ], [ "# Landlord Fault Reasons bucket\n\nowner_move_in = data_file_pd_dropna_df[\"Owner Move In\"].value_counts().values[1]\ndemolition = data_file_pd_dropna_df[\"Demolition\"].value_counts().values[1]\ncapital_improvement = data_file_pd_dropna_df[\"Capital 
Improvement\"].value_counts().values[1]\nsubstantial_rehab = data_file_pd_dropna_df[\"Substantial Rehab\"].value_counts().values[1]\nellis_act_withdrawal = data_file_pd_dropna_df[\"Ellis Act WithDrawal\"].value_counts().values[1]\ncondo_conversion = data_file_pd_dropna_df[\"Condo Conversion\"].value_counts().values[1]\ndevelopment = data_file_pd_dropna_df[\"Development\"].value_counts().values[1]\n\nlandlord_fault = owner_move_in + demolition + capital_improvement + substantial_rehab + ellis_act_withdrawal + condo_conversion + development\nprint(landlord_fault)", "16973\n" ], [ "# CLEANING: CREATE a new column to hold year eviction notice was filed\n\ndata_file_pd_dropna_df[\"Year\"] = pd.DatetimeIndex(data_file_pd_dropna_df['File Date']).year\n#data_file_pd['Year'].value_counts() #to see a count of eviction notices by year\n\n# CREATE dataframes to hold data for 2010 - 2018\ndf_2010 = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Year\"] == 2010]\ndf_2011 = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Year\"] == 2011]\ndf_2012 = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Year\"] == 2012]\ndf_2013 = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Year\"] == 2013]\ndf_2014 = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Year\"] == 2014]\ndf_2015 = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Year\"] == 2015]\ndf_2016 = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Year\"] == 2016]\ndf_2017 = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Year\"] == 2017]\ndf_2018 = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Year\"] == 2018]\n#len(df_2010) #to see how many records are in each new df_year above\n#print(df_2010.columns) # to see a list of columns\n\n\n# CREATE a count of fault group by year\ndate_tenant_fault_2010 = df_2010 [\"Non Payment\"].value_counts() + df_2010['Breach'].value_counts() + df_2010['Nuisance'].value_counts() + df_2010['Illegal Use'].value_counts() + df_2010['Failure to Sign Renewal'].value_counts() + df_2010['Access Denial'].value_counts() + df_2010['Unapproved Subtenant'].value_counts() + df_2010['Roommate Same Unit'].value_counts() + df_2010['Late Payments'].value_counts()\ndate_tenant_fault_2011 = df_2011 [\"Non Payment\"].value_counts() + df_2011['Breach'].value_counts() + df_2011['Nuisance'].value_counts() + df_2011['Illegal Use'].value_counts() + df_2011['Failure to Sign Renewal'].value_counts() + df_2011['Access Denial'].value_counts() + df_2011['Unapproved Subtenant'].value_counts() + df_2011['Roommate Same Unit'].value_counts() + df_2011['Late Payments'].value_counts()\ndate_tenant_fault_2012 = df_2012 [\"Non Payment\"].value_counts() + df_2012['Breach'].value_counts() + df_2012['Nuisance'].value_counts() + df_2012['Illegal Use'].value_counts() + df_2012['Failure to Sign Renewal'].value_counts() + df_2012['Access Denial'].value_counts() + df_2012['Unapproved Subtenant'].value_counts() + df_2012['Roommate Same Unit'].value_counts() + df_2012['Late Payments'].value_counts()\ndate_tenant_fault_2013 = df_2013 [\"Non Payment\"].value_counts() + df_2013['Breach'].value_counts() + df_2013['Nuisance'].value_counts() + df_2013['Illegal Use'].value_counts() + df_2013['Failure to Sign Renewal'].value_counts() + df_2013['Access Denial'].value_counts() + df_2013['Unapproved Subtenant'].value_counts() + df_2013['Roommate Same Unit'].value_counts() + df_2013['Late Payments'].value_counts()\ndate_tenant_fault_2014 = df_2014 [\"Non Payment\"].value_counts() + df_2014['Breach'].value_counts() + df_2014['Nuisance'].value_counts() + df_2014['Illegal 
Use'].value_counts() + df_2014['Failure to Sign Renewal'].value_counts() + df_2014['Access Denial'].value_counts() + df_2014['Unapproved Subtenant'].value_counts() + df_2014['Roommate Same Unit'].value_counts() + df_2014['Late Payments'].value_counts()\ndate_tenant_fault_2015 = df_2015 [\"Non Payment\"].value_counts() + df_2015['Breach'].value_counts() + df_2015['Nuisance'].value_counts() + df_2015['Illegal Use'].value_counts() + df_2015['Failure to Sign Renewal'].value_counts() + df_2015['Access Denial'].value_counts() + df_2015['Unapproved Subtenant'].value_counts() + df_2015['Roommate Same Unit'].value_counts() + df_2015['Late Payments'].value_counts()\ndate_tenant_fault_2016 = df_2016 [\"Non Payment\"].value_counts() + df_2016['Breach'].value_counts() + df_2016['Nuisance'].value_counts() + df_2016['Illegal Use'].value_counts() + df_2016['Failure to Sign Renewal'].value_counts() + df_2016['Access Denial'].value_counts() + df_2016['Unapproved Subtenant'].value_counts() + df_2016['Roommate Same Unit'].value_counts() + df_2016['Late Payments'].value_counts()\ndate_tenant_fault_2017 = df_2017 [\"Non Payment\"].value_counts() + df_2017['Breach'].value_counts() + df_2017['Nuisance'].value_counts() + df_2017['Illegal Use'].value_counts() + df_2017['Failure to Sign Renewal'].value_counts() + df_2017['Access Denial'].value_counts() + df_2017['Unapproved Subtenant'].value_counts() + df_2017['Roommate Same Unit'].value_counts() + df_2017['Late Payments'].value_counts()\ndate_tenant_fault_2018 = df_2018 [\"Non Payment\"].value_counts() + df_2018['Breach'].value_counts() + df_2018['Nuisance'].value_counts() + df_2018['Illegal Use'].value_counts() + df_2018['Failure to Sign Renewal'].value_counts() + df_2018['Access Denial'].value_counts() + df_2018['Unapproved Subtenant'].value_counts() + df_2018['Roommate Same Unit'].value_counts() + df_2018['Late Payments'].value_counts()\n\n# CREATE a count of fault group for entire dataset\ntenant_fault_all_years = data_file_pd_dropna_df [\"Non Payment\"].value_counts() + data_file_pd['Breach'].value_counts() + data_file_pd['Nuisance'].value_counts() + data_file_pd['Illegal Use'].value_counts() + data_file_pd['Failure to Sign Renewal'].value_counts() + data_file_pd['Access Denial'].value_counts() + data_file_pd['Unapproved Subtenant'].value_counts() + data_file_pd['Roommate Same Unit'].value_counts() + data_file_pd['Late Payments'].value_counts()\n\n\n# CREATE no-fault group\ndate_tenant_no_fault_2010 = df_2010 [\"Owner Move In\"].value_counts() + df_2010['Demolition'].value_counts() + df_2010['Capital Improvement'].value_counts() + df_2010['Substantial Rehab'].value_counts() + df_2010['Ellis Act WithDrawal'].value_counts() + df_2010['Condo Conversion'].value_counts() + df_2010['Development'].value_counts() + df_2010['Lead Remediation'].value_counts() + df_2010['Good Samaritan Ends'].value_counts()\ndate_tenant_no_fault_2011 = df_2011 [\"Owner Move In\"].value_counts() + df_2011['Demolition'].value_counts() + df_2011['Capital Improvement'].value_counts() + df_2011['Substantial Rehab'].value_counts() + df_2011['Ellis Act WithDrawal'].value_counts() + df_2011['Condo Conversion'].value_counts() + df_2011['Development'].value_counts() + df_2011['Lead Remediation'].value_counts() + df_2011['Good Samaritan Ends'].value_counts()\ndate_tenant_no_fault_2012 = df_2012 [\"Owner Move In\"].value_counts() + df_2012['Demolition'].value_counts() + df_2012['Capital Improvement'].value_counts() + df_2012['Substantial Rehab'].value_counts() + df_2012['Ellis Act 
WithDrawal'].value_counts() + df_2012['Condo Conversion'].value_counts() + df_2012['Development'].value_counts() + df_2012['Lead Remediation'].value_counts() + df_2012['Good Samaritan Ends'].value_counts()\ndate_tenant_no_fault_2013 = df_2013 [\"Owner Move In\"].value_counts() + df_2013['Demolition'].value_counts() + df_2013['Capital Improvement'].value_counts() + df_2013['Substantial Rehab'].value_counts() + df_2013['Ellis Act WithDrawal'].value_counts() + df_2013['Condo Conversion'].value_counts() + df_2013['Development'].value_counts() + df_2013['Lead Remediation'].value_counts() + df_2013['Good Samaritan Ends'].value_counts()\ndate_tenant_no_fault_2014 = df_2014 [\"Owner Move In\"].value_counts() + df_2014['Demolition'].value_counts() + df_2014['Capital Improvement'].value_counts() + df_2014['Substantial Rehab'].value_counts() + df_2014['Ellis Act WithDrawal'].value_counts() + df_2014['Condo Conversion'].value_counts() + df_2014['Development'].value_counts() + df_2014['Lead Remediation'].value_counts() + df_2014['Good Samaritan Ends'].value_counts()\ndate_tenant_no_fault_2015 = df_2015 [\"Owner Move In\"].value_counts() + df_2015['Demolition'].value_counts() + df_2015['Capital Improvement'].value_counts() + df_2015['Substantial Rehab'].value_counts() + df_2015['Ellis Act WithDrawal'].value_counts() + df_2015['Condo Conversion'].value_counts() + df_2015['Development'].value_counts() + df_2015['Lead Remediation'].value_counts() + df_2015['Good Samaritan Ends'].value_counts()\ndate_tenant_no_fault_2016 = df_2016 [\"Owner Move In\"].value_counts() + df_2016['Demolition'].value_counts() + df_2016['Capital Improvement'].value_counts() + df_2016['Substantial Rehab'].value_counts() + df_2016['Ellis Act WithDrawal'].value_counts() + df_2016['Condo Conversion'].value_counts() + df_2016['Development'].value_counts() + df_2016['Lead Remediation'].value_counts() + df_2016['Good Samaritan Ends'].value_counts()\ndate_tenant_no_fault_2017 = df_2017 [\"Owner Move In\"].value_counts() + df_2017['Demolition'].value_counts() + df_2017['Capital Improvement'].value_counts() + df_2017['Substantial Rehab'].value_counts() + df_2017['Ellis Act WithDrawal'].value_counts() + df_2017['Condo Conversion'].value_counts() + df_2017['Development'].value_counts() + df_2017['Lead Remediation'].value_counts() + df_2017['Good Samaritan Ends'].value_counts()\ndate_tenant_no_fault_2018 = df_2018 [\"Owner Move In\"].value_counts() + df_2018['Demolition'].value_counts() + df_2018['Capital Improvement'].value_counts() + df_2018['Substantial Rehab'].value_counts() + df_2018['Ellis Act WithDrawal'].value_counts() + df_2018['Condo Conversion'].value_counts() + df_2018['Development'].value_counts() + df_2018['Lead Remediation'].value_counts() + df_2018['Good Samaritan Ends'].value_counts()\n\n# CREATE no-fault group for entire dataset\ntenant_no_fault_all_years = data_file_pd_dropna_df [\"Owner Move In\"].value_counts() + data_file_pd['Demolition'].value_counts() + data_file_pd['Capital Improvement'].value_counts() + data_file_pd['Substantial Rehab'].value_counts() + data_file_pd['Ellis Act WithDrawal'].value_counts() + data_file_pd['Condo Conversion'].value_counts() + data_file_pd['Development'].value_counts() + data_file_pd['Lead Remediation'].value_counts() + data_file_pd['Good Samaritan Ends'].value_counts()\n\n# View files\ndf_2010.head()\n\n# CREATE datasets of fault and no-fault\ndf_tenant_at_fault = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Fault\"] == \"At Fault\"]\ndf_tenant_no_fault = 
data_file_pd_dropna_df[data_file_pd_dropna_df[\"Fault\"] == \"No Fault\"]\n", "_____no_output_____" ], [ "data_file_pd_dropna_df['Neighborhoods'].value_counts()\n", "_____no_output_____" ], [ "# # # # # # # # GRAPHING # # # # # # # #", "_____no_output_____" ], [ "# Q: Has the SF eviction rate gone up or down from 2010 to 2018?\n# CREATE Graph #1 showing num of eviction notices per year, for the entire dataset\n\n# data_file_pd_dropna = data_file_pd_dropna.dataframe({\n# \"Num of Eviction Notices\"\n# index=[2010, 2011]\n# # })\n\n# plt.plot(data_file_pd_dropna[\"Year\"], data_file_pd_dropna)\n\n# # Give our graph axis labels\n# plt.xlabel(\"Year\")\n# plt.ylabel(\"Eviction Notices\")\n\n# # Have to plot our chart once again as it doesn't stick after being shown\n# plt.plot(x_axis, e_x)\n# plt.show()\n", "_____no_output_____" ], [ "# VALIDATION: VIEW a count of eviction notices by year from the new Year column\n\nevictionperyear = data_file_pd_dropna_df['Year'].value_counts().sort_index()\n\nevictionperyeardf = pd.DataFrame(evictionperyear)\nevictionperyeardf = evictionperyeardf.reset_index()", "_____no_output_____" ], [ "data_file_pd_dropna_df['Year'].value_counts().sort_index()", "_____no_output_____" ], [ "evictionperyear = data_file_pd_dropna_df['Year'].value_counts().sort_index()\nevictionperyear = pd.DataFrame(evictionperyear)\nevictionperyear = evictionperyear.reset_index()\nevictionperyeardf = evictionperyear.rename(columns={'Year':'Evictions', \"index\": \"Year\"})\n#evictionperyear = evictionperyear.reset_index()\n\nevictionperyeardf", "_____no_output_____" ], [ "evictionperyeardf.columns", "_____no_output_____" ], [ "x = list(evictionperyeardf[\"Year\"])\ny = list(evictionperyeardf[\"Evictions\"])\nprint(x)\nprint(y)", "[1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019]\n[1751, 2786, 2416, 2480, 2060, 1570, 1493, 1393, 1495, 1437, 1449, 1385, 1143, 1322, 1268, 1601, 1825, 1912, 2062, 2104, 1523, 1609, 1249]\n" ], [ "lines = evictionperyeardf.plot.line(x='Year', y='Evictions', marker=\"o\", color=\"purple\", xlim=(2010, 2018), figsize = (20, 10))\nlines.set_ylabel(\"Num of Evictions\")\n\nplt.savefig(\"graph_1_solene.png\")\n", "_____no_output_____" ] ], [ [ "<img style=\"transform: rotate(90deg); width:500px\" src=\"graph_1.jpg\"/>", "_____no_output_____" ] ], [ [ "# Q: Does the eviction rate differ by district in SF? 
Which neighborhoods have highest/lowest \n# CREATE Graph #2 showing num of eviction notices per year, per district\n# CREATE Graph #2 Do a bar graph for years 2010 and 2018 that shows num of evictions for each distict\n# x-axis is district, y-axis is num of evictions\n\n\n# VIEW count of evictions per district for entire dataset\nevictionperdistrict_2015 = df_2015['Supervisor District'].value_counts()\nevictionperdistrict_2016 = df_2016['Supervisor District'].value_counts()\nevictionperdistrict_2017 = df_2017['Supervisor District'].value_counts()\nevictionperdistrict_2018 = df_2018['Supervisor District'].value_counts()\n#evictionperdistrict_2018", "_____no_output_____" ], [ "# Create a dataframe holding the District ID and District Neighborhoods\ndistrictname_df = pd.DataFrame({\n \"Supervisor District\": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0],\n \"District Neighborhoods\": [\"The Richmond\", \"Pacific Heights/Sea Cliff/Marina\", \"Chinatown/North Beach/Russian Hill\", \"The Sunset\", \"Haight/Western Addition\", \"SOMA/The Tenderloin/North Mission\", \"West of Twin Peaks\", \"The Castro/Noe Valley/Glen Park/Diamond Heights\", \"The Mission/Bernal Heights/Portola\", \"Bayview-Hunters Point/Visitacion Valley/Potrero Hill\", \"The Excelsior/Ocean View\"]\n})\ndistrictname_df\n", "_____no_output_____" ], [ "# Join districtname_df and df_2010 on 'Supervisor District'\n\nmerge_2015_districtname_df = pd.merge(districtname_df, df_2015, on=\"Supervisor District\")\nmerge_2016_districtname_df = pd.merge(districtname_df, df_2016, on=\"Supervisor District\")\nmerge_2017_districtname_df = pd.merge(districtname_df, df_2017, on=\"Supervisor District\")\nmerge_2018_districtname_df = pd.merge(districtname_df, df_2018, on=\"Supervisor District\")\nmerge_2018_districtname_df", "_____no_output_____" ], [ "gb_2015 = merge_2015_districtname_df.groupby(\"District Neighborhoods\").count()[\"Eviction ID\"]\ncount_2015 = pd.DataFrame(gb_2015)\n#count_2015 = count_2015.reset_index()\ncount_2015.plot(kind=\"bar\", color='mediumvioletred', fontsize = 15, title = 'For Year 2015', figsize = (20, 10)) #colormap for pandas plot\nplt.savefig(\"graph_2_2015.png\")\n", "_____no_output_____" ], [ "gb_2016 = merge_2018_districtname_df.groupby(\"District Neighborhoods\").count()[\"Eviction ID\"]\ncount_2016 = pd.DataFrame(gb_2016)\n#count_2016 = count_2016.reset_index()\ncount_2016.plot(kind=\"bar\", color='mediumturquoise', fontsize = 15, title = 'For Year 2016', figsize = (20, 10)) #colormap for pandas plot\nplt.savefig(\"graph_2_2016.png\")\n", "_____no_output_____" ], [ "gb_2017 = merge_2017_districtname_df.groupby(\"District Neighborhoods\").count()[\"Eviction ID\"]\ncount_2017 = pd.DataFrame(gb_2017)\n#count_2017 = count_2017.reset_index()\ncount_2017.plot(kind=\"bar\", color='mediumorchid', fontsize = 15, title = 'For Year 2017', figsize = (20, 10)) #colormap for pandas plot\nplt.savefig(\"graph_2_2017.png\")\n", "_____no_output_____" ], [ "gb_2018 = merge_2018_districtname_df.groupby(\"District Neighborhoods\").count()[\"Eviction ID\"]\ncount_2018 = pd.DataFrame(gb_2018)\n#count_2018 = count_2017.reset_index()\ncount_2018.plot(kind=\"bar\", color='mediumseagreen', fontsize = 15, title = 'For Year 2018', figsize = (20, 10)) #colormap for pandas plot\nplt.savefig(\"graph_2_2018.png\")\n", "_____no_output_____" ] ], [ [ "<img style=\"transform: rotate(90deg); width:500px\" src=\"graph_2b.jpg\" />", "_____no_output_____" ] ], [ [ "# Q: Over time, has evictions increased or decreased for each fault and 
no-fault categories?\n# CREATE Line chart showing num of eviction notices per year, by fault and no-fault for all of SF\n\n# EXAMPLE FROM CLASS WORK\n# # Plot the world average as a line chart\n# fault_vs_no_fault = plt.plot(years, average_unemployment, color=\"blue\", label=\"World Average\" )\n\n# # Plot the unemployment values for a single country\n# country_one, = plt.plot(years, combined_unemployed_data.loc['USA',[\"2010\",\"2011\",\"2012\",\"2013\",\"2014\"]], \n# color=\"green\",label=combined_unemployed_data.loc['USA',\"Country Name\"])\n\n# # Create a legend for our chart\n# plt.legend(handles=[world_avg, country_one], loc=\"best\")\n\n# # Show the chart\n# plt.show()\n\n#*******************************************\n\nfaultevictionperyear = df_tenant_at_fault['Year'].value_counts().sort_index()\nfaultevictionperyear = pd.DataFrame(faultevictionperyear)\nfaultevictionperyear = faultevictionperyear.reset_index()\nfaultevictionperyear_df = faultevictionperyear.rename(columns={'Year':'Evictions', \"index\": \"Year\"})\n\n# Build the matching no-fault frame that the plots below rely on.\nnofaultevictionperyear = df_tenant_no_fault['Year'].value_counts().sort_index()\nnofaultevictionperyear = pd.DataFrame(nofaultevictionperyear)\nnofaultevictionperyear = nofaultevictionperyear.reset_index()\nnofaultevictionperyear_df = nofaultevictionperyear.rename(columns={'Year':'Evictions', \"index\": \"Year\"})\n#evictionperyear = evictionperyear.reset_index()\n\nfaultevictionperyear_df", "_____no_output_____" ], [ "# # #****** As a scatter plot *******\n\n# # x_axis = (faultevictionperyear_df[\"Year\"])\n# # y_axis = (faultevictionperyear_df[\"Evictions\"])\n\n# # plt.scatter(x_axis, y_axis, marker='o', facecolors='red')\n\n\n# # x_axis = (nofaultevictionperyear_df[\"Year\"])\n# # y_axis = (nofaultevictionperyear_df[\"Evictions\"])\n\n# # plt.scatter(x_axis, y_axis, marker='o', facecolors='green' )\n\n\n\n# # ****** As a line chart *******\n\n# faultevictionperyear_df.plot(kind=\"line\", x='Year', y='Evictions', color='mediumseagreen', fontsize = 15, title = 'For Year 2017', figsize = (20, 10)) #colormap for pandas plot\n# nofaultevictionperyear_df.plot(kind=\"line\", x='Year', y='Evictions', color='mediumorchid', fontsize = 15, title = 'For Year 2017', figsize = (20, 10)) #colormap for pandas plot\n", "_____no_output_____" ], [ "x_fault = list(faultevictionperyear_df[\"Year\"])\ny_fault = list(faultevictionperyear_df[\"Evictions\"])\nprint(x_fault)\nprint(y_fault)", "[1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019]\n[491, 905, 1012, 914, 874, 700, 783, 608, 762, 788, 897, 908, 874, 904, 940, 948, 1093, 1368, 1319, 1192, 930, 940, 837]\n" ], [ "faultvsnofault_graph = faultevictionperyear_df.plot.line(x='Year', y='Evictions', fontsize = 15, color = \"magenta\", xlim=(2010, 2018), title = 'Evictions Caused By Tenant Fault', figsize = (20, 10))\nplt.savefig(\"graph_3_atfault.png\")\n\n\nfaultvsnofault_graph = nofaultevictionperyear_df.plot.line(x='Year', y='Evictions', fontsize = 15, color = \"green\", xlim=(2010, 2018), title = 'Evictions Not Caused By Tenant', figsize = (20, 10))\nplt.savefig(\"graph_3_nofault.png\")\n\n# faultvsnofault_graph.set_ylabel(\"Num of Evictions\")\n# #nofault_lines.set_ylabel(\"Num of Evictions\") #look into matplotlib scatter\n\n# fontsize = 15, title = 'For Year 2017', figsize = (20, 10)) #colormap for pandas plot\n\n\n\n\n\n\n\n", "_____no_output_____" ], [ "# DONE: VERONICA HAS THIS ONE\n\n# Q: Are tenants being evicted mostly because of things they do? 
Or are they being evicted through no fault of their own?\n# CREATE Graph #3 showing % of fault vs no-fault evictions in entire dataset\n\n", "_____no_output_____" ], [ "labels = [\"Tenant Fault\", \"Landlord Fault\"]\nsizes = [tenant_fault, landlord_fault]\nexplode = (0.01, 0)\ncolors = [\"cornflowerblue\",\"teal\"]\nfig, ax = plt.subplots()\nfig.set_size_inches(14,14)\nax.pie(sizes, colors=colors, explode=explode, labels=labels, autopct=\"%1.1f%%\")\nplt.legend(loc=\"best\")\n#plt.show()\n\nplt.savefig(\"graph_4_pie.png\")\n", "_____no_output_____" ], [ "labels = [\"Tenant Fault\", \"Landlord Fault\"]\nsizes = [tenant_fault, landlord_fault]\nexplode = (0.01, 0)\ncolors = [\"cornflowerblue\",\"teal\"]\nfig, ax = plt.subplots()\nfig.set_size_inches(14,14)\nax.pie(sizes, colors=colors, explode=explode, labels=labels, autopct=\"%1.1f%%\")\nplt.legend(loc=\"best\")\n#plt.show()\n\nplt.savefig(\"graph_4_pie.png\")\n\ndf_tenant_at_fault = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Fault\"] == \"At Fault\"]\ndf_tenant_no_fault = data_file_pd_dropna_df[data_file_pd_dropna_df[\"Fault\"] == \"No Fault\"]\n\n", "_____no_output_____" ] ], [ [ "<img style=\"transform: rotate(90deg); width:500px\" src=\"graph_3.jpg\" />", "_____no_output_____" ] ], [ [ "# Q: Is there a higher eviction rate in areas where tech companies are supposedly displacing residents?\n# CREATE Graph #4 showing num of eviction notices per year, grouped into Tech-influenced neighborhoods and non\n# CREATE delineations for \n # Tech-influenced neighborhoods = Financial District, South of Market\n # non-Tech-influenced neighborhoods = everything else", "_____no_output_____" ] ], [ [ "<img style=\"transform: rotate(90deg); width:500px\" src=\"graph_4.jpg\" />", "_____no_output_____" ] ], [ [ "# Q: Do we see a shift in fault vs no-fault reasons for evictions in tech-influenced neighborhoods?\n# CREATE Graph #6 showing num of eviction notices per year, broken down by fault and no-fault", "_____no_output_____" ] ], [ [ "<img style=\"transform: rotate(90deg); width:500px\" src=\"graph_6.jpg\" />", "_____no_output_____" ] ], [ [ "# Q: For comparison, same graph as above but for the rest of SF neighborhoods (no whiteboard image)\n# CREATE Graph #7 showing num of eviction notices per year, broken down by fault and no-fault", "_____no_output_____" ] ] ]
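Graphs #4 through #7 above are still placeholder comments. The sketch below is one possible starting point using the Financial District / South of Market delineation named in those comments; the exact neighborhood strings are assumptions, so verify them against `data_file_pd_dropna_df['Neighborhoods'].value_counts()` before trusting the output.

```python
# One possible start for Graphs #4-#7 (a sketch, not the team's final analysis).
# ASSUMPTION: the two neighborhood strings below match the values stored in the
# 'Neighborhoods' column -- check value_counts() first.
tech_hoods = ["Financial District", "South of Market"]

df_tech = data_file_pd_dropna_df.copy()
df_tech["Tech Influenced"] = df_tech["Neighborhoods"].isin(tech_hoods)

# Graph #4: evictions per year, tech-influenced vs everything else.
per_year = df_tech.groupby(["Year", "Tech Influenced"]).size().unstack(fill_value=0)
per_year.plot(kind="line", figsize=(20, 10), fontsize=15, xlim=(2010, 2018))
plt.ylabel("Num of Evictions")
plt.savefig("graph_4_tech_split.png")

# Graphs #6/#7: the same split, further broken down by fault vs no-fault.
fault_split = (df_tech.groupby(["Year", "Tech Influenced", "Fault"])
               .size().unstack(fill_value=0))
```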
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec8529fecf7e31c6386b59854879b3f5669d1ceb
243,776
ipynb
Jupyter Notebook
main_NN_VI.ipynb
metas-ch/Bayesian-Machine-Learning
2c50b090006bf2daf8f0f384dc594eb0f0355018
[ "MIT" ]
null
null
null
main_NN_VI.ipynb
metas-ch/Bayesian-Machine-Learning
2c50b090006bf2daf8f0f384dc594eb0f0355018
[ "MIT" ]
null
null
null
main_NN_VI.ipynb
metas-ch/Bayesian-Machine-Learning
2c50b090006bf2daf8f0f384dc594eb0f0355018
[ "MIT" ]
1
2022-02-06T06:43:42.000Z
2022-02-06T06:43:42.000Z
224.058824
48724
0.896569
[ [ [ "# Introduction\n\nThis notebook guides through calibration function development using published data (S. De Vito et al., Sensors and Actuators B: Chemical, Volume 129, Issue 2).", "_____no_output_____" ] ], [ [ "# Load libriaries.\nimport pandas as pd\nimport numpy as np\nimport scipy as sp\nimport tensorflow as tf\ntfk = tf.keras\ntf.keras.backend.set_floatx(\"float64\")\nfrom tensorflow.keras.utils import plot_model\nimport tensorflow_probability as tfp\ntfd = tfp.distributions\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import r2_score, explained_variance_score, mean_squared_error\n\n%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nplt.rcParams['lines.linewidth'] = 1.0\nplt.rcParams['font.size'] = 6.0\nplt.rcParams['axes.titlesize'] = 6.0", "_____no_output_____" ] ], [ [ "# Method(s)\nA Bayesian neural network is used as machine learning algorithm.", "_____no_output_____" ] ], [ [ "# Define helper functions.\nscaler = StandardScaler(copy=True) # Define standard scaler instance.\nimputer = SimpleImputer()\ndetector = IsolationForest(n_estimators=1000, random_state=0) # Define outlier detector instance.\ndim_red = PCA(n_components=2) # Define principal components instance.", "_____no_output_____" ], [ "# Define moving-average function.\ndef moving_average(x, w=168*4):\n return np.convolve(x, np.ones(w), \"same\") / w\n\n# Create Meshgrid.\ndef make_meshgrid(x, y, h=0.25):\n \"\"\"Create a mesh of points to plot in\n\n Parameters\n ----------\n x: data to base x-axis meshgrid on\n y: data to base y-axis meshgrid on\n h: stepsize for meshgrid, optional\n\n Returns\n -------\n xx, yy : ndarray\n \"\"\"\n x_min, x_max = x.min() - 6, x.max() + 6\n y_min, y_max = y.min() - 6, y.max() + 6\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy\n \ndef compute_predictions(model, dim_red, x, y, n_outputs=4, n_iterations=30):\n xx, yy = make_meshgrid(x, y)\n shape = xx.shape\n xx, yy = xx.ravel(), yy.ravel()\n length = xx.shape[0]\n X, Y = np.zeros((length, 1)), np.zeros((length, 1))\n output = np.zeros((n_iterations, n_outputs))\n mu, sigma = np.zeros((length, n_outputs)), np.zeros((length, n_outputs))\n for index in range(length):\n data = np.expand_dims(dim_red.inverse_transform([xx[index], yy[index]]), axis=0)\n for k in range(n_iterations):\n output[k, :] = model.predict(data)\n X[index, 0], Y[index, 0] = xx[index], yy[index]\n mu[index, :], sigma[index, :] = np.mean(output, axis=0), np.std(output, axis=0)\n return X, Y, mu, sigma, shape\n\ndef run_simulation(model, data, T=None, n_outputs=4, n_iterations=30):\n if T == None:\n T = data.shape[0]\n mu = np.zeros((T, n_outputs))\n sigma = np.zeros((T, n_outputs))\n output = np.zeros((n_iterations, n_outputs))\n for t in range(T):\n for k in range(n_iterations):\n output[k, :] = model.predict(np.expand_dims(data[t, :], axis=0))\n mu[t, :] = np.mean(output, axis=0)\n sigma[t, :] = np.std(output, axis=0)\n return mu, sigma", "_____no_output_____" ] ], [ [ "# Data Handling\nLoad data to pandas data frame.", "_____no_output_____" ] ], [ [ "# Load data and keep only first six months.\ndata = pd.read_excel(\"AirQualityUCI.xlsx\")\ndata = data[data[\"Date\"] <= \"2004-09-10\"]\n\n# Visualize data summary.\ndata.describe()", "_____no_output_____" ] ], [ [ "# Methods\nThe data is preprocessed and sliced into 
different sets. Data is scaled before analysis. In particular, standard scaling provides better results with respect to R2 score and explained variance.\nNo imputation is performed.", "_____no_output_____" ] ], [ [ "# Select columns and remove rows with missing values.\ncolumns = [\"PT08.S1(CO)\", \"PT08.S3(NOx)\", \"PT08.S4(NO2)\", \"PT08.S5(O3)\", \"T\", \"AH\", \"CO(GT)\", \"C6H6(GT)\", \"NOx(GT)\", \"NO2(GT)\"]\ndata = data[columns].dropna(axis=0)\n\n# Scale data to zero mean and unit variance.\nX_t = scaler.fit_transform(data)\n\n# Optional: Impute missing values.\n# X_t = imputer.fit_transform(X_t)\n\n# Remove outliers.\nis_inlier = detector.fit_predict(X_t)\nX_t = X_t[(is_inlier > 0),:]\n\n# Restore frame.\ndataset = pd.DataFrame(X_t, columns=columns)\n\n# Define inputs/outputs.\ninputs = [\"PT08.S1(CO)\", \"PT08.S3(NOx)\", \"PT08.S4(NO2)\", \"PT08.S5(O3)\", \"T\", \"AH\"]\noutputs = [\"CO(GT)\", \"C6H6(GT)\", \"NOx(GT)\", \"NO2(GT)\"]", "_____no_output_____" ], [ "# Define some hyperparameters.\nn_epochs = 200\nn_samples = dataset.shape[0]\nn_batches = 50\nbatch_size = np.floor(n_samples/n_batches)\nbuffer_size = n_samples\n\n# Define training and test data sizes.\nn_train = int(0.7*dataset.shape[0])\n\n# Define dataset instance.\ndata = tf.data.Dataset.from_tensor_slices((dataset[inputs].values, dataset[outputs].values))\ndata = data.shuffle(n_samples, reshuffle_each_iteration=True)\n\n# Define train and test data instances.\ndata_train = data.take(n_train).batch(batch_size).repeat(n_epochs)\ndata_test = data.skip(n_train).batch(1)", "_____no_output_____" ], [ "# Specify the surrogate posterior over `keras.layers.Dense` `kernel` and `bias`.\ndef posterior_mean_field(kernel_size, bias_size, dtype=None):\n n = kernel_size + bias_size\n c = np.log(np.expm1(1.))\n return tf.keras.Sequential([\n tfp.layers.VariableLayer(2 * n, dtype=dtype),\n tfp.layers.DistributionLambda(lambda t: tfd.Independent(\n tfd.Normal(loc=t[..., :n],\n scale=1e-5 + tf.nn.softplus(c + t[..., n:])),\n reinterpreted_batch_ndims=1)),\n ])\n\n# Specify the prior over `keras.layers.Dense` `kernel` and `bias`.\ndef prior_trainable(kernel_size, bias_size, dtype=None):\n n = kernel_size + bias_size\n return tf.keras.Sequential([\n tfp.layers.VariableLayer(n, dtype=dtype),\n tfp.layers.DistributionLambda(lambda t: tfd.Independent(\n tfd.Normal(loc=t, scale=1),\n reinterpreted_batch_ndims=1)),\n ])", "_____no_output_____" ], [ "# Define prior for regularization.\nprior = tfd.Independent(tfd.Normal(loc=tf.zeros(len(outputs), dtype=tf.float64), scale=1.0), reinterpreted_batch_ndims=1)\n\n# Define negative logarithmic likelihood.\nneg_log_likelihood = lambda x, rv_x: -rv_x.log_prob(x)\n\n# Define model instance.\nmodel = tfk.Sequential([\ntfk.layers.InputLayer(input_shape=(len(inputs),), name=\"input\"),\ntfp.layers.DenseVariational(10, posterior_mean_field, prior_trainable, \n activation=\"relu\", kl_weight=1/n_samples, name=\"dense_1\"),\ntfp.layers.DenseVariational(tfp.layers.MultivariateNormalTriL.params_size(len(outputs)),\n posterior_mean_field, prior_trainable, \n activation=\"linear\", kl_weight=1/n_samples, name=\"distribution_weights\"),\ntfp.layers.MultivariateNormalTriL(len(outputs), name=\"output\")\n], name=\"model\")\n\n# Compile model.\nmodel.compile(optimizer=\"Adam\", loss=neg_log_likelihood)\n\n# Describe model.\nmodel.summary()\n# plot_model(model, to_file='./figures/model.png', show_shapes=True, expand_nested=True, dpi=900)", "Model: 
\"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_1 (DenseVariational) (None, 10) 210 \n_________________________________________________________________\ndistribution_weights (DenseV (None, 14) 462 \n_________________________________________________________________\noutput (MultivariateNormalTr multiple 0 \n=================================================================\nTotal params: 672\nTrainable params: 672\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# Run training session.\nmodel.load_weights(\"./Bayesian_neural_network\")\n# model.fit(data_train, epochs=n_epochs, validation_data=data_test, verbose=True)\n# model.save_weights(\"./Bayesian_neural_network\")", "_____no_output_____" ] ], [ [ "# Results and Discussion", "_____no_output_____" ] ], [ [ "# Plot the training session.\nfig = plt.figure(figsize=(2.95, 2.95))\nfig.tight_layout()\nplt.yscale(\"log\")\nplt.xlim(0, np.round(len(model.history.epoch)+4,-1))\nplt.xticks(np.linspace(0, np.round(len(model.history.epoch)+4,-1), 5))\nplt.ylim(1e-1, 1e39)\nplt.xlabel(\"epoch\")\nplt.ylabel(\"loss\")\nplt.plot(list(map(lambda x: x + 1, model.history.epoch)), model.history.history[\"loss\"], label=\"training\", color=plt.cm.viridis(0.25))\nplt.plot(list(map(lambda x: x + 1, model.history.epoch)), model.history.history[\"val_loss\"], label=\"validation\", color=plt.cm.viridis(0.5))\nplt.legend(frameon=False, loc=0)\nplt.savefig('./figures/loss_BNN',dpi=900,transparent=True,orientation='landscape',bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "# Predict.\nY_true = dataset[outputs].values\nY_pred = np.zeros(shape=dataset[outputs].shape)\nk = 100\nfor _ in range(k):\n Y_pred += (1/k)*model.predict(dataset[inputs])", "_____no_output_____" ], [ "n_v = 4\nlabels = [\"CO\", \"C6H6\", \"NOx\", \"NO2\"]\ncolors = [plt.cm.viridis(0.0), plt.cm.viridis(0.25), plt.cm.viridis(0.50), plt.cm.viridis(0.75)]\nfig, axes = plt.subplots(figsize=(2.95, 2.95), ncols=2, nrows=2, sharex=True, sharey=True)\nax = axes.ravel()\nfor k in range(n_v):\n ax[k].plot([-3, 3], [-3, 3], color=\"k\")\n ax[k].scatter(Y_true[:, k], Y_pred[:, k], color=colors[k], s=4, alpha=1.0, label=labels[k])\n ax[k].text(0.45, -2.75, \"$R^2$ = \"+str(np.round(r2_score(Y_true[:, k], Y_pred[:, k]), 2)))\n ax[k].legend(frameon=False)\n if k % 2 == 0:\n ax[k].set_ylabel(\"Y$_{model}$\")\n if k // 2 != 0:\n ax[k].set_xlabel(\"Y$_{data}$\")\n ax[k].set_xlim(-3, 3)\n ax[k].set_ylim(-3, 3)\n ax[k].set_xticks(np.round(np.linspace(-3, 3, 7), 2))\n ax[k].set_yticks(np.round(np.linspace(-3, 3, 7), 2))\nplt.savefig('./figures/agreement',dpi=900,transparent=True,bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "Y_red = dim_red.fit_transform(dataset[inputs])\n# X, Y, mu, sigma, shape = compute_predictions(model, dim_red, Y_red[:, 0], Y_red[:, 1])\nX, Y, mu, sigma, shape = np.load(\"X.npy\"), np.load(\"Y.npy\"), np.load(\"mu.npy\"), np.load(\"sigma.npy\"), np.load(\"shape.npy\")", "_____no_output_____" ], [ "mu[np.abs(mu) > 8] = 8\n\nfig = plt.figure(figsize=(1.2*2.95,2.95))\nax = fig.add_subplot(111)\nlevels = np.linspace(-8, 8, 17)\nxx, yy, Z = X.reshape(shape), Y.reshape(shape), np.mean(mu, axis=-1).reshape(shape)\nsurf = ax.contourf(xx, yy, Z, cmap=plt.cm.viridis, alpha=0.6, levels=levels)\ncbar = plt.colorbar(surf)\ncbar.ax.set_xlabel(r\"$\\mu$\")\nax.scatter(Y_red[:, 0], Y_red[:, 
1], color=\"k\", s=4, alpha=1.0)\nax.set_xlim(-8, 8)\nax.set_ylim(-8, 8)\nax.set_xlabel('principal component 1')\nax.set_ylabel('principal component 2')\nax.set_xticks(np.round(np.linspace(-8, 8, 5), 2))\nax.set_yticks(np.round(np.linspace(-8, 8, 5), 2))\nplt.savefig('./figures/dim_red_mean',dpi=900,transparent=True,orientation='landscape',bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "n_v = 4\nlabels = [\"CO\", \"C6H6\", \"NOx\", \"NO2\"]\ncolors = [plt.cm.viridis(0.0), plt.cm.viridis(0.25), plt.cm.viridis(0.50), plt.cm.viridis(0.75)]\nfig, axes = plt.subplots(figsize=(2.95, 2.95), ncols=2, nrows=2, sharex=True, sharey=True)\nax = axes.ravel()\nlevels = np.linspace(-8, 8, 17)\nfor k in range(n_v):\n xx, yy, Z = X.reshape(shape), Y.reshape(shape), mu[:, k].reshape(shape)\n ax[k].contourf(xx, yy, Z, cmap=plt.cm.viridis, alpha=0.6, levels=levels)\n ax[k].scatter(Y_red[:, 0], Y_red[:, 1], color=\"k\", s=4, alpha=1.0, label=labels[k])\n ax[k].legend(frameon=True)\n if k % 2 == 0:\n ax[k].set_ylabel(\"principal component 2\")\n if k // 2 != 0:\n ax[k].set_xlabel(\"principal component 1\")\n ax[k].set_xlim(-8, 8)\n ax[k].set_ylim(-8, 8)\n ax[k].set_xticks(np.round(np.linspace(-8, 8, 5), 2))\n ax[k].set_yticks(np.round(np.linspace(-8, 8, 5), 2))\nplt.savefig('./figures/dim_red_mean_ind',dpi=900,transparent=True,bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "sigma[sigma > 50] = 50\n\nfig = plt.figure(figsize=(1.2*2.95,2.95))\nax = fig.add_subplot(111)\nxx, yy, Z = X.reshape(shape), Y.reshape(shape), np.mean(sigma, axis=-1).reshape(shape)\nlevels = np.linspace(0, 50, 11)\nsurf = ax.contourf(xx, yy, Z, cmap=plt.cm.viridis, alpha=0.6, levels=levels)\ncbar = plt.colorbar(surf)\ncbar.ax.set_xlabel(r\"$\\sigma$\")\nax.scatter(Y_red[:, 0], Y_red[:, 1], color=\"k\", s=4, alpha=1.0)\nax.set_xlim(-8, 8)\nax.set_ylim(-8, 8)\nax.set_xlabel('principal component 1')\nax.set_ylabel('principal component 2')\nax.set_xticks(np.round(np.linspace(-8, 8, 5), 2))\nax.set_yticks(np.round(np.linspace(-8, 8, 5), 2))\nplt.savefig('./figures/dim_red_unc',dpi=900,transparent=True,orientation='landscape',bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "n_v = 4\nlabels = [\"CO\", \"C6H6\", \"NOx\", \"NO2\"]\ncolors = [plt.cm.viridis(0.0), plt.cm.viridis(0.25), plt.cm.viridis(0.50), plt.cm.viridis(0.75)]\nfig, axes = plt.subplots(figsize=(2.95, 2.95), ncols=2, nrows=2, sharex=True, sharey=True)\nax = axes.ravel()\nlevels = np.linspace(0, 50, 11)\nfor k in range(n_v):\n xx, yy, Z = X.reshape(shape), Y.reshape(shape), sigma[:, k].reshape(shape)\n ax[k].contourf(xx, yy, Z, cmap=plt.cm.viridis, alpha=0.6, levels=levels)\n ax[k].scatter(Y_red[:, 0], Y_red[:, 1], color=\"k\", s=4, alpha=1.0, label=labels[k])\n ax[k].legend(frameon=True)\n if k % 2 == 0:\n ax[k].set_ylabel(\"principal component 2\")\n if k // 2 != 0:\n ax[k].set_xlabel(\"principal component 1\")\n ax[k].set_xlim(-8, 8)\n ax[k].set_ylim(-8, 8)\n ax[k].set_xticks(np.round(np.linspace(-8, 8, 5), 2))\n ax[k].set_yticks(np.round(np.linspace(-8, 8, 5), 2))\nplt.savefig('./figures/dim_red_unc_ind',dpi=900,transparent=True,bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "y_regular = []\nfor _ in range(500):\n y_regular.append(model.predict(np.expand_dims(dim_red.inverse_transform([0, 0]), axis=0)))\ny_regular = np.squeeze(np.array(y_regular), axis=1)", "_____no_output_____" ], [ "mu1 = np.mean(y_regular, axis=0)\nsigma1 = np.std(y_regular, axis=0)", "_____no_output_____" ], [ "n_v = 4\nlabels = [\"CO\", \"C6H6\", 
\"NOx\", \"NO2\"]\ncolors = [plt.cm.viridis(0.0), plt.cm.viridis(0.25), plt.cm.viridis(0.50), plt.cm.viridis(0.75)]\nfig, axes = plt.subplots(figsize=(2.95, 2.95), ncols=2, nrows=2, sharex=True, sharey=True)\nax = axes.ravel()\nfor k in range(n_v):\n ax[k].hist(y_regular[:, k], density=True, bins=21, color=colors[k], alpha=0.6, label=labels[k])\n ax[k].text(-1.8, 3.5, \"$\\mu=$\"+str(np.round(mu1[k],1)))\n ax[k].text(-1.8, 3, \"$\\sigma=$\"+str(np.round(sigma1[k],1)))\n ax[k].legend(frameon=False)\n if k % 2 == 0:\n ax[k].set_ylabel(r\"$\\rho$\")\n if k // 2 != 0:\n ax[k].set_xlabel(\"Y$_{model}$\")\n ax[k].set_xlim(-2, 2)\n ax[k].set_ylim(0, 4)\nplt.savefig('./figures/regular',dpi=900,transparent=True,bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "y_irregular = []\nfor _ in range(500):\n y_irregular.append(model.predict(np.expand_dims(dim_red.inverse_transform([-6, -6]), axis=0)))\ny_irregular = np.squeeze(np.array(y_irregular), axis=1)", "_____no_output_____" ], [ "mu2 = np.mean(y_irregular, axis=0)\nsigma2 = np.std(y_irregular, axis=0)", "_____no_output_____" ], [ "n_v = 4\nlabels = [\"CO\", \"C6H6\", \"NOx\", \"NO2\"]\nfig, axes = plt.subplots(figsize=(2.95, 2.95), ncols=2, nrows=2, sharex=True, sharey=True)\nax = axes.ravel()\nfor k in range(n_v):\n ax[k].hist(y_irregular[:, k], density=True, bins=31, color=colors[k], alpha=0.6, label=labels[k])\n ax[k].text(-53, 0.175, \"$\\mu=$\"+str(np.round(mu2[k],1)))\n ax[k].text(-53, 0.15, \"$\\sigma=$\"+str(np.round(sigma2[k],1)))\n ax[k].legend(frameon=False)\n if k % 2 == 0:\n ax[k].set_ylabel(r\"$\\rho$\")\n if k // 2 != 0:\n ax[k].set_xlabel(\"Y$_{model}$\")\n ax[k].set_xlim(-60, 60)\n ax[k].set_xticks(np.linspace(-60, 60, 5))\n ax[k].set_ylim(0, 0.20)\nplt.savefig('./figures/irregular',dpi=900,transparent=True,bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "# Load data and keep only second six months.\ndata_test = pd.read_excel(\"AirQualityUCI.xlsx\")\ndata_test = data_test[data_test[\"Date\"] >= \"2004-09-10\"]\n\n# Impute missing values.\nX_t = imputer.fit_transform(data_test[columns])\n\n# Scale.\nX_t = scaler.transform(X_t)\n\n# Restore frame.\ndataset_test = pd.DataFrame(X_t, columns=columns, index=data_test[\"Date\"]+pd.to_timedelta([t.isoformat() for t in data_test[\"Time\"].values]))", "_____no_output_____" ], [ "T = None\nn_outputs = 4\n\n# mu_, sigma_ = run_simulation(model, dataset_test[inputs].values, T, n_outputs)\nmu_, sigma_ = np.load(\"mu_.npy\"), np.load(\"sigma_.npy\")", "_____no_output_____" ], [ "n_v = 4\nlabels = [\"CO\", \"C6H6\", \"NOx\", \"NO2\"]\ncolors = [plt.cm.viridis(0.0), plt.cm.viridis(0.25), plt.cm.viridis(0.50), plt.cm.viridis(0.75)]\nfig, axes = plt.subplots(figsize=(1.0*2.95, 1.5*2.95), ncols=1, nrows=4, sharex=True, sharey=True)\nax = axes.ravel()\nfor k in range(n_v):\n ax[k].plot(np.arange(0, mu_.shape[0], 1), moving_average(mu_[:, k]), color=colors[k], label=labels[k])\n ax[k].fill_between(np.arange(0, mu_.shape[0], 1), \n moving_average(mu_[:, k]) - moving_average(sigma_[:, k]),\n moving_average(mu_[:, k]) + moving_average(sigma_[:, k]), \n color=colors[k], alpha=0.33)\n ax[k].legend(frameon=False, loc=2)\n ax[k].set_ylabel(\"Y$_{model}$\")\n ax[k].set_ylim(-2, 2)\n ax[k].set_yticks(np.linspace(-2, 2, 5))\n ax[k].set_xlim(0, 3360)\n ax[k].set_xticks(np.linspace(0, 3360, 5, dtype=int))\n ax[k].set_xticklabels(np.linspace(0, (3360/24), 5, dtype=int))\nax[k].set_xlabel(\"time / 
days\")\nplt.savefig('./figures/mu_sigma_test',dpi=900,transparent=True,bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "# Plot violinplot.\nlabels = [\"CO\", \"C6H6\", \"NOx\", \"NO2\"]\ncolors = [plt.cm.viridis(0.0), plt.cm.viridis(0.25), plt.cm.viridis(0.50), plt.cm.viridis(0.75)]\nfig = plt.figure(figsize=(2.95,2.95))\nfig.tight_layout()\nvp = plt.violinplot(sigma_,\n widths=0.2, points=1000, showmeans=False, showmedians=True, showextrema=True, quantiles=[[0.25, 0.75]]*n_outputs)\nfor k, vp_ in enumerate(vp[\"bodies\"]):\n vp_.set_facecolor(colors[k])\n vp_.set_alpha(0.6)\n \nfor partname in list(vp.keys())[1:]:\n vp_ = vp[partname]\n vp_.set_edgecolor(\"k\")\n vp_.set_alpha(1.0)\nplt.xlabel(r\"pollutant\")\nplt.ylabel(\"$\\sigma$\")\nplt.xticks((1,2,3,4),labels)\nplt.yscale(\"log\")\nplt.ylim(0.01, 100)\nplt.savefig(\"./figures/box_plot_sigma\",dpi=900,transparent=True,orientation=\"landscape\",bbox_inches=\"tight\")\nplt.show()", "_____no_output_____" ], [ "Y_irreg = dim_red.transform(dataset_test[inputs].loc[np.max(sigma_, axis=1) > 1])\n\nfig = plt.figure(figsize=(2.95,2.95))\nax = fig.add_subplot(111)\nax.scatter(Y_red[:, 0], Y_red[:, 1], color=plt.cm.viridis(0.0), s=4, alpha=1.0, label=\"training\")\nax.scatter(Y_irreg[:, 0], Y_irreg[:, 1], color=plt.cm.viridis(0.25), marker=\"s\", s=4, alpha=1.0, label=\"testing\")\nax.set_xlim(-8, 8)\nax.set_ylim(-8, 8)\nax.set_xlabel('principal component 1')\nax.set_ylabel('principal component 2')\nax.set_xticks(np.round(np.linspace(-8, 8, 5), 2))\nax.set_yticks(np.round(np.linspace(-8, 8, 5), 2))\nplt.legend(frameon=False)\nplt.savefig('./figures/anomalies',dpi=900,transparent=True,orientation='landscape',bbox_inches='tight')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec8540f194a8fb7683fd3690dc0ade8ee6400db7
29,079
ipynb
Jupyter Notebook
004_Python_HW_Assignment_04.ipynb
meng0606/04_Python_Functions
6253070a7a503b474e4e5f977d3c9a596ff13a03
[ "MIT" ]
1
2021-09-30T05:49:32.000Z
2021-09-30T05:49:32.000Z
004_Python_HW_Assignment_04.ipynb
meng0606/04_Python_Functions
6253070a7a503b474e4e5f977d3c9a596ff13a03
[ "MIT" ]
null
null
null
004_Python_HW_Assignment_04.ipynb
meng0606/04_Python_Functions
6253070a7a503b474e4e5f977d3c9a596ff13a03
[ "MIT" ]
null
null
null
51.285714
1,490
0.587916
[ [ [ "&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp; **JLUFE** &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&ensp;**Fall 2021(Sep-Jan)** \n\n\n**<div align=\"center\">Homework Assignment Report</div>**\n<br>\n\n**<div align=\"center\">JILIN UNIVERSITY OF FINANCE AND ECONOMICS</div>**\n<br>\n\n**<div align=\"center\"><span style=\"color:blue\">College of Managment Science and Information Engineering</span></div>**\n\n**<div align=\"center\">BSc in <span style=\"color:blue\">Data Science and Big Data Technology</span></div>**\n\n**<div align=\"center\">(2021)</div>**\n\n<br> \n\n**<div align=\"center\">MODULE: Intelligent Technology</div>**\n\n**<div align=\"center\">Homework Assignment: 04</div>**\n\n**<div align=\"center\">Flow Control Statements</div>**\n\n**<div align=\"center\">21/10/2021</div>**\n\n<br>\n\n**<div align=\"center\">Submitted by:</div>**\n\n**<div align=\"center\"><span style=\"color:blue\">Heloise(韩孟汝) 2152683938 (2006)</span></div>**\n**<div align=\"center\">QQ: <span style=\"color:blue\">2152683938</span> | Github ID: <span style=\"color:blue\">meng0606</span></div>**", "_____no_output_____" ], [ "# Instructions: \n\n\n1. I have added tips and required learning resources for each question, which helps you to solve the problems. \n\n2. Finish the assignment on your **OWN**. **<span style=\"color:red\">Any student find copying/sharing from classmates or internet will get '0' points!!!</span>**\n\n3. After <span class='label label-default'>Accepting this assignment</span> from ➞ **[GitHub Clasroom link](https://classroom.github.com/a/zeslyIXN)**, Github will create private repository of the assignment in your GitHub Classroom account.\n\n4. In your repository <span class='label label-default'>Clone</span> ➞ <span class='label label-default'>Download ZIP</span> in your computer.\n\n5. Change your ➞ **College**, **Major**, **Name**, **Student number**, **Class number**, **QQ number** and **GitHub ID**\n\n6. Once you finish the Assignment **[convert your .ipynb file into PDF](https://github.com/milaan9/91_Python_Mini_Projects/tree/main/001_Convert_IPython_to_PDF)** (both **.ipynb** and **.pdf** file will be required!)\n\n7. To submit your assignment, go to GitHub Classroom repository and <span class='label label-default'>Add file</span> ➞ <span class='label label-default'>Upload files</span> ➞ <span class='label label-default'>Commit changes</span>\n 1. Replace the question (**.ipynb**) file with your solution (**.ipynb**) file.\n 2. Also, upload (**.pdf**) converted file of your solution (**.ipynb**) file.", "_____no_output_____" ], [ "# Python Assignment 04", "_____no_output_____" ], [ "# Part A ➞ If-elif-else Statements <span class='label label-default'>Level 1, 2 & 3</span>\n\n>**Note:** Please create new cell for each question", "_____no_output_____" ], [ "### Part A ➞ <span class='label label-default'>Level 1</span>\n>**Note:** Please create new cell for each question\n\n1. Get two numbers from the user using **`input()`** prompt. \n - If **`num_1`** is greater than **`num_2`** return **`num_1`** is greater than **`num_2`**, \n - if **`num_1`** is less **`num_2`** return **`num_1`** is smaller than **`num_2`**, \n - else **`num_1`** is equal to **`num_2`**. 
\n\n   - ```sh\nEnter number one: 9\nEnter number two: 6\n9 is greater than 6\n   ```", "_____no_output_____" ] ], [ [ "# Solution:\nnum_1 = int(input(\"Enter number one: \"))\nnum_2 = int(input(\"Enter number two: \"))\nif num_1 > num_2:\n    print(num_1, \"is greater than\", num_2)\nelif num_1 < num_2:\n    print(num_1, \"is smaller than\", num_2)\nelse:\n    print(num_1, \"is equal to\", num_2)", "_____no_output_____" ] ], [ [ "### Part A ➞ <span class='label label-default'>Level 2</span>\n>**Note:** Please create a new cell for each question\n\n1. Write code which assigns a grade to students according to their scores, taken from user **`input()`**:\n    \n   - ```sy\n    90-100, A\n    70-89, B\n    60-69, C\n    50-59, D\n    0-49, F\n   ```\n\n2. Check if the season is **`Autumn`**, **`Winter`**, **`Spring`** or **`Summer`**. \n   - If the user **`input()`** is:\n        - September, October or November, the season is Autumn.\n        - December, January or February, the season is Winter.\n        - March, April or May, the season is Spring.\n        - June, July or August, the season is Summer.\n\n\n3. The following list contains some fruits:\n   - Take user **`input()`** and if a fruit doesn't exist in the list, add the fruit to the list and print the modified list. If the fruit exists, print **`('That fruit already exist in the list')`** \n    \n   ```py\n    fruits = ['banana', 'orange', 'mango', 'pear']\n   ``` ", "_____no_output_____" ] ], [ [ "#1\nscore=int(input(\"please enter score:\"))\nif(90<=score<=100):\n    print(\"your grade is A\")\nelif(70<=score<=89):\n    print(\"your grade is B\")\nelif(60<=score<=69):\n    print(\"your grade is C\")\nelif(50<=score<=59):\n    print(\"your grade is D\")\nelse:\n    print(\"your grade is F\")", "_____no_output_____" ], [ "#2\nmonth=input(\"please enter month (1-12):\")\nif(month==\"9\" or month==\"10\" or month==\"11\"):\n    print(\"the season is Autumn\")\nelif(month==\"12\" or month==\"1\" or month==\"2\"):\n    print(\"the season is Winter\")\nelif(month==\"3\" or month==\"4\" or month==\"5\"):\n    print(\"the season is Spring\")\nelif(month==\"6\" or month==\"7\" or month==\"8\"):\n    print(\"the season is Summer\")\nelse:\n    print(\"Wrong input, there is no \"+ month +\" month\")", "_____no_output_____" ], [ "#3\nfruits = ['banana', 'orange', 'mango', 'pear']\nfruit=input(\"please enter a fruit:\")\nif(fruit not in fruits):\n    fruits.append(fruit)\n    print(fruits)\nelse:\n    print('That fruit already exist in the list')", "_____no_output_____" ] ], [ [ "### Part A ➞ <span class='label label-default'>Level 3</span>\n>**Note:** Please create a new cell for each question\n\n1. Here we have a person dictionary. 
Feel free to modify it!\n    \n   - ```py\n    person={\n    'first_name': 'Milaan',\n    'last_name': 'Parmar',\n    'age': 96,\n    'country': 'England',\n    'is_marred': True,\n    'skills': ['Python', 'Matlab', 'R', 'C', 'C++'],\n    'address': {\n        'street': 'Space street',\n        'zipcode': '02210'\n    }\n    }\n   ```\n\n   * Check if the person dictionary has a **`skills`** key; if so, print out the middle skill in the skills list.\n   * Check if the person dictionary has a **`skills`** key; if so, check if the person has the 'Python' skill and print out the result.\n   * If the person's skills are only Python and Matlab, print ('He knows machine learning'); if the person's skills include Python and R, print ('He knows statistics'); if the person's skills include C and C++, print ('He knows software development'); else print ('unknown title') - for more accurate results, more conditions can be nested!\n   * If the person is married and lives in England, print the information in the following format:\n\n   - ```sy\n    Milaan Parmar lives in England. He is married.\n   ```", "_____no_output_____" ] ], [ [ "# Solution: \nperson={\n'first_name': 'Milaan',\n'last_name': 'Parmar',\n'age': 96,\n'country': 'England',\n'is_marred': True,\n'skills': ['Python', 'Matlab', 'R', 'C', 'C++'],\n'address': {\n    'street': 'Space street',\n    'zipcode': '02210'\n}\n}
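\n\n# --- Added completion sketch: the cell above stopped at the dictionary. ---\n# One straightforward way to answer the four tasks, assuming the dictionary as given:\nif 'skills' in person:\n    skills = person['skills']\n    print(skills[len(skills) // 2])   # the middle skill\n    print('Python' in skills)         # does the person know Python?\n    if set(skills) == {'Python', 'Matlab'}:\n        print('He knows machine learning')\n    elif 'Python' in skills and 'R' in skills:\n        print('He knows statistics')\n    elif 'C' in skills and 'C++' in skills:\n        print('He knows software development')\n    else:\n        print('unknown title')\nif person['is_marred'] and person['country'] == 'England':\n    print(person['first_name'], person['last_name'], 'lives in England. He is married.')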
", "_____no_output_____" ] ], [ [ "# Part B ➞ Loops <span class='label label-default'>Level 1, 2 and 3</span>\n\n>**Note:** Please create a new cell for each question", "_____no_output_____" ], [ "### Part B ➞ <span class='label label-default'>Level 1</span>\n>**Note:** Please create a new cell for each question\n\n1. Iterate 0 to 10 using a **`for`** loop, then do the same using a **`while`** loop.\n2. Iterate 10 to 0 using a **`for`** loop, then do the same using a **`while`** loop.\n3. Write code that prints the following square, taking the number of rows as **`input()`** from the user:\n\n   - ```sy\n# = # = # = # = #\n# = # = # = # = #\n# = # = # = # = #\n# = # = # = # = #\n# = # = # = # = #\n# = # = # = # = #\n# = # = # = # = #\n# = # = # = # = #\n   ```\n\n4. Use nested loops to create the following, taking the number of rows as **`input()`** from the user:\n\n```sy\n      #\n     ###\n    #####\n   #######\n  #########\n ###########\n#############\n```\n\n5. Print the following using loops, taking **`input()`** from the user:\n\n   - ```sy\n    0 x 0 = 0\n    1 x 1 = 1\n    2 x 2 = 4\n    3 x 3 = 9\n    4 x 4 = 16\n    5 x 5 = 25\n    6 x 6 = 36\n    7 x 7 = 49\n    8 x 8 = 64\n    9 x 9 = 81\n    10 x 10 = 100\n   ```\n\n6. Iterate through the list **`['Python', 'Numpy', 'Pandas', 'Scikit', 'Pytorch']`** using a **`for`** loop and print out the items.\n\n7. Use a **`while`** loop to iterate from 0 to 100 and print the sum of all numbers.\n\n   - ```py\n    The sum of all numbers is 5050.\n   ```\n    \n8. Use a **`for`** loop to iterate from 0 to 100 and print the sum of all evens and the sum of all odds.\n\n   - ```py\n    The sum of all evens is 2550. And the sum of all odds is 2500.\n   ```", "_____no_output_____" ] ], [ [ "# Solution: \n#1\nfor i in range(11):\n    print(i,end=' ')\nprint()\nn=0\nwhile(n<=10):\n    print(n,end=' ')\n    n=n+1 ", "0 1 2 3 4 5 6 7 8 9 10 \n0 1 2 3 4 5 6 7 8 9 10 " ], [ "#2\nfor i in range(10,-1,-1):\n    print(i,end=' ')\nprint()\nn=10\nwhile(n>=0):\n    print(n,end=' ')\n    n=n-1 ", "_____no_output_____" ], [ "#3\nn=int(input('please enter line numbers:'))\nfor i in range(n):\n    for j in range(n+1):\n        if((j+1)%2==1):\n            print('#',end=' ')\n        else:\n            print('=',end=' ')\n    print()", "_____no_output_____" ], [ "#4\nn=int(input('please enter line numbers:'))\nfor i in range(1,n+1):\n    for j in range(n-i):\n        print(' ',end=' ')\n    for j in range(2*i-1): \n        print('#',end=' ')\n    print()", "_____no_output_____" ], [ "#5\nn=int(input('please enter the last number:'))\nfor i in range(n+1):\n    print(i, 'x', i, '=', i*i)", "_____no_output_____" ] ], [ [ "### Part B ➞ <span class='label label-default'>Level 2</span>\n>**Note:** Please create a new cell for each question\n\n1. Use a **`for`** loop to find the fibonacci numbers from 0 to 100 and print only the even numbers from it. Also, find how many even numbers are in it.\n2. Use a **`while`** loop to find the fibonacci numbers from 0 to 100 and print only the odd numbers from it. Also find how many odd numbers are in it.", "_____no_output_____" ], [ "### Part B ➞ <span class='label label-default'>Level 3</span>\n>**Note:** Please create a new cell for each question\n\n1. Go to the data folder and use the **[countries_data.py](https://github.com/milaan9/02_Python_Datatypes/blob/main/countries_data.py)** file. Loop through the countries and extract all the countries containing the word **`land`**.\n2. This is a fruit list, **`['banana', 'orange', 'mango', 'lemon']`**; reverse the order using a loop.\n3. Go to the data folder and use the **[countries_details_data.py](https://github.com/milaan9/03_Python_Flow_Control/blob/main/countries_details_data.py)** file. \n    1. What is the total number of languages in the data?\n    2. Find the ten most spoken languages from the data.\n    3. Find the 10 most populated countries in the world.", "_____no_output_____" ] ], [ [ "# Solution: \n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
ec8559c59ce76e9734001a4aed7ca958b8e28d76
246,734
ipynb
Jupyter Notebook
taller_regresion_lineal/taller_regresion_lineal.ipynb
DiplomadoAI-DL-UNAL2021/samuelmesa
87dfff3269d5056deb45dd6cedad2781d2568c97
[ "CC0-1.0" ]
null
null
null
taller_regresion_lineal/taller_regresion_lineal.ipynb
DiplomadoAI-DL-UNAL2021/samuelmesa
87dfff3269d5056deb45dd6cedad2781d2568c97
[ "CC0-1.0" ]
null
null
null
taller_regresion_lineal/taller_regresion_lineal.ipynb
DiplomadoAI-DL-UNAL2021/samuelmesa
87dfff3269d5056deb45dd6cedad2781d2568c97
[ "CC0-1.0" ]
null
null
null
304.234279
85,222
0.902308
[ [ [ "# Taller Entendiendo Jupyter Lab, Github, Python y Regresión Lineal\n\n---\n\n**Nombre**: Samuel Fernando Mesa\n\n**Feha**: 22 de marzo de 2021\n\n**Obetivo**: Conocer las herramientas básicas de trabajo de entorno de trabajo de Jupyter con Google Collaboration, Github y desarrollo con Python para generar un modelo de ajuste de aprendizaje de máquina \n\n---", "_____no_output_____" ], [ "## Parte 1 - Introducción a la regresión lineal", "_____no_output_____" ], [ "### Introducción\nRepresenta un ajuste o relación lineal entre dos o mas variables en un modelo matemático usado para aproximar la relación de dependencia, entre una variable dependiente $y$, y una variable independiente $x$.\n\nEntendiendo que lo que llamamos predicción o pronóstico (Probabilidad en el futuro que algo suceda) en las situaciones que nos compete estudiar la basaremos en conocimiento que aplicaremos para el desarrollo de nuestro estudio en temas de Inteligencia Artificial. Y podemos partir de una relación como la que se describe a continuación:\n\n$Variable Dependiente = Constante + Pendiente * Variable Independiente + Error$", "_____no_output_____" ], [ "### 2. Breve historia\n\nLa regresión lineal es un campo de estudio el cual se centra en la relación estadística entre dos variables continuas, cuando ya se involucran más de una variable predictora, se establece una relación lineal Múltiple, se puede llevar el estudio de relación de $variables$.\n\n![Image](https://i.imgur.com/HW2MoNP.png)\n", "_____no_output_____" ], [ "### Fundamento teórico\n\nLa regresión lineal se puede decir que es una técnica basada en parámetros, para ejemplificar conocemos que una línea necesita 2 parámetros. cuya formula es $y=wx +b$\n\nLo que denominamos aprendizaje se basa en encontrar los mejores parámetros (coeficientes -- $w$) que minimicen la medida del error.\n\n$y= b + wx$\nes así, que cuando tengamos un dato con N variables, llamaremos al dato $x$. Se debe tener en cuenta que se expanden también los parámetros $w$.\n\n![Image](https://i.imgur.com/4p09QuC.png)\n", "_____no_output_____" ], [ "## Regresión lineal con Datos de muestras de suelo \n\nEl siguiente conjunto de datos corresponde a las muestras de suelo toamdas en un ejemplo académico por parte del ITC de Holanda, en el municipio de Chinchiná (Caldas) [ver documento](https://github.com/DiplomadoAI-DL-UNAL2021/samuelmesa/blob/main/taller_regresion_lineal/documento_base.pdf). 
", "_____no_output_____" ], [ "## Linear regression with soil sample data \n\nThe following dataset corresponds to soil samples taken as an academic example by ITC (Netherlands) in the municipality of Chinchiná (Caldas); [see the base document](https://github.com/DiplomadoAI-DL-UNAL2021/samuelmesa/blob/main/taller_regresion_lineal/documento_base.pdf). \n\n### Data description \n\nThe data correspond to boreholes, i.e., samples taken in the field (203 of them), and carry the following information:\n\n* Location (longitude and latitude) \n* Description (DESCRIPTIO)\n* Lithology (GEOL)\n* Thickness in meters of the buried layers (THICKNESS)\n* Clay percentage (PERCCLAY)\n* Permeability in meters/day (PERMEABILI).", "_____no_output_____" ] ], [ [ "## Import libraries and modules\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nplt.rcParams['figure.figsize'] = (128, 54)\nplt.style.use('ggplot')\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score", "_____no_output_____" ], [ "# Import the CSV file\n# Load the borehole data \nbarrenos = pd.read_csv(\"https://raw.githubusercontent.com/DiplomadoAI-DL-UNAL2021/samuelmesa/main/taller_regresion_lineal/barrenos.csv\")\n\n# print the dimensions: number of rows and columns\nbarrenos.shape", "_____no_output_____" ], [ "# Explore the records with a sample \n\nbarrenos.head()", "_____no_output_____" ], [ "# Display statistics of the input data\n\nbarrenos.describe()", "_____no_output_____" ], [ "# Display the data as histograms \nfig, axs = plt.subplots(3, figsize=(12, 12))\nsns.histplot(data=barrenos, x=\"THICKNESS\", kde=True, ax=axs[0])\nsns.histplot(data=barrenos, x=\"PERCCLAY\", kde=True, ax=axs[1])\nsns.histplot(data=barrenos, x=\"PERMEABILI\", kde=True, ax=axs[2])\nplt.show()", "_____no_output_____" ], [ "## Draw the scatter plot\n\nsns.pairplot(barrenos,x_vars=['THICKNESS','PERCCLAY'],y_vars='PERMEABILI',height=7,aspect=0.7,kind='reg')\nsns.despine()", "_____no_output_____" ] ], [ [ "\n## Linear regression with Python\n\n\nTrain a simple linear regression using **PERCCLAY** as the independent variable and **PERMEABILI** as the dependent variable.\n", "_____no_output_____" ] ], [ [ "## Here we define the dependent and independent variables as training input vectors \n\ndata_x = barrenos[[\"PERCCLAY\"]]\nX_train = np.array(data_x)\nY_train = barrenos[[\"PERMEABILI\"]]\n\n# Create the simple linear regression model\nregr = linear_model.LinearRegression()\n\n# Train the simple linear regression model\nregr.fit(X_train, Y_train)\n\n# Make the predictions, which ultimately form a line (in this 2D case)\nY_pred = regr.predict(X_train)\n\n# Print the fitted coefficients; in our case, the slope\nprint('Coefficients: \\n', regr.coef_)\n# This is the value where the line crosses the Y axis (at X=0)\nprint('Independent term: \\n', regr.intercept_)\n# Mean squared error\nprint(\"Mean squared error: %.2f\" % mean_squared_error(Y_train, Y_pred))\n# Variance score. 
The best score is 1.0\nprint('Variance score: %.2f' % r2_score(Y_train, Y_pred))", "Coefficients: \n [[-0.05394601]]\nIndependent term: \n [2.75218183]\nMean squared error: 0.40\nVariance score: 0.73\n" ] ], [ [ "## Visualize the fitted line on the scatter plot", "_____no_output_____" ] ], [ [ "# Prediction of m, b (slope and intercept)\n\nsns.lmplot(x='PERCCLAY',y='PERMEABILI',data=barrenos)\nsns.despine()", "_____no_output_____" ], [ "fig, axs = plt.subplots(figsize=(10, 6))\nsns.histplot((Y_pred-Y_train), kde=True, ax=axs)\nfig.show()", "_____no_output_____" ] ], [ [ "## Multiple Linear Regression in Python\n\nMultiple linear regression is performed with **THICKNESS** and **PERCCLAY** as the independent variables and **PERMEABILI** as the dependent variable.", "_____no_output_____" ] ], [ [ "# Let's try to improve the model with one more dimension: \n# To plot in 3D, a new dataframe is created with the THICKNESS and PERCCLAY variables\ndataX2 = pd.DataFrame()\ndataX2[\"PERCCLAY\"] = barrenos[\"PERCCLAY\"]\ndataX2[\"THICKNESS\"] = barrenos[\"THICKNESS\"]\n\nXY_train = np.array(dataX2)\nz_train = barrenos[\"PERMEABILI\"].values", "_____no_output_____" ], [ "## Training with scikit-learn\n# Create the linear regression model\nregr2 = linear_model.LinearRegression()\n\n# Train the model, this time with 2 dimensions;\n# we will obtain 2 coefficients, to plot a plane\nregr2.fit(XY_train, z_train)\n\n# Perform the prediction, which gives us points on the fitted plane\nz_pred = regr2.predict(XY_train)\n\n# The coefficients\nprint('Coefficients: \\n', regr2.coef_)\n# Mean squared error\nprint(\"Mean squared error: %.2f\" % mean_squared_error(z_train, z_pred))\n# Evaluate the variance score (1.0 being the best possible)\nprint('Variance score: %.2f' % r2_score(z_train, z_pred))", "Coefficients: \n [-0.03229392  0.05623289]\nMean squared error: 0.13\nVariance score: 0.91\n" ], [ "## Visualize the scatter plot in 3D\nfig = plt.figure(figsize=(12, 10))\nax = fig.add_subplot(111, projection = '3d')\n\n# Create a mesh on which we will plot the plane\nxx, yy = np.meshgrid(np.linspace(0, 100, num=10), np.linspace(0, 50, num=10))\n\n# compute the plane values for the x and y points\nnuevoX = (regr2.coef_[0] * xx)\nnuevoY = (regr2.coef_[1] * yy) \n# compute the corresponding z values; we must add the intercept\nz = (nuevoX + nuevoY + regr2.intercept_)\n\nax.set_xlabel('% clay')\nax.set_ylabel('Thickness in m')\nax.set_zlabel('Permeability m/day')\nax.set_title('Linear Regression with Multiple Variables') \n\nax.scatter(XY_train[:, 0], XY_train[:, 1], z_train)\nax.plot_surface(xx, yy, z, alpha=0.4, cmap='hot')\nax.scatter(XY_train[:, 0], XY_train[:, 1], z_pred)\nax.view_init(elev=10, azim=25)\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Conclusions\n\nWe have shown the development and implementation of linear regression in Python notebooks with scikit-learn.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
ec8565c4ed2a8d5fb9df60ca7e5721562892c843
30,018
ipynb
Jupyter Notebook
Chapter2/defense_posion_attacks/provenance_defence.ipynb
PacktPublishing/Designing-Models-for-Responsible-AI
36b60f1e3e9db8b3d2db3ace873dbdee1b076b74
[ "MIT" ]
null
null
null
Chapter2/defense_posion_attacks/provenance_defence.ipynb
PacktPublishing/Designing-Models-for-Responsible-AI
36b60f1e3e9db8b3d2db3ace873dbdee1b076b74
[ "MIT" ]
null
null
null
Chapter2/defense_posion_attacks/provenance_defence.ipynb
PacktPublishing/Designing-Models-for-Responsible-AI
36b60f1e3e9db8b3d2db3ace873dbdee1b076b74
[ "MIT" ]
2
2022-01-17T07:28:22.000Z
2022-01-30T00:12:53.000Z
62.278008
6,780
0.770771
[ [ [ "# Adversarial Robustness Toolbox for Provenance-Based Defenses\n\nIn this notebook we will learn how to use ART to defend against adversarial attacks in IoT settings.\n\nWhen data is collected from multiple sources, we can use **provenance features** to track the origin of that data. Using those features, we can defend models against malicious attacks. We will also show how to use the Reject on Negative Impact (RONI) defense method within ART.", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os, sys\nfrom os.path import abspath\n\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nfrom art.attacks.poisoning.poisoning_attack_svm import PoisoningAttackSVM\nfrom art.estimators.classification.scikitlearn import ScikitlearnSVC\nfrom art.defences.detector.poison import ProvenanceDefense, RONIDefense\nfrom art.utils import load_mnist\nfrom sklearn.svm import SVC\nimport numpy as np\nimport matplotlib.pyplot as plt\nnp.random.seed(301)\n\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "## Set Hyperparameters", "_____no_output_____" ] ], [ [ "num_training = 40\nnum_poison = 5\nnum_valid = 40 # the number of valid examples for the attacker\nnum_trusted = 25 # the number of trusted data for the defender\nnum_devices = 4 # last device is inserting poison\nkernel = 'linear' # available kernels are 'rbf', 'poly' and 'linear'", "_____no_output_____" ] ], [ [ "## Load and transform MNIST data\n\nIn this examples we are training a classifer that differentiates between the number 4 and the number 0. The training data is split between the first `num_devices - 1` devices and the poisoned training data is the added to the last device. Quantity fo data and model kernel are specified by hyperparameters", "_____no_output_____" ] ], [ [ "(x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()\ny_train = np.argmax(y_train, axis=1)\ny_test = np.argmax(y_test, axis=1)\nzero_or_four = np.logical_or(y_train == 4, y_train == 0)\nx_train = x_train[zero_or_four]\ny_train = y_train[zero_or_four]\ntr_labels = np.zeros((y_train.shape[0], 2))\ntr_labels[y_train == 0] = np.array([1, 0])\ntr_labels[y_train == 4] = np.array([0, 1])\ny_train = tr_labels\n\n\nzero_or_four = np.logical_or(y_test == 4, y_test == 0)\nx_test = x_test[zero_or_four]\ny_test = y_test[zero_or_four]\nte_labels = np.zeros((y_test.shape[0], 2))\nte_labels[y_test == 0] = np.array([1, 0])\nte_labels[y_test == 4] = np.array([0, 1])\ny_test = te_labels\n\nn_samples_train = x_train.shape[0]\nn_features_train = x_train.shape[1] * x_train.shape[2] * x_train.shape[3]\nn_samples_test = x_test.shape[0]\nn_features_test = x_test.shape[1] * x_test.shape[2] * x_test.shape[3]\n\nx_train = x_train.reshape(n_samples_train, n_features_train)\nx_test = x_test.reshape(n_samples_test, n_features_test)\nx_train = x_train[:num_training]\ny_train = y_train[:num_training]\n\ntrusted_data = x_test[:num_trusted]\ntrusted_labels = y_test[:num_trusted]\nx_test = x_test[num_trusted:]\ny_test = y_test[num_trusted:]\nvalid_data = x_test[:num_valid]\nvalid_labels = y_test[:num_valid]\nx_test = x_test[num_valid:]\ny_test = y_test[num_valid:]", "_____no_output_____" ] ], [ [ "### Add provenance data and poison samples\n\n*Note:* In real application scenarios, provenance data is also loaded. 
Provenance data is generated for this experiment for demonstration purposes.\n\nThis code will take longer to run depending on the number of poison samples you allow. Each sample is generated independently, iteratively maximizing the generalization loss of the original SVM.", "_____no_output_____" ] ], [ [ "# assign random provenance features to the original training points\nclean_prov = np.random.randint(num_devices - 1, size=x_train.shape[0])\np_train = np.eye(num_devices)[clean_prov]\n\nno_defense = ScikitlearnSVC(model=SVC(kernel=kernel), clip_values=(min_, max_))\nno_defense.fit(x=x_train, y=y_train)\n# poison a predetermined number of points starting at training points\npoison_points = np.random.randint(no_defense._model.support_vectors_.shape[0], size=num_poison)\nall_poison_init = np.copy(no_defense._model.support_vectors_[poison_points])\npoison_labels = np.array([1,1]) - no_defense.predict(all_poison_init)\n\n\nsvm_attack = PoisoningAttackSVM(classifier=no_defense, x_train=x_train, y_train=y_train,\n                                step=0.1, eps=1.0, x_val=valid_data, y_val=valid_labels)\n\npoisoned_data, _ = svm_attack.poison(all_poison_init, y=poison_labels)\n\n# Stack on poison to data and add provenance of bad actor\nall_data = np.vstack([x_train, poisoned_data])\nall_labels = np.vstack([y_train, poison_labels])\npoison_prov = np.zeros((num_poison, num_devices))\npoison_prov[:,num_devices - 1] = 1\nall_p = np.vstack([p_train, poison_prov])
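\n\n# (Added sanity check, not part of the original notebook: the poisoned data,\n# labels, and provenance records must stay aligned row for row.)\nassert all_data.shape[0] == all_labels.shape[0] == all_p.shape[0] == num_training + num_poison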
", "_____no_output_____" ] ], [ [ "# Train clean classifier and poisoned classifier\nperfect_defense = ScikitlearnSVC(model=SVC(kernel=kernel), clip_values=(min_, max_))\nperfect_defense.fit(x=x_train, y=y_train)\nno_defense.fit(x=all_data, y=all_labels)", "_____no_output_____" ], [ "perf_acc = np.average(np.all(perfect_defense.predict(x_test) == y_test, axis=1)) * 100\nno_acc = np.average(np.all(no_defense.predict(x_test) == y_test, axis=1)) * 100\nprint(\"Perfect defense accuracy (trusted set) {0:.2f}%\".format(perf_acc))\nprint(\"No defense accuracy (trusted set) {0:.2f}%\".format(no_acc))", "Perfect defense accuracy (trusted set) 97.68%\nNo defense accuracy (trusted set) 80.23%\n" ] ], [ [ "## Apply Defenses\n\nWe will apply the following defenses to this poisoning attack:\n* **Perfect Defense** — All poison is detected and model is trained on clean data.\n* **Provenance-Based Defense with Trusted Data** — Poison is detected using the provenance defense algorithm specified above.\n* **Provenance-Baseed Defense without Trusted Data** — Assuming no validation data, just check each data segment for suspected poison.\n* **RONI Defense w/ Calibration** — Poison is detecting using RONI defense method (see below).\n* **RONI Defense w/o Calibration** — Suspicious poison is found by a threshold epsilon value\n* **No defense** — Model is trained with poisoned data\n\n### RONI Defense\n", "_____no_output_____" ] ], [ [ "from IPython.display import HTML\nHTML('<img src=\"../utils/data/images/roni.gif\">')", "_____no_output_____" ] ], [ [ "The [RONI (Reject on Negative Impact) defense method](https://www.usenix.org/legacy/event/leet08/tech/full_papers/nelson/nelson_html/#SECTION00051000000000000000) checks the empirical effect of each point on the performance of the classifier and removes suspicious points. Our is similar except instead of checking each point we check each set of points with the same provenance feature. We evaluate the defense with both the provenance defense and the perfect defense", "_____no_output_____" ] ], [ [ "%%capture\nroni_defense = RONIDefense(no_defense, all_data, all_labels, trusted_data, trusted_labels)\nroni_defense.detect_poison()\nroni_no_cal = RONIDefense(no_defense, all_data, all_labels, trusted_data, trusted_labels)\nroni_no_cal.detect_poison()", "_____no_output_____" ] ], [ [ "### Provenance Defense", "_____no_output_____" ] ], [ [ "from IPython.display import HTML\nHTML('<img src=\"../utils/data/images/prov_defense.gif\">')", "_____no_output_____" ] ], [ [ "The provenenace defense method checks the effect of removing segments of the data that may come a bad actor intentionally poisoning the data. When a sector is found that is potentially poisonous, it is flagged as suspicious.\n\nIn the trusted data version of the algorithm, the defender has some handpicked trusted data to test the performance of the model. 
In the version of the algorithm without trusted data, a random subset of training points from all segments is used as the test set.", "_____no_output_____" ] ], [ [ "%%capture\nprov_defense_trust = ProvenanceDefense(no_defense, all_data, all_labels, all_p, \n                                       x_val=trusted_data, y_val=trusted_labels, eps=0.1)\nprov_defense_trust.detect_poison()\nprov_defense_no_trust = ProvenanceDefense(no_defense, all_data, all_labels, all_p, eps=0.1)\nprov_defense_no_trust.detect_poison()", "_____no_output_____" ] ], [ [ "## Evaluate Defenses", "_____no_output_____" ] ], [ [ "real_is_clean = np.array([1 if i < num_training else 0 for i in range(len(all_data))])\ndef evaluate_defense(defense, name):\n    print(\"\\nEvaluating results of {} defense...\".format(name))\n    pc_tn = np.average(real_is_clean[:num_training] == defense.is_clean_lst[:num_training]) * 100\n    pc_tp = np.average(real_is_clean[num_training:] == defense.is_clean_lst[num_training:]) * 100\n    print(\"Percent of normal points correctly labeled (True Negative): {0:.2f}%\".format(pc_tn))\n    print(\"Percent of poison points correctly labeled (True Positive): {0:.2f}%\".format(pc_tp))\n    \n    classifier = ScikitlearnSVC(model=SVC(kernel=kernel), clip_values=(min_, max_))\n    mask = np.array(defense.is_clean_lst) == 1\n    classifier.fit(all_data[mask], all_labels[mask])\n    acc = np.average(np.all(classifier.predict(x_test) == y_test, axis=1)) * 100\n    print(\"Accuracy on test set of classifier trained on filtered data: {0:.2f}%\".format(acc))", "_____no_output_____" ], [ "evaluate_defense(roni_no_cal, \"RONI w/o Calibration\")\nevaluate_defense(roni_defense, \"RONI w/ Calibration\")\nevaluate_defense(prov_defense_no_trust, \"Provenance Defense w/o Trusted Data\")\nevaluate_defense(prov_defense_trust, \"Provenance Defense w/ Trusted Data\")", "\nEvaluating results of RONI w/o Calibration defense...\nPercent of normal points correctly labeled (True Negative): 100.00%\nPercent of poison points correctly labeled (True Positive): 40.00%\nAccuracy on test set of classifier trained on filtered data: 83.97%\n\nEvaluating results of RONI w/ Calibration defense...\nPercent of normal points correctly labeled (True Negative): 97.50%\nPercent of poison points correctly labeled (True Positive): 40.00%\nAccuracy on test set of classifier trained on filtered data: 84.92%\n\nEvaluating results of Provenance Defense w/o Trusted Data defense...\nPercent of normal points correctly labeled (True Negative): 62.50%\nPercent of poison points correctly labeled (True Positive): 0.00%\nAccuracy on test set of classifier trained on filtered data: 82.45%\n\nEvaluating results of Provenance Defense w/ Trusted Data defense...\nPercent of normal points correctly labeled (True Negative): 100.00%\nPercent of poison points correctly labeled (True Positive): 100.00%\nAccuracy on test set of classifier trained on filtered data: 97.68%\n" ] ], [ [ "In [the paper](https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8473440), we show that with only limited amounts of trusted data, you can still have a very powerful defense that is able to detect bad actors. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec856de8e6172fc893ee597da808d6a8f70258df
64,958
ipynb
Jupyter Notebook
doc/source/misc/kktpm.ipynb
Alaya-in-Matrix/pymoo
02d6e7085f5fe88dbd56b2a9f5173abe20c54caf
[ "Apache-2.0" ]
2
2021-03-28T03:06:35.000Z
2021-03-28T03:40:08.000Z
doc/source/misc/kktpm.ipynb
Alaya-in-Matrix/pymoo
02d6e7085f5fe88dbd56b2a9f5173abe20c54caf
[ "Apache-2.0" ]
null
null
null
doc/source/misc/kktpm.ipynb
Alaya-in-Matrix/pymoo
02d6e7085f5fe88dbd56b2a9f5173abe20c54caf
[ "Apache-2.0" ]
1
2022-03-31T08:19:13.000Z
2022-03-31T08:19:13.000Z
360.877778
59,768
0.931741
[ [ [ ".. _nb_kktpm:", "_____no_output_____" ], [ "## Karush Kuhn Tucker Proximity Measure (KKTPM)\n\n\n", "_____no_output_____" ], [ "In 2016, Deb and Abouhawwash proposed Karush Kuhn Tucker Proximity Measure (KKTPM) <cite data-cite= “kktpm1”></cite>, a metric that can measure how close a point is from being “an optimum”. The smaller the metric, the closer the point. This does not require the Pareto front to be known, but the gradient information needs to be approximated.\nTheir metric applies to both single objective and multi-objective optimization problems. \n\nIn a single objective problem, the metric shows how close a point is from being a “local optimum”, while in multi-objective problems, the metric shows how close a point is from being a “local Pareto point”. Exact calculations of KKTPM for each point requires solving a whole optimization problem, which is extremely time-consuming. To avoid this problem, the authors of the original work again proposed several approximations to the true KKTPM, namely Direct KKTPM, Projected KKTPM, Adjusted KKTPM, and Approximate KKTPM. Approximate KKTPM is simply the average of the former three and is what we call simply “KKTPM”. Moreover, they were able to show that Approximate KKTPM is reliable and can be used in place of the exact one <cite data-cite= “kktpm2”></cite>.", "_____no_output_____" ], [ "<div style=\"display: block;margin-left: auto;margin-right: auto;width: 50%;\">\n![nsga2_crowding](../resources/images/kktpm.png)\n</div>", "_____no_output_____" ], [ "Let us now see how to use pymoo to calculate the KKTPM for point:", "_____no_output_____" ] ], [ [ "from pymoo.factory import get_problem\nproblem = get_problem(\"zdt1\", n_var=10)", "_____no_output_____" ] ], [ [ "For instance, the code below calculates the KKTPM metric for randomly sampled points for the given an example;", "_____no_output_____" ] ], [ [ "from pymoo.performance_indicator.kktpm import KKTPM\nfrom pymoo.operators.sampling.random_sampling import FloatRandomSampling\n\nX = FloatRandomSampling().do(problem, 100).get(\"X\")\nkktpm = KKTPM().calc(X, problem)", "_____no_output_____" ] ], [ [ "Moreover, a whole run of a genetic algorithm can be analyzed by storing each generation's history and then calculating the KKTPM metric for each of the points:", "_____no_output_____" ] ], [ [ "from pymoo.algorithms.nsga2 import NSGA2\nfrom pymoo.factory import get_problem\nfrom pymoo.optimize import minimize\nfrom pymoo.visualization.scatter import Scatter\n\nalgorithm = NSGA2(pop_size=100, eliminate_duplicates=True)\n\nres = minimize(problem,\n algorithm,\n ('n_gen', 100),\n seed=1,\n save_history=True,\n verbose=False)", "_____no_output_____" ], [ "import numpy as np\n_min, _median, _max = [], [], []\n\nfor a in res.history:\n X = a.pop.get(\"X\")\n kktpm = KKTPM().calc(X, problem)\n \n _min.append(kktpm.min())\n _median.append(np.median(kktpm))\n _max.append(kktpm.max())", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nI = np.arange(len(res.history))\nplt.plot(I, _min, label=\"Min\")\nplt.plot(I, _median, label=\"Median\")\nplt.plot(I, _max, label=\"Max\")\nplt.yscale(\"log\")\nplt.legend()\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec857359f948b7041d2058ef3e3772ad4108dd1c
46,732
ipynb
Jupyter Notebook
notebooks/landlab/.ipynb_checkpoints/landlab-fault-scarp-for-gsa-workshop-checkpoint.ipynb
csdms/csdms2021_landlab_terrainbento
cf32cd2867862693929905c20771eb45145ccd20
[ "MIT" ]
4
2021-05-20T18:05:00.000Z
2021-05-20T18:05:34.000Z
notebooks/landlab/.ipynb_checkpoints/landlab-fault-scarp-for-gsa-workshop-checkpoint.ipynb
csdms/csdms2021_landlab_terrainbento
cf32cd2867862693929905c20771eb45145ccd20
[ "MIT" ]
null
null
null
notebooks/landlab/.ipynb_checkpoints/landlab-fault-scarp-for-gsa-workshop-checkpoint.ipynb
csdms/csdms2021_landlab_terrainbento
cf32cd2867862693929905c20771eb45145ccd20
[ "MIT" ]
null
null
null
37.059477
686
0.6221
[ [ [ "<a href=\"http://landlab.github.io\"><img style=\"float: left\" src=\"../media/landlab_header.png\"></a>", "_____no_output_____" ], [ "# Introduction to Landlab: Creating a simple 2D scarp diffusion model", "_____no_output_____" ], [ "<hr>\n<small>For more Landlab tutorials, click here: <a href=\"https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html\">https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html</a></small>\n<hr>\n", "_____no_output_____" ], [ "NOTE: this tutorial is based on the generic Landlab Fault Scarp Tutorial, but it also adds a set of exercises for use in an online GSA short cource in October 2020.\n\nThis tutorial illustrates how you can use Landlab to construct a simple two-dimensional numerical model on a regular (raster) grid, using a simple forward-time, centered-space numerical scheme. The example is the erosional degradation of an earthquake fault scarp, and which evolves over time in response to the gradual downhill motion of soil. Here we use a simple \"geomorphic diffusion\" model for landform evolution, in which the downhill flow of soil is assumed to be proportional to the (downhill) gradient of the land surface multiplied by a transport coefficient.\n\nWe start by importing the [numpy](https://numpy.org) and [matplotlib](https://matplotlib.org) libraries:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Part 1: 1D version using numpy\n\nThis example uses a finite-volume numerical solution to the 2D diffusion equation. The 2D diffusion equation in this case is derived as follows. Continuity of mass states that:\n\n$\\frac{\\partial z}{\\partial t} = -\\nabla \\cdot \\mathbf{q}_s$,\n\nwhere $z$ is elevation, $t$ is time, the vector $\\mathbf{q}_s$ is the volumetric soil transport rate per unit width, and $\\nabla$ is the divergence operator (here in two dimensions). (Note that we have omitted a porosity factor here; its effect will be subsumed in the transport coefficient). The sediment flux vector depends on the slope gradient:\n\n$\\mathbf{q}_s = -D \\nabla z$,\n\nwhere $D$ is a transport-rate coefficient---sometimes called *hillslope diffusivity*---with dimensions of length squared per time. Combining the two, and assuming $D$ is uniform, we have a classical 2D diffusion equation:\n\n$\\frac{\\partial z}{\\partial t} = -\\nabla^2 z$.\n\nIn this first example, we will create a our 1D domain in $x$ and $z$, and set a value for $D$.\n\nThis means that the equation we solve will be in 1D. \n\n$\\frac{\\partial z}{\\partial t} = -\\frac{\\partial q_s}{\\partial x}$,\n\nwhere \n\n$q_s = -D \\frac{\\partial z}{\\partial x}$\n", "_____no_output_____" ] ], [ [ "dx = 1\nx = np.arange(0, 100, dx, dtype=float)\nz = np.zeros(x.shape, dtype=float)\nD = 0.01", "_____no_output_____" ] ], [ [ "Next we must create our fault by uplifting some of the domain. We will increment all elements of `z` in which `x>50`.", "_____no_output_____" ] ], [ [ "z[x>50] += 100", "_____no_output_____" ] ], [ [ "Finally, we will diffuse our fault for 1,000 years.\n\nWe will use a timestep with a [Courant–Friedrichs–Lewy condition](https://en.wikipedia.org/wiki/Courant–Friedrichs–Lewy_condition) of $C_{cfl}=0.2$. This will keep our solution numerically stable. 
", "_____no_output_____" ] ], [ [ "dt = 0.2 * dx * dx / D\ntotal_time = 1e3\nnts = int(total_time/dt)\nz_orig = z.copy()\nfor i in range(nts):\n    qs = -D * np.diff(z)/dx\n    dzdt = -np.diff(qs)/dx\n    z[1:-1] += dzdt*dt\n\nplt.plot(x, z_orig, label=\"Original Profile\")\nplt.plot(x, z, label=\"Diffused Profile\")\nplt.legend()", "_____no_output_____" ] ], [ [ "The prior example is pretty simple. If this were all you needed to do, you wouldn't need Landlab. \n\nBut what if you wanted...\n\n... to use the same diffusion model in 2D instead of 1D.\n\n... to use an irregular grid (in 1 or 2D). \n\n... to combine the diffusion model with a more complex model. \n\n... to reuse a more complex model over and over again with different boundary conditions.\n\nThese are the sorts of problems that Landlab was designed to solve. \n\nIn the next sections we will introduce some of the core capabilities of Landlab. \n\nIn Part 2 we will use the RasterModelGrid, fields, and a numerical utility for calculating flux divergence. \n\nIn Part 3 we will use the HexagonalModelGrid. \n\nIn Part 4 we will use the LinearDiffuser component. \n\n## Part 2: 2D version using Landlab's Model Grids\n\nThe Landlab model grids are data structures that represent the model domain (the variable `x` in our prior example). Here we will use `RasterModelGrid`, which creates a grid with regularly spaced square grid elements. The RasterModelGrid knows how the elements are connected and how far apart they are.\n\nLet's start by creating an instance of the RasterModelGrid class. First we need to import it. ", "_____no_output_____" ] ], [ [ "from landlab import RasterModelGrid", "_____no_output_____" ] ], [ [ "\n### (a) Explore the RasterModelGrid\n\nBefore we make a RasterModelGrid for our fault example, let's explore the Landlab model grid. \n\nLandlab treats the grid as a \"dual\" graph: two sets of points, lines, and polygons that represent 2D space. \n\nThe first graph considers points called \"nodes\" that are connected by lines called \"links\". The area that surrounds each node is called a \"cell\".\n\nFirst, the nodes:", "_____no_output_____" ] ], [ [ "from landlab.plot.graph import plot_graph\ngrid = RasterModelGrid((4, 5), xy_spacing=(3,4))\nplot_graph(grid, at=\"node\")", "_____no_output_____" ] ], [ [ "You can see that the nodes are points and they are numbered with unique IDs from lower left to upper right. \n\nNext, the links:", "_____no_output_____" ] ], [ [ "plot_graph(grid, at=\"link\")", "_____no_output_____" ] ], [ [ "which are lines that connect the nodes and each have a unique ID number. \n\nAnd finally, the cells:", "_____no_output_____" ] ], [ [ "plot_graph(grid, at=\"cell\")", "_____no_output_____" ] ], [ [ "which are polygons centered around the nodes. \n\nThe grid is a \"dual\" graph because Landlab also keeps track of a second set of points, lines, and polygons (\"corners\", \"faces\", and \"patches\"). We will not focus on them further.", "_____no_output_____" ], [ "### *Exercises for section 2a*\n\n(2a.1) Create an instance of a `RasterModelGrid` with 5 rows and 7 columns, with a spacing between nodes of 10 units. 
Plot the node layout, and identify the ID number of the center-most node.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2a.1 here)", "_____no_output_____" ] ], [ [ "(2a.2) Find the ID of the cell that contains this node.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2a.2 here)", "_____no_output_____" ] ], [ [ "(2a.3) Find the ID of the horizontal link that connects to the last node on the right in the middle column.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2a.3 here)", "_____no_output_____" ] ], [ [ "### (b) Use the RasterModelGrid for 2D diffusion \n\nLet's continue by making a new grid that is bigger. We will use this for our next fault diffusion example.\n\nThe syntax in the next line says: create a new *RasterModelGrid* object called **mg**, with 25 rows, 40 columns, and a grid spacing of 10 m.", "_____no_output_____" ] ], [ [ "mg = RasterModelGrid((25, 40), 10.0)", "_____no_output_____" ] ], [ [ "Note the use of object-oriented programming here. `RasterModelGrid` is a class; `mg` is a particular instance of that class, and it contains all the data necessary to fully describe the topology and geometry of this particular grid.\n\nNext we'll add a *data field* to the grid, to represent the elevation values at grid nodes. The \"dot\" syntax below indicates that we are calling a function (or *method*) that belongs to the *RasterModelGrid* class, and will act on data contained in **mg**. The arguments indicate that we want the data elements attached to grid nodes (rather than links, for example), and that we want to name this data field `topographic__elevation`. The `add_zeros` method returns the newly created NumPy array.", "_____no_output_____" ] ], [ [ "z = mg.add_zeros('topographic__elevation', at='node')", "_____no_output_____" ] ], [ [ "The above line of code creates space in memory to store 1,000 floating-point values, which will represent the elevation of the land surface at each of our 1,000 grid nodes.", "_____no_output_____" ], [ "Let's plot the positions of all the grid nodes. The nodes' *(x,y)* positions are stored in the arrays `mg.x_of_node` and `mg.y_of_node`, respectively.", "_____no_output_____" ] ], [ [ "plt.plot(mg.x_of_node, mg.y_of_node, '.')", "_____no_output_____" ] ], [ [ "If we bothered to count, we'd see that there are indeed 1,000 grid nodes, and a corresponding number of `z` values:", "_____no_output_____" ] ], [ [ "len(z)", "_____no_output_____" ] ], [ [ "Now for some tectonics. Let's say there's a fault trace that angles roughly east-northeast. We can describe the trace with the equation for a line. One trick here: by using `mg.x_of_node`, in the line of code below, we are calculating a *y* (i.e., north-south) position of the fault trace for each grid node---meaning that this is the *y* coordinate of the trace at the *x* coordinate of a given node.", "_____no_output_____" ] ], [ [ "fault_trace_y = 50.0 + 0.25 * mg.x_of_node", "_____no_output_____" ] ], [ [ "Here comes the earthquake. 
For all the nodes north of the fault (i.e., those with a *y* coordinate greater than the corresponding *y* coordinate of the fault trace), we'll add elevation equal to 10 meters plus a centimeter for every meter east along the grid (just to make it interesting):", "_____no_output_____" ] ], [ [ "z[mg.y_of_node >\n fault_trace_y] += 10.0 + 0.01 * mg.x_of_node[mg.y_of_node > fault_trace_y]", "_____no_output_____" ] ], [ [ "(A little bit of Python under the hood: the statement `mg.y_of_node > fault_trace_y` creates a 1000-element long boolean array; placing this within the index brackets will select only those array entries that correspond to `True` in the boolean array)\n\nLet's look at our newly created initial topography using Landlab's *imshow_grid* plotting function (which we first need to import).", "_____no_output_____" ] ], [ [ "from landlab.plot.imshow import imshow_grid\nimshow_grid(mg, 'topographic__elevation')", "_____no_output_____" ] ], [ [ "To finish getting set up, we will define two parameters: the transport (\"diffusivity\") coefficient, `D`, and the time-step size, `dt`. (The latter is set using the Courant condition for a forward-time, centered-space finite-difference solution; you can find the explanation in most textbooks on numerical methods).", "_____no_output_____" ] ], [ [ "D = 0.01 # m2/yr transport coefficient\ndt = 0.2 * mg.dx * mg.dx / D\ndt", "_____no_output_____" ] ], [ [ "Boundary conditions: for this example, we'll assume that the east and west sides are closed to flow of sediment, but that the north and south sides are open. (The order of the function arguments is east, north, west, south)", "_____no_output_____" ] ], [ [ "mg.set_closed_boundaries_at_grid_edges(True, False, True, False)", "_____no_output_____" ] ], [ [ "*A note on boundaries:* with a Landlab raster grid, all the perimeter nodes are boundary nodes. In this example, there are 24 + 24 + 39 + 39 = 126 boundary nodes. The previous line of code set those on the east and west edges to be **closed boundaries**, while those on the north and south are **open boundaries** (the default). All the remaining nodes are known as **core** nodes. In this example, there are 1000 - 126 = 874 core nodes:", "_____no_output_____" ] ], [ [ "len(mg.core_nodes)", "_____no_output_____" ] ], [ [ "One more thing before we run the time loop: we'll create an array to contain soil flux. In the function call below, the first argument tells Landlab that we want one value for each grid link, while the second argument provides a name for this data *field*:", "_____no_output_____" ] ], [ [ "qs = mg.add_zeros('sediment_flux', at='link')", "_____no_output_____" ] ], [ [ "And now for some landform evolution. We will loop through 25 iterations, representing 50,000 years. On each pass through the loop, we do the following:\n\n1. Calculate, and store in the array `g`, the gradient between each neighboring pair of nodes. These calculations are done on **links**. The gradient value is a positive number when the gradient is \"uphill\" in the direction of the link, and negative when the gradient is \"downhill\" in the direction of the link. On a raster grid, link directions are always in the direction of increasing $x$ (\"horizontal\" links) or increasing $y$ (\"vertical\" links).\n\n2. Calculate, and store in the array `qs`, the sediment flux between each adjacent pair of nodes by multiplying their gradient by the transport coefficient. 
We will only do this for the **active links** (those not connected to a closed boundary, and not connecting two boundary nodes of any type); others will remain as zero.\n\n3. Calculate the resulting net flux at each node (positive=net outflux, negative=net influx). The negative of this array is the rate of change of elevation at each (core) node, so store it in a node array called `dzdt`.\n\n4. Update the elevations for the new time step.", "_____no_output_____" ] ], [ [ "for i in range(25):\n g = mg.calc_grad_at_link(z)\n qs[mg.active_links] = -D * g[mg.active_links]\n dzdt = -mg.calc_flux_div_at_node(qs)\n z[mg.core_nodes] += dzdt[mg.core_nodes] * dt", "_____no_output_____" ] ], [ [ "Let's look at how our fault scarp has evolved.", "_____no_output_____" ] ], [ [ "imshow_grid(mg, 'topographic__elevation')", "_____no_output_____" ] ], [ [ "Notice that we have just created and run a 2D model of fault-scarp creation and diffusion with fewer than two dozen lines of code. How long would this have taken to write in C or Fortran?\n\nWhile it was very easy to write in 1D, writing this in 2D would have required keeping track of the adjacency of the different parts of the grid. This is the primary problem that the Landlab grids are meant to solve. \n\nThink about how difficult this would be to hand code if the grid were irregular or hexagonal. In order to conserve mass and implement the differential equation you would need to know how nodes were connected, how long the links were, and how big each cell was.\n\nWe work through such an example after the next section. ", "_____no_output_____" ], [ "### *Exercises for section 2b*\n\n(2b.1) Create an instance of a `RasterModelGrid` called `mygrid`, with 16 rows and 25 columns, with a spacing between nodes of 5 meters. Use the `plot` function in the `matplotlib` library to make a plot that shows the position of each node marked with a dot (hint: see the plt.plot() example above).", "_____no_output_____" ] ], [ [ "# (enter your solution to 2b.1 here)", "_____no_output_____" ] ], [ [ "(2b.2) Query the grid variables `number_of_nodes` and `number_of_core_nodes` to find out how many nodes are in your grid, and how many of them are core nodes.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2b.2 here)", "_____no_output_____" ] ], [ [ "(2b.3) Add a new field to your grid, called `temperature` and attached to nodes. Have the initial values be all zero.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2b.3 here)", "_____no_output_____" ] ], [ [ "(2b.4) Change the temperature of nodes in the top (north) half of the grid to be 10 degrees C. Use the `imshow_grid` function to display a shaded image of the temperature field.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2b.4 here)", "_____no_output_____" ] ], [ [ "(2b.5) Use the grid function `set_closed_boundaries_at_grid_edges` to assign closed boundaries to the right and left sides of the grid.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2b.5 here)", "_____no_output_____" ] ], [ [ "(2b.6) Create a new field of zeros called `heat_flux` and attached to links. Using the `number_of_links` grid variable, verify that your new field array has the correct number of items. ", "_____no_output_____" ] ], [ [ "# (enter your solution to 2b.6 here)", "_____no_output_____" ] ], [ [ "(2b.7) Use the `calc_grad_at_link` grid function to calculate the temperature gradients at all the links in the grid. 
Given the node spacing and the temperatures you assigned to the top versus bottom grid nodes, what do you expect the maximum temperature gradient to be? Print the values in the gradient array to verify that this is indeed the maximum temperature gradient.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2b.7 here)", "_____no_output_____" ] ], [ [ "(2b.8) Back to hillslopes: Reset the values in the elevation field of the grid `mg` to zero. Then copy and paste the time loop above (i.e., the block in Section 2b that starts with `for i in range(25):`) below. Modify the last line to add uplift of the hillslope material at a rate `uplift_rate` = 0.0001 m/yr (hint: the amount of uplift in each iteration should be the uplift rate times the time-step duration). Then run the block and plot the resulting topography. Try experimenting with different uplift rates and different values of `D`.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2b.8 here)", "_____no_output_____" ] ], [ [ "### (c) What's going on under the hood?\n\nThis example uses a finite-volume numerical solution to the 2D diffusion equation. The 2D diffusion equation in this case is derived as follows. Continuity of mass states that:\n\n$\\frac{\\partial z}{\\partial t} = -\\nabla \\cdot \\mathbf{q}_s$,\n\nwhere $z$ is elevation, $t$ is time, the vector $\\mathbf{q}_s$ is the volumetric soil transport rate per unit width, and $\\nabla$ is the divergence operator (here in two dimensions). (Note that we have omitted a porosity factor here; its effect will be subsumed in the transport coefficient). The sediment flux vector depends on the slope gradient:\n\n$\\mathbf{q}_s = -D \\nabla z$,\n\nwhere $D$ is a transport-rate coefficient---sometimes called *hillslope diffusivity*---with dimensions of length squared per time. Combining the two, and assuming $D$ is uniform, we have a classical 2D diffusion equation:\n\n$\\frac{\\partial z}{\\partial t} = D \\nabla^2 z$.\n\nFor the numerical solution, we discretize $z$ at a series of *nodes* on a grid. The example in this notebook uses a Landlab *RasterModelGrid*, in which every interior node sits inside a cell of width $\\Delta x$, but we could alternatively have used any grid type that provides nodes, links, and cells.\n\nThe gradient and sediment flux vectors will be calculated at the *links* that connect each pair of adjacent nodes. These links correspond to the mid-points of the cell faces, and the values that we assign to links represent the gradients and fluxes, respectively, along the faces of the cells.\n\nThe flux divergence, $\\nabla \\cdot \\mathbf{q}_s$, will be calculated by summing, for every cell, the total volume inflows and outflows at each cell face, and dividing the resulting sum by the cell area. Note that for a regular, rectilinear grid, as we use in this example, this finite-volume method is equivalent to a finite-difference method.\n\nTo advance the solution in time, we will use a simple explicit, forward-difference method. This solution scheme for a given node $i$ can be written:\n\n$\\frac{z_i^{t+1} - z_i^t}{\\Delta t} = -\\frac{1}{A_i} \\sum\\limits_{j=1}^{N_i} \\delta (l_{ij}) q_s (l_{ij}) \\lambda(l_{ij})$.\n\nHere the superscripts refer to time steps, $\\Delta t$ is time-step size, $q_s(l_{ij})$ is the sediment flux per width associated with the link that crosses the $j$-th face of the cell at node $i$, $\\lambda(l_{ij})$ is the width of the cell face associated with that link ($=\\Delta x$ for a regular uniform grid), and $N_i$ is the number of active links that connect to node $i$. The variable $\\delta(l_{ij})$ contains either +1 or -1: it is +1 if link $l_{ij}$ is oriented away from the node (in which case positive flux would represent material leaving its cell), or -1 if instead the link \"points\" into the cell (in which case positive flux means material is entering).\n\nTo get the fluxes, we first calculate the *gradient*, $G$, at each link, $k$:\n\n$G(k) = \\frac{z(H_k) - z(T_k)}{L_k}$.\n\nHere $H_k$ refers to the *head node* associated with link $k$, $T_k$ is the *tail node* associated with link $k$. Each link has a direction: from the tail node to the head node. The length of link $k$ is $L_k$ (equal to $\\Delta x$ in a regular uniform grid). What the above equation says is that the gradient in $z$ associated with each link is simply the difference in $z$ value between its two endpoint nodes, divided by the distance between them. The gradient is positive when the value at the head node (the \"tip\" of the link) is greater than the value at the tail node, and vice versa.\n\nThe calculation of gradients in $z$ at the links is accomplished with the `calc_grad_at_link` function. The sediment fluxes are then calculated by multiplying the link gradients by $-D$. Once the fluxes at links have been established, the `calc_flux_div_at_node` function performs the summation of fluxes.
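\n\nTo make the link-and-cell bookkeeping concrete, here is a small worked example in the same spirit as the exercises below (an added sketch, not part of the original tutorial; the grid size and numbers are illustrative, and the names `demo`, `zdemo`, `grad`, `unit_flux`, and `divergence` are chosen so as not to clobber the variables used elsewhere in this notebook):\n\n```python\nimport numpy as np\nfrom landlab import RasterModelGrid\n\ndemo = RasterModelGrid((3, 3), 10.0)  # one core node surrounded by 8 boundary nodes\nzdemo = np.zeros(demo.number_of_nodes)\nzdemo[4] = 1.0  # raise the center node by 1 m\n\ngrad = demo.calc_grad_at_link(zdemo)  # one gradient per link, in m/m\nunit_flux = -0.01 * grad  # q_s = -D * grad, with D = 0.01 m2/yr\ndivergence = demo.calc_flux_div_at_node(unit_flux)\n\n# Four links leave the center node, each with |grad| = 1/10 = 0.1, so each face\n# carries 0.01 * 0.1 * 10 = 0.01 m3/yr of outflux; the cell area is 100 m2, so\n# divergence[4] = 4 * 0.01 / 100 = 0.0004 per year.\nprint(divergence[4])\n```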
", "_____no_output_____" ], [ "### *Exercises for section 2c*\n\n(2c.1) Make a 3x3 `RasterModelGrid` called `tinygrid`, with a cell spacing of 2 m. Use the `plot_graph` function to display the nodes and their ID numbers.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2c.1 here)", "_____no_output_____" ] ], [ [ "(2c.2) Give your `tinygrid` a node field called `height` and set the height of the center-most node to 0.5. Use `imshow_grid` to display the height field.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2c.2 here)", "_____no_output_____" ] ], [ [ "(2c.3) The grid should have 12 links (extra credit: verify this with `plot_graph`). When you compute gradients, which of these links will have non-zero gradients? What will the absolute value(s) of these gradients be? Which (if any) will have positive gradients and which negative? To codify your answers, make a 12-element numpy array that contains your predicted gradient value for each link.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2c.3 here)", "_____no_output_____" ] ], [ [ "(2c.4) Test your prediction by running the `calc_grad_at_link` function on your tiny grid. Print the resulting array and compare it with your predictions.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2c.4 here)", "_____no_output_____" ] ], [ [ "(2c.5) Suppose the flux of soil per unit cell width is defined as -0.01 times the height gradient. What would the flux be at those links that have non-zero gradients? 
Test your prediction by creating and printing a new array whose values are equal to -0.01 times the link-gradient values.", "_____no_output_____" ] ], [ [ "# (enter your solution to 2c.5 here)", "_____no_output_____" ] ], [ [ "(2c.6) Consider the net soil accumulation or loss rate around the center-most node in your tiny grid (which is the only one that has a cell). The *divergence* of soil flux can be represented numerically as the sum of the total volumetric soil flux across each of the cell's four faces. What is the flux across each face? (Hint: multiply by face width) What do they add up to? Test your prediction by running the grid function `calc_flux_div_at_node` (hint: pass your unit flux array as the argument). What are the units of the divergence values returned by the `calc_flux_div_at_node` function?", "_____no_output_____" ] ], [ [ "# (enter your solution to 2c.6 here)", "_____no_output_____" ] ], [ [ "## Part 3: Hexagonal grid\n\nNext we will use a non-raster Landlab grid.\n\nWe start by making a hexagonal grid with 25 rows and 40 columns of nodes spaced 10 m apart, laid out in a rectangular footprint. We then add zeros to our grid at a field called \"topographic__elevation\" and plot the node locations. \n\nNote that the syntax here is exactly the same as in the RasterModelGrid example (once the grid has been created).", "_____no_output_____" ] ], [ [ "from landlab import HexModelGrid\n\nmg = HexModelGrid((25, 40), 10, node_layout=\"rect\")\nz = mg.add_zeros('topographic__elevation', at='node')\nplt.plot(mg.x_of_node, mg.y_of_node, '.')", "_____no_output_____" ] ], [ [ "Next we create our fault trace and uplift the hanging wall. \n\nWe can plot just like we did with the RasterModelGrid. ", "_____no_output_____" ] ], [ [ "fault_trace_y = 50.0 + 0.25 * mg.x_of_node\nz[mg.y_of_node >\n fault_trace_y] += 10.0 + 0.01 * mg.x_of_node[mg.y_of_node > fault_trace_y]\nimshow_grid(mg, \"topographic__elevation\")", "_____no_output_____" ] ], [ [ "And we can use the same code as before to create a diffusion model!\n\nLandlab supports multiple grid types. You can read more about them [here](https://landlab.readthedocs.io/en/latest/reference/grid/index.html).", "_____no_output_____" ] ], [ [ "qs = mg.add_zeros('sediment_flux', at='link')\nfor i in range(25):\n g = mg.calc_grad_at_link(z)\n qs[mg.active_links] = -D * g[mg.active_links]\n dzdt = -mg.calc_flux_div_at_node(qs)\n z[mg.core_nodes] += dzdt[mg.core_nodes] * dt\nimshow_grid(mg, 'topographic__elevation')", "_____no_output_____" ] ], [ [ "### *Exercises for section 3*\n\n(3.1-6) Repeat the exercises from section 2c, but this time using a hexagonal tiny grid called `tinyhex`. Your grid should have 7 nodes: one core node and 6 perimeter nodes. 
(Hints: use `node_layout = 'hex'`, and make a grid with 3 rows and 2 base-row columns.)", "_____no_output_____" ] ], [ [ "# (enter your solution to 3.1 here)", "_____no_output_____" ], [ "# (enter your solution to 3.2 here)", "_____no_output_____" ], [ "# (enter your solution to 3.3 here)", "_____no_output_____" ], [ "# (enter your solution to 3.4 here)", "_____no_output_____" ], [ "# (enter your solution to 3.5 here)", "_____no_output_____" ], [ "# (enter your solution to 3.6 here)", "_____no_output_____" ] ], [ [ "## Part 4: Landlab Components\n\nFinally we will use a Landlab component called the LinearDiffuser ([link to its documentation](https://landlab.readthedocs.io/en/latest/reference/components/diffusion.html)).\n\nLandlab was designed to have many utilities, like `calc_grad_at_link` and `calc_flux_div_at_node`, to help you make your own models. Sometimes, however, you may use such a model over and over. Then it is nice to be able to put it in its own python class with a standard interface. \n\nThis is what a Landlab Component is. \n\nThere is a whole [tutorial on components](../component_tutorial/component_tutorial.ipynb) and a [page on the User Guide](https://landlab.readthedocs.io/en/latest/user_guide/components.html). For now we will just show you what the prior example looks like if we use the LinearDiffuser. \n\nFirst we import it, set up the grid, and uplift our fault block. ", "_____no_output_____" ] ], [ [ "from landlab.components import LinearDiffuser\n\nmg = HexModelGrid((25, 40), 10, node_layout=\"rect\")\nz = mg.add_zeros('topographic__elevation', at='node')\nfault_trace_y = 50.0 + 0.25 * mg.x_of_node\nz[mg.y_of_node >\n fault_trace_y] += 10.0 + 0.01 * mg.x_of_node[mg.y_of_node > fault_trace_y]", "_____no_output_____" ] ], [ [ "Next we instantiate a LinearDiffuser. We have to tell the component what value to use for the diffusivity. ", "_____no_output_____" ] ], [ [ "ld = LinearDiffuser(mg, linear_diffusivity=D)", "_____no_output_____" ] ], [ [ "Finally we run the component forward in time and plot. Like many Landlab components, the LinearDiffuser has a method called \"run_one_step\" that takes one input, the timestep dt. Calling this method runs the LinearDiffuser forward in time by an increment dt. ", "_____no_output_____" ] ], [ [ "for i in range(25):\n ld.run_one_step(dt)\nimshow_grid(mg, 'topographic__elevation')", "_____no_output_____" ] ], [ [ "### *Exercises for section 4*\n\n(4.1) Repeat the steps above that instantiate and run a `LinearDiffuser` component, but this time give it a `RasterModelGrid`. Use `imshow_grid` to display the topography below.", "_____no_output_____" ] ], [ [ "# (enter your solution to 4.1 here)\nrmg = RasterModelGrid((25, 40), 10)\nz = rmg.add_zeros('topographic__elevation', at='node')\nfault_trace_y = 50.0 + 0.25 * rmg.x_of_node\nz[rmg.y_of_node >\n fault_trace_y] += 10.0 + 0.01 * rmg.x_of_node[rmg.y_of_node > fault_trace_y]\nld = LinearDiffuser(rmg, linear_diffusivity=D)\nfor i in range(25):\n ld.run_one_step(dt)\nimshow_grid(rmg, 'topographic__elevation')", "_____no_output_____" ] ], [ [ "(4.2) Using either a raster or hex grid (your choice) with a `topographic__elevation` field that is initially all zeros, write a modified version of the loop that adds uplift to the core nodes each iteration, at a rate of 0.0001 m/yr. Run the model for enough time to accumulate 10 meters of uplift. Plot the terrain to verify that the land surface height never gets higher than 10 m. 
", "_____no_output_____" ] ], [ [ "# (enter your solution to 4.2 here)\nrmg = RasterModelGrid((40, 40), 10) # while we're at it, make it a bit bigger\nz = rmg.add_zeros('topographic__elevation', at='node')\nld = LinearDiffuser(rmg, linear_diffusivity=D)\nfor i in range(50):\n ld.run_one_step(dt)\n z[rmg.core_nodes] += dt * 0.0001\nimshow_grid(rmg, 'topographic__elevation')", "_____no_output_____" ] ], [ [ "(4.3) Now run the same model long enough that it reaches (or gets very close to) a dynamic equilibrium between uplift and erosion. What shape does the hillslope have? ", "_____no_output_____" ] ], [ [ "# (enter your solution to 4.3 here)\nz[:] = 0.0\nuplift_rate = 0.0001\nfor i in range(4000):\n ld.run_one_step(dt)\n z[rmg.core_nodes] += dt * uplift_rate\nimshow_grid(rmg, 'topographic__elevation')\nplt.figure()\nplt.plot(rmg.x_of_node, z, '.')", "_____no_output_____" ] ], [ [ "(BONUS CHALLENGE QUESTION) Derive an analytical solution for the cross-sectional shape of your steady-state hillslope. Plot this solution next to the actual model's cross-section.", "_____no_output_____" ], [ "#### *SOLUTION (derivation)*\n\n##### Derivation of the original governing equation\n\n(Note: you could just start with the governing equation and go from there, but we include this here for completeness).\n\nConsider a topographic profile across a hillslope. The horizontal coordinate along the profile is $x$, measured from the left side of the profile (i.e., the base of the hill on the left side, where $x=0$). The horizontal coordinate perpendicular to the profile is $y$. Assume that at any time, the hillslope is perfectly symmetrical in the $y$ direction, and that there is no flow of soil in this direction.\n\nNow consider a vertical column of soil somewhere along the profile. The left side of the column is at position $x$, and the right side is at position $x+\\Delta x$, with $\\Delta x$ being the width of the column in the $x$ direction. The width of the column in the $y$ direction is $W$. The height of the column, $z$, is also the height of the land surface at that location. Height is measured relative to the height of the base of the slope (in other words, $z(0) = 0$).\n\nThe total mass of soil inside the column, and above the slope base, is equal to the volume of soil material times its density times the fraction of space that it fills, which is 1 - porosity. Denoting soil particle density by $\\rho$ and porosity by $\\phi$, the soil mass in a column of height $h$ is\n\n$m = (1-\\phi ) \\rho \\Delta x W z$.\n\nConservation of mass dictates that the rate of change of mass equals the rate of mass inflow minus the rate of mass outflow. Assume that mass enters or leaves only by (1) soil creep, and (2) uplift of the hillslope material relative to the elevation of the hillslope base. The rate of the latter, in terms of length per time, will be denoted $U$. The rate of soil creep at a particular position $x$, in terms of bulk volume (including pores) per time per width, will be denoted $q_s(x)$. With this definition in mind, mass conservation dictates that:\n\n$\\frac{\\partial (1-\\phi ) \\rho \\Delta x W z}{\\partial t} = \\rho (1-\\phi ) \\Delta x W U + \\rho (1-\\phi ) q_s(x) - \\rho (1-\\phi ) q_s(x+\\Delta x)$.\n\nAssume that porosity and density are steady and uniform. 
", "_____no_output_____" ] ], [ [ "# (enter your solution to the bonus challenge question here)", "_____no_output_____" ] ], [ [ "Hey, hang on a minute, that's not a very good fit! What's going on? \n\nTurns out our 2D hillslope isn't as tall as the idealized 1D profile because of the boundary conditions: with soil free to flow east and west as well as north and south, the crest ends up lower than it would be if it were perfectly symmetrical in one direction.\n\nSo let's try re-running the numerical model, but this time with the north and south boundaries closed so that the hill shape becomes uniform in the $y$ direction:", "_____no_output_____" ] ], [ [ "rmg = RasterModelGrid((40, 40), 10)\nz = rmg.add_zeros('topographic__elevation', at='node')\nrmg.set_closed_boundaries_at_grid_edges(False, True, False, True) # closed on N and S\nld = LinearDiffuser(rmg, linear_diffusivity=D)\nfor i in range(4000):\n ld.run_one_step(dt)\n z[rmg.core_nodes] += dt * uplift_rate\nimshow_grid(rmg, 'topographic__elevation')", "_____no_output_____" ], [ "plt.plot(rmg.x_of_node, z, '.')\n#plt.plot(x_analytic, z_analytic, 'r')", "_____no_output_____" ] ], [ [ "That's more like it!", "_____no_output_____" ], [ "Congratulations on making it to the end of this tutorial!\n\n### Click here for more <a href=\"https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html\">Landlab tutorials</a>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
ec85806eac8dda66981577a7305701379c8d3993
822,107
ipynb
Jupyter Notebook
docs/source/customizing.ipynb
kramred/nbconvert
c538beb3b5816babb98794a444495d99c221f6fd
[ "BSD-3-Clause-Clear" ]
null
null
null
docs/source/customizing.ipynb
kramred/nbconvert
c538beb3b5816babb98794a444495d99c221f6fd
[ "BSD-3-Clause-Clear" ]
null
null
null
docs/source/customizing.ipynb
kramred/nbconvert
c538beb3b5816babb98794a444495d99c221f6fd
[ "BSD-3-Clause-Clear" ]
null
null
null
32.976615
552
0.490594
[ [ [ "# Customizing nbconvert", "_____no_output_____" ], [ "Under the hood, nbconvert uses [Jinja templates](http://jinja.pocoo.org/docs/) to specify how the notebooks should be formatted. These templates can be fully customized, allowing you to use nbconvert to create notebooks in different formats with different styles as well.", "_____no_output_____" ], [ "## Converting a notebook to an (I)Python script and printing to stdout\n\nOut of the box, nbconvert can be used to convert notebooks to plain Python files. For example, the following command converts the `example.ipynb` notebook to Python and prints out the result:", "_____no_output_____" ] ], [ [ "!jupyter nbconvert --to python 'example.ipynb' --stdout", "[NbConvertApp] Converting notebook example.ipynb to python\n\n# coding: utf-8\n\n# # Example notebook\n\n# ### Markdown cells\n# \n# This is an example notebook that can be converted with `nbconvert` to different formats. This is an example of a markdown cell.\n\n# ### LaTeX Equations\n# \n# Here is an equation:\n# \n# $$\n# y = \\sin(x)\n# $$\n\n# ### Code cells\n\n# In[1]:\n\n\nprint(\"This is a code cell that produces some output\")\n\n\n# ### Inline figures\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.ion()\n\nx = np.linspace(0, 2 * np.pi, 100)\ny = np.sin(x)\nplt.plot(x, y)\n\n" ] ], [ [ "From the code, you can see that non-code cells are also exported. If you wanted to change that behaviour, you would first look to nbconvert [configuration options page](./config_options.rst) to see if there is an option available that can give you your desired behaviour. \n\nIn this case, if you wanted to remove code cells from the output, you could use the `TemplateExporter.exclude_markdown` traitlet directly, as below. ", "_____no_output_____" ] ], [ [ "!jupyter nbconvert --to python 'example.ipynb' --stdout --TemplateExporter.exclude_markdown=True", "[NbConvertApp] Converting notebook example.ipynb to python\n\n# coding: utf-8\n\n# In[1]:\n\n\nprint(\"This is a code cell that produces some output\")\n\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.ion()\n\nx = np.linspace(0, 2 * np.pi, 100)\ny = np.sin(x)\nplt.plot(x, y)\n\n" ] ], [ [ "## Custom Templates \n\nAs mentioned above, if you want to change this behavior, you can use a custom template. The custom template inherits from the Python template and overwrites the markdown blocks so that they are empty. \n\nBelow is an example of a custom template, which we write to a file called `simplepython.tpl`. 
This template removes markdown cells from the output, and also changes how the execution count numbers are formatted:", "_____no_output_____" ] ], [ [ "%%writefile simplepython.tpl\n\n{% extends 'python.tpl'%}\n\n## remove markdown cells\n{% block markdowncell -%}\n{% endblock markdowncell %}\n\n## change the appearance of execution count\n{% block in_prompt %}\n# [{{ cell.execution_count if cell.execution_count else ' ' }}]:\n{% endblock in_prompt %}", "Overwriting simplepython.tpl\n" ] ], [ [ "Using this template, we see that the resulting Python code does not contain anything that was previously in a markdown cell, and only displays execution counts (i.e., `[#]:` not `In[#]:`):", "_____no_output_____" ] ], [ [ "!jupyter nbconvert --to python 'example.ipynb' --stdout --template=simplepython.tpl", "[NbConvertApp] Converting notebook example.ipynb to python\n\n\n# coding: utf-8\n\n# [1]:\n\nprint(\"This is a code cell that produces some output\")\n\n\n# [1]:\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.ion()\n\nx = np.linspace(0, 2 * np.pi, 100)\ny = np.sin(x)\nplt.plot(x, y)\n\n" ] ], [ [ "## Template structure\n\nNbconvert templates consist of a set of nested blocks. When defining a new\ntemplate, you extend an existing template by overriding some of the blocks.\n\nAll the templates shipped in nbconvert have the basic structure described here,\nthough some may define additional blocks.", "_____no_output_____" ] ], [ [ "from IPython.display import HTML, display\nwith open('template_structure.html') as f:\n display(HTML(f.read()))", "_____no_output_____" ] ], [ [ "### A few gotchas\n\nJinja blocks use `{% %}` by default which does not play nicely with LaTeX, so those are replaced by `((* *))` in LaTeX templates.", "_____no_output_____" ], [ "## Templates using cell tags\n\nThe notebook file format supports attaching arbitrary JSON metadata to each cell. In addition, every cell has a special `tags` metadata field that accepts a list of strings that indicate the cell's tags. To apply these, go to the `View → CellToolbar → Tags` option which will create a Tag editor at the top of every cell. \n\nFirst choose a notebook you want to convert to html, and apply the tags: `\"Easy\"`, `\"Medium\"`, or \n`\"Hard\"`. 
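\n\nIf you prefer to apply or inspect tags programmatically rather than through the toolbar, this can also be done with the `nbformat` library (an added sketch, not part of the original walkthrough; the tag value and cell index are just examples):\n\n```python\nimport nbformat\n\n# Load the notebook, tag its first cell as 'Easy', and list every cell's tags.\nnb = nbformat.read('example.ipynb', as_version=4)\nnb.cells[0].metadata['tags'] = ['Easy']\nfor i, cell in enumerate(nb.cells):\n    print(i, cell.metadata.get('tags', []))\n\nnbformat.write(nb, 'example.ipynb')\n```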
\n\nWith this in place, the notebook can be converted using a custom template.\n\nDesign your template in the cells provided below.\n\nHint: tags are located at `cell.metadata.tags`; the following Python code retrieves the list of tags (defaulting to an empty list if none are set): \n\n```python\ncell['metadata'].get('tags', [])\n```\n\nYou can then use this inside a Jinja template, as in the following:", "_____no_output_____" ] ], [ [ "%%writefile mytemplate.tpl\n\n{% extends 'full.tpl'%}\n{% block any_cell %}\n{% if 'Hard' in cell['metadata'].get('tags', []) %}\n <div style=\"border:thin solid red\">\n {{ super() }}\n </div>\n{% elif 'Medium' in cell['metadata'].get('tags', []) %}\n <div style=\"border:thin solid orange\">\n {{ super() }}\n </div>\n{% elif 'Easy' in cell['metadata'].get('tags', []) %}\n <div style=\"border:thin solid green\">\n {{ super() }}\n </div>\n{% else %}\n {{ super() }}\n{% endif %}\n{% endblock any_cell %}", "Overwriting mytemplate.tpl\n" ] ], [ [ "Now, if we collect the result of using nbconvert with this template, and display the resulting html, we see the following:", "_____no_output_____" ] ], [ [ "example = !jupyter nbconvert --to html 'example.ipynb' --template=mytemplate.tpl --stdout\nexample = example[3:] # have to remove the first three lines which are not proper html\nfrom IPython.display import HTML, display\ndisplay(HTML('\\n'.join(example))) ", "_____no_output_____" ] ], [ [ "## Templates using custom cell metadata \n\nWe demonstrated [above](#Templates-using-cell-tags) how to use cell tags in a template to apply custom styling to a notebook. But remember, the notebook file format supports attaching _arbitrary_ JSON metadata to each cell, not only cell tags. \nHere, we describe an exercise for using an `example.difficulty` metadata field (rather than cell tags) to do the same as before (to mark up different cells as being \"Easy\", \"Medium\" or \"Hard\").\n\n### How to edit cell metadata\n\nTo edit the cell metadata from within the notebook, go to the menu item: `View → Cell Toolbar → Edit Metadata`. This will bring up a toolbar above each cell with a button that says \"Edit Metadata\". Click this button, and a field will pop up in which you will directly edit the cell metadata JSON. \n\n**NB**: Because it is JSON, you will need to ensure that what you write is valid JSON. \n\n### Template challenges: dealing with missing custom metadata fields\n\nOne of the challenges of dealing with custom metadata is to handle the case where the metadata is not present on every cell. This can get somewhat tricky because of JSON objects' tendency to be deeply nested, coupled with Python's (and therefore Jinja's) approach to calling into dictionaries. Specifically, the following code will error:\n\n```python\nfoo = {}\nfoo[\"bar\"]\n```\n\nAccordingly, it is better to use the [`{}.get` method](https://docs.python.org/3.6/library/stdtypes.html#dict.get), which lets you pass, as the second argument, a default value to return if the key is not found. \n\nHint: if your metadata items are located at `cell.metadata.example.difficulty`, the following Python code would get the value, defaulting to an empty string (`''`) if nothing is found:\n\n```python\ncell['metadata'].get('example', {}).get('difficulty', '')\n```", "_____no_output_____" ], [ "### Exercise: Write a template for handling custom metadata\nNow, write a template that will look for `Easy`, `Medium` and `Hard` metadata values for the `cell.metadata.example.difficulty` field and wrap them in a div with a green, orange, or red thin solid border (respectively). 
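\n\nFor reference, a cell marked this way would carry metadata along these lines (an illustrative example of the field shape described above; any other metadata keys on the cell are unaffected):\n\n```json\n{\n    \"example\": {\n        \"difficulty\": \"Hard\"\n    }\n}\n```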
\n\n**NB**: This is the same design and logic as used in the previous cell tag example.\n\n#### How to get `example.ipynb`\n\nWe have provided an example file in `example.ipynb` in the nbconvert documentation that has already been marked up with both tags and the above metadata for you to test with. You can get it from [this link to the raw file]( https://raw.githubusercontent.com/jupyter/nbconvert/master/docs/source/example.ipynb) or by cloning the repository [from GitHub](https://github.com/jupyter/nbconvert) and navigating to `nbconvert/docs/source/example.ipynb`. \n\n#### Convert `example.ipynb` using cell tags \n\nFirst, make sure that you can reproduce the previous result using the cell tags template that we have provided above. \n\n**Easy**: If you want to make it easy on yourself, create a new file `my_template.tpl` in the same directory as `example.ipynb` and copy into it the contents of the cell we use to write `mytemplate.tpl` to the file system. \n\nThen run `jupyter nbconvert --to html 'example.ipynb' --template=my_template.tpl` and see if your output matches the earlier result. \n\n**Moderate**: If you want more of a challenge, try recreating the jinja template by modifying the following jinja template file:\n\n```python\n{% extends 'full.tpl'%}\n{% block any_cell %}\n <div style=\"border:thin solid red\">\n {{ super() }}\n </div>\n{% endblock any_cell %}\n```\n\n**Hard**: If you want even more of a challenge, try recreating the jinja template from scratch. \n\n#### Write your template\n\nOnce you've done at least the **Easy** version of the previous step, try modifying your template to use `cell.metadata.example.difficulty` fields rather than cell tags. \n\n#### Convert `example.ipynb` with formatting from custom metadata\n\nOnce you've written your template, try converting `example.ipynb` using the following command (making sure that `your_template.tpl` is in your local directory where you are running the command):\n\n```bash\njupyter nbconvert --to html 'example.ipynb' --template=your_template.tpl --stdout\n```\n\nThe resulting display should pick out different cells to be bordered with green, orange, or red.\n\nIf you do that successfully, the resulting html document should look like the following cell's contents: ", "_____no_output_____" ], [ "<html>\n<head><meta charset=\"utf-8\" />\n<title>example</title><script src=\"https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js\"></script>\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js\"></script>\n\n<style type=\"text/css\">\n/* Full inlined Bootstrap and notebook stylesheet, plus the rendered HTML body, omitted here for brevity. */
background-color: #337ab7;\n}\na.bg-primary:hover,\na.bg-primary:focus {\n background-color: #286090;\n}\n.bg-success {\n background-color: #dff0d8;\n}\na.bg-success:hover,\na.bg-success:focus {\n background-color: #c1e2b3;\n}\n.bg-info {\n background-color: #d9edf7;\n}\na.bg-info:hover,\na.bg-info:focus {\n background-color: #afd9ee;\n}\n.bg-warning {\n background-color: #fcf8e3;\n}\na.bg-warning:hover,\na.bg-warning:focus {\n background-color: #f7ecb5;\n}\n.bg-danger {\n background-color: #f2dede;\n}\na.bg-danger:hover,\na.bg-danger:focus {\n background-color: #e4b9b9;\n}\n.page-header {\n padding-bottom: 8px;\n margin: 36px 0 18px;\n border-bottom: 1px solid #eeeeee;\n}\nul,\nol {\n margin-top: 0;\n margin-bottom: 9px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n margin-bottom: 0;\n}\n.list-unstyled {\n padding-left: 0;\n list-style: none;\n}\n.list-inline {\n padding-left: 0;\n list-style: none;\n margin-left: -5px;\n}\n.list-inline > li {\n display: inline-block;\n padding-left: 5px;\n padding-right: 5px;\n}\ndl {\n margin-top: 0;\n margin-bottom: 18px;\n}\ndt,\ndd {\n line-height: 1.42857143;\n}\ndt {\n font-weight: bold;\n}\ndd {\n margin-left: 0;\n}\n@media (min-width: 541px) {\n .dl-horizontal dt {\n float: left;\n width: 160px;\n clear: left;\n text-align: right;\n overflow: hidden;\n text-overflow: ellipsis;\n white-space: nowrap;\n }\n .dl-horizontal dd {\n margin-left: 180px;\n }\n}\nabbr[title],\nabbr[data-original-title] {\n cursor: help;\n border-bottom: 1px dotted #777777;\n}\n.initialism {\n font-size: 90%;\n text-transform: uppercase;\n}\nblockquote {\n padding: 9px 18px;\n margin: 0 0 18px;\n font-size: inherit;\n border-left: 5px solid #eeeeee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n display: block;\n font-size: 80%;\n line-height: 1.42857143;\n color: #777777;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n content: '\\2014 \\00A0';\n}\n.blockquote-reverse,\nblockquote.pull-right {\n padding-right: 15px;\n padding-left: 0;\n border-right: 5px solid #eeeeee;\n border-left: 0;\n text-align: right;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n content: '';\n}\n.blockquote-reverse footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n content: '\\00A0 \\2014';\n}\naddress {\n margin-bottom: 18px;\n font-style: normal;\n line-height: 1.42857143;\n}\ncode,\nkbd,\npre,\nsamp {\n font-family: monospace;\n}\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: #c7254e;\n background-color: #f9f2f4;\n border-radius: 2px;\n}\nkbd {\n padding: 2px 4px;\n font-size: 90%;\n color: #888;\n background-color: transparent;\n border-radius: 1px;\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\nkbd kbd {\n padding: 0;\n font-size: 100%;\n font-weight: bold;\n box-shadow: none;\n}\npre {\n display: block;\n padding: 8.5px;\n margin: 0 0 9px;\n font-size: 12px;\n line-height: 1.42857143;\n word-break: break-all;\n word-wrap: break-word;\n color: #333333;\n background-color: #f5f5f5;\n border: 1px solid #ccc;\n border-radius: 2px;\n}\npre code {\n padding: 0;\n font-size: inherit;\n color: inherit;\n white-space: 
pre-wrap;\n background-color: transparent;\n border-radius: 0;\n}\n.pre-scrollable {\n max-height: 340px;\n overflow-y: scroll;\n}\n.container {\n margin-right: auto;\n margin-left: auto;\n padding-left: 0px;\n padding-right: 0px;\n}\n@media (min-width: 768px) {\n .container {\n width: 768px;\n }\n}\n@media (min-width: 992px) {\n .container {\n width: 940px;\n }\n}\n@media (min-width: 1200px) {\n .container {\n width: 1140px;\n }\n}\n.container-fluid {\n margin-right: auto;\n margin-left: auto;\n padding-left: 0px;\n padding-right: 0px;\n}\n.row {\n margin-left: 0px;\n margin-right: 0px;\n}\n.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {\n position: relative;\n min-height: 1px;\n padding-left: 0px;\n padding-right: 0px;\n}\n.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {\n float: left;\n}\n.col-xs-12 {\n width: 100%;\n}\n.col-xs-11 {\n width: 91.66666667%;\n}\n.col-xs-10 {\n width: 83.33333333%;\n}\n.col-xs-9 {\n width: 75%;\n}\n.col-xs-8 {\n width: 66.66666667%;\n}\n.col-xs-7 {\n width: 58.33333333%;\n}\n.col-xs-6 {\n width: 50%;\n}\n.col-xs-5 {\n width: 41.66666667%;\n}\n.col-xs-4 {\n width: 33.33333333%;\n}\n.col-xs-3 {\n width: 25%;\n}\n.col-xs-2 {\n width: 16.66666667%;\n}\n.col-xs-1 {\n width: 8.33333333%;\n}\n.col-xs-pull-12 {\n right: 100%;\n}\n.col-xs-pull-11 {\n right: 91.66666667%;\n}\n.col-xs-pull-10 {\n right: 83.33333333%;\n}\n.col-xs-pull-9 {\n right: 75%;\n}\n.col-xs-pull-8 {\n right: 66.66666667%;\n}\n.col-xs-pull-7 {\n right: 58.33333333%;\n}\n.col-xs-pull-6 {\n right: 50%;\n}\n.col-xs-pull-5 {\n right: 41.66666667%;\n}\n.col-xs-pull-4 {\n right: 33.33333333%;\n}\n.col-xs-pull-3 {\n right: 25%;\n}\n.col-xs-pull-2 {\n right: 16.66666667%;\n}\n.col-xs-pull-1 {\n right: 8.33333333%;\n}\n.col-xs-pull-0 {\n right: auto;\n}\n.col-xs-push-12 {\n left: 100%;\n}\n.col-xs-push-11 {\n left: 91.66666667%;\n}\n.col-xs-push-10 {\n left: 83.33333333%;\n}\n.col-xs-push-9 {\n left: 75%;\n}\n.col-xs-push-8 {\n left: 66.66666667%;\n}\n.col-xs-push-7 {\n left: 58.33333333%;\n}\n.col-xs-push-6 {\n left: 50%;\n}\n.col-xs-push-5 {\n left: 41.66666667%;\n}\n.col-xs-push-4 {\n left: 33.33333333%;\n}\n.col-xs-push-3 {\n left: 25%;\n}\n.col-xs-push-2 {\n left: 16.66666667%;\n}\n.col-xs-push-1 {\n left: 8.33333333%;\n}\n.col-xs-push-0 {\n left: auto;\n}\n.col-xs-offset-12 {\n margin-left: 100%;\n}\n.col-xs-offset-11 {\n margin-left: 91.66666667%;\n}\n.col-xs-offset-10 {\n margin-left: 83.33333333%;\n}\n.col-xs-offset-9 {\n margin-left: 75%;\n}\n.col-xs-offset-8 {\n margin-left: 66.66666667%;\n}\n.col-xs-offset-7 {\n margin-left: 58.33333333%;\n}\n.col-xs-offset-6 {\n margin-left: 50%;\n}\n.col-xs-offset-5 {\n margin-left: 41.66666667%;\n}\n.col-xs-offset-4 {\n margin-left: 33.33333333%;\n}\n.col-xs-offset-3 {\n margin-left: 25%;\n}\n.col-xs-offset-2 {\n margin-left: 16.66666667%;\n}\n.col-xs-offset-1 {\n margin-left: 8.33333333%;\n}\n.col-xs-offset-0 {\n margin-left: 0%;\n}\n@media (min-width: 768px) {\n .col-sm-1, .col-sm-2, 
.col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {\n float: left;\n }\n .col-sm-12 {\n width: 100%;\n }\n .col-sm-11 {\n width: 91.66666667%;\n }\n .col-sm-10 {\n width: 83.33333333%;\n }\n .col-sm-9 {\n width: 75%;\n }\n .col-sm-8 {\n width: 66.66666667%;\n }\n .col-sm-7 {\n width: 58.33333333%;\n }\n .col-sm-6 {\n width: 50%;\n }\n .col-sm-5 {\n width: 41.66666667%;\n }\n .col-sm-4 {\n width: 33.33333333%;\n }\n .col-sm-3 {\n width: 25%;\n }\n .col-sm-2 {\n width: 16.66666667%;\n }\n .col-sm-1 {\n width: 8.33333333%;\n }\n .col-sm-pull-12 {\n right: 100%;\n }\n .col-sm-pull-11 {\n right: 91.66666667%;\n }\n .col-sm-pull-10 {\n right: 83.33333333%;\n }\n .col-sm-pull-9 {\n right: 75%;\n }\n .col-sm-pull-8 {\n right: 66.66666667%;\n }\n .col-sm-pull-7 {\n right: 58.33333333%;\n }\n .col-sm-pull-6 {\n right: 50%;\n }\n .col-sm-pull-5 {\n right: 41.66666667%;\n }\n .col-sm-pull-4 {\n right: 33.33333333%;\n }\n .col-sm-pull-3 {\n right: 25%;\n }\n .col-sm-pull-2 {\n right: 16.66666667%;\n }\n .col-sm-pull-1 {\n right: 8.33333333%;\n }\n .col-sm-pull-0 {\n right: auto;\n }\n .col-sm-push-12 {\n left: 100%;\n }\n .col-sm-push-11 {\n left: 91.66666667%;\n }\n .col-sm-push-10 {\n left: 83.33333333%;\n }\n .col-sm-push-9 {\n left: 75%;\n }\n .col-sm-push-8 {\n left: 66.66666667%;\n }\n .col-sm-push-7 {\n left: 58.33333333%;\n }\n .col-sm-push-6 {\n left: 50%;\n }\n .col-sm-push-5 {\n left: 41.66666667%;\n }\n .col-sm-push-4 {\n left: 33.33333333%;\n }\n .col-sm-push-3 {\n left: 25%;\n }\n .col-sm-push-2 {\n left: 16.66666667%;\n }\n .col-sm-push-1 {\n left: 8.33333333%;\n }\n .col-sm-push-0 {\n left: auto;\n }\n .col-sm-offset-12 {\n margin-left: 100%;\n }\n .col-sm-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-sm-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-sm-offset-9 {\n margin-left: 75%;\n }\n .col-sm-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-sm-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-sm-offset-6 {\n margin-left: 50%;\n }\n .col-sm-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-sm-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-sm-offset-3 {\n margin-left: 25%;\n }\n .col-sm-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-sm-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-sm-offset-0 {\n margin-left: 0%;\n }\n}\n@media (min-width: 992px) {\n .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {\n float: left;\n }\n .col-md-12 {\n width: 100%;\n }\n .col-md-11 {\n width: 91.66666667%;\n }\n .col-md-10 {\n width: 83.33333333%;\n }\n .col-md-9 {\n width: 75%;\n }\n .col-md-8 {\n width: 66.66666667%;\n }\n .col-md-7 {\n width: 58.33333333%;\n }\n .col-md-6 {\n width: 50%;\n }\n .col-md-5 {\n width: 41.66666667%;\n }\n .col-md-4 {\n width: 33.33333333%;\n }\n .col-md-3 {\n width: 25%;\n }\n .col-md-2 {\n width: 16.66666667%;\n }\n .col-md-1 {\n width: 8.33333333%;\n }\n .col-md-pull-12 {\n right: 100%;\n }\n .col-md-pull-11 {\n right: 91.66666667%;\n }\n .col-md-pull-10 {\n right: 83.33333333%;\n }\n .col-md-pull-9 {\n right: 75%;\n }\n .col-md-pull-8 {\n right: 66.66666667%;\n }\n .col-md-pull-7 {\n right: 58.33333333%;\n }\n .col-md-pull-6 {\n right: 50%;\n }\n .col-md-pull-5 {\n right: 41.66666667%;\n }\n .col-md-pull-4 {\n right: 33.33333333%;\n }\n .col-md-pull-3 {\n right: 25%;\n }\n .col-md-pull-2 {\n right: 16.66666667%;\n }\n .col-md-pull-1 {\n right: 8.33333333%;\n }\n .col-md-pull-0 {\n right: auto;\n 
}\n .col-md-push-12 {\n left: 100%;\n }\n .col-md-push-11 {\n left: 91.66666667%;\n }\n .col-md-push-10 {\n left: 83.33333333%;\n }\n .col-md-push-9 {\n left: 75%;\n }\n .col-md-push-8 {\n left: 66.66666667%;\n }\n .col-md-push-7 {\n left: 58.33333333%;\n }\n .col-md-push-6 {\n left: 50%;\n }\n .col-md-push-5 {\n left: 41.66666667%;\n }\n .col-md-push-4 {\n left: 33.33333333%;\n }\n .col-md-push-3 {\n left: 25%;\n }\n .col-md-push-2 {\n left: 16.66666667%;\n }\n .col-md-push-1 {\n left: 8.33333333%;\n }\n .col-md-push-0 {\n left: auto;\n }\n .col-md-offset-12 {\n margin-left: 100%;\n }\n .col-md-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-md-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-md-offset-9 {\n margin-left: 75%;\n }\n .col-md-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-md-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-md-offset-6 {\n margin-left: 50%;\n }\n .col-md-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-md-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-md-offset-3 {\n margin-left: 25%;\n }\n .col-md-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-md-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-md-offset-0 {\n margin-left: 0%;\n }\n}\n@media (min-width: 1200px) {\n .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {\n float: left;\n }\n .col-lg-12 {\n width: 100%;\n }\n .col-lg-11 {\n width: 91.66666667%;\n }\n .col-lg-10 {\n width: 83.33333333%;\n }\n .col-lg-9 {\n width: 75%;\n }\n .col-lg-8 {\n width: 66.66666667%;\n }\n .col-lg-7 {\n width: 58.33333333%;\n }\n .col-lg-6 {\n width: 50%;\n }\n .col-lg-5 {\n width: 41.66666667%;\n }\n .col-lg-4 {\n width: 33.33333333%;\n }\n .col-lg-3 {\n width: 25%;\n }\n .col-lg-2 {\n width: 16.66666667%;\n }\n .col-lg-1 {\n width: 8.33333333%;\n }\n .col-lg-pull-12 {\n right: 100%;\n }\n .col-lg-pull-11 {\n right: 91.66666667%;\n }\n .col-lg-pull-10 {\n right: 83.33333333%;\n }\n .col-lg-pull-9 {\n right: 75%;\n }\n .col-lg-pull-8 {\n right: 66.66666667%;\n }\n .col-lg-pull-7 {\n right: 58.33333333%;\n }\n .col-lg-pull-6 {\n right: 50%;\n }\n .col-lg-pull-5 {\n right: 41.66666667%;\n }\n .col-lg-pull-4 {\n right: 33.33333333%;\n }\n .col-lg-pull-3 {\n right: 25%;\n }\n .col-lg-pull-2 {\n right: 16.66666667%;\n }\n .col-lg-pull-1 {\n right: 8.33333333%;\n }\n .col-lg-pull-0 {\n right: auto;\n }\n .col-lg-push-12 {\n left: 100%;\n }\n .col-lg-push-11 {\n left: 91.66666667%;\n }\n .col-lg-push-10 {\n left: 83.33333333%;\n }\n .col-lg-push-9 {\n left: 75%;\n }\n .col-lg-push-8 {\n left: 66.66666667%;\n }\n .col-lg-push-7 {\n left: 58.33333333%;\n }\n .col-lg-push-6 {\n left: 50%;\n }\n .col-lg-push-5 {\n left: 41.66666667%;\n }\n .col-lg-push-4 {\n left: 33.33333333%;\n }\n .col-lg-push-3 {\n left: 25%;\n }\n .col-lg-push-2 {\n left: 16.66666667%;\n }\n .col-lg-push-1 {\n left: 8.33333333%;\n }\n .col-lg-push-0 {\n left: auto;\n }\n .col-lg-offset-12 {\n margin-left: 100%;\n }\n .col-lg-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-lg-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-lg-offset-9 {\n margin-left: 75%;\n }\n .col-lg-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-lg-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-lg-offset-6 {\n margin-left: 50%;\n }\n .col-lg-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-lg-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-lg-offset-3 {\n margin-left: 25%;\n }\n .col-lg-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-lg-offset-1 {\n margin-left: 
8.33333333%;\n }\n .col-lg-offset-0 {\n margin-left: 0%;\n }\n}\ntable {\n background-color: transparent;\n}\ncaption {\n padding-top: 8px;\n padding-bottom: 8px;\n color: #777777;\n text-align: left;\n}\nth {\n text-align: left;\n}\n.table {\n width: 100%;\n max-width: 100%;\n margin-bottom: 18px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n padding: 8px;\n line-height: 1.42857143;\n vertical-align: top;\n border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n vertical-align: bottom;\n border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n border-top: 0;\n}\n.table > tbody + tbody {\n border-top: 2px solid #ddd;\n}\n.table .table {\n background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n padding: 5px;\n}\n.table-bordered {\n border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n background-color: #f5f5f5;\n}\ntable col[class*=\"col-\"] {\n position: static;\n float: none;\n display: table-column;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n position: static;\n float: none;\n display: table-cell;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > 
td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > tfoot > tr.warning > th {\n background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > .warning,\n.table-hover > tbody > tr.warning:hover > th {\n background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n background-color: #ebcccc;\n}\n.table-responsive {\n overflow-x: auto;\n min-height: 0.01%;\n}\n@media screen and (max-width: 767px) {\n .table-responsive {\n width: 100%;\n margin-bottom: 13.5px;\n overflow-y: hidden;\n -ms-overflow-style: -ms-autohiding-scrollbar;\n border: 1px solid #ddd;\n }\n .table-responsive > .table {\n margin-bottom: 0;\n }\n .table-responsive > .table > thead > tr > th,\n .table-responsive > .table > tbody > tr > th,\n .table-responsive > .table > tfoot > tr > th,\n .table-responsive > .table > thead > tr > td,\n .table-responsive > .table > tbody > tr > td,\n .table-responsive > .table > tfoot > tr > td {\n white-space: nowrap;\n }\n .table-responsive > .table-bordered {\n border: 0;\n }\n .table-responsive > .table-bordered > thead > tr > th:first-child,\n .table-responsive > .table-bordered > tbody > tr > th:first-child,\n .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n .table-responsive > .table-bordered > thead > tr > td:first-child,\n .table-responsive > .table-bordered > tbody > tr > td:first-child,\n .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n border-left: 0;\n }\n .table-responsive > .table-bordered > thead > tr > th:last-child,\n .table-responsive > .table-bordered > tbody > tr > th:last-child,\n .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n .table-responsive > .table-bordered > thead > tr > td:last-child,\n .table-responsive > .table-bordered > tbody > tr > td:last-child,\n .table-responsive > .table-bordered > tfoot > 
tr > td:last-child {\n border-right: 0;\n }\n .table-responsive > .table-bordered > tbody > tr:last-child > th,\n .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n .table-responsive > .table-bordered > tbody > tr:last-child > td,\n .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n border-bottom: 0;\n }\n}\nfieldset {\n padding: 0;\n margin: 0;\n border: 0;\n min-width: 0;\n}\nlegend {\n display: block;\n width: 100%;\n padding: 0;\n margin-bottom: 18px;\n font-size: 19.5px;\n line-height: inherit;\n color: #333333;\n border: 0;\n border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n display: inline-block;\n max-width: 100%;\n margin-bottom: 5px;\n font-weight: bold;\n}\ninput[type=\"search\"] {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n margin: 4px 0 0;\n margin-top: 1px \\9;\n line-height: normal;\n}\ninput[type=\"file\"] {\n display: block;\n}\ninput[type=\"range\"] {\n display: block;\n width: 100%;\n}\nselect[multiple],\nselect[size] {\n height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\noutput {\n display: block;\n padding-top: 7px;\n font-size: 13px;\n line-height: 1.42857143;\n color: #555555;\n}\n.form-control {\n display: block;\n width: 100%;\n height: 32px;\n padding: 6px 12px;\n font-size: 13px;\n line-height: 1.42857143;\n color: #555555;\n background-color: #fff;\n background-image: none;\n border: 1px solid #ccc;\n border-radius: 2px;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n border-color: #66afe9;\n outline: 0;\n -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6);\n box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6);\n}\n.form-control::-moz-placeholder {\n color: #999;\n opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n color: #999;\n}\n.form-control::-webkit-input-placeholder {\n color: #999;\n}\n.form-control::-ms-expand {\n border: 0;\n background-color: transparent;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n background-color: #eeeeee;\n opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n cursor: not-allowed;\n}\ntextarea.form-control {\n height: auto;\n}\ninput[type=\"search\"] {\n -webkit-appearance: none;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n input[type=\"date\"].form-control,\n input[type=\"time\"].form-control,\n input[type=\"datetime-local\"].form-control,\n input[type=\"month\"].form-control {\n line-height: 32px;\n }\n input[type=\"date\"].input-sm,\n input[type=\"time\"].input-sm,\n input[type=\"datetime-local\"].input-sm,\n input[type=\"month\"].input-sm,\n .input-group-sm input[type=\"date\"],\n .input-group-sm input[type=\"time\"],\n .input-group-sm input[type=\"datetime-local\"],\n .input-group-sm input[type=\"month\"] {\n line-height: 30px;\n }\n input[type=\"date\"].input-lg,\n input[type=\"time\"].input-lg,\n input[type=\"datetime-local\"].input-lg,\n input[type=\"month\"].input-lg,\n 
.input-group-lg input[type=\"date\"],\n .input-group-lg input[type=\"time\"],\n .input-group-lg input[type=\"datetime-local\"],\n .input-group-lg input[type=\"month\"] {\n line-height: 45px;\n }\n}\n.form-group {\n margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n position: relative;\n display: block;\n margin-top: 10px;\n margin-bottom: 10px;\n}\n.radio label,\n.checkbox label {\n min-height: 18px;\n padding-left: 20px;\n margin-bottom: 0;\n font-weight: normal;\n cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n position: absolute;\n margin-left: -20px;\n margin-top: 4px \\9;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n position: relative;\n display: inline-block;\n padding-left: 20px;\n margin-bottom: 0;\n vertical-align: middle;\n font-weight: normal;\n cursor: pointer;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n margin-top: 0;\n margin-left: 10px;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n cursor: not-allowed;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n cursor: not-allowed;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n cursor: not-allowed;\n}\n.form-control-static {\n padding-top: 7px;\n padding-bottom: 7px;\n margin-bottom: 0;\n min-height: 31px;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n padding-left: 0;\n padding-right: 0;\n}\n.input-sm {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 1px;\n}\nselect.input-sm {\n height: 30px;\n line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n height: auto;\n}\n.form-group-sm .form-control {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 1px;\n}\n.form-group-sm select.form-control {\n height: 30px;\n line-height: 30px;\n}\n.form-group-sm textarea.form-control,\n.form-group-sm select[multiple].form-control {\n height: auto;\n}\n.form-group-sm .form-control-static {\n height: 30px;\n min-height: 30px;\n padding: 6px 10px;\n font-size: 12px;\n line-height: 1.5;\n}\n.input-lg {\n height: 45px;\n padding: 10px 16px;\n font-size: 17px;\n line-height: 1.3333333;\n border-radius: 3px;\n}\nselect.input-lg {\n height: 45px;\n line-height: 45px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n height: auto;\n}\n.form-group-lg .form-control {\n height: 45px;\n padding: 10px 16px;\n font-size: 17px;\n line-height: 1.3333333;\n border-radius: 3px;\n}\n.form-group-lg select.form-control {\n height: 45px;\n line-height: 45px;\n}\n.form-group-lg textarea.form-control,\n.form-group-lg select[multiple].form-control {\n height: auto;\n}\n.form-group-lg .form-control-static {\n height: 45px;\n min-height: 35px;\n padding: 11px 16px;\n font-size: 17px;\n line-height: 1.3333333;\n}\n.has-feedback {\n position: relative;\n}\n.has-feedback .form-control {\n padding-right: 40px;\n}\n.form-control-feedback {\n position: absolute;\n top: 0;\n right: 0;\n z-index: 2;\n display: block;\n width: 32px;\n height: 32px;\n line-height: 32px;\n text-align: center;\n 
pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n width: 45px;\n height: 45px;\n line-height: 45px;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n width: 30px;\n height: 30px;\n line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n color: #3c763d;\n}\n.has-success .form-control {\n border-color: #3c763d;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-success .form-control:focus {\n border-color: #2b542c;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n color: #3c763d;\n border-color: #3c763d;\n background-color: #dff0d8;\n}\n.has-success .form-control-feedback {\n color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n color: #8a6d3b;\n}\n.has-warning .form-control {\n border-color: #8a6d3b;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-warning .form-control:focus {\n border-color: #66512c;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n color: #8a6d3b;\n border-color: #8a6d3b;\n background-color: #fcf8e3;\n}\n.has-warning .form-control-feedback {\n color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n color: #a94442;\n}\n.has-error .form-control {\n border-color: #a94442;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-error .form-control:focus {\n border-color: #843534;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n color: #a94442;\n border-color: #a94442;\n background-color: #f2dede;\n}\n.has-error .form-control-feedback {\n color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n top: 23px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n top: 0;\n}\n.help-block {\n display: block;\n margin-top: 5px;\n margin-bottom: 10px;\n color: #404040;\n}\n@media (min-width: 768px) {\n .form-inline .form-group {\n display: inline-block;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .form-control {\n display: inline-block;\n width: auto;\n vertical-align: middle;\n }\n .form-inline .form-control-static {\n display: inline-block;\n }\n .form-inline .input-group {\n display: 
inline-table;\n vertical-align: middle;\n }\n .form-inline .input-group .input-group-addon,\n .form-inline .input-group .input-group-btn,\n .form-inline .input-group .form-control {\n width: auto;\n }\n .form-inline .input-group > .form-control {\n width: 100%;\n }\n .form-inline .control-label {\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .radio,\n .form-inline .checkbox {\n display: inline-block;\n margin-top: 0;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .radio label,\n .form-inline .checkbox label {\n padding-left: 0;\n }\n .form-inline .radio input[type=\"radio\"],\n .form-inline .checkbox input[type=\"checkbox\"] {\n position: relative;\n margin-left: 0;\n }\n .form-inline .has-feedback .form-control-feedback {\n top: 0;\n }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n margin-top: 0;\n margin-bottom: 0;\n padding-top: 7px;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n min-height: 25px;\n}\n.form-horizontal .form-group {\n margin-left: 0px;\n margin-right: 0px;\n}\n@media (min-width: 768px) {\n .form-horizontal .control-label {\n text-align: right;\n margin-bottom: 0;\n padding-top: 7px;\n }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n right: 0px;\n}\n@media (min-width: 768px) {\n .form-horizontal .form-group-lg .control-label {\n padding-top: 11px;\n font-size: 17px;\n }\n}\n@media (min-width: 768px) {\n .form-horizontal .form-group-sm .control-label {\n padding-top: 6px;\n font-size: 12px;\n }\n}\n.btn {\n display: inline-block;\n margin-bottom: 0;\n font-weight: normal;\n text-align: center;\n vertical-align: middle;\n touch-action: manipulation;\n cursor: pointer;\n background-image: none;\n border: 1px solid transparent;\n white-space: nowrap;\n padding: 6px 12px;\n font-size: 13px;\n line-height: 1.42857143;\n border-radius: 2px;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n color: #333;\n text-decoration: none;\n}\n.btn:active,\n.btn.active {\n outline: 0;\n background-image: none;\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n cursor: not-allowed;\n opacity: 0.65;\n filter: alpha(opacity=65);\n -webkit-box-shadow: none;\n box-shadow: none;\n}\na.btn.disabled,\nfieldset[disabled] a.btn {\n pointer-events: none;\n}\n.btn-default {\n color: #333;\n background-color: #fff;\n border-color: #ccc;\n}\n.btn-default:focus,\n.btn-default.focus {\n color: #333;\n background-color: #e6e6e6;\n border-color: #8c8c8c;\n}\n.btn-default:hover {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\n.btn-default:active:hover,\n.btn-default.active:hover,\n.open > .dropdown-toggle.btn-default:hover,\n.btn-default:active:focus,\n.btn-default.active:focus,\n.open > .dropdown-toggle.btn-default:focus,\n.btn-default:active.focus,\n.btn-default.active.focus,\n.open > .dropdown-toggle.btn-default.focus {\n color: #333;\n background-color: #d4d4d4;\n border-color: 
#8c8c8c;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n background-image: none;\n}\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus {\n background-color: #fff;\n border-color: #ccc;\n}\n.btn-default .badge {\n color: #fff;\n background-color: #333;\n}\n.btn-primary {\n color: #fff;\n background-color: #337ab7;\n border-color: #2e6da4;\n}\n.btn-primary:focus,\n.btn-primary.focus {\n color: #fff;\n background-color: #286090;\n border-color: #122b40;\n}\n.btn-primary:hover {\n color: #fff;\n background-color: #286090;\n border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n color: #fff;\n background-color: #286090;\n border-color: #204d74;\n}\n.btn-primary:active:hover,\n.btn-primary.active:hover,\n.open > .dropdown-toggle.btn-primary:hover,\n.btn-primary:active:focus,\n.btn-primary.active:focus,\n.open > .dropdown-toggle.btn-primary:focus,\n.btn-primary:active.focus,\n.btn-primary.active.focus,\n.open > .dropdown-toggle.btn-primary.focus {\n color: #fff;\n background-color: #204d74;\n border-color: #122b40;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n background-image: none;\n}\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus {\n background-color: #337ab7;\n border-color: #2e6da4;\n}\n.btn-primary .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.btn-success {\n color: #fff;\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.btn-success:focus,\n.btn-success.focus {\n color: #fff;\n background-color: #449d44;\n border-color: #255625;\n}\n.btn-success:hover {\n color: #fff;\n background-color: #449d44;\n border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n color: #fff;\n background-color: #449d44;\n border-color: #398439;\n}\n.btn-success:active:hover,\n.btn-success.active:hover,\n.open > .dropdown-toggle.btn-success:hover,\n.btn-success:active:focus,\n.btn-success.active:focus,\n.open > .dropdown-toggle.btn-success:focus,\n.btn-success:active.focus,\n.btn-success.active.focus,\n.open > .dropdown-toggle.btn-success.focus {\n color: #fff;\n background-color: #398439;\n border-color: #255625;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n background-image: none;\n}\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus {\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.btn-success .badge {\n color: #5cb85c;\n background-color: #fff;\n}\n.btn-info {\n color: #fff;\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.btn-info:focus,\n.btn-info.focus {\n color: #fff;\n background-color: #31b0d5;\n border-color: #1b6d85;\n}\n.btn-info:hover {\n color: #fff;\n background-color: #31b0d5;\n 
border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n color: #fff;\n background-color: #31b0d5;\n border-color: #269abc;\n}\n.btn-info:active:hover,\n.btn-info.active:hover,\n.open > .dropdown-toggle.btn-info:hover,\n.btn-info:active:focus,\n.btn-info.active:focus,\n.open > .dropdown-toggle.btn-info:focus,\n.btn-info:active.focus,\n.btn-info.active.focus,\n.open > .dropdown-toggle.btn-info.focus {\n color: #fff;\n background-color: #269abc;\n border-color: #1b6d85;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n background-image: none;\n}\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus {\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.btn-info .badge {\n color: #5bc0de;\n background-color: #fff;\n}\n.btn-warning {\n color: #fff;\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.btn-warning:focus,\n.btn-warning.focus {\n color: #fff;\n background-color: #ec971f;\n border-color: #985f0d;\n}\n.btn-warning:hover {\n color: #fff;\n background-color: #ec971f;\n border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n color: #fff;\n background-color: #ec971f;\n border-color: #d58512;\n}\n.btn-warning:active:hover,\n.btn-warning.active:hover,\n.open > .dropdown-toggle.btn-warning:hover,\n.btn-warning:active:focus,\n.btn-warning.active:focus,\n.open > .dropdown-toggle.btn-warning:focus,\n.btn-warning:active.focus,\n.btn-warning.active.focus,\n.open > .dropdown-toggle.btn-warning.focus {\n color: #fff;\n background-color: #d58512;\n border-color: #985f0d;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n background-image: none;\n}\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus {\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.btn-warning .badge {\n color: #f0ad4e;\n background-color: #fff;\n}\n.btn-danger {\n color: #fff;\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.btn-danger:focus,\n.btn-danger.focus {\n color: #fff;\n background-color: #c9302c;\n border-color: #761c19;\n}\n.btn-danger:hover {\n color: #fff;\n background-color: #c9302c;\n border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n color: #fff;\n background-color: #c9302c;\n border-color: #ac2925;\n}\n.btn-danger:active:hover,\n.btn-danger.active:hover,\n.open > .dropdown-toggle.btn-danger:hover,\n.btn-danger:active:focus,\n.btn-danger.active:focus,\n.open > .dropdown-toggle.btn-danger:focus,\n.btn-danger:active.focus,\n.btn-danger.active.focus,\n.open > .dropdown-toggle.btn-danger.focus {\n color: #fff;\n background-color: #ac2925;\n border-color: #761c19;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n background-image: none;\n}\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] 
.btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus {\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.btn-danger .badge {\n color: #d9534f;\n background-color: #fff;\n}\n.btn-link {\n color: #337ab7;\n font-weight: normal;\n border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n background-color: transparent;\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n color: #23527c;\n text-decoration: underline;\n background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n color: #777777;\n text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n padding: 10px 16px;\n font-size: 17px;\n line-height: 1.3333333;\n border-radius: 3px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 1px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n padding: 1px 5px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 1px;\n}\n.btn-block {\n display: block;\n width: 100%;\n}\n.btn-block + .btn-block {\n margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n width: 100%;\n}\n.fade {\n opacity: 0;\n -webkit-transition: opacity 0.15s linear;\n -o-transition: opacity 0.15s linear;\n transition: opacity 0.15s linear;\n}\n.fade.in {\n opacity: 1;\n}\n.collapse {\n display: none;\n}\n.collapse.in {\n display: block;\n}\ntr.collapse.in {\n display: table-row;\n}\ntbody.collapse.in {\n display: table-row-group;\n}\n.collapsing {\n position: relative;\n height: 0;\n overflow: hidden;\n -webkit-transition-property: height, visibility;\n transition-property: height, visibility;\n -webkit-transition-duration: 0.35s;\n transition-duration: 0.35s;\n -webkit-transition-timing-function: ease;\n transition-timing-function: ease;\n}\n.caret {\n display: inline-block;\n width: 0;\n height: 0;\n margin-left: 2px;\n vertical-align: middle;\n border-top: 4px dashed;\n border-top: 4px solid \\9;\n border-right: 4px solid transparent;\n border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n position: relative;\n}\n.dropdown-toggle:focus {\n outline: 0;\n}\n.dropdown-menu {\n position: absolute;\n top: 100%;\n left: 0;\n z-index: 1000;\n display: none;\n float: left;\n min-width: 160px;\n padding: 5px 0;\n margin: 2px 0 0;\n list-style: none;\n font-size: 13px;\n text-align: left;\n background-color: #fff;\n border: 1px solid #ccc;\n border: 1px solid rgba(0, 0, 0, 0.15);\n border-radius: 2px;\n -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n background-clip: padding-box;\n}\n.dropdown-menu.pull-right {\n right: 0;\n left: auto;\n}\n.dropdown-menu .divider {\n height: 1px;\n margin: 8px 0;\n overflow: hidden;\n background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n display: block;\n padding: 3px 20px;\n clear: both;\n font-weight: normal;\n line-height: 1.42857143;\n color: #333333;\n white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n text-decoration: none;\n color: #262626;\n background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n 
!important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n display: none !important;\n}\n@media (max-width: 767px) {\n .visible-xs {\n display: block !important;\n }\n table.visible-xs {\n display: table !important;\n }\n tr.visible-xs {\n display: table-row !important;\n }\n th.visible-xs,\n td.visible-xs {\n display: table-cell !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-block {\n display: block !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-inline {\n display: inline !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm {\n display: block !important;\n }\n table.visible-sm {\n display: table !important;\n }\n tr.visible-sm {\n display: table-row !important;\n }\n th.visible-sm,\n td.visible-sm {\n display: table-cell !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-block {\n display: block !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-inline {\n display: inline !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md {\n display: block !important;\n }\n table.visible-md {\n display: table !important;\n }\n tr.visible-md {\n display: table-row !important;\n }\n th.visible-md,\n td.visible-md {\n display: table-cell !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-block {\n display: block !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-inline {\n display: inline !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg {\n display: block !important;\n }\n table.visible-lg {\n display: table !important;\n }\n tr.visible-lg {\n display: table-row !important;\n }\n th.visible-lg,\n td.visible-lg {\n display: table-cell !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-block {\n display: block !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-inline {\n display: inline !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-inline-block {\n display: inline-block !important;\n }\n}\n@media (max-width: 767px) {\n .hidden-xs {\n display: none !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .hidden-sm {\n display: none !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .hidden-md {\n display: none !important;\n }\n}\n@media (min-width: 1200px) {\n .hidden-lg {\n display: none !important;\n }\n}\n.visible-print {\n display: none !important;\n}\n@media print {\n .visible-print {\n display: block !important;\n }\n table.visible-print {\n display: table !important;\n }\n tr.visible-print {\n display: table-row !important;\n }\n th.visible-print,\n td.visible-print {\n display: table-cell !important;\n }\n}\n.visible-print-block {\n display: none !important;\n}\n@media print {\n .visible-print-block {\n display: block !important;\n }\n}\n.visible-print-inline {\n display: none !important;\n}\n@media print {\n 
.visible-print-inline {\n display: inline !important;\n }\n}\n.visible-print-inline-block {\n display: none !important;\n}\n@media print {\n .visible-print-inline-block {\n display: inline-block !important;\n }\n}\n@media print {\n .hidden-print {\n display: none !important;\n }\n}\n/*!\n*\n* Font Awesome\n*\n*/\n/*!\n * Font Awesome 4.2.0 by @davegandy - http://fontawesome.io - @fontawesome\n * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)\n */\n/* FONT PATH\n * -------------------------- */\n@font-face {\n font-family: 'FontAwesome';\n src: url('../components/font-awesome/fonts/fontawesome-webfont.eot?v=4.2.0');\n src: url('../components/font-awesome/fonts/fontawesome-webfont.eot?#iefix&v=4.2.0') format('embedded-opentype'), url('../components/font-awesome/fonts/fontawesome-webfont.woff?v=4.2.0') format('woff'), url('../components/font-awesome/fonts/fontawesome-webfont.ttf?v=4.2.0') format('truetype'), url('../components/font-awesome/fonts/fontawesome-webfont.svg?v=4.2.0#fontawesomeregular') format('svg');\n font-weight: normal;\n font-style: normal;\n}\n.fa {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n/* makes the font 33% larger relative to the icon container */\n.fa-lg {\n font-size: 1.33333333em;\n line-height: 0.75em;\n vertical-align: -15%;\n}\n.fa-2x {\n font-size: 2em;\n}\n.fa-3x {\n font-size: 3em;\n}\n.fa-4x {\n font-size: 4em;\n}\n.fa-5x {\n font-size: 5em;\n}\n.fa-fw {\n width: 1.28571429em;\n text-align: center;\n}\n.fa-ul {\n padding-left: 0;\n margin-left: 2.14285714em;\n list-style-type: none;\n}\n.fa-ul > li {\n position: relative;\n}\n.fa-li {\n position: absolute;\n left: -2.14285714em;\n width: 2.14285714em;\n top: 0.14285714em;\n text-align: center;\n}\n.fa-li.fa-lg {\n left: -1.85714286em;\n}\n.fa-border {\n padding: .2em .25em .15em;\n border: solid 0.08em #eee;\n border-radius: .1em;\n}\n.pull-right {\n float: right;\n}\n.pull-left {\n float: left;\n}\n.fa.pull-left {\n margin-right: .3em;\n}\n.fa.pull-right {\n margin-left: .3em;\n}\n.fa-spin {\n -webkit-animation: fa-spin 2s infinite linear;\n animation: fa-spin 2s infinite linear;\n}\n@-webkit-keyframes fa-spin {\n 0% {\n -webkit-transform: rotate(0deg);\n transform: rotate(0deg);\n }\n 100% {\n -webkit-transform: rotate(359deg);\n transform: rotate(359deg);\n }\n}\n@keyframes fa-spin {\n 0% {\n -webkit-transform: rotate(0deg);\n transform: rotate(0deg);\n }\n 100% {\n -webkit-transform: rotate(359deg);\n transform: rotate(359deg);\n }\n}\n.fa-rotate-90 {\n filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);\n -webkit-transform: rotate(90deg);\n -ms-transform: rotate(90deg);\n transform: rotate(90deg);\n}\n.fa-rotate-180 {\n filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);\n -webkit-transform: rotate(180deg);\n -ms-transform: rotate(180deg);\n transform: rotate(180deg);\n}\n.fa-rotate-270 {\n filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);\n -webkit-transform: rotate(270deg);\n -ms-transform: rotate(270deg);\n transform: rotate(270deg);\n}\n.fa-flip-horizontal {\n filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);\n -webkit-transform: scale(-1, 1);\n -ms-transform: scale(-1, 1);\n transform: scale(-1, 1);\n}\n.fa-flip-vertical {\n filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);\n -webkit-transform: scale(1, -1);\n 
-ms-transform: scale(1, -1);\n transform: scale(1, -1);\n}\n:root .fa-rotate-90,\n:root .fa-rotate-180,\n:root .fa-rotate-270,\n:root .fa-flip-horizontal,\n:root .fa-flip-vertical {\n filter: none;\n}\n.fa-stack {\n position: relative;\n display: inline-block;\n width: 2em;\n height: 2em;\n line-height: 2em;\n vertical-align: middle;\n}\n.fa-stack-1x,\n.fa-stack-2x {\n position: absolute;\n left: 0;\n width: 100%;\n text-align: center;\n}\n.fa-stack-1x {\n line-height: inherit;\n}\n.fa-stack-2x {\n font-size: 2em;\n}\n.fa-inverse {\n color: #fff;\n}\n/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen\n readers do not read off random characters that represent icons */\n.fa-glass:before {\n content: \"\\f000\";\n}\n.fa-music:before {\n content: \"\\f001\";\n}\n.fa-search:before {\n content: \"\\f002\";\n}\n.fa-envelope-o:before {\n content: \"\\f003\";\n}\n.fa-heart:before {\n content: \"\\f004\";\n}\n.fa-star:before {\n content: \"\\f005\";\n}\n.fa-star-o:before {\n content: \"\\f006\";\n}\n.fa-user:before {\n content: \"\\f007\";\n}\n.fa-film:before {\n content: \"\\f008\";\n}\n.fa-th-large:before {\n content: \"\\f009\";\n}\n.fa-th:before {\n content: \"\\f00a\";\n}\n.fa-th-list:before {\n content: \"\\f00b\";\n}\n.fa-check:before {\n content: \"\\f00c\";\n}\n.fa-remove:before,\n.fa-close:before,\n.fa-times:before {\n content: \"\\f00d\";\n}\n.fa-search-plus:before {\n content: \"\\f00e\";\n}\n.fa-search-minus:before {\n content: \"\\f010\";\n}\n.fa-power-off:before {\n content: \"\\f011\";\n}\n.fa-signal:before {\n content: \"\\f012\";\n}\n.fa-gear:before,\n.fa-cog:before {\n content: \"\\f013\";\n}\n.fa-trash-o:before {\n content: \"\\f014\";\n}\n.fa-home:before {\n content: \"\\f015\";\n}\n.fa-file-o:before {\n content: \"\\f016\";\n}\n.fa-clock-o:before {\n content: \"\\f017\";\n}\n.fa-road:before {\n content: \"\\f018\";\n}\n.fa-download:before {\n content: \"\\f019\";\n}\n.fa-arrow-circle-o-down:before {\n content: \"\\f01a\";\n}\n.fa-arrow-circle-o-up:before {\n content: \"\\f01b\";\n}\n.fa-inbox:before {\n content: \"\\f01c\";\n}\n.fa-play-circle-o:before {\n content: \"\\f01d\";\n}\n.fa-rotate-right:before,\n.fa-repeat:before {\n content: \"\\f01e\";\n}\n.fa-refresh:before {\n content: \"\\f021\";\n}\n.fa-list-alt:before {\n content: \"\\f022\";\n}\n.fa-lock:before {\n content: \"\\f023\";\n}\n.fa-flag:before {\n content: \"\\f024\";\n}\n.fa-headphones:before {\n content: \"\\f025\";\n}\n.fa-volume-off:before {\n content: \"\\f026\";\n}\n.fa-volume-down:before {\n content: \"\\f027\";\n}\n.fa-volume-up:before {\n content: \"\\f028\";\n}\n.fa-qrcode:before {\n content: \"\\f029\";\n}\n.fa-barcode:before {\n content: \"\\f02a\";\n}\n.fa-tag:before {\n content: \"\\f02b\";\n}\n.fa-tags:before {\n content: \"\\f02c\";\n}\n.fa-book:before {\n content: \"\\f02d\";\n}\n.fa-bookmark:before {\n content: \"\\f02e\";\n}\n.fa-print:before {\n content: \"\\f02f\";\n}\n.fa-camera:before {\n content: \"\\f030\";\n}\n.fa-font:before {\n content: \"\\f031\";\n}\n.fa-bold:before {\n content: \"\\f032\";\n}\n.fa-italic:before {\n content: \"\\f033\";\n}\n.fa-text-height:before {\n content: \"\\f034\";\n}\n.fa-text-width:before {\n content: \"\\f035\";\n}\n.fa-align-left:before {\n content: \"\\f036\";\n}\n.fa-align-center:before {\n content: \"\\f037\";\n}\n.fa-align-right:before {\n content: \"\\f038\";\n}\n.fa-align-justify:before {\n content: \"\\f039\";\n}\n.fa-list:before {\n content: \"\\f03a\";\n}\n.fa-dedent:before,\n.fa-outdent:before {\n content: 
\"\\f03b\";\n}\n.fa-indent:before {\n content: \"\\f03c\";\n}\n.fa-video-camera:before {\n content: \"\\f03d\";\n}\n.fa-photo:before,\n.fa-image:before,\n.fa-picture-o:before {\n content: \"\\f03e\";\n}\n.fa-pencil:before {\n content: \"\\f040\";\n}\n.fa-map-marker:before {\n content: \"\\f041\";\n}\n.fa-adjust:before {\n content: \"\\f042\";\n}\n.fa-tint:before {\n content: \"\\f043\";\n}\n.fa-edit:before,\n.fa-pencil-square-o:before {\n content: \"\\f044\";\n}\n.fa-share-square-o:before {\n content: \"\\f045\";\n}\n.fa-check-square-o:before {\n content: \"\\f046\";\n}\n.fa-arrows:before {\n content: \"\\f047\";\n}\n.fa-step-backward:before {\n content: \"\\f048\";\n}\n.fa-fast-backward:before {\n content: \"\\f049\";\n}\n.fa-backward:before {\n content: \"\\f04a\";\n}\n.fa-play:before {\n content: \"\\f04b\";\n}\n.fa-pause:before {\n content: \"\\f04c\";\n}\n.fa-stop:before {\n content: \"\\f04d\";\n}\n.fa-forward:before {\n content: \"\\f04e\";\n}\n.fa-fast-forward:before {\n content: \"\\f050\";\n}\n.fa-step-forward:before {\n content: \"\\f051\";\n}\n.fa-eject:before {\n content: \"\\f052\";\n}\n.fa-chevron-left:before {\n content: \"\\f053\";\n}\n.fa-chevron-right:before {\n content: \"\\f054\";\n}\n.fa-plus-circle:before {\n content: \"\\f055\";\n}\n.fa-minus-circle:before {\n content: \"\\f056\";\n}\n.fa-times-circle:before {\n content: \"\\f057\";\n}\n.fa-check-circle:before {\n content: \"\\f058\";\n}\n.fa-question-circle:before {\n content: \"\\f059\";\n}\n.fa-info-circle:before {\n content: \"\\f05a\";\n}\n.fa-crosshairs:before {\n content: \"\\f05b\";\n}\n.fa-times-circle-o:before {\n content: \"\\f05c\";\n}\n.fa-check-circle-o:before {\n content: \"\\f05d\";\n}\n.fa-ban:before {\n content: \"\\f05e\";\n}\n.fa-arrow-left:before {\n content: \"\\f060\";\n}\n.fa-arrow-right:before {\n content: \"\\f061\";\n}\n.fa-arrow-up:before {\n content: \"\\f062\";\n}\n.fa-arrow-down:before {\n content: \"\\f063\";\n}\n.fa-mail-forward:before,\n.fa-share:before {\n content: \"\\f064\";\n}\n.fa-expand:before {\n content: \"\\f065\";\n}\n.fa-compress:before {\n content: \"\\f066\";\n}\n.fa-plus:before {\n content: \"\\f067\";\n}\n.fa-minus:before {\n content: \"\\f068\";\n}\n.fa-asterisk:before {\n content: \"\\f069\";\n}\n.fa-exclamation-circle:before {\n content: \"\\f06a\";\n}\n.fa-gift:before {\n content: \"\\f06b\";\n}\n.fa-leaf:before {\n content: \"\\f06c\";\n}\n.fa-fire:before {\n content: \"\\f06d\";\n}\n.fa-eye:before {\n content: \"\\f06e\";\n}\n.fa-eye-slash:before {\n content: \"\\f070\";\n}\n.fa-warning:before,\n.fa-exclamation-triangle:before {\n content: \"\\f071\";\n}\n.fa-plane:before {\n content: \"\\f072\";\n}\n.fa-calendar:before {\n content: \"\\f073\";\n}\n.fa-random:before {\n content: \"\\f074\";\n}\n.fa-comment:before {\n content: \"\\f075\";\n}\n.fa-magnet:before {\n content: \"\\f076\";\n}\n.fa-chevron-up:before {\n content: \"\\f077\";\n}\n.fa-chevron-down:before {\n content: \"\\f078\";\n}\n.fa-retweet:before {\n content: \"\\f079\";\n}\n.fa-shopping-cart:before {\n content: \"\\f07a\";\n}\n.fa-folder:before {\n content: \"\\f07b\";\n}\n.fa-folder-open:before {\n content: \"\\f07c\";\n}\n.fa-arrows-v:before {\n content: \"\\f07d\";\n}\n.fa-arrows-h:before {\n content: \"\\f07e\";\n}\n.fa-bar-chart-o:before,\n.fa-bar-chart:before {\n content: \"\\f080\";\n}\n.fa-twitter-square:before {\n content: \"\\f081\";\n}\n.fa-facebook-square:before {\n content: \"\\f082\";\n}\n.fa-camera-retro:before {\n content: \"\\f083\";\n}\n.fa-key:before {\n content: 
\"\\f084\";\n}\n.fa-gears:before,\n.fa-cogs:before {\n content: \"\\f085\";\n}\n.fa-comments:before {\n content: \"\\f086\";\n}\n.fa-thumbs-o-up:before {\n content: \"\\f087\";\n}\n.fa-thumbs-o-down:before {\n content: \"\\f088\";\n}\n.fa-star-half:before {\n content: \"\\f089\";\n}\n.fa-heart-o:before {\n content: \"\\f08a\";\n}\n.fa-sign-out:before {\n content: \"\\f08b\";\n}\n.fa-linkedin-square:before {\n content: \"\\f08c\";\n}\n.fa-thumb-tack:before {\n content: \"\\f08d\";\n}\n.fa-external-link:before {\n content: \"\\f08e\";\n}\n.fa-sign-in:before {\n content: \"\\f090\";\n}\n.fa-trophy:before {\n content: \"\\f091\";\n}\n.fa-github-square:before {\n content: \"\\f092\";\n}\n.fa-upload:before {\n content: \"\\f093\";\n}\n.fa-lemon-o:before {\n content: \"\\f094\";\n}\n.fa-phone:before {\n content: \"\\f095\";\n}\n.fa-square-o:before {\n content: \"\\f096\";\n}\n.fa-bookmark-o:before {\n content: \"\\f097\";\n}\n.fa-phone-square:before {\n content: \"\\f098\";\n}\n.fa-twitter:before {\n content: \"\\f099\";\n}\n.fa-facebook:before {\n content: \"\\f09a\";\n}\n.fa-github:before {\n content: \"\\f09b\";\n}\n.fa-unlock:before {\n content: \"\\f09c\";\n}\n.fa-credit-card:before {\n content: \"\\f09d\";\n}\n.fa-rss:before {\n content: \"\\f09e\";\n}\n.fa-hdd-o:before {\n content: \"\\f0a0\";\n}\n.fa-bullhorn:before {\n content: \"\\f0a1\";\n}\n.fa-bell:before {\n content: \"\\f0f3\";\n}\n.fa-certificate:before {\n content: \"\\f0a3\";\n}\n.fa-hand-o-right:before {\n content: \"\\f0a4\";\n}\n.fa-hand-o-left:before {\n content: \"\\f0a5\";\n}\n.fa-hand-o-up:before {\n content: \"\\f0a6\";\n}\n.fa-hand-o-down:before {\n content: \"\\f0a7\";\n}\n.fa-arrow-circle-left:before {\n content: \"\\f0a8\";\n}\n.fa-arrow-circle-right:before {\n content: \"\\f0a9\";\n}\n.fa-arrow-circle-up:before {\n content: \"\\f0aa\";\n}\n.fa-arrow-circle-down:before {\n content: \"\\f0ab\";\n}\n.fa-globe:before {\n content: \"\\f0ac\";\n}\n.fa-wrench:before {\n content: \"\\f0ad\";\n}\n.fa-tasks:before {\n content: \"\\f0ae\";\n}\n.fa-filter:before {\n content: \"\\f0b0\";\n}\n.fa-briefcase:before {\n content: \"\\f0b1\";\n}\n.fa-arrows-alt:before {\n content: \"\\f0b2\";\n}\n.fa-group:before,\n.fa-users:before {\n content: \"\\f0c0\";\n}\n.fa-chain:before,\n.fa-link:before {\n content: \"\\f0c1\";\n}\n.fa-cloud:before {\n content: \"\\f0c2\";\n}\n.fa-flask:before {\n content: \"\\f0c3\";\n}\n.fa-cut:before,\n.fa-scissors:before {\n content: \"\\f0c4\";\n}\n.fa-copy:before,\n.fa-files-o:before {\n content: \"\\f0c5\";\n}\n.fa-paperclip:before {\n content: \"\\f0c6\";\n}\n.fa-save:before,\n.fa-floppy-o:before {\n content: \"\\f0c7\";\n}\n.fa-square:before {\n content: \"\\f0c8\";\n}\n.fa-navicon:before,\n.fa-reorder:before,\n.fa-bars:before {\n content: \"\\f0c9\";\n}\n.fa-list-ul:before {\n content: \"\\f0ca\";\n}\n.fa-list-ol:before {\n content: \"\\f0cb\";\n}\n.fa-strikethrough:before {\n content: \"\\f0cc\";\n}\n.fa-underline:before {\n content: \"\\f0cd\";\n}\n.fa-table:before {\n content: \"\\f0ce\";\n}\n.fa-magic:before {\n content: \"\\f0d0\";\n}\n.fa-truck:before {\n content: \"\\f0d1\";\n}\n.fa-pinterest:before {\n content: \"\\f0d2\";\n}\n.fa-pinterest-square:before {\n content: \"\\f0d3\";\n}\n.fa-google-plus-square:before {\n content: \"\\f0d4\";\n}\n.fa-google-plus:before {\n content: \"\\f0d5\";\n}\n.fa-money:before {\n content: \"\\f0d6\";\n}\n.fa-caret-down:before {\n content: \"\\f0d7\";\n}\n.fa-caret-up:before {\n content: \"\\f0d8\";\n}\n.fa-caret-left:before {\n content: 
\"\\f0d9\";\n}\n.fa-caret-right:before {\n content: \"\\f0da\";\n}\n.fa-columns:before {\n content: \"\\f0db\";\n}\n.fa-unsorted:before,\n.fa-sort:before {\n content: \"\\f0dc\";\n}\n.fa-sort-down:before,\n.fa-sort-desc:before {\n content: \"\\f0dd\";\n}\n.fa-sort-up:before,\n.fa-sort-asc:before {\n content: \"\\f0de\";\n}\n.fa-envelope:before {\n content: \"\\f0e0\";\n}\n.fa-linkedin:before {\n content: \"\\f0e1\";\n}\n.fa-rotate-left:before,\n.fa-undo:before {\n content: \"\\f0e2\";\n}\n.fa-legal:before,\n.fa-gavel:before {\n content: \"\\f0e3\";\n}\n.fa-dashboard:before,\n.fa-tachometer:before {\n content: \"\\f0e4\";\n}\n.fa-comment-o:before {\n content: \"\\f0e5\";\n}\n.fa-comments-o:before {\n content: \"\\f0e6\";\n}\n.fa-flash:before,\n.fa-bolt:before {\n content: \"\\f0e7\";\n}\n.fa-sitemap:before {\n content: \"\\f0e8\";\n}\n.fa-umbrella:before {\n content: \"\\f0e9\";\n}\n.fa-paste:before,\n.fa-clipboard:before {\n content: \"\\f0ea\";\n}\n.fa-lightbulb-o:before {\n content: \"\\f0eb\";\n}\n.fa-exchange:before {\n content: \"\\f0ec\";\n}\n.fa-cloud-download:before {\n content: \"\\f0ed\";\n}\n.fa-cloud-upload:before {\n content: \"\\f0ee\";\n}\n.fa-user-md:before {\n content: \"\\f0f0\";\n}\n.fa-stethoscope:before {\n content: \"\\f0f1\";\n}\n.fa-suitcase:before {\n content: \"\\f0f2\";\n}\n.fa-bell-o:before {\n content: \"\\f0a2\";\n}\n.fa-coffee:before {\n content: \"\\f0f4\";\n}\n.fa-cutlery:before {\n content: \"\\f0f5\";\n}\n.fa-file-text-o:before {\n content: \"\\f0f6\";\n}\n.fa-building-o:before {\n content: \"\\f0f7\";\n}\n.fa-hospital-o:before {\n content: \"\\f0f8\";\n}\n.fa-ambulance:before {\n content: \"\\f0f9\";\n}\n.fa-medkit:before {\n content: \"\\f0fa\";\n}\n.fa-fighter-jet:before {\n content: \"\\f0fb\";\n}\n.fa-beer:before {\n content: \"\\f0fc\";\n}\n.fa-h-square:before {\n content: \"\\f0fd\";\n}\n.fa-plus-square:before {\n content: \"\\f0fe\";\n}\n.fa-angle-double-left:before {\n content: \"\\f100\";\n}\n.fa-angle-double-right:before {\n content: \"\\f101\";\n}\n.fa-angle-double-up:before {\n content: \"\\f102\";\n}\n.fa-angle-double-down:before {\n content: \"\\f103\";\n}\n.fa-angle-left:before {\n content: \"\\f104\";\n}\n.fa-angle-right:before {\n content: \"\\f105\";\n}\n.fa-angle-up:before {\n content: \"\\f106\";\n}\n.fa-angle-down:before {\n content: \"\\f107\";\n}\n.fa-desktop:before {\n content: \"\\f108\";\n}\n.fa-laptop:before {\n content: \"\\f109\";\n}\n.fa-tablet:before {\n content: \"\\f10a\";\n}\n.fa-mobile-phone:before,\n.fa-mobile:before {\n content: \"\\f10b\";\n}\n.fa-circle-o:before {\n content: \"\\f10c\";\n}\n.fa-quote-left:before {\n content: \"\\f10d\";\n}\n.fa-quote-right:before {\n content: \"\\f10e\";\n}\n.fa-spinner:before {\n content: \"\\f110\";\n}\n.fa-circle:before {\n content: \"\\f111\";\n}\n.fa-mail-reply:before,\n.fa-reply:before {\n content: \"\\f112\";\n}\n.fa-github-alt:before {\n content: \"\\f113\";\n}\n.fa-folder-o:before {\n content: \"\\f114\";\n}\n.fa-folder-open-o:before {\n content: \"\\f115\";\n}\n.fa-smile-o:before {\n content: \"\\f118\";\n}\n.fa-frown-o:before {\n content: \"\\f119\";\n}\n.fa-meh-o:before {\n content: \"\\f11a\";\n}\n.fa-gamepad:before {\n content: \"\\f11b\";\n}\n.fa-keyboard-o:before {\n content: \"\\f11c\";\n}\n.fa-flag-o:before {\n content: \"\\f11d\";\n}\n.fa-flag-checkered:before {\n content: \"\\f11e\";\n}\n.fa-terminal:before {\n content: \"\\f120\";\n}\n.fa-code:before {\n content: \"\\f121\";\n}\n.fa-mail-reply-all:before,\n.fa-reply-all:before {\n content: 
\"\\f122\";\n}\n.fa-star-half-empty:before,\n.fa-star-half-full:before,\n.fa-star-half-o:before {\n content: \"\\f123\";\n}\n.fa-location-arrow:before {\n content: \"\\f124\";\n}\n.fa-crop:before {\n content: \"\\f125\";\n}\n.fa-code-fork:before {\n content: \"\\f126\";\n}\n.fa-unlink:before,\n.fa-chain-broken:before {\n content: \"\\f127\";\n}\n.fa-question:before {\n content: \"\\f128\";\n}\n.fa-info:before {\n content: \"\\f129\";\n}\n.fa-exclamation:before {\n content: \"\\f12a\";\n}\n.fa-superscript:before {\n content: \"\\f12b\";\n}\n.fa-subscript:before {\n content: \"\\f12c\";\n}\n.fa-eraser:before {\n content: \"\\f12d\";\n}\n.fa-puzzle-piece:before {\n content: \"\\f12e\";\n}\n.fa-microphone:before {\n content: \"\\f130\";\n}\n.fa-microphone-slash:before {\n content: \"\\f131\";\n}\n.fa-shield:before {\n content: \"\\f132\";\n}\n.fa-calendar-o:before {\n content: \"\\f133\";\n}\n.fa-fire-extinguisher:before {\n content: \"\\f134\";\n}\n.fa-rocket:before {\n content: \"\\f135\";\n}\n.fa-maxcdn:before {\n content: \"\\f136\";\n}\n.fa-chevron-circle-left:before {\n content: \"\\f137\";\n}\n.fa-chevron-circle-right:before {\n content: \"\\f138\";\n}\n.fa-chevron-circle-up:before {\n content: \"\\f139\";\n}\n.fa-chevron-circle-down:before {\n content: \"\\f13a\";\n}\n.fa-html5:before {\n content: \"\\f13b\";\n}\n.fa-css3:before {\n content: \"\\f13c\";\n}\n.fa-anchor:before {\n content: \"\\f13d\";\n}\n.fa-unlock-alt:before {\n content: \"\\f13e\";\n}\n.fa-bullseye:before {\n content: \"\\f140\";\n}\n.fa-ellipsis-h:before {\n content: \"\\f141\";\n}\n.fa-ellipsis-v:before {\n content: \"\\f142\";\n}\n.fa-rss-square:before {\n content: \"\\f143\";\n}\n.fa-play-circle:before {\n content: \"\\f144\";\n}\n.fa-ticket:before {\n content: \"\\f145\";\n}\n.fa-minus-square:before {\n content: \"\\f146\";\n}\n.fa-minus-square-o:before {\n content: \"\\f147\";\n}\n.fa-level-up:before {\n content: \"\\f148\";\n}\n.fa-level-down:before {\n content: \"\\f149\";\n}\n.fa-check-square:before {\n content: \"\\f14a\";\n}\n.fa-pencil-square:before {\n content: \"\\f14b\";\n}\n.fa-external-link-square:before {\n content: \"\\f14c\";\n}\n.fa-share-square:before {\n content: \"\\f14d\";\n}\n.fa-compass:before {\n content: \"\\f14e\";\n}\n.fa-toggle-down:before,\n.fa-caret-square-o-down:before {\n content: \"\\f150\";\n}\n.fa-toggle-up:before,\n.fa-caret-square-o-up:before {\n content: \"\\f151\";\n}\n.fa-toggle-right:before,\n.fa-caret-square-o-right:before {\n content: \"\\f152\";\n}\n.fa-euro:before,\n.fa-eur:before {\n content: \"\\f153\";\n}\n.fa-gbp:before {\n content: \"\\f154\";\n}\n.fa-dollar:before,\n.fa-usd:before {\n content: \"\\f155\";\n}\n.fa-rupee:before,\n.fa-inr:before {\n content: \"\\f156\";\n}\n.fa-cny:before,\n.fa-rmb:before,\n.fa-yen:before,\n.fa-jpy:before {\n content: \"\\f157\";\n}\n.fa-ruble:before,\n.fa-rouble:before,\n.fa-rub:before {\n content: \"\\f158\";\n}\n.fa-won:before,\n.fa-krw:before {\n content: \"\\f159\";\n}\n.fa-bitcoin:before,\n.fa-btc:before {\n content: \"\\f15a\";\n}\n.fa-file:before {\n content: \"\\f15b\";\n}\n.fa-file-text:before {\n content: \"\\f15c\";\n}\n.fa-sort-alpha-asc:before {\n content: \"\\f15d\";\n}\n.fa-sort-alpha-desc:before {\n content: \"\\f15e\";\n}\n.fa-sort-amount-asc:before {\n content: \"\\f160\";\n}\n.fa-sort-amount-desc:before {\n content: \"\\f161\";\n}\n.fa-sort-numeric-asc:before {\n content: \"\\f162\";\n}\n.fa-sort-numeric-desc:before {\n content: \"\\f163\";\n}\n.fa-thumbs-up:before {\n content: 
\"\\f164\";\n}\n.fa-thumbs-down:before {\n content: \"\\f165\";\n}\n.fa-youtube-square:before {\n content: \"\\f166\";\n}\n.fa-youtube:before {\n content: \"\\f167\";\n}\n.fa-xing:before {\n content: \"\\f168\";\n}\n.fa-xing-square:before {\n content: \"\\f169\";\n}\n.fa-youtube-play:before {\n content: \"\\f16a\";\n}\n.fa-dropbox:before {\n content: \"\\f16b\";\n}\n.fa-stack-overflow:before {\n content: \"\\f16c\";\n}\n.fa-instagram:before {\n content: \"\\f16d\";\n}\n.fa-flickr:before {\n content: \"\\f16e\";\n}\n.fa-adn:before {\n content: \"\\f170\";\n}\n.fa-bitbucket:before {\n content: \"\\f171\";\n}\n.fa-bitbucket-square:before {\n content: \"\\f172\";\n}\n.fa-tumblr:before {\n content: \"\\f173\";\n}\n.fa-tumblr-square:before {\n content: \"\\f174\";\n}\n.fa-long-arrow-down:before {\n content: \"\\f175\";\n}\n.fa-long-arrow-up:before {\n content: \"\\f176\";\n}\n.fa-long-arrow-left:before {\n content: \"\\f177\";\n}\n.fa-long-arrow-right:before {\n content: \"\\f178\";\n}\n.fa-apple:before {\n content: \"\\f179\";\n}\n.fa-windows:before {\n content: \"\\f17a\";\n}\n.fa-android:before {\n content: \"\\f17b\";\n}\n.fa-linux:before {\n content: \"\\f17c\";\n}\n.fa-dribbble:before {\n content: \"\\f17d\";\n}\n.fa-skype:before {\n content: \"\\f17e\";\n}\n.fa-foursquare:before {\n content: \"\\f180\";\n}\n.fa-trello:before {\n content: \"\\f181\";\n}\n.fa-female:before {\n content: \"\\f182\";\n}\n.fa-male:before {\n content: \"\\f183\";\n}\n.fa-gittip:before {\n content: \"\\f184\";\n}\n.fa-sun-o:before {\n content: \"\\f185\";\n}\n.fa-moon-o:before {\n content: \"\\f186\";\n}\n.fa-archive:before {\n content: \"\\f187\";\n}\n.fa-bug:before {\n content: \"\\f188\";\n}\n.fa-vk:before {\n content: \"\\f189\";\n}\n.fa-weibo:before {\n content: \"\\f18a\";\n}\n.fa-renren:before {\n content: \"\\f18b\";\n}\n.fa-pagelines:before {\n content: \"\\f18c\";\n}\n.fa-stack-exchange:before {\n content: \"\\f18d\";\n}\n.fa-arrow-circle-o-right:before {\n content: \"\\f18e\";\n}\n.fa-arrow-circle-o-left:before {\n content: \"\\f190\";\n}\n.fa-toggle-left:before,\n.fa-caret-square-o-left:before {\n content: \"\\f191\";\n}\n.fa-dot-circle-o:before {\n content: \"\\f192\";\n}\n.fa-wheelchair:before {\n content: \"\\f193\";\n}\n.fa-vimeo-square:before {\n content: \"\\f194\";\n}\n.fa-turkish-lira:before,\n.fa-try:before {\n content: \"\\f195\";\n}\n.fa-plus-square-o:before {\n content: \"\\f196\";\n}\n.fa-space-shuttle:before {\n content: \"\\f197\";\n}\n.fa-slack:before {\n content: \"\\f198\";\n}\n.fa-envelope-square:before {\n content: \"\\f199\";\n}\n.fa-wordpress:before {\n content: \"\\f19a\";\n}\n.fa-openid:before {\n content: \"\\f19b\";\n}\n.fa-institution:before,\n.fa-bank:before,\n.fa-university:before {\n content: \"\\f19c\";\n}\n.fa-mortar-board:before,\n.fa-graduation-cap:before {\n content: \"\\f19d\";\n}\n.fa-yahoo:before {\n content: \"\\f19e\";\n}\n.fa-google:before {\n content: \"\\f1a0\";\n}\n.fa-reddit:before {\n content: \"\\f1a1\";\n}\n.fa-reddit-square:before {\n content: \"\\f1a2\";\n}\n.fa-stumbleupon-circle:before {\n content: \"\\f1a3\";\n}\n.fa-stumbleupon:before {\n content: \"\\f1a4\";\n}\n.fa-delicious:before {\n content: \"\\f1a5\";\n}\n.fa-digg:before {\n content: \"\\f1a6\";\n}\n.fa-pied-piper:before {\n content: \"\\f1a7\";\n}\n.fa-pied-piper-alt:before {\n content: \"\\f1a8\";\n}\n.fa-drupal:before {\n content: \"\\f1a9\";\n}\n.fa-joomla:before {\n content: \"\\f1aa\";\n}\n.fa-language:before {\n content: \"\\f1ab\";\n}\n.fa-fax:before {\n content: 
\"\\f1ac\";\n}\n.fa-building:before {\n content: \"\\f1ad\";\n}\n.fa-child:before {\n content: \"\\f1ae\";\n}\n.fa-paw:before {\n content: \"\\f1b0\";\n}\n.fa-spoon:before {\n content: \"\\f1b1\";\n}\n.fa-cube:before {\n content: \"\\f1b2\";\n}\n.fa-cubes:before {\n content: \"\\f1b3\";\n}\n.fa-behance:before {\n content: \"\\f1b4\";\n}\n.fa-behance-square:before {\n content: \"\\f1b5\";\n}\n.fa-steam:before {\n content: \"\\f1b6\";\n}\n.fa-steam-square:before {\n content: \"\\f1b7\";\n}\n.fa-recycle:before {\n content: \"\\f1b8\";\n}\n.fa-automobile:before,\n.fa-car:before {\n content: \"\\f1b9\";\n}\n.fa-cab:before,\n.fa-taxi:before {\n content: \"\\f1ba\";\n}\n.fa-tree:before {\n content: \"\\f1bb\";\n}\n.fa-spotify:before {\n content: \"\\f1bc\";\n}\n.fa-deviantart:before {\n content: \"\\f1bd\";\n}\n.fa-soundcloud:before {\n content: \"\\f1be\";\n}\n.fa-database:before {\n content: \"\\f1c0\";\n}\n.fa-file-pdf-o:before {\n content: \"\\f1c1\";\n}\n.fa-file-word-o:before {\n content: \"\\f1c2\";\n}\n.fa-file-excel-o:before {\n content: \"\\f1c3\";\n}\n.fa-file-powerpoint-o:before {\n content: \"\\f1c4\";\n}\n.fa-file-photo-o:before,\n.fa-file-picture-o:before,\n.fa-file-image-o:before {\n content: \"\\f1c5\";\n}\n.fa-file-zip-o:before,\n.fa-file-archive-o:before {\n content: \"\\f1c6\";\n}\n.fa-file-sound-o:before,\n.fa-file-audio-o:before {\n content: \"\\f1c7\";\n}\n.fa-file-movie-o:before,\n.fa-file-video-o:before {\n content: \"\\f1c8\";\n}\n.fa-file-code-o:before {\n content: \"\\f1c9\";\n}\n.fa-vine:before {\n content: \"\\f1ca\";\n}\n.fa-codepen:before {\n content: \"\\f1cb\";\n}\n.fa-jsfiddle:before {\n content: \"\\f1cc\";\n}\n.fa-life-bouy:before,\n.fa-life-buoy:before,\n.fa-life-saver:before,\n.fa-support:before,\n.fa-life-ring:before {\n content: \"\\f1cd\";\n}\n.fa-circle-o-notch:before {\n content: \"\\f1ce\";\n}\n.fa-ra:before,\n.fa-rebel:before {\n content: \"\\f1d0\";\n}\n.fa-ge:before,\n.fa-empire:before {\n content: \"\\f1d1\";\n}\n.fa-git-square:before {\n content: \"\\f1d2\";\n}\n.fa-git:before {\n content: \"\\f1d3\";\n}\n.fa-hacker-news:before {\n content: \"\\f1d4\";\n}\n.fa-tencent-weibo:before {\n content: \"\\f1d5\";\n}\n.fa-qq:before {\n content: \"\\f1d6\";\n}\n.fa-wechat:before,\n.fa-weixin:before {\n content: \"\\f1d7\";\n}\n.fa-send:before,\n.fa-paper-plane:before {\n content: \"\\f1d8\";\n}\n.fa-send-o:before,\n.fa-paper-plane-o:before {\n content: \"\\f1d9\";\n}\n.fa-history:before {\n content: \"\\f1da\";\n}\n.fa-circle-thin:before {\n content: \"\\f1db\";\n}\n.fa-header:before {\n content: \"\\f1dc\";\n}\n.fa-paragraph:before {\n content: \"\\f1dd\";\n}\n.fa-sliders:before {\n content: \"\\f1de\";\n}\n.fa-share-alt:before {\n content: \"\\f1e0\";\n}\n.fa-share-alt-square:before {\n content: \"\\f1e1\";\n}\n.fa-bomb:before {\n content: \"\\f1e2\";\n}\n.fa-soccer-ball-o:before,\n.fa-futbol-o:before {\n content: \"\\f1e3\";\n}\n.fa-tty:before {\n content: \"\\f1e4\";\n}\n.fa-binoculars:before {\n content: \"\\f1e5\";\n}\n.fa-plug:before {\n content: \"\\f1e6\";\n}\n.fa-slideshare:before {\n content: \"\\f1e7\";\n}\n.fa-twitch:before {\n content: \"\\f1e8\";\n}\n.fa-yelp:before {\n content: \"\\f1e9\";\n}\n.fa-newspaper-o:before {\n content: \"\\f1ea\";\n}\n.fa-wifi:before {\n content: \"\\f1eb\";\n}\n.fa-calculator:before {\n content: \"\\f1ec\";\n}\n.fa-paypal:before {\n content: \"\\f1ed\";\n}\n.fa-google-wallet:before {\n content: \"\\f1ee\";\n}\n.fa-cc-visa:before {\n content: \"\\f1f0\";\n}\n.fa-cc-mastercard:before {\n content: 
\"\\f1f1\";\n}\n.fa-cc-discover:before {\n content: \"\\f1f2\";\n}\n.fa-cc-amex:before {\n content: \"\\f1f3\";\n}\n.fa-cc-paypal:before {\n content: \"\\f1f4\";\n}\n.fa-cc-stripe:before {\n content: \"\\f1f5\";\n}\n.fa-bell-slash:before {\n content: \"\\f1f6\";\n}\n.fa-bell-slash-o:before {\n content: \"\\f1f7\";\n}\n.fa-trash:before {\n content: \"\\f1f8\";\n}\n.fa-copyright:before {\n content: \"\\f1f9\";\n}\n.fa-at:before {\n content: \"\\f1fa\";\n}\n.fa-eyedropper:before {\n content: \"\\f1fb\";\n}\n.fa-paint-brush:before {\n content: \"\\f1fc\";\n}\n.fa-birthday-cake:before {\n content: \"\\f1fd\";\n}\n.fa-area-chart:before {\n content: \"\\f1fe\";\n}\n.fa-pie-chart:before {\n content: \"\\f200\";\n}\n.fa-line-chart:before {\n content: \"\\f201\";\n}\n.fa-lastfm:before {\n content: \"\\f202\";\n}\n.fa-lastfm-square:before {\n content: \"\\f203\";\n}\n.fa-toggle-off:before {\n content: \"\\f204\";\n}\n.fa-toggle-on:before {\n content: \"\\f205\";\n}\n.fa-bicycle:before {\n content: \"\\f206\";\n}\n.fa-bus:before {\n content: \"\\f207\";\n}\n.fa-ioxhost:before {\n content: \"\\f208\";\n}\n.fa-angellist:before {\n content: \"\\f209\";\n}\n.fa-cc:before {\n content: \"\\f20a\";\n}\n.fa-shekel:before,\n.fa-sheqel:before,\n.fa-ils:before {\n content: \"\\f20b\";\n}\n.fa-meanpath:before {\n content: \"\\f20c\";\n}\n/*!\n*\n* IPython base\n*\n*/\n.modal.fade .modal-dialog {\n -webkit-transform: translate(0, 0);\n -ms-transform: translate(0, 0);\n -o-transform: translate(0, 0);\n transform: translate(0, 0);\n}\ncode {\n color: #000;\n}\npre {\n font-size: inherit;\n line-height: inherit;\n}\nlabel {\n font-weight: normal;\n}\n/* Make the page background atleast 100% the height of the view port */\n/* Make the page itself atleast 70% the height of the view port */\n.border-box-sizing {\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n}\n.corner-all {\n border-radius: 2px;\n}\n.no-padding {\n padding: 0px;\n}\n/* Flexible box model classes */\n/* Taken from Alex Russell http://infrequently.org/2009/08/css-3-progress/ */\n/* This file is a compatability layer. It allows the usage of flexible box \nmodel layouts accross multiple browsers, including older browsers. The newest,\nuniversal implementation of the flexible box model is used when available (see\n`Modern browsers` comments below). 
Browsers that are known to implement this \nnew spec completely include:\n\n Firefox 28.0+\n Chrome 29.0+\n Internet Explorer 11+ \n Opera 17.0+\n\nBrowsers not listed, including Safari, are supported via the styling under the\n`Old browsers` comments below.\n*/\n.hbox {\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: horizontal;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: horizontal;\n -moz-box-align: stretch;\n display: box;\n box-orient: horizontal;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: row;\n align-items: stretch;\n}\n.hbox > * {\n /* Old browsers */\n -webkit-box-flex: 0;\n -moz-box-flex: 0;\n box-flex: 0;\n /* Modern browsers */\n flex: none;\n}\n.vbox {\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: vertical;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: vertical;\n -moz-box-align: stretch;\n display: box;\n box-orient: vertical;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: column;\n align-items: stretch;\n}\n.vbox > * {\n /* Old browsers */\n -webkit-box-flex: 0;\n -moz-box-flex: 0;\n box-flex: 0;\n /* Modern browsers */\n flex: none;\n}\n.hbox.reverse,\n.vbox.reverse,\n.reverse {\n /* Old browsers */\n -webkit-box-direction: reverse;\n -moz-box-direction: reverse;\n box-direction: reverse;\n /* Modern browsers */\n flex-direction: row-reverse;\n}\n.hbox.box-flex0,\n.vbox.box-flex0,\n.box-flex0 {\n /* Old browsers */\n -webkit-box-flex: 0;\n -moz-box-flex: 0;\n box-flex: 0;\n /* Modern browsers */\n flex: none;\n width: auto;\n}\n.hbox.box-flex1,\n.vbox.box-flex1,\n.box-flex1 {\n /* Old browsers */\n -webkit-box-flex: 1;\n -moz-box-flex: 1;\n box-flex: 1;\n /* Modern browsers */\n flex: 1;\n}\n.hbox.box-flex,\n.vbox.box-flex,\n.box-flex {\n /* Old browsers */\n /* Old browsers */\n -webkit-box-flex: 1;\n -moz-box-flex: 1;\n box-flex: 1;\n /* Modern browsers */\n flex: 1;\n}\n.hbox.box-flex2,\n.vbox.box-flex2,\n.box-flex2 {\n /* Old browsers */\n -webkit-box-flex: 2;\n -moz-box-flex: 2;\n box-flex: 2;\n /* Modern browsers */\n flex: 2;\n}\n.box-group1 {\n /* Deprecated */\n -webkit-box-flex-group: 1;\n -moz-box-flex-group: 1;\n box-flex-group: 1;\n}\n.box-group2 {\n /* Deprecated */\n -webkit-box-flex-group: 2;\n -moz-box-flex-group: 2;\n box-flex-group: 2;\n}\n.hbox.start,\n.vbox.start,\n.start {\n /* Old browsers */\n -webkit-box-pack: start;\n -moz-box-pack: start;\n box-pack: start;\n /* Modern browsers */\n justify-content: flex-start;\n}\n.hbox.end,\n.vbox.end,\n.end {\n /* Old browsers */\n -webkit-box-pack: end;\n -moz-box-pack: end;\n box-pack: end;\n /* Modern browsers */\n justify-content: flex-end;\n}\n.hbox.center,\n.vbox.center,\n.center {\n /* Old browsers */\n -webkit-box-pack: center;\n -moz-box-pack: center;\n box-pack: center;\n /* Modern browsers */\n justify-content: center;\n}\n.hbox.baseline,\n.vbox.baseline,\n.baseline {\n /* Old browsers */\n -webkit-box-pack: baseline;\n -moz-box-pack: baseline;\n box-pack: baseline;\n /* Modern browsers */\n justify-content: baseline;\n}\n.hbox.stretch,\n.vbox.stretch,\n.stretch {\n /* Old browsers */\n -webkit-box-pack: stretch;\n -moz-box-pack: stretch;\n box-pack: stretch;\n /* Modern browsers */\n justify-content: stretch;\n}\n.hbox.align-start,\n.vbox.align-start,\n.align-start {\n /* Old browsers */\n -webkit-box-align: start;\n -moz-box-align: start;\n box-align: start;\n /* Modern browsers */\n align-items: 
flex-start;\n}\n.hbox.align-end,\n.vbox.align-end,\n.align-end {\n /* Old browsers */\n -webkit-box-align: end;\n -moz-box-align: end;\n box-align: end;\n /* Modern browsers */\n align-items: flex-end;\n}\n.hbox.align-center,\n.vbox.align-center,\n.align-center {\n /* Old browsers */\n -webkit-box-align: center;\n -moz-box-align: center;\n box-align: center;\n /* Modern browsers */\n align-items: center;\n}\n.hbox.align-baseline,\n.vbox.align-baseline,\n.align-baseline {\n /* Old browsers */\n -webkit-box-align: baseline;\n -moz-box-align: baseline;\n box-align: baseline;\n /* Modern browsers */\n align-items: baseline;\n}\n.hbox.align-stretch,\n.vbox.align-stretch,\n.align-stretch {\n /* Old browsers */\n -webkit-box-align: stretch;\n -moz-box-align: stretch;\n box-align: stretch;\n /* Modern browsers */\n align-items: stretch;\n}\ndiv.error {\n margin: 2em;\n text-align: center;\n}\ndiv.error > h1 {\n font-size: 500%;\n line-height: normal;\n}\ndiv.error > p {\n font-size: 200%;\n line-height: normal;\n}\ndiv.traceback-wrapper {\n text-align: left;\n max-width: 800px;\n margin: auto;\n}\n/**\n * Primary styles\n *\n * Author: Jupyter Development Team\n */\nbody {\n background-color: #fff;\n /* This makes sure that the body covers the entire window and needs to\n be in a different element than the display: box in wrapper below */\n position: absolute;\n left: 0px;\n right: 0px;\n top: 0px;\n bottom: 0px;\n overflow: visible;\n}\nbody > #header {\n /* Initially hidden to prevent FLOUC */\n display: none;\n background-color: #fff;\n /* Display over codemirror */\n position: relative;\n z-index: 100;\n}\nbody > #header #header-container {\n padding-bottom: 5px;\n padding-top: 5px;\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n}\nbody > #header .header-bar {\n width: 100%;\n height: 1px;\n background: #e7e7e7;\n margin-bottom: -1px;\n}\n@media print {\n body > #header {\n display: none !important;\n }\n}\n#header-spacer {\n width: 100%;\n visibility: hidden;\n}\n@media print {\n #header-spacer {\n display: none;\n }\n}\n#ipython_notebook {\n padding-left: 0px;\n padding-top: 1px;\n padding-bottom: 1px;\n}\n@media (max-width: 991px) {\n #ipython_notebook {\n margin-left: 10px;\n }\n}\n[dir=\"rtl\"] #ipython_notebook {\n float: right !important;\n}\n#noscript {\n width: auto;\n padding-top: 16px;\n padding-bottom: 16px;\n text-align: center;\n font-size: 22px;\n color: red;\n font-weight: bold;\n}\n#ipython_notebook img {\n height: 28px;\n}\n#site {\n width: 100%;\n display: none;\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n overflow: auto;\n}\n@media print {\n #site {\n height: auto !important;\n }\n}\n/* Smaller buttons */\n.ui-button .ui-button-text {\n padding: 0.2em 0.8em;\n font-size: 77%;\n}\ninput.ui-button {\n padding: 0.3em 0.9em;\n}\nspan#login_widget {\n float: right;\n}\nspan#login_widget > .button,\n#logout {\n color: #333;\n background-color: #fff;\n border-color: #ccc;\n}\nspan#login_widget > .button:focus,\n#logout:focus,\nspan#login_widget > .button.focus,\n#logout.focus {\n color: #333;\n background-color: #e6e6e6;\n border-color: #8c8c8c;\n}\nspan#login_widget > .button:hover,\n#logout:hover {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\nspan#login_widget > .button:active,\n#logout:active,\nspan#login_widget > .button.active,\n#logout.active,\n.open > .dropdown-togglespan#login_widget > .button,\n.open > .dropdown-toggle#logout {\n color: #333;\n 
background-color: #e6e6e6;\n border-color: #adadad;\n}\nspan#login_widget > .button:active:hover,\n#logout:active:hover,\nspan#login_widget > .button.active:hover,\n#logout.active:hover,\n.open > .dropdown-togglespan#login_widget > .button:hover,\n.open > .dropdown-toggle#logout:hover,\nspan#login_widget > .button:active:focus,\n#logout:active:focus,\nspan#login_widget > .button.active:focus,\n#logout.active:focus,\n.open > .dropdown-togglespan#login_widget > .button:focus,\n.open > .dropdown-toggle#logout:focus,\nspan#login_widget > .button:active.focus,\n#logout:active.focus,\nspan#login_widget > .button.active.focus,\n#logout.active.focus,\n.open > .dropdown-togglespan#login_widget > .button.focus,\n.open > .dropdown-toggle#logout.focus {\n color: #333;\n background-color: #d4d4d4;\n border-color: #8c8c8c;\n}\nspan#login_widget > .button:active,\n#logout:active,\nspan#login_widget > .button.active,\n#logout.active,\n.open > .dropdown-togglespan#login_widget > .button,\n.open > .dropdown-toggle#logout {\n background-image: none;\n}\nspan#login_widget > .button.disabled:hover,\n#logout.disabled:hover,\nspan#login_widget > .button[disabled]:hover,\n#logout[disabled]:hover,\nfieldset[disabled] span#login_widget > .button:hover,\nfieldset[disabled] #logout:hover,\nspan#login_widget > .button.disabled:focus,\n#logout.disabled:focus,\nspan#login_widget > .button[disabled]:focus,\n#logout[disabled]:focus,\nfieldset[disabled] span#login_widget > .button:focus,\nfieldset[disabled] #logout:focus,\nspan#login_widget > .button.disabled.focus,\n#logout.disabled.focus,\nspan#login_widget > .button[disabled].focus,\n#logout[disabled].focus,\nfieldset[disabled] span#login_widget > .button.focus,\nfieldset[disabled] #logout.focus {\n background-color: #fff;\n border-color: #ccc;\n}\nspan#login_widget > .button .badge,\n#logout .badge {\n color: #fff;\n background-color: #333;\n}\n.nav-header {\n text-transform: none;\n}\n#header > span {\n margin-top: 10px;\n}\n.modal_stretch .modal-dialog {\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: vertical;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: vertical;\n -moz-box-align: stretch;\n display: box;\n box-orient: vertical;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: column;\n align-items: stretch;\n min-height: 80vh;\n}\n.modal_stretch .modal-dialog .modal-body {\n max-height: calc(100vh - 200px);\n overflow: auto;\n flex: 1;\n}\n@media (min-width: 768px) {\n .modal .modal-dialog {\n width: 700px;\n }\n}\n@media (min-width: 768px) {\n select.form-control {\n margin-left: 12px;\n margin-right: 12px;\n }\n}\n/*!\n*\n* IPython auth\n*\n*/\n.center-nav {\n display: inline-block;\n margin-bottom: -4px;\n}\n/*!\n*\n* IPython tree view\n*\n*/\n/* We need an invisible input field on top of the sentense*/\n/* \"Drag file onto the list ...\" */\n.alternate_upload {\n background-color: none;\n display: inline;\n}\n.alternate_upload.form {\n padding: 0;\n margin: 0;\n}\n.alternate_upload input.fileinput {\n text-align: center;\n vertical-align: middle;\n display: inline;\n opacity: 0;\n z-index: 2;\n width: 12ex;\n margin-right: -12ex;\n}\n.alternate_upload .btn-upload {\n height: 22px;\n}\n/**\n * Primary styles\n *\n * Author: Jupyter Development Team\n */\n[dir=\"rtl\"] #tabs li {\n float: right;\n}\nul#tabs {\n margin-bottom: 4px;\n}\n[dir=\"rtl\"] ul#tabs {\n margin-right: 0px;\n}\nul#tabs a {\n padding-top: 6px;\n padding-bottom: 4px;\n}\nul.breadcrumb a:focus,\nul.breadcrumb a:hover 
src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYYAAAD8CAYAAABzTgP2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xd4lfX9//HnO5sMEkLCyoAAYW9iUHAwBSeKC6yKOHBb\na2vFr7Zaq63WVlHEgThwax1AFWWjKCIEZEPIYCSsJISRQfbn90cO/pKYkHFOcp/xflzXuXLOfe47\n5xVa88rnXh8xxqCUUkqd5mV1AKWUUs5Fi0EppVQ1WgxKKaWq0WJQSilVjRaDUkqparQYlFJKVaPF\noJRSqhotBqWUUtVoMSillKrGx+oATREREWG6dOlidQyllHIpGzZsyDHGRNa3nksWQ5cuXUhKSrI6\nhlJKuRQR2deQ9XRXklJKqWq0GJRSSlWjxaCUUqoaLQallFLVaDEopZSqxiHFICJviUiWiGyr430R\nkZdEJFVEtojIkCrvTRWRFNtjqiPyKKWUajpHjRjeASac4f2LgHjbYzrwKoCIhAOPA8OAROBxEWnj\noExKKaWawCHXMRhjvheRLmdYZSLwrqmcR3StiISJSEdgJLDUGJMLICJLqSyYjxyRSzVOQXEZqVn5\npGXnc7ywlOKyCorLymnl60271v60Cwmga2QQHUNbWR1VKdWMWuoCtyggo8rrTNuyupb/hohMp3K0\nQWxsbPOk9DCnSspZm36UlclZfLc7m31HCxu0XafQAAZ3bsOIbhFc3L8DYYF+zZxUKdWSWqoYpJZl\n5gzLf7vQmDnAHICEhIRa11ENs+vwSd79aR/zfzlAYUnliGB4t7ZcMzSa7u1C6N4umIhgP/x9vPHz\n8aKwpIysvGKOnCwi+XAeG/YdY8O+Y3y95RCPL9zGBT3acfXQaC7s0x4vr9r+J1VKuZKWKoZMIKbK\n62jgoG35yBrLV7VQJo+zbk8u/1mSzM97cvH38eKygZ24fGAnEuPCCfD1rnO7kABfQgJ86RYZzPBu\nEUwbEYcxhu0HT7Jg0wEWbj7Isp1H6BYZxN0ju3P5oE74eusJb0q5Kqnc7e+Ab1R5jOErY0y/Wt67\nBLgXuJjKA80vGWMSbQefNwCnz1LaCAw9fcyhLgkJCUbvldRwKUfyePbbXSzbmUX71v7cMiKOaxNi\naBPkmF1A5RWGRVsPMXtlKrsO59GlbSCPX9aXUb3aOeT7K6UcQ0Q2GGMS6lvPISMGEfmIyr/8I0Qk\nk8ozjXwBjDGvAYuoLIVUoBCYZnsvV0T+Dqy3fasn6ysF1XBFpeW8uDyF179LI8jPh4fG9+SWEXG0\n8qt7dNAU3l7CZQM7cemAjizfmcU/vtnJtHfWM75ve/56WV+iwvRgtVKuxGEjhpakI4b6bco4zkP/\n3UxKVj7XJkQz46LehDtohFCfkrIK5v6QzqzlqYjAkxP7cdWQKET0+INSVmroiEF3BLsZYwyvrkpj\n0is/kl9cxjvTzuJfVw9ssVIA8PPx4u6R3Vn64Pn0jwrlT//dzB8+2UReUWmLZVBKNZ1Lzsegapdf\nXMZD/93MN9sOc8mAjvxzUn9aB/halie6TSAf3n42s1emMnPZbn7JOM7cmxKIbx9iWSalVP10xOAm\n9uYUMPHlH1iy4wiPXtybl6cMtrQUTvP2Eu4fE88nd5xDYUk5k15Zw+qUbKtjKaXOQIvBDWzNPMFV\nr64ht6CE925N5Pbzuzrd/vyzuoQz/54RRLVpxc1vr+f9tQ2aSEopZQEtBhf3Q0oOk+f8RICvN5/d\nNZzh3SKsjlSnqLBWfHbXcC7oEclj87fx4rIUXPHkB6XcnRaDC/t22yGmvbOOmPBAvrh7ON0ig62O\nVK9gfx/euCmBq4ZE88Ky3fxrcbKWg1JORg8+u6gl2w9z74e/MCA6lLenJRLayvrjCQ3l7SU8d/UA\nAny9eHVVGkWl5fz10j5Ot/tLKU+lxeCCVuw6wj0fbqRvVCjzbkkkxAkOMjeWl5fw1BX98Pfx5q0f\n9+DjJfzfxb21HJRyAloMLmZ1SjZ3vreRXh1a866LlsJpIsJfLu1NeUUFb6zeQ1igH/eM6m51LKU8\nnhaDC9l24AR3vreBrpFBvHera+0+qouI8PhlfTlxqpTnFicT2sqXG87ubHUspTyaFoOLyMgtZNo7\n6wlt5cu8WxLdag4ELy/huWsGkldUxl8WbCMi2I8J/TpaHUspj6VnJbmA44Ul3Pz2OopLy5l3SyLt\nWwdYHcnhfL29mP27IQyOCeOBTzaxJfO41ZGU8lhaDE6utLyCO9/fQEbuKd5w89tJBPh6M+emBCKC\n/bltXhKHTpyyOpJSHkmLwck9/fVO1qbn8sxV/RnWta3VcZpdRLA/b049i8KScm59J4mC4jKrIynl\ncbQYnNinSRm8s2Yvt50bx6Qh0VbHaTE9O4Qw6/rB7Dp8kj9/vkUvgFOqhTmkGERkgogki0iqiMyo\n5f0XRGST7bFbRI5Xea+8ynsLHZHHHWzcf4zHvtzGefERzLiol9VxWtyonu340/iefL3lEG//uNfq\nOEp5FLvPShIRb2A2MI7KOZzXi8hCY8yO0+sYY/5QZf37gMFVvsUpY8wge3O4k9yCEu75YCPtQ/2Z\nNWUwPh46f/JdF3Tjl/3H+ceinQyIDiWhS7jVkZTyCI74jZMIpBpj0o0xJcDHwMQzrD8F+MgBn+uW\nKioMf/x0E0fzS3j1d0Pd6rTUxhIR/nPtQKLbtOLuDzaSlVdkdSSlPIIjiiEKyKjyOtO27DdEpDMQ\nB6yosjhARJJEZK2IXOGAPC7tjdXprEzO5rFLe9MvKtTqOJZrHeDLazcO5WRRKQ9+spmKCj3eoFRz\nc0Qx1HZzm7r+650MfGaMKa+yLNY2B+n1wEwR6Vbrh4hMtxVIUna2e070smFfLv9anMzF/Ttwo179\n+6teHVrz+GV9+SE1hzdWp1sdRym354hiyARiqryOBg7Wse5kauxGMsYctH1NB1ZR/fhD1fXmGGMS\njDEJkZGR9mZ2OieLSrn/o01EhbXimasG6M3kaph8VgwX9evAc4uT2ZyhF78p1ZwcUQzrgXgRiRMR\nPyp/+f/m7CIR6Qm0AX6qsqyNiPjbnkcAI4AdNbf1BE8s3M7hk0XMnDzIKabkdDYiwj8n9ScyxJ/7\nP/6FfL2+QalmY3cxGGPKgHuBxcBO4FNjzHYReVJELq+y6hTgY1P9pPTeQJKIbAZWAs9UPZvJUyza\neogvNh7gnlHdGRLbxuo4Tiss0I+Z1w0iI7eQJ/+33eo4SrktccWLhxISEkxSUpLVMRziyMkixs/8\nns7hgXx213B8PfTU1MZ49ttdvLoqjbduTmB0r/ZWx1HKZYjIBtsx3TPS30IWMsbw0GdbKC6t4IXr\nBmkpNNADY+Pp2T6Ehz/fyvHCEqvjKOV29DeRhf67IZPvd2fzyMW96OoC8zU7C38fb/5z7UCOFZTw\n+ELdpaSUo2kxWOTwi
SL+/tUOhsWFc8MwPTW1sfpFhXLf6HgWbDrIt9sOWR1HKbeixWABYwyPfrmV\n0vIKnr1qAF5eempqU9w9qhv9olrz2PztnCgstTqOUm5Di8ECCzcfZPmuLP50YU+6RARZHcdl+Xp7\n8cykARwrLOEfi3ZaHUcpt6HF0MJyC0p4YuF2BseGMW1EnNVxXF6/qFBuP68rnyRlsCY1x+o4SrkF\nLYYW9o9FO8krKuPZqwbgrbuQHOKBsfF0bhvII19upai0vP4NlFJnpMXQgn5KO8pnGzKZfn5Xerjx\nFJ0tLcDXm39O6s++o4XMXJZidRylXJ4WQwspLivn0S+3EhseyH2j462O43aGd4vgmqHRzF2dTsqR\nPKvjKOXStBhayKur0kjPKeDvV/SjlZ+31XHc0oyLehHk78Nj87fpdKBK2UGLoQXsO1rAK6vSuGxg\nJy7o4X53hnUWbYP9eXhCL37ek8v8TQesjqOUy9JiaAFP/m8Hvl7CY5f0tjqK25t8VgwDY8J4+utd\nnDil1zYo1RRaDM1s+c4jLN+VxQNje9C+dYDVcdyel5fw9BX9yC0o5vklyVbHUcolaTE0o6LScv72\nvx10bxfMzSO6WB3HY/SLCuWGszvz3tp97Dp80uo4SrkcLYZm9Mb36ezPLeRvl/fVO6e2sAfH9aB1\nK1+eWLhdD0Qr1Uj626qZHDx+itmrUrmkf0dGdI+wOo7HCQv0448X9mRtei7fbDtsdRylXIpDikFE\nJohIsoikisiMWt6/WUSyRWST7XFblfemikiK7THVEXmcwbPf7sIYeOTiXlZH8VjXJ8bSq0MIT3+9\nU6+IVqoR7C4GEfEGZgMXAX2AKSLSp5ZVPzHGDLI95tq2DQceB4YBicDjIuLyc1tu2HeMBZsOMv38\nrkS3CbQ6jsfy9hKeuLwvB46f4vXv0q2Oo5TLcMSIIRFINcakG2NKgI+BiQ3cdjyw1BiTa4w5BiwF\nJjggk2UqKgxPfrWD9q39ufOCblbH8Xhnd23LJf078up3qRw+UWR1HKVcgiOKIQrIqPI607aspqtE\nZIuIfCYiMY3c1mUs2HyAzRnH+fP4yqtwlfVmXNSLigr4t56+qlSDOKIYartFaM3TQP4HdDHGDACW\nAfMasW3liiLTRSRJRJKys7ObHLY5FZaU8ew3yQyIDuXKwS7db24lJjyQaSO68PnGTLYdOGF1HKWc\nniOKIROIqfI6GjhYdQVjzFFjTLHt5RvA0IZuW+V7zDHGJBhjEiIjnfO2EnNX7+HwySL+cmkfnZXN\nydw9qjttAv146usdevqqUvVwRDGsB+JFJE5E/IDJwMKqK4hIxyovLwdOT7e1GLhQRNrYDjpfaFvm\ncrLzinn9uzTG923PWV3CrY6jaght5csfxsazNj2XZTuzrI6jlFOzuxiMMWXAvVT+Qt8JfGqM2S4i\nT4rI5bbV7heR7SKyGbgfuNm2bS7wdyrLZT3wpG2Zy5m5bDfFZRU8PEFPT3VWUxJj6RYZxD8X7aS0\nvMLqOEo5LXHFYXVCQoJJSkqyOsavUrPyGT/ze24YFsvfJvazOo46g2U7jnDbu0n8/Yp+3Hh2Z6vj\nKNWiRGSDMSahvvX0ymcHeOabXQT6enP/GJ2Ax9mN6d2OxC7hvLgshYLiMqvjKOWUtBjstG5PLst2\nHuHOkd1oG+xvdRxVDxFhxsW9yMkv5o3VetGbUrXRYrCDMYZnvtlJ+9b+3DIizuo4qoGGxLbhon4d\nmPN9Otl5xfVvoJSH0WKww9IdR9i4/zgPjO2h03W6mIfG96S4rIJZK1KsjqKU09FiaKLyCsNzi5Pp\nGhnENUOjrY6jGqlrZDBTEmP48Of97M0psDqOUk5Fi6GJPt+YSUpWPg9d2BMfnWvBJd0/Oh5fby9e\nWLbb6ihKORX9jdYERaXlzFy6m4ExYUzo18HqOKqJ2rUOYNqILizcfJCdh3SmN6VO02JogvfX7uPg\niSIeHt8TEb31hSu74/xuhPj78O/FeoM9pU7TYmik/OIyXl2VxrndIxiuM7O5vNBAX+4c2Y3lu7JI\n2uuSF90r5XBaDI309g97OFpQwp/G97Q6inKQacPjiAzx51+Lk/UGe0qhxdAoxwtLmLM6nXF92jMo\nJszqOMpBWvl5c//o7qzbk8vqlByr4yhlOS2GRnj9+3Tyi8v444U9rI6iHOy6s2KJCmvFv5foqEEp\nLYYGysor4u0f93D5wE706tDa6jjKwfx8vPj92Hi2ZJ5g6Y4jVsdRylJaDA30yso0SssND4zV0YK7\nmjQ4iriIIJ5fupuKCh01KM+lxdAAh06c4sN1+7l6SDRxEUFWx1HNxMfbiwfGxrPrcB5fbz1kdRyl\nLKPF0ACzV6ZijOHe0d2tjqKa2WUDOtGzfQgvLNtNmU7mozyUQ4pBRCaISLKIpIrIjFref1BEdojI\nFhFZLiKdq7xXLiKbbI+FNbe1WuaxQj5Zn8G1CTHEhAdaHUc1My8v4Q/j4knPLmDBplqnH1fK7dld\nDCLiDcwGLgL6AFNEpE+N1X4BEowxA4DPgH9Vee+UMWaQ7XE5TmbW8lREREcLHmR83w706dial1ak\n6KhBeSRHjBgSgVRjTLoxpgT4GJhYdQVjzEpjTKHt5VrAJW5Huu9oAZ9tzOT6xFg6hrayOo5qISLC\nH8b1YN/RQr745YDVcZRqcY4ohiggo8rrTNuyutwKfFPldYCIJInIWhG5oq6NRGS6bb2k7Oxs+xI3\n0EvLU/H1Fu4e2a1FPk85j7G929E/KpRZK1Io1VGD8jCOKIba7iJX67l+InIDkAA8V2VxrG1y6uuB\nmSJS629hY8wcY0yCMSYhMjLS3sz12pNTwJe/ZHLDsM60ax3Q7J+nnEvlqCGejNxTfL4h0+o4SrUo\nRxRDJhBT5XU08JujdiIyFngUuNwY8+t8isaYg7av6cAqYLADMtlt1vIU/Hy8uOMCHS14qlE92zEw\nJoxZK1IpKdNRg/IcjiiG9UC8iMSJiB8wGah2dpGIDAZep7IUsqosbyMi/rbnEcAIYIcDMtklPTuf\n+ZsOcOPZnYkM8bc6jrKIiPCHsfEcOH6Kz3TUoDyI3cVgjCkD7gUWAzuBT40x20XkSRE5fZbRc0Aw\n8N8ap6X2BpJEZDOwEnjGGGN5McxakYqfjxfTz9fRgqe7oEckg2LCmL1SRw3Kc/g44psYYxYBi2os\n+2uV52Pr2G4N0N8RGRwlLTufBZsOcNt5XXW0oBARHhgbz81vr+ezDZlcPyzW6khKNTu98rmGl1ek\n4u/jzfTzu1odRTkJHTUoT6PFUEW6bbRw4zmdiQjW0YKqdHrUcOD4KT7fqMcalPvTYqjiZduxhdvP\n09GCqu70qOFlPUNJeQAtBps9OQXM33SAG4bpmUjqt0SE3+uoQXkILQabl1ek4uvtxfQLdLSgajey\nRyQDo0OZvTJVr4ZWbk2Lgcp7Is3fdIDfDetMuxC9ylnV7vSoIfPYKb7cqPdQUu
5Li4HK+RZ8vIQ7\ndbSg6jGqZ+U9lF5emap3XlVuy+OLISO3kC82HmBKYqzeE0nVS0S4f0w8+3MLma/zNSg35fHF8Mqq\nNLxEuFPviaQaaGzvdvTp2JrZOmpQbsqji6HyHjgZXHdWDB1CdbSgGub0qGFPTgH/26KjBuV+PLoY\nXluVBsBdOt+CaqQL+7SnV4cQXl6RSnlFrXeZV8pleWwxHD5RxCfrM7gmIYZOYTo7m2ocLy/hvtHx\npGUXsGjrIavjKOVQHlsMr32XRoUx3KXHFlQTXdSvA/Htgnl5RSoVOmpQbsQjiyErr4iP1u1n0pAo\nYsIDrY6jXJSXl3Dv6O4kH8ljyY7DVsdRymE8shje+D6dsgrDPaO6Wx1FubhLB3Sia0QQLy1PxRgd\nNSj34JBiEJEJIpIsIqkiMqOW9/1F5BPb+z+LSJcq7z1iW54sIuMdkedMcvKLeX/tfiYO7ETntkHN\n/XHKzXl7CfeM6s6OQydZtjOr/g2UcgF2F4OIeAOzgYuAPsAUEelTY7VbgWPGmO7AC8Cztm37UDkV\naF9gAvCK7fs1m7mr91BUVs49o3W0oBxj4qBOdG4byKwVKTpqUG7BESOGRCDVGJNujCkBPgYm1lhn\nIjDP9vwzYIyIiG35x8aYYmPMHiDV9v2axbGCEt77aS+XDuhEt8jg5voY5WF8vL24e2Q3tmSe4Lvd\n2VbHUcpujiiGKCCjyutM27Ja17HNEX0CaNvAbR3mrR/3UFBSzn06WlAOduXgaKLCWvHSch01qOaR\nmpXPtLfXsf9oYbN/liOKQWpZVvO/jLrWaci2ld9AZLqIJIlIUnZ20/4qyy0o4ZIBHenRPqRJ2ytV\nFz8fL+4a2Y2N+4+zJu2o1XGUG5q9MpW16bkE+Tfr3nbAMcWQCcRUeR0N1LxPwK/riIgPEArkNnBb\nAIwxc4wxCcaYhMjIyCYFffrK/rw0eXCTtlWqPtckRNOhdQAvLk+xOopyM3tyCn6ddrhtC0w77Ihi\nWA/Ei0iciPhReTB5YY11FgJTbc+vBlaYyvH2QmCy7aylOCAeWOeATHXy9qptkKKU/fx9vLnzgq6s\n25PL2nQdNSjHeWVl5URit50X1yKfZ3cx2I4Z3AssBnYCnxpjtovIkyJyuW21N4G2IpIKPAjMsG27\nHfgU2AF8C9xjjCm3N5NSVpmcGEtkiD+zVuioQTlGRm4hX/xygOuHxbbYRGI+jvgmxphFwKIay/5a\n5XkRcE0d2z4NPO2IHEpZLcDXmzvO78pTX+9kw75chnYOtzqScnGvrErDW4Q7zm+52/d45JXPSjWn\n64fF0jbIj5eWp1odRbk4q6YG0GJQysEC/Xy4/fyufLc7m00Zx62Oo1zY699VTg1wZwtPDaDFoFQz\nuOHszoQF+jJLz1BSTXT4RBEfr8vg6qExRLXw1ABaDEo1g2B/H247N47lu7LYduCE1XGUC3r9+8qp\nAe62YCIxLQalmslNw7vQOsCHl3TUoBopK6+ID3+2bmoALQalmknrAF9uOTeOJTuOsOPgSavjKBdi\n9dQAWgxKNaNpw+MI8ffR6xpUg+XkF/Pe2n22u/ZaMzWAFoNSzSg00JdpI7rwzbbDJB/OszqOcgFv\nrE6npKzC0onEtBiUama3nBtHsL8PL+moQdUjt6CE937ax2UDrZ0aQItBqWYWFujH1OGdWbT1EClH\ndNSg6vbG6nROlVo/NYAWg1It4NZzu9LK15uXVujV0Kp2uQUlzFtTOZFY93bWTg2gxaBUCwgP8uOm\nc7rw1ZaDpGbpqEH91lzbaOF+J5hITItBqRZy+3lxlaMGvYeSquGYbbRwcf+OxDvBRGJaDEq1kLbB\n/tx0Thf+p6MGVcObP1ROO3z/6HirowBaDEq1qNOjhll6rEHZHCso4Z01e7mkf0d6drB+tABaDEq1\nqNOjhoWbD5KalW91HOUE5v6QTkFJGfePcY7RAthZDCISLiJLRSTF9rVNLesMEpGfRGS7iGwRkeuq\nvPeOiOwRkU22xyB78ijlCv7/sQa9rsHT5RaU8M6PlccWnGW0APaPGGYAy40x8cBy2+uaCoGbjDF9\ngQnATBEJq/L+Q8aYQbbHJjvzKOX0qh5r0OsaPNvc1ekUlpbzgBONFsD+YpgIzLM9nwdcUXMFY8xu\nY0yK7flBIAuItPNzlXJp08/vSqCvNy/qqMFjVb1uwRnORKrK3mJob4w5BGD72u5MK4tIIuAHpFVZ\n/LRtF9MLIuJvZx6lXEJ4kB83j+jC11sP6T2UPNQbttGCM1y3UFO9xSAiy0RkWy2PiY35IBHpCLwH\nTDPGVNgWPwL0As4CwoGHz7D9dBFJEpGk7Ozsxny0Uk7p9vO6EuTnw4vLd1sdRbWwnPxi3vlxL5c5\n4WgBGlAMxpixxph+tTwWAEdsv/BP/+LPqu17iEhr4GvgMWPM2irf+5CpVAy8DSSeIcccY0yCMSYh\nMlL3RCnXFxboxy0jurBo62Gdr8HDvLYqjeKycn4/1rmOLZxm766khcBU2/OpwIKaK4iIH/Al8K4x\n5r813jtdKkLl8YltduZRyqXcem5XQgJ8mLlMRw2e4sjJIt5bu48rB0dbegfVM7G3GJ4BxolICjDO\n9hoRSRCRubZ1rgXOB26u5bTUD0RkK7AViACesjOPUi4lNNCX287typIdR9iaqXNDe4JXVqZSVmG4\nf4zzHVs4TYwxVmdotISEBJOUlGR1DKUcIq+olPP+tZJBMWG8M63OvanKDRw8foqRz61i0pAonrlq\nQIt/vohsMMYk1LeeXvmslMVCAny584JurErOJmlvrtVxVDN6eWUqBsO9TngmUlVaDEo5gZvO6UxE\nsD/PLU7GFUfxqn77jhbw6foMJp8VS3SbQKvjnJEWg1JOINDPh3tGdePnPbmsSTtqdRzVDGYuS8HH\nWyyfna0htBiUchJTEmPpGBqgowY3tPtIHvM3HWDqOV1o1zrA6jj10mJQykkE+Hpz/5h4NmUcZ9nO\nWi8JUi7qP0uSCfLz4c4LulkdpUG0GJRyItcMjSYuIoh/L06mvEJHDe5gc8ZxFm8/wm3nxdEmyM/q\nOA2ixaCUE/Hx9uLBcT1IPpLHgk0HrI6jHODfS5JpE+jLrefGWR2lwbQYlHIyl/TvSJ+OrXlh2W5K\nyirq30A5rTWpOaxOyeHukd0JCfC1Ok6DaTEo5WS8vISHJvQkI/cUH6/fb3Uc1UTGGJ79dhedQgO4\n8ZzOVsdpFC0GpZzQyB6RJMaF89LyVAqKy6yOo5rgm22H2Zx5ggfG9SDA19vqOI2ixaCUExIRHp7Q\ni5z8Yuau3mN1HNVIZeUV/HtxMvHtgrlqSLTVcRpNi0EpJzW0cxvG923PnO/TyMkvtjqOaoRPkzJJ\nzyngofE98fYSq+M0mhaDUk7szxN6UVRWwUs6BajLKCwpY+ay3QyJDWNcn/ZWx2kSLQalnFi3yGCu\nOyuGD3/ez56cAqvjqAaYu3oPWXnFP
HpJbyqnmnE9WgxKObkHxsTj6+3FvxcnWx1F1SMrr4jXvktj\nQt8ODO0cbnWcJtNiUMrJtWsdwO3nxfH11kNs3H/M6jjqDGYuS6GkrIKHL+pldRS72FUMIhIuIktF\nJMX2tU0d65VXmb1tYZXlcSLys237T2zTgCqlarjjgm5Ehvjz1Fc79AZ7Tio1K49P1mdww9mdiYsI\nsjqOXewdMcwAlhtj4oHltte1OWWMGWR7XF5l+bPAC7btjwG32plHKbcU5O/DH8f1YOP+43y99ZDV\ncVQtnvlmF4G2GyG6OnuLYSIwz/Z8HnBFQzeUyqMyo4HPmrK9Up7mmoQYenUI4dlvd1FUWm51HFXF\nDyk5LNuZxV2juhHuIjfKOxN7i6G9MeYQgO1ruzrWCxCRJBFZKyKnf/m3BY4bY05f1pkJRNmZRym3\n5e0lPHZJHzJyTzFvzV6r4yibsvIK/v7VDmLCW3HLCNe5Ud6Z+NS3gogsAzrU8tajjficWGPMQRHp\nCqwQka3AyVrWq3PnqYhMB6YDxMbGNuKjlXIf58ZHMKpnJC+vSOXqodG0Dfa3OpLH+yQpg+Qjebz6\nuyEud+uLutQ7YjDGjDXG9KvlsQA4IiIdAWxfa51dxBhz0PY1HVgFDAZygDAROV1O0cDBM+SYY4xJ\nMMYkREaojnEAAAAPnklEQVRGNuJHVMq9PHpJb06VlvPvJbutjuLxThaV8p8lu0mMC2dCv9r+fnZN\n9u5KWghMtT2fCiyouYKItBERf9vzCGAEsMNUnlqxErj6TNsrparr3i6EqcO78PH6/Ww7cMLqOB7t\n5RWpHCss4a+X9nHZi9lqY28xPAOME5EUYJztNSKSICJzbev0BpJEZDOVRfCMMWaH7b2HgQdFJJXK\nYw5v2plHKY9w/5h4wgP9eGLhdj191SJp2fm8/eMerh4STb+oUKvjOFS9xxjOxBhzFBhTy/Ik4Dbb\n8zVA/zq2TwcS7cmglCcKbeXLQ+N7MuOLrSzcfJCJg/S8jZZkjOGJhdsJ8PHmzxNc+2K22uiVz0q5\nqGsSYugfFco/F+2isETnbGhJi7cfYXVKDn8Y14PIEPc7AUCLQSkX5e0lPHF5Hw6fLGLWilSr43iM\nUyXl/P2rHfRsH8JNLjYzW0NpMSjlwoZ2DufahGje+D6dlCN5VsfxCK9+l8aB46f428S++Hi7569Q\n9/yplPIgD0/oRZC/D39ZsE0PRDezvTkFvPZdGpcP7MTZXdtaHafZaDEo5eLaBvvz8IRerE3PZcGm\nOi8FUnYyxvDY/G34e3vx6CW9rY7TrLQYlHIDk8+KYWBMGE99vZMTp0qtjuOWFm4+yA+pOfx5Qk/a\ntw6wOk6z0mJQyg14eQlPX9GP3IJinv12l9Vx3M7xwhL+/tUOBsaEcf0w9zzgXJUWg1Juol9UKLed\n15UPf97Pz+lHrY7jVp79dhfHCkv5x5X98PZynyuc66LFoJQb+cPYHsSEt+KRL7bqrbkd5Of0o3y0\nLoNbz42jbyf3usK5LloMSrmRVn7e/OPK/qTnFPCyXttgt1Ml5Tz8+RZiwwN5YKzrT8DTUFoMSrmZ\n8+IjuWpINK99l8aOg7Xd3V411PNLk9l7tJBnrupPoJ9ddxByKVoMSrmhxy7pTVigH3/872ZKyiqs\njuOSNu4/xps/7OF3w2IZ3i3C6jgtSotBKTfUJsiPf07qz85DJ3l5RYrVcVxOUWk5f/5sCx1aBzDj\nIve7SV59tBiUclPj+rRn0pAoZq9KY3PGcavjuJT/LEkmNSuff0zqT0iAr9VxWpwWg1Ju7PHL+hIZ\n7M8f/7tZz1JqoDVpOcz9YQ83nB3LyJ51TWPv3rQYlHJjoa18efbqAaRm5euFbw1w4lQpf/p0M3Ft\ng3j04j5Wx7GMXcUgIuEislREUmxf29SyzigR2VTlUSQiV9jee0dE9lR5b5A9eZRSv3VBj0imntOZ\nt3/cy8rkWqdlVzZ/XbCNrLxiXrhuEK38vK2OYxl7RwwzgOXGmHhgue11NcaYlcaYQcaYQcBooBBY\nUmWVh06/b4zZZGcepVQtHrm4N706hPCnTzeTlVdkdRynNP+XAyzYdJD7x8QzMCbM6jiWsrcYJgLz\nbM/nAVfUs/7VwDfGmEI7P1cp1QgBvt7MmjKY/OIy/vjpZioq9PbcVaVl5/N/X24lsUs4d4/sZnUc\ny9lbDO2NMYcAbF/rO1IzGfioxrKnRWSLiLwgInXOkSci00UkSUSSsrOz7UutlAeKbx/CXy7tw+qU\nHOasTrc6jtMoKi3nng82EuDrzUtTBrvt5DuNUe+/gIgsE5FttTwmNuaDRKQj0B9YXGXxI0Av4Cwg\nHHi4ru2NMXOMMQnGmITIyMjGfLRSyuZ3w2K5pH9H/vXtLn5K0xvtAfztf9vZdTiP568dSIdQ976d\ndkPVWwzGmLHGmH61PBYAR2y/8E//4j/Tka1rgS+NMb/eLN4Yc8hUKgbeBhLt+3GUUmciIjx79QDi\nIoK476ONHD7h2ccbvtiYyUfrMrhrZDePPTW1NvaOmRYCU23PpwILzrDuFGrsRqpSKkLl8YltduZR\nStUj2N+H128cSmFJOfd8uNFjb5mxJfM4M77YyrC4cP44rofVcZyKvcXwDDBORFKAcbbXiEiCiMw9\nvZKIdAFigO9qbP+BiGwFtgIRwFN25lFKNUD3diH86+oBbNh3jCe/2m51nBaXlVfE9Hc3EBnszyu/\nG6LHFWqw63aBxpijwJhalicBt1V5vReIqmW90fZ8vlKq6S4d0ImtB07w+nfpdI8M5uYRcVZHahEl\nZRXc/f5Gjp8q4fO7htM2uM5zXjyW59xHVin1Gw+P78We7AKe/GoHnSOCGOXm+9mNMTzyxVaS9h1j\n1pTBHjPxTmPp+EkpD+blJcycPIjeHVtz34e/kHw4z+pIzer5pbv5fGMmD4yN57KBnayO47S0GJTy\ncIF+PsydmkCQvzdT31pHRq57Xn/64c/7mbUilesSYvj9GM+Zja0ptBiUUnQMbcW8WxIpLCnjxjd/\nJjuv2OpIDrV0xxEem7+VkT0jeerKflSeCKnqosWglAKgV4fWvD0tkSMni7nprXWcOFVa/0YuYGVy\nFvd8sJH+UaHMvn4IvnoGUr30X0gp9auhndvw+o1DSc3KY6oblMP3u7O5470NxLcP5t1bhhHkr+fb\nNIQWg1KqmvN7RDL7+iFsP3iC699YS25BidWRmuTH1BxufzeJrhFBvH/rMEIDPW8mtqbSYlBK/caF\nfTvwxk0JpGblM3nOTy53q+6vtxxi2tvr6dI2iA9uG0abID+rI7kULQalVK1G9mzH2zefReaxU1z9\n6k+kZuVbHalB3vtpL/d+tJEB0aF8csfZegFbE2gxKKXqNLx7BB/cNozCkjImvfIjP6bmWB2pTuUV\nhme/3cVfFmxnTK92vHfrMMICdaTQFFoMSqkzGhzbhi/vHkGH0ACmvrWO99fuwxjnmujnWEEJ09
5Z\nz6ur0piSGMtrNwz16Kk57aXFoJSqV0x4IJ/fNZwR3SN4bP427vvoF04WOccZS9sOnOCyl39gbdpR\n/jmpP/+c1F9vimcn/ddTSjVISIAvb918Fg+N78k32w5z8Yur2bDvmGV5SssreGl5Cle+8iNl5YZP\n7jibKYmxluVxJ1oMSqkG8/YS7hnVnU/vOAdj4OrX1vCX+ds4Udiyo4cdB09yxewfeX7pbi7q15Fv\nfn8eg2PbtGgGdybOtq+wIRISEkxSUpLVMZTyaCeLSnl+yW7e/WkvbQL9eHhCLyYNiWrW3TiHTpzi\n+SWVN8ILD/LjqSv6M6Ffh2b7PHcjIhuMMQn1rmdPMYjINcATQG8g0TYPQ23rTQBeBLyBucaY0xP6\nxAEfUznf80bgRmNMvVfTaDEo5Ty2HzzBX+ZvY+P+48SGB3LnBd24amgU/j6OO/ibkVvIuz/t5d2f\n9mEM3HROZ+4d3V3POmqkliqG3kAF8Drwp9qKQUS8gd1UzvCWCawHphhjdojIp8AXxpiPReQ1YLMx\n5tX6PleLQSnnUlFhWLbzCLNXprI58wSRIf5cMagTEwdF0bdT6ybdtK6otJw1aTl8+PN+lu/KQoCJ\ng6J4cFwPYsIDHf9DeICGFoO9M7jttH3YmVZLBFKNMem2dT8GJorITmA0cL1tvXlUjj7qLQallHPx\n8hIu7NuBcX3a80NqDvPW7OOdNXt5Y/Ue4iKCOLtrOENi2zA4NoyosMDfnEpqjOFoQQkpR/JJPnyS\n1Sk5/JiWQ1FpBRHBftwzsjvXD4ulU1gri35Cz9ISd5SKAjKqvM4EhgFtgePGmLIqy38z/adSynWI\nCOfFR3JefCTHC0tYtPUwS3Yc5usth/ho3f//NRDi70N4sB8VxlBcWkFhSTn5xWW/vh8T3orrEmIY\n2bMdw7u3dehuKVW/eotBRJYBtR3dedQYs6ABn1HbcMKcYXldOaYD0wFiY/WUNKWcXVigH9cPi+X6\nYbFUVBjSsvPZeuAEh08WkXWymKMFJfh6Cf6+Xvj7eBMbHkh8+2C6twumQ+sAnTPBQvUWgzFmrJ2f\nkQnEVHkdDRwEcoAwEfGxjRpOL68rxxxgDlQeY7Azk1KqBXl5CfHtQ4hvH2J1FNUALXEdw3ogXkTi\nRMQPmAwsNJVHvVcCV9vWmwo0ZASilFKqGdlVDCJypYhkAucAX4vIYtvyTiKyCMA2GrgXWAzsBD41\nxmy3fYuHgQdFJJXKYw5v2pNHKaWU/fQCN6WU8hANPV1Vb4mhlFKqGi0GpZRS1WgxKKWUqkaLQSml\nVDVaDEoppapxybOSRCQb2NfEzSOovLjOVbl6fnD9n8HV84Pr/wyunh+s+Rk6G2Mi61vJJYvBHiKS\n1JDTtZyVq+cH1/8ZXD0/uP7P4Or5wbl/Bt2VpJRSqhotBqWUUtV4YjHMsTqAnVw9P7j+z+Dq+cH1\nfwZXzw9O/DN43DEGpZRSZ+aJIwallFJn4FHFICITRCRZRFJFZIbVeRpDRN4SkSwR2WZ1lqYQkRgR\nWSkiO0Vku4j83upMjSUiASKyTkQ2236Gv1mdqSlExFtEfhGRr6zO0hQisldEtorIJhFxubtpikiY\niHwmIrts/z2cY3WmmjxmV5KIeAO7gXFUTh60HphijNlhabAGEpHzgXzgXWNMP6vzNJaIdAQ6GmM2\nikgIsAG4wlX+/QGkckqxIGNMvoj4Aj8AvzfGrLU4WqOIyINAAtDaGHOp1XkaS0T2AgnGGJe8jkFE\n5gGrjTFzbXPUBBpjjludqypPGjEkAqnGmHRjTAnwMTDR4kwNZoz5Hsi1OkdTGWMOGWM22p7nUTk3\nh0vN8W0q5dte+toeLvWXlYhEA5cAc63O4olEpDVwPra5Z4wxJc5WCuBZxRAFZFR5nYmL/WJyFyLS\nBRgM/Gxtksaz7YbZBGQBS40xrvYzzAT+DFRYHcQOBlgiIhtsc8G7kq5ANvC2bXfeXBEJsjpUTZ5U\nDLXNLO5Sf+25AxEJBj4HHjDGnLQ6T2MZY8qNMYOonKM8UURcZreeiFwKZBljNlidxU4jjDFDgIuA\ne2y7WV2FDzAEeNUYMxgoAJzueKcnFUMmEFPldTRw0KIsHsm2X/5z4ANjzBdW57GHbfi/CphgcZTG\nGAFcbttH/zEwWkTetzZS4xljDtq+ZgFfUrmb2FVkAplVRpqfUVkUTsWTimE9EC8icbYDPpOBhRZn\n8hi2A7dvAjuNMc9bnacpRCRSRMJsz1sBY4Fd1qZqOGPMI8aYaGNMFyr//7/CGHODxbEaRUSCbCcv\nYNsFcyHgMmfqGWMOAxki0tO2aAzgdCdg+FgdoKUYY8pE5F5gMeANvGWM2W5xrAYTkY+AkUCEiGQC\njxtj3rQ2VaOMAG4Ettr20QP8nzFmkYWZGqsjMM92hpsX8KkxxiVP+XRh7YEvK//OwAf40BjzrbWR\nGu0+4APbH6jpwDSL8/yGx5yuqpRSqmE8aVeSUkqpBtBiUEopVY0Wg1JKqWq0GJRSSlWjxaCUUqoa\nLQallFLVaDEopZSqRotBKaVUNf8PSkPz2rqC2OEAAAAASUVORK5CYII=\n\"\n>\n</div>\n\n</div>\n\n</div>\n</div>\n\n</div>\n </div>\n\n\n\n \n<div class=\"cell border-box-sizing code_cell rendered\">\n<div class=\"input\">\n<div class=\"prompt input_prompt\">In&nbsp;[&nbsp;]:</div>\n<div class=\"inner_cell\">\n <div class=\"input_area\">\n<div class=\" highlight hl-ipython3\"><pre><span></span> \n</pre></div>\n\n</div>\n</div>\n</div>\n\n</div>\n\n\n </div>\n </div>\n</body>\n\n \n\n\n</html>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
ec8588f65d637ebe8c45c64281f1077b1326f376
421,537
ipynb
Jupyter Notebook
PRY_VGGFace_MODEL_FER.ipynb
jppolbar/ReconocIAs
2c70f9dd12675dbc4bd1c162b68aa651ad9f677b
[ "MIT" ]
null
null
null
PRY_VGGFace_MODEL_FER.ipynb
jppolbar/ReconocIAs
2c70f9dd12675dbc4bd1c162b68aa651ad9f677b
[ "MIT" ]
null
null
null
PRY_VGGFace_MODEL_FER.ipynb
jppolbar/ReconocIAs
2c70f9dd12675dbc4bd1c162b68aa651ad9f677b
[ "MIT" ]
null
null
null
421,537
421,537
0.929385
[ [ [ "<img src=\"img/mioti.png\" >\n\n\n# Facial Recognition Project: VGGFaceCustom Model", "_____no_output_____" ], [ "<img src=\"./img/emociones.png\" style=\"width: 800px\">", "_____no_output_____" ], [ "### Objectives", "_____no_output_____" ], [ "* Prepare the structure of the convolutional neural network and train it so that it is able to identify the different emotions labeled in the dataset", "_____no_output_____" ], [ "### Environment setup", "_____no_output_____" ] ], [ [ "\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\nimport os\nos.chdir(\"//content/drive/My Drive/MIOTI/PRY/\")\n\n# Check current working directory.\nretval = os.getcwd()\nprint (\"Current working directory %s\" % retval)", "Mounted at /content/drive\nCurrent working directory /content/drive/My Drive/MIOTI/PRY\n" ], [ "#!pip install scikit-plot", "_____no_output_____" ] ], [ [ "### Library imports", "_____no_output_____" ], [ "* The libraries we are going to use in this process are the following:\n * pandas: to work with dataframes\n * numpy: to perform arithmetic operations\n * matplotlib: to visualize result charts\n * os: to work with directories\n * json, sys: to export the model and the weights of the trained network\n * tensorflow: to build the neural network and its whole architecture, compilation and training\n * sklearn: for data splitting, preprocessing and analysis of the information", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom os import listdir\nimport json, sys\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input,Conv2D, Flatten, MaxPool2D, BatchNormalization, GlobalAveragePooling2D, Dense, Dropout\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras import utils\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.utils import plot_model\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import confusion_matrix,classification_report\nfrom sklearn.metrics import ConfusionMatrixDisplay\n", "_____no_output_____" ] ], [ [ "### Preprocessing", "_____no_output_____" ], [ "* In this part the data is processed so that it is compatible with the neural network and training can run successfully. 
The steps to follow are:\n * Define the image shape, the batch size used at each step of an epoch, and the classes and how many there are\n * Load the dataset and transform the pixels field into arrays with the required size and shape\n * Split the data first into training and test sets; the training data is then split again into training and validation sets\n * One-hot encode the classes\n * Normalize the training, validation and test data", "_____no_output_____" ] ], [ [ "# Define the file paths, image sizes, batch size and classes\nIMG_SIZE = (48,48)#(224,224)\nBATCH_SIZE = 64\nCLASSES=['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']\nNUM_CLASSES = len(CLASSES)", "_____no_output_____" ], [ "df = pd.read_csv('./data/fer2013.csv')\n# emotion_label = {0:'Angry', 1:'Disgust', 2:'Fear', 3:'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}\nINTERESTED_LABELS = [0,1,2,3,4,5,6]\ndf = df[df.emotion.isin(INTERESTED_LABELS)]", "_____no_output_____" ], [ "img_array = df.pixels.apply(lambda x: np.array(x.split(' ')).reshape(48, 48, 1).astype('float32'))\nimg_array = np.stack(img_array, axis=0)\nimg_labels = df.emotion\n\nX_train, X_test, y_train, y_test = train_test_split(img_array, img_labels,\n shuffle=True, stratify=img_labels,\n test_size=0.1, random_state=42)\n\n\n\nle = LabelEncoder()\nimg_labels = le.fit_transform(y_train)\nimg_labels = utils.to_categorical(img_labels)\nle_name_mapping = dict(zip(le.classes_, le.transform(le.classes_)))\n\n\nX_train, X_valid, y_train, y_valid = train_test_split(X_train, img_labels ,\n shuffle=True, stratify=img_labels,\n test_size=0.1, random_state=42)\n\n\nimg_width = X_train.shape[1]\nimg_height = X_train.shape[2]\nimg_depth = X_train.shape[3]\nnum_classes = y_train.shape[1]\n\n\n#Normalize data\nX_train = X_train / 255.\nX_valid = X_valid / 255.\nX_test = X_test / 255.", "_____no_output_____" ], [ "le_name_mapping", "_____no_output_____" ] ], [ [ "### Model", "_____no_output_____" ], [ "* In this step we build the network; its architecture will be the following:\n * A sequential network divided into 4 parts:\n * The first 3 parts are made up of convolutional layers that capture the image features, plus normalization, pooling and Dropout layers to avoid overfitting\n * In the last part the outputs are flattened and then fed into a dense network that returns the probabilities that an image belongs to each of the different classes\n * Compile the model with a categorical cross-entropy loss function and the Adam optimizer\n * Apply callbacks that reduce the learning rate if the loss plateaus and stop training as soon as satisfactory validation values are reached\n * Train the model", "_____no_output_____" ] ], [ [ "batch_size = 32\nepochs = 100\ndef build_model():\n\n model = Sequential(name='CNN_Face_Emotions')\n ''' First model '''\n model.add(Conv2D(filters=64,kernel_size=(5,5),input_shape=(img_width, img_height, img_depth),activation='relu',padding='same',kernel_initializer='he_normal'))\n model.add(BatchNormalization())\n model.add(Conv2D(filters=64,kernel_size=(5,5),activation='relu',padding='same',kernel_initializer='he_normal'))\n model.add(BatchNormalization())\n model.add(MaxPool2D(pool_size=(2,2)))\n model.add(Dropout(0.5))\n\n ''' Second layer '''\n 
model.add(Conv2D(filters=128,kernel_size=(3,3),activation='relu',padding='same',kernel_initializer='he_normal'))\n model.add(BatchNormalization())\n model.add(Conv2D(filters=128,kernel_size=(3,3),activation='relu',padding='same',kernel_initializer='he_normal'))\n model.add(BatchNormalization())\n model.add(MaxPool2D(pool_size=(2,2)))\n model.add(Dropout(0.3))\n\n ''' Extra layer '''\n model.add(Conv2D(filters=256,kernel_size=(3,3),activation='relu',padding='same',kernel_initializer='he_normal'))\n model.add(BatchNormalization())\n model.add(Conv2D(filters=256,kernel_size=(3,3),activation='relu',padding='same',kernel_initializer='he_normal'))\n model.add(BatchNormalization())\n model.add(MaxPool2D(pool_size=(2,2)))\n model.add(Dropout(0.3))\n\n ''' Third layer '''\n model.add(Flatten())\n model.add(Dense(128,activation='relu',kernel_initializer='he_normal'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n model.add(Dense(num_classes,activation='softmax'))\n\n return model", "_____no_output_____" ], [ "''' Initialize model '''\nmodel= None\nmodel = build_model()\nmodel.summary()", "Model: \"CNN_Face_Emotions\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n conv2d (Conv2D) (None, 48, 48, 64) 1664 \n \n batch_normalization (BatchN (None, 48, 48, 64) 256 \n ormalization) \n \n conv2d_1 (Conv2D) (None, 48, 48, 64) 102464 \n \n batch_normalization_1 (Batc (None, 48, 48, 64) 256 \n hNormalization) \n \n max_pooling2d (MaxPooling2D (None, 24, 24, 64) 0 \n ) \n \n dropout (Dropout) (None, 24, 24, 64) 0 \n \n conv2d_2 (Conv2D) (None, 24, 24, 128) 73856 \n \n batch_normalization_2 (Batc (None, 24, 24, 128) 512 \n hNormalization) \n \n conv2d_3 (Conv2D) (None, 24, 24, 128) 147584 \n \n batch_normalization_3 (Batc (None, 24, 24, 128) 512 \n hNormalization) \n \n max_pooling2d_1 (MaxPooling (None, 12, 12, 128) 0 \n 2D) \n \n dropout_1 (Dropout) (None, 12, 12, 128) 0 \n \n conv2d_4 (Conv2D) (None, 12, 12, 256) 295168 \n \n batch_normalization_4 (Batc (None, 12, 12, 256) 1024 \n hNormalization) \n \n conv2d_5 (Conv2D) (None, 12, 12, 256) 590080 \n \n batch_normalization_5 (Batc (None, 12, 12, 256) 1024 \n hNormalization) \n \n max_pooling2d_2 (MaxPooling (None, 6, 6, 256) 0 \n 2D) \n \n dropout_2 (Dropout) (None, 6, 6, 256) 0 \n \n flatten (Flatten) (None, 9216) 0 \n \n dense (Dense) (None, 128) 1179776 \n \n batch_normalization_6 (Batc (None, 128) 512 \n hNormalization) \n \n dropout_3 (Dropout) (None, 128) 0 \n \n dense_1 (Dense) (None, 7) 903 \n \n=================================================================\nTotal params: 2,395,591\nTrainable params: 2,393,543\nNon-trainable params: 2,048\n_________________________________________________________________\n" ], [ "plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)", "_____no_output_____" ], [ "model.compile(\n loss='categorical_crossentropy',\n optimizer=Adam(learning_rate=1e-3),\n metrics=['accuracy']\n )", "_____no_output_____" ], [ "''' Callbacks '''\nearly_stopping = EarlyStopping(\n monitor='val_accuracy',\n min_delta=0.00005,\n patience=11,\n verbose=1,\n restore_best_weights=True,\n)\n\nlr_scheduler = ReduceLROnPlateau(\n monitor='val_accuracy',\n factor=0.5,\n patience=7,\n min_lr=1e-7,\n verbose=1,\n)", "_____no_output_____" ], [ "import datetime\n# Load the TensorBoard notebook extension\n%load_ext tensorboard\n\n%rm -rf ./logs/\nlog_dir = \"logs/fit/\" + 
datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n%tensorboard --logdir logs/fit", "_____no_output_____" ], [ "callbacks = [\n ModelCheckpoint('model/vgg-face-v4.h5',save_best_only=False,verbose=0),\n early_stopping,\n lr_scheduler,\n tensorboard_callback\n]", "_____no_output_____" ], [ "# train_datagen = ImageDataGenerator(\n# rotation_range=15,\n# width_shift_range=0.15,\n# height_shift_range=0.15,\n# shear_range=0.15,\n# zoom_range=0.15,\n# horizontal_flip=True,\n# )\n# train_datagen.fit(X_train)", "_____no_output_____" ], [ "''' Train model '''\nhistory=model.fit(X_train, y_train,\n callbacks=callbacks,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(X_valid, y_valid),\n shuffle=True)\n\n\n# train_datagen.flow(X_train, y_train, batch_size=batch_size),\n# validation_data=(X_valid, y_valid),\n\n''' Saving the model to use it later on'''\nfer_json = model.to_json()\nwith open(\"model/vgg-face-model-v4.json\", \"w\") as json_file:\n json_file.write(fer_json)\nmodel.save_weights(\"model/vgg-face-v4.h5\")", "Epoch 1/100\n909/909 [==============================] - 71s 66ms/step - loss: 1.7474 - accuracy: 0.3429 - val_loss: 1.4320 - val_accuracy: 0.4582 - lr: 0.0010\nEpoch 2/100\n909/909 [==============================] - 59s 65ms/step - loss: 1.4110 - accuracy: 0.4559 - val_loss: 1.2883 - val_accuracy: 0.5102 - lr: 0.0010\nEpoch 3/100\n909/909 [==============================] - 59s 65ms/step - loss: 1.2868 - accuracy: 0.5117 - val_loss: 1.1989 - val_accuracy: 0.5412 - lr: 0.0010\nEpoch 4/100\n909/909 [==============================] - 59s 65ms/step - loss: 1.2004 - accuracy: 0.5470 - val_loss: 1.1187 - val_accuracy: 0.5854 - lr: 0.0010\nEpoch 5/100\n909/909 [==============================] - 58s 64ms/step - loss: 1.1297 - accuracy: 0.5755 - val_loss: 1.0776 - val_accuracy: 0.5978 - lr: 0.0010\nEpoch 6/100\n909/909 [==============================] - 58s 64ms/step - loss: 1.0681 - accuracy: 0.6009 - val_loss: 1.1764 - val_accuracy: 0.5619 - lr: 0.0010\nEpoch 7/100\n909/909 [==============================] - 59s 65ms/step - loss: 1.0417 - accuracy: 0.6105 - val_loss: 1.1983 - val_accuracy: 0.6074 - lr: 0.0010\nEpoch 8/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.9680 - accuracy: 0.6418 - val_loss: 1.3156 - val_accuracy: 0.5724 - lr: 0.0010\nEpoch 9/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.9248 - accuracy: 0.6567 - val_loss: 1.0050 - val_accuracy: 0.6238 - lr: 0.0010\nEpoch 10/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.8546 - accuracy: 0.6846 - val_loss: 1.0397 - val_accuracy: 0.6211 - lr: 0.0010\nEpoch 11/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.7869 - accuracy: 0.7087 - val_loss: 1.0620 - val_accuracy: 0.6235 - lr: 0.0010\nEpoch 12/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.7300 - accuracy: 0.7329 - val_loss: 1.2294 - val_accuracy: 0.6006 - lr: 0.0010\nEpoch 13/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.6704 - accuracy: 0.7544 - val_loss: 1.4763 - val_accuracy: 0.5464 - lr: 0.0010\nEpoch 14/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.6359 - accuracy: 0.7685 - val_loss: 1.1195 - val_accuracy: 0.6263 - lr: 0.0010\nEpoch 15/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.5696 - accuracy: 0.7945 - val_loss: 1.1565 - val_accuracy: 0.6291 - lr: 0.0010\nEpoch 
16/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.5259 - accuracy: 0.8098 - val_loss: 1.1595 - val_accuracy: 0.6362 - lr: 0.0010\nEpoch 17/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.4988 - accuracy: 0.8215 - val_loss: 1.2253 - val_accuracy: 0.6362 - lr: 0.0010\nEpoch 18/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.4591 - accuracy: 0.8341 - val_loss: 1.2089 - val_accuracy: 0.6384 - lr: 0.0010\nEpoch 19/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.4205 - accuracy: 0.8477 - val_loss: 1.2362 - val_accuracy: 0.6502 - lr: 0.0010\nEpoch 20/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.3968 - accuracy: 0.8554 - val_loss: 1.2910 - val_accuracy: 0.6350 - lr: 0.0010\nEpoch 21/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.3836 - accuracy: 0.8622 - val_loss: 1.3671 - val_accuracy: 0.6350 - lr: 0.0010\nEpoch 22/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.3557 - accuracy: 0.8760 - val_loss: 1.3721 - val_accuracy: 0.6430 - lr: 0.0010\nEpoch 23/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.3450 - accuracy: 0.8774 - val_loss: 1.3643 - val_accuracy: 0.6331 - lr: 0.0010\nEpoch 24/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.3157 - accuracy: 0.8872 - val_loss: 1.3285 - val_accuracy: 0.6381 - lr: 0.0010\nEpoch 25/100\n909/909 [==============================] - 57s 63ms/step - loss: 0.3108 - accuracy: 0.8905 - val_loss: 1.3800 - val_accuracy: 0.6390 - lr: 0.0010\nEpoch 26/100\n908/909 [============================>.] - ETA: 0s - loss: 0.3036 - accuracy: 0.8912\nEpoch 00026: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.\n909/909 [==============================] - 58s 63ms/step - loss: 0.3037 - accuracy: 0.8912 - val_loss: 1.4411 - val_accuracy: 0.6393 - lr: 0.0010\nEpoch 27/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.2296 - accuracy: 0.9193 - val_loss: 1.5200 - val_accuracy: 0.6424 - lr: 5.0000e-04\nEpoch 28/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.2014 - accuracy: 0.9291 - val_loss: 1.5620 - val_accuracy: 0.6505 - lr: 5.0000e-04\nEpoch 29/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.1745 - accuracy: 0.9392 - val_loss: 1.5368 - val_accuracy: 0.6418 - lr: 5.0000e-04\nEpoch 30/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.1734 - accuracy: 0.9401 - val_loss: 1.6352 - val_accuracy: 0.6437 - lr: 5.0000e-04\nEpoch 31/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.1566 - accuracy: 0.9467 - val_loss: 1.6118 - val_accuracy: 0.6409 - lr: 5.0000e-04\nEpoch 32/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.1454 - accuracy: 0.9498 - val_loss: 1.7100 - val_accuracy: 0.6517 - lr: 5.0000e-04\nEpoch 33/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.1520 - accuracy: 0.9476 - val_loss: 1.7290 - val_accuracy: 0.6505 - lr: 5.0000e-04\nEpoch 34/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.1418 - accuracy: 0.9517 - val_loss: 1.8152 - val_accuracy: 0.6430 - lr: 5.0000e-04\nEpoch 35/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.1458 - accuracy: 0.9485 - val_loss: 1.7807 - val_accuracy: 0.6467 - lr: 5.0000e-04\nEpoch 36/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.1366 - accuracy: 0.9523 - val_loss: 1.7523 - 
val_accuracy: 0.6480 - lr: 5.0000e-04\nEpoch 37/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.1280 - accuracy: 0.9567 - val_loss: 1.8754 - val_accuracy: 0.6430 - lr: 5.0000e-04\nEpoch 38/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.1300 - accuracy: 0.9541 - val_loss: 1.8776 - val_accuracy: 0.6409 - lr: 5.0000e-04\nEpoch 39/100\n908/909 [============================>.] - ETA: 0s - loss: 0.1268 - accuracy: 0.9566\nEpoch 00039: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.\n909/909 [==============================] - 59s 65ms/step - loss: 0.1267 - accuracy: 0.9567 - val_loss: 1.8801 - val_accuracy: 0.6437 - lr: 5.0000e-04\nEpoch 40/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.1111 - accuracy: 0.9622 - val_loss: 1.8477 - val_accuracy: 0.6452 - lr: 2.5000e-04\nEpoch 41/100\n909/909 [==============================] - 59s 65ms/step - loss: 0.0975 - accuracy: 0.9672 - val_loss: 1.8771 - val_accuracy: 0.6514 - lr: 2.5000e-04\nEpoch 42/100\n909/909 [==============================] - 58s 64ms/step - loss: 0.0942 - accuracy: 0.9677 - val_loss: 1.9398 - val_accuracy: 0.6452 - lr: 2.5000e-04\nEpoch 43/100\n908/909 [============================>.] - ETA: 0s - loss: 0.0924 - accuracy: 0.9687Restoring model weights from the end of the best epoch: 32.\n909/909 [==============================] - 59s 65ms/step - loss: 0.0923 - accuracy: 0.9687 - val_loss: 1.9035 - val_accuracy: 0.6467 - lr: 2.5000e-04\nEpoch 00043: early stopping\n" ] ], [ [ "### Results analysis", "_____no_output_____" ], [ "* Next we visualize how training evolved for the training and validation sets, both for accuracy and for loss\n* Then we save the model and its weights to files so they can be used later in the final model\n* Finally we run predictions on the test data and draw the confusion matrix to visualize how well our model managed to identify the different emotions", "_____no_output_____" ] ], [ [ "def print_result(my_history,my_dropout=None):\n\n acc = my_history.history['accuracy']\n val_acc = my_history.history['val_accuracy']\n\n loss = my_history.history['loss']\n val_loss = my_history.history['val_loss']\n\n # Extract the number of epochs\n epochs = range(len(acc))\n\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,figsize=(14,5))\n # Plot our accuracy per epoch\n ax1.plot(epochs, acc, label = 'train_acc')\n ax1.plot(epochs, val_acc, label = 'val_acc')\n if (my_dropout):\n title= 'Training and validation accuracy with dropout rate = '+str(my_dropout)\n else:\n title= 'Training and validation accuracy'\n\n ax1.set_title(title)\n ax1.legend()\n \n\n # Plot our loss per epoch\n ax2.plot(epochs, loss, label = 'loss')\n ax2.plot(epochs, val_loss, label = 'val_loss')\n if (my_dropout):\n title= 'Training and validation loss with dropout rate = '+str(my_dropout)\n else:\n title= 'Training and validation loss'\n\n ax2.set_title(title)\n ax2.legend()\n fig.suptitle('Results')\n \n plt.show()\n \n\n return None", "_____no_output_____" ], [ "print_result(history)", "_____no_output_____" ], [ "file_model=\"./model/vgg-face-model-v4.json\"\nfile_weights=\"./model/vgg-face-v4.h5\"", "_____no_output_____" ], [ "json_file = open(file_model, 'r')\nmodel_json = json_file.read()\njson_file.close()\nmodel = 
tf.keras.models.model_from_json(model_json)\nmodel.load_weights(file_weights)", "_____no_output_____" ], [ "model.compile(\n loss='categorical_crossentropy',\n optimizer=Adam(learning_rate=1e-3),\n metrics=['accuracy']\n )", "_____no_output_____" ], [ "test_prob = model.predict(X_test)\ny_test_pred = np.argmax(test_prob, axis=1)", "_____no_output_____" ], [ "conf_mat = confusion_matrix(np.array(y_test), y_test_pred)\npd.DataFrame(conf_mat, columns=CLASSES, index=CLASSES)\n", "_____no_output_____" ], [ "print(classification_report(y_test, y_test_pred, target_names=CLASSES))", " precision recall f1-score support\n\n Angry 0.57 0.56 0.57 495\n Disgust 0.84 0.47 0.60 55\n Fear 0.53 0.49 0.51 512\n Happy 0.83 0.85 0.84 899\n Sad 0.55 0.55 0.55 608\n Surprise 0.72 0.81 0.76 400\n Neutral 0.63 0.63 0.63 620\n\n accuracy 0.66 3589\n macro avg 0.67 0.62 0.64 3589\nweighted avg 0.66 0.66 0.66 3589\n\n" ], [ "# Plot the confusion matrix\ny_predicted = model.predict(X_test)\ny_predicted = np.argmax(y_predicted, axis=1)\n\nfig, ax = plt.subplots(figsize=(15, 8))\ncmd = ConfusionMatrixDisplay(conf_mat, display_labels=CLASSES)\ncmd.plot(ax=ax,cmap='Greens');\n", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
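A minimal inference sketch for the FER record above, assuming the model/vgg-face-model-v4.json and model/vgg-face-v4.h5 files that notebook saves, its 48x48 grayscale input, its /255. scaling, and its seven-class label order; the predict_emotion helper is an illustrative addition, not part of the original notebook:

```python
import numpy as np
import tensorflow as tf

# Label order copied from the CLASSES list in the notebook above
CLASSES = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

# Rebuild the architecture from the exported JSON, then load the trained weights
with open('model/vgg-face-model-v4.json') as f:
    model = tf.keras.models.model_from_json(f.read())
model.load_weights('model/vgg-face-v4.h5')

def predict_emotion(gray_image):
    """gray_image: a (48, 48) grayscale array, as in FER2013 (assumed input format)."""
    x = gray_image.reshape(1, 48, 48, 1).astype('float32') / 255.  # same scaling as training
    probs = model.predict(x)[0]
    return CLASSES[int(np.argmax(probs))], float(probs.max())
```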
ec858bd86e0490e0191a61dc4511af420d897ba3
539,737
ipynb
Jupyter Notebook
slides/2022-03-07-splines.ipynb
cu-numcomp/spring22
f4c1f9287bff2c10645809e65c21829064493a66
[ "MIT" ]
null
null
null
slides/2022-03-07-splines.ipynb
cu-numcomp/spring22
f4c1f9287bff2c10645809e65c21829064493a66
[ "MIT" ]
null
null
null
slides/2022-03-07-splines.ipynb
cu-numcomp/spring22
f4c1f9287bff2c10645809e65c21829064493a66
[ "MIT" ]
2
2022-02-09T21:05:12.000Z
2022-03-11T20:34:46.000Z
248.497698
23,131
0.677028
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec858c48114cb276d4f39a2016e77392d75eb429
302,935
ipynb
Jupyter Notebook
Notebooks/Text WOT Analysis.ipynb
wscottreynders/Wheel-of-Time
ab6f597d14434d5991c4bdac5b0bb27c402ff593
[ "MIT" ]
null
null
null
Notebooks/Text WOT Analysis.ipynb
wscottreynders/Wheel-of-Time
ab6f597d14434d5991c4bdac5b0bb27c402ff593
[ "MIT" ]
null
null
null
Notebooks/Text WOT Analysis.ipynb
wscottreynders/Wheel-of-Time
ab6f597d14434d5991c4bdac5b0bb27c402ff593
[ "MIT" ]
null
null
null
158.687795
23,483
0.339119
[ [ [ "import random\nimport sys\nimport os\nimport re\n\nimport requests\nimport pandas as pd\nimport numpy as np\n\nimport seaborn as sns\nimport spacy\n\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n\nfrom sklearn.pipeline import FeatureUnion, Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.decomposition import PCA, TruncatedSVD\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.metrics.pairwise import cosine_similarity\n%matplotlib inline\n\n\n# Used to filter out stop words in the text\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nnltk.download('punkt')\n\n", "[nltk_data] Downloading package punkt to\n[nltk_data] C:\\Users\\wscot\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n" ], [ "# Take in a text file and save it as a string\n# This is for Eye of the World Chapter 1\ntext_file = open(r\"C:\\Users\\wscot\\OneDrive\\Desktop\\DataSets\\WheelOfTime\\EoftWch_1.txt\", \"r\", encoding = 'utf-8')\nEoftW_ch_1 = text_file.read()\ntext_file.close()", "_____no_output_____" ], [ "# Take in a text file and save it as a string\n# This is for Wheel of Time episode 1\ntext_file = open(r\"C:\\Users\\wscot\\OneDrive\\Desktop\\DataSets\\WheelOfTime\\WOT1.1.txt\", \"r\")\nWofT_1_1 = text_file.read()\ntext_file.close()", "_____no_output_____" ], [ "# Take in a text file and save it as a string\n# This is for Eye of the World Chapter 2\ntext_file = open(r\"C:\\Users\\wscot\\OneDrive\\Desktop\\DataSets\\WheelOfTime\\EoftWch_2.txt\", \"r\")\nEoftW_ch_2 = text_file.read()\ntext_file.close()", "_____no_output_____" ], [ "# cleaning the text\ndef clean_doc(text):\n #text = re.sub(r\"[|]\"\", \"\", text)\n #text = text.replace('(|)', '', text)\n text = re.sub(r\"\\n|:\", \" \", text)\n text = re.sub(r\"(@\\[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)|^rt|http.+?\", \"\", text)\n text = re.sub(r\" \", \" \", text)\n text = text.lower().lstrip().rstrip()\n text = re.sub(r\" \", \" \", text)\n text = re.sub(r\"\\b\\d+\\b\", \"\", text)\n \n return text", "_____no_output_____" ], [ "# Tokenizing and removing stop words\ndef remove_stop_words(text):\n\n stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(text)\n\n # Keep only the tokens that are not stop words\n filtered_sentence = [w for w in word_tokens if w not in stop_words]\n\n return filtered_sentence\n", "_____no_output_____" ], [ "def listToString(s): \n \n return ' '.join(s)", "_____no_output_____" ], [ "EoftW_ch_1_clean = clean_doc(EoftW_ch_1)\nEoftW_ch_1_tokens = remove_stop_words(EoftW_ch_1_clean)\nEoftW_ch_1_ready = listToString(EoftW_ch_1_tokens)", "_____no_output_____" ], [ "WofT_1_1_clean = clean_doc(WofT_1_1)\nWofT_1_1_tokens = remove_stop_words(WofT_1_1_clean)\nWofT_1_1_ready = listToString(WofT_1_1_tokens)", "_____no_output_____" ], [ "EoftW_ch_2_clean = clean_doc(EoftW_ch_2)\nEoftW_ch_2_tokens = remove_stop_words(EoftW_ch_2_clean)\nEoftW_ch_2_ready = listToString(EoftW_ch_2_tokens)", "_____no_output_____" ], [ "data = [EoftW_ch_1_ready, WofT_1_1_ready, EoftW_ch_2_ready]", "_____no_output_____" ], [ "#Call vectorizer\n\ncount_vectorizer = CountVectorizer()\nvector_matrix = count_vectorizer.fit_transform(data)\nvector_matrix", "_____no_output_____" ], [ "tokens = 
count_vectorizer.get_feature_names()\ntokens", "_____no_output_____" ], [ "vector_matrix.toarray()", "_____no_output_____" ], [ "def create_dataframe(matrix, tokens):\n\n doc_names = [f'doc_{i+1}' for i, _ in enumerate(matrix)]\n df = pd.DataFrame(data=matrix, index=doc_names, columns=tokens)\n return(df)", "_____no_output_____" ], [ "create_dataframe(vector_matrix.toarray(),tokens)", "_____no_output_____" ], [ "from sklearn.metrics.pairwise import cosine_similarity\n\ncosine_similarity_matrix = cosine_similarity(vector_matrix)\ncreate_dataframe(cosine_similarity_matrix,[EoftW_ch_1_ready, WofT_1_1_ready,EoftW_ch_2_ready])", "_____no_output_____" ], [ "df_time_E1 = pd.read_csv (r\"C:\\Users\\wscot\\OneDrive\\Desktop\\DataSets\\WheelOfTime\\Episode_1_Location_by_Time.csv\")\n\ndf_time_E1", "_____no_output_____" ] ], [ [ " I am also going to go through the actual text of those ten chapters and specifically mark the amount of words in a specific location.\n Then I am going to compare the time spent in each location from the book chapters to the text.\n After that, I can make visualizations explaining the differences found between book and film.\n", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
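The notebook above imports TfidfVectorizer but builds its similarity table from raw CountVectorizer counts. A small sketch of the same cosine-similarity step with TF-IDF weighting instead, assuming the three cleaned strings EoftW_ch_1_ready, WofT_1_1_ready and EoftW_ch_2_ready produced in that notebook; everything else here is illustrative:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

data = [EoftW_ch_1_ready, WofT_1_1_ready, EoftW_ch_2_ready]

# TF-IDF down-weights terms that appear in all three documents, so ubiquitous
# words and shared character names dominate the similarity scores less
tfidf_matrix = TfidfVectorizer().fit_transform(data)

# 3x3 matrix: entry (i, j) is the cosine similarity between documents i and j
print(cosine_similarity(tfidf_matrix))
```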
ec858cd942a2db5f020af68997ad1911a311d319
22,646
ipynb
Jupyter Notebook
cp2/cp2_method23.ipynb
jet-code/multivariable-control-systems
81b57d51a4dfc92964f989794f71d525af0359ff
[ "MIT" ]
null
null
null
cp2/cp2_method23.ipynb
jet-code/multivariable-control-systems
81b57d51a4dfc92964f989794f71d525af0359ff
[ "MIT" ]
null
null
null
cp2/cp2_method23.ipynb
jet-code/multivariable-control-systems
81b57d51a4dfc92964f989794f71d525af0359ff
[ "MIT" ]
null
null
null
23.298354
117
0.387574
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec8596589ac11a0b2da3754c445c80fb75b2fef2
5,117
ipynb
Jupyter Notebook
15-Music-the-magic-of-12/15.4-Problems.ipynb
bcarter19/RabbitMathConstruction
e607d079f54221f8642e1bd582d4e04a37beb61b
[ "MIT" ]
null
null
null
15-Music-the-magic-of-12/15.4-Problems.ipynb
bcarter19/RabbitMathConstruction
e607d079f54221f8642e1bd582d4e04a37beb61b
[ "MIT" ]
null
null
null
15-Music-the-magic-of-12/15.4-Problems.ipynb
bcarter19/RabbitMathConstruction
e607d079f54221f8642e1bd582d4e04a37beb61b
[ "MIT" ]
null
null
null
37.903704
544
0.609928
[ [ [ "# 15.4 Problems\n\nSee if you can solve these problems with code, or on paper.", "_____no_output_____" ], [ "1. How many notes would a 15-note octave and a 20-note octave have in common? That is, how many of the frequencies that appear in the n=15 column of the above table would also appear in the n=20 column? Make a list of these notes (give me their frequencies relative to the frequency of the first note of the octave).", "_____no_output_____" ], [ "2. Recall that we are using $n$ as the number of notes in the octave, and for any $n$, the frequency ratio $r$ between successive notes is determined as the solution to the equation $r^n = 2$. Denote by $p$ the value of $r$ which provides a 15-note octave and by $q$ the value of $r$ which provides a 20-note octave. What is the simplest relationship between $p$ and $q$?", "_____no_output_____" ], [ "3. Solve the following equations for $x$. Give both an exact answer and, if appropriate, a 3-place decimal approximation.\n 1. $x^{10} = 100$\n 2. $10^x = 100$\n 3. $x^{100} = 10$\n 4. $100^x = 10$\n 5. $x^{\\frac{1}{10}} = 100$", "_____no_output_____" ], [ "4. Use three decimal place numbers to answer the following.\n 1. Which of the 7 intermediate notes in the acoustic scale (see below) are found to within ½% accuracy in the equal-tempered 20-note octave?\n 2. Which of the 7 intermediate notes in the acoustic scale are found to within 1% in the equal-tempered 20-note octave?\n 3. Which of the notes in the equal-tempered table (that goes from $n=4$ to $n=16$) are within ½% of 7/4?", "_____no_output_____" ], [ "5. Simple ratios between two frequencies can be given a nice geometric interpretation in terms of a guitar string. Consider 3/2. Clamp the string down in the middle and vibrate it to get the first note. Then clamp it one third of the way along and vibrate it again. The two frequencies are in the ratio 3:2. Can you explain why this is the case? If not, do a bit of research around this question. Now take 4/3: what would you do to the string to produce two frequencies in that ratio?", "_____no_output_____" ], [ "## The design of the scale: two different schemes.\n\nThe system of having a constant frequency ratio between successive notes (which we are pursuing here) is called **even temperament**, and that’s the scheme that western music has adopted. Because of the homogeneity of the scale, all keys are equally good, in the sense that any two will have the same relative intervals between pairs of notes. To popularize this, Bach wrote a series of 24 preludes and fugues (12 major keys and 12 minor keys) called The [Well-Tempered Clavier](https://en.wikipedia.org/wiki/The_Well-Tempered_Clavier).\n\nOn the other hand, the **acoustic scale** would require that the 8 notes in any “key” be the correct small-integer ratios, and for the key of C major these are given, along with the corresponding decimals, in the table below. The problem with this is that what’s right in one key won’t usually be right in another and some pieces, when transposed, will sound awful.\n\n|Note|Acoustic Scale|Even Temperament|\n|-|-|-|\n|C|$1.000$|$2^\\frac{0}{12}=1.000$|\n|D|$\\frac{9}{8}=1.125$|$2^\\frac{2}{12}=1.122$|\n|E|$\\frac{5}{4}=1.250$|$2^\\frac{4}{12}=1.260$|\n|F|$\\frac{4}{3}=1.333$|$2^\\frac{5}{12}=1.335$|\n|G|$\\frac{3}{2}=1.500$|$2^\\frac{7}{12}=1.498$|\n|A|$\\frac{5}{3}=1.667$|$2^\\frac{9}{12}=1.682$|\n|B|$\\frac{15}{8}=1.875$|$2^\\frac{11}{12}=1.888$|\n|C|$\\frac{2}{1}=2.000$|$2^\\frac{12}{12}=2.000$|", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
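Problems 1, 3 and 4 in the record above reduce to computing equal-tempered ratios 2^(k/n) and checking how close they come to the small-integer acoustic ratios in the table. A plain-Python sketch of that computation; the function names and the 0.5% tolerance (mirroring problem 4) are illustrative choices:

```python
# Equal-tempered frequency ratios for an n-note octave: the k-th note is 2**(k/n),
# since the common ratio r solves r**n = 2
def tempered_ratios(n):
    return [2 ** (k / n) for k in range(n + 1)]

# Acoustic-scale ratios from the table above (D through B)
ACOUSTIC = [9/8, 5/4, 4/3, 3/2, 5/3, 15/8]

# Which acoustic ratios does an n-note equal-tempered octave hit to within tol?
def matches(n, tol=0.005):
    hits = []
    for target in ACOUSTIC:
        best = min(tempered_ratios(n), key=lambda r: abs(r / target - 1))
        if abs(best / target - 1) < tol:
            hits.append((target, round(best, 3)))
    return hits

print(matches(12))  # the familiar 12-note octave
print(matches(20))  # compare with problem 4
```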
ec85a631ed4edf23e839deef6c6f8e18572905f7
205,684
ipynb
Jupyter Notebook
Exercises/E7-DecisionTrees_Bagging.ipynb
dcrojasa/AdvancedMethodsDataAnalysisClass
8470809e5c3604330d0a04badf2a62bb5f8f7a9f
[ "MIT" ]
null
null
null
Exercises/E7-DecisionTrees_Bagging.ipynb
dcrojasa/AdvancedMethodsDataAnalysisClass
8470809e5c3604330d0a04badf2a62bb5f8f7a9f
[ "MIT" ]
null
null
null
Exercises/E7-DecisionTrees_Bagging.ipynb
dcrojasa/AdvancedMethodsDataAnalysisClass
8470809e5c3604330d0a04badf2a62bb5f8f7a9f
[ "MIT" ]
null
null
null
54.170134
24,860
0.569237
[ [ [ "# Exercise 7\n\n# Part 1 - DT\n\n## Capital Bikeshare data", "_____no_output_____" ], [ "## Introduction\n\n- Capital Bikeshare dataset from Kaggle: [data](https://github.com/justmarkham/DAT8/blob/master/data/bikeshare.csv), [data dictionary](https://www.kaggle.com/c/bike-sharing-demand/data)\n- Each observation represents the bikeshare rentals initiated during a given hour of a given day", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor, export_graphviz", "_____no_output_____" ], [ "# read the data and set \"datetime\" as the index\nbikes = pd.read_csv('../datasets/bikeshare.csv', index_col='datetime', parse_dates=True)", "_____no_output_____" ], [ "bikes.head()", "_____no_output_____" ], [ "bikes.columns", "_____no_output_____" ], [ "# \"count\" is a method, so it's best to rename that column\nbikes.rename(columns={'count':'total'}, inplace=True)", "_____no_output_____" ], [ "# create \"hour\" as its own feature\nbikes['hour'] = bikes.index.hour", "_____no_output_____" ], [ "bikes.head()", "_____no_output_____" ], [ "bikes.tail()", "_____no_output_____" ] ], [ [ "- **hour** ranges from 0 (midnight) through 23 (11pm)\n- **workingday** is either 0 (weekend or holiday) or 1 (non-holiday weekday)", "_____no_output_____" ], [ "# Exercise 7.1\n\nRun these two `groupby` statements and figure out what they tell you about the data.", "_____no_output_____" ] ], [ [ "# mean rentals for each value of \"workingday\"\nbikes.groupby('workingday').total.mean()", "_____no_output_____" ], [ "# mean rentals for each value of \"hour\"\nbikes.groupby('hour').total.mean()", "_____no_output_____" ] ], [ [ "# Exercise 7.2\n\nRun this plotting code, and make sure you understand the output. Then, separate this plot into two separate plots conditioned on \"workingday\". (In other words, one plot should display the hourly trend for \"workingday=0\", and the other should display the hourly trend for \"workingday=1\".)", "_____no_output_____" ] ], [ [ "# mean rentals for each value of \"hour\"\nbikes.groupby('hour').total.mean().plot()", "_____no_output_____" ] ], [ [ "Plot for workingday == 0 and workingday == 1", "_____no_output_____" ] ], [ [ "# hourly rental trend for \"workingday=0\"\nbikes[bikes.workingday==0].groupby('hour').total.mean().plot()", "_____no_output_____" ], [ "# hourly rental trend for \"workingday=1\"\nbikes[bikes.workingday==1].groupby('hour').total.mean().plot()", "_____no_output_____" ], [ "# combine the two plots\nbikes.groupby(['hour', 'workingday']).total.mean().unstack().plot()", "_____no_output_____" ] ], [ [ "Write about your findings", "_____no_output_____" ], [ "# Exercise 7.3\n\nFit a linear regression model to the entire dataset, using \"total\" as the response and \"hour\" and \"workingday\" as the only features. Then, print the coefficients and interpret them. 
What are the limitations of linear regression in this instance?", "_____no_output_____" ] ], [ [ "x1 = np.array(bikes[\"workingday\"])\nx2 = np.array(bikes[\"hour\"])\ny = np.array(bikes[\"total\"])", "_____no_output_____" ], [ "# stack the two predictors into a single feature matrix, as scikit-learn expects\nX_lr = np.column_stack((x1, x2))", "_____no_output_____" ], [ "model = LinearRegression()\nmodel.fit(X_lr, y)  # fit() takes one feature matrix and one response vector\nprint('coefficients:', model.coef_)\nprint('coefficient of determination:', model.score(X_lr, y))", "_____no_output_____" ] ], [ [ "# Exercise 7.4\n\nCreate a Decision Tree to forecast \"total\" by manually iterating over the features \"hour\" and \"workingday\". The algorithm must have at least 6 end nodes.", "_____no_output_____", "# Exercise 7.5\n\nTrain a Decision Tree using scikit-learn. Comment about the performance of the models.", "_____no_output_____", "# Part 2 - Bagging", "_____no_output_____", "## Mashable news stories analysis\n\nPredicting if a news story is going to be popular", "_____no_output_____" ] ], [ [ "df = pd.read_csv('../datasets/mashable.csv', index_col=0)\ndf.head()", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "X = df.drop(['url', 'Popular'], axis=1)\ny = df['Popular']", "_____no_output_____" ], [ "y.mean()", "_____no_output_____" ], [ "# train/test split\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)", "_____no_output_____" ], [ "X_train.shape", "_____no_output_____" ] ], [ [ "# Exercise 7.6\n\nEstimate a Decision Tree Classifier and a Logistic Regression\n\nEvaluate using the following metrics:\n* Accuracy\n* F1-Score", "_____no_output_____", "## Decision Tree Classifier", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeClassifier\n\ntree = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=None)", "_____no_output_____" ], [ "tree.fit(X_train, y_train)", "_____no_output_____" ], [ "# Predict on the held-out test set\ny_pred_df = tree.predict(X_test)\n\ny_pred_df", "_____no_output_____" ], [ "from sklearn import metrics\n# Accuracy and F1 on the test set\nprint(\"Accuracy\", tree.score(X_test, y_test))\nprint(\"F1-Score\", metrics.f1_score(y_test, y_pred_df))", "Accuracy 0.5406666666666666\nF1-Score 0.5385130609511053\n" ] ], [ [ "## Logistic Regression", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\n\nlogreg = LogisticRegression()\nlogreg.fit(X_train, y_train)", "C:\\Users\\Daniel Camillo Rojas\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. 
Specify a solver to silence this warning.\n FutureWarning)\n" ], [ "# Predict on the held-out test set\ny_pred_lg = logreg.predict(X_test)\ny_pred_lg", "_____no_output_____" ], [ "# Accuracy and F1 on the test set\nprint(\"Accuracy\", logreg.score(X_test, y_test))\nprint(\"F1 Score\", metrics.f1_score(y_test, y_pred_lg))", "Accuracy 0.6273333333333333\nF1 Score 0.6104529616724739\n" ] ], [ [ "## Conclusion\nComparing the accuracy and F1-score metrics, we can conclude that logistic regression performs better than the decision tree at predicting whether or not a story is popular.", "_____no_output_____", "# Exercise 7.7\n\nDraw 300 bootstrap samples\n\nEstimate the following set of classifiers:\n\n* 100 Decision Trees where max_depth=None\n* 100 Decision Trees where max_depth=2\n* 100 Logistic Regressions", "_____no_output_____" ] ], [ [ "# set a seed for reproducibility\nnp.random.seed(123)\n\n# define the sample size and the number of bootstrap samples (n_B)\nn_samples = X_train.shape[0]\nn_B = 100\n# create n_B bootstrap samples (each selects rows, with replacement, from the DataFrame)\nsamples = [np.random.choice(a=n_samples, size=n_samples, replace=True) for _ in range(n_B)]\nsamples", "_____no_output_____" ], [ "len(samples)", "_____no_output_____" ], [ "# show the rows for the first decision tree\nX_train.iloc[samples[0], :]", "_____no_output_____" ], [ "# Fit 100 bagged decision trees with max_depth=None\nfrom sklearn.tree import DecisionTreeClassifier\n\n# grow each tree deep\ntreereg = DecisionTreeClassifier(max_depth=None, random_state=123) # random_state is the seed\n\n# DataFrame for storing the test-set prediction from each tree\ny_pred = pd.DataFrame(index=X_test.index, columns=list(range(n_B)))\n\n# grow one tree for each bootstrap sample and make predictions on testing data\nfor i, sample in enumerate(samples):\n    X_boot = X_train.iloc[sample]  # resample rows from the ORIGINAL training set\n    y_boot = y_train.iloc[sample]\n    treereg.fit(X_boot, y_boot)\n    y_pred[i] = treereg.predict(X_test)", "_____no_output_____" ], [ "# Fit 100 bagged decision trees with max_depth=2\nfrom sklearn.tree import DecisionTreeClassifier\n\n# grow shallow (depth-2) trees\ntreereg = DecisionTreeClassifier(max_depth=2, random_state=123) # random_state is the seed\n\n# DataFrame for storing the test-set prediction from each tree\ny_pred2 = pd.DataFrame(index=X_test.index, columns=list(range(n_B)))\n\n# grow one tree for each bootstrap sample and make predictions on testing data\nfor i, sample in enumerate(samples):\n    X_boot = X_train.iloc[sample]\n    y_boot = y_train.iloc[sample]\n    treereg.fit(X_boot, y_boot)\n    y_pred2[i] = treereg.predict(X_test)", "_____no_output_____" ], [ "y_pred", "_____no_output_____" ], [ "y_pred2", "_____no_output_____" ], [ "y_train", "_____no_output_____" ], [ "y_train.iloc[sample]", "_____no_output_____" ], [ "X_test", "_____no_output_____" ], [ "y_pred.shape", "_____no_output_____" ], [ "X_train.iloc[sample]", "_____no_output_____" ], [ "from sklearn.ensemble import BaggingRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nbagreg = BaggingRegressor(DecisionTreeRegressor(), n_estimators=500,\n                          bootstrap=True, oob_score=True, random_state=1)", "_____no_output_____" ] ], [ [ "# Exercise 7.8\n\nEnsemble using majority voting\n\nEvaluate using the following metrics:\n* Accuracy\n* F1-Score", "_____no_output_____", "# Exercise 7.9\n\nEstimate the probability as the % of models that predict positive\n\nModify the probability threshold and select the one that maximizes the F1-Score", "_____no_output_____", "# Exercise 7.10\n\nEnsemble using weighted voting based on the oob_error\n\nEvaluate 
using the following metrics:\n* Accuracy\n* F1-Score", "_____no_output_____", "# Exercise 7.11\n\nEstimate the probability of the weighted voting\n\nModify the probability threshold and select the one that maximizes the F1-Score", "_____no_output_____", "# Exercise 7.12\n\nEstimate a logistic regression using the estimated classifiers as input\n\nModify the probability threshold such that it maximizes the F1-Score", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec85ab6eb37fd5a2e3b8d61b1c7b486b78512980
1,859
ipynb
Jupyter Notebook
Applied_Social_Network_Analysis_in_Python.ipynb
krreation/CodesAndSynatx
3e548585e8d784fbba96a1d950144bb150d9f963
[ "Apache-2.0" ]
null
null
null
Applied_Social_Network_Analysis_in_Python.ipynb
krreation/CodesAndSynatx
3e548585e8d784fbba96a1d950144bb150d9f963
[ "Apache-2.0" ]
null
null
null
Applied_Social_Network_Analysis_in_Python.ipynb
krreation/CodesAndSynatx
3e548585e8d784fbba96a1d950144bb150d9f963
[ "Apache-2.0" ]
null
null
null
28.6
192
0.436794
[ [ [ "[View in Colaboratory](https://colab.research.google.com/github/krreation/CodesAndSynatx/blob/master/Applied_Social_Network_Analysis_in_Python.ipynb)", "_____no_output_____" ] ], [ [ "import networkx as nx\nfrom networkx.algorithms import bipartite\n\nB = nx.Graph()\nB.add_edges_from([('A', 'G'),('A','I'), ('B','H'), ('C', 'G'), ('C', 'I'),('D', 'H'), ('E', 'I'), ('F', 'G'), ('F', 'J')])\nX1 = set(['A', 'B', 'C', 'D', 'E', 'F'])#\nP = bipartite.weighted_projected_graph(B,X1) \nP.edges(data=True)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
ec85abd8dee002794573f7e4816ff1c6a10b0ae2
3,453
ipynb
Jupyter Notebook
notebooks/PCA Notebook.ipynb
vingkan/phylo
f9612b06940a5f62113e123be80bb4f9bd57dea7
[ "MIT" ]
null
null
null
notebooks/PCA Notebook.ipynb
vingkan/phylo
f9612b06940a5f62113e123be80bb4f9bd57dea7
[ "MIT" ]
null
null
null
notebooks/PCA Notebook.ipynb
vingkan/phylo
f9612b06940a5f62113e123be80bb4f9bd57dea7
[ "MIT" ]
null
null
null
21.054878
84
0.538372
[ [ [ "import os \nimport numpy as np\nimport pandas as pd\nimport scipy\nimport imageio\nimport math\nimport phylo\n\nfrom PIL import Image\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style=\"ticks\")\nplt.style.use(\"seaborn-whitegrid\")", "_____no_output_____" ], [ "%load_ext blackcellmagic", "The blackcellmagic extension is already loaded. To reload it, use:\n %reload_ext blackcellmagic\n" ], [ "REGULAR_POKEMON = phylo.vectorize_pokemon(phylo.REGULAR_POKEMON_PATH)\nSHINY_POKEMON = phylo.vectorize_pokemon(phylo.SHINY_POKEMON_PATH)", "Vectorizing pokemon from /Users/rchatrath/workspace/phylo/images/regular/\nVector iteration 0\nVector iteration 100\nVector iteration 200\nVector iteration 300\nVector iteration 400\nVector iteration 500\nVector iteration 600\nDone vectorizing\nVectorizing pokemon from /Users/rchatrath/workspace/phylo/images/shiny/\nVector iteration 0\nVector iteration 100\nVector iteration 200\nVector iteration 300\nVector iteration 400\nVector iteration 500\nVector iteration 600\nDone vectorizing\n" ], [ "f", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
ec85bab8f41c79f0cc8e635aacbb86cefca2ca83
1,770
ipynb
Jupyter Notebook
content/lessons/01/Watch-Me-Code/WMC3-Input-And-Output.ipynb
MahopacHS/spring-2020-oubinam0717
5b35579e658e34cbb07c3477a9fce13ce01830af
[ "MIT" ]
1
2020-01-17T13:22:31.000Z
2020-01-17T13:22:31.000Z
content/lessons/01/Watch-Me-Code/WMC3-Input-And-Output.ipynb
MahopacHS/spring-2020-oubinam0717
5b35579e658e34cbb07c3477a9fce13ce01830af
[ "MIT" ]
null
null
null
content/lessons/01/Watch-Me-Code/WMC3-Input-And-Output.ipynb
MahopacHS/spring-2020-oubinam0717
5b35579e658e34cbb07c3477a9fce13ce01830af
[ "MIT" ]
null
null
null
19.88764
60
0.499435
[ [ [ "name = input(\"Enter your name: \")\nage = input(\"Enter your age: \")\nprint(name, \"is\", age, \"years old\")", "Enter your name: mike\nEnter your age: 45\nmike is 45 years old\n" ], [ "name = input(\"Enter your name: \")\nprint(\"How old are you\", name, \"?\", end = \"\")\nage = input()\nprint (f\"{age}? That's old!\")", "Enter your name: bob\nHow old are you bob ?99\n99? That's old!\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
ec85d85b4a17818dc8d6a1df077ad61c6350d0f0
60,805
ipynb
Jupyter Notebook
Tutorial-Start_to_Finish-ScalarWave.ipynb
dinatraykova/nrpytutorial
74d1bab0c45380727975568ba956b69c082e2293
[ "BSD-2-Clause" ]
null
null
null
Tutorial-Start_to_Finish-ScalarWave.ipynb
dinatraykova/nrpytutorial
74d1bab0c45380727975568ba956b69c082e2293
[ "BSD-2-Clause" ]
null
null
null
Tutorial-Start_to_Finish-ScalarWave.ipynb
dinatraykova/nrpytutorial
74d1bab0c45380727975568ba956b69c082e2293
[ "BSD-2-Clause" ]
2
2019-11-14T03:31:18.000Z
2019-12-12T13:42:52.000Z
99.844007
30,388
0.768144
[ [ [ "<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-59152712-8\"></script>\n<script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-59152712-8');\n</script>\n\n# Start-to-Finish Example: Numerical Solution of the Scalar Wave Equation, in Cartesian Coordinates\n\n## Author: Zach Etienne\n### Formatting improvements courtesy Brandon Clark\n\n## This module solves the scalar wave equation for a plane wave in Cartesian coordinates. To make the entire code immediately visible, the [`MoLtimestepping`](Tutorial-Method_of_Lines-C_Code_Generation.ipynb) module is not used here.\n\n**Module Status:** <font color='green'><b>Validated</b></font>\n\n**Validation Notes:** This module has been validated to converge at the expected order to the exact solution (see [plot](#convergence) at bottom).\n\n### NRPy+ Source Code for this module: \n* [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py) [\\[**tutorial**\\]](Tutorial-ScalarWave.ipynb) Generates the right-hand side for the Scalar Wave Equation in cartesian coordinates\n* [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) [\\[**tutorial**\\]](Tutorial-ScalarWave.ipynb) Generating C code for plane wave initial data for the scalar wave equation\n\n## Introduction:\n\nAs outlined in the [previous NRPy+ tutorial module](Tutorial-ScalarWave.ipynb), we first use NRPy+ to generate initial data for the scalar wave equation, and then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4).\n\nThe entire algorithm is outlined below, with NRPy+-based components highlighted in <font color='green'>green</font>.\n\n1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.\n1. <font color='green'>Set gridfunction values to initial data.</font>\n1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following:\n 1. <font color='green'>Evaluate scalar wave RHS expressions.</font>\n 1. Apply boundary conditions.\n1. At the end of each iteration in time, output the relative error between numerical and exact solutions.", "_____no_output_____" ], [ "<a id='toc'></a>\n\n# Table of Contents\n$$\\label{toc}$$\n\nThis module is organized as follows\n\n1. [Step 1](#generate): Generating the NRPy+ Components to the Scalar Wave Equation in Three Spatial Dimensions, Fourth-Order Finite Differencing, with Monochromatic Plane Wave Initial Data\n1. [Step 2](#mainc): `ScalarWave_Playground.c`: The Main C Code\n1. [Step 3](#convergence): Code validation: Verify that relative error in numerical solution converges to zero at the expected order\n1. 
[Step 4](#latex_pdf_output): Output this module to $\\LaTeX$-formatted PDF file", "_____no_output_____" ], [ "<a id='generate'></a>\n\n# Step 1: Generating the NRPy+ Components to the Scalar Wave Equation in Three Spatial Dimensions, Fourth-Order Finite Differencing, with Monochromatic Plane Wave Initial Data \\[Back to [top](#toc)\\]\n$$\\label{generate}$$\n\nLet's pick up where we left off in the [previous module](Tutorial-ScalarWave.ipynb), interfacing with the [ScalarWave/InitialData_PlaneWave](../edit/ScalarWave/InitialData_PlaneWave.py) and [ScalarWave/ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) NRPy+ modules to generate\n* monochromatic (single-wavelength) plane wave scalar wave initial data, and\n* the scalar wave equation RHSs at **4th** finite difference order in **3 spatial dimensions**", "_____no_output_____" ] ], [ [ "# Step P1: Import needed NRPy+ core modules:\nimport NRPy_param_funcs as par\nimport indexedexp as ixp\nimport grid as gri\nimport finite_difference as fin\nimport loop as lp\nfrom outputC import *\nimport cmdline_helper as cmd\nimport os\noutdir = os.path.join(\"ScalarWave_Ccodes/\")\ncmd.mkdir(outdir)\ncmd.delete_existing_files(os.path.join(outdir,\"*\"))\n\n# Step 1: Import the ScalarWave.InitialData module. \n# This command only declares ScalarWave initial data \n# parameters and the InitialData_PlaneWave() function.\nimport ScalarWave.InitialData_PlaneWave as swid\n\n# Step 2: Import ScalarWave_RHSs module. \n# This command only declares ScalarWave RHS parameters\n# and the ScalarWave_RHSs function (called later)\nimport ScalarWave.ScalarWave_RHSs as swrhs\n\n# Step 3: Set the spatial dimension parameter\n# to 3, and then read the parameter as DIM.\npar.set_parval_from_str(\"grid::DIM\",3)\nDIM = par.parval_from_str(\"grid::DIM\")\n\n# Step 4: Set the finite differencing order to 4.\npar.set_parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\",4)\n\n# Step 5: Call the InitialData_PlaneWave() function to set up\n# monochromatic (single frequency/wavelength) scalar\n# wave initial data.\nswid.InitialData_PlaneWave()\n\n# Step 6: Generate SymPy symbolic expressions for\n# uu_rhs and vv_rhs; the ScalarWave RHSs.\n# This function also declares the uu and vv\n# gridfunctions, which need to be declared\n# to output even the initial data to C file.\nswrhs.ScalarWave_RHSs()\n\n# Step 7: Generate C code for the initial data,\n# output to a file named \"SENR/ScalarWave_InitialData.h\".\nIDstring = fin.FD_outputC(\"returnstring\",[lhrh(lhs=gri.gfaccess(\"in_gfs\",\"uu\"),rhs=swid.uu_ID),\n lhrh(lhs=gri.gfaccess(\"in_gfs\",\"vv\"),rhs=swid.vv_ID)])\nwith open(os.path.join(outdir,\"ScalarWave_ExactSolution.h\"), \"w\") as file:\n file.write(lp.loop([\"i2\",\"i1\",\"i0\"],[\"0\",\"0\",\"0\"],\n [\"Nxx_plus_2NGHOSTS[2]\",\"Nxx_plus_2NGHOSTS[1]\",\"Nxx_plus_2NGHOSTS[0]\"],[\"1\",\"1\",\"1\"],\n [\"#pragma omp parallel for\",\" const REAL xx2=xx[2][i2];\",\n \" const REAL xx1=xx[1][i1];\"],\"\",\n \"const REAL xx0=xx[0][i0];\\n\"+IDstring))\n\n# Step 8: Generate C code for scalarwave RHSs,\n# output to a file named \"SENR/ScalarWave_RHSs.h\".\nRHSstring = fin.FD_outputC(\"returnstring\",[lhrh(lhs=gri.gfaccess(\"rhs_gfs\",\"uu\"),rhs=swrhs.uu_rhs),\n lhrh(lhs=gri.gfaccess(\"rhs_gfs\",\"vv\"),rhs=swrhs.vv_rhs)])\nwith open(os.path.join(outdir,\"ScalarWave_RHSs.h\"), \"w\") as file:\n file.write(lp.loop([\"i2\",\"i1\",\"i0\"],[\"NGHOSTS\",\"NGHOSTS\",\"NGHOSTS\"],\n [\"NGHOSTS+Nxx[2]\",\"NGHOSTS+Nxx[1]\",\"NGHOSTS+Nxx[0]\"],\n [\"1\",\"1\",\"1\"],[\"const REAL 
invdx0 = 1.0/dxx[0];\\n\"+\n \"const REAL invdx1 = 1.0/dxx[1];\\n\"+\n \"const REAL invdx2 = 1.0/dxx[2];\\n\"+\n \"#pragma omp parallel for\",\"\",\"\"],\"\",RHSstring))", "_____no_output_____" ] ], [ [ "<a id='mainc'></a>\n\n# Step 2: `ScalarWave_Playground.c`: The Main C Code \\[Back to [top](#toc)\\]\n$$\\label{mainc}$$\n\nNext we will write the C code infrastructure necessary to make use of the above NRPy+-generated codes. Again, we'll be using RK4 time integration via the Method of Lines.", "_____no_output_____" ] ], [ [ "# Part P0: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER\nwith open(os.path.join(outdir,\"ScalarWave_Playground.c\"), \"w\") as file:\n file.write(\"// Part P0: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER\\n\")\n file.write(\"#define NGHOSTS \"+str(int(par.parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\")/2))+\"\\n\")", "_____no_output_____" ], [ "%%writefile -a $outdir/ScalarWave_Playground.c\n\nconst int NSKIP_2D_OUTPUT = 5;\n\n// Part P1: Import needed header files\n#include \"stdio.h\"\n#include \"stdlib.h\"\n#include \"math.h\"\n\n// Part P2: Add needed #define's to set data type, the IDX4() macro, and the gridfunctions\n// Part P2a: set REAL=double, so that all floating point numbers are stored to at least ~16 significant digits.\n#define REAL double\n// Part P2b: Declare the IDX4(gf,i,j,k) macro, which enables us to store 4-dimensions of\n// data in a 1D array. In this case, consecutive values of \"i\" \n// (all other indices held to a fixed value) are consecutive in memory, where \n// consecutive values of \"j\" (fixing all other indices) are separated by \n// Nxx_plus_2NGHOSTS[0] elements in memory. Similarly, consecutive values of\n// \"k\" are separated by Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1] in memory, etc.\n#define IDX4(g,i,j,k) \\\n( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * ( (k) + Nxx_plus_2NGHOSTS[2] * (g) ) ) )\n// Part P2c: Set UUGF and VVGF macros\n#define NUM_GFS 2\n#define UUGF 0\n#define VVGF 1\n\n// Step P3: Set free parameters for the initial data\nconst REAL wavespeed = 1.0;\nconst REAL kk0 = 1.0;\nconst REAL kk1 = 1.0;\nconst REAL kk2 = 1.0;\n\n// Part P4: Declare the function for the exact solution. 
time==0 corresponds to the initial data.\nvoid exact_solution(const int Nxx_plus_2NGHOSTS[3],const REAL time,REAL *xx[3], REAL *in_gfs) {\n#include \"ScalarWave_ExactSolution.h\"\n}\n\n// Part P5: Declare the function to evaluate the scalar wave RHSs\nvoid rhs_eval(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3], const REAL *in_gfs, REAL *rhs_gfs) {\n#include \"ScalarWave_RHSs.h\"\n}\n\n// Part P6: Declare boundary condition FACE_UPDATE macro,\n// which updates a single face of the 3D grid cube\n// using quadratic polynomial extrapolation.\nconst int MAXFACE = -1;\nconst int NUL = +0;\nconst int MINFACE = +1;\n#define FACE_UPDATE(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \\\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \\\n gfs[IDX4(which_gf,i0,i1,i2)] = \\\n +3.0*gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \\\n -3.0*gfs[IDX4(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \\\n +1.0*gfs[IDX4(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \\\n }\n\n// Part P7: Boundary condition driver routine: Apply BCs to all six\n// boundary faces of the cube, filling in the innermost\n// ghost zone first, and moving outward.\nvoid apply_bcs(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *gfs) {\n#pragma omp parallel for\n for(int which_gf=0;which_gf<NUM_GFS;which_gf++) {\n int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };\n int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };\n for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {\n // After updating each face, adjust imin[] and imax[] \n // to reflect the newly-updated face extents.\n FACE_UPDATE(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;\n FACE_UPDATE(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;\n\n FACE_UPDATE(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;\n FACE_UPDATE(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;\n\n FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;\n FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;\n }\n }\n}\n// Part P8: 2D File output routine, for comparing numerical results to exact solution\nvoid output_2D(const int iter,const REAL time, \n const REAL *numerical_gridfunction_data,REAL *gridfunction_to_store_exact, \n const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3]) {\n // Step 2a: Validation: Output to 2D data files numerical and exact solutions\n //exact_solution(Nxx_plus_2NGHOSTS,time, xx, gridfunction_to_store_exact);\n char filename[100];\n sprintf(filename,\"out2D__resolution_%dx%dx%d__iter_%d.txt\",Nxx[0],Nxx[1],Nxx[2],iter);\n FILE *out2D = fopen(filename, \"w\");\n for(int i0=0;i0<Nxx[0]+2*NGHOSTS;i0++) {\n for(int i1=0;i1<Nxx[1]+2*NGHOSTS;i1++) {\n if(i0> (Nxx[0]+2*NGHOSTS)*.25 && i0< (Nxx[0]+2*NGHOSTS)*.75 &&\n i1> (Nxx[1]+2*NGHOSTS)*.25 && i1< (Nxx[1]+2*NGHOSTS)*.75) { \n REAL xx0 = xx[0][i0];\n REAL xx1 = xx[1][i1];\n fprintf(out2D,\"%e %e %e %e\\n\", xx0,xx1,\n numerical_gridfunction_data[IDX4(0,i0,i1, (int)((Nxx[2]+ 2*NGHOSTS)*0.5))],\n gridfunction_to_store_exact[ IDX4(0,i0,i1, (int)((Nxx[2]+ 2*NGHOSTS)*0.5))]);\n }\n }\n }\n fclose(out2D);\n}\n\n// main() function:\n// Step 0: Read command-line input, set up grid structure, allocate memory 
for gridfunctions, set up coordinates\n// Step 1: Set up scalar wave initial data\n// Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm,\n// applying quadratic extrapolation outer boundary conditions.\n// Step 3: Output relative error between numerical and exact solution.\n// Step 4: Free all allocated memory\nint main(int argc, const char *argv[]) {\n\n // Step 0a: Read command-line input, error out if nonconformant\n if(argc != 2 || atoi(argv[1]) < NGHOSTS) {\n fprintf(stderr,\"Error: Expected one command-line argument: ./ScalarWave_Playground [Nx(=Ny=Nz)],\\n\");\n fprintf(stderr,\"where Nx is the number of grid points in the x,y, and z directions.\\n\");\n fprintf(stderr,\"Nx MUST BE larger than NGHOSTS (= %d)\\n\",NGHOSTS);\n exit(1);\n }\n // Step 0b: Set up numerical grid structure, first in space...\n const int Nx0x1x2 = atoi(argv[1]);\n const int Nxx[3] = { Nx0x1x2, Nx0x1x2, Nx0x1x2 };\n const int Nxx_plus_2NGHOSTS[3] = { Nxx[0]+2*NGHOSTS, Nxx[1]+2*NGHOSTS, Nxx[2]+2*NGHOSTS };\n const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1]*Nxx_plus_2NGHOSTS[2];\n\n const REAL xxmin[3] = {-10.,-10.,-10. };\n const REAL xxmax[3] = { 10., 10., 10. };\n // ... and then set up the numerical grid structure in time:\n const REAL t_final = xxmax[0]*0.8; /* Final time is set so that at t=t_final, \n data at the origin have not been corrupted \n by the approximate outer boundary condition */\n const REAL CFL_FACTOR = 0.5; // Set the CFL Factor\n\n // Step 0c: Allocate memory for gridfunctions\n REAL *evol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_GFS * Nxx_plus_2NGHOSTS_tot);\n REAL *next_in_gfs = (REAL *)malloc(sizeof(REAL) * NUM_GFS * Nxx_plus_2NGHOSTS_tot);\n REAL *k1_gfs = (REAL *)malloc(sizeof(REAL) * NUM_GFS * Nxx_plus_2NGHOSTS_tot);\n REAL *k2_gfs = (REAL *)malloc(sizeof(REAL) * NUM_GFS * Nxx_plus_2NGHOSTS_tot);\n REAL *k3_gfs = (REAL *)malloc(sizeof(REAL) * NUM_GFS * Nxx_plus_2NGHOSTS_tot);\n REAL *k4_gfs = (REAL *)malloc(sizeof(REAL) * NUM_GFS * Nxx_plus_2NGHOSTS_tot);\n\n // Step 0d: Set up coordinates: Set dx, and then dt based on dx_min and CFL condition\n #define MIN(A, B) ( ((A) < (B)) ? (A) : (B) )\n // xx[0][i] = xxmin[0] + (i-NGHOSTS)*dxx[0]\n REAL dxx[3];\n for(int i=0;i<3;i++) dxx[i] = (xxmax[i] - xxmin[i]) / ((REAL)Nxx[i]);\n REAL dt = CFL_FACTOR * MIN(dxx[0],MIN(dxx[1],dxx[2])); // CFL condition\n int Nt = (int)(t_final / dt + 0.5); // The number of points in time.\n //Add 0.5 to account for C rounding down integers.\n // Step 0e: Set up Cartesian coordinate grids\n REAL *xx[3];\n for(int i=0;i<3;i++) {\n xx[i] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS[i]);\n for(int j=0;j<Nxx_plus_2NGHOSTS[i];j++) {\n xx[i][j] = xxmin[i] + (j-NGHOSTS)*dxx[i];\n }\n }\n\n // Step 1: Set up initial data to be exact solution at time=0:\n exact_solution(Nxx_plus_2NGHOSTS, 0.0, xx, evol_gfs);\n\n for(int n=0;n<=Nt;n++) { // Main loop to progress forward in time.\n /* Step 2: Validation: Output relative error between numerical and exact solution, */\n // Step 2a: Evaluate exact solution at current time, (n+1)*dt. 
Store to k1_gfs.\n exact_solution(Nxx_plus_2NGHOSTS,((REAL)n)*dt, xx, k1_gfs);\n\n // Step 2b: Output to 2D grid (region of x-y plane near origin) \n // every NSKIP_2D_OUTPUT iterations.\n if((n)%NSKIP_2D_OUTPUT ==0) {\n output_2D(n,((REAL)n)*dt,evol_gfs,k1_gfs, Nxx, Nxx_plus_2NGHOSTS,xx);\n }\n\n // Step 2c: Output relative error between exact & numerical at center of grid.\n const int i0mid=Nxx_plus_2NGHOSTS[0]/2;\n const int i1mid=Nxx_plus_2NGHOSTS[1]/2;\n const int i2mid=Nxx_plus_2NGHOSTS[2]/2;\n const REAL exact = k1_gfs[IDX4(UUGF,i0mid,i1mid,i2mid)];\n const REAL numerical = evol_gfs[IDX4(UUGF,i0mid,i1mid,i2mid)];\n const REAL relative_error = fabs((exact-numerical)/exact);\n printf(\"%e %e || %e %e %e: %e %e\\n\",((double)n)*dt, log10(relative_error),\n xx[0][i0mid],xx[1][i1mid],xx[2][i2mid], numerical,exact);\n\n // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm,\n // applying quadratic extrapolation outer boundary conditions.\n /***************************************************/\n /* Implement RK4 for Method of Lines timestepping: */\n /***************************************************/\n /* -= RK4: Step 1 of 4 =- */\n /* First evaluate k1 = RHSs expression */\n rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, evol_gfs, k1_gfs);\n /* Next k1 -> k1*dt, and then set the input for */\n /* the next RHS eval call to y_n+k1/2 */\n#pragma omp parallel for\n for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_GFS;i++) {\n k1_gfs[i] *= dt;\n next_in_gfs[i] = evol_gfs[i] + k1_gfs[i]*0.5;\n }\n /* Finally, apply boundary conditions to */\n /* next_in_gfs, so its data are set everywhere. */\n apply_bcs(Nxx,Nxx_plus_2NGHOSTS,next_in_gfs);\n\n /* -= RK4: Step 2 of 4 =- */\n rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, next_in_gfs, k2_gfs);\n#pragma omp parallel for\n for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_GFS;i++) {\n k2_gfs[i] *= dt;\n next_in_gfs[i] = evol_gfs[i] + k2_gfs[i]*0.5;\n }\n apply_bcs(Nxx,Nxx_plus_2NGHOSTS,next_in_gfs);\n\n /* -= RK4: Step 3 of 4 =- */\n rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, next_in_gfs, k3_gfs);\n#pragma omp parallel for\n for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_GFS;i++) {\n k3_gfs[i] *= dt;\n next_in_gfs[i] = evol_gfs[i] + k3_gfs[i];\n }\n apply_bcs(Nxx,Nxx_plus_2NGHOSTS,next_in_gfs);\n\n /* -= RK4: Step 4 of 4 =- */\n rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, next_in_gfs, k4_gfs);\n#pragma omp parallel for\n for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_GFS;i++) {\n k4_gfs[i] *= dt;\n evol_gfs[i] += (1.0/6.0)*(k1_gfs[i] + 2.0*k2_gfs[i] + 2.0*k3_gfs[i] + k4_gfs[i]);\n }\n apply_bcs(Nxx,Nxx_plus_2NGHOSTS,evol_gfs);\n } // End main loop to progress forward in time.\n\n // Step 4: Free all allocated memory\n free(k4_gfs);\n free(k3_gfs);\n free(k2_gfs);\n free(k1_gfs);\n free(next_in_gfs);\n free(evol_gfs);\n for(int i=0;i<3;i++) free(xx[i]);\n return 0;\n}", "Appending to ScalarWave_Ccodes//ScalarWave_Playground.c\n" ] ], [ [ "In summary, we have output a total of 3 files (click on any to view or edit):\n\n* [ScalarWave_Ccodes/ScalarWave_RHSs.h](../edit/ScalarWave_Ccodes/ScalarWave_RHSs.h)\n* [ScalarWave_Ccodes/ScalarWave_ExactSolution.h](../edit/ScalarWave_Ccodes/ScalarWave_ExactSolution.h)\n* [ScalarWave_Ccodes/ScalarWave_Playground.c](../edit/ScalarWave_Ccodes/ScalarWave_Playground.c)\n\n... 
which will now be compiled and run:", "_____no_output_____" ] ], [ [ "import cmdline_helper as cmd\n\ncmd.C_compile(os.path.join(outdir,\"ScalarWave_Playground.c\"), \"ScalarWave_Playground\")\ncmd.delete_existing_files(\"out*.txt\")\ncmd.delete_existing_files(\"out*.png\")\ncmd.Execute(\"ScalarWave_Playground\", \"48\", os.path.join(outdir,\"out48.txt\"))\ncmd.Execute(\"ScalarWave_Playground\", \"64\", os.path.join(outdir,\"out64.txt\"))\n# For benchmarking purposes: \n# cmd.Execute(\"ScalarWave_Playground\", \"96\", \"out96.txt\")", "Compiling executable...\nExecuting `gcc -Ofast -fopenmp -march=native -funroll-loops ScalarWave_Ccodes/ScalarWave_Playground.c -o ScalarWave_Playground -lm`...\nFinished executing in 0.818536996841 seconds.\nFinished compilation.\nExecuting `taskset -c 0,1,2,3,4,5 ./ScalarWave_Playground 48`...\nFinished executing in 0.215828895569 seconds.\nExecuting `taskset -c 0,1,2,3,4,5 ./ScalarWave_Playground 64`...\nFinished executing in 0.416137933731 seconds.\n" ] ], [ [ "<a id='convergence'></a>\n\n# Step 3: Code Validation: Verify that relative error in numerical solution converges to zero at the expected order \\[Back to [top](#toc)\\]\n$$\\label{convergence}$$", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport mpmath as mp\nimport csv\n\ndef file_reader(filename):\n with open(filename) as file:\n reader = csv.reader(file, delimiter=\" \")\n data = list(zip(*reader))\n # data is a tuple of strings. Tuples are immutable, and we need to perform math on\n # the data, so here we convert tuple to lists of floats:\n data0 = []\n data1 = []\n for i in range(len(data[0])):\n data0.append(float(data[0][i]))\n data1.append(float(data[1][i]))\n return data0,data1\n \nfirst_col48,second_col48 = file_reader(os.path.join(outdir,\"out48.txt\"))\nfirst_col64,second_col64 = file_reader(os.path.join(outdir,\"out64.txt\"))\n\nfor i in range(len(second_col64)):\n # data64 = data48*(64/48)**4 \n # -> log10(data64) = log10(data48) + 4*log(64/48)\n second_col64[i] += 4*mp.log10(64./48.)\n\n# https://matplotlib.org/gallery/text_labels_and_annotations/legend.html#sphx-glr-gallery-text-labels-and-annotations-legend-py \nfig, ax = plt.subplots()\n\nplt.title(\"Plot Demonstrating 4th-order Convergence\")\nplt.xlabel(\"time\")\nplt.ylabel(\"log10(Relative error)\")\n\nax.plot(first_col48, second_col48, 'k--', label='Nx = 48')\nax.plot(first_col64, second_col64, 'k-', label='Nx = 64, mult by (64/48)^4')\nlegend = ax.legend(loc='lower right', shadow=True, fontsize='x-large')\nlegend.get_frame().set_facecolor('C1')\nplt.show()", "_____no_output_____" ] ], [ [ "<a id='latex_pdf_output'></a>\n\n# Step 4: Output this module to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. 
After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename\n[Tutorial-Start_to_Finish-ScalarWave.pdf](Tutorial-Start_to_Finish-ScalarWave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)", "_____no_output_____" ] ], [ [ "!jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-Start_to_Finish-ScalarWave.ipynb\n!pdflatex -interaction=batchmode Tutorial-Start_to_Finish-ScalarWave.tex\n!pdflatex -interaction=batchmode Tutorial-Start_to_Finish-ScalarWave.tex\n!pdflatex -interaction=batchmode Tutorial-Start_to_Finish-ScalarWave.tex\n!rm -f Tut*.out Tut*.aux Tut*.log", "[NbConvertApp] Converting notebook Tutorial-Start_to_Finish-ScalarWave.ipynb to latex\n[NbConvertApp] Support files will be in Tutorial-Start_to_Finish-ScalarWave_files/\n[NbConvertApp] Making directory Tutorial-Start_to_Finish-ScalarWave_files\n[NbConvertApp] Writing 60157 bytes to Tutorial-Start_to_Finish-ScalarWave.tex\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec85dcdb471aa92476c2e882477d06d8f1fbb771
583,923
ipynb
Jupyter Notebook
SourceCode/GraphAlgorithms/GraphAlgorithms1.ipynb
nlharri/PythonCurriculum
0f18b7def68575a44b5a464d58e0896763568c75
[ "MIT" ]
1
2021-04-04T06:42:09.000Z
2021-04-04T06:42:09.000Z
SourceCode/GraphAlgorithms/GraphAlgorithms1.ipynb
nlharri/PythonCurriculum
0f18b7def68575a44b5a464d58e0896763568c75
[ "MIT" ]
null
null
null
SourceCode/GraphAlgorithms/GraphAlgorithms1.ipynb
nlharri/PythonCurriculum
0f18b7def68575a44b5a464d58e0896763568c75
[ "MIT" ]
null
null
null
811.004167
269,904
0.930383
[ [ [ "!pip install plotly --upgrade\n!pip install networkx --upgrade", "Collecting plotly\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f7/05/3c32c6bc85acbd30a18fbc3ba732fed5e48e5f8fd60d2a148877970f4a61/plotly-4.2.1-py2.py3-none-any.whl (7.2MB)\n\u001b[K |████████████████████████████████| 7.2MB 9.6MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.6/dist-packages (from plotly) (1.12.0)\nRequirement already satisfied, skipping upgrade: retrying>=1.3.3 in /usr/local/lib/python3.6/dist-packages (from plotly) (1.3.3)\nInstalling collected packages: plotly\n Found existing installation: plotly 4.1.1\n Uninstalling plotly-4.1.1:\n Successfully uninstalled plotly-4.1.1\nSuccessfully installed plotly-4.2.1\nRequirement already up-to-date: networkx in /usr/local/lib/python3.6/dist-packages (2.4)\nRequirement already satisfied, skipping upgrade: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx) (4.4.1)\n" ], [ "import networkx as nx\n\nG=nx.Graph()\nG.add_nodes_from([\"0\", \"1\", \"2\", \"3\", \"4\"])\nG.add_edges_from([(\"0\", \"1\"), (\"1\", \"2\"), (\"2\", \"0\"), (\"0\", \"3\"), (\"2\", \"4\"), (\"3\", \"4\")])\n\nprint(\"Nodes of graph: {}\".format(G.nodes()))\nprint(\"Edges of graph: {}\".format(G.edges()))\n\nnx.draw_networkx(G)", "Nodes of graph: ['0', '1', '2', '3', '4']\nEdges of graph: [('0', '1'), ('0', '2'), ('0', '3'), ('1', '2'), ('2', '4'), ('3', '4')]\n" ], [ "m = nx.to_numpy_matrix(G)\nprint(m)", "[[0. 1. 1. 1. 0.]\n [1. 0. 1. 0. 0.]\n [1. 1. 0. 0. 1.]\n [1. 0. 0. 0. 1.]\n [0. 0. 1. 1. 0.]]\n" ] ], [ [ "\\begin{pmatrix} \n0 & 1 & 1 & 1 & 0 \\\\\n1 & 0 & 1 & 0 & 0 \\\\\n1 & 1 & 0 & 0 & 1 \\\\\n1 & 0 & 0 & 0 & 1 \\\\\n0 & 0 & 1 & 1 & 0 \\\\\n\\end{pmatrix}\n", "_____no_output_____" ] ], [ [ "import networkx as nx\nimport numpy as np\n\nfrom networkx.drawing.nx_pydot import graphviz_layout\nimport matplotlib.pyplot as plt\n\ndef depth_first_search(v, m, visited_vertices, depth):\n padding = \" \"\n if v not in visited_vertices:\n print(\"{}visiting {}\".format(padding*depth, v))\n visited_vertices.append(v)\n num_of_vertices = np.shape(m)[0]\n print(\"{}visiting neighbours of {}\".format(padding*depth, v))\n for j in range(0, num_of_vertices):\n if m[v,j] != 0:\n print(\"{}stepping to edge ({}, {})\".format(padding*depth, v, j))\n depth_first_search(j, m, visited_vertices, depth + 1)\n else:\n print(\"{}{} was already visited\".format(padding*depth, v))\n\n\nG=nx.Graph()\nG.add_nodes_from([\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\"])\nG.add_edges_from([(\"0\", \"1\"), \n (\"1\", \"2\"), \n (\"2\", \"0\"), \n (\"0\", \"3\"), \n (\"2\", \"4\"), \n (\"3\", \"4\"), \n (\"0\", \"6\"), \n (\"4\", \"6\"), \n (\"2\", \"5\"), \n (\"3\", \"5\"), \n (\"0\", \"5\"), \n (\"2\", \"7\")])\n\npos = graphviz_layout(G, prog='neato')\nplt.figure(figsize=(10, 10))\nnx.draw(G, pos, node_size=500, alpha=1, node_color=\"orange\", with_labels=True)\nplt.axis('equal')\nplt.show()\n\nprint(\"Nodes of graph: {}\".format(G.nodes()))\nprint(\"Edges of graph: {}\".format(G.edges()))\nvisited_vertices = []\ndepth_first_search(0, nx.to_numpy_matrix(G), visited_vertices, 0)\nprint(\"Vertices were visited in the following sequence: {}\".format(visited_vertices))", "_____no_output_____" ], [ "import networkx as nx\nimport numpy as np\nfrom networkx.drawing.nx_pydot import graphviz_layout\nimport matplotlib.pyplot as plt\n\ndef mark_as_visited(v, m, visited_vertices, to_be_visited_vertices):\n if v not in 
visited_vertices:\n print(\"visiting {}\".format(v))\n visited_vertices.append(v)\n num_of_vertices = np.shape(m)[0]\n for j in range(0, num_of_vertices):\n if m[v,j] != 0 and j not in visited_vertices:\n to_be_visited_vertices.append(j)\n else:\n print(\"{} was already visited\".format(v))\n\ndef breadth_first_search(v, m, visited_vertices):\n to_be_visited_vertices = []\n mark_as_visited(v, m, visited_vertices, to_be_visited_vertices)\n while len(to_be_visited_vertices) != 0:\n j = to_be_visited_vertices.pop(0)\n mark_as_visited(j, m, visited_vertices, to_be_visited_vertices)\n\n\nG=nx.Graph()\nG.add_nodes_from([\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\"])\nG.add_edges_from([(\"0\", \"1\"), \n (\"1\", \"2\"), \n (\"2\", \"0\"), \n (\"0\", \"3\"), \n (\"2\", \"4\"), \n (\"3\", \"4\"), \n (\"0\", \"6\"), \n (\"4\", \"6\"), \n (\"2\", \"5\"), \n (\"3\", \"5\"), \n (\"0\", \"5\"), \n (\"2\", \"7\")])\n\npos = graphviz_layout(G, prog='neato')\nplt.figure(figsize=(10, 10))\nnx.draw(G, pos, node_size=500, alpha=1, node_color=\"orange\", with_labels=True)\nplt.axis('equal')\nplt.show()\n\nprint(\"Nodes of graph: {}\".format(G.nodes()))\nprint(\"Edges of graph: {}\".format(G.edges()))\nvisited_vertices = []\nbreadth_first_search(0, nx.to_numpy_matrix(G), visited_vertices)\nprint(\"Vertices were visited in the following sequence: {}\".format(visited_vertices))", "_____no_output_____" ], [ "import networkx as nx\nimport numpy as np\nfrom networkx.drawing.nx_pydot import graphviz_layout\nimport matplotlib.pyplot as plt\n\ndef mark_as_visited(v, m, visited_vertices, to_be_visited_vertices):\n if v not in visited_vertices:\n print(\"visiting {}\".format(v))\n visited_vertices.append(v)\n num_of_vertices = np.shape(m)[0]\n for j in range(0, num_of_vertices):\n if m[v,j] != 0 and j not in visited_vertices:\n to_be_visited_vertices.append(j)\n else:\n print(\"{} was already visited\".format(v))\n\ndef breadth_first_search(v, m, visited_vertices):\n to_be_visited_vertices = []\n mark_as_visited(v, m, visited_vertices, to_be_visited_vertices)\n while len(to_be_visited_vertices) != 0:\n j = to_be_visited_vertices.pop(0)\n mark_as_visited(j, m, visited_vertices, to_be_visited_vertices)\n\n\nG=nx.Graph()\nG.add_nodes_from([\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\"])\nG.add_edges_from([(\"0\", \"1\"), \n (\"1\", \"2\"), \n (\"2\", \"0\"), \n (\"0\", \"3\"), \n (\"2\", \"4\"), \n (\"3\", \"4\"), \n (\"0\", \"6\"), \n (\"4\", \"6\"), \n (\"2\", \"5\"), \n (\"3\", \"5\"), \n (\"0\", \"5\"), \n (\"2\", \"7\"),\n (\"7\", \"8\"),\n (\"8\", \"2\"),\n (\"8\", \"3\"),\n (\"8\", \"4\"),\n (\"8\", \"5\"),\n (\"8\", \"6\"),\n (\"8\", \"9\"),\n (\"8\", \"10\"),\n (\"8\", \"11\"),\n (\"10\", \"11\")])\n\npos = graphviz_layout(G, prog='twopi')\nplt.figure(figsize=(10, 10))\nnx.draw(G, pos, node_size=500, alpha=1, node_color=\"orange\", with_labels=True)\nplt.axis('equal')\nplt.show()\n\nprint(\"Nodes of graph: {}\".format(G.nodes()))\nprint(\"Edges of graph: {}\".format(G.edges()))\nvisited_vertices = []\nbreadth_first_search(0, nx.to_numpy_matrix(G), visited_vertices)\nprint(\"Vertices were visited in the following sequence: {}\".format(visited_vertices))", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport networkx as nx\nfrom networkx.drawing.nx_pydot import graphviz_layout\nimport numpy as np\n\ndef mark_as_visited(v, m, visited_vertices, to_be_visited_vertices):\n print(\"visiting {}\".format(v))\n visited_vertices.append(v)\n num_of_vertices = 
np.shape(m)[0]\n    for j in range(0, num_of_vertices):\n        if m[v,j] != 0 and j not in visited_vertices:\n            to_be_visited_vertices.append(j)\n\ndef breadth_first_search(v, m, visited_vertices):\n    to_be_visited_vertices = []\n    mark_as_visited(v, m, visited_vertices, to_be_visited_vertices)\n    while len(to_be_visited_vertices) != 0:\n        j = to_be_visited_vertices.pop(0)\n        mark_as_visited(j, m, visited_vertices, to_be_visited_vertices)\n\nG = nx.balanced_tree(2, 3)\npos = graphviz_layout(G, prog='twopi')\nplt.figure(figsize=(10, 10))\nnx.draw(G, pos, node_size=500, alpha=1, node_color=\"orange\", with_labels=True)\nplt.axis('equal')\nplt.show()\n\nprint(\"Nodes of graph: {}\".format(G.nodes()))\nprint(\"Edges of graph: {}\".format(G.edges()))\nvisited_vertices = []\nbreadth_first_search(0, nx.to_numpy_matrix(G), visited_vertices)\nprint(\"Vertices were visited in the following sequence: {}\".format(visited_vertices))", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport networkx as nx\n\nG = nx.random_geometric_graph(100, 0.125)\npos = graphviz_layout(G, prog='circo')\nplt.figure(figsize=(20, 20))\nnx.draw(G, pos, node_size=500, alpha=1, node_color=\"orange\", with_labels=True)\nplt.axis('equal')\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
ec85e1fc56a6b842d03477118c70097a3d9212dc
437,017
ipynb
Jupyter Notebook
samples/voc/inspect_voc_data.py.ipynb
niltecedu/Mask_RCNN_tf2
49cc9396132a187c8e49763dddfb3ee70443dff7
[ "MIT" ]
null
null
null
samples/voc/inspect_voc_data.py.ipynb
niltecedu/Mask_RCNN_tf2
49cc9396132a187c8e49763dddfb3ee70443dff7
[ "MIT" ]
null
null
null
samples/voc/inspect_voc_data.py.ipynb
niltecedu/Mask_RCNN_tf2
49cc9396132a187c8e49763dddfb3ee70443dff7
[ "MIT" ]
1
2021-06-25T09:05:26.000Z
2021-06-25T09:05:26.000Z
2,051.723005
66,110
0.924781
[ [ [ "import os\nimport sys\nimport itertools\nimport math\nimport logging\nimport json\nimport re\nimport random\nfrom collections import OrderedDict\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.lines as lines\nfrom matplotlib.patches import Polygon\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\nfrom mrcnn import visualize\nfrom mrcnn.visualize import display_images\nimport mrcnn.model as modellib\nfrom mrcnn.model import log\n\n\n", "_____no_output_____" ], [ "import voc\nconfig = voc.VocConfig()\nVOC_DIR = \"../../VOCdevkit\" # TODO: enter value here", "_____no_output_____" ], [ "dataset = voc.VocDataset()\ndataset.load_voc(VOC_DIR, \"val\", year='2012')\ndataset.prepare()\nprint(\"Image Count: {}\".format(len(dataset.image_ids)))\nprint(\"Class Count: {}\".format(dataset.num_classes))\nfor i, info in enumerate(dataset.class_info):\n print(\"{:3}. {:50}\".format(i, info['name']))", "Image Count: 1392\nClass Count: 21\n 0. BG \n 1. aeroplane \n 2. bicycle \n 3. bird \n 4. boat \n 5. bottle \n 6. bus \n 7. car \n 8. cat \n 9. chair \n 10. cow \n 11. diningtable \n 12. dog \n 13. horse \n 14. motorbike \n 15. person \n 16. potted plant \n 17. sheep \n 18. sofa \n 19. train \n 20. tv/monitor \n" ], [ "image_id = np.random.choice(dataset.image_ids)\nprint(image_id)\nimage = dataset.load_image(image_id)\nclass_label = dataset.load_class_label(image_id)\ninstance_mask = dataset.load_raw_mask(image_id, 'object_mask')\nplt.figure()\nplt.subplot(131)\nplt.imshow(image);\nplt.subplot(132)\nplt.imshow(class_label);\nplt.subplot(133)\nplt.imshow(instance_mask);", "836\n" ], [ "%matplotlib inline", "_____no_output_____" ], [ "# Load and display random samples\nimage_ids = np.random.choice(dataset.image_ids, 4)\nfor image_id in image_ids:\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n visualize.display_top_masks(image, mask, class_ids, dataset.class_names)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
ec85e6e49c21a3361a29ccd8c8e2ac38a729fd81
90,152
ipynb
Jupyter Notebook
wikipedia_bias_analysis.ipynb
Nadr0jj/data-512-a2
29f056d27ce3ba62d398d458a9111c01fe602f74
[ "MIT" ]
null
null
null
wikipedia_bias_analysis.ipynb
Nadr0jj/data-512-a2
29f056d27ce3ba62d398d458a9111c01fe602f74
[ "MIT" ]
null
null
null
wikipedia_bias_analysis.ipynb
Nadr0jj/data-512-a2
29f056d27ce3ba62d398d458a9111c01fe602f74
[ "MIT" ]
null
null
null
37.03862
836
0.354202
[ [ [ "# Goals\n\nWe will explore the potential of data bias exhibited by the english Wikipedia.\n\nTo do this, we will create a series of tables to show...\n\n1. The countries with the greatest and least coverage of politicians on Wikipedia compared to their population. \n2. The countries with the highest and lowest proportion of high quality articles about politicians (according to ORES).\n3. A ranking of geographic regions by articles-per-person and proportion of high quality articles.\n\n", "_____no_output_____" ], [ "# Step 1: Gathering the data\n\nWikipedia politicians by country dataset: [https://figshare.com/articles/Untitled_Item/5513449](https://figshare.com/articles/Untitled_Item/5513449)\n\nPopulation data by country/region: [https://www.prb.org/international/indicator/population/table/](https://www.prb.org/international/indicator/population/table/)", "_____no_output_____" ], [ "# Step 2: Cleaning the data\nThe file page_data.csv contains some page names that start with the string \"Template:\". These pages are not articles and need to be removed.\n\nSimilarly, WPDF_2020_data.cs contains some rows that provide cumulative regional population counts rather than country-level counts. These rows have all caps in the values in the 'geography' field. They need to be removed and stored somewhere else for later analysis. ", "_____no_output_____" ], [ "First, we'll need to import the data into Pandas DataFrames. Then, we'll execute the data cleaning steps.", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "page_data = pd.read_csv('page_data.csv')\npopulation_data = pd.read_csv('/content/WPDS_2020_data.csv')", "_____no_output_____" ], [ "# Removes all articles whose title contains \"Template:\"\npage_data2 = page_data[~page_data.page.str.contains(\"Template:\")]\n\n# Removes all regional population count rows\npopulation_data2 = population_data[~population_data.Name.str.isupper()]\n\n# Stores all regional population count rows in a new df\nregional_pop_data = population_data[population_data.Name.str.isupper()]", "_____no_output_____" ] ], [ [ "# Step 3: Estimating article quality\nUsing the Objective Revision Evaluation Service (ORES), a machine learning tool created to estimate wikipedia article quality, we will obtain the predicted article quality for our list of articles in the page_data2 DataFrame.", "_____no_output_____" ] ], [ [ "!pip install ores\nfrom ores import api", "_____no_output_____" ], [ "# Provide useragent string to help ORES team track requests\nores_session = api.Session(\"https://ores.wikimedia.org\", \"[email protected]\")", "_____no_output_____" ], [ "# Process all ~50k articles in one call\nresults = ores_session.score(\"enwiki\", [\"articlequality\"], page_data2['rev_id'])", "_____no_output_____" ], [ "# Create a new column we can add to our page_data2 df which includes predicted article quality\nscores = []\n\nfor score in results:\n try:\n scores.append(score['articlequality']['score']['prediction'])\n except:\n scores.append(-1) # -1 will be the code for the case where ORES was unable to provide a prediction", "_____no_output_____" ], [ "# Create a DataFrame with predicted_score added as column\npage_data3 = page_data2\npage_data3['predicted_quality'] = scores", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: 
https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n" ], [ "page_data3['predicted_quality'].value_counts()", "_____no_output_____" ] ], [ [ "# Step 4: Merging the page_data3 DF and population_data DF\nWe will merge these datasets together in order to complete our analysis.", "_____no_output_____" ] ], [ [ "page_data3.head(1)", "_____no_output_____" ], [ "population_data3 = population_data2\npopulation_data3.rename(columns={'Name': 'country'}, inplace=True)  # inplace rename, so population_data2 references the same renamed frame\npopulation_data3.head(1)", "/usr/local/lib/python3.7/dist-packages/pandas/core/frame.py:4308: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n errors=errors,\n" ], [ "df = pd.merge(page_data3, population_data2, on=['country'], how='outer', indicator=True)\ndf.head(3)", "_____no_output_____" ], [ "df['_merge'].value_counts()", "_____no_output_____" ] ], [ [ "The new \"_merge\" column indicates whether the merge key exists in only the left (left_only) frame, right (right_only) frame, or both. We will use this to preserve the data which did not merge successfully but also remove it from the df.", "_____no_output_____" ] ], [ [ "df_both = df[df['_merge'] == 'both']\ndf_not_both = df[df['_merge'] != 'both']\n\n# Save the matched and unmatched rows to CSV files\ndf_both.to_csv(\"wp_wpds_politicians_by_country.csv\")\ndf_not_both.to_csv(\"wp_wpds_countries-no_match.csv\")", "_____no_output_____" ] ], [ [ "# Step 5: Analysis\nWe will calculate the proportion (as a percentage) of articles-per-population and high-quality articles for each country and for each geographic region. 
We define \"high quality\" to be articles which recieved a predicted quality score from ORES of \"FA\" or \"GA\" (featured article or good article).", "_____no_output_____" ] ], [ [ "# Make a DF which lists each country and the count of number of articles they have\n\narticle_counts = pd.DataFrame(df_both['country'].value_counts()) # Sum up number of rows that exist for each country (each represents article)\n\n# Organize data into nice DF for joining later\narticle_counts.rename(columns={'country': 'article_count'}, inplace=True)\narticle_counts.reset_index(inplace=True)\narticle_counts.rename(columns={'index': 'country'}, inplace=True)\n\narticle_counts.head(3)", "_____no_output_____" ], [ "# Make a DF which lists the number of \"FA\" or \"GA\" articles for each country\ngafa_count = df_both[(df_both.predicted_quality == 'FA') | (df_both.predicted_quality == 'GA')] # Eliminate rows which do not represent GA/FA article\ngafa_count = pd.DataFrame(gafa_count['country'].value_counts()) # Sum up the number of rows that exist for each country\n\n# Organize data into nice DF for joining later\ngafa_count.rename(columns={'country': 'gafa_count'}, inplace=True)\ngafa_count.reset_index(inplace=True)\ngafa_count.rename(columns={'index': 'country'}, inplace=True)\n\ngafa_count.head(3)", "_____no_output_____" ], [ "# Create final df by joining cleaned population data and the two dfs we just created above\n\ncounts_df = pd.merge(population_data3, article_counts, on=['country'], how='left')\ncounts_df = pd.merge(counts_df, gafa_count, on=['country'], how='left')\ncounts_df.fillna(0, inplace=True)\n\ncounts_df.head(3)", "_____no_output_____" ] ], [ [ "# A note about regions_df\nThe population data (provided in the WPDS_2020_data.csv file) implicitly defines the region and subregion each country belongs to in a heirarchical manner. For example, If the first two rows included the data REGION = AFRICA and SUBREGION = NORTH AFRICA, then the following rows containing country information would be implicitly defined to belong to the region Africa and the subregion North Africa. However, due to some missing data this would lead to the USA and Canada also belonging to Africa because they are provided with a subregion but not a region. Due to this, I manually downloaded DataFrame population_data and added subregions and regions to each country row as I saw appropriate while staying as close to the provided data as possible. 
The result of my manual modification can be seen in population_data_mod.csv", "_____no_output_____" ] ], [ [ "regions_df = pd.read_csv(\"population_data_mod.csv\", encoding='utf-8')", "_____no_output_____" ], [ "regions_df.head()", "_____no_output_____" ], [ "# Finally we can join and make our final df for the analysis\n\nanalysis_df = counts_df.merge(regions_df, on='country', how='inner')[['country', 'Population_x', 'article_count', 'gafa_count', 'sub-region', 'region']]\nanalysis_df.rename(columns={'Population_x': 'population'}, inplace=True)\n\nanalysis_df.head()", "_____no_output_____" ] ], [ [ "### Articles/Population percentage by country", "_____no_output_____" ] ], [ [ "articles_over_population_county_df = analysis_df[['country', 'population', 'article_count']]\narticles_over_population_county_df['article_pop_percentage'] = (100 * articles_over_population_county_df['article_count'])/articles_over_population_county_df['population']\n\narticles_over_population_county_df.head()", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \n" ] ], [ [ "### High quality articles (gafa_count)/Population percentage by country", "_____no_output_____" ] ], [ [ "gafa_over_population_country_df = analysis_df[['country', 'population', 'gafa_count']]\ngafa_over_population_country_df['gafa_pop_percentage'] = (100 * gafa_over_population_country_df['gafa_count'])/gafa_over_population_country_df['population']\n\ngafa_over_population_country_df.head()", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \n" ] ], [ [ "### Articles/Population percentage by subregion", "_____no_output_____" ] ], [ [ "articles_over_population_sub_region_df = analysis_df[['sub-region', 'population', 'article_count']]\narticles_over_population_sub_region_df = articles_over_population_sub_region_df.groupby(by=['sub-region'], dropna=False).sum()\narticles_over_population_sub_region_df.reset_index(inplace=True)\narticles_over_population_sub_region_df['article_pop_percentage'] = (100 * articles_over_population_sub_region_df['article_count'])/articles_over_population_sub_region_df['population']\n\narticles_over_population_sub_region_df.head()", "_____no_output_____" ] ], [ [ "### High quality articles (gafa_count)/Populatio percentage by subregion", "_____no_output_____" ] ], [ [ "gafa_over_population_sub_region_df = analysis_df[['sub-region', 'population', 'gafa_count']]\ngafa_over_population_sub_region_df = gafa_over_population_sub_region_df.groupby(by=['sub-region'], dropna=False).sum()\ngafa_over_population_sub_region_df.reset_index(inplace=True)\ngafa_over_population_sub_region_df['gafa_pop_percentage'] = (100 * gafa_over_population_sub_region_df['gafa_count'])/gafa_over_population_sub_region_df['population']\n\ngafa_over_population_sub_region_df.head()", "_____no_output_____" ] ], [ [ "# Step 6:\nWe will now imbed several tables in the notebook which show the results of our analysis.", "_____no_output_____" ] ], [ [ "# Top 
10 countries by coverage: 10 highest-ranked countries in terms of number of politician articles as a proportion of country population\n\narticles_over_population_county_df.sort_values(by=['article_pop_percentage'], ascending=False).head(10)", "_____no_output_____" ], [ "# Bottom 10 countries by coverage: 10 lowest-ranked countries in terms of number of politican articles as a proportion of country population\n\narticles_over_population_county_df.sort_values(by=['article_pop_percentage'], ascending=True).head(10)", "_____no_output_____" ], [ "# Top 10 countries by relative quality: 10 highest-ranked countries in terms of the relative proportion of politican articles that are of GA and FA-quality (refered to as gafa)\n\ngafa_over_population_country_df.sort_values(by=['gafa_pop_percentage'], ascending=False).head(10)", "_____no_output_____" ], [ "# Bottom 10 countries by relative quality: 10 lowest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality\n\ngafa_over_population_country_df.sort_values(by=['gafa_pop_percentage'], ascending=True).head(10)", "_____no_output_____" ], [ "# Geographic regions by coverage: Ranking of geographic regions (in descending order) in terms of the total count of politician articles from countries in each region as a proportion of total regional population\n\narticles_over_population_sub_region_df.sort_values(by=['article_pop_percentage'], ascending=False).head(10)", "_____no_output_____" ], [ "# Geographic regions by coverage: Ranking of geographic regions (in descending order) in terms of the relative proportion of politican articles from countries in each region that are of GA and FA-quality\n\ngafa_over_population_sub_region_df.sort_values(by=['gafa_pop_percentage'], ascending=False).head(10)", "_____no_output_____" ] ], [ [ "# Conclusions\n\nOverwhelmingly, western countries are better represented on english Wikipedia both in terms of all articles and high quality articles. This suggests that the type of bias exhibited by english Wikipedia is a western lean. The engligh Wikipedia still remains a highly valuable data source, but this bias should be kept in mind when using the data.", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec85e77c8cb8ef6bb9d49ec3b4001e3cef7268c4
11,551
ipynb
Jupyter Notebook
Capstone_code_submission/code/coreference_resolver/huggingface_neuralcoref.ipynb
anurag19006/OntoLearnBench
413f66998e34e882a5d598263715efe92dd43a89
[ "Apache-2.0" ]
null
null
null
Capstone_code_submission/code/coreference_resolver/huggingface_neuralcoref.ipynb
anurag19006/OntoLearnBench
413f66998e34e882a5d598263715efe92dd43a89
[ "Apache-2.0" ]
null
null
null
Capstone_code_submission/code/coreference_resolver/huggingface_neuralcoref.ipynb
anurag19006/OntoLearnBench
413f66998e34e882a5d598263715efe92dd43a89
[ "Apache-2.0" ]
null
null
null
28.949875
217
0.515193
[ [ [ "import spacy\nimport neuralcoref", "_____no_output_____" ] ], [ [ "## Instantiate `NeuralCoref`", "_____no_output_____" ], [ "1. Load a `spaCy` English model - `sm`, `md` or `lg`.\n2. Add `NeuralCoref` to `spaCy`'s pipe.\n3. Use `Neuralcoref` as you would usually use a `spaCy` Document - so simple!", "_____no_output_____" ] ], [ [ "nlp = spacy.load('en_core_web_sm')\nneuralcoref.add_to_pipe(nlp)", "_____no_output_____" ], [ "text = \"Eva and Martha didn't want their friend Jenny to feel lonely so they invited her to the party in Las Vegas.\"\ndoc = nlp(text)", "_____no_output_____" ] ], [ [ "## Coreference resolution with `Huggingface`", "_____no_output_____" ], [ "`Huggingface` has a [demo](https://huggingface.co/coref/) and a very good [README](https://github.com/huggingface/neuralcoref#using-neuralcoref) where all additional CR functionalities are explained. \n\nHere's a fragment of Huggingface's documentation - the most commonly used CR methods:\n\n| Attribute | Type | Description\n|:--------------------------|:-------------------|:----------------------------------------------------\n|`doc._.coref_clusters` |list of `Cluster` |All the clusters of corefering mentions in the doc\n|`doc._.coref_resolved` |unicode |Each corefering mention is replaced by the main mention in the associated cluster.\n|`doc._.coref_scores` |Dict of Dict |Scores of the coreference resolution between mentions.\n|`span._.coref_cluster` |`Cluster` |Cluster of mentions that corefer with the span\n|`span._.coref_scores` |Dict |Scores of the coreference resolution of & span with other mentions (if applicable).\n|`token._.coref_clusters` |list of `Cluster` |All the clusters of corefering mentions that contains the token\n", "_____no_output_____" ] ], [ [ "# it's our original text\ndoc ", "_____no_output_____" ], [ "# it has two clusters \nclusters = doc._.coref_clusters\nclusters", "_____no_output_____" ], [ "# and that's how it looks after coreference resolution\ndoc._.coref_resolved", "_____no_output_____" ], [ "# now we'll show how those NeuralCoref methods can be used - we need a Span and a Token\neva_and_martha_span = clusters[0][0] # the first Span form the first Cluster is 'Eva and Martha'\neva_token = eva_and_martha_span[0] # the first Token from this Span is 'Eva'", "_____no_output_____" ], [ "# we see the score values between our span and all other candidate mentions\neva_and_martha_span._.coref_scores # the same can be achieved with doc._.coref_scores[eva_and_martha_span]", "_____no_output_____" ], [ "# the Cluster to which the Span belongs\neva_and_martha_span._.coref_cluster", "_____no_output_____" ], [ "# all Clusters (in the case of nested clusters there can be more than one cluster) to which the Token belongs\neva_token._.coref_clusters", "_____no_output_____" ] ], [ [ "## Use it just like a regular `spaCy` document", "_____no_output_____" ], [ "The beauty of `Huggingface`'s `NeuralCoref` is that it just adds those coreference resolution functionalities to the `spaCy`'s Document, making it easy to use and enabling access to all its additional functions.", "_____no_output_____" ] ], [ [ "from spacy import displacy\nimport pandas as pd", "_____no_output_____" ], [ "displacy.render(doc, style=\"ent\")", "_____no_output_____" ], [ "df = pd.DataFrame([[token.text, token.pos_, token.tag_] for token in doc], columns=['token', 'POS', 'TAG'])\ndf.head()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
ec85f0fe52a435700b97f2b3b002805d6e1e8056
9,338
ipynb
Jupyter Notebook
notebooks/cz/1_preprocessing.ipynb
jarobyte91/post_ocr_correction
bae2e601c838a23cc31a82e10ed5cd1b10ccdac6
[ "MIT" ]
3
2021-11-15T08:29:39.000Z
2021-12-20T21:56:54.000Z
notebooks/cz/1_preprocessing.ipynb
jarobyte91/post_ocr_correction
bae2e601c838a23cc31a82e10ed5cd1b10ccdac6
[ "MIT" ]
3
2021-11-15T08:29:36.000Z
2022-01-06T13:52:34.000Z
notebooks/cz/1_preprocessing.ipynb
jarobyte91/post_ocr_correction
bae2e601c838a23cc31a82e10ed5cd1b10ccdac6
[ "MIT" ]
1
2021-11-08T20:15:52.000Z
2021-11-08T20:15:52.000Z
21.174603
121
0.484044
[ [ [ "import pandas as pd\nimport numpy as np\nfrom tqdm.notebook import tqdm\nfrom nltk.lm import Vocabulary\nimport torch\nimport torch.utils.data as tud\nimport sys\nsys.path.append(\"../../lib\")\nfrom metrics import levenshtein\nimport pickle\nfrom pathlib import Path\nimport re", "_____no_output_____" ], [ "folder = Path(\"../../data/cz/data/\")", "_____no_output_____" ], [ "train = pd.read_pickle(folder/\"train_aligned.pkl\")\ntrain.shape", "_____no_output_____" ], [ "dev = pd.read_pickle(folder/\"dev_aligned.pkl\")\ndev.shape", "_____no_output_____" ], [ "with open(folder/\"vocabulary.pkl\", \"rb\") as file:\n vocabulary = pickle.load(file)", "_____no_output_____" ], [ "char2i = {c:i for i, c in enumerate(sorted(vocabulary), 3)}\nchar2i[\"<PAD>\"] = 0\nchar2i[\"<START>\"] = 1\nchar2i[\"<END>\"] = 2\nlen(char2i)", "_____no_output_____" ], [ "i2char = {i:c for i, c in enumerate(sorted(vocabulary), 3)}\ni2char[0] = \"<PAD>\"\ni2char[1] = \"<START>\"\ni2char[2] = \"<END>\"\nlen(i2char)", "_____no_output_____" ], [ "length = 100\n\noutput = []\nfor s in tqdm(train.source):\n output.append(torch.tensor([1] + [char2i[c] for c in s] + [2]))\n \ntrain_source = torch.nn.utils.rnn.pad_sequence(output, batch_first = True)\nprint(train_source.shape)\n\noutput = []\nfor s in tqdm(train.target):\n output.append(torch.tensor([1] + [char2i[c] for c in s] + [2]))\n \ntrain_target = torch.nn.utils.rnn.pad_sequence(output, batch_first = True)\nprint(train_target.shape)", "_____no_output_____" ], [ "train.source[0] == re.sub(r\"<START>|<END>|<PAD>\", \"\", \"\".join([i2char[c] for c in train_source[0].tolist()]))", "_____no_output_____" ], [ "train.target[0] == re.sub(r\"<START>|<END>|<PAD>\", \"\", \"\".join([i2char[c] for c in train_target[0].tolist()]))", "_____no_output_____" ], [ "output = []\nfor s in tqdm(dev.source):\n output.append(torch.tensor([1] + [char2i[c] for c in s] + [2]))\n \ndev_source = torch.nn.utils.rnn.pad_sequence(output, batch_first = True)\nprint(dev_source.shape)\n\noutput = []\nfor s in tqdm(dev.target):\n output.append(torch.tensor([1] + [char2i[c] for c in s] + [2]))\n \ndev_target = torch.nn.utils.rnn.pad_sequence(output, batch_first = True)\nprint(dev_target.shape)", "_____no_output_____" ], [ "dev.source[0] == re.sub(r\"<START>|<END>|<PAD>\", \"\", \"\".join([i2char[c] for c in dev_source[0].tolist()]))", "_____no_output_____" ], [ "dev.target[0] == re.sub(r\"<START>|<END>|<PAD>\", \"\", \"\".join([i2char[c] for c in dev_target[0].tolist()]))", "_____no_output_____" ], [ "torch.save(train_source, folder/\"train_source.pt\")", "_____no_output_____" ], [ "torch.save(train_target, folder/\"train_target.pt\")", "_____no_output_____" ], [ "torch.save(dev_source, folder/\"dev_source.pt\")", "_____no_output_____" ], [ "torch.save(dev_target, folder/\"dev_target.pt\")", "_____no_output_____" ], [ "with open(folder/\"char2i.pkl\", \"wb\") as file:\n pickle.dump(char2i, file)", "_____no_output_____" ], [ "with open(folder/\"i2char.pkl\", \"wb\") as file:\n pickle.dump(i2char, file)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec85f64846f15cd31a58cc0550243c3f7aba8201
178,147
ipynb
Jupyter Notebook
courseware/Latitude-dependent grey radiation.ipynb
rabernat/climlab
d4af28f5850005aaa8fe1e17f0ae6182a4e25865
[ "MIT" ]
1
2021-08-19T19:40:19.000Z
2021-08-19T19:40:19.000Z
courseware/Latitude-dependent grey radiation.ipynb
lijunde/climlab
d4af28f5850005aaa8fe1e17f0ae6182a4e25865
[ "MIT" ]
null
null
null
courseware/Latitude-dependent grey radiation.ipynb
lijunde/climlab
d4af28f5850005aaa8fe1e17f0ae6182a4e25865
[ "MIT" ]
2
2021-05-06T21:11:19.000Z
2021-08-19T19:40:20.000Z
139.942655
13,998
0.88757
[ [ [ "# Latitude-dependent grey radiation", "_____no_output_____" ], [ "Here is a quick example of using the `climlab.GreyRadiationModel` with a latitude dimension and seasonally varying insolation.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport climlab", "_____no_output_____" ], [ "np.__version__", "_____no_output_____" ], [ "model = climlab.GreyRadiationModel(num_lev=30, num_lat=90)\nprint model", "climlab Process of type <class 'climlab.model.column.GreyRadiationModel'>. \nState variables and domain shapes: \n Tatm: (90, 30) \n Ts: (90, 1) \nThe subprocess tree: \ntop: <class 'climlab.model.column.GreyRadiationModel'>\n LW: <class 'climlab.radiation.greygas.GreyGas'>\n SW: <class 'climlab.radiation.greygas.GreyGasSW'>\n insolation: <class 'climlab.radiation.insolation.FixedInsolation'>\n\n" ], [ "print model.lat", "[-89. -87. -85. -83. -81. -79. -77. -75. -73. -71. -69. -67. -65. -63. -61.\n -59. -57. -55. -53. -51. -49. -47. -45. -43. -41. -39. -37. -35. -33. -31.\n -29. -27. -25. -23. -21. -19. -17. -15. -13. -11. -9. -7. -5. -3. -1.\n 1. 3. 5. 7. 9. 11. 13. 15. 17. 19. 21. 23. 25. 27. 29.\n 31. 33. 35. 37. 39. 41. 43. 45. 47. 49. 51. 53. 55. 57. 59.\n 61. 63. 65. 67. 69. 71. 73. 75. 77. 79. 81. 83. 85. 87. 89.]\n" ], [ "insolation = climlab.radiation.insolation.DailyInsolation(domains=model.Ts.domain)", "_____no_output_____" ], [ "model.add_subprocess('insolation', insolation)\nmodel.subprocess.SW.flux_from_space = insolation.insolation", "_____no_output_____" ], [ "print model", "climlab Process of type <class 'climlab.model.column.GreyRadiationModel'>. \nState variables and domain shapes: \n Tatm: (90, 30) \n Ts: (90, 1) \nThe subprocess tree: \ntop: <class 'climlab.model.column.GreyRadiationModel'>\n LW: <class 'climlab.radiation.greygas.GreyGas'>\n SW: <class 'climlab.radiation.greygas.GreyGasSW'>\n insolation: <class 'climlab.radiation.insolation.DailyInsolation'>\n\n" ], [ "model.step_forward()", "_____no_output_____" ], [ "plt.plot(model.lat, model.SW_down_TOA)", "_____no_output_____" ], [ "model.Tatm.shape", "_____no_output_____" ], [ "model.integrate_years(1)", "Integrating for 365 steps, 365.2422 days, or 1 years.\nTotal elapsed time is 1.00207478763 years.\n" ], [ "plt.plot(model.lat, model.Ts)", "_____no_output_____" ], [ "model.integrate_years(1)", "Integrating for 365 steps, 365.2422 days, or 1 years.\nTotal elapsed time is 2.00141166601 years.\n" ], [ "plt.plot(model.lat, model.timeave['Ts'])", "_____no_output_____" ], [ "def plot_temp_section(model, timeave=True):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if timeave:\n field = model.timeave['Tatm'].transpose()\n else:\n field = model.Tatm.transpose()\n cax = ax.contourf(model.lat, model.lev, field)\n ax.invert_yaxis()\n ax.set_xlim(-90,90)\n ax.set_xticks([-90, -60, -30, 0, 30, 60, 90])\n fig.colorbar(cax)", "_____no_output_____" ], [ "plot_temp_section(model)", "_____no_output_____" ], [ "model2 = climlab.RadiativeConvectiveModel(num_lev=30, num_lat=90)\ninsolation = climlab.radiation.insolation.DailyInsolation(domains=model2.Ts.domain)\nmodel2.add_subprocess('insolation', insolation)\nmodel2.subprocess.SW.flux_from_space = insolation.insolation", "_____no_output_____" ], [ "model2.step_forward()", "_____no_output_____" ], [ "model2.integrate_years(1)", "Integrating for 365 steps, 365.2422 days, or 1 years.\nTotal elapsed time is 1.00207478763 years.\n" ], [ "model2.integrate_years(1)", "Integrating for 365 steps, 365.2422 days, or 1 
years.\nTotal elapsed time is 2.00141166601 years.\n" ], [ "plot_temp_section(model2)", "_____no_output_____" ] ], [ [ "## Testing out multi-dimensional Band Models", "_____no_output_____" ] ], [ [ "import climlab\nimport numpy as np\n\n# Put in some ozone\nimport netCDF4 as nc\n\ndatapath = \"http://ramadda.atmos.albany.edu:8080/repository/opendap/latest/Top/Users/Brian+Rose/CESM+runs/\"\nendstr = \"/entry.das\"\n\ntopo = nc.Dataset( datapath + 'som_input/USGS-gtopo30_1.9x2.5_remap_c050602.nc' + endstr )\nozone = nc.Dataset( datapath + 'som_input/ozone_1.9x2.5_L26_2000clim_c091112.nc' + endstr )\n\n# Dimensions of the ozone file\nlat = ozone.variables['lat'][:]\nlon = ozone.variables['lon'][:]\nlev = ozone.variables['lev'][:]\n\n# Taking annual, zonal average of the ozone data\nO3_zon = np.mean( ozone.variables['O3'],axis=(0,3) )", "_____no_output_____" ], [ "import climlab\nimport numpy as np\n# make a model on the same grid as the ozone\nmodel3 = climlab.BandRCModel(lev=lev, lat=lat)\ninsolation = climlab.radiation.insolation.DailyInsolation(domains=model3.Ts.domain)\nmodel3.add_subprocess('insolation', insolation)\nmodel3.subprocess.SW.flux_from_space = insolation.insolation\nprint model3", "climlab Process of type <class 'climlab.model.column.BandRCModel'>. \nState variables and domain shapes: \n Tatm: (96, 26) \n Ts: (96, 1) \nThe subprocess tree: \ntop: <class 'climlab.model.column.BandRCModel'>\n LW: <class 'climlab.radiation.nband.FourBandLW'>\n H2O: <class 'climlab.radiation.water_vapor.ManabeWaterVapor'>\n convective adjustment: <class 'climlab.convection.convadj.ConvectiveAdjustment'>\n SW: <class 'climlab.radiation.nband.ThreeBandSW'>\n insolation: <class 'climlab.radiation.insolation.DailyInsolation'>\n\n" ], [ "# Set the ozone mixing ratio\nO3_trans = np.transpose(O3_zon)\n# model and ozone data are on the same grid, after the transpose.\nprint O3_trans.shape\nprint lev\nprint model3.lev\n", "(96, 26)\n[ 3.544638 7.3888135 13.967214 23.944625 37.23029 53.114605\n 70.05915 85.439115 100.514695 118.250335 139.115395 163.66207\n 192.539935 226.513265 266.481155 313.501265 368.81798 433.895225\n 510.455255 600.5242 696.79629 787.70206 867.16076 929.648875\n 970.55483 992.5561 ]\n[ 3.544638 7.3888135 13.967214 23.944625 37.23029 53.114605\n 70.05915 85.439115 100.514695 118.250335 139.115395 163.66207\n 192.539935 226.513265 266.481155 313.501265 368.81798 433.895225\n 510.455255 600.5242 696.79629 787.70206 867.16076 929.648875\n 970.55483 992.5561 ]\n" ], [ "# Put in the ozone\nmodel3.absorber_vmr['O3'] = O3_trans", "_____no_output_____" ], [ "print model3.absorber_vmr['O3'].shape\nprint model3.Tatm.shape", "(96, 26)\n(96, 26)\n" ], [ "model3.step_forward()", "_____no_output_____" ], [ "model3.integrate_years(1.)", "Integrating for 365 steps, 365.2422 days, or 1.0 years.\nTotal elapsed time is 1.00207478763 years.\n" ], [ "model3.integrate_years(1.)", "Integrating for 365 steps, 365.2422 days, or 1.0 years.\nTotal elapsed time is 2.00141166601 years.\n" ], [ "#plt.contour(model3.lat, model3.lev, model3.Tatm.transpose())\nplot_temp_section(model3)", "_____no_output_____" ] ], [ [ "This is now working. 
Will need to do some model tuning.\n\nAnd start to add dynamics!", "_____no_output_____" ] ], [ [ "testmodel = climlab.BandRCModel(num_lat=90, num_lev=30)", "_____no_output_____" ], [ "testmodel.step_forward()\ntestmodel.step_forward()\ntestmodel.step_forward()", "_____no_output_____" ], [ "%timeit testmodel.step_forward()", "100 loops, best of 3: 11.6 ms per loop\n" ], [ "np.__version__", "_____no_output_____" ] ], [ [ "Definitely get better performance with numpy version 1.9 and above due to vectorization of `numpy.tril()` (lower triangle operator) for multi-dimensional arrays.", "_____no_output_____" ], [ "## Experimental... adding meridional diffusion!", "_____no_output_____" ] ], [ [ "print model2", "climlab Process of type <class 'climlab.model.column.RadiativeConvectiveModel'>. \nState variables and domain shapes: \n Tatm: (90, 30) \n Ts: (90, 1) \nThe subprocess tree: \ntop: <class 'climlab.model.column.RadiativeConvectiveModel'>\n convective adjustment: <class 'climlab.convection.convadj.ConvectiveAdjustment'>\n LW: <class 'climlab.radiation.greygas.GreyGas'>\n SW: <class 'climlab.radiation.greygas.GreyGasSW'>\n insolation: <class 'climlab.radiation.insolation.DailyInsolation'>\n\n" ], [ "diffmodel = climlab.process_like(model2)", "_____no_output_____" ], [ "# thermal diffusivity in W/m**2/degC\nD = 0.05\n# meridional diffusivity in 1/s\nK = D / diffmodel.Tatm.domain.heat_capacity[0]\nprint K", "1.46414342629e-07\n" ], [ "d = climlab.dynamics.diffusion.MeridionalDiffusion(K=K, state={'Tatm': diffmodel.state['Tatm']}, **diffmodel.param)", "_____no_output_____" ], [ "diffmodel.add_subprocess('diffusion', d)", "_____no_output_____" ], [ "print diffmodel", "climlab Process of type <class 'climlab.model.column.RadiativeConvectiveModel'>. \nState variables and domain shapes: \n Tatm: (90, 30) \n Ts: (90, 1) \nThe subprocess tree: \ntop: <class 'climlab.model.column.RadiativeConvectiveModel'>\n convective adjustment: <class 'climlab.convection.convadj.ConvectiveAdjustment'>\n LW: <class 'climlab.radiation.greygas.GreyGas'>\n diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>\n SW: <class 'climlab.radiation.greygas.GreyGasSW'>\n insolation: <class 'climlab.radiation.insolation.DailyInsolation'>\n\n" ], [ "diffmodel.step_forward()", "_____no_output_____" ], [ "diffmodel.integrate_years(1)", "Integrating for 365 steps, 365.2422 days, or 1 years.\nTotal elapsed time is 3.00348645365 years.\n" ], [ "diffmodel.integrate_years(1)", "Integrating for 365 steps, 365.2422 days, or 1 years.\nTotal elapsed time is 4.00282333202 years.\n" ], [ "plot_temp_section(model2)\nplot_temp_section(diffmodel)", "_____no_output_____" ] ], [ [ "Amazingly... this actually works!\n\nAs long as K is a constant.\n\nThe diffusion operation is broadcast over all vertical levels without any special code.", "_____no_output_____" ] ], [ [ "def inferred_heat_transport( energy_in, lat_deg ):\n '''Returns the inferred heat transport (in PW) by integrating the net energy imbalance from pole to pole.'''\n from scipy import integrate\n from climlab import constants as const\n lat_rad = np.deg2rad( lat_deg )\n return ( 1E-15 * 2 * np.math.pi * const.a**2 * integrate.cumtrapz( np.cos(lat_rad)*energy_in,\n x=lat_rad, initial=0. 
) )\n", "_____no_output_____" ], [ "# Plot the northward heat transport in this model\nRtoa = np.squeeze(diffmodel.timeave['ASR'] - diffmodel.timeave['OLR'])\nplt.plot(diffmodel.lat, inferred_heat_transport(Rtoa, diffmodel.lat))", "_____no_output_____" ] ], [ [ "### Band model with diffusion", "_____no_output_____" ] ], [ [ "diffband = climlab.process_like(model3)", "_____no_output_____" ], [ "# thermal diffusivity in W/m**2/degC\nD = 0.05\n# meridional diffusivity in 1/s\nK = D / diffband.Tatm.domain.heat_capacity[0]\nprint K", "8.92760732994e-07\n" ], [ "d = climlab.dynamics.diffusion.MeridionalDiffusion(K=K, state={'Tatm': diffband.state['Tatm']}, **diffband.param)\ndiffband.add_subprocess('diffusion', d)\nprint diffband", "climlab Process of type <class 'climlab.model.column.BandRCModel'>. \nState variables and domain shapes: \n Tatm: (96, 26) \n Ts: (96, 1) \nThe subprocess tree: \ntop: <class 'climlab.model.column.BandRCModel'>\n diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>\n LW: <class 'climlab.radiation.nband.FourBandLW'>\n H2O: <class 'climlab.radiation.water_vapor.ManabeWaterVapor'>\n convective adjustment: <class 'climlab.convection.convadj.ConvectiveAdjustment'>\n SW: <class 'climlab.radiation.nband.ThreeBandSW'>\n insolation: <class 'climlab.radiation.insolation.DailyInsolation'>\n\n" ], [ "diffband.integrate_years(1)", "Integrating for 365 steps, 365.2422 days, or 1 years.\nTotal elapsed time is 3.00074854439 years.\n" ], [ "diffband.integrate_years(1)", "Integrating for 365 steps, 365.2422 days, or 1 years.\nTotal elapsed time is 4.00008542277 years.\n" ], [ "plot_temp_section(model3)\nplot_temp_section(diffband)", "_____no_output_____" ], [ "plt.plot(diffband.lat, diffband.timeave['ASR'] - diffband.timeave['OLR'])", "_____no_output_____" ], [ "# Plot the northward heat transport in this model\nRtoa = np.squeeze(diffband.timeave['ASR'] - diffband.timeave['OLR'])\nplt.plot(diffband.lat, inferred_heat_transport(Rtoa, diffband.lat))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec860c30956c60b1649e336b24d7c137278e4c5e
10,101
ipynb
Jupyter Notebook
Supervised Learning/08 Training and Tuning/Diabetes_Case_Study-zh.ipynb
stephengineer/Introduction-to-Machine-Learning-with-TensorFlow
fc13795db3e20d87f625864e4e7ff68b4afcedb3
[ "MIT" ]
null
null
null
Supervised Learning/08 Training and Tuning/Diabetes_Case_Study-zh.ipynb
stephengineer/Introduction-to-Machine-Learning-with-TensorFlow
fc13795db3e20d87f625864e4e7ff68b4afcedb3
[ "MIT" ]
null
null
null
Supervised Learning/08 Training and Tuning/Diabetes_Case_Study-zh.ipynb
stephengineer/Introduction-to-Machine-Learning-with-TensorFlow
fc13795db3e20d87f625864e4e7ff68b4afcedb3
[ "MIT" ]
null
null
null
27.75
452
0.581824
[ [ [ "### Diabetes Case Study\n\n现在你将有机会使用一系列的监督学习技术来进行分类和回归。将这些应用到项目中之前,让我们再举一个例子,用另一个流行数据集来从头到尾了解一下机器学习的工作流程。", "_____no_output_____" ] ], [ [ "# Import our libraries\nimport pandas as pd\nimport numpy as np\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.model_selection import train_test_split, RandomizedSearchCV\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nimport seaborn as sns\nsns.set(style=\"ticks\")\n\nimport check_file as ch\n\n%matplotlib inline\n\n# Read in our dataset\ndiabetes = pd.read_csv('diabetes.csv')\n\n# Take a look at the first few rows of the dataset\ndiabetes.head()", "_____no_output_____" ] ], [ [ "我们将从加载数据和导入相关库开始。然后,你将了解如何使用网格搜索来优化多个模型。因为这门课程的目的是了解机器学习技术,所以我们在很大程度上省略了创建机器学习模型前的数据分析阶段的相关过程,包括探索性数据分析、特征工程、数据清理和数据整理等。\n\n> **步骤 1:** 让我们完成下面几个步骤。先看一下这个数据集的一些摘要统计数据,以便准确地将统计值与下面的字典中的适当键进行匹配。", "_____no_output_____" ] ], [ [ "# Cells for work\n", "_____no_output_____" ], [ "# Possible keys for the dictionary\na = '0.65'\nb = '0'\nc = 'Age'\nd = '0.35'\ne = 'Glucose'\nf = '0.5'\ng = \"More than zero\"\n\n# Fill in the dictionary with the correct values here\nanswers_one = {\n 'The proportion of diabetes outcomes in the dataset': # add letter here,\n 'The number of missing data points in the dataset': # add letter here,\n 'A dataset with a symmetric distribution': # add letter here,\n 'A dataset with a right-skewed distribution': # add letter here, \n 'This variable has the strongest correlation with the outcome': # add letter here\n}\n\n# Just to check your answer, don't change this\nch.check_one(answers_one)", "_____no_output_____" ] ], [ [ "> **步骤 2**:因为我们的数据集非常干净,我们将直接开始机器学习步骤。我们的目标是能够预测糖尿病病例。首先,需要确定y向量和X矩阵。然后,用下面的代码将数据集划分为训练数据和测试数据。", "_____no_output_____" ] ], [ [ "y = # Pull y column\nX = # Pull X variable columns\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)", "_____no_output_____" ] ], [ [ "现在你已经有一个训练集和一个测试集,我们需要创建一些模型,并最终从中选择一个最好的模型。然而,与前面的课程中使用默认值不同,现在我们要对这些模型进行调优。\n\n通过测试所有可能的超参数组合来找到最佳模型通常很困难(而且非常耗时)。因此,通常我们使用随机搜索。\n\n在实践中,虽然优化的很好,但使用随机搜索来查找超参数会更耗时。这是与此主题相关的一篇[文章](https://blog.h2o.ai/2016/06/hyperparameter-optimization-in-h2o-grid-search-random-search-and-the-future/)。这是两篇介绍如何在sklearn中使用随机搜索的文档 [1](http://scikit-learn.org/stable/auto_examples/model_selection/plot_randomized_search.html#sphx-glr-auto-examples-model-selection-plot-randomized-search-py) 和 [2](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html)。\n\n\n为了有效地使用随机化搜索,你最好对超参数的常用分布有很好的理解。了解你的超参数有哪些可能值将使你能编写一个好的网格搜素算法(不中断的那种)。\n\n> **步骤 3**:在这一步中,我将向你展示如何使用随机搜索,然后你可以在步骤4中为其他模型设置网格搜索。但是因为我不记得 SVMs 的每一个超参数具体做什么,请你帮我将每个超参数与其相应的调优函数进行匹配。", "_____no_output_____" ] ], [ [ "# build a classifier\nclf_rf = RandomForestClassifier()\n\n# Set up the hyperparameter search\nparam_dist = {\"max_depth\": [3, None],\n \"n_estimators\": list(range(10, 200)),\n \"max_features\": list(range(1, X_test.shape[1]+1)),\n \"min_samples_split\": list(range(2, 11)),\n \"min_samples_leaf\": list(range(1, 11)),\n \"bootstrap\": [True, False],\n \"criterion\": [\"gini\", \"entropy\"]}\n\n\n# Run a randomized search over the hyperparameters\nrandom_search = RandomizedSearchCV(clf_rf, param_distributions=param_dist)\n\n# Fit the model on the training data\nrandom_search.fit(X_train, y_train)\n\n# Make predictions on the test data\nrf_preds = 
random_search.best_estimator_.predict(X_test)\n\nch.print_metrics(y_test, rf_preds, 'random forest')", "_____no_output_____" ] ], [ [ "> **步骤4**:现在你已经了解了如何使用随机森林模型来运行随机网格搜索,请尝试使用 AdaBoost 和 SVC 分类器执行此操作。你也可以尝试使用在本课程前面见过的其他分类器, 看看哪些分类器最有效。", "_____no_output_____" ] ], [ [ "# build a classifier for ada boost\n\n\n# Set up the hyperparameter search\n# look at setting up your search for n_estimators, learning_rate\n# http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html\n\n\n# Run a randomized search over the hyperparameters\n\n\n# Fit the model on the training data\n\n\n# Make predictions on the test data\nada_preds = \n\n# Return your metrics on test data\nch.print_metrics(y_test, ada_preds, 'adaboost')", "_____no_output_____" ], [ "# build a classifier for support vector machines\n\n\n# Set up the hyperparameter search\n# look at setting up your search for C (recommend 0-10 range), \n# kernel, and degree\n# http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html\n\n\n\n# Run a randomized search over the hyperparameters\n\n\n# Fit the model on the training data\n\n\n# Make predictions on the test data\nsvc_preds = \n\n\n# Return your metrics on test data\nch.print_metrics(y_test, svc_preds, 'svc')", "_____no_output_____" ] ], [ [ "> **步骤5**:使用下面的测试查看一下,你选择的最佳模型是否和我们通过网格搜索找到的一样。 ", "_____no_output_____" ] ], [ [ "a = 'randomforest'\nb = 'adaboost'\nc = 'supportvector'\n\nbest_model = # put your best model here as a string or variable\n\n# See if your best model was also mine. \n# Notice these might not match depending your search!\nch.check_best(best_model)", "_____no_output_____" ] ], [ [ "一旦你找到了最佳模型,了解它为什么运行良好也很重要。在可以看到权重的回归模型中,可以更容易地解释结果。\n\n> **步骤6**:尽管你的模型很难解释,但是有一些方法可以让我们知道哪些特征是重要的。使用前一个问题中选择的“最佳模型”,来查找帮助判断个体是否患有糖尿病的最重要的特征。你的结论是否与你在本notebook的数据探索阶段的预期相匹配?", "_____no_output_____" ] ], [ [ "# Show your work here - the plot below was helpful for me\n# https://stackoverflow.com/questions/44101458/random-forest-feature-importance-chart-using-python\n", "_____no_output_____" ] ], [ [ "> **步骤 7**:使用上面的结果来完成下面的字典。", "_____no_output_____" ] ], [ [ "# Check your solution by matching the correct values in the dictionary\n# and running this cell\na = 'Age'\nb = 'BloodPressure'\nc = 'BMI'\nd = 'DiabetesPedigreeFunction'\ne = 'Insulin'\nf = 'Glucose'\ng = 'Pregnancy'\nh = 'SkinThickness'\n\n\n\nsol_seven = {\n 'The variable that is most related to the outcome of diabetes' : # letter here,\n 'The second most related variable to the outcome of diabetes' : # letter here,\n 'The third most related variable to the outcome of diabetes' : # letter here,\n 'The fourth most related variable to the outcome of diabetes' : # letter here\n}\n\nch.check_q_seven(sol_seven)", "_____no_output_____" ] ], [ [ "> **步骤 8**: 现在总结一下,在这个notebook中你做过什么,并且你怎样把这个结果向一个非技术背景的人解释。完成后,单击左上角的橙色图标可以查看解决方案的notebook。", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec860e17341b647c1acc6144b6e90fe55c02d3da
8,837
ipynb
Jupyter Notebook
notebook.ipynb
atriantafybbc/dc_tts
2fb9f2cf2d9814054e677a22d74adf11d669543c
[ "Apache-2.0" ]
1
2019-08-28T16:30:53.000Z
2019-08-28T16:30:53.000Z
notebook.ipynb
atriantafybbc/dc_tts
2fb9f2cf2d9814054e677a22d74adf11d669543c
[ "Apache-2.0" ]
null
null
null
notebook.ipynb
atriantafybbc/dc_tts
2fb9f2cf2d9814054e677a22d74adf11d669543c
[ "Apache-2.0" ]
null
null
null
32.251825
284
0.603485
[ [ [ "## Setup\n\n### Install dependencies", "_____no_output_____" ] ], [ [ "import os\nimport IPython\nfrom IPython.display import Audio\nfrom hyperparams import Hyperparams as hp\n", "_____no_output_____" ], [ "from synthesize import Synthesizer", "/home/ubuntu/anaconda3/envs/tensorflow_p27/lib/python2.7/site-packages/librosa/__init__.py:40: DeprecationWarning: You are using librosa with Python 2. Please note that librosa 0.7 will be the last version to support Python 2, after which it will require Python 3 or later.\n DeprecationWarning)\n" ], [ "#! ls checkpoints/i-0db58090af9cb95af/LJ01-1", "_____no_output_____" ], [ "#! ls checkpoints/i-031e73172dafa5ce9/LJ01-2", "_____no_output_____" ], [ "checkpoint_text2mel = \"checkpoints/i-0db58090af9cb95af/LJ01-1/model_gs_820k\"\ncheckpoint_ssrn = \"checkpoints/i-031e73172dafa5ce9/LJ01-2/model_gs_773k\"", "_____no_output_____" ], [ "synthesizer = Synthesizer(checkpoint_text2mel, checkpoint_ssrn)", "WARNING:tensorflow:From /home/ubuntu/anaconda3/envs/tensorflow_p27/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n\nWARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\nFor more information, please see:\n * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n * https://github.com/tensorflow/addons\nIf you depend on functionality not listed there, please file an issue.\n\nWARNING:tensorflow:From modules.py:134: conv1d (from tensorflow.python.layers.convolutional) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse keras.layers.conv1d instead.\nWARNING:tensorflow:From modules.py:139: dropout (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse keras.layers.dropout instead.\nWARNING:tensorflow:From networks.py:140: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\nWARNING:tensorflow:From modules.py:239: conv2d_transpose (from tensorflow.python.layers.convolutional) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse keras.layers.conv2d_transpose instead.\nWARNING:tensorflow:From /home/ubuntu/anaconda3/envs/tensorflow_p27/lib/python2.7/site-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse standard file APIs to check for files with this prefix.\nINFO:tensorflow:Restoring parameters from /home/ubuntu/pytorch-dc-tts/notebooks/checkpoints/i-0db58090af9cb95af/LJ01-1/model_gs_820k\nINFO:tensorflow:Restoring parameters from /home/ubuntu/pytorch-dc-tts/notebooks/checkpoints/i-031e73172dafa5ce9/LJ01-2/model_gs_773k\n" ], [ "! 
ls checkpoints/i-031e73172dafa5ce9/LJ01-2/model_gs_747k*", "checkpoints/i-031e73172dafa5ce9/LJ01-2/model_gs_747k.data-00000-of-00001\r\ncheckpoints/i-031e73172dafa5ce9/LJ01-2/model_gs_747k.index\r\ncheckpoints/i-031e73172dafa5ce9/LJ01-2/model_gs_747k.meta\r\n" ], [ "tongue_twisters = [\n \"Peter Piper picked a peck of pickled peppers\",\n \"A peck of pickled peppers Peter Piper picked\",\n \"If Peter Piper picked a peck of pickled peppers\",\n \"Where’s the peck of pickled peppers Peter Piper picked?\",\n \"How much wood would a woodchuck chuck if a woodchuck could chuck wood?\",\n \"He would chuck, he would, as much as he could, and chuck as much wood\",\n \"As a woodchuck would if a woodchuck could chuck wood\",\n \"She sells seashells by the seashore\",\n \"Susie works in a shoeshine shop. Where she shines she sits, and where she sits she shines\",\n \"Fuzzy Wuzzy was a bear. Fuzzy Wuzzy had no hair. Fuzzy Wuzzy wasn’t fuzzy, was he?\"\n]\nljset = [\n \"The birch canoe slid on the smooth planks\",\n \"Glue the sheet to the dark blue background\",\n \"It's easy to tell the depth of a well\",\n \"These days a chicken leg is a rare dish\",\n \"Rice is often served in round bowls\",\n \"The juice of lemons makes fine punch\",\n \"The box was thrown beside the parked truck\",\n \"The hogs were fed chopped corn and garbage\",\n \"Four hours of steady work faced us\",\n \"Large size in stockings is hard to sell\",\n \"The boy was there when the sun rose\" \n]\n\nsentences = ljset\n", "_____no_output_____" ], [ "for i, sentence in enumerate(sentences):\n filename = \"test%d.wav\" % i\n synthesizer.synthesize(sentence, filename)\n print(sentence)\n IPython.display.display(Audio(filename, rate=hp.sr))", " 47%|████▋ | 98/210 [00:04<00:05, 21.66it/s]" ], [ "! ls", "checkpoints\t hyperparams.pyc\tREADME.md\t test2.wav test.wav\r\ndata_load.py\t LICENSE\t\trequirements.txt test3.wav train.py\r\ndata_load.pyc\t modules.py\tsynthesize.py\t test4.wav train.pyc\r\ndctts_tf.ipynb\t modules.pyc\tsynthesize.pyc\t test5.wav utils.py\r\nfig\t\t networks.py\ttest0.wav\t test6.wav utils.pyc\r\nharvard_sentences.txt networks.pyc\ttest10.wav\t test7.wav\r\nhorse.ogg\t notebook.ipynb\ttest11.wav\t test8.wav\r\nhyperparams.py\t prepo.py\t\ttest1.wav\t test9.wav\r\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec860f02e057d86e5a3dbf1d4a2780124bd892f1
176,486
ipynb
Jupyter Notebook
nbs/46_tutorial.collab.ipynb
flpeters/fastai2
370ac0169a190abe45bae061daa5fb00ab2d1e01
[ "Apache-2.0" ]
1
2020-03-04T04:56:26.000Z
2020-03-04T04:56:26.000Z
nbs/46_tutorial.collab.ipynb
flpeters/fastai2
370ac0169a190abe45bae061daa5fb00ab2d1e01
[ "Apache-2.0" ]
1
2021-09-28T05:58:59.000Z
2021-09-28T05:58:59.000Z
nbs/46_tutorial.collab.ipynb
flpeters/fastai2
370ac0169a190abe45bae061daa5fb00ab2d1e01
[ "Apache-2.0" ]
null
null
null
174.565776
149,168
0.884693
[ [ [ "# Collaborative filtering\n\n> Using the fastai library for collaborative filtering.", "_____no_output_____" ] ], [ [ "from fastai2.tabular.all import *\nfrom fastai2.collab import *", "_____no_output_____" ], [ "# all_slow", "_____no_output_____" ] ], [ [ "This tutorial highlights on how to quickly build a `Learner` and train a model on collaborative filtering tasks.", "_____no_output_____" ], [ "## Training a model", "_____no_output_____" ], [ "For this tutorial, we will use the [Movielens 100k data dataset](https://grouplens.org/datasets/movielens/100k/). We can download it easily and decompress it with the following function:", "_____no_output_____" ] ], [ [ "path = untar_data(URLs.ML_100k)", "_____no_output_____" ] ], [ [ "The main table is in `u.data`. Since it's not a proper csv, we have to specify a few things while opening it: the tab delimiter, the columns we want to keep and their names.", "_____no_output_____" ] ], [ [ "ratings = pd.read_csv(path/'u.data', delimiter='\\t', header=None,\n usecols=(0,1,2), names=['user','movie','rating'])\nratings.head()", "_____no_output_____" ] ], [ [ "Movie ids are not ideal to look at things, so we load the corresponding movie id to the title that is in the table `u.item`:", "_____no_output_____" ] ], [ [ "movies = pd.read_csv(path/'u.item', delimiter='|', encoding='latin-1',\n usecols=(0,1), names=('movie','title'), header=None)\nmovies.head()", "_____no_output_____" ] ], [ [ "Next we merge it to our ratings table:", "_____no_output_____" ] ], [ [ "ratings = ratings.merge(movies)\nratings.head()", "_____no_output_____" ] ], [ [ "We can then build a `DataLoaders` object from this table. By default, it takes the first column for user, the second column for the item (here our movies) and the third column for the ratings. We need to change the value of `item_name` in our case, to use the titles instead of the ids:", "_____no_output_____" ] ], [ [ "dls = CollabDataLoaders.from_df(ratings, item_name='title', bs=64)", "_____no_output_____" ] ], [ [ "In all applications, when the data has been assembled in a `DataLoaders`, you can have a look at it with the `show_batch` method:", "_____no_output_____" ] ], [ [ "dls.show_batch()", "_____no_output_____" ] ], [ [ "fastai can create and train a collaborative filtering model by using `collab_learner`:", "_____no_output_____" ] ], [ [ "learn = collab_learner(dls, n_factors=50, y_range=(0, 5.5))", "_____no_output_____" ] ], [ [ "It uses a simple dot product model with 50 latent factors. To train it using the 1cycle policy, we just run this command:", "_____no_output_____" ] ], [ [ "learn.fit_one_cycle(5, 5e-3, wd=0.1)", "_____no_output_____" ] ], [ [ "Here's [some benchmarks](https://www.librec.net/release/v1.3/example.html) on the same dataset for the popular Librec system for collaborative filtering. They show best results based on RMSE of 0.91 (scroll down to the 100k dataset), which corresponds to an MSE of `0.91**2 = 0.83`. So in less than a minute, we got pretty good results!", "_____no_output_____" ], [ "## Interpretation", "_____no_output_____" ], [ "Let's analyze the results of our previous model. 
We will keep the 1000 most rated movies for this:", "_____no_output_____" ] ], [ [ "g = ratings.groupby('title')['rating'].count()\ntop_movies = g.sort_values(ascending=False).index.values[:1000]\ntop_movies[:10]", "_____no_output_____" ] ], [ [ "### Movie bias", "_____no_output_____" ], [ "Our model has learned one bias per movie, a unique number independent of users that can be interpreted as the intrinsic \"value\" of the movie. We can grab the bias of each movie in our `top_movies` list with the following command:", "_____no_output_____" ] ], [ [ "movie_bias = learn.model.bias(top_movies, is_item=True)\nmovie_bias.shape", "_____no_output_____" ] ], [ [ "Let's compare those biases with the average ratings:", "_____no_output_____" ] ], [ [ "mean_ratings = ratings.groupby('title')['rating'].mean()\nmovie_ratings = [(b, i, mean_ratings.loc[i]) for i,b in zip(top_movies,movie_bias)]", "_____no_output_____" ] ], [ [ "Now let's have a look at the movies with the worst bias:", "_____no_output_____" ] ], [ [ "item0 = lambda o:o[0]\nsorted(movie_ratings, key=item0)[:15]", "_____no_output_____" ] ], [ [ "Or the ones with the best bias:", "_____no_output_____" ] ], [ [ "sorted(movie_ratings, key=lambda o: o[0], reverse=True)[:15]", "_____no_output_____" ] ], [ [ "There is certainly a strong correlation!", "_____no_output_____" ], [ "### Movie weights", "_____no_output_____" ], [ "Now let's try to analyze the latent factors our model has learned. We can grab the weights for each movie in `top_movies` the same way as we did for the bias before.", "_____no_output_____" ] ], [ [ "movie_w = learn.model.weight(top_movies, is_item=True)\nmovie_w.shape", "_____no_output_____" ] ], [ [ "Let's try a PCA to reduce the dimensions and see if we can see what the model learned:", "_____no_output_____" ] ], [ [ "movie_pca = movie_w.pca(3)\nmovie_pca.shape", "_____no_output_____" ], [ "fac0,fac1,fac2 = movie_pca.t()\nmovie_comp = [(f, i) for f,i in zip(fac0, top_movies)]", "_____no_output_____" ] ], [ [ "Here are the highest scores on the first dimension:", "_____no_output_____" ] ], [ [ "sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]", "_____no_output_____" ] ], [ [ "And the worst:", "_____no_output_____" ] ], [ [ "sorted(movie_comp, key=itemgetter(0))[:10]", "_____no_output_____" ] ], [ [ "Same thing for our second dimension:", "_____no_output_____" ] ], [ [ "movie_comp = [(f, i) for f,i in zip(fac1, top_movies)]", "_____no_output_____" ], [ "sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]", "_____no_output_____" ], [ "sorted(movie_comp, key=itemgetter(0))[:10]", "_____no_output_____" ] ], [ [ "And we can even plot the movies according to their scores on those dimensions:", "_____no_output_____" ] ], [ [ "idxs = np.random.choice(len(top_movies), 50, replace=False)\nidxs = list(range(50))\nX = fac0[idxs]\nY = fac2[idxs]\nplt.figure(figsize=(15,15))\nplt.scatter(X, Y)\nfor i, x, y in zip(top_movies[idxs], X, Y):\n    plt.text(x,y,i, color=np.random.rand(3)*0.7, fontsize=11)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec8625103cc6a07df95eef7f1ac132f11f8808b7
14,289
ipynb
Jupyter Notebook
jupyter/visualize_convolution_filters.ipynb
ltbyshi/cardiacai
e5e7936aea0cfc7a86a110f2f12f94c7c640fda0
[ "MIT" ]
null
null
null
jupyter/visualize_convolution_filters.ipynb
ltbyshi/cardiacai
e5e7936aea0cfc7a86a110f2f12f94c7c640fda0
[ "MIT" ]
null
null
null
jupyter/visualize_convolution_filters.ipynb
ltbyshi/cardiacai
e5e7936aea0cfc7a86a110f2f12f94c7c640fda0
[ "MIT" ]
null
null
null
90.436709
8,594
0.859542
[ [ [ "%pylab inline", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "import h5py", "_____no_output_____" ], [ "def load_hdf5(filename):\n f = h5py.File(filename, 'r')\n data = {}\n for key in ['/model_weights/conv2d_1/conv2d_1/kernel:0']:\n data[key] = f[key][:]\n f.close()\n return data", "_____no_output_____" ], [ "model = load_hdf5('../output/classify_types/model')", "_____no_output_____" ], [ "kernel0 = model['/model_weights/conv2d_1/conv2d_1/kernel:0']", "_____no_output_____" ], [ "matshow(kernel0[:, :, 0, 0], cmap=cm.Greys)", "_____no_output_____" ], [ "matshow(np.einsum('ijkl->ikjl', np.einsum('ijkl->lij', kernel0).reshape((8, 8, 3, 3))).reshape((24, 24)), cmap=cm.RdBu)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec862e0e99b2e108644455380b58b2aba66a81f9
59,824
ipynb
Jupyter Notebook
model/Connect-X-RL-DoubleQ.ipynb
vnck/Connect-X-With-RL
8407298a45310c11909fe747c142c59fdb47014d
[ "MIT" ]
null
null
null
model/Connect-X-RL-DoubleQ.ipynb
vnck/Connect-X-With-RL
8407298a45310c11909fe747c142c59fdb47014d
[ "MIT" ]
null
null
null
model/Connect-X-RL-DoubleQ.ipynb
vnck/Connect-X-With-RL
8407298a45310c11909fe747c142c59fdb47014d
[ "MIT" ]
1
2021-08-31T16:25:29.000Z
2021-08-31T16:25:29.000Z
83.320334
21,880
0.782362
[ [ [ "!pip install 'kaggle-environments>=0.1.6'", "Requirement already satisfied: kaggle-environments>=0.1.6 in /opt/conda/lib/python3.6/site-packages (1.2.1)\nRequirement already satisfied: jsonschema>=3.0.1 in /opt/conda/lib/python3.6/site-packages (from kaggle-environments>=0.1.6) (3.2.0)\nRequirement already satisfied: pyrsistent>=0.14.0 in /opt/conda/lib/python3.6/site-packages (from jsonschema>=3.0.1->kaggle-environments>=0.1.6) (0.16.0)\nRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in /opt/conda/lib/python3.6/site-packages (from jsonschema>=3.0.1->kaggle-environments>=0.1.6) (1.6.1)\nRequirement already satisfied: setuptools in /opt/conda/lib/python3.6/site-packages (from jsonschema>=3.0.1->kaggle-environments>=0.1.6) (47.1.1.post20200529)\nRequirement already satisfied: attrs>=17.4.0 in /opt/conda/lib/python3.6/site-packages (from jsonschema>=3.0.1->kaggle-environments>=0.1.6) (19.3.0)\nRequirement already satisfied: six>=1.11.0 in /opt/conda/lib/python3.6/site-packages (from jsonschema>=3.0.1->kaggle-environments>=0.1.6) (1.15.0)\nRequirement already satisfied: zipp>=0.5 in /opt/conda/lib/python3.6/site-packages (from importlib-metadata; python_version < \"3.8\"->jsonschema>=3.0.1->kaggle-environments>=0.1.6) (3.1.0)\n" ], [ "import numpy as np\nimport gym\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom kaggle_environments import evaluate, make, utils", "_____no_output_____" ] ], [ [ "# Define Gym Class", "_____no_output_____" ] ], [ [ "import random\n\ndef combined_agent(default_agent, alternate_agent, epsilon):\n def updated_agent(obs, config):\n if (random.random() < epsilon):\n return alternate_agent(obs, config)\n return default_agent(obs,config)\n return updated_agent\n\nfrom kaggle_environments.envs.connectx.connectx import negamax_agent\nfrom kaggle_environments.envs.connectx.connectx import random_agent\n\ne_greedy_negamax = combined_agent(negamax_agent, random_agent, 0.4)", "_____no_output_____" ], [ "class ConnectX(gym.Env):\n def __init__(self, switch_prob=0.5):\n self.env = make('connectx', debug=False)\n self.pair = [None, e_greedy_negamax]\n self.trainer = self.env.train(self.pair)\n self.switch_prob = switch_prob\n\n config = self.env.configuration\n self.action_space = gym.spaces.Discrete(config.columns)\n self.observation_space = gym.spaces.Discrete(config.columns\n * config.rows)\n\n def switch_trainer(self):\n self.pair = self.pair[::-1]\n self.trainer = self.env.train(self.pair)\n\n def step(self, action):\n return self.trainer.step(action)\n\n def reset(self):\n if np.random.random() < self.switch_prob:\n self.switch_trainer()\n return self.trainer.reset()\n\n def render(self, **kwargs):\n return self.env.render(**kwargs)", "_____no_output_____" ] ], [ [ "# Define Model", "_____no_output_____" ] ], [ [ "class DeepModel(nn.Module):\n def __init__(\n self,\n num_states,\n hidden_units,\n num_actions,\n ):\n super(DeepModel, self).__init__()\n self.hidden_layers = nn.ModuleList([])\n for i in range(len(hidden_units)):\n if i == 0:\n self.hidden_layers.append(nn.Linear(num_states,\n hidden_units[i]))\n else:\n self.hidden_layers.append(nn.Linear(hidden_units[i\n - 1], hidden_units[i]))\n self.output_layer = nn.Linear(hidden_units[-1], num_actions)\n\n def forward(self, x):\n\n for layer in self.hidden_layers:\n x = torch.sigmoid(layer(x))\n x = self.output_layer(x)\n return x\n\n\nclass DoubleDQN:\n def __init__(\n self,\n num_states,\n num_actions,\n hidden_units,\n gamma,\n 
max_experiences,\n min_experiences,\n batch_size,\n lr,\n target_update_freq\n ):\n self.device = torch.device(('cuda'\n if torch.cuda.is_available() else 'cpu'\n ))\n print(self.device)\n self.num_actions = num_actions\n self.batch_size = batch_size\n self.gamma = gamma\n self.model = DeepModel(num_states, hidden_units,\n num_actions).to(self.device)\n self.model_target = DeepModel(num_states, hidden_units, num_actions).to(self.device)\n self.optimizer = optim.Adam(self.model.parameters(), lr=lr)\n self.experience = {\n 's': [],\n 'a': [],\n 'r': [],\n 's2': [],\n 'done': [],\n }\n self.max_experiences = max_experiences\n self.min_experiences = min_experiences\n self.param_update_count = 0\n self.target_update_freq = target_update_freq\n\n def predict(self, inputs, model):\n return model(torch.from_numpy(inputs).float().to(self.device))\n\n def train(self, TargetNet):\n if len(self.experience['s']) < self.min_experiences:\n # only start training process if enough experiences in buffer\n return 0\n\n # randomly select n experiences in buffer to form batch\n ids = np.random.randint(low=0, high=len(self.experience['s']),\n size=self.batch_size)\n states = np.asarray([self.preprocess(self.experience['s'][i])\n for i in ids])\n actions = np.asarray([self.experience['a'][i] for i in ids])\n actions = torch.autograd.Variable(torch.from_numpy(actions)).type(torch.LongTensor).to(self.device)\n rewards = np.asarray([self.experience['r'][i] for i in ids])\n rewards = torch.autograd.Variable(torch.from_numpy(rewards)).type(torch.FloatTensor).to(self.device)\n\n\n # prepare labels\n states_next = np.asarray([self.preprocess(self.experience['s2'\n ][i]) for i in ids])\n dones = np.asarray([self.experience['done'][i] for i in ids])\n dones_mask = torch.autograd.Variable(torch.from_numpy(dones)).type(torch.FloatTensor).to(self.device)\n \n # get Q values for best actions in observation\n q_values = self.predict(states_next, self.model).detach()\n _, a_prime = q_values.max(1)\n \n # get Q values from target network for next state and chosen action\n q_target_values = self.predict(states_next, self.model_target).detach()\n q_target_a_prime = q_target_values.gather(1, a_prime.unsqueeze(1))\n q_target_a_prime = q_target_a_prime.squeeze()\n \n # if current state is end of episode, no next Q value\n q_target_a_prime = (1 - dones_mask) * q_target_a_prime\n \n # get q values of current observation\n q_vals = self.predict(states, self.model)\n q_actions = q_vals.gather(1, actions.unsqueeze(1))\n q_actions = q_actions.squeeze()\n \n # compute Bellman error\n error = rewards + self.gamma * q_target_a_prime - q_actions\n \n clipped_error = -1.0 * error.clamp(-1,1)\n \n self.optimizer.zero_grad()\n q_actions.backward(clipped_error.data)\n \n self.optimizer.step()\n \n self.param_update_count += 1\n \n if self.param_update_count % self.target_update_freq == 0:\n self.model_target.load_state_dict(self.model.state_dict())\n\n def get_action(self, state, epsilon):\n # select an action using an epsilon-greedy policy\n if np.random.random() < epsilon:\n return int(np.random.choice([c for c in\n range(self.num_actions) if state['board'][c]\n == 0]))\n else:\n prediction = \\\n self.predict(np.atleast_2d(self.preprocess(state)), self.model)[0].detach().cpu().numpy()\n for i in range(self.num_actions):\n if state['board'][i] != 0:\n prediction[i] = -1e7\n return int(np.argmax(prediction))\n\n def add_experience(self, exp):\n if len(self.experience['s']) >= self.max_experiences:\n for key in self.experience.keys():\n
self.experience[key].pop(0)\n for (key, value) in exp.items():\n self.experience[key].append(value)\n\n def copy_weights(self, TrainNet):\n self.model.load_state_dict(TrainNet.model.state_dict())\n\n def save_weights(self, path):\n torch.save(self.model.state_dict(), path)\n\n def load_weights(self, path):\n self.model.load_state_dict(torch.load(path))\n\n def preprocess(self, state):\n # each state consists of an overview of the board and the mark in the observations\n results = (state['board'])[:]\n results.append(state.mark)\n return results", "_____no_output_____" ], [ "def play_game(\n env,\n TrainNet,\n TargetNet,\n epsilon,\n copy_step,\n ):\n rewards = 0\n iter = 0\n done = False\n observations = env.reset()\n while not done:\n action = TrainNet.get_action(observations, epsilon)\n prev_observations = observations\n (observations, reward, done, _) = env.step(action)\n\n if done:\n if reward == 1:\n reward = 20\n elif reward == 0:\n reward = -20\n else:\n reward = 10\n else:\n reward = 0.5\n\n rewards += reward\n\n exp = {\n 's': prev_observations,\n 'a': action,\n 'r': reward,\n 's2': observations,\n 'done': done,\n }\n \n TrainNet.add_experience(exp)\n TrainNet.train(TargetNet)\n \n iter += 1\n if iter % copy_step == 0:\n TargetNet.copy_weights(TrainNet)\n return rewards", "_____no_output_____" ] ], [ [ "# Training", "_____no_output_____" ] ], [ [ "gamma = 0.99\ncopy_step = 1000\nhidden_units = [128, 128, 128, 128, 128]\nmax_experiences = 10000\nmin_experiences = 100\nbatch_size = 32\nlr = 1e-2\nepsilon = 0.999\ndecay = 0.9999\nmin_epsilon = 0.01\nepisodes = 50000\ntarget_update_freq = 1000\n\nprecision = 7", "_____no_output_____" ], [ "env = ConnectX()\n\nnum_states = env.observation_space.n + 1\nnum_actions = env.action_space.n\n\nall_total_rewards = np.empty(episodes)\nall_avg_rewards = np.empty(episodes) # Last 100 steps\nall_epsilons = np.empty(episodes)\n\n# Initialize models\nTrainNet = DoubleDQN(num_states, num_actions, hidden_units, gamma, max_experiences, min_experiences, batch_size, lr, target_update_freq)\nTargetNet = DoubleDQN(num_states, num_actions, hidden_units, gamma, max_experiences, min_experiences, batch_size, lr, target_update_freq)", "cuda\ncuda\n" ], [ "from tqdm.notebook import tqdm\n\nfor n in tqdm(range(episodes)):\n epsilon = max(min_epsilon, epsilon * decay)\n total_reward = play_game(env, TrainNet, TargetNet, epsilon, copy_step)\n all_total_rewards[n] = total_reward\n avg_reward = all_total_rewards[max(0, n - 100):(n + 1)].mean()\n all_avg_rewards[n] = avg_reward\n all_epsilons[n] = epsilon\n\n if n % 1000 == 0:\n print('[{}/{}] episode_reward: {:.3f}, avg_reward: {:.3f}, epsilon: {:.5f}'.format(n,episodes,total_reward, avg_reward, epsilon))", "_____no_output_____" ], [ "plt.plot(all_avg_rewards)\nplt.xlabel('Episode')\nplt.ylabel('Avg rewards (100)')\nplt.show()", "_____no_output_____" ], [ "plt.plot(all_epsilons)\nplt.xlabel('Episode')\nplt.ylabel('Epsilon')\nplt.show()", "_____no_output_____" ], [ "TrainNet.save_weights('./weights-doubleq.pth')", "_____no_output_____" ] ], [ [ "# Create Agent", "_____no_output_____" ] ], [ [ "fc_layers = []\n\n# Get all hidden layers' weights\nfor i in range(len(hidden_units)):\n fc_layers.extend([\n TrainNet.model.hidden_layers[i].weight.T.tolist(), # weights\n TrainNet.model.hidden_layers[i].bias.tolist() # bias\n ])\n\n# Get output layer's weights\nfc_layers.extend([\n TrainNet.model.output_layer.weight.T.tolist(), # weights\n TrainNet.model.output_layer.bias.tolist() # bias\n])\n\n# Convert all layers into a usable 
form before integrating to final agent\nfc_layers = list(map(\n lambda x: str(list(np.round(x, precision))) \\\n .replace('array(', '').replace(')', '') \\\n .replace(' ', '') \\\n .replace('\\n', ''),\n fc_layers\n))\nfc_layers = np.reshape(fc_layers, (-1, 2))\n\n# Create the agent\nmy_agent = '''def my_agent(observation, configuration):\n import numpy as np\n\n'''\n\n# Write hidden layers\nfor i, (w, b) in enumerate(fc_layers[:-1]):\n my_agent += ' hl{}_w = np.array({}, dtype=np.float32)\\n'.format(i+1, w)\n my_agent += ' hl{}_b = np.array({}, dtype=np.float32)\\n'.format(i+1, b)\n# Write output layer\nmy_agent += ' ol_w = np.array({}, dtype=np.float32)\\n'.format(fc_layers[-1][0])\nmy_agent += ' ol_b = np.array({}, dtype=np.float32)\\n'.format(fc_layers[-1][1])\n\nmy_agent += '''\n state = observation.board[:]\n state.append(observation.mark)\n out = np.array(state, dtype=np.float32)\n\n'''\n\n# Calculate hidden layers\nfor i in range(len(fc_layers[:-1])):\n my_agent += ' out = np.matmul(out, hl{0}_w) + hl{0}_b\\n'.format(i+1)\n my_agent += ' out = 1/(1 + np.exp(-out))\\n' # Sigmoid function\n# Calculate output layer\nmy_agent += ' out = np.matmul(out, ol_w) + ol_b\\n'\n\nmy_agent += '''\n for i in range(configuration.columns):\n if observation.board[i] != 0:\n out[i] = -1e7\n\n return int(np.argmax(out))\n '''\n", "_____no_output_____" ], [ "with open('submission-doubleq.py', 'w') as f:\n f.write(my_agent)", "_____no_output_____" ], [ "from submission import my_agent", "_____no_output_____" ], [ "def mean_reward(rewards):\n return np.round(rewards.count([1,-1])/len(rewards),2)\n\nprint(\"My Agent vs. Random Agent:\", mean_reward(evaluate(\"connectx\", [my_agent, \"random\"], num_episodes=10)))\nprint(\"My Agent vs. Negamax Agent:\", mean_reward(evaluate(\"connectx\", [my_agent, \"negamax\"], num_episodes=10)))\nprint(\"Random Agent vs. My Agent:\", mean_reward(evaluate(\"connectx\", [\"random\", my_agent], num_episodes=10)))\nprint(\"Negamax Agent vs. My Agent:\", mean_reward(evaluate(\"connectx\", [\"negamax\", my_agent], num_episodes=10)))", "My Agent vs. Random Agent: 0.7\nMy Agent vs. Negamax Agent: 0.0\nRandom Agent vs. My Agent: 0.0\nNegamax Agent vs. My Agent: 1.0\n" ], [ "print(\"Negamax Agent vs. Negamax Agent:\", mean_reward(evaluate(\"connectx\", [\"negamax\", \"negamax\"], num_episodes=10)))", "Negamax Agent vs. Negamax Agent: 0.3\n" ], [ "def my_agent(observation, configuration):\n return TrainNet.get_action(observation, 0)", "_____no_output_____" ], [ "def mean_reward(rewards):\n return np.round(rewards.count([1,-1])/len(rewards),2)\n\nfrom nega_agents import return_nega\nnegamax_agent1 = return_nega(1)\nnegamax_agent2 = return_nega(2)\nnegamax_agent3 = return_nega(3)\n\nprint(\"My Agent vs. Random Agent:\", mean_reward(evaluate(\"connectx\", [my_agent, \"random\"], num_episodes=100)))\nprint(\"My Agent vs. Negamax1 Agent:\", mean_reward(evaluate(\"connectx\", [my_agent, negamax_agent1], num_episodes=100)))\nprint(\"My Agent vs. Negamax2 Agent:\", mean_reward(evaluate(\"connectx\", [my_agent, negamax_agent2], num_episodes=100)))\nprint(\"My Agent vs. Negamax3 Agent:\", mean_reward(evaluate(\"connectx\", [my_agent, negamax_agent3], num_episodes=100)))\nprint(\"My Agent vs. Negamax4 Agent:\", mean_reward(evaluate(\"connectx\", [my_agent, \"negamax\"], num_episodes=100)))\nprint(\"Random Agent vs. My Agent:\", mean_reward(evaluate(\"connectx\", [\"random\", my_agent], num_episodes=100)))\nprint(\"Negamax1 Agent vs. 
My Agent:\", mean_reward(evaluate(\"connectx\", [negamax_agent1, my_agent], num_episodes=100)))\nprint(\"Negamax2 Agent vs. My Agent:\", mean_reward(evaluate(\"connectx\", [negamax_agent2, my_agent], num_episodes=100)))\nprint(\"Negamax3 Agent vs. My Agent:\", mean_reward(evaluate(\"connectx\", [negamax_agent3, my_agent], num_episodes=100)))\nprint(\"Negamax4 Agent vs. My Agent:\", mean_reward(evaluate(\"connectx\", [\"negamax\", my_agent], num_episodes=100)))", "My Agent vs. Random Agent: 0.89\nMy Agent vs. Negamax1 Agent: 0.95\nMy Agent vs. Negamax2 Agent: 0.21\nMy Agent vs. Negamax3 Agent: 0.82\nMy Agent vs. Negamax4 Agent: 0.0\nRandom Agent vs. My Agent: 0.17\nNegamax1 Agent vs. My Agent: 0.27\nNegamax2 Agent vs. My Agent: 0.9\nNegamax3 Agent vs. My Agent: 0.36\nNegamax4 Agent vs. My Agent: 1.0\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec862efab20d3ea53960f92b232b9662e2a9dfdd
5,385
ipynb
Jupyter Notebook
1.Study/2. with computer/3.Deep_Learning_code/4. Pytorch/0.basic/1. Pytorch convention for CV, NLP according to the dimension.ipynb
jskim0406/Study
07b559b95f8f658303ee53114107ae35940a6080
[ "MIT" ]
null
null
null
1.Study/2. with computer/3.Deep_Learning_code/4. Pytorch/0.basic/1. Pytorch convention for CV, NLP according to the dimension.ipynb
jskim0406/Study
07b559b95f8f658303ee53114107ae35940a6080
[ "MIT" ]
null
null
null
1.Study/2. with computer/3.Deep_Learning_code/4. Pytorch/0.basic/1. Pytorch convention for CV, NLP according to the dimension.ipynb
jskim0406/Study
07b559b95f8f658303ee53114107ae35940a6080
[ "MIT" ]
null
null
null
21.979592
103
0.423955
[ [ [ "import torch", "_____no_output_____" ] ], [ [ "# 1. PyTorch Tensor Shape Convention", "_____no_output_____" ], [ "## 1) 2-D tensor convention\n\n$$|t| = (batch\\_size, dim)$$\n\n- 행의 크기 : batch-size\n- 열의 크기 : dim", "_____no_output_____" ], [ "- ex) 2x3 텐서라면, 파이토치에선 |t| = (2,3) = (batch size = 2, dim = 3) 이라 표현한다.\n\n```\n만약,\n\n데이터 갯수 : 3,000\n데이터의 feature 수 : 256\n\nmatrix = (3000, 256) ==>> batch-size = 3,000, dim = 256\n\n하지만,\n**컴퓨터는 보통 데이터를 하나씩 보다는 묶어서 한번에 처리하는 경우가 많음**\n데이터를 한 번에 30개씩 묶어서 처리한다면(batch-size)\n컴퓨터가 한번에 처리하는 텐서의 사이즈는\n\n|t| = (30,256) ==>> batch-size = 30, dim = 256\n\n이 된다.\n```", "_____no_output_____" ], [ "## 2) 3-D tensor convention : typical computer vision\n\n$$|t| = (batch\\_size, width, height)$$\n", "_____no_output_____" ], [ "## 3) 3-D tensor convention : typical natural language processing\n\n$$|t| = (batch\\_size, length, dim)$$\n\n- batch_size\n- length : 문장의 길이\n- dim : 단어 벡터의 차원", "_____no_output_____" ], [ "### 3-1) NLP tensor practice", "_____no_output_____" ], [ "**전처리**\n- 단어 단위로 전환 : 하나의 문장이 row, 문장 내 단어가 columns 생성", "_____no_output_____" ] ], [ [ "# 컴퓨터는 아직 '나는', '사과를' 등을 단어로 구분하지 못함. 따라서, 이를 나눠주는 전처리 작업 필요\ndata = [['나는 사과를 좋아해'], ['나는 바나나를 좋아해'], ['나는 사과를 싫어해'], ['나는 바나나를 싫어해']]", "_____no_output_____" ], [ "# 다음과 같이 전처리 작업 필요\n# 아래와 같이 4x3 tensor 가 됨\ndata = [['나는', '사과를', '좋아해'], ['나는', '바나나를', '좋아해'], ['나는', '사과를', '싫어해'], ['나는', '바나나를', '싫어해']]", "_____no_output_____" ], [ "data", "_____no_output_____" ] ], [ [ "**단어 -> 숫자 벡터로 변환**\n```python\n'나는' = [0.1, 0.2, 0.9]\n'사과를' = [0.3, 0.5, 0.1]\n'바나나를' = [0.3, 0.5, 0.2]\n'좋아해' = [0.7, 0.6, 0.5]\n'싫어해' = [0.5, 0.6, 0.7]\n```", "_____no_output_____" ] ], [ [ "data = [[[0.1, 0.2, 0.9], [0.3, 0.5, 0.1], [0.7, 0.6, 0.5]],\n [[0.1, 0.2, 0.9], [0.3, 0.5, 0.2], [0.7, 0.6, 0.5]],\n [[0.1, 0.2, 0.9], [0.3, 0.5, 0.1], [0.5, 0.6, 0.7]],\n [[0.1, 0.2, 0.9], [0.3, 0.5, 0.2], [0.5, 0.6, 0.7]]]", "_____no_output_____" ], [ "# 이제, (4x3)x3 tensor 가 됨 (3-D tensor)\ndata", "_____no_output_____" ] ], [ [ "**mini-batch size**\n- 컴퓨터 연산 수행 단위인 batch size로 데이터 분할", "_____no_output_____" ] ], [ [ "# batch size : 2 라면\n# 1 batch -> (2x3)x3 = batch_size, length, dim\ndata_1 = [[[0.1, 0.2, 0.9], [0.3, 0.5, 0.1], [0.7, 0.6, 0.5]],\n [[0.1, 0.2, 0.9], [0.3, 0.5, 0.2], [0.7, 0.6, 0.5]]]\n\n# 2 batch -> (2x3)x3 = batch_size, length, dim\ndata_2 = [[[0.1, 0.2, 0.9], [0.3, 0.5, 0.1], [0.5, 0.6, 0.7]],\n [[0.1, 0.2, 0.9], [0.3, 0.5, 0.2], [0.5, 0.6, 0.7]]]", "_____no_output_____" ] ], [ [ "**결국 NLP의 3-D tensors는**\n\n$$|t| = (mini\\_batch\\_size, length\\_of\\_sentence, dim\\_of\\_word)$$\n\n로 정의된다.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec8641acc90ace6857fc2e2b62ffa90f24c3d9a1
29,912
ipynb
Jupyter Notebook
PlotComparision_1000.ipynb
18819F-Project/ModellingDynamicalSystem
2610385e38c4cff7c9cbe25e1c6801a3de9eaf69
[ "MIT" ]
null
null
null
PlotComparision_1000.ipynb
18819F-Project/ModellingDynamicalSystem
2610385e38c4cff7c9cbe25e1c6801a3de9eaf69
[ "MIT" ]
null
null
null
PlotComparision_1000.ipynb
18819F-Project/ModellingDynamicalSystem
2610385e38c4cff7c9cbe25e1c6801a3de9eaf69
[ "MIT" ]
null
null
null
433.507246
27,698
0.929794
[ [ [ "import matplotlib.pyplot as plt\n\nplt.style.use(\"seaborn\")\nfig, (ax1) = plt.subplots(figsize=(6,6))\n\nax1.plot([18.7175,18.0037,17.0614,16.1019,15.3320,14.2279,14.0142,13.7744,13.5487,13.2977,13.2855,13.1232,12.5402,12.2375,12.2382,11.9743,11.9404,11.6876,11.6016,11.5760], \"-ob\", label=\"With Classical LDNN layer\")\nax1.plot([218.4223,45.8933,43.7937,41.0098,37.9548,34.3935,31.6346,29.4701,27.8955,26.6748,25.4257,23.9735,21.4985,15.9134,14.3776,14.2930,13.9772,13.9003,13.8255,13.5646], \"-or\", label=\"With Hybrid LDNN Classical-Quantum layer\")\nax1.plot([0.3200,0.1100,0.0831,0.0826,0.0772,0.0754,0.0745,0.0702,0.0690,0.0677,0.0672,0.0657,0.0659,0.0645,0.0641,0.0646,0.0652,0.0631,0.0633,0.0627], \"-og\", label=\"With Hybrid LDNN Quantum-Classical-Quantum layer\")\n\nax1.set_ylabel(\"Loss\")\nax1.set_ylim([0, 240])\nax1.set_xlabel(\"Epoch (Iteration)\")\nax1.legend(loc=2, prop = {'size': 10})", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
ec865a54b7e22c820ddb8f8519c6586a7314b66a
700,460
ipynb
Jupyter Notebook
covid_data.ipynb
michhottinger/project_x
1a832dfd52f69efed1c64a9831ace11cbcc77239
[ "MIT" ]
null
null
null
covid_data.ipynb
michhottinger/project_x
1a832dfd52f69efed1c64a9831ace11cbcc77239
[ "MIT" ]
null
null
null
covid_data.ipynb
michhottinger/project_x
1a832dfd52f69efed1c64a9831ace11cbcc77239
[ "MIT" ]
null
null
null
833.880952
441,640
0.948659
[ [ [ "<a href=\"https://colab.research.google.com/github/michhottinger/project_x/blob/master/covid_data.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "##Covid data source is from: \nhttps://github.com/owid/covid-19-data/tree/master/public/data/ \nThis notebook is to better explore the data from the Covid CSV.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ndf = pd.read_csv('https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv')\ndf.head(10)\ndf.columns", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.sort_values(by='total_deaths', ascending=False, inplace=True)", "_____no_output_____" ], [ "df.head(20)\ndf.shape", "_____no_output_____" ], [ "dfdate = df[df['date']=='2021-01-25']\ndfdate.shape", "_____no_output_____" ], [ "dfpop = dfdate[dfdate['population'] > 6000000]\ndfpop.shape", "_____no_output_____" ], [ "dfpop.shape", "_____no_output_____" ], [ "df_simple = dfdate.loc[:, ('date', 'location', 'total_deaths', 'continent', 'population', 'total_deaths_per_million')]\ndf_simple['death_pop'] = df_simple['total_deaths']/df_simple['population']\n\ndf_short = df_simple[:-150]\ndf_short.shape", "_____no_output_____" ] ], [ [ "# Total deaths \nThis shows the total not adjusted for population size. Since lives are lost, and every life matters, the raw number is worth looking at and evaluating.", "_____no_output_____" ] ], [ [ "import seaborn as sns\nfrom matplotlib import pyplot as plt\n%matplotlib inline\n#df_short is death/pop for each country\nsns.lmplot(x='location', y='total_deaths', data=df_short,\n fit_reg=False, # No regression line\n hue='continent', height=10, aspect=20/10) # Color by continent\n\n# Set title with matplotlib\nplt.title('Covid-19 Deaths by Country')\n# Rotate x-labels\nplt.xticks(rotation=-90);", "_____no_output_____" ] ], [ [ "# Total deaths divided by total population \nThis gives us a raw visual of the worst 50 countries affected by Covid-19 deaths.", "_____no_output_____" ] ], [ [ "df_ord = df_short.sort_values(by='death_pop', ascending=False)\n\nsns.lmplot(x='location', y='death_pop', data=df_ord,\n fit_reg=False, # No regression line\n hue='continent', height=8.27, aspect=20/8.27) # Color by continent\n\n# Set title with matplotlib\nplt.title('Covid-19 Deaths by Country')\n# Rotate x-labels\nplt.xticks(rotation=-90);", "_____no_output_____" ] ], [ [ "# Filter by pop size over 6 million \nOrdered by total deaths per million \nThis enables us to look at the countries that did the worst containment of Covid 19 as well as the best given there population.", "_____no_output_____" ] ], [ [ "\ndf_pop = dfpop.loc[:, ('date', 'location', 'total_deaths', 'continent', 'population', 'total_deaths_per_million')]\ndf_pop['death_pop'] = df_pop['total_deaths']/df_pop['population']\n\ndf_pop = df_pop.sort_values(by='total_deaths_per_million', ascending=False)\nplt.figure(figsize=(30,20))\n\nsns.lmplot(x='location', y='total_deaths_per_million', data=df_pop,\n fit_reg=False, # No regression line\n hue='continent', height=8.27, aspect=20/8.27) # Color by continent\n\n# Set title with matplotlib\nplt.title('Covid-19 Deaths by Country')\n# Rotate x-labels\nplt.xticks(rotation=-90);", "_____no_output_____" ], [ "df_denmark = df[df['location']=='Denmark']\ndf_denmark.shape\n", "_____no_output_____" ], [ "df_usa = df[df['location']=='United States']\ndf_usa.shape", "_____no_output_____" ], [ "# Here we can see 
the shape of the Covid curves between the two countries, but beware that\n# the y labels are at a different scale. These graphs should be on one graph so that the \n# different scales should be realized easily.\n\nplt.figure(figsize=(11,5))\nplt.subplot(1, 2, 1)\n\nsns.lineplot(x='date', y='total_deaths_per_million', data=df_denmark)\nplt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False) # labels along the bottom edge are off\nplt.title('Denmark Covid Deaths per Mil')\n\n\nplt.subplot(1, 2, 2)\n\nsns.lineplot(x='date', y='total_deaths_per_million', data=df_usa)\nplt.tick_params(\n axis='x',\n which='both', \n bottom=False, \n top=False, \n labelbottom=False) \nplt.title('USA Covid Deaths per Mil')\nplt.show()", "_____no_output_____" ], [ "# https://www.usnews.com/news/best-countries/overall-rankings\n#df = titanic[(titanic[\"Pclass\"] == 2) | (titanic[\"Pclass\"] == 3)]\ndf_top20 = df[(df['location']== 'Switzerland') | (df['location']== 'Canada') | \n (df['location']== 'Japan') | (df['location']== 'Germany') | \n (df['location']== 'Australia') | (df['location']== 'United Kingdom') |\n (df['location']== 'United States') | (df['location']== 'Sweden') |\n (df['location']== 'Netherlands') | (df['location']== 'Norway') | \n (df['location']== 'New Zealand') | (df['location']== 'France') | \n (df['location']== 'Denmark') | (df['location']== 'Finland') | \n (df['location']== 'China') | (df['location']=='Singapore') | \n (df['location']== 'Italy') | (df['location']== 'Austria') | \n (df['location']== 'Spain') | (df['location']=='South Korea')]", "_____no_output_____" ], [ "df_top20 = df_top20.sort_values(by='date', ascending=True)\ndf_top20.head(5)", "_____no_output_____" ], [ "data= df_top20\nplt.figure(figsize=(40,30))\nsns.lineplot(data=data, x=\"date\", y=\"total_deaths_per_million\", hue=\"location\", legend=\"full\")\n# Set title with matplotlib\nplt.title('Covid-19 Deaths by Top 20 Countries')\n# Rotate x-labels\nplt.xticks(rotation=-90)\nplt.autoscale_on=True;", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec865a656f03978b0ac98d19c95def95c54db1e7
194,626
ipynb
Jupyter Notebook
machine-learning/Electricity/Bias-Variance-Tradeoff.ipynb
imgoodman/play-with-machine-learning
377da326aba699e76295cb9020f9fa392febf1a4
[ "Apache-2.0" ]
null
null
null
machine-learning/Electricity/Bias-Variance-Tradeoff.ipynb
imgoodman/play-with-machine-learning
377da326aba699e76295cb9020f9fa392febf1a4
[ "Apache-2.0" ]
null
null
null
machine-learning/Electricity/Bias-Variance-Tradeoff.ipynb
imgoodman/play-with-machine-learning
377da326aba699e76295cb9020f9fa392febf1a4
[ "Apache-2.0" ]
null
null
null
349.418312
90,360
0.924239
[ [ [ "# Bias-Variance Trade-off", "_____no_output_____" ], [ "![image.png](attachment:image.png)", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "electricity = pd.read_excel(\"./input/Folds5x2_pp.xlsx\")", "_____no_output_____" ], [ "electricity.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 9568 entries, 0 to 9567\nData columns (total 5 columns):\nAT 9568 non-null float64\nV 9568 non-null float64\nAP 9568 non-null float64\nRH 9568 non-null float64\nPE 9568 non-null float64\ndtypes: float64(5)\nmemory usage: 373.8 KB\n" ], [ "electricity.head()", "_____no_output_____" ] ], [ [ "# PE是目标变量,其他是潜在特征", "_____no_output_____" ], [ "# 共9568个样本,按照80:20比例,分为训练集和验证集\n\n# 这样,训练集:7654,验证集:1914\n\n# 为了生成学习曲线,训练集的样本数从1开始,最后增大到7654.\n\n## 方便起见,训练集大小取:【1, 100, 500, 2000, 5000, 7654】", "_____no_output_____" ], [ "## 每个特定大小的训练集都会训练出一个新的模型。\n\n## 如果使用交叉验证,那么,每个训练集都会训练出k个不同模型。", "_____no_output_____" ], [ "# 使用sklearn中的learning_curve()函数", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import learning_curve", "_____no_output_____" ], [ "features=['AT', 'V', 'AP', 'RH']\ntarget='PE'\ntrain_sizes=[1, 100, 500, 2000, 5000, 7654]", "_____no_output_____" ], [ "\n\ntrain_sizes, train_scores, validation_scores=learning_curve(estimator=LinearRegression(), \n X=electricity[features],\n y=electricity[target],\n train_sizes=train_sizes,\n cv=5, \n scoring=\"neg_mean_squared_error\", \n shuffle=True)", "_____no_output_____" ], [ "train_sizes", "_____no_output_____" ], [ "train_scores", "_____no_output_____" ], [ "validation_scores", "_____no_output_____" ] ], [ [ "# 训练集有多少,就有多少行。\n# 训练集被交叉验证分成多少份,就有多少列", "_____no_output_____" ], [ "# 对交叉验证误差求平均", "_____no_output_____" ] ], [ [ "# axis=0 沿着行的方向,就是每一列求平均\n# axis=1 沿着列的方向,就是每一行求平均\ntrain_scores_mean = - train_scores.mean(axis=1)\nvalidation_scores_mean = - validation_scores.mean(axis=1)", "_____no_output_____" ], [ "train_scores_mean", "_____no_output_____" ], [ "validation_scores_mean", "_____no_output_____" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "plt.style.use(\"seaborn\")\n\nplt.plot(train_sizes, train_scores_mean, label=\"train error\")\nplt.plot(train_sizes, validation_scores_mean, label=\"validation error\")\n\nplt.xlabel(\"train set size\")\nplt.ylabel(\"MSE\")\nplt.title(\"Learning curve for a linear regression model\")\nplt.legend()\nplt.ylim(0, 40)\nplt.show()", "_____no_output_____" ] ], [ [ "# 上述训练误差维持在20左右。要去判断这个20是高,还是低。\n\n# 如果训练误差高,说明bias高。\n# 如果训练误差低,说明bias低。", "_____no_output_____" ], [ "# 区分两个概念:\n\n## 1.增加数据量\n## 2. 
增加数据特征", "_____no_output_____" ] ], [ [ "def learning_curves(estimator, data, features, target, train_sizes, cv):\n train_sizes, train_scores, validation_scores=learning_curve(estimator=estimator, \n X=data[features],\n y=data[target],\n train_sizes=train_sizes,\n cv=cv, \n scoring=\"neg_mean_squared_error\", \n shuffle=True)\n train_scores_mean = - train_scores.mean(axis=1)\n validation_scores_mean = - validation_scores.mean(axis=1)\n plt.plot(train_sizes, train_scores_mean, label=\"train error\")\n plt.plot(train_sizes, validation_scores_mean, label=\"validation error\")\n\n plt.xlabel(\"train set size\")\n plt.ylabel(\"MSE\")\n plt.title(\"Learning curve for a \"+str(estimator).split(\"(\")[0]+\" model\")\n plt.legend()\n plt.ylim(0, 40)", "_____no_output_____" ], [ "from sklearn.ensemble import RandomForestRegressor", "_____no_output_____" ], [ "plt.figure(figsize=(16,5)) \nfor model,i in [(RandomForestRegressor(), 1),(LinearRegression(), 2)]: \n plt.subplot(1,2,i)\n learning_curves(model, electricity, features, target, train_sizes, 5)", "_____no_output_____" ], [ "plt.figure(figsize=(16,5)) \nfor model,i in [(RandomForestRegressor(max_leaf_nodes=350), 1),(LinearRegression(), 2)]: \n plt.subplot(1,2,i)\n learning_curves(model, electricity, features, target, train_sizes, 5)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
ec865e3518a3911d578bbc0eaa0619b552c7d297
14,435
ipynb
Jupyter Notebook
server/src/Preprocessing.ipynb
getChan/SuicideSquad
e4ae8e4f5a85c96ab862c8695b1a0020130f853a
[ "MIT" ]
1
2019-04-19T04:42:26.000Z
2019-04-19T04:42:26.000Z
src/Preprocessing.ipynb
getChan/SuicideSquad
e4ae8e4f5a85c96ab862c8695b1a0020130f853a
[ "MIT" ]
null
null
null
src/Preprocessing.ipynb
getChan/SuicideSquad
e4ae8e4f5a85c96ab862c8695b1a0020130f853a
[ "MIT" ]
null
null
null
61.425532
6,420
0.80187
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec86688139b8cec8694ab7f05673b0505fbf32db
124,306
ipynb
Jupyter Notebook
1A/S5/UE Traitement du Signal et Automatique/Langage C/c1/1SN_LangageC.ipynb
badrs26/ENSEEIHT
7d04264c1be2c3993bfbcd8f7cdd64b4fb780b8f
[ "Apache-2.0" ]
4
2020-12-02T15:42:52.000Z
2021-11-07T20:31:09.000Z
1A/S5/UE Traitement du Signal et Automatique/Langage C/c1/1SN_LangageC.ipynb
Sajid-Badr/ENSEEIHT
7d04264c1be2c3993bfbcd8f7cdd64b4fb780b8f
[ "Apache-2.0" ]
null
null
null
1A/S5/UE Traitement du Signal et Automatique/Langage C/c1/1SN_LangageC.ipynb
Sajid-Badr/ENSEEIHT
7d04264c1be2c3993bfbcd8f7cdd64b4fb780b8f
[ "Apache-2.0" ]
4
2021-12-07T22:15:16.000Z
2022-03-08T21:49:50.000Z
33.532776
3,648
0.536877
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec866a8f801c448d0c441f09c18d1bd18dc36d62
65,496
ipynb
Jupyter Notebook
biobb_wf_md_setup_api/docs/notebooks/biobb_MDsetupAPI_tutorial.docs.ipynb
bioexcel/biobb_wf_md_setup_api
078a9a445adf87b9a20757f3e28d28702cf7a07e
[ "Apache-2.0" ]
null
null
null
biobb_wf_md_setup_api/docs/notebooks/biobb_MDsetupAPI_tutorial.docs.ipynb
bioexcel/biobb_wf_md_setup_api
078a9a445adf87b9a20757f3e28d28702cf7a07e
[ "Apache-2.0" ]
null
null
null
biobb_wf_md_setup_api/docs/notebooks/biobb_MDsetupAPI_tutorial.docs.ipynb
bioexcel/biobb_wf_md_setup_api
078a9a445adf87b9a20757f3e28d28702cf7a07e
[ "Apache-2.0" ]
1
2020-05-24T01:41:29.000Z
2020-05-24T01:41:29.000Z
32.879518
445
0.55921
[ [ [ "# Protein MD Setup tutorial using BioExcel Building Blocks (biobb) through REST API\n**Based on the official GROMACS tutorial:** [http://www.mdtutorials.com/gmx/lysozyme/index.html](http://www.mdtutorials.com/gmx/lysozyme/index.html)\n***\nThis tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein**, step by step, using the **BioExcel Building Blocks (biobb) [REST API](https://mmb.irbbarcelona.org/biobb-api)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI). \n***\n\n## Settings\n \n### Auxiliar libraries used\n\n - [requests](https://pypi.org/project/requests/): Requests allows you to send *organic, grass-fed* HTTP/1.1 requests, without the need for manual labor. \n - [nb_conda_kernels](https://github.com/Anaconda-Platform/nb_conda_kernels): Enables a Jupyter Notebook or JupyterLab application in one conda environment to access kernels for Python, R, and other languages found in other environments.\n - [nglview](http://nglviewer.org/#nglview): Jupyter/IPython widget to interactively view molecular structures and trajectories in notebooks.\n - [ipywidgets](https://github.com/jupyter-widgets/ipywidgets): Interactive HTML widgets for Jupyter notebooks and the IPython kernel.\n - [plotly](https://plot.ly/python/offline/): Python interactive graphing library integrated in Jupyter notebooks.\n - [simpletraj](https://github.com/arose/simpletraj): Lightweight coordinate-only trajectory reader based on code from GROMACS, MDAnalysis and VMD.\n\n### Conda Installation and Launch\n\n```console\n git clone https://github.com/bioexcel/biobb_wf_md_setup_api.git\n cd biobb_wf_md_setup_api\n conda env create -f conda_env/environment.yml\n conda activate biobb_MDsetupAPI_tutorial\n jupyter-nbextension enable --py --user widgetsnbextension\n jupyter-nbextension enable --py --user nglview\n jupyter-notebook biobb_wf_md_setup_api/notebooks/biobb_MDsetupAPI_tutorial.ipynb\n ``` \n\n***\n \n## Pipeline steps\n 1. [Input Parameters](#input)\n 2. [Fetching PDB Structure](#fetch)\n 3. [Fix Protein Structure](#fix)\n 4. [Create Protein System Topology](#top)\n 5. [Create Solvent Box](#box)\n 6. [Fill the Box with Water Molecules](#water)\n 7. [Adding Ions](#ions)\n 8. [Energetically Minimize the System](#min)\n 9. [Equilibrate the System (NVT)](#nvt)\n 10. [Equilibrate the System (NPT)](#npt)\n 11. [Free Molecular Dynamics Simulation](#free)\n 12. [Post-processing and Visualizing Resulting 3D Trajectory](#post)\n 13. [Output Files](#output)\n 14. [Questions & Comments](#questions)\n \n***\n<img src=\"https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png\" alt=\"Bioexcel2 logo\"\n\ttitle=\"Bioexcel2 logo\" width=\"400\" />\n***\n", "_____no_output_____" ], [ "<a id=\"input\"></a>\n## Input parameters\n**Input parameters** needed:\n - **pdbCode**: PDB code of the protein structure (e.g. 1AKI)\n - **apiURL**: Base URL for the Biobb REST API (https://mmb.irbbarcelona.org/biobb-api/rest/v1/)\n \nAdditionally, the **utils** library is loaded. This library contains global functions that are used for sending and retrieving data to / from the REST API. 
[Click here](https://mmb.irbbarcelona.org/biobb-api/tutorial) for more information about how the BioBB REST API works and which is the purpose for each of these functions.", "_____no_output_____" ] ], [ [ "import nglview\nimport ipywidgets\nfrom utils import *\n\npdbCode = \"1AKI\"\napiURL = \"https://mmb.irbbarcelona.org/biobb-api/rest/v1/\" ", "_____no_output_____" ] ], [ [ "<a id=\"fetch\"></a>\n***\n## Fetching PDB structure\nDownloading **PDB structure** with the **protein molecule** from the RCSB PDB database.<br>\nAlternatively, a **PDB file** can be used as starting structure. <br>\n\n***\n**BioBB REST API** end points used:\n - [PDB](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_io/pdb) from **biobb_io.api.pdb**\n***", "_____no_output_____" ] ], [ [ "# Downloading desired PDB file\n\n# Create properties dict and inputs/outputs\ndownloaded_pdb = pdbCode + '.pdb'\nprop = {\n 'pdb_code': pdbCode\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_io/pdb', \n config = prop,\n output_pdb_path = downloaded_pdb)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"vis3D\"></a>\n### Visualizing 3D structure\nVisualizing the downloaded/given **PDB structure** using **NGL**: ", "_____no_output_____" ] ], [ [ "# Show protein\nview = nglview.show_structure_file(downloaded_pdb)\nview.add_representation(repr_type='ball+stick', selection='all')\nview._remote_call('setSize', target='Widget', args=['','600px'])\nview", "_____no_output_____" ] ], [ [ "<img src='ngl1.png'></img>", "_____no_output_____" ], [ "<a id=\"fix\"></a>\n***\n## Fix protein structure\n**Checking** and **fixing** (if needed) the protein structure:<br>\n- **Modeling** **missing side-chain atoms**, modifying incorrect **amide assignments**, choosing **alternative locations**.<br>\n- **Checking** for missing **backbone atoms**, **heteroatoms**, **modified residues** and possible **atomic clashes**.\n\n***\n**BioBB REST API** end points used:\n - [FixSideChain](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_model/fix_side_chain) from **biobb_model.model.fix_side_chain**\n***", "_____no_output_____" ] ], [ [ "# Check & Fix PDB\n\n# Create inputs/outputs\nfixed_pdb = pdbCode + '_fixed.pdb'\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_model/fix_side_chain',\n input_pdb_path = downloaded_pdb,\n output_pdb_path = fixed_pdb)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "### Visualizing 3D structure\nVisualizing the fixed **PDB structure** using **NGL**. In this particular example, the checking step didn't find any issue to be solved, so there is no difference between the original structure and the fixed one. 
", "_____no_output_____" ] ], [ [ "# Show protein\nview = nglview.show_structure_file(fixed_pdb)\nview.add_representation(repr_type='ball+stick', selection='all')\nview._remote_call('setSize', target='Widget', args=['','600px'])\nview.camera='orthographic'\nview", "_____no_output_____" ] ], [ [ "<img src='ngl2.png'></img>", "_____no_output_____" ], [ "<a id=\"top\"></a>\n***\n## Create protein system topology\n**Building GROMACS topology** corresponding to the protein structure.<br>\nForce field used in this tutorial is [**amber99sb-ildn**](https://dx.doi.org/10.1002%2Fprot.22711): AMBER **parm99** force field with **corrections on backbone** (sb) and **side-chain torsion potentials** (ildn). Water molecules type used in this tutorial is [**spc/e**](https://pubs.acs.org/doi/abs/10.1021/j100308a038).<br>\nAdding **hydrogen atoms** if missing. Automatically identifying **disulfide bridges**. <br>\n\nGenerating two output files: \n- **GROMACS structure** (gro file)\n- **GROMACS topology** ZIP compressed file containing:\n - *GROMACS topology top file* (top file)\n - *GROMACS position restraint file/s* (itp file/s)\n***\n**BioBB REST API** end points used:\n - [Pdb2gmx](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/pdb2gmx) from **biobb_md.gromacs.pdb2gmx**\n***", "_____no_output_____" ] ], [ [ "# Create system topology\n\n# Create inputs/outputs\noutput_pdb2gmx_gro = pdbCode + '_pdb2gmx.gro'\noutput_pdb2gmx_top_zip = pdbCode + '_pdb2gmx_top.zip'\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/pdb2gmx', \n input_pdb_path = fixed_pdb,\n output_gro_path = output_pdb2gmx_gro,\n output_top_zip_path = output_pdb2gmx_top_zip)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "### Visualizing 3D structure\nVisualizing the generated **GRO structure** using **NGL**. Note that **hydrogen atoms** were added to the structure by the **pdb2gmx GROMACS tool** when generating the **topology**. ", "_____no_output_____" ] ], [ [ "# Show protein\nview = nglview.show_structure_file(output_pdb2gmx_gro)\nview.add_representation(repr_type='ball+stick', selection='all')\nview._remote_call('setSize', target='Widget', args=['','600px'])\nview.camera='orthographic'\nview", "_____no_output_____" ] ], [ [ "<img src='ngl3.png'></img>", "_____no_output_____" ], [ "<a id=\"box\"></a>\n***\n## Create solvent box\nDefine the unit cell for the **protein structure MD system** to fill it with water molecules.<br>\nA **cubic box** is used to define the unit cell, with a **distance from the protein to the box edge of 1.0 nm**. The protein is **centered in the box**. 
\n\n***\n**BioBB REST API** end points used:\n - [Editconf](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/editconf) from **biobb_md.gromacs.editconf**\n***", "_____no_output_____" ] ], [ [ "# Editconf: Create solvent box\n\n# Create properties dict and inputs/outputs\noutput_editconf_gro = pdbCode + '_editconf.gro'\nprop = {\n 'box_type': 'cubic',\n 'distance_to_molecule': 1.0\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/editconf', \n config = prop,\n input_gro_path = output_pdb2gmx_gro,\n output_gro_path = output_editconf_gro)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"water\"></a>\n***\n## Fill the box with water molecules\nFill the unit cell for the **protein structure system** with water molecules.<br>\nThe solvent type used is the default **Simple Point Charge water (SPC)**, a generic equilibrated 3-point solvent model. \n\n***\n**BioBB REST API** end points used:\n - [Solvate](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/solvate) from **biobb_md.gromacs.solvate**\n***", "_____no_output_____" ] ], [ [ "# Solvate: Fill the box with water molecules\n\n# Create inputs/outputs\noutput_solvate_gro = pdbCode + '_solvate.gro'\noutput_solvate_top_zip = pdbCode + '_solvate_top.zip'\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/solvate', \n input_solute_gro_path = output_editconf_gro,\n input_top_zip_path = output_pdb2gmx_top_zip,\n output_gro_path = output_solvate_gro,\n output_top_zip_path = output_solvate_top_zip)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "### Visualizing 3D structure\nVisualizing the **protein system** with the newly added **solvent box** using **NGL**.<br> Note the **cubic box** filled with **water molecules** surrounding the **protein structure**, which is **centered** right in the middle of the cube.", "_____no_output_____" ] ], [ [ "# Show protein\nview = nglview.show_structure_file(output_solvate_gro)\nview.clear_representations()\nview.add_representation(repr_type='cartoon', selection='solute', color='green')\nview.add_representation(repr_type='ball+stick', selection='SOL')\nview._remote_call('setSize', target='Widget', args=['','600px'])\nview.camera='orthographic'\nview", "_____no_output_____" ] ], [ [ "<img src='ngl4.png'></img>", "_____no_output_____" ], [ "<a id=\"ions\"></a>\n***\n## Adding ions\nAdd ions to neutralize the **protein structure** charge\n- [Step 1](#ionsStep1): Creating portable binary run file for ion generation\n- [Step 2](#ionsStep2): Adding ions to **neutralize** the system\n\n***\n**BioBB REST API** end points used:\n - [Grompp](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/grompp) from **biobb_md.gromacs.grompp**\n - [Genion](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/genion) from **biobb_md.gromacs.genion**\n***", "_____no_output_____" ], [ "<a id=\"ionsStep1\"></a>\n### Step 1: Creating portable binary run file for ion generation\nA simple **energy minimization** molecular dynamics parameters (mdp) properties will be used to generate the portable binary run file for **ion generation**, although **any legitimate combination of parameters** could be used in 
this step.", "_____no_output_____" ] ], [ [ "# Grompp: Creating portable binary run file for ion generation\n\n# Create prop dict and inputs/outputs\noutput_gppion_tpr = pdbCode + '_gppion.tpr'\nprop = {\n 'simulation_type':'minimization'\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/grompp', \n config = prop,\n input_gro_path = output_solvate_gro,\n input_top_zip_path = output_solvate_top_zip,\n output_tpr_path = output_gppion_tpr)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"ionsStep2\"></a>\n### Step 2: Adding ions to neutralize the system\nReplace **solvent molecules** with **ions** to **neutralize** the system.", "_____no_output_____" ] ], [ [ "# Genion: Adding ions to neutralize the system\n\n# Create prop dict and inputs/outputs\noutput_genion_gro = pdbCode + '_genion.gro'\noutput_genion_top_zip = pdbCode + '_genion_top.zip'\nprop={\n 'neutral':True\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/genion',\n config = prop,\n input_tpr_path = output_gppion_tpr,\n input_top_zip_path = output_solvate_top_zip,\n output_gro_path = output_genion_gro,\n output_top_zip_path = output_genion_top_zip)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "### Visualizing 3D structure\nVisualizing the **neutralized protein system** with the newly added **ions** using **NGL**", "_____no_output_____" ] ], [ [ "# Show protein\nview = nglview.show_structure_file(output_genion_gro)\nview.clear_representations()\nview.add_representation(repr_type='cartoon', selection='solute', color='sstruc')\nview.add_representation(repr_type='ball+stick', selection='NA')\nview.add_representation(repr_type='ball+stick', selection='CL')\nview._remote_call('setSize', target='Widget', args=['','600px'])\nview.camera='orthographic'\nview", "_____no_output_____" ] ], [ [ "<img src='ngl5.png'></img>", "_____no_output_____" ], [ "<a id=\"min\"></a>\n***\n## Energetically minimize the system\nEnergetically minimize the **protein system** till reaching a desired potential energy.\n- [Step 1](#emStep1): Creating portable binary run file for energy minimization\n- [Step 2](#emStep2): Energetically minimize the **system** till reaching a force of 500 kJ mol-1 nm-1.\n- [Step 3](#emStep3): Checking **energy minimization** results. 
Plotting energy by time during the **minimization** process.\n\n***\n**BioBB REST API** end points used:\n - [Grompp](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/grompp) from **biobb_md.gromacs.grompp**\n - [Mdrun](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/mdrun) from **biobb_md.gromacs.mdrun**\n - [GMXEnergy](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_analysis/gmx_energy) from **biobb_analysis.gromacs.gmx_energy**\n***", "_____no_output_____" ], [ "<a id=\"emStep1\"></a>\n### Step 1: Creating portable binary run file for energy minimization\nThe **minimization** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run an **energy minimization**:\n\n- integrator = steep ; Algorithm (steep = steepest descent minimization)\n- emtol = 1000.0 ; Stop minimization when the maximum force < 1000.0 kJ/mol/nm\n- emstep = 0.01 ; Minimization step size (nm)\n- nsteps = 50000 ; Maximum number of (minimization) steps to perform\n\nIn this particular example, the method used to run the **energy minimization** is the default **steepest descent**, but the **maximum force** is placed at **500 KJ/mol\\*nm^2**, and the **maximum number of steps** to perform (if the maximum force is not reached) to **5,000 steps**. ", "_____no_output_____" ] ], [ [ "# Grompp: Creating portable binary run file for mdrun\n\n# Create prop dict and inputs/outputs\noutput_gppmin_tpr = pdbCode + '_gppmin.tpr'\nprop = {\n 'mdp':{\n 'emtol':'500',\n 'nsteps':'5000'\n },\n 'simulation_type':'minimization'\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/grompp', \n config = prop,\n input_gro_path = output_genion_gro,\n input_top_zip_path = output_genion_top_zip,\n output_tpr_path = output_gppmin_tpr)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"emStep2\"></a>\n### Step 2: Running Energy Minimization\nRunning **energy minimization** using the **tpr file** generated in the previous step. ", "_____no_output_____" ] ], [ [ "# Mdrun: Running minimization\n\n# Create inputs/outputs\noutput_min_trr = pdbCode + '_min.trr'\noutput_min_gro = pdbCode + '_min.gro'\noutput_min_edr = pdbCode + '_min.edr'\noutput_min_log = pdbCode + '_min.log'\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/mdrun', \n input_tpr_path = output_gppmin_tpr,\n output_trr_path = output_min_trr,\n output_gro_path = output_min_gro,\n output_edr_path = output_min_edr,\n output_log_path = output_min_log)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"emStep3\"></a>\n### Step 3: Checking Energy Minimization results\nChecking **energy minimization** results. Plotting **potential energy** by time during the minimization process. 
", "_____no_output_____" ] ], [ [ "# GMXEnergy: Getting system energy by time \n\n# Create prop dict and inputs/outputs\noutput_min_ene_xvg = pdbCode + '_min_ene.xvg'\nprop = {\n 'terms': [\"Potential\"]\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/gmx_energy',\n config = prop,\n input_energy_path = output_min_edr,\n output_xvg_path = output_min_ene_xvg)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ], [ "import plotly\nimport plotly.graph_objs as go\n\n#Read data from file and filter energy values higher than 1000 Kj/mol^-1\nwith open(output_min_ene_xvg,'r') as energy_file:\n x,y = map(\n list,\n zip(*[\n (float(line.split()[0]),float(line.split()[1]))\n for line in energy_file \n if not line.startswith((\"#\",\"@\")) \n if float(line.split()[1]) < 1000 \n ])\n )\n\nplotly.offline.init_notebook_mode(connected=True)\n\nfig = {\n \"data\": [go.Scatter(x=x, y=y)],\n \"layout\": go.Layout(title=\"Energy Minimization\",\n xaxis=dict(title = \"Energy Minimization Step\"),\n yaxis=dict(title = \"Potential Energy KJ/mol-1\")\n )\n}\n\nplotly.offline.iplot(fig)", "_____no_output_____" ] ], [ [ "<img src='plot1.png'></img>", "_____no_output_____" ], [ "<a id=\"nvt\"></a>\n***\n## Equilibrate the system (NVT)\nEquilibrate the **protein system** in **NVT ensemble** (constant Number of particles, Volume and Temperature). Protein **heavy atoms** will be restrained using position restraining forces: movement is permitted, but only after overcoming a substantial energy penalty. The utility of position restraints is that they allow us to equilibrate our solvent around our protein, without the added variable of structural changes in the protein.\n\n- [Step 1](#eqNVTStep1): Creating portable binary run file for system equilibration\n- [Step 2](#eqNVTStep2): Equilibrate the **protein system** with **NVT** ensemble.\n- [Step 3](#eqNVTStep3): Checking **NVT Equilibration** results. Plotting **system temperature** by time during the **NVT equilibration** process. 
\n\n***\n**BioBB REST API** end points used:\n - [Grompp](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/grompp) from **biobb_md.gromacs.grompp**\n - [Mdrun](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/mdrun) from **biobb_md.gromacs.mdrun**\n - [GMXEnergy](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_analysis/gmx_energy) from **biobb_analysis.gromacs.gmx_energy**\n***", "_____no_output_____" ], [ "<a id=\"eqNVTStep1\"></a>\n### Step 1: Creating portable binary run file for system equilibration (NVT)\nThe **nvt** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run an **NVT equilibration** with **protein restraints** (see [GROMACS mdp options](http://manual.gromacs.org/documentation/2018/user-guide/mdp-options.html)):\n\n- Define = -DPOSRES\n- integrator = md\n- dt = 0.002\n- nsteps = 5000\n- pcoupl = no\n- gen_vel = yes\n- gen_temp = 300\n- gen_seed = -1\n\nIn this particular example, the default parameters will be used: **md** integrator algorithm, a **step size** of **2fs**, **5,000 equilibration steps** with the protein **heavy atoms restrained**, and a temperature of **300K**.\n\n*Please note that for the sake of time this tutorial is only running 10ps of NVT equilibration, whereas in the [original example](http://www.mdtutorials.com/gmx/lysozyme/06_equil.html) the simulated time was 100ps.*", "_____no_output_____" ] ], [ [ "# Grompp: Creating portable binary run file for NVT Equilibration\n\n# Create prop dict and inputs/outputs\noutput_gppnvt_tpr = pdbCode + '_gppnvt.tpr'\nprop = {\n 'mdp':{\n 'nsteps': 5000,\n 'dt': 0.002,\n 'Define': '-DPOSRES',\n #'tc_grps': \"DNA Water_and_ions\" # NOTE: uncomment this line if working with DNA\n },\n 'simulation_type':'nvt'\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/grompp',\n config = prop,\n input_gro_path = output_min_gro,\n input_top_zip_path = output_genion_top_zip,\n output_tpr_path = output_gppnvt_tpr)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"eqNVTStep2\"></a>\n### Step 2: Running NVT equilibration\n\nRunning **energy minimization** using the **tpr file** generated in the previous step.", "_____no_output_____" ] ], [ [ "# Mdrun: Running Equilibration NVT\n\n# Create inputs/outputs\noutput_nvt_trr = pdbCode + '_nvt.trr'\noutput_nvt_gro = pdbCode + '_nvt.gro'\noutput_nvt_edr = pdbCode + '_nvt.edr'\noutput_nvt_log = pdbCode + '_nvt.log'\noutput_nvt_cpt = pdbCode + '_nvt.cpt'\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/mdrun',\n input_tpr_path = output_gppnvt_tpr,\n output_trr_path = output_nvt_trr,\n output_gro_path = output_nvt_gro,\n output_edr_path = output_nvt_edr,\n output_log_path = output_nvt_log,\n output_cpt_path = output_nvt_cpt)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"eqNVTStep3\"></a>\n### Step 3: Checking NVT Equilibration results\nChecking **NVT Equilibration** results. Plotting **system temperature** by time during the NVT equilibration process. 
", "_____no_output_____" ] ], [ [ "# GMXEnergy: Getting system temperature by time during NVT Equilibration \n\n# Create prop dict and inputs/outputs\noutput_nvt_temp_xvg = pdbCode + '_nvt_temp.xvg'\nprop = {\n 'terms': [\"Temperature\"]\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/gmx_energy', \n config = prop,\n input_energy_path = output_nvt_edr,\n output_xvg_path = output_nvt_temp_xvg)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ], [ "import plotly\nimport plotly.graph_objs as go\n\n# Read temperature data from file \nwith open(output_nvt_temp_xvg,'r') as temperature_file:\n x,y = map(\n list,\n zip(*[\n (float(line.split()[0]),float(line.split()[1]))\n for line in temperature_file \n if not line.startswith((\"#\",\"@\")) \n ])\n )\n\nplotly.offline.init_notebook_mode(connected=True)\n\nfig = {\n \"data\": [go.Scatter(x=x, y=y)],\n \"layout\": go.Layout(title=\"Temperature during NVT Equilibration\",\n xaxis=dict(title = \"Time (ps)\"),\n yaxis=dict(title = \"Temperature (K)\")\n )\n}\n\nplotly.offline.iplot(fig)", "_____no_output_____" ] ], [ [ "<img src='plot2.png'></img>", "_____no_output_____" ], [ "<a id=\"npt\"></a>\n***\n## Equilibrate the system (NPT)\nEquilibrate the **protein system** in **NPT** ensemble (constant Number of particles, Pressure and Temperature).\n- [Step 1](#eqNPTStep1): Creating portable binary run file for system equilibration\n- [Step 2](#eqNPTStep2): Equilibrate the **protein system** with **NPT** ensemble.\n- [Step 3](#eqNPTStep3): Checking **NPT Equilibration** results. Plotting **system pressure and density** by time during the **NPT equilibration** process.\n***\n**BioBB REST API** end points used:\n - [Grompp](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/grompp) from **biobb_md.gromacs.grompp**\n - [Mdrun](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/mdrun) from **biobb_md.gromacs.mdrun**\n - [GMXEnergy](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_analysis/gmx_energy) from **biobb_analysis.gromacs.gmx_energy**\n***", "_____no_output_____" ], [ "<a id=\"eqNPTStep1\"></a>\n### Step 1: Creating portable binary run file for system equilibration (NPT)\n\nThe **npt** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run an **NPT equilibration** with **protein restraints** (see [GROMACS mdp options](http://manual.gromacs.org/documentation/2018/user-guide/mdp-options.html)):\n\n- Define = -DPOSRES\n- integrator = md\n- dt = 0.002\n- nsteps = 5000\n- pcoupl = Parrinello-Rahman\n- pcoupltype = isotropic\n- tau_p = 1.0\n- ref_p = 1.0\n- compressibility = 4.5e-5\n- refcoord_scaling = com\n- gen_vel = no\n\nIn this particular example, the default parameters will be used: **md** integrator algorithm, a **time step** of **2fs**, **5,000 equilibration steps** with the protein **heavy atoms restrained**, and a Parrinello-Rahman **pressure coupling** algorithm.\n\n*Please note that for the sake of time this tutorial is only running 10ps of NPT equilibration, whereas in the [original example](http://www.mdtutorials.com/gmx/lysozyme/07_equil2.html) the simulated time was 100ps.*", "_____no_output_____" ] ], [ [ "# Grompp: Creating portable binary run file for NPT System Equilibration\n\n# Create prop dict and inputs/outputs\noutput_gppnpt_tpr = pdbCode + 
'_gppnpt.tpr'\nprop = {\n 'mdp':{\n 'nsteps':'5000',\n #'tc_grps': \"DNA Water_and_ions\" # NOTE: uncomment this line if working with DNA\n },\n 'simulation_type':'npt'\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/grompp', \n config = prop,\n input_gro_path = output_nvt_gro,\n input_top_zip_path = output_genion_top_zip,\n output_tpr_path = output_gppnpt_tpr)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"eqNPTStep2\"></a>\n### Step 2: Running NPT equilibration", "_____no_output_____" ] ], [ [ "# Mdrun: Running NPT System Equilibration\n\n# Create inputs/outputs\noutput_npt_trr = pdbCode + '_npt.trr'\noutput_npt_gro = pdbCode + '_npt.gro'\noutput_npt_edr = pdbCode + '_npt.edr'\noutput_npt_log = pdbCode + '_npt.log'\noutput_npt_cpt = pdbCode + '_npt.cpt'\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/mdrun',\n input_tpr_path = output_gppnpt_tpr,\n output_trr_path = output_npt_trr,\n output_gro_path = output_npt_gro,\n output_edr_path = output_npt_edr,\n output_log_path = output_npt_log,\n output_cpt_path = output_npt_cpt)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"eqNPTStep3\"></a>\n### Step 3: Checking NPT Equilibration results\nChecking **NPT Equilibration** results. Plotting **system pressure and density** by time during the **NPT equilibration** process. ", "_____no_output_____" ] ], [ [ "# GMXEnergy: Getting system pressure and density by time during NPT Equilibration \n\n# Create prop dict and inputs/outputs\noutput_npt_pd_xvg = pdbCode + '_npt_PD.xvg'\nprop = {\n 'terms': [\"Pressure\",\"Density\"]\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/gmx_energy',\n config = prop,\n input_energy_path = output_npt_edr,\n output_xvg_path = output_npt_pd_xvg)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ], [ "import plotly\nfrom plotly import subplots\nimport plotly.graph_objs as go\n\n# Read pressure and density data from file \nwith open(output_npt_pd_xvg,'r') as pd_file:\n x,y,z = map(\n list,\n zip(*[\n (float(line.split()[0]),float(line.split()[1]),float(line.split()[2]))\n for line in pd_file \n if not line.startswith((\"#\",\"@\")) \n ])\n )\n\nplotly.offline.init_notebook_mode(connected=True)\n\ntrace1 = go.Scatter(\n x=x,y=y\n)\ntrace2 = go.Scatter(\n x=x,y=z\n)\n\nfig = subplots.make_subplots(rows=1, cols=2, print_grid=False)\n\nfig.append_trace(trace1, 1, 1)\nfig.append_trace(trace2, 1, 2)\n\nfig['layout']['xaxis1'].update(title='Time (ps)')\nfig['layout']['xaxis2'].update(title='Time (ps)')\nfig['layout']['yaxis1'].update(title='Pressure (bar)')\nfig['layout']['yaxis2'].update(title='Density (Kg*m^-3)')\n\nfig['layout'].update(title='Pressure and Density during NPT Equilibration')\nfig['layout'].update(showlegend=False)\n\nplotly.offline.iplot(fig)", "_____no_output_____" ] ], [ [ "<img src='plot3.png'></img>", "_____no_output_____" ], [ "<a id=\"free\"></a>\n***\n## Free Molecular Dynamics Simulation\nUpon completion of the **two equilibration phases (NVT and NPT)**, 
the system is now well-equilibrated at the desired temperature and pressure. The **position restraints** can now be released. The last step of the **protein** MD setup is a short, **free MD simulation**, to ensure the robustness of the system. \n- [Step 1](#mdStep1): Creating portable binary run file to run a **free MD simulation**.\n- [Step 2](#mdStep2): Run short MD simulation of the **protein system**.\n- [Step 3](#mdStep3): Checking results for the final step of the setup process, the **free MD run**. Plotting **Root Mean Square deviation (RMSd)** and **Radius of Gyration (Rgyr)** by time during the **free MD run** step. \n***\n**BioBB REST API** end points used:\n - [Grompp](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/grompp) from **biobb_md.gromacs.grompp**\n - [Mdrun](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_md/mdrun) from **biobb_md.gromacs.mdrun**\n - [GMXRms](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_analysis/gmx_rms) from **biobb_analysis.gromacs.gmx_rms**\n - [GMXRgyr](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_analysis/gmx_rgyr) from **biobb_analysis.gromacs.gmx_rgyr**\n***", "_____no_output_____" ], [ "<a id=\"mdStep1\"></a>\n### Step 1: Creating portable binary run file to run a free MD simulation\n\nThe **free** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run a **free MD simulation** (see [GROMACS mdp options](http://manual.gromacs.org/documentation/2018/user-guide/mdp-options.html)):\n\n- integrator = md\n- dt = 0.002 (ps)\n- nsteps = 50000\n\nIn this particular example, the default parameters will be used: **md** integrator algorithm, a **time step** of **2fs**, and a total of **50,000 md steps** (100ps).\n\n*Please note that for the sake of time this tutorial is only running 100ps of free MD, whereas in the [original example](http://www.mdtutorials.com/gmx/lysozyme/08_MD.html) the simulated time was 1ns (1000ps).*", "_____no_output_____" ] ], [ [ "# Grompp: Creating portable binary run file for mdrun\n\n# Create prop dict and inputs/outputs\noutput_gppmd_tpr = pdbCode + '_gppmd.tpr'\nprop = {\n 'mdp':{\n 'nsteps':'50000',\n #'tc_grps': \"DNA Water_and_ions\" # NOTE: uncomment this line if working with DNA\n },\n 'simulation_type':'free'\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/grompp',\n config = prop,\n input_gro_path = output_npt_gro,\n input_top_zip_path = output_genion_top_zip,\n output_tpr_path = output_gppmd_tpr)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"mdStep2\"></a>\n### Step 2: Running short free MD simulation", "_____no_output_____" ] ], [ [ "# Mdrun: Running free dynamics\n\n# Create inputs/outputs\noutput_md_trr = pdbCode + '_md.trr'\noutput_md_gro = pdbCode + '_md.gro'\noutput_md_edr = pdbCode + '_md.edr'\noutput_md_log = pdbCode + '_md.log'\noutput_md_cpt = pdbCode + '_md.cpt'\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_md/mdrun',\n input_tpr_path = output_gppmd_tpr,\n output_trr_path = output_md_trr,\n output_gro_path = output_md_gro,\n output_edr_path = output_md_edr,\n output_log_path = output_md_log,\n output_cpt_path = output_md_cpt)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to 
disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"mdStep3\"></a>\n### Step 3: Checking free MD simulation results\nChecking results for the final step of the setup process, the **free MD run**. Plotting **Root Mean Square deviation (RMSd)** and **Radius of Gyration (Rgyr)** by time during the **free MD run** step. **RMSd** against the **experimental structure** (input structure of the pipeline) and against the **minimized and equilibrated structure** (output structure of the NPT equilibration step).", "_____no_output_____" ] ], [ [ "# GMXRms: Computing Root Mean Square deviation to analyse structural stability \n# RMSd against minimized and equilibrated snapshot (backbone atoms) \n\n# Create prop dict and inputs/outputs\noutput_rms_first = pdbCode + '_rms_first.xvg'\nprop = {\n 'selection': 'Backbone',\n #'selection': 'non-Water'\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/gmx_rms', \n config = prop,\n input_structure_path = output_gppmd_tpr,\n input_traj_path = output_md_trr,\n output_xvg_path = output_rms_first)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ], [ "# GMXRms: Computing Root Mean Square deviation to analyse structural stability \n# RMSd against experimental structure (backbone atoms) \n\n# Create prop dict and inputs/outputs\noutput_rms_exp = pdbCode + '_rms_exp.xvg'\nprop = {\n 'selection': 'Backbone',\n #'selection': 'non-Water'\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/gmx_rms',\n config = prop,\n input_structure_path = output_gppmin_tpr,\n input_traj_path = output_md_trr,\n output_xvg_path = output_rms_exp)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ], [ "import plotly\nimport plotly.graph_objs as go\n\n# Read RMS vs first snapshot data from file \nwith open(output_rms_first,'r') as rms_first_file:\n x,y = map(\n list,\n zip(*[\n (float(line.split()[0]),float(line.split()[1]))\n for line in rms_first_file \n if not line.startswith((\"#\",\"@\")) \n ])\n )\n\n# Read RMS vs experimental structure data from file \nwith open(output_rms_exp,'r') as rms_exp_file:\n x2,y2 = map(\n list,\n zip(*[\n (float(line.split()[0]),float(line.split()[1]))\n for line in rms_exp_file\n if not line.startswith((\"#\",\"@\")) \n ])\n )\n \ntrace1 = go.Scatter(\n x = x,\n y = y,\n name = 'RMSd vs first'\n)\n\ntrace2 = go.Scatter(\n x = x,\n y = y2,\n name = 'RMSd vs exp'\n)\n\ndata = [trace1, trace2]\n\nplotly.offline.init_notebook_mode(connected=True)\n\nfig = {\n \"data\": data,\n \"layout\": go.Layout(title=\"RMSd during free MD Simulation\",\n xaxis=dict(title = \"Time (ps)\"),\n yaxis=dict(title = \"RMSd (nm)\")\n )\n}\n\nplotly.offline.iplot(fig)\n", "_____no_output_____" ] ], [ [ "<img src='plot4.png'></img>", "_____no_output_____" ] ], [ [ "# GMXRgyr: Computing Radius of Gyration to measure the protein compactness during the free MD simulation \n\n# Create prop dict and inputs/outputs\noutput_rgyr = pdbCode + '_rgyr.xvg'\nprop = {\n 'selection': 'Backbone'\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/gmx_rgyr', \n config = prop,\n input_structure_path = output_gppmin_tpr,\n input_traj_path = output_md_trr,\n 
output_xvg_path = output_rgyr)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ], [ "import plotly\nimport plotly.graph_objs as go\n\n# Read Rgyr data from file \nwith open(output_rgyr,'r') as rgyr_file:\n x,y = map(\n list,\n zip(*[\n (float(line.split()[0]),float(line.split()[1]))\n for line in rgyr_file \n if not line.startswith((\"#\",\"@\")) \n ])\n )\n\nplotly.offline.init_notebook_mode(connected=True)\n\nfig = {\n \"data\": [go.Scatter(x=x, y=y)],\n \"layout\": go.Layout(title=\"Radius of Gyration\",\n xaxis=dict(title = \"Time (ps)\"),\n yaxis=dict(title = \"Rgyr (nm)\")\n )\n}\n\nplotly.offline.iplot(fig)", "_____no_output_____" ] ], [ [ "<img src='plot5.png'></img>", "_____no_output_____" ], [ "<a id=\"post\"></a>\n***\n## Post-processing and Visualizing resulting 3D trajectory\nPost-processing and Visualizing the **protein system** MD setup **resulting trajectory** using **NGL**\n- [Step 1](#ppStep1): *Imaging* the resulting trajectory, **stripping out water molecules and ions** and **correcting periodicity issues**.\n- [Step 2](#ppStep2): Generating a *dry* structure, **removing water molecules and ions** from the final snapshot of the MD setup pipeline.\n- [Step 3](#ppStep3): Visualizing the *imaged* trajectory using the *dry* structure as a **topology**. \n***\n**BioBB REST API** end points used:\n - [GMXImage](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_analysis/gmx_image) from **biobb_analysis.gromacs.gmx_image**\n - [GMXTrjConvStr](https://mmb.irbbarcelona.org/biobb-api/rest/v1/launch/biobb_analysis/gmx_trjconv_str) from **biobb_analysis.gromacs.gmx_trjconv_str**\n***", "_____no_output_____" ], [ "<a id=\"ppStep1\"></a>\n### Step 1: *Imaging* the resulting trajectory.\nStripping out **water molecules and ions** and **correcting periodicity issues** ", "_____no_output_____" ] ], [ [ "# GMXImage: \"Imaging\" the resulting trajectory\n# Removing water molecules and ions from the resulting structure\n\n# Create prop dict and inputs/outputs\noutput_imaged_traj = pdbCode + '_imaged_traj.trr'\nprop = {\n 'center_selection': 'Protein',\n 'output_selection': 'Protein',\n 'pbc' : 'mol',\n 'center' : True\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/gmx_image',\n config = prop,\n input_traj_path = output_md_trr,\n input_top_path = output_gppmd_tpr,\n output_traj_path = output_imaged_traj)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"ppStep2\"></a>\n### Step 2: Generating the output *dry* structure.\n**Removing water molecules and ions** from the resulting structure", "_____no_output_____" ] ], [ [ "# GMXTrjConvStr: Converting and/or manipulating a structure\n# Removing water molecules and ions from the resulting structure\n# The \"dry\" structure will be used as a topology to visualize \n# the \"imaged dry\" trajectory generated in the previous step.\n\n# Create prop dict and inputs/outputs\noutput_dry_gro = pdbCode + '_md_dry.gro'\nprop = {\n 'selection': 'Protein'\n}\n\n# Launch bb on REST API\ntoken = launch_job(url = apiURL + 'launch/biobb_analysis/gmx_trjconv_str',\n config = prop,\n input_structure_path = output_md_gro,\n input_top_path = output_gppmd_tpr,\n output_str_path = 
output_dry_gro)", "_____no_output_____" ], [ "# Check job status\nout_files = check_job(token, apiURL)", "_____no_output_____" ], [ "# Save generated file to disk\nretrieve_data(out_files, apiURL)", "_____no_output_____" ] ], [ [ "<a id=\"ppStep3\"></a>\n### Step 3: Visualizing the generated dehydrated trajectory.\nUsing the **imaged trajectory** (output of the [Post-processing step 1](#ppStep1)) with the **dry structure** (output of the [Post-processing step 2](#ppStep2)) as a topology.", "_____no_output_____" ] ], [ [ "# Show trajectory\nview = nglview.show_simpletraj(nglview.SimpletrajTrajectory(output_imaged_traj, output_dry_gro), gui=True)\nview", "_____no_output_____" ] ], [ [ "<img src='trajectory.gif'></img>", "_____no_output_____" ], [ "<a id=\"output\"></a>\n## Output files\n\nImportant **Output files** generated:\n - {{output_md_gro}}: **Final structure** (snapshot) of the MD setup protocol.\n - {{output_md_trr}}: **Final trajectory** of the MD setup protocol.\n - {{output_md_cpt}}: **Final checkpoint file**, with information about the state of the simulation. It can be used to **restart** or **continue** a MD simulation.\n - {{output_gppmd_tpr}}: **Final tpr file**, GROMACS portable binary run input file. This file contains the starting structure of the **MD setup free MD simulation step**, together with the molecular topology and all the simulation parameters. It can be used to **extend** the simulation.\n - {{output_genion_top_zip}}: **Final topology** of the MD system. It is a compressed zip file including a **topology file** (.top) and a set of auxiliar **include topology** files (.itp).\n\n**Analysis** (MD setup check) output files generated:\n - {{output_rms_first}}: **Root Mean Square deviation (RMSd)** against **minimized and equilibrated structure** of the final **free MD run step**.\n - {{output_rms_exp}}: **Root Mean Square deviation (RMSd)** against **experimental structure** of the final **free MD run step**.\n - {{output_rgyr}}: **Radius of Gyration** of the final **free MD run step** of the **setup pipeline**.\n ", "_____no_output_____" ], [ "***\n<a id=\"questions\"></a>\n\n## Questions & Comments\n\nQuestions, issues, suggestions and comments are really welcome!\n\n* GitHub issues:\n * [https://github.com/bioexcel/biobb](https://github.com/bioexcel/biobb)\n\n* BioExcel forum:\n * [https://ask.bioexcel.eu/c/BioExcel-Building-Blocks-library](https://ask.bioexcel.eu/c/BioExcel-Building-Blocks-library)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
ec866ca3ffeb8f1ac431d070b8ffa53af71000a5
1,607
ipynb
Jupyter Notebook
UdemyPandas/regex.ipynb
simonbaier/learn-pandas
4b73045593a56b15e9d7cebb45f7557d24a7c6c5
[ "MIT" ]
null
null
null
UdemyPandas/regex.ipynb
simonbaier/learn-pandas
4b73045593a56b15e9d7cebb45f7557d24a7c6c5
[ "MIT" ]
null
null
null
UdemyPandas/regex.ipynb
simonbaier/learn-pandas
4b73045593a56b15e9d7cebb45f7557d24a7c6c5
[ "MIT" ]
null
null
null
16.07
34
0.481021
[ [ [ "# Reguar Expressions", "_____no_output_____" ] ], [ [ "import re", "_____no_output_____" ], [ "t = ''", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
ec867be9aab6250e143f88f49ff04454da3e9c51
12,608
ipynb
Jupyter Notebook
pytorch/PyTorch.ipynb
ChiangC/DeepLearning
117a76109ab068ecb6c4e216f26d8901736d0907
[ "Apache-2.0" ]
2
2021-11-05T03:34:55.000Z
2022-03-04T09:41:56.000Z
pytorch/PyTorch.ipynb
ChiangC/DeepLearning
117a76109ab068ecb6c4e216f26d8901736d0907
[ "Apache-2.0" ]
79
2020-01-28T22:53:46.000Z
2022-03-12T00:51:25.000Z
pytorch/PyTorch.ipynb
ChiangC/DeepLearning
117a76109ab068ecb6c4e216f26d8901736d0907
[ "Apache-2.0" ]
1
2021-08-18T11:32:58.000Z
2021-08-18T11:32:58.000Z
20.908789
664
0.474937
[ [ [ "import torch", "_____no_output_____" ] ], [ [ "在开始使用PyTorch时应该了解的主要元素:\n\n* PyTorch张量\n\n* 数学运算\n\n* Autograd模块\n\n* Optim模块\n\n* 神经网络模块\n\n下面让我们依次介绍这些元素吧。", "_____no_output_____" ], [ "## 一、PyTorch张量\n张量只是多维数组。PyTorch中的的张量类似于numpy的ndarrays,另外,张量也可以在GPU上使用。PyTorch支持各种类型的张量。", "_____no_output_____" ], [ "### 1.1Tensor的数据类型\n### 1.1.1 torch.FloatTensor", "_____no_output_____" ] ], [ [ "#指定纬度\na = torch.FloatTensor(2,3)\nprint(a)\nprint(\"\\n b:\")\n\n#按照给定列表生成浮点型的Tensor\nb = torch.FloatTensor([2,3,4,5])\nprint(b)", "tensor([[0., 0., 0.],\n [0., 0., 0.]])\n\n b:\ntensor([2., 3., 4., 5.])\n" ] ], [ [ "### 1.1.2 torch.IntTensor \n用于生成数据类型为整型的Tensor,传递torch.IntTensor的参数可以是一个列表,也可以是表示纬度的元组。", "_____no_output_____" ] ], [ [ "a = torch.IntTensor(2, 3)\nprint(a)\nb = torch.IntTensor([5, 6, 7, 8])\nprint(\"\\n b:\")\nprint(b)", "tensor([[0, 0, 0],\n [0, 0, 0]], dtype=torch.int32)\n\n b:\ntensor([5, 6, 7, 8], dtype=torch.int32)\n" ] ], [ [ "### 1.1.3 torch.rand\n用于生成数据类型为浮点型且维度指定的随机Tensor,和在Numpy中使用的numpy.rand生成随机数的方法类似,随机生成的浮点数据在0~1区间均匀分布。", "_____no_output_____" ] ], [ [ "a = torch.rand(2,3)\nprint(a)", "tensor([[0.7617, 0.8165, 0.5766],\n [0.3291, 0.0191, 0.6009]])\n" ] ], [ [ "### 1.1.4 torch.randn\n用于数据类型为浮点型且维度指定的随机Tensor,和在Numpy中使用的Numpy.randn生成的随机数的方法类似,随机生成的浮点数的取值满足均值为0,方差为1的正态分布。", "_____no_output_____" ] ], [ [ "a = torch.randn(2, 3)\nprint(a)", "tensor([[-0.9301, -1.3113, 0.5000],\n [ 0.1787, 0.2772, -0.5851]])\n" ] ], [ [ "### 1.1.5 torch.arange \ntorch.range已经被弃用\n\n用于生成数据类型为浮点型且自定义开始范围和结束范围的Tensor,所以传递给torch.range的参数有三个,分别是范围的起始值,范围的结束值和步长,其中,步长用于指定从起始值到结束值得每步的数据间隔。", "_____no_output_____" ] ], [ [ "a = torch.arange(1, 10, 1)\nprint(a)", "tensor([1, 2, 3, 4, 5, 6, 7, 8, 9])\n" ] ], [ [ "### 1.1.6 torch.zeros\n用于生成数据类型为浮点型且维度指定的Tensor,不过这个浮点型的Tensor中的元素全部为0.", "_____no_output_____" ] ], [ [ "a = torch.zeros(2, 3)\nprint(a)", "tensor([[0., 0., 0.],\n [0., 0., 0.]])\n" ] ], [ [ "### 1.2 Tensor的运算", "_____no_output_____" ], [ "### 1.2.1 torch.mul\n将参数传递到torch.mul后返回输入参数求积的结果作为输出,参与运算的参数可以全部是Tensor数据类型的变量,也可以是Tensor数据类型的变量和标量的组合", "_____no_output_____" ] ], [ [ "a = torch.randn(2,3)\nprint(\"a:\")\nprint(a)\n\nb = torch.randn(2,3)\nprint(\"\\n b:\")\nprint(b)\n\nprint(\"\\ntorch.mul(a,b):\")\nc = torch.mul(a, b)\nprint(c)", "a:\ntensor([[ 4.6872e-01, 3.2202e-01, 2.4221e+00],\n [ 8.1761e-01, -2.1217e-03, -1.1340e+00]])\n\n b:\ntensor([[-0.4322, 1.5551, -1.3506],\n [-1.2597, -0.6036, 2.1189]])\n\ntorch.mul(a,b):\ntensor([[-2.0258e-01, 5.0076e-01, -3.2714e+00],\n [-1.0299e+00, 1.2807e-03, -2.4028e+00]])\n" ] ], [ [ "### 1.2.2 torch. 
mm\n将参数传递到torch.mm后返回输入参数的求积结果作为输出,不过这个求积的方式和之前的torch.mul运算方式不太一样,torch.mm运用矩阵之间的乘法规则进行计算,所以被传入的参数会被当作矩阵进行处理,参数的维度自然也要满足矩阵乘法的前提条件,即前一个矩阵的行数必须和后一个矩阵的列数相等,否则不能进行计算。", "_____no_output_____" ] ], [ [ "a = torch.randn(3,2)\nprint(a)\nb = torch.randn(2,3)\nprint(\"\\nb:\")\nprint(b)\n\nc = torch.mm(a, b)\nprint(\"\\ntorch.mm(a,b):\")\nprint(c)", "tensor([[-1.0622, -2.0696],\n [ 0.2198, 0.0640],\n [-0.2105, 0.8796]])\n\nb:\ntensor([[-2.0049, 2.2201, -0.5509],\n [ 0.2837, -1.5067, 1.9639]])\n\ntorch.mm(a,b):\ntensor([[ 1.5425, 0.7603, -3.4794],\n [-0.4225, 0.3916, 0.0046],\n [ 0.6716, -1.7927, 1.8434]])\n" ] ], [ [ "### 1.2.3 torch.mv\n将参数传递到torch.mv后返回输入参数的求积结果作为输出,torch.mv运用矩阵与向量之间的乘法法则进行计算,被传入的参数中第1个参数代表矩阵,第2个参数代表向量,顺序不能颠倒。", "_____no_output_____" ] ], [ [ "a = torch.randn(2,3)\nprint(\"a:\")\nprint(a)\n\nb = torch.randn(3)\nprint(\"\\nb:\")\nprint(b)\n\nc = torch.mv(a,b)\nprint(\"\\ntorch.mv(a,b):\")\nprint(c)", "a:\ntensor([[-0.2934, 0.0307, -0.2456],\n [-1.1221, -1.3581, -0.9236]])\n\nb:\ntensor([-1.7865, -0.0162, 0.2873])\n\ntorch.mv(a,b):\ntensor([0.4530, 1.7612])\n" ] ], [ [ "## 二、数学运算\n与numpy一样,科学计算库非常重要的一点是能够实现高效的数学功能。而PyTorch提供了一个类似的接口,可以使用200个以上的数学运算。", "_____no_output_____" ] ], [ [ "a = torch.FloatTensor([2])\nb = torch.FloatTensor([3])\na + b", "_____no_output_____" ] ], [ [ "还可以在定义的PyTorch张量上执行各种矩阵运算", "_____no_output_____" ] ], [ [ "matrix = torch.randn(3,3)\nmatrix", "_____no_output_____" ], [ "matrix.t()", "_____no_output_____" ] ], [ [ "## 三、Autograd模块\nPyTorch使用了一种叫做自动微分的技术。也就是说,它会有一个记录我们所有执行操作的记录器,之后再回放记录来计算我们的梯度。这一技术在构建神经网络时尤其有效,因为我们可以通过计算前路参数的微分来节省时间。\n", "_____no_output_____" ] ], [ [ "from torch.autograd import Variable", "_____no_output_____" ], [ "x = Variable(train_x)\ny = Variable(train_y, requires_grad = False)", "_____no_output_____" ] ], [ [ "## 四、Optim模块\ntorch.optim是一个实现各种优化算法的模块,用于构建神经网络。它支持大多数常用的方法,因此我们不必从头开始构建它们。", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec867ca4e0e068809aadcb5aeedd07287f89bf3e
14,461
ipynb
Jupyter Notebook
examples/99.To be released, but working/4. SearchNet/1. DDPG_SN.ipynb
XinruLiu/RecNN
8dec25a1f27cf5b26d809768ac98e596c1622d73
[ "Apache-2.0" ]
495
2019-01-22T06:50:38.000Z
2022-03-29T01:45:37.000Z
examples/99.To be released, but working/4. SearchNet/1. DDPG_SN.ipynb
XinruLiu/RecNN
8dec25a1f27cf5b26d809768ac98e596c1622d73
[ "Apache-2.0" ]
25
2019-04-10T02:33:08.000Z
2021-11-02T16:21:16.000Z
examples/99.To be released, but working/4. SearchNet/1. DDPG_SN.ipynb
XinruLiu/RecNN
8dec25a1f27cf5b26d809768ac98e596c1622d73
[ "Apache-2.0" ]
108
2019-03-31T10:32:10.000Z
2022-03-29T09:54:28.000Z
30.508439
101
0.511375
[ [ [ "## Deep TopK Search with Critic Adjustment", "_____no_output_____" ] ], [ [ "from abc import ABC\nimport torch\nimport torch.nn as nn\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch.nn.functional as F\nimport torch_optimizer as optim\n\nfrom tqdm.auto import tqdm\n\nfrom IPython.display import clear_output\n%matplotlib inline\n\n\n# == recnn ==\nimport sys\nsys.path.append(\"../../\")\nimport recnn\n\ncuda = torch.device('cuda')\n\n# ---\nframe_size = 10\nbatch_size = 25\nn_epochs = 100\nplot_every = 30\nstep = 0\n# ---\n\ntqdm.pandas()\n\nfrom jupyterthemes import jtplot\njtplot.style(theme='grade3')", "_____no_output_____" ], [ "# embeddgings: https://drive.google.com/open?id=1EQ_zXBR3DKpmJR3jBgLvt-xoOvArGMsL\ndirs = recnn.data.env.DataPath(\n base=\"../../data/\",\n embeddings=\"embeddings/ml20_pca128.pkl\",\n ratings=\"ml-20m/ratings.csv\",\n cache=\"cache/frame_env.pkl\", # cache will generate after you run\n use_cache=True\n)\nenv = recnn.data.env.FrameEnv(dirs, frame_size, batch_size)", "_____no_output_____" ], [ "class Actor(nn.Module):\n def __init__(self, input_dim, action_dim, hidden_size, init_w=3e-1):\n super(Actor, self).__init__()\n\n self.drop_layer = nn.Dropout(p=0.5)\n\n self.linear1 = nn.Linear(input_dim, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, action_dim)\n\n self.linear3.weight.data.uniform_(-init_w, init_w)\n self.linear3.bias.data.uniform_(-init_w, init_w)\n\n def forward(self, state):\n # state = self.state_rep(state)\n x = F.relu(self.linear1(state))\n x = self.drop_layer(x)\n x = F.relu(self.linear2(x))\n x = self.drop_layer(x)\n # x = torch.tanh(self.linear3(x)) # in case embeds are -1 1 normalized\n x = self.linear3(x) # in case embeds are standard scaled / wiped using PCA whitening\n # return state, x\n return x", "_____no_output_____" ], [ "class Critic(nn.Module):\n def __init__(self, input_dim, action_dim, hidden_size, init_w=3e-5):\n super(Critic, self).__init__()\n\n self.drop_layer = nn.Dropout(p=0.5)\n\n self.linear1 = nn.Linear(input_dim + action_dim, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, 1)\n\n self.linear3.weight.data.uniform_(-init_w, init_w)\n self.linear3.bias.data.uniform_(-init_w, init_w)\n\n def forward(self, state, action):\n x = torch.cat([state, action], 1)\n x = F.relu(self.linear1(x))\n x = self.drop_layer(x)\n x = F.relu(self.linear2(x))\n x = self.drop_layer(x)\n x = self.linear3(x)\n return x", "_____no_output_____" ], [ "class SearchK(nn.Module):\n def __init__(self, input_dim, action_dim, hidden_size, topK, init_w=3e-1):\n super(SearchK, self).__init__()\n\n self.drop_layer = nn.Dropout(p=0.5)\n self.linear1 = nn.Linear(input_dim + action_dim, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, action_dim*topK)\n\n self.linear3.weight.data.uniform_(-init_w, init_w)\n self.linear3.bias.data.uniform_(-init_w, init_w)\n\n def forward(self, state, action):\n x = torch.cat([state, action], 1)\n x = F.relu(self.linear1(x))\n x = self.drop_layer(x)\n x = F.relu(self.linear2(x))\n x = self.drop_layer(x)\n x = self.linear3(x)\n return x", "_____no_output_____" ], [ "def soft_update(net, target_net, soft_tau=1e-2):\n for target_param, param in zip(target_net.parameters(), net.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - soft_tau) + param.data * soft_tau\n )\n\ndef run_tests():\n test_batch = 
next(iter(env.test_dataloader))\n    losses = ddpg_sn_update(test_batch, params, learn=False, step=step)\n\n    gen_actions = debug['next_action']\n    true_actions = env.base.embeddings.detach().cpu().numpy()\n\n    f = plotter.kde_reconstruction_error(ad, gen_actions, true_actions, cuda)\n    writer.add_figure('rec_error',f, losses['step'])\n    return losses", "_____no_output_____" ], [ "def ddpg_sn_update(batch, params, learn=True, step=-1):\n\n    state, action, reward, next_state, done = recnn.data.get_base_batch(batch)\n\n    # --------------------------------------------------------#\n    # Value Learning\n\n    with torch.no_grad():\n        next_action = target_policy_net(next_state)\n        target_value = target_value_net(next_state, next_action.detach())\n        expected_value = reward + (1.0 - done) * params['gamma'] * target_value\n        expected_value = torch.clamp(expected_value,\n                                     params['min_value'], params['max_value'])\n\n    value = value_net(state, action)\n\n    value_loss = torch.pow(value - expected_value.detach(), 2).mean()\n\n    if learn:\n        value_optimizer.zero_grad()\n        value_loss.backward()\n        value_optimizer.step()\n    else:\n        debug['next_action'] = next_action\n        writer.add_figure('next_action',\n                          recnn.utils.pairwise_distances_fig(next_action[:50]), step)\n        writer.add_histogram('value', value, step)\n        writer.add_histogram('target_value', target_value, step)\n        writer.add_histogram('expected_value', expected_value, step)\n\n    # --------------------------------------------------------#\n    # Policy learning\n\n    gen_action = policy_net(state)\n    policy_loss = -value_net(state, gen_action)\n\n    if not learn:\n        debug['gen_action'] = gen_action\n        writer.add_histogram('policy_loss', policy_loss, step)\n        writer.add_figure('next_action',\n                          recnn.utils.pairwise_distances_fig(gen_action[:50]), step)\n\n    policy_loss = policy_loss.mean()\n\n    if learn and step % params['policy_step'] == 0:\n        policy_optimizer.zero_grad()\n        policy_loss.backward()\n        torch.nn.utils.clip_grad_norm_(policy_net.parameters(), 1.0)  # clip policy gradients to unit norm\n        policy_optimizer.step()\n\n        soft_update(value_net, target_value_net, soft_tau=params['soft_tau'])\n        soft_update(policy_net, target_policy_net, soft_tau=params['soft_tau'])\n\n    # don't forget the search loss here!\n    losses = {'value': value_loss.item(), 'policy': policy_loss.item(), 'step': step}\n    recnn.utils.write_losses(writer, losses, kind='train' if learn else 'test')\n    return losses", "_____no_output_____" ], [ "# === ddpg settings ===\n\nparams = {\n    'gamma'      : 0.99,\n    'min_value'  : -10,\n    'max_value'  : 10,\n    'policy_step': 10,\n    'soft_tau'   : 0.001,\n\n    'policy_lr'  : 1e-5,\n    'value_lr'   : 1e-5,\n    'search_lr'  : 1e-5,\n    'actor_weight_init': 54e-2,\n    'search_weight_init': 54e-2,\n    'critic_weight_init': 6e-1,\n}\n\n# === end ===", "_____no_output_____" ], [ "value_net = Critic(1290, 128, 256, params['critic_weight_init']).to(cuda)\npolicy_net = Actor(1290, 128, 256, params['actor_weight_init']).to(cuda)\nsearch_net = SearchK(1290, 128, 2048, topK=10, init_w=params['search_weight_init']).to(cuda)\n\ntarget_value_net = Critic(1290, 128, 256).to(cuda)\ntarget_policy_net = Actor(1290, 128, 256).to(cuda)\ntarget_search_net = SearchK(1290, 128, 2048, topK=10).to(cuda)\n\nad = recnn.nn.models.AnomalyDetector().to(cuda)\nad.load_state_dict(torch.load('../../models/anomaly.pt'))\nad.eval()\n\ntarget_policy_net.eval()\ntarget_value_net.eval()\n\nsoft_update(value_net, target_value_net, soft_tau=1.0)\nsoft_update(policy_net, target_policy_net, soft_tau=1.0)\nsoft_update(search_net, target_search_net, soft_tau=1.0)\n\nvalue_criterion = 
nn.MSELoss()\nsearch_criterion = nn.MSELoss()\n\n# from good to bad: Ranger Radam Adam RMSprop\nvalue_optimizer = optim.Ranger(value_net.parameters(),\n lr=params['value_lr'], weight_decay=1e-2)\npolicy_optimizer = optim.Ranger(policy_net.parameters(),\n lr=params['policy_lr'], weight_decay=1e-5)\nsearch_optimizer = optim.Ranger(search_net.parameters(),\n weight_decay=1e-5,\n lr=params['search_lr'])\n\nloss = {\n 'test': {'value': [], 'policy': [], 'search': [], 'step': []},\n 'train': {'value': [], 'policy': [], 'search': [], 'step': []}\n }\n\ndebug = {}\n\nwriter = SummaryWriter(log_dir='../../runs')\nplotter = recnn.utils.Plotter(loss, [['value', 'policy', 'search']],)", "_____no_output_____" ], [ "for epoch in range(n_epochs):\n for batch in tqdm(env.train_dataloader):\n loss = ddpg_sn_update(batch, params, step=step)\n plotter.log_losses(loss)\n step += 1\n if step % plot_every == 0:\n clear_output(True)\n print('step', step)\n test_loss = run_tests()\n plotter.log_losses(test_loss, test=True)\n plotter.plot_loss()\n if step > 1000:\n assert False", "_____no_output_____" ], [ "torch.save(value_net.state_dict(), \"../../models/ddpg_value.pt\")\ntorch.save(policy_net.state_dict(), \"../../models/ddpg_policy.pt\")", "_____no_output_____" ] ], [ [ "# Reconstruction error", "_____no_output_____" ] ], [ [ "gen_actions = debug['next_action']\ntrue_actions = env.base.embeddings.numpy()\n\n\nad = recnn.nn.AnomalyDetector().to(cuda)\nad.load_state_dict(torch.load('../../models/anomaly.pt'))\nad.eval()\n\nplotter.plot_kde_reconstruction_error(ad, gen_actions, true_actions, cuda)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec867feca153d127d599e46b315a42f75efc2635
15,741
ipynb
Jupyter Notebook
2017/ferran/day13/day13.ipynb
bbglab/adventofcode
65b6d8331d10f229b59232882d60024b08d69294
[ "MIT" ]
null
null
null
2017/ferran/day13/day13.ipynb
bbglab/adventofcode
65b6d8331d10f229b59232882d60024b08d69294
[ "MIT" ]
null
null
null
2017/ferran/day13/day13.ipynb
bbglab/adventofcode
65b6d8331d10f229b59232882d60024b08d69294
[ "MIT" ]
3
2016-12-02T09:20:42.000Z
2021-12-01T13:31:07.000Z
25.145367
107
0.436567
[ [ [ "# Packet Scanners", "_____no_output_____" ], [ "## Part 1", "_____no_output_____" ] ], [ [ "import csv\nfrom collections import defaultdict\n\ndef parse_scanners(input_file):\n scanners = defaultdict(int)\n with open(input_file, 'rt') as f_input:\n csv_reader = csv.reader(f_input, delimiter=' ')\n for l in csv_reader:\n scanners[int(l[0].rstrip(':'))] = int(l[1].rstrip())\n return scanners", "_____no_output_____" ], [ "def tick(lrank, time):\n r = time % (2 * (lrank - 1))\n return (r <= lrank - 1) * r + (r > lrank - 1) * (2 * (lrank - 1) - r)\n\n\ndef get_state(time, scanners): \n state = dict(zip(list(scanners.keys()), [0] * len(scanners)))\n if time == 0:\n return state\n elif time > 0:\n for t in range(time + 1):\n for scanner in scanners:\n state[scanner] = tick(scanners[scanner], t)\n return state", "_____no_output_____" ] ], [ [ "Some nice time-lapse plotting :)", "_____no_output_____" ] ], [ [ "from time import sleep\n\ndef print_state(time, scanners):\n stt = get_state(time + 1, scanners)\n depths = range(max(list(scanners.keys())) + 1)\n depths_printable = '\\t'.join(map(str, depths))\n print(depths_printable)\n max_range = max(list(scanners.values()))\n printable = '\\t'.join(['(S)' if ((scanners[j] >= 1) and (stt[j] == 0) and (time == j)) \\\n else '[S]' if ((scanners[j] >= 1) and (stt[j] == 0) and (time != j)) \\\n else '( )' if ((scanners[j] >= 1) and (stt[j] != 0) and (time == j)) \\\n else '[ ]' if ((scanners[j] >= 1) and (stt[j] != 0) and (time != j)) \\\n else '(.)' if (time == j) \\\n else '...' for j in depths])\n print(printable)\n for i in range(2, max_range + 1):\n printable = '\\t'.join(['[S]' if ((scanners[j] >= i) and (stt[j] == i-1)) \\\n else '[ ]' if ((scanners[j] >= i) and (stt[j] != i-1)) \\\n else ' ' for j in depths])\n print(printable)\n\ndef timelapse(input_path): \n scanners = parse_scanners(input_path)\n for t in range(max(list(scanners.keys())) + 10):\n if t > 0: sleep(1)\n print('After Picosecond {}:'.format(t))\n print_state(t, scanners)", "_____no_output_____" ] ], [ [ "### Test", "_____no_output_____" ] ], [ [ "timelapse('input.test1.txt')", "After Picosecond 0:\n0\t1\t2\t3\t4\t5\t6\n( )\t[ ]\t...\t...\t[ ]\t...\t[ ]\n[S]\t[S]\t \t \t[S]\t \t[S]\n[ ]\t \t \t \t[ ]\t \t[ ]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 1:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t(S)\t...\t...\t[ ]\t...\t[ ]\n[ ]\t[ ]\t \t \t[ ]\t \t[ ]\n[S]\t \t \t \t[S]\t \t[S]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 2:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t[ ]\t(.)\t...\t[ ]\t...\t[ ]\n[S]\t[S]\t \t \t[ ]\t \t[ ]\n[ ]\t \t \t \t[ ]\t \t[ ]\n \t \t \t \t[S]\t \t[S]\nAfter Picosecond 3:\n0\t1\t2\t3\t4\t5\t6\n[S]\t[S]\t...\t(.)\t[ ]\t...\t[ ]\n[ ]\t[ ]\t \t \t[ ]\t \t[ ]\n[ ]\t \t \t \t[S]\t \t[S]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 4:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t[ ]\t...\t...\t( )\t...\t[ ]\n[S]\t[S]\t \t \t[S]\t \t[S]\n[ ]\t \t \t \t[ ]\t \t[ ]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 5:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t[S]\t...\t...\t[S]\t(.)\t[S]\n[ ]\t[ ]\t \t \t[ ]\t \t[ ]\n[S]\t \t \t \t[ ]\t \t[ ]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 6:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t[ ]\t...\t...\t[ ]\t...\t( )\n[S]\t[S]\t \t \t[S]\t \t[S]\n[ ]\t \t \t \t[ ]\t \t[ ]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 7:\n0\t1\t2\t3\t4\t5\t6\n[S]\t[S]\t...\t...\t[ ]\t...\t[ ]\n[ ]\t[ ]\t \t \t[ ]\t \t[ ]\n[ ]\t \t \t \t[S]\t \t[S]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 8:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t[ ]\t...\t...\t[ ]\t...\t[ ]\n[S]\t[S]\t \t \t[ ]\t \t[ ]\n[ ]\t \t \t \t[ ]\t \t[ ]\n \t \t \t \t[S]\t \t[S]\nAfter 
Picosecond 9:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t[S]\t...\t...\t[ ]\t...\t[ ]\n[ ]\t[ ]\t \t \t[ ]\t \t[ ]\n[S]\t \t \t \t[S]\t \t[S]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 10:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t[ ]\t...\t...\t[ ]\t...\t[ ]\n[S]\t[S]\t \t \t[S]\t \t[S]\n[ ]\t \t \t \t[ ]\t \t[ ]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 11:\n0\t1\t2\t3\t4\t5\t6\n[S]\t[S]\t...\t...\t[S]\t...\t[S]\n[ ]\t[ ]\t \t \t[ ]\t \t[ ]\n[ ]\t \t \t \t[ ]\t \t[ ]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 12:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t[ ]\t...\t...\t[ ]\t...\t[ ]\n[S]\t[S]\t \t \t[S]\t \t[S]\n[ ]\t \t \t \t[ ]\t \t[ ]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 13:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t[S]\t...\t...\t[ ]\t...\t[ ]\n[ ]\t[ ]\t \t \t[ ]\t \t[ ]\n[S]\t \t \t \t[S]\t \t[S]\n \t \t \t \t[ ]\t \t[ ]\nAfter Picosecond 14:\n0\t1\t2\t3\t4\t5\t6\n[ ]\t[ ]\t...\t...\t[ ]\t...\t[ ]\n[S]\t[S]\t \t \t[ ]\t \t[ ]\n[ ]\t \t \t \t[ ]\t \t[ ]\n \t \t \t \t[S]\t \t[S]\nAfter Picosecond 15:\n0\t1\t2\t3\t4\t5\t6\n[S]\t[S]\t...\t...\t[ ]\t...\t[ ]\n[ ]\t[ ]\t \t \t[ ]\t \t[ ]\n[ ]\t \t \t \t[S]\t \t[S]\n \t \t \t \t[ ]\t \t[ ]\n" ] ], [ [ "Trip severity calculator:", "_____no_output_____" ] ], [ [ "def trip_severity(input_path):\n severity = 0\n scanners = parse_scanners(input_path)\n layers = max(list(scanners.keys()))\n for t in range(layers + 1):\n if scanners[t] != 0:\n tick_before = tick(scanners[t], t)\n tick_now = tick(scanners[t], t + 1)\n if (tick_before == 0):\n severity += scanners[t] * t\n return severity", "_____no_output_____" ] ], [ [ "### Test", "_____no_output_____" ] ], [ [ "assert(trip_severity('input.test1.txt') == 24)", "_____no_output_____" ] ], [ [ "### Solution", "_____no_output_____" ] ], [ [ "trip_severity('input.txt')", "_____no_output_____" ] ], [ [ "## Part 2A: Simple solution based on the trip severity calculator", "_____no_output_____" ] ], [ [ "def trip_caught_delayed(delay):\n severity = 0\n caught = False\n layers = max(list(scanners.keys()))\n for t in range(0, layers + 1):\n if scanners[t] != 0:\n tick_before = tick(scanners[t], t + delay)\n if (tick_before == 0):\n caught = True\n severity += scanners[t] * t\n return caught", "_____no_output_____" ], [ "def minimum_delay():\n delay = 0\n while trip_caught_delayed(delay):\n delay += 1\n return delay", "_____no_output_____" ] ], [ [ "### Test", "_____no_output_____" ] ], [ [ "%%time\nscanners = parse_scanners('input.test1.txt')\ntrip_caught_delayed(10)", "CPU times: user 0 ns, sys: 0 ns, total: 0 ns\nWall time: 437 µs\n" ], [ "%%time\nminimum_delay()", "CPU times: user 0 ns, sys: 0 ns, total: 0 ns\nWall time: 359 µs\n" ] ], [ [ "### Solution", "_____no_output_____" ] ], [ [ "%%time\nscanners = parse_scanners('input.txt')\nprint(minimum_delay())", "3964778\nCPU times: user 2min 6s, sys: 0 ns, total: 2min 6s\nWall time: 2min 6s\n" ] ], [ [ "## Part 2B: A little bit more self contained and efficient :)", "_____no_output_____" ] ], [ [ "def alt_minimum_delay():\n delay = 0\n caught = True\n layers = list(scanners.keys())\n while (delay >= 0) and caught:\n caught = False\n for j in layers:\n if (delay + 1 + j) % (2 * scanners[j] - 2) == 0:\n caught = True\n delay += 1\n return delay", "_____no_output_____" ] ], [ [ "### Test", "_____no_output_____" ] ], [ [ "%%time\nscanners = parse_scanners('input.test1.txt')\nprint(alt_minimum_delay())", "10\nCPU times: user 0 ns, sys: 0 ns, total: 0 ns\nWall time: 1.9 ms\n" ] ], [ [ "### Solution", "_____no_output_____" ] ], [ [ "%%time\nscanners = parse_scanners('input.txt')\nprint(alt_minimum_delay())", 
"3964778\nCPU times: user 40.6 s, sys: 0 ns, total: 40.6 s\nWall time: 40.6 s\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec869b78bb317e1511541f43fd41ccdf2de26748
58,733
ipynb
Jupyter Notebook
2.Data_Structure_output.ipynb
Sagar-Gilda/Python
da495ae9fb7696e6eb7f96a6d19d4912c637d598
[ "MIT" ]
null
null
null
2.Data_Structure_output.ipynb
Sagar-Gilda/Python
da495ae9fb7696e6eb7f96a6d19d4912c637d598
[ "MIT" ]
null
null
null
2.Data_Structure_output.ipynb
Sagar-Gilda/Python
da495ae9fb7696e6eb7f96a6d19d4912c637d598
[ "MIT" ]
null
null
null
23.050628
481
0.410144
[ [ [ "# <center> <u>Data Structures <u> </center>", "_____no_output_____" ], [ "## 1. Lists\n\nReference:- https://www.youtube.com/watch?v=ohCDWZgNIU0\n", "_____no_output_____" ], [ "<p style='text-align: right;'> 1+1+1 = 3 points</p>\n", "_____no_output_____" ], [ "#### 1.1 Create an empty list with the name ‘a’, print the value of a and type(a).\n", "_____no_output_____" ] ], [ [ "# create empty list, name it 'a'\n\na = []", "_____no_output_____" ], [ "# print the value of a\n\na", "_____no_output_____" ], [ "# print the type of a\ntype(a)\n", "_____no_output_____" ] ], [ [ "#### 1.2.Create a list , languages = ['R','Python', 'SAS', 'Scala', 42], ", "_____no_output_____" ], [ "<p style='text-align: right;'> 1+1+1+1+1+1+1+1 = 7 points</p>\n", "_____no_output_____" ] ], [ [ "#code here\nlanguages = ['R','Python', 'SAS', 'Scala', 42]", "_____no_output_____" ] ], [ [ "Print the number of elements in the list", "_____no_output_____" ] ], [ [ "#code here\nprint(len(languages))\n", "5\n" ] ], [ [ "\nUsing for loop iterate and print all the elements in the list", "_____no_output_____" ] ], [ [ "#code here\n\nfor item in languages:\n print(item)", "R\nPython\nSAS\nScala\n42\n" ] ], [ [ "Select the second item, 'Python' and store it in a new variable named 'temp'", "_____no_output_____" ] ], [ [ "#code here\ntemp = languages[1]", "_____no_output_____" ] ], [ [ "Print the value of temp and type(temp)", "_____no_output_____" ] ], [ [ "#code here\nprint(temp)\ntype(temp)\n", "Python\n" ] ], [ [ "Using list comprehension, print the last two elemets of list", "_____no_output_____" ] ], [ [ "# code here\nlanguages[-2::]", "_____no_output_____" ] ], [ [ "Append the element 'Java' in the list", "_____no_output_____" ] ], [ [ "#code here\nlanguages.append(\"Java\")", "_____no_output_____" ] ], [ [ "Remove the element 42 from the list and print the list", "_____no_output_____" ] ], [ [ "#code here\nlanguages.remove(42)\nlanguages", "_____no_output_____" ] ], [ [ "#### 1.3. Create a list, colors = [‘Red’, ‘Blue’, ‘White’]", "_____no_output_____" ], [ "<p style='text-align: right;'> 1+1+1+1+1+1+1 = 6 points</p>\n", "_____no_output_____" ] ], [ [ "#code here\ncolors = ['Red', 'Blue', 'White']", "_____no_output_____" ] ], [ [ "Append the element 'Black' to colors", "_____no_output_____" ] ], [ [ "#code here\ncolors.insert(1,'Orange')\nprint(colors)\n", "['Red', 'Orange', 'Blue', 'White']\n" ] ], [ [ "Append the color 'Orange' to second position (index=1) and print the list", "_____no_output_____" ] ], [ [ "# code here\ncolors.append('Black')", "_____no_output_____" ] ], [ [ "Print the list\n", "_____no_output_____" ] ], [ [ "# code here\ncolors", "_____no_output_____" ] ], [ [ "Create another list, colors2 = [‘Grey’, ‘Sky Blue’]", "_____no_output_____" ] ], [ [ "# code \ncolors2 = ['Grey', 'Sky Blue']\n", "_____no_output_____" ] ], [ [ "Add the elements of colors2 to colors using extend function in the list", "_____no_output_____" ] ], [ [ "#code here\ncolors.extend(colors2)", "_____no_output_____" ] ], [ [ "Print len of colors and its elements", "_____no_output_____" ] ], [ [ "# code here\nprint(len(colors))\nprint(colors)\n", "7\n['Red', 'Orange', 'Blue', 'White', 'Black', 'Grey', 'Sky Blue']\n" ] ], [ [ "Sort the list and print it.", "_____no_output_____" ] ], [ [ "# code here\ncolors.sort()\ncolors", "_____no_output_____" ] ], [ [ "#### 1.4. 
Create a string, sent = ‘Coronavirus Caused Lockdowns Around The World.”", "_____no_output_____" ], [ "<p style='text-align: right;'> 7 points</p>\n", "_____no_output_____" ] ], [ [ "# code here\nsent = 'Coronavirus Caused Lockdowns Around The World.'", "_____no_output_____" ] ], [ [ "Use split function to convert the string into a list of words and save it in variable words and print the same", "_____no_output_____" ] ], [ [ "# code here\nsent.split()", "_____no_output_____" ] ], [ [ "Using list comprehensions, convert each word in the list to lower case and store it in variable words_lower. \nPrint words_lower", "_____no_output_____" ] ], [ [ "# code here\nwords_lower = [word.lower() for word in sent.split(' ')]\nwords_lower", "_____no_output_____" ] ], [ [ "Check whether ‘country’ is in the list", "_____no_output_____" ] ], [ [ "# code here\n'country' in words_lower", "_____no_output_____" ] ], [ [ "Remove the element ‘the’ from the list and print the list.", "_____no_output_____" ] ], [ [ "# code here\nwords_lower.remove('the')\nwords_lower", "_____no_output_____" ] ], [ [ "Select the first 4 words from the list words_lower using slicing and store them in a new variable x4", "_____no_output_____" ] ], [ [ "#code here\nx4 = words_lower[0:4]", "_____no_output_____" ], [ "# print x4\nx4", "_____no_output_____" ] ], [ [ "Convert the list of elements to single string using join function and print it", "_____no_output_____" ] ], [ [ "#code here\n''.join(word + ' ' for word in words_lower)", "_____no_output_____" ] ], [ [ "## 2. Sets\n\nReference:-https://www.youtube.com/watch?v=sBvaPopWOmQ", "_____no_output_____" ], [ "#### 2.1. Create stud_grades = ['A','A','B','C','C','F']", "_____no_output_____" ], [ "<p style='text-align: right;'> 7 points</p>\n", "_____no_output_____" ] ], [ [ "#code here\nstud_grades = ['A','A','B','C','C','F']\n", "_____no_output_____" ] ], [ [ "Print the len of stud_grades", "_____no_output_____" ] ], [ [ "#code here\nlen(stud_grades)", "_____no_output_____" ] ], [ [ "Create a new variable, stud_grades_set = set(stud_grades)", "_____no_output_____" ] ], [ [ "#code here\nstud_grades_set = set(stud_grades)", "_____no_output_____" ] ], [ [ "Print stud_grades_set. ", "_____no_output_____" ] ], [ [ "#code here\nlen(stud_grades_set)", "_____no_output_____" ] ], [ [ "print the type of stud_grades and stud_grades_set and print their corresponding elements. Try to understand the difference between them.", "_____no_output_____" ] ], [ [ "#code here\nprint(type(stud_grades),stud_grades)\nprint(type(stud_grades_set),stud_grades_set)\n", "<class 'list'> ['A', 'A', 'B', 'C', 'C', 'F']\n<class 'set'> {'A', 'C', 'B', 'G'}\n" ] ], [ [ "Add a new element ‘G’ to stud_grades_set", "_____no_output_____" ] ], [ [ "#code here\nstud_grades_set.add('G')\n", "_____no_output_____" ] ], [ [ "Add element 'F' to stud_grades_set. and print it.", "_____no_output_____" ] ], [ [ "#code here\nstud_grades_set.add('F')\nprint(stud_grades_set)\n", "{'A', 'C', 'B', 'G', 'F'}\n" ] ], [ [ "!!Did you notice? set doesn't add an element if it's already present in it, unlike lists.", "_____no_output_____" ], [ "Remove ‘F’ from stud_grades_set", "_____no_output_____" ] ], [ [ "#code here\nstud_grades_set.remove('F')\n", "_____no_output_____" ] ], [ [ "Print the elements and the length of stud_grades_set", "_____no_output_____" ] ], [ [ "#code here\nstud_grades_set\n", "_____no_output_____" ] ], [ [ "#### 2.2. 
Create colors = [‘red’,’blue’,’orange’], and fruits = [‘orange’,’grapes’,’apples’] ", "_____no_output_____" ], [ "<p style='text-align: right;'> 6 points</p>\n", "_____no_output_____" ] ], [ [ "#code here\ncolors = ['red','blue','orange']\nfruits = ['orange','grapes','apples']\n", "_____no_output_____" ] ], [ [ "Print color and fruits", "_____no_output_____" ] ], [ [ "#code here\nprint(colors)\nprint(fruits)", "['red', 'blue', 'orange']\n['orange', 'grapes', 'apples']\n" ] ], [ [ "Create colors_set, and fruits_set. (using set() ) and print them", "_____no_output_____" ] ], [ [ "#code here\ncolors_set = set(colors)\nfruits_set = set(fruits)\nprint(colors_set)\nprint(fruits_set)", "{'red', 'orange', 'blue'}\n{'apples', 'grapes', 'orange'}\n" ] ], [ [ "Find the union of both the sets.", "_____no_output_____" ] ], [ [ "#code here\ncolors_set.union(fruits_set)\n", "_____no_output_____" ] ], [ [ "Find the intersection of both the sets ", "_____no_output_____" ] ], [ [ "#code here\n\ncolors_set.intersection(fruits_set)", "_____no_output_____" ] ], [ [ "Find the elements which are Fruits but not colors (using set.difference() )", "_____no_output_____" ] ], [ [ "#code here\n\nfruits_set.difference(colors_set)", "_____no_output_____" ] ], [ [ "## 3. TUPLES\n\nReference:-https://www.youtube.com/watch?v=NI26dqhs2Rk", "_____no_output_____" ], [ "#### 3.1 . Create temp = [17, ’Virat’, 50.0]", "_____no_output_____" ], [ "<p style='text-align: right;'> 7 points</p>\n", "_____no_output_____" ] ], [ [ "#code here\n\ntemp = [17, 'Virat', 50.0]\n", "_____no_output_____" ] ], [ [ "Iterate through temp and print all the items in temp", "_____no_output_____" ] ], [ [ "#code here\n\nfor i in temp:\n print(i)\n", "17\nVirat\n50.0\n" ] ], [ [ "replace first element with 11 in temp", "_____no_output_____" ] ], [ [ "#code\ntemp[0] = 11\ntemp", "_____no_output_____" ] ], [ [ "Set temp1 = tuple(temp)", "_____no_output_____" ] ], [ [ "#code here\n\ntemp1 = tuple(temp)", "_____no_output_____" ] ], [ [ "Iterate through temp1 and print all the items in temp1. ", "_____no_output_____" ] ], [ [ "#code here\nfor i in temp1:\n print(i)\n", "11\nVirat\n50.0\n" ] ], [ [ "replace first element with 17 in temp1", "_____no_output_____" ] ], [ [ "#code here\ntemp1[0] = 17\n", "_____no_output_____" ] ], [ [ "<b>Oops!! You got an error. Hey Don't worry! Its because Once a tuple is created, you cannot change its values unlike list.</b>", "_____no_output_____" ], [ "#### 3.2 . 
Create city = (\"Bangalore\", 28.9949521, 72)", "_____no_output_____" ], [ "<p style='text-align: right;'> 6 points</p>\n", "_____no_output_____" ] ], [ [ "#code here\ncity = (\"Bangalore\", 28.9949521, 72)\n", "_____no_output_____" ] ], [ [ "Print first element of city", "_____no_output_____" ] ], [ [ "#code here\ncity[0]\n", "_____no_output_____" ] ], [ [ "Create city2 = (‘Chennai’, 30.01, 74)", "_____no_output_____" ] ], [ [ "#code here\n\ncity2 = ('Chennai', 30.01, 74)", "_____no_output_____" ] ], [ [ "Create cities which consist of city and city2", "_____no_output_____" ] ], [ [ "#code here\n\ncities = (city,city2)", "_____no_output_____" ] ], [ [ "Print cities", "_____no_output_____" ] ], [ [ "#code here\ncities\n", "_____no_output_____" ] ], [ [ "Print type of first element in cities", "_____no_output_____" ] ], [ [ "#code here\ntype(cities[0])", "_____no_output_____" ] ], [ [ "print the type of cities\n", "_____no_output_____" ] ], [ [ "#code here\ntype(cities)\n", "_____no_output_____" ] ], [ [ "Hey that implies you made a nested tuples!!", "_____no_output_____" ], [ "## 4. DICT\n\nReference:-https://www.youtube.com/watch?v=XCcpzWs-CI4", "_____no_output_____" ], [ "<p style='text-align: right;'> 11 points</p>\n", "_____no_output_____" ], [ "#### 4.1 Create a dictionary d = {\"actor\":\"amir\",\"animal\":\"cat\",\"earth\":2,\"list\":[23,32,12]}", "_____no_output_____" ] ], [ [ "#code here\n\nd = {\"actor\":\"amir\",\"animal\":\"cat\",\"earth\":2,\"list\":[23,32,12]}", "_____no_output_____" ] ], [ [ "Print the value of d[0]", "_____no_output_____" ] ], [ [ "#code here\nd[0]\n", "_____no_output_____" ] ], [ [ "<b>Oops!! again an error. again a fun fact. Dictionary return the value for key if key is in the dictionary, else throws KeyError\nand we don't have key 0 here :( </b>", "_____no_output_____" ], [ "Store the value of d[‘actor’] to a new variable actor.", "_____no_output_____" ] ], [ [ "#code here\n\nd['actor']", "_____no_output_____" ] ], [ [ "Print the type of actor", "_____no_output_____" ] ], [ [ "#code here\n\ntype(d['actor'])", "_____no_output_____" ] ], [ [ "Store the value of d[‘list’] in new variable l. ", "_____no_output_____" ] ], [ [ "#code here\n\nl = d['list']\n", "_____no_output_____" ] ], [ [ "Print the type of l. 
", "_____no_output_____" ] ], [ [ "#code here\n\ntype(l)", "_____no_output_____" ] ], [ [ "Create d1 = { ‘singer’ : ‘Kr$na’ , ‘album’: ‘Still here’, ‘genre’ : ‘hip-hop’}", "_____no_output_____" ] ], [ [ "#code here\n\nd1 = { 'singer' : 'Kr$na' , 'album': 'Still here', 'genre' : 'hip-hop'}", "_____no_output_____" ] ], [ [ "Merge d1 into d.", "_____no_output_____" ] ], [ [ "#code here\n\nd.update(d1)", "_____no_output_____" ] ], [ [ "print d", "_____no_output_____" ] ], [ [ "#code here\nd\n", "_____no_output_____" ] ], [ [ "Print all the keys in d", "_____no_output_____" ] ], [ [ "#code here\nprint(d.keys())", "dict_keys(['actor', 'animal', 'earth', 'list', 'singer', 'album', 'genre'])\n" ] ], [ [ "Print all the values in d", "_____no_output_____" ] ], [ [ "#code here\nprint(d.values())\n", "dict_values(['amir', 'cat', 2, [23, 32, 12], 'Kr$na', 'Still here', 'hip-hop'])\n" ] ], [ [ "Iterate over d, and print each key, value pair as given in output", "_____no_output_____" ] ], [ [ "#code here\nfor key, value in d.items():\n print(key, \" ----> \", value)\n\n", "actor ----> amir\nanimal ----> cat\nearth ----> 2\nlist ----> [23, 32, 12]\nsinger ----> Kr$na\nalbum ----> Still here\ngenre ----> hip-hop\n" ] ], [ [ "\nCreate a string, sent = ‘Coronavirus Caused Lockdowns Around The World.”\n\nCount the number of occurences of charachters in string named \"sent\" using dictionary and print the same.\n", "_____no_output_____" ] ], [ [ "#code here\nsent = 'Coronavirus Caused Lockdowns Around The World.'\ndict = {}\nfor i in sent:\n dict[i] = sent.count(i)\nprint(dict)\n", "{'C': 2, 'o': 6, 'r': 4, 'n': 3, 'a': 2, 'v': 1, 'i': 1, 'u': 3, 's': 3, ' ': 5, 'e': 2, 'd': 4, 'L': 1, 'c': 1, 'k': 1, 'w': 1, 'A': 1, 'T': 1, 'h': 1, 'W': 1, 'l': 1, '.': 1}\n" ] ], [ [ "# --------------------------------------------------------------------", "_____no_output_____" ], [ "# Hurray!! Second milestone completed. The next challenge is waiting for you :)\n\n# --------------------------------------------------------------------\n\n-----------------\n# FeedBack\nWe hope you’ve enjoyed this course so far. We’re committed to help you use \"AI for All\" course to its full potential, so that you have a great learning experience. And that’s why we need your help in form of a feedback here.\n\nPlease fill this feedback form https://zfrmz.in/MtRG5oWXBdesm6rmSM7N", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec869c5f061055809a545b0ed08f6179a1f39b07
109,501
ipynb
Jupyter Notebook
section-04-research-and-development/04-machine-learning-pipeline-model-training.ipynb
ashwinpatil05/deploying-machine-learning-models
2776ea8ecf5b8d743f10692d837ee9786988b019
[ "BSD-3-Clause" ]
null
null
null
section-04-research-and-development/04-machine-learning-pipeline-model-training.ipynb
ashwinpatil05/deploying-machine-learning-models
2776ea8ecf5b8d743f10692d837ee9786988b019
[ "BSD-3-Clause" ]
null
null
null
section-04-research-and-development/04-machine-learning-pipeline-model-training.ipynb
ashwinpatil05/deploying-machine-learning-models
2776ea8ecf5b8d743f10692d837ee9786988b019
[ "BSD-3-Clause" ]
null
null
null
82.393529
42,080
0.739509
[ [ [ "# Machine Learning Pipeline - Model Training\n\nIn this notebook, we pick up the transformed datasets and the selected variables that we saved in the previous notebooks.", "_____no_output_____" ], [ "# Reproducibility: Setting the seed\n\nWith the aim of ensuring reproducibility between runs of the same notebook, but also between the research and production environment, for each step that includes some element of randomness, it is extremely important that we **set the seed**.", "_____no_output_____" ] ], [ [ "# to handle datasets\nimport pandas as pd\nimport numpy as np\n\n# for plotting\nimport matplotlib.pyplot as plt\n\n# to save the model\nimport joblib\n\n# to build the model\nfrom sklearn.linear_model import Lasso\n\n# to evaluate the model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# to visualise all the columns in the dataframe\npd.pandas.set_option('display.max_columns', None)", "_____no_output_____" ], [ "# load the train and test set with the engineered variables\n\n# we built and saved these datasets in a previous notebook.\n# If you haven't done so, go ahead and check the previous notebooks (step 2)\n# to find out how to create these datasets\n\nX_train = pd.read_csv('xtrain.csv')\nX_test = pd.read_csv('xtest.csv')\n\nX_train.head()", "_____no_output_____" ], [ "# load the target (remember that the target is log transformed)\ny_train = pd.read_csv('ytrain.csv')\ny_test = pd.read_csv('ytest.csv')\n\ny_train.head()", "_____no_output_____" ], [ "# load the pre-selected features\n# ==============================\n\n# we selected the features in the previous notebook (step 3)\n\n# if you haven't done so, go ahead and visit the previous notebook\n# to find out how to select the features\n\nfeatures = pd.read_csv('selected_features.csv')\nfeatures = features['0'].to_list() \n\n# display final feature set\nfeatures", "_____no_output_____" ], [ "# reduce the train and test set to the selected features\n\nX_train = X_train[features]\nX_test = X_test[features]", "_____no_output_____" ] ], [ [ "### Regularised linear regression: Lasso\n\nRemember to set the seed.", "_____no_output_____" ] ], [ [ "# set up the model\n# remember to set the random_state / seed\n\nlin_model = Lasso(alpha=0.001, random_state=0)\n\n# train the model\n\nlin_model.fit(X_train, y_train)", "_____no_output_____" ], [ "# evaluate the model:\n# ====================\n\n# remember that we log transformed the output (SalePrice)\n# in our feature engineering notebook (step 2).\n\n# In order to get the true performance of the Lasso\n# we need to transform both the target and the predictions\n# back to the original house price values.\n\n# We will evaluate performance using the mean squared error,\n# the root of the mean squared error, and r2\n\n# make predictions for train set\npred = lin_model.predict(X_train)\n\n# determine mse, rmse and r2\nprint('train mse: {}'.format(int(\n mean_squared_error(np.exp(y_train), np.exp(pred)))))\nprint('train rmse: {}'.format(int(\n mean_squared_error(np.exp(y_train), np.exp(pred), squared=False))))\nprint('train r2: {}'.format(\n r2_score(np.exp(y_train), np.exp(pred))))\nprint()\n\n# make predictions for test set\npred = lin_model.predict(X_test)\n\n# determine mse, rmse and r2\nprint('test mse: {}'.format(int(\n mean_squared_error(np.exp(y_test), np.exp(pred)))))\nprint('test rmse: {}'.format(int(\n mean_squared_error(np.exp(y_test), np.exp(pred), squared=False))))\nprint('test r2: {}'.format(\n r2_score(np.exp(y_test), np.exp(pred))))\nprint()\n\nprint('Average house price: ', int(np.exp(y_train).median()))", "train mse: 781396538\ntrain rmse: 27953\ntrain r2: 0.8748530463468015\n\ntest mse: 1060767982\ntest rmse: 32569\ntest r2: 0.8456417073258413\n\nAverage house price: 163000\n" ], [ "# let's evaluate our predictions with respect to the real sale price\nplt.scatter(y_test, lin_model.predict(X_test))\nplt.xlabel('True House Price')\nplt.ylabel('Predicted House Price')\nplt.title('Evaluation of Lasso Predictions')", "_____no_output_____" ] ], [ [ "We can see that our model is doing a pretty good job at estimating house prices.", "_____no_output_____" ] ], [ [ "y_test.reset_index(drop=True)", "_____no_output_____" ], [ "# let's evaluate the distribution of the errors: \n# they should be fairly normally distributed\n\ny_test.reset_index(drop=True, inplace=True)\n\npreds = pd.Series(lin_model.predict(X_test))\n\npreds", "_____no_output_____" ], [ "# let's evaluate the distribution of the errors: \n# they should be fairly normally distributed\n\nerrors = y_test['SalePrice'] - preds\nerrors.hist(bins=30)\nplt.show()", "_____no_output_____" ] ], [ [ "The distribution of the errors follows a Gaussian distribution quite closely. That suggests that our model is doing a good job as well.", "_____no_output_____" ], [ "### Feature importance", "_____no_output_____" ] ], [ [ "# Finally, just for fun, let's look at the feature importance\n\nimportance = pd.Series(np.abs(lin_model.coef_.ravel()))\nimportance.index = features\nimportance.sort_values(inplace=True, ascending=False)\nimportance.plot.bar(figsize=(18,6))\nplt.ylabel('Lasso Coefficients')\nplt.title('Feature Importance')", "_____no_output_____" ] ], [ [ "## Save the Model", "_____no_output_____" ] ], [ [ "# we are happy with our model, so we save it to be able\n# to score new data\n\njoblib.dump(lin_model, 'linear_regression.joblib') ", "_____no_output_____" ], [ 
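"# A quick round-trip check, as a sketch: load the persisted model back and re-score\n# the test set with it. joblib.load is the standard counterpart of joblib.dump;\n# `loaded_model` is a hypothetical name and this cell was not part of the original run.\nloaded_model = joblib.load('linear_regression.joblib')\nprint(r2_score(np.exp(y_test), np.exp(loaded_model.predict(X_test))))", "_____no_output_____" ] ], [ [ 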
"# Additional Resources\n\n\n## Feature Engineering\n\n- [Feature Engineering for Machine Learning](https://www.udemy.com/course/feature-engineering-for-machine-learning/?referralCode=A855148E05283015CF06) - Online Course\n- [Packt Feature Engineering Cookbook](https://www.packtpub.com/data/python-feature-engineering-cookbook) - Book\n- [Feature Engineering for Machine Learning: A comprehensive Overview](https://trainindata.medium.com/feature-engineering-for-machine-learning-a-comprehensive-overview-a7ad04c896f8) - Article\n- [Practical Code Implementations of Feature Engineering for Machine Learning with Python](https://towardsdatascience.com/practical-code-implementations-of-feature-engineering-for-machine-learning-with-python-f13b953d4bcd) - Article\n\n## Feature Selection\n\n- [Feature Selection for Machine Learning](https://www.udemy.com/course/feature-selection-for-machine-learning/?referralCode=186501DF5D93F48C4F71) - Online Course\n- [Feature Selection for Machine Learning: A comprehensive Overview](https://trainindata.medium.com/feature-selection-for-machine-learning-a-comprehensive-overview-bd571db5dd2d) - Article\n\n## Machine Learning\n\n- [Best Resources to Learn Machine Learning](https://trainindata.medium.com/find-out-the-best-resources-to-learn-machine-learning-cd560beec2b7) - Article\n- [Machine Learning with Imbalanced Data](https://www.udemy.com/course/machine-learning-with-imbalanced-data/?referralCode=F30537642DA57D19ED83) - Online Course", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec869ec454145cc03cf05a2cff9ecfa6371152d9
2,754
ipynb
Jupyter Notebook
docs/python/testing/test_code_speed.ipynb
revgizmo-forks/ds_notes
ffc73d06b07fb2b137e7e679d3c99dab53580afa
[ "CC0-1.0" ]
1
2020-03-18T21:13:25.000Z
2020-03-18T21:13:25.000Z
docs/python/testing/test_code_speed.ipynb
revgizmo-forks/ds_notes
ffc73d06b07fb2b137e7e679d3c99dab53580afa
[ "CC0-1.0" ]
null
null
null
docs/python/testing/test_code_speed.ipynb
revgizmo-forks/ds_notes
ffc73d06b07fb2b137e7e679d3c99dab53580afa
[ "CC0-1.0" ]
1
2020-10-17T22:00:42.000Z
2020-10-17T22:00:42.000Z
2,754
2,754
0.583152
[ [ [ "---\ntitle: \"Test Code Speed\"\nauthor: \"Chris Albon\"\ndate: 2017-12-20T11:53:49-07:00\ndescription: \"Test code speed using Python.\"\ntype: technical_note\ndraft: false\n---", "_____no_output_____" ] ], [ [ "## Preliminaries", "_____no_output_____" ] ], [ [ "import cProfile", "_____no_output_____" ] ], [ [ "## Create A Slow Function", "_____no_output_____" ] ], [ [ "def slow_function():\n total = 0.0\n \n for i, _ in enumerate(range(10000)):\n \n for j, _ in enumerate(range(1, 10000)):\n total += (i * j)\n\n return total", "_____no_output_____" ] ], [ [ "## Test The Speed Of The Function", "_____no_output_____" ] ], [ [ "cProfile.run('slow_function()', sort='time')", " 4 function calls in 13.291 seconds\n\n Ordered by: internal time\n\n ncalls tottime percall cumtime percall filename:lineno(function)\n 1 13.291 13.291 13.291 13.291 <ipython-input-2-64fc1cd43878>:1(slow_function)\n 1 0.000 0.000 13.291 13.291 {built-in method builtins.exec}\n 1 0.000 0.000 13.291 13.291 <string>:1(<module>)\n 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n\n\n" ] ], [ [ "## How To Read cProfile's Output\n\n- **ncalls:** Number of calls to the function.\n- **tottime:** Total time.\n- **percall:** Time per call.\n- **cumtime:** Total time in function and sub-functions.\n- **percall:** Time to call.\n- **:lineno(function):** Name of the operation.", "_____no_output_____" ], [ "## Alternative In Jupyter Notebook:", "_____no_output_____" ] ], [ [ "%%timeit\n\nslow_function()", "1 loop, best of 3: 12.9 s per loop\n" ] ] ]
[ "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
ec86a33c5191e619f7b74e35827e442ff64d0362
127,985
ipynb
Jupyter Notebook
2018-06-29-2.ipynb
jjAugust/word2vec-lstm-attention
8168ae64b5a7a34e3e3ea5125eee1abc84aa1671
[ "Apache-2.0" ]
2
2018-11-05T17:41:51.000Z
2022-02-21T20:53:00.000Z
.ipynb_checkpoints/2018-06-29-2-checkpoint.ipynb
jjAugust/word2vec-lstm-attention
8168ae64b5a7a34e3e3ea5125eee1abc84aa1671
[ "Apache-2.0" ]
null
null
null
.ipynb_checkpoints/2018-06-29-2-checkpoint.ipynb
jjAugust/word2vec-lstm-attention
8168ae64b5a7a34e3e3ea5125eee1abc84aa1671
[ "Apache-2.0" ]
1
2018-11-05T17:41:52.000Z
2018-11-05T17:41:52.000Z
57.833258
23,396
0.656632
[ [ [ "# LSTM with window-regression framing (code adapted from the international airline passengers example)\nimport numpy\nimport numpy as np\nimport keras\nimport matplotlib.pyplot as plt\nfrom pandas import read_csv\nimport math\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Dropout\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.cross_validation import train_test_split\nfrom keras.utils.vis_utils import plot_model", "/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n/usr/local/lib/python3.5/dist-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n \"This module will be removed in 0.20.\", DeprecationWarning)\n" ], [ "# convert an array of values into a dataset matrix\ndef create_dataset(dataset, look_back=1):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-look_back-1):\n\t\ta = dataset[i:(i+look_back), 0]\n\t\tdataX.append(a)\n\t\tdataY.append(dataset[i + look_back, 0])\n\treturn numpy.array(dataX), numpy.array(dataY)", "_____no_output_____" ], [ " def create_dataset2(dataset, look_back=1):\n dataX, dataY = [], []\n dataZ=[]\n for i in range(len(dataset)-look_back-1):\n a = dataset[i:(i+look_back), 1]\n dataX.append(a)\n b = dataset[i + look_back, 0]\n dataZ.append(b)\n dataY.append(dataset[i + look_back, 1])\n return numpy.array(dataX), numpy.array(dataY),numpy.array(dataZ) ", "_____no_output_____" ], [ "# fix random seed for reproducibility\nnumpy.random.seed(7)\n# load the dataset\n# dataframe = read_csv('w_d_v.csv', usecols=[7], engine='python', skipfooter=3)\ndataframe = read_csv('t6192.csv', usecols=[8,0], engine='python',dtype=np.int32,skiprows=1,header=None)\npattern = read_csv('t6192.csv', usecols=[7], engine='python',dtype=np.int32,skiprows=1,header=None)\nMatrix = read_csv('matrix621.csv', usecols=[2,3,4,5,6,7,8,9,10,11,12,13], engine='python',header=None)\nall_data = read_csv('all_data.csv', usecols=[7], engine='python')\ndataset = dataframe.values\nMatrix = Matrix.values\npattern=pattern.values\nallData=all_data.values\nMatrix=np.append([[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]],Matrix,axis=0)\n\nweek_info = read_csv('t6192.csv', usecols=[11], engine='python',dtype=np.int32,skiprows=1,header=None)\nStart_info = read_csv('t6192.csv', usecols=[12], engine='python',dtype=np.int32,skiprows=1,header=None)\nEnd_info = read_csv('t6192.csv', usecols=[13], engine='python',dtype=np.int32,skiprows=1,header=None)\nStay_info = read_csv('t6192.csv', usecols=[14], engine='python',dtype=np.int32,skiprows=1,header=None)\nWeather_info = read_csv('t6192.csv', usecols=[15], engine='python',dtype=np.int32,skiprows=1,header=None)\n\nweek_info = week_info.values\nStart_info = Start_info.values\nEnd_info=End_info.values\nStay_info=Stay_info.values\nWeather_info=Weather_info.values", "_____no_output_____" ], [ "week_info=week_info[3:-1]\nStart_info=Start_info[3:-1]\nEnd_info=End_info[2:-2]\nStay_info=Stay_info[2:-2]\nWeather_info=Weather_info[3:-1]", "_____no_output_____" ], [ "print(End_info.shape)\nprint(Start_info.shape)", "(32609, 1)\n(32609, 1)\n" ], [ "look_back = 3\n\ntrainX, trainY, trainZ = create_dataset2(dataset, look_back)\nAllX, AllY = create_dataset(allData, look_back)\npatternX, patternY = create_dataset(pattern, look_back)\n\ntrainY=numpy.reshape(trainY,(trainY.shape[0],-1))\nAllY=numpy.reshape(AllY,(AllY.shape[0],-1))\n\n", "_____no_output_____" ], [ "trainY[-10:]\ntrainX[-10:]\nStay_info[-10:]", "_____no_output_____" ], [ "encX = OneHotEncoder()\nencX.fit(trainX)\nencY = OneHotEncoder()\nencY.fit(trainY)", "_____no_output_____" ], [ "trainX_one=encX.transform(trainX).toarray()\ntrain_X=numpy.reshape(trainX_one,(trainX_one.shape[0],look_back,-1))\ntrain_Y=encY.transform(trainY).toarray()", "_____no_output_____" ] ], [ [ "# Can't do the train/test split directly yet; the other feature dimensions have not been aligned to match\na_train, a_test, b_train, b_test = train_test_split(train_X, train_Y, test_size=0.1, random_state=42)", "_____no_output_____" ] ], [ [ "emdedding_size=Matrix.shape[1] #\nvo_len=look_back #\nvocab_size=Matrix.shape[0] #", "_____no_output_____" ], [ "a_train=trainX.reshape(-1,3,1)\na_train=a_train.reshape(-1,3)\nb_train=train_Y\nk=trainZ\nk=k.reshape(-1,1)\npretrained_weights=Matrix\nLSTM_size=32", "_____no_output_____" ] ], [ [ "print(\"------------------------\")\nprint(\"in size:\")\nprint(a_train.shape)\nprint(\"------------------------\")\nprint(\"out size:\")\nprint(b_train.shape)\nprint(\"------------------------\")\nprint(\"user size:\")\nprint(k.shape)\nprint(\"------------------------\")", "_____no_output_____" ] ], [ [ "N_test=-1\nprint(\"------------------------\")\nprint(\"input semantic example:\")\nfor x in a_train[N_test]:\n print(pretrained_weights[x])\nprint(\"------------------------\")\nprint(\"user_id example:\")\nprint(k[N_test])\nprint(\"------------------------\")\nprint(\"input move pattern example:\")\nprint(a_train[N_test])\nprint(\"------------------------\")\nprint(\"input week_info example:\")\nprint(week_info[N_test])\nprint(\"----------------\")\nprint(\"input Start time example:\")\nprint(\"Start time sector 4 means 0-6 o'clock in the morning, 3 means 6-9 o'clock in the morning, 2 means 9-17 working hours, 1 means 17-24 points at night +happyHour\")\nprint(Start_info[N_test])\nprint(\"----------------\")\nprint(\"input End time example:\")\nprint(End_info[N_test])\nprint(\"----------------\")\nprint(\"input Stay time example:\")\nprint(\"per sector means minute\")\nprint(Stay_info[N_test])\nprint(\"----------------\")\nprint(\"input Weather_info example:\")\nprint(\"1 fog; 2 fog rain; 3 Fog, Rain, Snow; 4 Fog, Rain, Thunderstorm; 5 snow; 6 rain; 7 Thunderstorm; 8 Hail; 9 rain + Thunderstorm; 10 fog + snow; 11 rain + snow\")\nprint(Weather_info[N_test])\nprint(\"------------------------\")", "------------------------\ninput semantic example:\n[0.3220339 0.0338983 0.01694915 0. 0. 0.0338983\n 0. 0.06779661 0.30508474 0.08474577 0.13559322 0. 
]\n------------------------\nuser_id example:\n181\n------------------------\ninput move pattern example:\n[167 139 167]\n------------------------\ninput week_info example:\n[7]\n----------------\ninput Start time example:\nStart time sector 4 means 0-6 o'clock in the morning, 3 means 6-9 o'clock in the morning, 2 means 9-17 working hours, 1 means 17-24 points at night +happyHour\n[4]\n----------------\ninput End time example:\n[4]\n----------------\ninput Stay time example:\nper sector means minute\n[25]\n----------------\ninput Weather_info example:\n1 fog; 2 fog rain; 3 Fog, Rain, Snow; 4 Fog, Rain, Thunderstorm; 5 snow; 6 rain; 7 Thunderstorm; 8 Hail; 9 rain + Thunderstorm; 10 fog + snow; 11 rain + snow\n[0]\n------------------------\n" ], [ "print(\"------------------------\")\n# print(\"output encode example:\")\n# print(b_train[0])\n# print(\"------------------------\")\n\nprint(\"output decode example:\")\nprint(trainY[N_test])\nprint(\"------------------------\")", "------------------------\noutput decode example:\n[167]\n------------------------\n" ], [ 
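"# A sketch of the start/end time discretisation described in the printout above.\n# to_time_sector is a hypothetical helper reconstructing the stated rule; the real\n# preprocessing of t6192.csv was done outside this notebook.\ndef to_time_sector(hour):\n    if hour < 6:\n        return 4   # 0-6, early morning\n    if hour < 9:\n        return 3   # 6-9, morning\n    if hour < 17:\n        return 2   # 9-17, working hours\n    return 1       # 17-24, night + happy hour\nprint([to_time_sector(h) for h in (5, 8, 12, 23)])  # [4, 3, 2, 1]", "_____no_output_____" ] ], [ [ 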
"print(\"------------------------\")\nprint(\"emdedding_size:\")\nprint(emdedding_size)\nprint(\"------------------------\")\n\nprint(\"vocab_length:\")\nprint(vo_len)\nprint(\"------------------------\")\n\nprint(\"vocab_size:\")\nprint(vocab_size)\nprint(\"------------------------\")", "_____no_output_____" ] ], [ [ "print(\"Using T+S+W\")\nfrom keras.layers import Input, Embedding, LSTM, Dense,Merge,Flatten\nfrom keras.models import Model\n# a_train=a_train.reshape(-1,3)\n\nemdedding_size=100\nLocation_size=201\nUser_size=183\nLSTM_size=200\nTime_size=5\nWeek_size=8\nStay_size=1440\npretrained_weights_size=12\nWeather_type=13\n\n# Move_Pattern Sequences\ninput_pattern = Input(shape=(3, ),name=\"Move_Pattern_Input\")\n# User-Id\nUser_id = Input(shape=(1,),name=\"User_id_Input\")\n# Temporary\nStart_Time = Input(shape=(1,),name=\"Start_Time_Input\")\nEnd_Time = Input(shape=(1,),name=\"End_Time_Input\")\nStay_Time = Input(shape=(1,),name=\"Stay_Time_Input\")\nDate_Info = Input(shape=(1,),name=\"Date_Info_Input\")#1-7 Monday to Sunday\n# Spatial\nLocation_Info = Input(shape=(3,),name=\"Semantic_Location_Info_Input\")#12 categories Interest_point\n\n# Weather\nWeather_Info = Input(shape=(1,),name=\"Weather_Info_Input\")#1-7 Weather Type\n\n#Spatial\nem = Embedding(input_dim=Location_size, output_dim=emdedding_size,input_length=vo_len,name=\"Spatial_Pattern\")(input_pattern)\nlstm_out = LSTM(LSTM_size,name=\"Spatial_Feature\")(em)\nlstm_out = Dropout(0.2)(lstm_out)\n\n#User_id\nUser_em = Embedding(input_dim=User_size, output_dim=emdedding_size,input_length=1,name=\"User_id\")(User_id)\nUser_em=Flatten(name=\"User_Feature\")(User_em)\n\n#Temporary\nemStart_Time = Embedding(input_dim=Time_size, output_dim=emdedding_size,input_length=1,name=\"Start_Time\")(Start_Time)\nemEnd_Time = Embedding(input_dim=Time_size, output_dim=emdedding_size,input_length=1,name=\"End_Time\")(End_Time)\nemStay_Time = Embedding(input_dim=Stay_size, output_dim=emdedding_size,input_length=1,name=\"Stay_Time\")(Stay_Time)\nemDate_Info = Embedding(input_dim=Week_size, output_dim=emdedding_size,input_length=1,name=\"Date_Info\")(Date_Info)\nTemporary = keras.layers.concatenate([emStart_Time, emEnd_Time,emStay_Time,emDate_Info],name=\"Temporary_Feature_Model\")\nTemporary = Flatten(name=\"Temporary_Feature\")(Temporary)\n\n#Semantic\n
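# This branch's location embedding is initialised from the precomputed 12-d\n# interest-point distribution matrix (pretrained_weights); a small LSTM then\n# summarises the semantics of the 3-step window.\n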
Location_Semantic=Embedding(input_dim=Location_size, output_dim=pretrained_weights_size,input_length=vo_len,weights=[pretrained_weights],name=\"Semantic_Location_Info\")(Location_Info)\nSemantic_lstm = LSTM(36,return_sequences=True,name=\"Semantic_Feature_Model\")(Location_Semantic)\nLocation_Semantic=Flatten(name=\"Semantic_Feature\")(Semantic_lstm)\n\n#Weather\nWeather_Em = Embedding(input_dim=Weather_type, output_dim=emdedding_size,input_length=1,name=\"Weather_info\")(Weather_Info)\nWeather_Em=Flatten(name=\"Weather\")(Weather_Em)\n\n\n\nx = keras.layers.concatenate([lstm_out, User_em, Temporary,Location_Semantic,Weather_Em])\nx=Dense(808,activation='relu',name=\"C\")(x)\nx=Dense(404,activation='relu',name=\"C2\")(x)\nx=Dense(202,activation='relu',name=\"C3\")(x)\nx=Dropout(0.2)(x)\nx=Dense(b_train.shape[1],activation='softmax',name='x')(x)\n\n\nmodel = Model(inputs=[input_pattern,User_id,Start_Time,End_Time,Stay_Time,Date_Info,Location_Info,Weather_Info], outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\n# print(model.summary()) # Summarize Model\nplot_model(model, to_file='t_lstm_T+S+W.png',show_shapes=True)", "Using T+S+W\n" ], [ "newc=np.c_[a_train,k,Start_info,End_info,Stay_info,week_info,Weather_info,trainY]\nprint(newc[:2])", "[[167 157 150 0 2 2 38 4 6 150]\n [157 150 150 0 2 4 980 7 0 31]]\n" ], [ "history_T_S = model.fit([a_train,k,Start_info,End_info,Stay_info,week_info,a_train,Weather_info], b_train, epochs=100, batch_size=512, verbose=2)", "Epoch 1/100\n - 6s - loss: 4.4769 - acc: 0.0741\nEpoch 2/100\n - 5s - loss: 3.6117 - acc: 0.1915\nEpoch 3/100\n - 5s - loss: 3.2341 - acc: 0.2399\nEpoch 4/100\n" ], [ "print(trainY[0])\nprint(np.argmax(b_train[0]))", "[150]\n146\n" ], [ "test=np.asarray([[30,170,145]])\nuserid=np.asarray([163])\nStart_info1=np.asarray([1])\nweek_info1=np.asarray([5])\nEnd_info1=np.asarray([1])\nStay_info1=np.asarray([85])\nWeather_info1=np.asarray([0])", "_____no_output_____" ], [ "testout=model.predict([test,userid,Start_info1,End_info1,week_info1,test,Weather_info1])\nprint(np.argmax(testout))", "141\n" ], [ "print(\"Using T+S+W\")\nfrom keras.layers import Input, Embedding, LSTM, Dense,Merge,Flatten\nfrom keras.models import Model\n# a_train=a_train.reshape(-1,3)\n\nemdedding_size=100\nLocation_size=201\nUser_size=183\nLSTM_size=200\nTime_size=5\nWeek_size=8\nStay_size=1440\npretrained_weights_size=12\nWeather_type=13\n\n# Move_Pattern Sequences\ninput_pattern = Input(shape=(3, ),name=\"Move_Pattern_Input\")\n# User-Id\nUser_id = Input(shape=(1,),name=\"User_id_Input\")\n# Temporary\nStart_Time = Input(shape=(1,),name=\"Start_Time_Input\")\nEnd_Time = Input(shape=(1,),name=\"End_Time_Input\")\nStay_Time = Input(shape=(1,),name=\"Stay_Time_Input\")\nDate_Info = Input(shape=(1,),name=\"Date_Info_Input\")#1-7 Monday to Sunday\n# Spatial\nLocation_Info = Input(shape=(3,),name=\"Semantic_Location_Info_Input\")#12 categories Interest_point\n\n# Weather\nWeather_Info = Input(shape=(1,),name=\"Weather_Info_Input\")#1-7 Weather Type\n\n#Spatial\nem = Embedding(input_dim=Location_size, output_dim=emdedding_size,input_length=vo_len,name=\"Spatial_Pattern\")(input_pattern)\nlstm_out = LSTM(LSTM_size,name=\"Spatial_Feature\")(em)\nlstm_out = Dropout(0.2)(lstm_out)\n\n#User_id\nUser_em = Embedding(input_dim=User_size, output_dim=emdedding_size,input_length=1,name=\"User_id\")(User_id)\nUser_em=Flatten(name=\"User_Feature\")(User_em)\n\n#Temporary\n
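# Ablation note: in this second variant the Stay_Time feature is dropped from the\n# temporal branch (emStay_Time stays commented out below, and Stay_Time is not\n# passed to Model(...)).\n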
emStart_Time = Embedding(input_dim=Time_size, output_dim=emdedding_size,input_length=1,name=\"Start_Time\")(Start_Time)\nemEnd_Time = Embedding(input_dim=Time_size, output_dim=emdedding_size,input_length=1,name=\"End_Time\")(End_Time)\n# emStay_Time = Embedding(input_dim=Stay_size, output_dim=emdedding_size,input_length=1,name=\"Stay_Time\")(Stay_Time)\nemDate_Info = Embedding(input_dim=Week_size, output_dim=emdedding_size,input_length=1,name=\"Date_Info\")(Date_Info)\nTemporary = keras.layers.concatenate([emStart_Time, emEnd_Time,emDate_Info],name=\"Temporary_Feature_Model\")\nTemporary = Flatten(name=\"Temporary_Feature\")(Temporary)\n\n#Semantic\nLocation_Semantic=Embedding(input_dim=Location_size, output_dim=pretrained_weights_size,input_length=vo_len,weights=[pretrained_weights],name=\"Semantic_Location_Info\")(Location_Info)\nSemantic_lstm = LSTM(36,return_sequences=True,name=\"Semantic_Feature_Model\")(Location_Semantic)\nLocation_Semantic=Flatten(name=\"Semantic_Feature\")(Semantic_lstm)\n\n#Weather\nWeather_Em = Embedding(input_dim=Weather_type, output_dim=emdedding_size,input_length=1,name=\"Weather_info\")(Weather_Info)\nWeather_Em=Flatten(name=\"Weather\")(Weather_Em)\n\n\n\nx = keras.layers.concatenate([lstm_out, User_em, Temporary,Location_Semantic,Weather_Em])\nx=Dense(808,activation='relu',name=\"C\")(x)\nx=Dense(404,activation='relu',name=\"C2\")(x)\nx=Dense(202,activation='relu',name=\"C3\")(x)\nx=Dropout(0.2)(x)\nx=Dense(b_train.shape[1],activation='softmax',name='x')(x)\n\n\nmodel = Model(inputs=[input_pattern,User_id,Start_Time,End_Time,Date_Info,Location_Info,Weather_Info], outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\n# print(model.summary()) # Summarize Model\nplot_model(model, to_file='t_lstm_T+S+W.png',show_shapes=True)", "Using T+S+W\n" ], [ "history_T_S_W = model.fit([a_train,k,Start_info,End_info,week_info,a_train,Weather_info], b_train, epochs=100, batch_size=512, verbose=2)", "Epoch 1/100\n - 5s - loss: 2.4706 - acc: 0.3612\nEpoch 2/100\n - 5s - loss: 2.4130 - acc: 0.3692\nEpoch 3/100\n - 5s - loss: 2.3757 - acc: 0.3756\nEpoch 4/100\n - 5s - loss: 2.3209 - acc: 0.3854\nEpoch 5/100\n - 5s - loss: 2.2613 - acc: 0.3976\nEpoch 6/100\n - 5s - loss: 2.2109 - acc: 0.4049\nEpoch 7/100\n - 5s - loss: 2.1631 - acc: 0.4105\nEpoch 8/100\n - 5s - loss: 2.1071 - acc: 0.4233\nEpoch 9/100\n - 5s - loss: 2.0529 - acc: 0.4347\nEpoch 10/100\n - 5s - loss: 2.0051 - acc: 0.4448\nEpoch 11/100\n - 5s - loss: 1.9416 - acc: 0.4554\nEpoch 12/100\n - 5s - loss: 1.8828 - acc: 0.4689\nEpoch 13/100\n - 5s - loss: 1.8206 - acc: 0.4813\nEpoch 14/100\n - 5s - loss: 1.7587 - acc: 0.4913\nEpoch 15/100\n - 5s - loss: 1.7009 - acc: 0.5062\nEpoch 16/100\n - 5s - loss: 1.6307 - acc: 0.5220\nEpoch 17/100\n - 5s - loss: 1.5675 - acc: 0.5381\nEpoch 18/100\n - 5s - loss: 1.5152 - acc: 0.5514\nEpoch 19/100\n - 5s - loss: 1.4550 - acc: 0.5618\nEpoch 20/100\n - 5s - loss: 1.3893 - acc: 0.5803\nEpoch 21/100\n - 5s - loss: 1.3240 - acc: 0.5945\nEpoch 22/100\n - 5s - loss: 1.2549 - acc: 0.6151\nEpoch 23/100\n - 5s - loss: 1.2072 - acc: 0.6277\nEpoch 24/100\n - 5s - loss: 1.1393 - acc: 0.6433\nEpoch 25/100\n - 5s - loss: 1.0894 - acc: 0.6578\nEpoch 26/100\n - 5s - loss: 1.0450 - acc: 0.6663\nEpoch 27/100\n - 5s - loss: 0.9809 - acc: 0.6878\nEpoch 28/100\n - 5s - loss: 0.9308 - acc: 0.7068\nEpoch 29/100\n - 5s - loss: 0.8798 - acc: 0.7173\nEpoch 30/100\n - 5s - loss: 0.8307 - acc: 0.7300\nEpoch 31/100\n - 5s - loss: 0.7955 - acc: 0.7404\nEpoch 32/100\n - 5s - loss: 0.7491 - acc: 0.7569\nEpoch 33/100\n - 5s - loss: 0.7151 - acc: 0.7660\nEpoch 34/100\n - 5s - loss: 0.6781 - acc: 0.7775\nEpoch 35/100\n - 5s - loss: 0.6464 - acc: 0.7867\nEpoch 36/100\n - 5s - loss: 0.6243 - acc: 0.7933\nEpoch 37/100\n - 5s - loss: 0.5896 - acc: 0.8081\nEpoch 38/100\n - 5s - loss: 0.5557 - acc: 0.8177\nEpoch 39/100\n - 5s - loss: 0.5555 - acc: 0.8176\nEpoch 40/100\n - 5s - loss: 0.5206 - acc: 0.8305\nEpoch 41/100\n - 5s - loss: 0.5002 - acc: 0.8353\nEpoch 42/100\n - 5s - loss: 0.4864 - acc: 0.8388\nEpoch 43/100\n - 5s - loss: 0.4672 - acc: 0.8471\nEpoch 44/100\n - 5s - loss: 0.4475 - acc: 0.8519\nEpoch 45/100\n - 5s - loss: 0.4369 - acc: 0.8559\nEpoch 46/100\n - 5s - loss: 0.4177 - acc: 0.8636\nEpoch 47/100\n - 5s - loss: 0.4046 - acc: 0.8671\nEpoch 48/100\n - 5s - loss: 0.3944 - acc: 0.8694\nEpoch 49/100\n - 5s - loss: 0.3845 - acc: 0.8747\nEpoch 50/100\n - 5s - loss: 0.3721 - acc: 0.8788\nEpoch 51/100\n - 5s - loss: 0.3665 - acc: 0.8806\nEpoch 52/100\n - 5s - loss: 0.3467 - acc: 0.8849\nEpoch 53/100\n" ], [ "print(\"Using the encode2 (semantic-weight pretrained_weights) method\")\nfrom keras.layers import Input, Embedding, LSTM, Dense,Merge,Flatten\nfrom keras.models import Model\n# a_train=a_train.reshape(-1,3)\n\nemdedding_size=100\nLocation_size=201\nUser_size=183\nLSTM_size=200\nTime_size=5\nWeek_size=8\nStay_size=1440\n\n# Move_Pattern Sequences\ninput_pattern = Input(shape=(3, ),name=\"Move_Pattern\")\n# User-Id\nUser_id = Input(shape=(1,),name=\"User_id\")\n# Temporary\nStart_Time = Input(shape=(1,),name=\"Start_Time\")\nEnd_Time = Input(shape=(1,),name=\"End_Time\")\nStay_Time = Input(shape=(1,),name=\"Stay_Time\")\nDate_Info = Input(shape=(1,),name=\"Date_Info\")#1-7 Monday to Sunday\n# Spatial\nLocation_Info = Input(shape=(12,),name=\"Location_Info\")#12 categories Interest_point\n# Weather\nWeather_Info = Input(shape=(1,),name=\"Weather_Info\")#1-7 Weather Type\n\n\nem = Embedding(input_dim=Location_size, output_dim=emdedding_size,input_length=vo_len)(input_pattern)\nlstm_out = LSTM(LSTM_size)(em)\nlstm_out = Dropout(0.2)(lstm_out)\n\nem2 = Embedding(input_dim=User_size, output_dim=emdedding_size,input_length=1)(User_id)\n\nemStart_Time = Embedding(input_dim=Time_size, output_dim=emdedding_size,input_length=1)(Start_Time)\nemEnd_Time = Embedding(input_dim=Time_size, output_dim=emdedding_size,input_length=1)(End_Time)\nemStay_Time = Embedding(input_dim=Stay_size, output_dim=emdedding_size,input_length=1)(Stay_Time)\nemDate_Info = Embedding(input_dim=Week_size, output_dim=emdedding_size,input_length=1)(Date_Info)\n\nTemporary = keras.layers.concatenate([emStart_Time, emEnd_Time,emStay_Time,emDate_Info])\nTemporary = Flatten()(Temporary)\n\n\nem2=Flatten()(em2)\n\nx = keras.layers.concatenate([lstm_out, em2, Temporary])\nx=Dense(700,activation='relu',name=\"C\")(x)\nx=Dense(400,activation='relu',name=\"C2\")(x)\nx=Dense(250,activation='relu',name=\"C3\")(x)\nx=Dropout(0.2)(x)\nx=Dense(b_train.shape[1],activation='softmax',name='x')(x)\n\n\nmodel = Model(inputs=[input_pattern,User_id,Start_Time,End_Time,Stay_Time,Date_Info], outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\n# print(model.summary()) # Summarize Model\nplot_model(model, to_file='t_lstm624.png',show_shapes=True)", "Using the encode2 (semantic-weight pretrained_weights) method\n" ], [ "history_nopre = model.fit([a_train,k,Start_info,End_info,Stay_info,week_info], b_train, epochs=100, batch_size=512, verbose=2)", "Epoch 1/100\n - 5s - loss: 4.5151 - acc: 0.0695\nEpoch 2/100\n - 4s - loss: 3.6527 - acc: 0.1808\nEpoch 3/100\n - 4s - 
loss: 3.2736 - acc: 0.2294\nEpoch 4/100\n - 4s - loss: 3.0785 - acc: 0.2548\nEpoch 5/100\n - 4s - loss: 2.9331 - acc: 0.2816\nEpoch 6/100\n - 4s - loss: 2.8254 - acc: 0.3030\nEpoch 7/100\n - 4s - loss: 2.7225 - acc: 0.3212\nEpoch 8/100\n - 4s - loss: 2.6440 - acc: 0.3354\nEpoch 9/100\n - 4s - loss: 2.5695 - acc: 0.3478\nEpoch 10/100\n - 4s - loss: 2.4987 - acc: 0.3633\nEpoch 11/100\n - 4s - loss: 2.4260 - acc: 0.3776\nEpoch 12/100\n - 4s - loss: 2.3638 - acc: 0.3881\nEpoch 13/100\n - 4s - loss: 2.3012 - acc: 0.4041\nEpoch 14/100\n - 4s - loss: 2.2367 - acc: 0.4136\nEpoch 15/100\n - 4s - loss: 2.1730 - acc: 0.4289\nEpoch 16/100\n - 4s - loss: 2.1138 - acc: 0.4398\nEpoch 17/100\n - 4s - loss: 2.0453 - acc: 0.4566\nEpoch 18/100\n - 4s - loss: 1.9823 - acc: 0.4705\nEpoch 19/100\n - 4s - loss: 1.9170 - acc: 0.4845\nEpoch 20/100\n - 4s - loss: 1.8601 - acc: 0.4936\nEpoch 21/100\n - 4s - loss: 1.7918 - acc: 0.5095\nEpoch 22/100\n - 4s - loss: 1.7239 - acc: 0.5239\nEpoch 23/100\n - 4s - loss: 1.6538 - acc: 0.5407\nEpoch 24/100\n - 4s - loss: 1.5851 - acc: 0.5565\nEpoch 25/100\n - 4s - loss: 1.5219 - acc: 0.5722\nEpoch 26/100\n - 4s - loss: 1.4487 - acc: 0.5863\nEpoch 27/100\n - 4s - loss: 1.3865 - acc: 0.6049\nEpoch 28/100\n - 4s - loss: 1.3142 - acc: 0.6206\nEpoch 29/100\n - 4s - loss: 1.2544 - acc: 0.6339\nEpoch 30/100\n - 4s - loss: 1.1927 - acc: 0.6517\nEpoch 31/100\n - 4s - loss: 1.1342 - acc: 0.6641\nEpoch 32/100\n - 4s - loss: 1.0678 - acc: 0.6813\nEpoch 33/100\n - 4s - loss: 1.0129 - acc: 0.6960\nEpoch 34/100\n - 4s - loss: 0.9511 - acc: 0.7160\nEpoch 35/100\n - 4s - loss: 0.8933 - acc: 0.7288\nEpoch 36/100\n - 4s - loss: 0.8395 - acc: 0.7427\nEpoch 37/100\n - 4s - loss: 0.7928 - acc: 0.7576\nEpoch 38/100\n - 4s - loss: 0.7477 - acc: 0.7680\nEpoch 39/100\n - 4s - loss: 0.7010 - acc: 0.7827\nEpoch 40/100\n - 4s - loss: 0.6547 - acc: 0.7966\nEpoch 41/100\n - 4s - loss: 0.6161 - acc: 0.8076\nEpoch 42/100\n - 4s - loss: 0.5771 - acc: 0.8201\nEpoch 43/100\n - 4s - loss: 0.5428 - acc: 0.8295\nEpoch 44/100\n - 4s - loss: 0.5103 - acc: 0.8402\nEpoch 45/100\n - 4s - loss: 0.4871 - acc: 0.8451\nEpoch 46/100\n - 4s - loss: 0.4555 - acc: 0.8553\nEpoch 47/100\n - 4s - loss: 0.4285 - acc: 0.8643\nEpoch 48/100\n - 4s - loss: 0.4160 - acc: 0.8658\nEpoch 49/100\n - 4s - loss: 0.3776 - acc: 0.8810\nEpoch 50/100\n - 4s - loss: 0.3596 - acc: 0.8851\nEpoch 51/100\n - 4s - loss: 0.3470 - acc: 0.8896\nEpoch 52/100\n - 4s - loss: 0.3279 - acc: 0.8953\nEpoch 53/100\n - 4s - loss: 0.3127 - acc: 0.9006\nEpoch 54/100\n - 4s - loss: 0.3013 - acc: 0.9031\nEpoch 55/100\n - 4s - loss: 0.2888 - acc: 0.9079\nEpoch 56/100\n - 4s - loss: 0.2727 - acc: 0.9137\nEpoch 57/100\n - 4s - loss: 0.2692 - acc: 0.9143\nEpoch 58/100\n - 4s - loss: 0.2476 - acc: 0.9225\nEpoch 59/100\n - 4s - loss: 0.2337 - acc: 0.9262\nEpoch 60/100\n - 4s - loss: 0.2322 - acc: 0.9237\nEpoch 61/100\n - 4s - loss: 0.2236 - acc: 0.9284\nEpoch 62/100\n - 4s - loss: 0.2215 - acc: 0.9296\nEpoch 63/100\n - 4s - loss: 0.2173 - acc: 0.9305\nEpoch 64/100\n - 4s - loss: 0.2054 - acc: 0.9349\nEpoch 65/100\n - 4s - loss: 0.2033 - acc: 0.9346\nEpoch 66/100\n - 4s - loss: 0.1937 - acc: 0.9386\nEpoch 67/100\n - 4s - loss: 0.1919 - acc: 0.9373\nEpoch 68/100\n - 4s - loss: 0.1864 - acc: 0.9416\nEpoch 69/100\n - 4s - loss: 0.1811 - acc: 0.9409\nEpoch 70/100\n - 4s - loss: 0.1719 - acc: 0.9455\nEpoch 71/100\n - 4s - loss: 0.1669 - acc: 0.9473\nEpoch 72/100\n - 4s - loss: 0.1668 - acc: 0.9466\nEpoch 73/100\n - 4s - loss: 0.1669 - acc: 0.9462\nEpoch 74/100\n - 4s - loss: 
0.1539 - acc: 0.9511\nEpoch 75/100\n - 4s - loss: 0.1609 - acc: 0.9489\nEpoch 76/100\n - 4s - loss: 0.1539 - acc: 0.9508\nEpoch 77/100\n - 4s - loss: 0.1583 - acc: 0.9482\nEpoch 78/100\n - 4s - loss: 0.1553 - acc: 0.9510\nEpoch 79/100\n - 4s - loss: 0.1525 - acc: 0.9518\nEpoch 80/100\n - 4s - loss: 0.1448 - acc: 0.9546\nEpoch 81/100\n - 4s - loss: 0.1481 - acc: 0.9511\nEpoch 82/100\n - 4s - loss: 0.1380 - acc: 0.9544\nEpoch 83/100\n - 4s - loss: 0.1406 - acc: 0.9544\nEpoch 84/100\n - 4s - loss: 0.1398 - acc: 0.9545\nEpoch 85/100\n - 4s - loss: 0.1352 - acc: 0.9575\nEpoch 86/100\n - 4s - loss: 0.1230 - acc: 0.9606\nEpoch 87/100\n - 4s - loss: 0.1293 - acc: 0.9572\nEpoch 88/100\n - 4s - loss: 0.1361 - acc: 0.9559\nEpoch 89/100\n - 4s - loss: 0.1291 - acc: 0.9593\nEpoch 90/100\n - 4s - loss: 0.1183 - acc: 0.9622\nEpoch 91/100\n - 4s - loss: 0.1187 - acc: 0.9621\nEpoch 92/100\n - 4s - loss: 0.1146 - acc: 0.9635\nEpoch 93/100\n - 4s - loss: 0.1186 - acc: 0.9606\nEpoch 94/100\n - 4s - loss: 0.1167 - acc: 0.9626\nEpoch 95/100\n - 4s - loss: 0.1195 - acc: 0.9615\nEpoch 96/100\n - 4s - loss: 0.1180 - acc: 0.9618\nEpoch 97/100\n - 4s - loss: 0.1103 - acc: 0.9647\nEpoch 98/100\n - 4s - loss: 0.1165 - acc: 0.9625\nEpoch 99/100\n - 4s - loss: 0.1253 - acc: 0.9600\nEpoch 100/100\n - 4s - loss: 0.1119 - acc: 0.9631\n" ], [ "print(\"Using the encode2 (semantic-weight pretrained_weights) method\")\nfrom keras.layers import Input, Embedding, LSTM, Dense,Merge,Flatten\nfrom keras.models import Model\n# a_train=a_train.reshape(-1,3)\n\nemdedding_size=100\nLocation_size=201\nUser_size=183\nLSTM_size=200\nTime_size=5\nWeek_size=8\nStay_size=1440\n\n# Move_Pattern Sequences\ninput_pattern = Input(shape=(3, ),name=\"Move_Pattern\")\n# User-Id\nUser_id = Input(shape=(1,),name=\"User_id\")\n# Temporary\nStart_Time = Input(shape=(1,),name=\"Start_Time\")\nEnd_Time = Input(shape=(1,),name=\"End_Time\")\nStay_Time = Input(shape=(1,),name=\"Stay_Time\")\nDate_Info = Input(shape=(1,),name=\"Date_Info\")#1-7 Monday to Sunday\n# Spatial\nLocation_Info = Input(shape=(12,),name=\"Location_Info\")#12 categories Interest_point\n# Weather\nWeather_Info = Input(shape=(1,),name=\"Weather_Info\")#1-7 Weather Type\n\n\nem = Embedding(input_dim=Location_size, output_dim=emdedding_size,input_length=vo_len)(input_pattern)\nlstm_out = LSTM(LSTM_size)(em)\nlstm_out = Dropout(0.2)(lstm_out)\n\nem2 = Embedding(input_dim=User_size, output_dim=emdedding_size,input_length=1)(User_id)\n\nemStart_Time = Embedding(input_dim=Time_size, output_dim=emdedding_size,input_length=1)(Start_Time)\nemEnd_Time = Embedding(input_dim=Time_size, output_dim=emdedding_size,input_length=1)(End_Time)\nemStay_Time = Embedding(input_dim=Stay_size, output_dim=emdedding_size,input_length=1)(Stay_Time)\nemDate_Info = Embedding(input_dim=Week_size, output_dim=emdedding_size,input_length=1)(Date_Info)\n\n# Temporary = keras.layers.concatenate([emStart_Time, emEnd_Time,emStay_Time,emDate_Info])\n# Temporary = Flatten()(Temporary)\n\n\nem2=Flatten()(em2)\n\n
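# Ablation: the temporal embeddings above are built but excluded from the concatenation\n# below, so only the spatial LSTM output and the user embedding reach the classifier.\n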
x = keras.layers.concatenate([lstm_out, em2])\nx=Dense(700,activation='relu',name=\"C\")(x)\nx=Dense(400,activation='relu',name=\"C2\")(x)\nx=Dense(250,activation='relu',name=\"C3\")(x)\nx=Dropout(0.2)(x)\nx=Dense(b_train.shape[1],activation='softmax',name='x')(x)\n\n\nmodel = Model(inputs=[input_pattern,User_id,Start_Time,End_Time,Stay_Time,Date_Info], outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\n# print(model.summary()) # Summarize Model\nplot_model(model, to_file='t_lstm624_notem.png',show_shapes=True)\nhistory_notem = model.fit([a_train,k,Start_info,End_info,Stay_info,week_info], b_train, epochs=100, batch_size=512, verbose=2)", "Using the encode2 (semantic-weight pretrained_weights) method\nEpoch 1/100\n - 5s - loss: 4.4098 - acc: 0.0946\nEpoch 2/100\n - 4s - loss: 3.5862 - acc: 0.1948\nEpoch 3/100\n - 4s - loss: 3.2489 - acc: 0.2342\nEpoch 4/100\n - 4s - loss: 3.0829 - acc: 0.2527\nEpoch 5/100\n - 4s - loss: 2.9850 - acc: 0.2709\nEpoch 6/100\n - 4s - loss: 2.9109 - acc: 0.2798\nEpoch 7/100\n - 4s - loss: 2.8564 - acc: 0.2916\nEpoch 8/100\n - 4s - loss: 2.8051 - acc: 0.3000\nEpoch 9/100\n - 4s - loss: 2.7660 - acc: 0.3041\nEpoch 10/100\n - 4s - loss: 2.7346 - acc: 0.3113\nEpoch 11/100\n - 4s - loss: 2.7005 - acc: 0.3153\nEpoch 12/100\n - 4s - loss: 2.6724 - acc: 0.3184\nEpoch 13/100\n - 4s - loss: 2.6443 - acc: 0.3258\nEpoch 14/100\n - 4s - loss: 2.6169 - acc: 0.3250\nEpoch 15/100\n - 4s - loss: 2.5882 - acc: 0.3295\nEpoch 16/100\n - 4s - loss: 2.5637 - acc: 0.3382\nEpoch 17/100\n - 4s - loss: 2.5374 - acc: 0.3366\nEpoch 18/100\n - 4s - loss: 2.5117 - acc: 0.3431\nEpoch 19/100\n - 4s - loss: 2.4897 - acc: 0.3464\nEpoch 20/100\n - 4s - loss: 2.4642 - acc: 0.3504\nEpoch 21/100\n - 4s - loss: 2.4436 - acc: 0.3550\nEpoch 22/100\n - 4s - loss: 2.4096 - acc: 0.3583\nEpoch 23/100\n - 4s - loss: 2.3890 - acc: 0.3622\nEpoch 24/100\n - 4s - loss: 2.3635 - acc: 0.3678\nEpoch 25/100\n - 4s - loss: 2.3287 - acc: 0.3775\nEpoch 26/100\n - 4s - loss: 2.3091 - acc: 0.3785\nEpoch 27/100\n - 4s - loss: 2.2825 - acc: 0.3810\nEpoch 28/100\n - 4s - loss: 2.2614 - acc: 0.3869\nEpoch 29/100\n - 4s - loss: 2.2239 - acc: 0.3928\nEpoch 30/100\n - 4s - loss: 2.1955 - acc: 0.3981\nEpoch 31/100\n - 4s - loss: 2.1752 - acc: 0.4034\nEpoch 32/100\n - 4s - loss: 2.1372 - acc: 0.4098\nEpoch 33/100\n - 4s - loss: 2.1150 - acc: 0.4115\nEpoch 34/100\n - 4s - loss: 2.0778 - acc: 0.4209\nEpoch 35/100\n - 4s - loss: 2.0477 - acc: 0.4278\nEpoch 36/100\n - 4s - loss: 2.0169 - acc: 0.4307\nEpoch 37/100\n - 4s - loss: 1.9840 - acc: 0.4387\nEpoch 38/100\n - 4s - loss: 1.9531 - acc: 0.4478\nEpoch 39/100\n - 4s - loss: 1.9146 - acc: 0.4547\nEpoch 40/100\n - 4s - loss: 1.8796 - acc: 0.4603\nEpoch 41/100\n - 4s - loss: 1.8557 - acc: 0.4633\nEpoch 42/100\n - 4s - loss: 1.8136 - acc: 0.4724\nEpoch 43/100\n - 4s - loss: 1.7850 - acc: 0.4809\nEpoch 44/100\n - 4s - loss: 1.7472 - acc: 0.4906\nEpoch 45/100\n - 4s - loss: 1.7151 - acc: 0.4950\nEpoch 46/100\n - 4s - loss: 1.6770 - acc: 0.5021\nEpoch 47/100\n - 4s - loss: 1.6516 - acc: 0.5106\nEpoch 48/100\n - 4s - loss: 1.6174 - acc: 0.5188\nEpoch 49/100\n - 4s - loss: 1.5774 - acc: 0.5271\nEpoch 50/100\n - 4s - loss: 1.5425 - acc: 0.5359\nEpoch 51/100\n - 4s - loss: 1.5179 - acc: 0.5407\nEpoch 52/100\n - 4s - loss: 1.4874 - acc: 0.5495\nEpoch 53/100\n - 4s - loss: 1.4520 - acc: 0.5568\nEpoch 54/100\n - 4s - loss: 1.4284 - acc: 0.5638\nEpoch 55/100\n - 4s - loss: 1.4005 - acc: 0.5738\nEpoch 56/100\n - 4s - loss: 1.3617 - acc: 0.5825\nEpoch 57/100\n - 4s - loss: 1.3421 - acc: 0.5887\nEpoch 58/100\n - 4s - loss: 1.3114 - acc: 0.5947\nEpoch 59/100\n - 4s - loss: 1.2904 - acc: 0.5998\nEpoch 60/100\n - 4s - loss: 1.2625 - acc: 0.6047\nEpoch 61/100\n - 4s - loss: 1.2418 - acc: 0.6154\nEpoch 62/100\n - 4s - loss: 1.2092 - acc: 0.6228\nEpoch 63/100\n - 4s - loss: 1.1966 - acc: 0.6267\nEpoch 64/100\n - 4s - loss: 1.1692 - acc: 0.6314\nEpoch 65/100\n - 4s - loss: 1.1453 - acc: 0.6405\nEpoch 66/100\n - 4s - loss: 1.1359 - acc: 0.6428\nEpoch 67/100\n - 4s - loss: 1.1221 - acc: 0.6454\nEpoch 68/100\n - 4s - loss: 1.0973 - acc: 0.6531\nEpoch 69/100\n - 4s - loss: 1.0709 - acc: 0.6643\nEpoch 70/100\n - 4s - loss: 1.0496 - acc: 0.6677\nEpoch 71/100\n - 4s - loss: 1.0482 - acc: 0.6673\nEpoch 72/100\n - 4s - loss: 1.0289 - acc: 0.6747\nEpoch 73/100\n - 4s - loss: 1.0194 - acc: 0.6755\nEpoch 74/100\n - 4s - loss: 1.0012 - acc: 0.6779\nEpoch 75/100\n - 4s - loss: 0.9882 - acc: 0.6848\nEpoch 76/100\n - 4s - loss: 0.9718 - acc: 0.6940\nEpoch 77/100\n - 4s - loss: 0.9651 - acc: 0.6932\nEpoch 78/100\n - 4s - loss: 0.9382 - acc: 0.6960\nEpoch 79/100\n - 4s - loss: 0.9264 - acc: 0.7001\nEpoch 80/100\n - 4s - loss: 0.9163 - acc: 0.7049\nEpoch 81/100\n - 4s - loss: 0.9055 - acc: 0.7086\nEpoch 82/100\n - 4s - loss: 0.9088 - acc: 0.7084\nEpoch 83/100\n - 4s - loss: 0.8901 - acc: 0.7151\nEpoch 84/100\n - 4s - loss: 0.8792 - acc: 0.7163\nEpoch 85/100\n - 4s - loss: 0.8692 - acc: 0.7214\nEpoch 86/100\n - 4s - loss: 0.8586 - acc: 0.7212\nEpoch 87/100\n - 4s - loss: 0.8468 - acc: 0.7263\nEpoch 88/100\n - 4s - loss: 0.8433 - acc: 0.7287\nEpoch 89/100\n - 4s - loss: 0.8355 - acc: 0.7285\nEpoch 90/100\n - 4s - loss: 0.8261 - acc: 0.7328\nEpoch 91/100\n - 4s - loss: 0.8177 - acc: 0.7350\nEpoch 92/100\n - 4s - loss: 0.8186 - acc: 0.7347\nEpoch 93/100\n - 4s - loss: 0.8098 - acc: 0.7379\nEpoch 94/100\n - 4s - loss: 0.8100 - acc: 0.7373\nEpoch 95/100\n - 4s - loss: 0.8027 - acc: 0.7373\nEpoch 96/100\n - 4s - loss: 0.7903 - acc: 0.7421\nEpoch 97/100\n - 4s - loss: 0.7901 - acc: 0.7431\nEpoch 98/100\n - 4s - loss: 0.7855 - acc: 0.7427\nEpoch 99/100\n - 4s - loss: 0.7699 - acc: 0.7499\nEpoch 100/100\n - 4s - loss: 0.7691 - acc: 0.7482\n" ], [ "fig = plt.figure()\nplt.plot(history_nopre.history['acc'])\nplt.plot(history_notem.history['acc'])\nplt.plot(history_T_S.history['acc'])\n\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['None', 'T','T+S'], loc='upper left')", "_____no_output_____" ], [ "print(\"Using the encode1 method\")\naa_train=train_X\n\n
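# encode1: the one-hot encoded windows (train_X) are fed straight into the LSTM here,\n# instead of first learning a dense location embedding.\n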
from keras.layers import Input, Embedding, LSTM, Dense,Merge\nfrom keras.models import Model\n\ninput_pattern = Input(shape=(3, aa_train.shape[2]),name=\"Move_Pattern\")\ninput_id = Input(shape=(1,),name=\"User_id\")\n\nlstm_out = LSTM(units=200,return_sequences=False)(input_pattern)\nlstm_out = Dropout(0.2)(lstm_out)\n\nem2 = Embedding(input_dim=User_size, output_dim=emdedding_size,input_length=1)(input_id)\nem2=Flatten()(em2)\n\nx = keras.layers.concatenate([lstm_out, em2])\nx=Dense(400,activation='relu',name=\"C\")(x)\nx=Dense(300,activation='relu',name=\"C2\")(x)\nx=Dense(250,activation='relu',name=\"C3\")(x)\nx=Dropout(0.2)(x)\nx=Dense(b_train.shape[1],activation='softmax')(x)\n\n\nmodel = Model(inputs=[input_pattern,input_id], outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\n# print(model.summary()) # Summarize Model\nplot_model(model, to_file='t_lstm_encode1.png',show_shapes=True)", "Using the encode1 method\n" ], [ "history_encode1 = model.fit([aa_train,k], b_train, epochs=100, batch_size=512, verbose=2)", "Epoch 1/100\n - 4s - loss: 4.4523 - acc: 0.0925\nEpoch 2/100\n - 3s - loss: 3.6074 - acc: 0.1954\nEpoch 3/100\n - 3s - loss: 3.2661 - acc: 0.2316\nEpoch 4/100\n - 3s - loss: 3.0907 - acc: 0.2558\nEpoch 5/100\n - 3s - loss: 2.9844 - acc: 0.2712\nEpoch 6/100\n - 3s - loss: 2.9094 - acc: 0.2850\nEpoch 7/100\n - 3s - loss: 2.8517 - acc: 0.2902\nEpoch 8/100\n - 3s - loss: 2.8035 - acc: 0.2990\nEpoch 9/100\n - 3s - loss: 2.7607 - acc: 0.3067\nEpoch 10/100\n - 3s - loss: 2.7275 
- acc: 0.3122\nEpoch 11/100\n - 3s - loss: 2.6969 - acc: 0.3147\nEpoch 12/100\n - 3s - loss: 2.6657 - acc: 0.3205\nEpoch 13/100\n - 3s - loss: 2.6371 - acc: 0.3258\nEpoch 14/100\n - 3s - loss: 2.6091 - acc: 0.3308\nEpoch 15/100\n - 3s - loss: 2.5838 - acc: 0.3344\nEpoch 16/100\n - 3s - loss: 2.5607 - acc: 0.3383\nEpoch 17/100\n - 3s - loss: 2.5401 - acc: 0.3419\nEpoch 18/100\n - 3s - loss: 2.5118 - acc: 0.3459\nEpoch 19/100\n - 3s - loss: 2.4928 - acc: 0.3492\nEpoch 20/100\n - 3s - loss: 2.4602 - acc: 0.3539\nEpoch 21/100\n - 3s - loss: 2.4381 - acc: 0.3582\nEpoch 22/100\n - 3s - loss: 2.4138 - acc: 0.3627\nEpoch 23/100\n - 3s - loss: 2.3898 - acc: 0.3684\nEpoch 24/100\n - 3s - loss: 2.3633 - acc: 0.3684\nEpoch 25/100\n - 3s - loss: 2.3420 - acc: 0.3724\nEpoch 26/100\n - 3s - loss: 2.3146 - acc: 0.3771\nEpoch 27/100\n - 3s - loss: 2.2912 - acc: 0.3820\nEpoch 28/100\n - 3s - loss: 2.2662 - acc: 0.3888\nEpoch 29/100\n - 3s - loss: 2.2378 - acc: 0.3930\nEpoch 30/100\n - 3s - loss: 2.2185 - acc: 0.3947\nEpoch 31/100\n - 3s - loss: 2.1913 - acc: 0.4038\nEpoch 32/100\n - 3s - loss: 2.1699 - acc: 0.4051\nEpoch 33/100\n - 3s - loss: 2.1402 - acc: 0.4080\nEpoch 34/100\n - 3s - loss: 2.1140 - acc: 0.4168\nEpoch 35/100\n - 3s - loss: 2.0909 - acc: 0.4202\nEpoch 36/100\n - 3s - loss: 2.0597 - acc: 0.4235\nEpoch 37/100\n - 3s - loss: 2.0369 - acc: 0.4318\nEpoch 38/100\n - 3s - loss: 2.0078 - acc: 0.4356\nEpoch 39/100\n - 3s - loss: 1.9811 - acc: 0.4400\nEpoch 40/100\n - 3s - loss: 1.9565 - acc: 0.4465\nEpoch 41/100\n - 3s - loss: 1.9248 - acc: 0.4553\nEpoch 42/100\n - 3s - loss: 1.9052 - acc: 0.4566\nEpoch 43/100\n - 3s - loss: 1.8818 - acc: 0.4613\nEpoch 44/100\n - 3s - loss: 1.8506 - acc: 0.4720\nEpoch 45/100\n - 3s - loss: 1.8255 - acc: 0.4767\nEpoch 46/100\n - 3s - loss: 1.8046 - acc: 0.4776\nEpoch 47/100\n - 3s - loss: 1.7699 - acc: 0.4890\nEpoch 48/100\n - 3s - loss: 1.7470 - acc: 0.4889\nEpoch 49/100\n - 3s - loss: 1.7274 - acc: 0.4978\nEpoch 50/100\n - 3s - loss: 1.6996 - acc: 0.5044\nEpoch 51/100\n - 3s - loss: 1.6667 - acc: 0.5114\nEpoch 52/100\n - 3s - loss: 1.6549 - acc: 0.5128\nEpoch 53/100\n - 3s - loss: 1.6217 - acc: 0.5239\nEpoch 54/100\n - 3s - loss: 1.6025 - acc: 0.5242\nEpoch 55/100\n - 3s - loss: 1.5736 - acc: 0.5299\nEpoch 56/100\n - 3s - loss: 1.5462 - acc: 0.5384\nEpoch 57/100\n - 3s - loss: 1.5278 - acc: 0.5428\nEpoch 58/100\n - 3s - loss: 1.5049 - acc: 0.5485\nEpoch 59/100\n - 3s - loss: 1.4923 - acc: 0.5524\nEpoch 60/100\n - 3s - loss: 1.4700 - acc: 0.5559\nEpoch 61/100\n - 3s - loss: 1.4465 - acc: 0.5623\nEpoch 62/100\n - 3s - loss: 1.4281 - acc: 0.5684\nEpoch 63/100\n - 3s - loss: 1.3955 - acc: 0.5783\nEpoch 64/100\n - 3s - loss: 1.3838 - acc: 0.5802\nEpoch 65/100\n - 3s - loss: 1.3660 - acc: 0.5845\nEpoch 66/100\n - 3s - loss: 1.3448 - acc: 0.5898\nEpoch 67/100\n - 3s - loss: 1.3306 - acc: 0.5937\nEpoch 68/100\n - 3s - loss: 1.3099 - acc: 0.5995\nEpoch 69/100\n - 3s - loss: 1.2885 - acc: 0.6041\nEpoch 70/100\n - 3s - loss: 1.2740 - acc: 0.6073\nEpoch 71/100\n - 3s - loss: 1.2524 - acc: 0.6129\nEpoch 72/100\n - 3s - loss: 1.2381 - acc: 0.6206\nEpoch 73/100\n - 3s - loss: 1.2240 - acc: 0.6229\nEpoch 74/100\n - 3s - loss: 1.2193 - acc: 0.6199\nEpoch 75/100\n - 3s - loss: 1.2034 - acc: 0.6243\nEpoch 76/100\n - 3s - loss: 1.1867 - acc: 0.6329\nEpoch 77/100\n - 3s - loss: 1.1700 - acc: 0.6360\nEpoch 78/100\n - 3s - loss: 1.1549 - acc: 0.6420\nEpoch 79/100\n - 3s - loss: 1.1378 - acc: 0.6420\nEpoch 80/100\n - 3s - loss: 1.1235 - acc: 0.6500\nEpoch 81/100\n - 3s - loss: 1.1223 - 
acc: 0.6510\nEpoch 82/100\n - 3s - loss: 1.1134 - acc: 0.6521\nEpoch 83/100\n - 3s - loss: 1.0945 - acc: 0.6606\nEpoch 84/100\n - 3s - loss: 1.0790 - acc: 0.6611\nEpoch 85/100\n - 3s - loss: 1.0605 - acc: 0.6683\nEpoch 86/100\n - 3s - loss: 1.0622 - acc: 0.6655\nEpoch 87/100\n - 3s - loss: 1.0564 - acc: 0.6696\nEpoch 88/100\n - 3s - loss: 1.0382 - acc: 0.6747\nEpoch 89/100\n - 3s - loss: 1.0246 - acc: 0.6790\nEpoch 90/100\n - 3s - loss: 1.0181 - acc: 0.6782\nEpoch 91/100\n - 3s - loss: 1.0097 - acc: 0.6813\nEpoch 92/100\n - 3s - loss: 1.0022 - acc: 0.6854\nEpoch 93/100\n - 3s - loss: 0.9923 - acc: 0.6869\nEpoch 94/100\n - 3s - loss: 0.9867 - acc: 0.6876\nEpoch 95/100\n - 3s - loss: 0.9708 - acc: 0.6944\nEpoch 96/100\n - 3s - loss: 0.9678 - acc: 0.6938\nEpoch 97/100\n - 3s - loss: 0.9499 - acc: 0.6980\nEpoch 98/100\n - 3s - loss: 0.9486 - acc: 0.6983\nEpoch 99/100\n - 3s - loss: 0.9416 - acc: 0.7006\nEpoch 100/100\n - 3s - loss: 0.9343 - acc: 0.7031\n" ], [ "aa_train=train_X\n\nfrom keras.layers import Input, Embedding, LSTM, Dense,Merge\nfrom keras.models import Model\n\ninput_pattern = Input(shape=(3, aa_train.shape[2]),name=\"Move_Pattern\")\ninput_id = Input(shape=(1,),name=\"User_id\")\n\nlstm_out = LSTM(units=300,return_sequences=False)(input_pattern)\nlstm_out = Dropout(0.2)(lstm_out)\n\n# em2 = Embedding(input_dim=User_size, output_dim=emdedding_size,input_length=1)(input_id)\n# em2=Flatten()(em2)\n\n# x = keras.layers.concatenate([lstm_out, em2])\nx=Dense(400,activation='relu',name=\"C1\")(lstm_out)\nx=Dense(300,activation='relu',name=\"C2\")(x)\nx=Dense(250,activation='relu',name=\"C3\")(x)\nx=Dropout(0.2)(x)\nx=Dense(b_train.shape[1],activation='softmax')(x)\n\n\nmodel = Model(inputs=[input_pattern,input_id], outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\n# print(model.summary()) # Summarize Model\nplot_model(model, to_file='t_lstm_encode1.png',show_shapes=True)", "_____no_output_____" ], [ "history_encode1 = model.fit([aa_train,k], b_train, epochs=100, batch_size=512, verbose=2)", "Epoch 1/100\n - 5s - loss: 2.7515 - acc: 0.3074\nEpoch 2/100\n - 5s - loss: 2.7160 - acc: 0.3114\nEpoch 3/100\n - 5s - loss: 2.7005 - acc: 0.3157\nEpoch 4/100\n - 5s - loss: 2.6906 - acc: 0.3139\nEpoch 5/100\n - 5s - loss: 2.6715 - acc: 0.3190\nEpoch 6/100\n - 5s - loss: 2.6571 - acc: 0.3207\nEpoch 7/100\n - 5s - loss: 2.6453 - acc: 0.3226\nEpoch 8/100\n - 5s - loss: 2.6285 - acc: 0.3247\nEpoch 9/100\n - 5s - loss: 2.6126 - acc: 0.3291\nEpoch 10/100\n - 5s - loss: 2.5998 - acc: 0.3298\nEpoch 11/100\n - 5s - loss: 2.5840 - acc: 0.3312\nEpoch 12/100\n - 5s - loss: 2.5704 - acc: 0.3353\nEpoch 13/100\n - 5s - loss: 2.5506 - acc: 0.3393\nEpoch 14/100\n - 5s - loss: 2.5368 - acc: 0.3394\nEpoch 15/100\n - 5s - loss: 2.5159 - acc: 0.3440\nEpoch 16/100\n - 5s - loss: 2.4993 - acc: 0.3461\nEpoch 17/100\n - 5s - loss: 2.4865 - acc: 0.3480\nEpoch 18/100\n - 5s - loss: 2.4604 - acc: 0.3496\nEpoch 19/100\n - 5s - loss: 2.4475 - acc: 0.3515\nEpoch 20/100\n - 5s - loss: 2.4198 - acc: 0.3601\nEpoch 21/100\n - 5s - loss: 2.4056 - acc: 0.3603\nEpoch 22/100\n - 5s - loss: 2.3921 - acc: 0.3654\nEpoch 23/100\n" ], [ "from keras.layers import Input, Embedding, LSTM, Dense,Merge\nfrom keras.models import Model\n\nemdedding_size=12 #\nvo_len=3 #\nvocab_size=11000 #\na_train=patternX\nb_train=train_Y\nk=trainZ\npretrained_weights=Matrix\n\ninput_pattern = Input(shape=(3, ),name=\"input_pattern\")\nem = Embedding(input_dim=vocab_size, 
output_dim=emdedding_size,input_length=vo_len, weights=[pretrained_weights])(input_pattern)\nlstm_out = LSTM(units=emdedding_size)(em)\nlstm_out = Dropout(0.2)(lstm_out)\n\nx=Dense(250,activation='relu',name=\"C\")(lstm_out)\nx=Dropout(0.2)(x)\nx=Dense(180,activation='softmax')(x)\n\n\nmodel = Model(inputs=input_pattern, outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\nprint(model.summary()) # Summarize Model\nplot_model(model, to_file='t_lstm_test.png',show_shapes=True)\nhistory_withpre2 = model.fit(a_train, b_train, epochs=100, batch_size=16, verbose=2)", "_____no_output_____" ] ], [ [ "plot_model(model, to_file='t_lstm_test.png',show_shapes=True)\nhistory_nopre = model.fit(a_train, b_train, epochs=100, batch_size=16, verbose=2)", "_____no_output_____" ] ], [ [ "from keras.layers import Input, Embedding, LSTM, Dense,Merge\nfrom keras.models import Model\n\nemdedding_size=12 #\nvo_len=3 #\nvocab_size=11000 #\na_train=trainX.reshape(-1,3)\nb_train=train_Y\nk=trainZ\npretrained_weights=Matrix\n\ninput_pattern = Input(shape=(3, ),name=\"input_pattern\")\nem = Embedding(input_dim=vocab_size, output_dim=emdedding_size,input_length=vo_len, weights=[pretrained_weights])(input_pattern)\nlstm_out = LSTM(units=emdedding_size)(em)\nlstm_out = Dropout(0.2)(lstm_out)\n\nx=Dense(250,activation='relu',name=\"C\")(lstm_out)\nx=Dropout(0.2)(x)\nx=Dense(180,activation='softmax')(x)\n\n\nmodel = Model(inputs=input_pattern, outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\nprint(model.summary()) # Summarize Model\nplot_model(model, to_file='t_lstm_test.png',show_shapes=True)\nhistory_withpre2 = model.fit(a_train, b_train, epochs=100, batch_size=16, verbose=2)", "_____no_output_____" ], [ "from keras.layers import Input, Embedding, LSTM, Dense,Merge\nfrom keras.models import Model\n\nemdedding_size=12 #\nvo_len=3 #\nvocab_size=11000 #\na_train=patternX.reshape(-1,3,1)\nb_train=train_Y\nk=trainZ\npretrained_weights=Matrix\n\ninput_pattern = Input(shape=(3, 1),name=\"input_pattern\")\nlstm_out = LSTM(units=64)(input_pattern)\nlstm_out = Dropout(0.2)(lstm_out)\n\nx=Dense(250,activation='relu',name=\"C\")(lstm_out)\nx=Dropout(0.2)(x)\nx=Dense(180,activation='softmax')(x)\n\n\nmodel = Model(inputs=input_pattern, outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\nprint(model.summary()) # Summarize Model\nplot_model(model, to_file='t_lstm_test.png',show_shapes=True)\nhistory_withpre2 = model.fit(a_train, b_train, epochs=100, batch_size=16, verbose=2)", "_____no_output_____" ] ], [ [ "plot_model(model, to_file='t_lstm_test.png',show_shapes=True)\nhistory1 = model.fit(a_train, b_train, epochs=100, batch_size=16, verbose=2)", "_____no_output_____" ] ], [ [ "from keras.layers import Input, Embedding, LSTM, Dense,Merge\nfrom keras.models import Model\n\ninput_pattern = Input(shape=(3, a_train.shape[2]),name=\"input_pattern\")\n\nlstm_out = LSTM(512,input_shape=(3, a_train.shape[2]))(input_pattern)\n# lstm_out = LSTM(512,return_sequences=True,input_shape=(3, a_train.shape[2]))(input_pattern)\n# lstm_out = LSTM(300)(lstm_out)\nlstm_out = Dropout(0.2)(lstm_out)\n\nx=Dense(250,activation='relu',name=\"C\")(lstm_out)\nx=Dropout(0.2)(x)\nx=Dense(a_train.shape[2],activation='softmax')(x)\n\nmodel = Model(inputs=input_pattern, outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\nprint(model.summary()) # Summarize 
Model\nplot_model(model, to_file='t_lstm_test.png',show_shapes=True)", "_____no_output_____" ], [ "history = model.fit(a_train, b_train, epochs=100, batch_size=16, verbose=2, validation_data=(a_test, b_test))", "_____no_output_____" ], [ "print(history.history.keys())", "_____no_output_____" ], [ "fig = plt.figure()\nplt.plot(history.history['acc'])\nplt.plot(history1.history['acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['1-lstm', '2-lstm'], loc='upper left')", "_____no_output_____" ], [ "fig = plt.figure()\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')", "_____no_output_____" ], [ "plt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='lower left')", "_____no_output_____" ], [ "train_X=train_X.reshape(-1,200)\ntrain_Y.reshape(-1,200)\ntrain_X.shape", "_____no_output_____" ], [ "train_Y.shape", "_____no_output_____" ], [ "from keras.layers import Input, Embedding, LSTM, Dense,Merge\nfrom keras.models import Model\n\na_train=train_X\nb_train=train_Y\nk=trainZ\n\ninput_pattern = Input(shape=(3, a_train.shape[2]),name=\"input_pattern\")\ninput_id = Input(shape=(1,),name=\"input_id\")\n\nlstm_out = LSTM(250,input_shape=(3, a_train.shape[2]))(input_pattern)\nlstm_out = Dropout(0.2)(lstm_out)\n\nx = keras.layers.concatenate([lstm_out, input_id])\nx=Dense(250,activation='relu',name=\"C\")(x)\nx=Dropout(0.2)(x)\nx=Dense(a_train.shape[2],activation='softmax',name='x')(x)\n\n\nmodel = Model(inputs=[input_pattern,input_id], outputs=x)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n\nprint(model.summary()) # Summarize Model\nplot_model(model, to_file='t_lstm.png',show_shapes=True)", "_____no_output_____" ], [ "k=np.zeros(a_train.shape[0],dtype=np.int16)\nk=k.reshape(-1,1)\n\nk1=np.zeros(train_X.shape[0],dtype=np.int16)\nk1=k1.reshape(-1,1)", "_____no_output_____" ], [ "history = model.fit({'input_pattern': a_train, 'input_id' : k}, {'x': b_train}, epochs=100, batch_size=64, verbose=2)", "_____no_output_____" ], [ "fig = plt.figure()\nAccuracy=[42.00,47.15 ,48.36, 49.35,47.42, 50.82, 52.31,56.93 ,57.15 ]\nx2=(20,30,40,50,60,70,80,90,100)\nplt.plot(x2,Accuracy)\nx1=range(0,100)\nplt.plot(x1,history.history['acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['100% total_data Yu','100% total_data Mine'], loc='upper left')\n\n", "_____no_output_____" ], [ "fig = plt.figure()\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['90% train_data', '100% total_data'], loc='upper left')", "_____no_output_____" ] ], [ [ "model.fit(a_train, b_train, epochs=100, batch_size=16, verbose=2, validation_data=(a_test, b_test))", "_____no_output_____" ] ], [ [ "model.evaluate(train_X, train_Y, batch_size=64, verbose=2, sample_weight=None)", "_____no_output_____" ], [ "trainPredict = model.predict(train_X)\nD=np.argmax(train_Y,axis = 1)\nE=np.argmax(trainPredict,axis = 1)", "_____no_output_____" ], [ "print(D)\nprint(E)", "_____no_output_____" ], [ "A=0 #total number of right\nfor i,t in enumerate(E):\n if D[i]==t :\n A=A+1\nprint(A/D.shape[0])", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec86ae8cc722254cbab6eb2df2cc41824e03f660
257,834
ipynb
Jupyter Notebook
dev/eigenmodes.ipynb
spinachslayer420/MSE598-SAF-Project
4719afdb6e90e9deb91268fe9a88e1cbf2b34a86
[ "BSD-3-Clause" ]
null
null
null
dev/eigenmodes.ipynb
spinachslayer420/MSE598-SAF-Project
4719afdb6e90e9deb91268fe9a88e1cbf2b34a86
[ "BSD-3-Clause" ]
null
null
null
dev/eigenmodes.ipynb
spinachslayer420/MSE598-SAF-Project
4719afdb6e90e9deb91268fe9a88e1cbf2b34a86
[ "BSD-3-Clause" ]
null
null
null
259.390342
77,396
0.902038
[ [ [ "# Eigenmodes", "_____no_output_____" ], [ "In this tutorial, we compute and relax a skyrmion in an interfacial-DMI material thin film using periodic boundary conditions.", "_____no_output_____" ] ], [ [ "import oommfc as mc\nimport discretisedfield as df\nimport micromagneticmodel as mm\nimport micromagneticdata as md", "_____no_output_____" ] ], [ [ "We define mesh in cuboid through corner points `p1` and `p2`, and discretisation cell size `cell`. To define periodic boundary conditions, we pass an additional argument `pbc`. This argument can be any iterable (list, tuple, string, set) containing strings `'x'`, `'y'`, and/or `'z'`. Let us assume we want the periodic boundary conditions in $x$ and $y$ directions.", "_____no_output_____" ] ], [ [ "region = df.Region(p1=(-50e-9, -50e-9, 0), p2=(50e-9, 50e-9, 10e-9))\nmesh = df.Mesh(region=region, cell=(5e-9, 5e-9, 5e-9))", "_____no_output_____" ] ], [ [ "Now, we can define the system object:", "_____no_output_____" ] ], [ [ "system = mm.System(name='skyrmion')\n\nsystem.energy = (mm.Exchange(A=1.6e-11)\n + mm.DMI(D=4e-3, crystalclass='Cnv') \n + mm.UniaxialAnisotropy(K=0.51e6, u=(0, 0, 1)) \n + mm.Zeeman(H=(0, 0, 0.2e5)))\nsystem.dynamics = mm.Precession(gamma0=mm.consts.gamma0) + mm.Damping(alpha=1e-5)\n\nMs = 1.1e6\n\ndef m_init(pos):\n x, y, z = pos\n if (x**2 + y**2)**0.5 < 10e-9:\n return (0, 0, -1)\n else:\n return (0, 0, 1)\n \n\n# create system with above geometry and initial magnetisation\nsystem.m = df.Field(mesh, dim=3, value=m_init, norm=Ms)", "_____no_output_____" ] ], [ [ "Finally we can minimise the energy and plot the magnetisation.", "_____no_output_____" ] ], [ [ "# minimize the energy\nmd = mc.MinDriver()\nmd.drive(system)\n\n# Plot relaxed configuration: vectors in z-plane\nsystem.m.plane('z').mpl()", "Running OOMMF (ExeOOMMFRunner) [2020/07/01 17:03]... (2.1 s)\n" ], [ "sinc_pulse = mm.Zeeman(H=(0, 0, 1e4), wave='sinc', f=5e9, t0=2.5e-9, name='sinc')\nsystem.energy += sinc_pulse\n\ntd = mc.TimeDriver()\ntd.drive(system, t=5e-9, n=200)", "Running OOMMF (ExeOOMMFRunner) [2020/07/01 17:03]... (11.6 s)\n" ], [ "# Plot z-component only:\nsystem.m.z.plane('z').mpl()", "_____no_output_____" ], [ "system.table.mpl(yaxis=['Bz_sinc'])", "_____no_output_____" ], [ "system.energy = (mm.Exchange(A=1.6e-11)\n + mm.DMI(D=4e-3, crystalclass='Cnv') \n + mm.UniaxialAnisotropy(K=0.51e6, u=(0, 0, 1)) \n + mm.Zeeman(H=(0, 0, 0.2e5)))\n\ntd = mc.TimeDriver()\ntd.drive(system, t=5e-9, n=1000)", "Running OOMMF (ExeOOMMFRunner) [2020/07/01 17:03]... (28.6 s)\n" ], [ "system.table.mpl(yaxis=['mz'])", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nmz = system.table.data['mz'].to_numpy()\npsd = np.abs(np.fft.fft(mz))\nf = np.fft.fftfreq(n=len(psd), d=5e-9/1000)\n\nplt.plot(f/1e9, np.abs(np.fft.fft(mz)))\nplt.xlim([4, 7])", "_____no_output_____" ], [ "md.drive(system)\n\nsystem.m.z.plane('z').mpl()", "Running OOMMF (ExeOOMMFRunner) [2020/07/01 17:03]... (1.7 s)\n" ], [ "system.energy += mm.Zeeman(H=(0, 0, 1e3), wave='sin', f=1.15e9, t0=0, name='sin')\n\ntd.drive(system, t=5e-9, n=200)", "Running OOMMF (ExeOOMMFRunner) [2020/07/01 17:04]... (12.6 s)\n" ], [ "import micromagneticdata as md\n\ndata = md.Data(system.name)\ndata.info", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec86b2064e947dffbb7fa81fc26982fbc2c7482b
151,155
ipynb
Jupyter Notebook
_site/notes/notes_ipynb/docs/lecture17-density-estimation.ipynb
wu-hongjun/aml
0964c90c537cfcc002eaca2d869cdaead4faf49d
[ "Apache-2.0", "MIT" ]
null
null
null
_site/notes/notes_ipynb/docs/lecture17-density-estimation.ipynb
wu-hongjun/aml
0964c90c537cfcc002eaca2d869cdaead4faf49d
[ "Apache-2.0", "MIT" ]
null
null
null
_site/notes/notes_ipynb/docs/lecture17-density-estimation.ipynb
wu-hongjun/aml
0964c90c537cfcc002eaca2d869cdaead4faf49d
[ "Apache-2.0", "MIT" ]
null
null
null
103.815247
25,024
0.852258
<left><img width=25% src=\"https://aml.hongjunwu.com/slides/img/cornell_tech2.svg\"></left>\n\n# Lecture 17: Density Estimation\n\n### Applied Machine Learning\n\n__Volodymyr Kuleshov__<br>Cornell Tech", "_____no_output_____" ], [ "# Part 1: Unsupervised Probabilistic Models\n\nDensity estimation is the problem of estimating a probability distribution from data.\n\nAs a first step, we will introduce probabilistic models for unsupervised learning.", "_____no_output_____" ], [ "# Review: Unsupervised Learning\n\nWe have a dataset *without* labels. Our goal is to learn something interesting about the structure of the data:\n* Clusters hidden in the dataset.\n* Outliers: particularly unusual and/or interesting datapoints.\n* Useful signal hidden in noise, e.g. human speech over a noisy phone.", "_____no_output_____" ], [ "# Components of an Unsupervised Learning Problem\n\nAt a high level, an unsupervised machine learning problem has the following structure:\n\n$$ \\underbrace{\\text{Dataset}}_\\text{Attributes} + \\underbrace{\\text{Learning Algorithm}}_\\text{Model Class + Objective + Optimizer } \\to \\text{Unsupervised Model} $$\n\nThe unsupervised model describes interesting structure in the data. For instance, it can identify interesting hidden clusters.", "_____no_output_____" ], [ "# Review: Data Distribution\n\nWe will assume that the dataset is sampled from a probability distribution $P_\\text{data}$, which we will call the *data distribution*. We will denote this as\n$$x \\sim P_\\text{data}.$$\n\nThe dataset $\\mathcal{D} = \\{x^{(i)} \\mid i = 1,2,...,n\\}$ consists of *independent and identically distributed* (IID) samples from $P_\\text{data}$.", "_____no_output_____" ], [ "# Review: Unsupervised Models\n\nWe'll say that a model is a function\n$$ f : \\mathcal{X} \\to \\mathcal{S} $$\nthat maps inputs $x \\in \\mathcal{X}$ to some notion of structure $s \\in \\mathcal{S}$.\n\nStructure can have many definitions (clusters, low-dimensional representations, etc.), and we will see many examples.", "_____no_output_____" ], [ "Often, models have *parameters* $\\theta \\in \\Theta$ living in a set $\\Theta$. We will then write the model as\n$$ f_\\theta : \\mathcal{X} \\to \\mathcal{S} $$\nto denote that it's parametrized by $\\theta$.", "_____no_output_____" ], [ "# Unsupervised Probabilistic Models\n\nAn unsupervised probabilistic model is a probability distribution\n$$P(x) : \\mathcal{X} \\to [0,1].$$\nThis model can approximate the data distribution $P_\\text{data}$.", "_____no_output_____" ], [ "Probabilistic models also have *parameters* $\\theta \\in \\Theta$, which we denote as\n$$P_\\theta(x) : \\mathcal{X} \\to [0,1].$$", "_____no_output_____" ], [ "# Why Use Probabilistic Models?\n\nThere are many tasks that we can solve with a good model $P_\\theta$.\n1. Generation: sample new objects from $P_\\theta$, such as images.\n2. Representation learning: find interesting structure in $P_\\text{data}$.\n3. 
Density estimation: approximate $P_\\theta \\approx P_\\text{data}$ and use it to solve any downstream task (generation, clustering, outlier detection, etc.).\n\nWe are going to be interested in the latter.", "_____no_output_____" ], [ "# Kullback-Leibler Divergence\n\nIn order to approximate $P_\\text{data}$ with $P_\\theta$, we need a measure of distance between distributions.", "_____no_output_____" ], [ "A standard measure of similarity between distributions is the *Kullback-Leibler (KL) divergence* between two distributions $p$ and $q$, defined as\n$$\nD(p \\| q) = \\sum_{{\\bf x}} p({\\bf x}) \\log \\frac{p({\\bf x})}{q({\\bf x})}.\n$$", "_____no_output_____" ], [ "#### Observations:\n* $D(p \\, \\| \\, q) \\geq 0$ for all $p, q$, with equality if and only if $p= q$. Proof:\n\\begin{align*}\n\tD(p \\| q) = \\mathbb{E}_{x\\sim p}{-\\log \\frac{q({\\bf x})}{p({\\bf x})}} & \\geq -\\log \\left( \\mathbb{E}_{x\\sim p} {\\frac{q({\\bf x})}{p({\\bf x})}} \\right) \\\\ = &\n\t-\\log \\left( \\sum_{{\\bf x}} p({\\bf x}) \\frac{q({\\bf x})}{p({\\bf x})} \\right)\n\t=\n\t0\n\\end{align*}\nwhere in the first line we used [Jensen's inequality](https://en.wikipedia.org/wiki/Jensen%27s_inequality)", "_____no_output_____" ], [ "* The KL-divergence is *asymmetric*, i.e., $D(p \\| q) \\neq D(q \\| p)$", "_____no_output_____" ], [ "# Learning Models Using KL Divergence\n\nWe may now learn a probabilistic model $P_\\theta(x)$ that approximates $P_\\text{data}(x)$ via the KL divergence:\n\\begin{align*}\nD(P_{\\textrm{data}} \\mid \\mid {P_\\theta}) & = \\mathbb{E}_{x \\sim P_{\\textrm{data}}}{\\log\\left(\n\\frac{P_{\\textrm{data}}(x)}{P_\\theta(x)}\n\\right)} \\\\ & = \\sum_{{x}} P_{\\textrm{data}}({x}) \\log \\frac{P_{\\textrm{data}}({x})}{P_\\theta(x)}\n\\end{align*}\n\nNote that $D(P_{\\textrm{data}} \\mid \\mid {P_\\theta})=0$ iff the two distributions are the same.", "_____no_output_____" ], [ "# From KL Divergence to Log Likelihood\n\n$\n\\newcommand{\\x}{x}\n\\newcommand{\\ex}[2]{\\mathbb{E}_{#1 \\sim #2}}\n\\newcommand{\\en}[2]{D(#1 \\mid \\mid #2)}\n$\n\nWe can learn $P_\\theta$ that approximates $P_\\text{data}$ by minimizing $D(P_{\\textrm{data}} \\mid \\mid {P_\\theta})$. 
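\n\nAs a quick numeric sanity check of the KL definition above (a minimal sketch, assuming only NumPy; the two distributions are made-up toy values):\n\n```python\nimport numpy as np\n\np = np.array([0.3, 0.7])        # a toy 'data' distribution\nq = np.array([0.5, 0.5])        # a candidate model distribution\nkl = np.sum(p * np.log(p / q))  # D(p || q) from the definition above\nprint(kl)                       # ~0.082; it is 0 only when p == q\n```\n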
This objective further simplifies as:\n\\begin{eqnarray*}\n\\en{P_{\\textrm{data}}}{P_\\theta} &=& \\ex{\\x}{P_{\\textrm{data}}}{\\log\\left(\n\\frac{P_{\\textrm{data}}(\\x)}{P_\\theta(\\x)}\n\\right)} \\\\\n&=& \n%-\\bH(P_{\\textrm{data}}) \n\\ex{\\x}{P_{\\textrm{data}}}{\\log P_{\\textrm{data}}(\\x)}\n- \\ex{\\x}{P_{\\textrm{data}}}{\\log P_\\theta(\\x)}\n\\end{eqnarray*}\n", "_____no_output_____" ], [ "The first term does not depend on $P_\\theta$: minimizing KL divergence is equivalent to maximizing the expected log-likelihood.\n\n\\begin{align*}\n\\arg\\min_{P_\\theta} \\en{P_{\\textrm{data}}}{P_\\theta} & = \\arg\\min_{P_\\theta} - \\ex{\\x}{P_{\\textrm{data}}}{\\log P_\\theta(\\x)} \\\\ & = \\arg\\max_{P_\\theta} \\ex{\\x}{P_{\\textrm{data}}}{\\log P_\\theta(\\x)}\n\\end{align*}", "_____no_output_____" ], [ "* This asks that $P_\\theta$ assign high probability to instances sampled from $P_{\\textrm{data}}$, so as to reflect the true distribution.", "_____no_output_____" ], [ "* Because of $\\log$, samples $\\x$ where $P_\\theta(\\x) \\approx 0$ weigh heavily in the objective.", "_____no_output_____" ], [ "Problem: In general we do not know $P_{\\textrm{data}}$, hence expected value is intractable.", "_____no_output_____" ], [ "# Maximum Likelihood Estimation\n\n$\n\\newcommand{\\exd}[2]{\\mathbb{E}_{#1 \\sim #2}}\n\\newcommand{\\cd}{\\mathcal{D}}\n$\n\nApplying, Monte Carlo estimation, we may approximate the expected log-likelihood\n$$\n\\ex{\\x}{P_{\\textrm{data}}}{\\log P_\\theta(\\x)}\n$$\nwith the *empirical log-likelihood*:\n$$\n\\exd{\\cd}{P_\\theta(\\x)} = \\frac{1}{|\\cd|}\\sum_{\\x\\in \\cd} \\log P_\\theta(\\x)\n$$", "_____no_output_____" ], [ "Maximum likelihood learning is then:\n$$\n\\max_{P_\\theta} \\hspace{2mm} \\frac{1}{|\\cd|}\\sum_{\\x\\in \\cd} \\log P_\\theta(\\x).\n$$\n<!-- Equivalently, we maximize probability of the data under model $P_\\theta((x^{(1)}, y^{(1)}), \\cdots, (x^{(n)}, y^{(n)})) = \\prod_{\\x\\in \\cd} P_\\theta(\\x)$ -->", "_____no_output_____" ], [ "# Example: Flipping a Random Coin\n\nHow should we choose $P_\\theta(x)$ if 3 out of 5 coin tosses are heads? Let's apply maximum likelihood learning.", "_____no_output_____" ], [ "* Our model is $P_\\theta(x=H)=\\theta$ and $P_\\theta(x=T)=1-\\theta$\n* Our data is: $\\cd=\\{H,H,T,H,T\\}$ \n* The likelihood of the data is $\\prod_{i} P_\\theta(x_i)=\\theta \\cdot \\theta \\cdot (1-\\theta) \\cdot \\theta \\cdot (1-\\theta)$.", "_____no_output_____" ], [ "We optimize for $\\theta$ which makes $\\cd$ most likely. What is the solution in this case?", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# our dataset is {H, H, T, H, T}; if theta = P(x=H), we get:\ncoin_likelihood = lambda theta: theta*theta*(1-theta)*theta*(1-theta)\n\ntheta_vals = np.linspace(0,1)\nplt.plot(theta_vals, coin_likelihood(theta_vals))", "_____no_output_____" ] ], [ [ "<left><img width=25% src=\"https://aml.hongjunwu.com/slides/img/cornell_tech2.svg\"></left>\n# Part 2: Kernel Density Estimation\n\nNext, let's look at a first example of probabilistic models and how they are used to perform density estimation.", "_____no_output_____" ], [ "# Review: Data Distribution\n\nWe will assume that the dataset is sampled from a probability distribution $\\mathbb{P}$, which we will call the *data distribution*. 
We will denote this as\n$$x \sim P_\text{data}.$$\n\nThe dataset $\mathcal{D} = \{x^{(i)} \mid i = 1,2,...,n\}$ consists of *independent and identically distributed* (IID) samples from $P_\text{data}$.", "_____no_output_____" ], [ "# Review: Unsupervised Probabilistic Models\n\nAn unsupervised probabilistic model is a probability distribution\n$$P_\theta(x) : \mathcal{X} \to [0,1].$$\nThis model can approximate the data distribution $P_\text{data}$. It may have parameters $\theta$.", "_____no_output_____" ], [ "# Density Estimation\n\nThe problem of density estimation is to approximate the data distribution $P_\text{data}$ with the model $P$.\n$$ P \approx P_\text{data}. $$", "_____no_output_____" ], [ "It's also a general learning task. We can solve many downstream tasks using a good model $P$:\n* Outlier and novelty detection\n* Generating new samples $x$\n* Visualizing and understanding the structure of $P_\text{data}$", "_____no_output_____" ], [ "# Histogram Density Estimation\n\nPerhaps the simplest approach to density estimation is by forming a histogram.\n\nA histogram partitions the input space $x$ into a $d$-dimensional grid and counts the number of points in each cell.", "_____no_output_____" ], [ "This is best illustrated by an example. \n\nLet's start by creating a simple 1D dataset coming from a mixture of two Gaussians:\n\n$$P_\text{data}(x) = 0.3 \cdot \mathcal{N}(x ; \mu=0, \sigma=1) + 0.7 \cdot \mathcal{N}(x ; \mu=5, \sigma=1)$$", "_____no_output_____" ] ], [ [ "# https://scikit-learn.org/stable/auto_examples/neighbors/plot_kde_1d.html\nimport numpy as np\nnp.random.seed(1)\n\nN = 20 # number of points\n# concat samples from two Gaussians:\nX = np.concatenate((\n np.random.normal(0, 1, int(0.3 * N)), \n np.random.normal(5, 1, int(0.7 * N))\n))[:, np.newaxis]\nbins = np.linspace(-5, 10, 10) # locations of the bins\n\n# print out X\nprint(X.flatten())", "[ 1.62434536 -0.61175641 -0.52817175 -1.07296862 0.86540763 -2.3015387\n 6.74481176 4.2387931 5.3190391 4.75062962 6.46210794 2.93985929\n 4.6775828 4.61594565 6.13376944 3.90010873 4.82757179 4.12214158\n 5.04221375 5.58281521]\n" ] ], [ [ "We can now estimate the density using a histogram.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nplt.hist(X[:, 0], bins=bins, density=True) # plot the histogram\nplt.plot(X[:, 0], np.full(X.shape[0], -0.01), '.k', markersize=10) # plot the points in X\nplt.xlim(-4, 9)\nplt.ylim(-0.02, 0.25)", "_____no_output_____" ] ], [ [ "# Limitations of Histograms\n\nHistogram-based methods have a number of shortcomings.\n* The number of grid cells increases exponentially with dimension $d$.\n* The histogram is not \"smooth\".\n* The shape of the histogram depends on the bin positions.\n\nWe will now try to address the last two limitations.", "_____no_output_____" ], [ "Let's also visualize what we mean when we say that the shape of the histogram depends on the histogram bins.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(12,4))\nax[0].hist(X[:, 0], bins=bins, density=True) # plot the histogram\nax[1].hist(X[:, 0], bins=bins+0.75, density=True) # plot the histogram with bin centers shifted by 0.75\nfor axi in ax.ravel():\n axi.plot(X[:, 0], np.full(X.shape[0], -0.01), '.k', markersize=10) # plot the points in X\n axi.set_xlim(-4, 9)\n axi.set_ylim(-0.02, 0.3)", "_____no_output_____" ] ], [ [ "# Kernel Density Estimation: Idea\n\nKernel density estimation (KDE) is a different approach to histogram 
estimation.\n* A histogram has $b$ bins of width $\delta$ at fixed positions.\n* KDE effectively places a bin of width $\delta$ at each $x \in \mathcal{X}$.\n* To obtain $P(x)$, we count the % of points that fall in the bin centered at $x$.", "_____no_output_____" ], [ "# Tophat Kernel Density Estimation\n\nThe simplest form of this strategy (Tophat KDE) assumes a model of the form\n$$P_\delta(x) = \frac{N(x; \delta)}{n},$$\nwhere\n$$ N(x; \delta) = |\{x^{(i)} : ||x^{(i)} - x || \leq \delta/2\}|, $$\nis the number of points that are within a bin of width $\delta$ centered at $x$.", "_____no_output_____" ], [ "This is best understood via a picture.", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KernelDensity\n\nkde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X) # fit a KDE model\nx_ticks = np.linspace(-5, 10, 1000)[:, np.newaxis] # choose 1000 points on x-axis\nlog_density = kde.score_samples(x_ticks) # compute density at 1000 points\n\nplt.fill(x_ticks[:, 0], np.exp(log_density)) # plot the density estimate\nplt.plot(X[:, 0], np.full(X.shape[0], -0.01), '.k', markersize=10) # plot the points in X\nplt.xlim(-4, 9)\nplt.ylim(-0.02, 0.32)", "_____no_output_____" ] ], [ [ "The above algorithm still has the problem of producing a density estimate that is not smooth.\n\nWe are going to resolve this by replacing histogram counts with weighted averages.", "_____no_output_____" ], [ "# Review: Kernels\n\nA *kernel function* $K : \mathcal{X} \times \mathcal{X} \to [0, \infty]$ maps pairs of vectors $x, z \in \mathcal{X}$ to a real-valued score $K(x,z)$.\n\n* A kernel represents the similarity between $x$ and $z$.\n* It also often encodes the dot product between $x$ and $z$ in some high-dimensional feature space.\n\nWe will use the first interpretation here.", "_____no_output_____" ], [ "# Kernel Density Estimation\n\nA kernelized density model $P$ takes the form:\n$$P(x) \propto \sum_{i=1}^n K(x, x^{(i)}).$$\nThis can be interpreted in different ways:\n* We count the number of points \"near\" $x$, but each $x^{(i)}$ has a weight $K(x, x^{(i)})$ that depends on similarity between $x, x^{(i)}$.\n* We place a \"micro-density\" $K(x, x^{(i)})$ at each $x^{(i)}$; the final density $P(x)$ is their sum.", "_____no_output_____" ], [ "# Types of Kernels\n\nWe have seen several types of kernels in the context of support vector machines.\n\nThere are additional kernels that are popular for density estimation.", "_____no_output_____" ], [ "The following kernels are available in `scikit-learn`.\n* Gaussian kernel $K(x,z; \delta) \propto \exp(-||x-z||^2/2\delta^2)$\n* Tophat kernel $K(x,z; \delta) = 1 \text{ if } ||x-z|| \leq \delta/2$ else $0$.\n* Epanechnikov kernel $K(x,z; \delta) \propto 1 - ||x-z||^2/\delta^2$\n* Exponential kernel $K(x,z; \delta) \propto \exp(-||x-z||/\delta)$\n* Linear kernel $K(x,z; \delta) \propto (1 - ||x-z||/\delta)^+$", "_____no_output_____" ], [ "It's easier to understand these kernels by looking at a figure.", "_____no_output_____" ] ], [ [ "# https://scikit-learn.org/stable/auto_examples/neighbors/plot_kde_1d.html\nX_plot = np.linspace(-6, 6, 1000)[:, None]\nX_src = np.zeros((1, 1))\n\nfig, ax = plt.subplots(2, 3, sharex=True, sharey=True, figsize=(12,4))\nfig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)\n\ndef format_func(x, loc):\n if x == 0:\n return '0'\n elif x == 1:\n return '$\\delta/2$'\n elif x == -1:\n return '-$\\delta/2$'\n else:\n return '%i$\\delta$' % (int(x/2))\n\nfor i, kernel in 
enumerate(['gaussian', 'tophat', 'epanechnikov',\n 'exponential', 'linear', 'cosine']):\n axi = ax.ravel()[i]\n log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)\n axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')\n axi.text(-2.6, 0.95, kernel)\n\n axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))\n axi.xaxis.set_major_locator(plt.MultipleLocator(1))\n axi.yaxis.set_major_locator(plt.NullLocator())\n\n axi.set_ylim(0, 1.05)\n axi.set_xlim(-2.9, 2.9)\n\nax[0, 1].set_title('Available Kernels')\n", "_____no_output_____" ] ], [ [ "# Kernel Density Estimation: Example\n\nLet's look at an example in the context of the 1D points we have seen earlier.\n\nWe will fit a model of the form\n$$P(x) = \sum_{i=1}^n K(x, x^{(i)})$$\nwith a Gaussian kernel $K(x,z; \delta) \propto \exp(-||x-z||^2/2\delta^2)$.", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KernelDensity\n\nkde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X) # fit a KDE model\nx_ticks = np.linspace(-5, 10, 1000)[:, np.newaxis] # choose 1000 points on x-axis\nlog_density = kde.score_samples(x_ticks) # compute density at 1000 points\ngaussian_kernel = lambda z : lambda x: np.exp(-np.abs(x-z)**2/(0.75**2)) # gaussian kernel\nkernel_linspace = lambda x : np.linspace(x-1.2,x+1.2,30)\n\nplt.figure(figsize=(12,4))\nplt.plot(x_ticks[:, 0], np.exp(log_density)) # plot the density estimate\nplt.plot(X[:, 0], np.full(X.shape[0], -0.01), '.k', markersize=10) # plot the points in X\nplt.plot(kernel_linspace(4), 0.07*gaussian_kernel(4)(kernel_linspace(4)), '--', color='r', alpha=0.75)\nplt.plot(kernel_linspace(5), 0.07*gaussian_kernel(5)(kernel_linspace(5)), '--', color='r', alpha=0.75)\nplt.plot(kernel_linspace(1), 0.07*gaussian_kernel(1)(kernel_linspace(1)), '--', color='r', alpha=0.75)\nplt.xlim(-4, 9)\nplt.ylim(-0.02, 0.32)", "_____no_output_____" ] ], [ [ "# KDE in Higher Dimensions\n\nIn principle, kernel density estimation also works in higher dimensions.\n\nHowever, the number of datapoints needed for a good fit increases exponentially with the dimension, which limits the applications of this model in high dimensions.", "_____no_output_____" ], [ "# Choosing Hyperparameters\n\nEach kernel has a notion of \"bandwidth\" $\delta$. This is a hyperparameter that controls the \"smoothness\" of the fit.\n* We can choose it using inspection or heuristics like we did for $K$ in $K$-Means.\n* Because we have a probabilistic model, we can also estimate likelihood on a holdout dataset (more on this later!)
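\n\nAs a rough illustration of the holdout idea, here is a minimal sketch, assuming scikit-learn's GridSearchCV and the X defined earlier (the grid of candidate bandwidths is an arbitrary choice):\n\n```python\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.neighbors import KernelDensity\nimport numpy as np\n\n# KernelDensity.score returns the total log-likelihood, so a cross-validated\n# grid search picks the bandwidth with the best held-out likelihood\nparams = {'bandwidth': np.logspace(-1, 1, 20)}\ngrid = GridSearchCV(KernelDensity(kernel='gaussian'), params, cv=5)\ngrid.fit(X)\nprint(grid.best_params_)\n```\n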
", "_____no_output_____" ], [ "Let's illustrate how the bandwidth affects smoothness via an example.", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KernelDensity\n\nkde1 = KernelDensity(kernel='gaussian', bandwidth=3).fit(X) # fit a KDE model\nkde2 = KernelDensity(kernel='gaussian', bandwidth=0.2).fit(X) # fit a KDE model\n\nfig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(12,4))\nax[0].fill(x_ticks[:, 0], np.exp(kde1.score_samples(x_ticks))) # plot the density estimate\nax[1].fill(x_ticks[:, 0], np.exp(kde2.score_samples(x_ticks))) # plot the density estimate\nax[0].set_title('Bandwidth Too High')\nax[1].set_title('Bandwidth Too Low')\n\nfor axi in ax.ravel():\n axi.plot(X[:, 0], np.full(X.shape[0], -0.01), '.k', markersize=10) # plot the points in X\n axi.set_xlim(-4, 9)\n axi.set_ylim(-0.02, 0.4)", "_____no_output_____" ] ], [ [ "# Algorithm: Kernel Density Estimation\n\n* __Type__: Unsupervised learning (density estimation).\n* __Model family__: Non-parametric. Sum of $n$ kernels.\n* __Objective function__: Log-likelihood to choose optimal bandwidth.\n* __Optimizer__: Grid search.", "_____no_output_____" ], [ "# Pros and Cons of KDE\n\nPros:\n* Can approximate any data distribution arbitrarily well.\n\nCons:\n* Need to store the entire dataset to make queries, which is computationally prohibitive.\n* The number of data points needed scales exponentially with dimension (\"curse of dimensionality\").", "_____no_output_____" ], [ "<left><img width=25% src=\"https://aml.hongjunwu.com/slides/img/cornell_tech2.svg\"></left>\n# Part 3: Latent Variable Models\n\nThe probabilistic models we have seen earlier often need to approximate complex distributions.\n\nIn order to make our models more expressive, we introduce additional structure in the form of latent variables.", "_____no_output_____" ], [ "# Review: Probabilistic Models\n\nAn unsupervised probabilistic model is a probability distribution\n$$P(x) : \mathcal{X} \to [0,1].$$\nThis model can approximate the data distribution $P_\text{data}$.", "_____no_output_____" ], [ "Probabilistic models also have *parameters* $\theta \in \Theta$, which we denote as\n$$P_\theta(x) : \mathcal{X} \to [0,1].$$", "_____no_output_____" ], [ "# Review: Maximum Likelihood\n\nIn maximum likelihood learning, we maximize the *empirical log-likelihood*\n$$\n\max_{P_\theta} \hspace{2mm} \frac{1}{|\cd|}\sum_{\x\in \cd} \log P_\theta(\x),\n$$\nwhere $\mathcal{D} = \{x^{(i)} \mid i = 1,2,...,n\}$ is a dataset of *independent and identically distributed* (IID) samples from $P_\text{data}$.", "_____no_output_____" ], [ "# Latent Variable Models: Motivation\n\nConsider the following dataset of human faces.\n<center><img width=30% src=\"https://aml.hongjunwu.com/slides/img/celebA.png\"></center>\n\n* It contains variability due to gender, eye color, hair color, pose, etc.\n* However, unless these images are annotated, these factors of variation are not explicitly available (latent).", "_____no_output_____" ], [ "__Idea__: Explicitly model these factors using latent variables $z$.", "_____no_output_____" ], [ "# Latent Variable Models: Definition\n\nA latent-variable model is a probability distribution\n$$P_\theta(x, z) : \mathcal{X} \times \mathcal{Z} \to 
[0,1]$$\n\ncontaining two sets of variables:\n* Observed $x$ that represent the high-dimensional object we are trying to model.\n* Latent $z$ that are not in the training set, but can encode hidden structure in the data.", "_____no_output_____" ], [ "This model defines a $P_\\theta(x) = \\sum_{z \\in \\mathcal{Z}} P_\\theta(x,z)$ that can approximate the data distribution $P_\\text{data}(x)$.", "_____no_output_____" ], [ "# Latent Variable Models: Example\n\nConsider the following example of latent variables\n<center><img width=50% src=\"https://aml.hongjunwu.com/slides/img/cartoonLVmodel.png\"></center>\n\nOnly shaded variables $x$ are observed in the data (pixel values).\nLatent variables $z$ correspond to high level features\n* If $z$ is chosen properly, $\\Pr(x| z)$ is much simpler than $\\Pr(x)$\n* We can identify features via $\\Pr(z \\mid x)$, e.g., $\\Pr(\\text{eye color} = \\text{blue} | \\x)$", "_____no_output_____" ], [ "# Mixtures of Gaussians\n\nA mixture of Gaussians is a probability distribution $P(x,z)$ that factorizes into two components:\n<center><img width=50% src=\"https://aml.hongjunwu.com/slides/img/mogdensity_v3.png\"></center>\n\n* $P_\\theta(z)$ is a [categorical](https://en.wikipedia.org/wiki/Categorical_distribution) distribution, and $P_\\theta(z=k) = \\phi_k$.\n* $P_\\theta(x\\mid z=k)$ is a [multivariate Gaussian](https://en.wikipedia.org/wiki/Multivariate_normal_distribution) $\\mathcal{N}(x; \\mu_k, \\Sigma_k)$ with mean and covariance $\\mu_k, \\Sigma_k$.", "_____no_output_____" ], [ "Thus, $P_\\theta(x,z)$ is a mixture of $K$ Gaussians:\n$$P_\\theta(x,z) = \\sum_{k=1}^K P_\\theta(z=k) P_\\theta(x|z=k) = \\sum_{k=1}^K \\phi_k \\mathcal{N}(x; \\mu_k, \\Sigma_k)$$", "_____no_output_____" ], [ "Mixtures of Gaussians fit more complex distributions than one Gaussian.", "_____no_output_____" ], [ "\nRaw data | Single Gaussian | Mixture of Gaussians\n--|--|---\n<img width=90% src=\"https://aml.hongjunwu.com/slides/img/oldfaithful_v2.png\"> | <img width=90% src=\"https://aml.hongjunwu.com/slides/img/oldfSingle_v2.png\"> | <img width=90% src=\"https://aml.hongjunwu.com/slides/img/oldfMOG_v2.png\">", "_____no_output_____" ], [ "# Representational Power of LVMs\n\nAn important reason for using LVMs is that they are more expressive models.\n\n<center><img width=50% src=\"https://aml.hongjunwu.com/slides/img/mogdensity1d_v2.png\"></center>", "_____no_output_____" ], [ "# Feature Representations from LVMs\n\nGiven $P_\\theta(x,z)$ we can compute $P_\\theta(z|x)$ to find useful latent representations.\n\n<center><img width=70% src=\"https://aml.hongjunwu.com/slides/img/dcgan_feats.png\"></center>", "_____no_output_____" ], [ "Latent variables are also useful to identify clusters in the data.\n\n<center><img width=50% src=\"https://aml.hongjunwu.com/slides/img/aae_dim_reduc_2.png\"></center>", "_____no_output_____" ], [ "# Learning Latent Variable Models\n\nWe can learn latent variable models using maximum likelihood:\n$$\n\\sum_{\\x\\in \\cd} \\log \\Pr(\\x ; \\theta) = \\sum_{\\x\\in \\cd} \\log \\sum_{z \\in \\mathcal{Z}}\\Pr(\\x, z; \\theta)\n$$", "_____no_output_____" ], [ "However, optimizing this objective is almost always intractable.\n* Consider a latent variable $z \\in \\{0,1\\}^{30}$, of thirty binary dimensions.\n* We need to sum over $2^{30} \\approx$ 1B possible values that $z$ can take.\n* For continuous variables we may need to solve an intractable integral $\\int P(x,z) dz$.", "_____no_output_____" ], [ "# Approximate Inference in LVMs\n\nIn practice, we 
need to compute the likelihood objective (and its gradients) *approximately*.\n* This is called approximate inference. We will see examples soon.\n* The alternating algorithm for K-Means was a first simple example.", "_____no_output_____" ], [ "# Summary of LVMs\n\nLatent-variable models are an important class of machine learning models.\n* They can represent complex probability distributions\n* They can find unsupervised feature representations\n\nThey also have drawbacks:\n* Learning these models is computationally intractable and requires approximate algorithms\n* Computing $P(z|x)$ to obtain latent features is also often intractable.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec86bf8f66cfe0cc8ecf23882c9715643c8ad966
273,481
ipynb
Jupyter Notebook
Detecting_Customers_Subscription.ipynb
annadutkiewicz/Detecting_Customers_Subscription
1adaf4e776e3a03e1cf7a249daf02e2104e6be5c
[ "MIT" ]
null
null
null
Detecting_Customers_Subscription.ipynb
annadutkiewicz/Detecting_Customers_Subscription
1adaf4e776e3a03e1cf7a249daf02e2104e6be5c
[ "MIT" ]
null
null
null
Detecting_Customers_Subscription.ipynb
annadutkiewicz/Detecting_Customers_Subscription
1adaf4e776e3a03e1cf7a249daf02e2104e6be5c
[ "MIT" ]
null
null
null
99.919985
76,952
0.798714
[ [ [ "# DETECTING CUSTOMERS SUBSCRIPTION", "_____no_output_____" ], [ "## 1. DATA IMPORT", "_____no_output_____" ], [ "### Load libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport time\nfrom dateutil import parser", "_____no_output_____" ] ], [ [ "### Load dataset", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv('input/appdata10.csv')", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ] ], [ [ "### Exploratory data analysis (EDA)", "_____no_output_____" ] ], [ [ "dataset.describe()", "_____no_output_____" ] ], [ [ "## 2. DATA CLEANING", "_____no_output_____" ], [ "### Convert column type", "_____no_output_____" ], [ "Let's convert 'hour' column from 'str' to 'int' type. By using slice, we extract 1st and 2nd number:", "_____no_output_____" ] ], [ [ "dataset['hour'] = dataset['hour'].str.slice(1, 3).astype(int)", "_____no_output_____" ], [ "dataset.sample(5)", "_____no_output_____" ] ], [ [ "### Remove unnecessary columns", "_____no_output_____" ], [ "We remove unnecessary columns so that we are left only with numerical columns:", "_____no_output_____" ] ], [ [ "dataset2 = dataset.copy().drop(columns = ['user', 'screen_list', 'enrolled_date', 'first_open', 'enrolled'])\ndataset2.head()", "_____no_output_____" ] ], [ [ "## 3. DATA VISUALIZING", "_____no_output_____" ], [ "### Plot histogram", "_____no_output_____" ], [ "We are going to plot every image on one picture, we are going to use a loop. We are going to hav 7 images so we will plot them in 3 rows and 3 columns. Then we are going to specify bins number by vals variable. We will use 'unique' function to have exactly the same number of bins as the number of unique values:", "_____no_output_____" ] ], [ [ "plt.suptitle('Histograms of Numerical Columns', fontsize = 20, y = 1.1)\n\nfor i in range(1, dataset2.shape[1]+1):\n plt.subplot(3, 3, i)\n f = plt.gca()\n f.set_title(dataset2.columns.values[i - 1])\n \n vals = np.size(dataset2.iloc[:, i - 1].unique())\n \n plt.hist(dataset2.iloc[:, i - 1], bins = vals)\n plt.tight_layout()", "_____no_output_____" ] ], [ [ "Remarks:\n- day of week has pretty even distribution, no particular day of week seem to have influence on if User install the app\n- we have a substantial drop around 10 but note that those hours are in UTC format and the data are from US time zone so those hours are in reality in the middle of the night\n- age seem to have even distribution apart from 2-3 jumps we have around 30, 40 and 52\n- number of screens seem to be also evenly distributed apart from one outlier\n- we may observe that not so many people played minigame, similar with using premium feature and liked/ not liked", "_____no_output_____" ], [ "## 4. 
CORRELATION WITH RESPONSE", "_____no_output_____" ], [ "We are going to use the corrwith function, which shows the correlation of all the fields in this dataframe with the column that we specify as an argument, in this case 'enrolled':", "_____no_output_____" ] ], [ [ "dataset2.corrwith(dataset.enrolled).plot.bar(figsize = (20,10),\n title = 'Correlation with Response Variable',\n fontsize = 15, rot = 45,\n grid = True);", "_____no_output_____" ] ], [ [ "Remarks:\n- day of the week seems to be correlated with the response variable, meaning that the later in the week, the more likely a user is to enroll, although this relation is very weak\n- the earlier the hour and the younger the user, the more likely they are to enroll\n- the more screens a user saw, the more likely they are to enroll\n- users who played the minigame are more likely to enroll\n- the more a user used the premium features, the less likely they are to enroll\n- liked is not that strongly correlated with the response variable", "_____no_output_____" ], [ "## 5. CORRELATION MATRIX", "_____no_output_____" ], [ "Now we are going to plot a correlation matrix between every pair of fields. Let's mask the upper triangle of the matrix, as it is symmetric:", "_____no_output_____" ] ], [ [ "sns.set(style=\"white\", font_scale=2)", "_____no_output_____" ] ], [ [ "### Compute the correlation matrix", "_____no_output_____" ] ], [ [ "corr = dataset2.corr()", "_____no_output_____" ] ], [ [ "### Generate a mask for the upper triangle", "_____no_output_____" ] ], [ [ "mask = np.zeros_like(corr, dtype=np.bool)\nmask[np.triu_indices_from(mask)] = True", "_____no_output_____" ] ], [ [ "### Draw the heatmap with the mask and correct aspect ratio", "_____no_output_____" ] ], [ [ "f, ax = plt.subplots(figsize=(18, 15))\nf.suptitle(\"Correlation Matrix\", fontsize = 40)\n\ncmap = sns.diverging_palette(220, 10, as_cmap=True)\n\nax = sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": 0.5});\nbottom, top = ax.get_ylim()\nax.set_ylim(bottom + 0.5, top - 0.5)", "_____no_output_____" ] ], [ [ "Remarks:\n- if the colour between two specific features is (almost) white, there is very little or no correlation between those features\n- there is a positive correlation between those who played the minigame and those who used the premium features\n- age and the number of screens seem to be strongly correlated in a negative sense, i.e. the older the person is, the fewer screens they see\n- there does not seem to be strong linear dependence between the features, so we can treat the features as roughly independent", "_____no_output_____" ], [ "## 6. 
FEATURE ENGINEERING", "_____no_output_____" ], [ "Feature engineering is the process of selecting and transforming variables when creating a predictive model using machine learning or statistical modeling.", "_____no_output_____" ], [ "### Formatting date columns", "_____no_output_____" ], [ "Let's check first data types we have:", "_____no_output_____" ] ], [ [ "dataset.dtypes", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ], [ "dataset['first_open'] = [parser.parse(row_data) for row_data in dataset['first_open']]", "_____no_output_____" ], [ "dataset['enrolled_date'] = [parser.parse(row_data) if isinstance(row_data, str) else row_data for row_data in dataset['enrolled_date']]", "_____no_output_____" ] ], [ [ "We can now observe that our objects are now datetime objects:", "_____no_output_____" ] ], [ [ "dataset.dtypes", "_____no_output_____" ] ], [ [ "### Selecting time for response", "_____no_output_____" ], [ "Now we will need to calculate the difference between those days and set the type to be displayed in hours:", "_____no_output_____" ] ], [ [ "dataset['difference'] = (dataset['enrolled_date'] - dataset['first_open']).astype('timedelta64[h]')", "_____no_output_____" ], [ "plt.hist(dataset['difference'].dropna(), color = '#3F5D7D')\nplt.title('Distribution of Time-Since-Enrolled')\nplt.show;", "_____no_output_____" ], [ "plt.hist(dataset['difference'].dropna(), color = '#3F5D7D', range = [0, 100])\nplt.title('Distribution of Time-Since-Enrolled')\nplt.show;", "_____no_output_____" ] ], [ [ "Let's set the limit for enrollments to be up to 48 hours:", "_____no_output_____" ] ], [ [ "dataset.loc[dataset.difference > 48, 'enrolled'] = 0", "_____no_output_____" ] ], [ [ "Let's drop the columns we don't need anymore:", "_____no_output_____" ] ], [ [ "dataset = dataset.drop(columns=['difference', 'enrolled_date', 'first_open'])", "_____no_output_____" ] ], [ [ "### Load top screens", "_____no_output_____" ], [ "Let's now read screens that were most popular among the Users:", "_____no_output_____" ] ], [ [ "top_screens = pd.read_csv('input/top_screens.csv').top_screens.values", "_____no_output_____" ], [ "top_screens", "_____no_output_____" ] ], [ [ "### Mapping screens to fields", "_____no_output_____" ], [ "Let's add ',' at the end of every screen_list column:", "_____no_output_____" ] ], [ [ "dataset['screen_list'] = dataset.screen_list.astype(str) + ','", "_____no_output_____" ] ], [ [ "Now we are going to iterate over top_screens. The loop creates columns with top_screen names:", "_____no_output_____" ] ], [ [ "for sc in top_screens:\n dataset[sc] = dataset.screen_list.str.contains(sc).astype(int) #convert to integer, otherwise we will get True/False\n dataset['screen_list'] = dataset.screen_list.str.replace(sc+',','') #remove used top_screen from screen_list", "_____no_output_____" ] ], [ [ "As we are left with other screen types also, we are going to add all of those into \"Other\" column. We will indicate how many other screens we have. As we have added ',' at the end, it is enough if we calculate number of commas:", "_____no_output_____" ] ], [ [ "dataset['Other'] = dataset.screen_list.str.count(',')", "_____no_output_____" ], [ "dataset = dataset.drop(columns = 'screen_list')", "_____no_output_____" ] ], [ [ "### Funnels", "_____no_output_____" ], [ "Now we are going to work with funnels. 
Funnels are a group of screens that belong to the same set.", "_____no_output_____" ] ], [ [ "savings_screens = [\n 'Saving1',\n 'Saving2',\n 'Saving2Amount',\n 'Saving4',\n 'Saving5',\n 'Saving6',\n 'Saving7',\n 'Saving8',\n 'Saving9',\n 'Saving10'\n]\ndataset['SavingsCount'] = dataset[savings_screens].sum(axis = 1)\ndataset = dataset.drop(columns = savings_screens)\n\ncm_screens = [\n 'Credit1',\n 'Credit2',\n 'Credit3',\n 'Credit3Container',\n 'Credit3Dashboard'\n]\ndataset['CMCount'] = dataset[cm_screens].sum(axis=1)\ndataset = dataset.drop(columns=cm_screens)\n\ncc_screens = [\n 'CC1',\n 'CC1Category',\n 'CC3'\n]\ndataset['CCCount'] = dataset[cc_screens].sum(axis=1)\ndataset = dataset.drop(columns=cc_screens)\n\nloan_screens = [\n 'Loan',\n 'Loan2',\n 'Loan3',\n 'Loan4'\n]\ndataset['LoansCount'] = dataset[loan_screens].sum(axis=1)\ndataset = dataset.drop(columns=loan_screens)", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ], [ "dataset.describe()", "_____no_output_____" ], [ "dataset.columns", "_____no_output_____" ], [ "dataset.to_csv('new_appdata10.csv', index = False)", "_____no_output_____" ] ], [ [ "## 7. DATA PREPROCESSING", "_____no_output_____" ], [ "Import data:", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv('input/new_appdata10.csv')", "_____no_output_____" ] ], [ [ "### Splitting independent and response variables", "_____no_output_____" ], [ "Let's split the response variable from the independent features:", "_____no_output_____" ] ], [ [ "X = dataset.drop(columns = 'enrolled')", "_____no_output_____" ], [ "y = dataset['enrolled']", "_____no_output_____" ] ], [ [ "### Splitting the dataset into the training set and test set", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)", "_____no_output_____" ] ], [ [ "### Removing identifiers", "_____no_output_____" ], [ "For training and testing, the user id is not needed in our model, but at the end we are going to associate each prediction with the user it came from, so let's save it away before removing it:", "_____no_output_____" ] ], [ [ "train_identifier = X_train['user']\nX_train = X_train.drop(columns = 'user')", "_____no_output_____" ], [ "test_identifier = X_test['user']\nX_test = X_test.drop(columns = 'user')", "_____no_output_____" ] ], [ [ "### Feature scaling", "_____no_output_____" ], [ "The next step will be feature scaling.", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import StandardScaler", "_____no_output_____" ], [ "sc_X = StandardScaler()", "_____no_output_____" ] ], [ [ "StandardScaler returns a numpy array. The problem with that is that it loses the column names and index. The index ties each row of fields to its user, and the columns are the features our model is built on. 
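\n\nFor reference, the DataFrame-wrapping steps below can be collapsed into one call (a sketch assuming sc_X and a fully numeric X_train; the name X_train_scaled is just for illustration):\n\n```python\nimport pandas as pd\n\n# wrap the scaled array back into a DataFrame, preserving columns and index\nX_train_scaled = pd.DataFrame(sc_X.fit_transform(X_train),\n                              columns=X_train.columns.values,\n                              index=X_train.index.values)\n```\n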
Below we do the same thing step by step, saving the scaled part into a different dataframe:", "_____no_output_____" ] ], [ [ "for n_train,v_train in X_train.items():\n if v_train.dtype == \"object\":\n X_train[n_train] = v_train.factorize()[0]", "_____no_output_____" ], [ "for n_test,v_test in X_test.items():\n if v_test.dtype == \"object\":\n X_test[n_test] = v_test.factorize()[0]", "_____no_output_____" ], [ "X_train2 = pd.DataFrame(sc_X.fit_transform(X_train))", "_____no_output_____" ] ], [ [ "For the test set we only call transform, because the scaler was already fitted to the training set:", "_____no_output_____" ] ], [ [ "X_test2 = pd.DataFrame(sc_X.transform(X_test))", "_____no_output_____" ], [ "X_train2.columns = X_train.columns.values\nX_test2.columns = X_test.columns.values", "_____no_output_____" ], [ "X_train2.index = X_train.index.values\nX_test2.index = X_test.index.values", "_____no_output_____" ], [ "X_train = X_train2\nX_test = X_test2", "_____no_output_____" ] ], [ [ "## 8. MODEL BUILDING", "_____no_output_____" ], [ "### Fitting model to the training set", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression", "_____no_output_____" ] ], [ [ "The l1 penalty changes the model from a regular logistic regression model to an l1-regularized one. We do it because, as mentioned above, screens can be correlated with one another. Maybe one screen is next to the other, and maybe if you click on one screen, then you will click on the other. We address a lot of these correlations while creating the funnel features that incorporate all the screens that belong to the same set, but there might be other correlations that exist even though the screens do not belong to the same set. What the l1 penalty does is penalize those cases in which one particular feature gets a very big coefficient. This is important in the model when we are working with app screens.", "_____no_output_____" ] ], [ [ "classifier = LogisticRegression(random_state = 0, penalty = 'l1')", "_____no_output_____" ], [ "classifier.fit(X_train, y_train)", "C:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. 
Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:929: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\n" ] ], [ [ "### Predicting test set", "_____no_output_____" ] ], [ [ "y_pred = classifier.predict(X_test)", "_____no_output_____" ] ], [ [ "### Evaluating results", "_____no_output_____" ], [ "Let's evaluate if our predictions were accurate or not", "_____no_output_____" ] ], [ [ "from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, classification_report", "_____no_output_____" ] ], [ [ "### Print confusion matrix", "_____no_output_____" ] ], [ [ "cm = confusion_matrix(y_test, y_pred)\nprint(cm)", "[[3741 38]\n [ 1 6220]]\n" ] ], [ [ "### Print heatmap", "_____no_output_____" ] ], [ [ "ax = sns.heatmap(cm, annot=True, fmt='d')\nbottom, top = ax.get_ylim()\nax.set_ylim(bottom + 0.5, top - 0.5)", "_____no_output_____" ] ], [ [ "### Print classification report", "_____no_output_____" ] ], [ [ "print(classification_report(y_test, y_pred, digits=4))", " precision recall f1-score support\n\n 0 0.9997 0.9899 0.9948 3779\n 1 0.9939 0.9998 0.9969 6221\n\n accuracy 0.9961 10000\n macro avg 0.9968 0.9949 0.9958 10000\nweighted avg 0.9961 0.9961 0.9961 10000\n\n" ] ], [ [ "We managed to improve a bit our model - now only 39 samples are misclassified. The good point is that only 1 out of 39 elements that are misclassified are an error type 2.", "_____no_output_____" ] ], [ [ "print('Test Data Accuracy: %0.4f' % accuracy_score(y_test, y_pred))", "Test Data Accuracy: 0.9961\n" ], [ "from sklearn.model_selection import cross_val_score", "_____no_output_____" ], [ "accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)", "C:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. 
Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\annak\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n" ], [ "print('Logistic Accuracy: %0.3f (+/- %0.3f)' % (accuracies.mean(), accuracies.std() * 2))", "Logistic Accuracy: 0.996 (+/- 0.022)\n" ] ], [ [ "### Formatting final results", "_____no_output_____" ] ], [ [ "final_results = pd.concat([y_test, test_identifier], axis = 1).dropna()", "_____no_output_____" ], [ "final_results['predicted_results'] = y_pred", "_____no_output_____" ], [ "final_results[['user', 'enrolled', 'predicted_results']].reset_index(drop=True)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec86c59c3e876475d5630d0eef2516ec8a91cb34
2,450
ipynb
Jupyter Notebook
downloaded_kernels/loan_data/kernel_172.ipynb
josepablocam/common-code-extraction
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
[ "MIT" ]
null
null
null
downloaded_kernels/loan_data/kernel_172.ipynb
josepablocam/common-code-extraction
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
[ "MIT" ]
null
null
null
downloaded_kernels/loan_data/kernel_172.ipynb
josepablocam/common-code-extraction
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
[ "MIT" ]
2
2021-07-12T00:48:08.000Z
2021-08-11T12:53:05.000Z
2,450
2,450
0.692653
[ [ [ "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here are several helpful packages to load in\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n# Any results you write to the current directory are saved as output.\n\n# Testing stuff", "_____no_output_____" ], [ "print('hi')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
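The starter cell in the record above lists the input directory by shelling out to `ls` via subprocess, which fails on Windows. A portable alternative, shown purely for illustration and not part of the original kernel, is os.listdir:

```python
# Portable replacement for check_output(["ls", "../input"]): no subprocess
# needed, and it works on Windows and POSIX alike.
import os
print(os.listdir("../input"))
```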
ec86ca6751f98761a914b00e49329d7bf3a21a49
5,344
ipynb
Jupyter Notebook
Web Scraping (Beautiful Soup, Scrapy, Selenium)/webScraping_Day65/Scrape-InfiniteScrollPage/solution.ipynb
pooja-gera/TheWireUsChallenge
18abb5ff3fd31b7dbfef41b8008f91d3fac029d3
[ "MIT" ]
null
null
null
Web Scraping (Beautiful Soup, Scrapy, Selenium)/webScraping_Day65/Scrape-InfiniteScrollPage/solution.ipynb
pooja-gera/TheWireUsChallenge
18abb5ff3fd31b7dbfef41b8008f91d3fac029d3
[ "MIT" ]
null
null
null
Web Scraping (Beautiful Soup, Scrapy, Selenium)/webScraping_Day65/Scrape-InfiniteScrollPage/solution.ipynb
pooja-gera/TheWireUsChallenge
18abb5ff3fd31b7dbfef41b8008f91d3fac029d3
[ "MIT" ]
1
2021-05-21T09:30:41.000Z
2021-05-21T09:30:41.000Z
28.57754
92
0.477919
[ [ [ "from selenium import webdriver\nimport time\nfrom bs4 import BeautifulSoup\nimport pandas as pd", "_____no_output_____" ], [ "# start Chrome (local chromedriver binary) and open the infinite-scroll demo page\nbrowser = webdriver.Chrome('/Users/jappanjeetsingh/Downloads/Drivers/chromedriver')\ntime.sleep(4)\nbrowser.get(\"http://quotes.toscrape.com/scroll\")", "_____no_output_____" ], [ "# scroll to the bottom of the page repeatedly until no new quotes load\ndef scrollDown():\n    last_height = browser.execute_script(\"return document.body.scrollHeight\")\n    SCROLL_PAUSE_TIME = 7\n    while True:\n        browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n        time.sleep(SCROLL_PAUSE_TIME)  # wait for the next batch of quotes to load\n        new_height = browser.execute_script(\"return document.body.scrollHeight\")\n        if new_height == last_height:  # page height stopped growing: we reached the bottom\n            break\n        last_height = new_height\n    time.sleep(1.2)\n\n# call the function\nscrollDown()\n# data containers\ntexts = []\nauthors = []\n# parse the fully loaded page, then close the browser\nsoup = BeautifulSoup(browser.page_source, \"html.parser\")\nbrowser.quit()\n# find all the quotes\nquotes = soup.find_all(\"div\", {\"class\": \"quote\"})\nfor quote in quotes:\n    texts.append(quote.find(\"span\", {\"class\": \"text\"}).text)\n    authors.append(quote.find(\"small\", {\"class\": \"author\"}).text)", "_____no_output_____" ], [ "# create a dataframe for storing the quotes\ndf = pd.DataFrame({\n    \"Quote\": texts,\n    \"Author\": authors\n})", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
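The scraping record above hard-codes a driver path, leaves Chrome running if parsing raises, and fixes the scroll pause at 7 seconds. Below is a hedged sketch of the same scroll-until-the-height-stops-growing pattern wrapped in a reusable function; DRIVER_PATH is a placeholder for your own chromedriver location, and the Selenium-3-style webdriver.Chrome(path) call matches the one used in the notebook, not a recommendation for newer Selenium versions.

```python
import time
from selenium import webdriver
from bs4 import BeautifulSoup

DRIVER_PATH = "/path/to/chromedriver"  # placeholder: point at your own driver

def scrape_infinite_scroll(url, pause=2.0, max_rounds=50):
    """Scroll an infinite-scroll page to the bottom and return the parsed HTML."""
    browser = webdriver.Chrome(DRIVER_PATH)
    try:
        browser.get(url)
        last_height = browser.execute_script("return document.body.scrollHeight")
        for _ in range(max_rounds):  # hard cap so a misbehaving page cannot loop forever
            browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(pause)  # give the page time to fetch the next batch
            new_height = browser.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                break  # height stopped growing: all content is loaded
            last_height = new_height
        return BeautifulSoup(browser.page_source, "html.parser")
    finally:
        browser.quit()  # released even if scrolling or parsing raises

soup = scrape_infinite_scroll("http://quotes.toscrape.com/scroll")
print(len(soup.find_all("div", {"class": "quote"})))  # 100 on this demo site
```

The try/finally guarantees the Chrome process is cleaned up, which the original cell only does on the happy path.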