Dataset columns (name: type, with the reported min to max value or length range where given):

hexsha: stringlengths (40 to 40)
size: int64 (6 to 14.9M)
ext: stringclasses (1 value)
lang: stringclasses (1 value)
max_stars_repo_path: stringlengths (6 to 260)
max_stars_repo_name: stringlengths (6 to 119)
max_stars_repo_head_hexsha: stringlengths (40 to 41)
max_stars_repo_licenses: list
max_stars_count: int64 (1 to 191k)
max_stars_repo_stars_event_min_datetime: stringlengths (24 to 24)
max_stars_repo_stars_event_max_datetime: stringlengths (24 to 24)
max_issues_repo_path: stringlengths (6 to 260)
max_issues_repo_name: stringlengths (6 to 119)
max_issues_repo_head_hexsha: stringlengths (40 to 41)
max_issues_repo_licenses: list
max_issues_count: int64 (1 to 67k)
max_issues_repo_issues_event_min_datetime: stringlengths (24 to 24)
max_issues_repo_issues_event_max_datetime: stringlengths (24 to 24)
max_forks_repo_path: stringlengths (6 to 260)
max_forks_repo_name: stringlengths (6 to 119)
max_forks_repo_head_hexsha: stringlengths (40 to 41)
max_forks_repo_licenses: list
max_forks_count: int64 (1 to 105k)
max_forks_repo_forks_event_min_datetime: stringlengths (24 to 24)
max_forks_repo_forks_event_max_datetime: stringlengths (24 to 24)
avg_line_length: float64 (2 to 1.04M)
max_line_length: int64 (2 to 11.2M)
alphanum_fraction: float64 (0 to 1)
cells: list
cell_types: list
cell_type_groups: list
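Each record describes one source notebook: repository metadata, line statistics, and the notebook content itself in the list-valued cells, cell_types, and cell_type_groups columns. As a rough illustration only, records with this schema could be inspected along the following lines; the file name data.parquet and the use of pandas are assumptions made for the sketch and are not stated anywhere in the listing itself.

```python
# Rough sketch, not taken from the dataset listing: assumes the records are
# stored in a Parquet file named "data.parquet" (a placeholder name) and that
# pandas with a Parquet engine is installed.
import pandas as pd

df = pd.read_parquet("data.parquet")

# Scalar per-notebook metadata columns from the schema above.
print(df[["hexsha", "size", "ext", "lang", "max_stars_repo_name"]].head())

# "cells", "cell_types" and "cell_type_groups" are list-valued columns that
# carry the notebook content and its per-cell type annotations.
first = df.iloc[0]
print(len(first["cell_types"]), "cells in the first record")
```

The example records below list their fields in the same order as the schema above.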
hexsha: ec7410b67db54e86539089944df3d32f415eb783
size: 20,286
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Mycodes/chapter03_DL-basics/3.6_softmax-regression-scratch.ipynb
max_stars_repo_name: pengfei-chen/Dive-into-DL-PyTorch
max_stars_repo_head_hexsha: e20eb799717a27b83550ce533bb158a690c88a47
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Mycodes/chapter03_DL-basics/3.6_softmax-regression-scratch.ipynb
max_issues_repo_name: pengfei-chen/Dive-into-DL-PyTorch
max_issues_repo_head_hexsha: e20eb799717a27b83550ce533bb158a690c88a47
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Mycodes/chapter03_DL-basics/3.6_softmax-regression-scratch.ipynb
max_forks_repo_name: pengfei-chen/Dive-into-DL-PyTorch
max_forks_repo_head_hexsha: e20eb799717a27b83550ce533bb158a690c88a47
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 24.470446
max_line_length: 2,048
alphanum_fraction: 0.492458
[ [ [ "# 3.6 softmax回归的从零开始实现", "_____no_output_____" ] ], [ [ "import torch\nimport torchvision\nimport numpy as np\nimport sys\nsys.path.append(\"..\") # 为了导入上层目录的d2lzh_pytorch\nimport d2lzh_pytorch as d2l\n\nprint(torch.__version__)\nprint(torchvision.__version__)", "1.9.1+cpu\n0.10.1+cpu\n" ] ], [ [ "## 3.6.1 获取和读取数据", "_____no_output_____" ] ], [ [ "batch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)", "D:\\MySoftware\\Anaconda3\\lib\\site-packages\\torchvision\\datasets\\mnist.py:498: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ..\\torch\\csrc\\utils\\tensor_numpy.cpp:180.)\n return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)\n" ] ], [ [ "## 3.6.2 初始化模型参数", "_____no_output_____" ] ], [ [ "num_inputs = 784\nnum_outputs = 10\n\nW = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_outputs)), dtype=torch.float)\nb = torch.zeros(num_outputs, dtype=torch.float)", "_____no_output_____" ], [ "W", "_____no_output_____" ], [ "W.shape", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "W.requires_grad_(requires_grad=True)\nb.requires_grad_(requires_grad=True) ", "_____no_output_____" ], [ "# keepdim:保持指定的维度不变\nX = torch.tensor([[1, 2, 3], [4, 5, 6]])\nprint(X.sum(dim=0, keepdim=True))\nprint(X.sum(dim=1, keepdim=True))\n", "tensor([[5, 7, 9]])\ntensor([[ 6],\n [15]])\n" ] ], [ [ "## 3.6.3 实现softmax运算", "_____no_output_____" ] ], [ [ "def softmax(X):\n X_exp = X.exp()\n partition = X_exp.sum(dim=1, keepdim=True)\n return X_exp / partition # 这里应用了广播机制", "_____no_output_____" ], [ "X = torch.rand((2, 5))\nX_prob = softmax(X)\nprint(X_prob, X_prob.sum(dim=1))", "tensor([[0.1550, 0.2261, 0.1406, 0.2190, 0.2593],\n [0.2574, 0.1541, 0.2498, 0.1172, 0.2215]]) tensor([1.0000, 1.0000])\n" ], [ "a = np.random.rand(2,5)\na", "_____no_output_____" ], [ "a.exp()", "_____no_output_____" ], [ "torch.rand((2, 5)).exp()", "_____no_output_____" ] ], [ [ "**torch增加了很多方法。**", "_____no_output_____" ], [ "## 3.6.4 定义模型", "_____no_output_____" ] ], [ [ "def net(X):\n return softmax(torch.mm(X.view((-1, num_inputs)), W) + b)", "_____no_output_____" ], [ "y = torch.LongTensor([0, 2])\ny.view(-1,1)", "_____no_output_____" ] ], [ [ "## 3.6.5 定义损失函数", "_____no_output_____" ] ], [ [ "tensor_0 = torch.arange(3, 12).view(3, 3)\nprint(tensor_0)", "tensor([[ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11]])\n" ], [ "index = torch.tensor([[2, 1, 0]])\ntensor_1 = tensor_0.gather(0, index)\nprint(tensor_1)", "tensor([[9, 7, 5]])\n" ], [ "index = torch.tensor([[2, 1, 0],\n [0,1,2]])\ntensor_0.gather(0, index)", "_____no_output_____" ], [ "index = torch.tensor([[2, 1, 0]])\ntensor_1 = tensor_0.gather(1, index)\nprint(tensor_1)", "tensor([[5, 4, 3]])\n" ], [ "y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny = torch.LongTensor([0, 2])\ny_hat.gather(1, y.view(-1, 1))", "_____no_output_____" ], [ "y_hat", "_____no_output_____" ], [ "y = torch.LongTensor([1, 2])\ny.view(-1, 1)", "_____no_output_____" ], [ "c = y_hat.gather(1, y.view(-1, 1))\nc", "_____no_output_____" ], [ "- torch.log(c)", "_____no_output_____" ] ], [ [ "### torch.gather()用法", "_____no_output_____" ], [ 
"从上面的例子可以看出,torch.gather(dim),dim指定哪一维度,那一维度是固定的。\n", "_____no_output_____" ] ], [ [ "y_hat", "_____no_output_____" ], [ "y_hat.gather(1, torch.tensor([[1],[2]])) ", "_____no_output_____" ], [ "\"\"\"\n行数是固定自增长的,[0],[1]\ndim = 1,即按照列的方向 ;torch.tensor 指定列 [1],[2]\n则组合行列是[0,1],[1,2],因此取出来是 [[0.3],[0.5]]\n\"\"\"", "_____no_output_____" ], [ "def cross_entropy(y_hat, y):\n return - torch.log(y_hat.gather(1, y.view(-1, 1)))", "_____no_output_____" ] ], [ [ "## 3.6.6 计算分类准确率", "_____no_output_____" ] ], [ [ "def accuracy(y_hat, y):\n return (y_hat.argmax(dim=1) == y).float().mean().item()", "_____no_output_____" ], [ "print(accuracy(y_hat, y))", "0.5\n" ], [ "# 本函数已保存在d2lzh_pytorch包中方便以后使用。该函数将被逐步改进:它的完整实现将在“图像增广”一节中描述\ndef evaluate_accuracy(data_iter, net):\n acc_sum, n = 0.0, 0\n for X, y in data_iter:\n acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()\n n += y.shape[0]\n return acc_sum / n", "_____no_output_____" ], [ "print(evaluate_accuracy(test_iter, net))", "0.0691\n" ] ], [ [ "## 3.6.7 训练模型", "_____no_output_____" ] ], [ [ "num_epochs, lr = 5, 0.1\n\n# 本函数已保存在d2lzh_pytorch包中方便以后使用\ndef train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,\n params=None, lr=None, optimizer=None):\n for epoch in range(num_epochs):\n train_l_sum, train_acc_sum, n = 0.0, 0.0, 0\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y).sum()\n \n # 梯度清零\n if optimizer is not None:\n optimizer.zero_grad()\n elif params is not None and params[0].grad is not None:\n for param in params:\n param.grad.data.zero_()\n \n l.backward()\n if optimizer is None:\n d2l.sgd(params, lr, batch_size)\n else:\n optimizer.step() # “softmax回归的简洁实现”一节将用到\n \n \n train_l_sum += l.item()\n train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()\n n += y.shape[0]\n test_acc = evaluate_accuracy(test_iter, net)\n print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'\n % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))\n\ntrain_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [W, b], lr)", "epoch 1, loss 0.7855, train acc 0.750, test acc 0.794\nepoch 2, loss 0.5708, train acc 0.813, test acc 0.807\nepoch 3, loss 0.5249, train acc 0.826, test acc 0.817\nepoch 4, loss 0.5017, train acc 0.832, test acc 0.821\nepoch 5, loss 0.4853, train acc 0.837, test acc 0.829\n" ] ], [ [ "## 3.6.8 预测", "_____no_output_____" ] ], [ [ "X, y = iter(test_iter).next()\n\ntrue_labels = d2l.get_fashion_mnist_labels(y.numpy())\npred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(dim=1).numpy())\ntitles = [true + '\\n' + pred for true, pred in zip(true_labels, pred_labels)]\n\nd2l.show_fashion_mnist(X[0:9], titles[0:9])", "_____no_output_____" ] ], [ [ "**不知道什么问题,内核老是动不动就挂掉。**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
hexsha: ec744320ca47f39440901f023b34727ef89aa4f3
size: 12,063
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: week3_sparkMLpipeline.ipynb
max_stars_repo_name: shristi428/Apache_Spark_Small_Projects
max_stars_repo_head_hexsha: d6abdec69fd7d730e35891fb9677f43f88f6d744
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: week3_sparkMLpipeline.ipynb
max_issues_repo_name: shristi428/Apache_Spark_Small_Projects
max_issues_repo_head_hexsha: d6abdec69fd7d730e35891fb9677f43f88f6d744
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: week3_sparkMLpipeline.ipynb
max_forks_repo_name: shristi428/Apache_Spark_Small_Projects
max_forks_repo_head_hexsha: d6abdec69fd7d730e35891fb9677f43f88f6d744
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 137.079545
max_line_length: 3,251
alphanum_fraction: 0.560391
[ [ [ "Welcome to exercise one of week three of “Apache Spark for Scalable Machine Learning on BigData”. In this exercise we’ll use the HMP dataset again and perform some basic operations using Apache SparkML Pipeline components.\n\nLet’s create our DataFrame again:\n", "_____no_output_____" ] ], [ [ "# delete files from previous runs\n!rm -f hmp.parquet*\n\n# download the file containing the data in PARQUET format\n!wget https://github.com/IBM/coursera/raw/master/hmp.parquet\n \n# create a dataframe out of it\ndf = spark.read.parquet('hmp.parquet')\n\n# register a corresponding query table\ndf.createOrReplaceTempView('df')", "--2020-03-08 10:25:59-- https://github.com/IBM/coursera/raw/master/hmp.parquet\nResolving github.com (github.com)... 140.82.118.3\nConnecting to github.com (github.com)|140.82.118.3|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/IBM/coursera/master/hmp.parquet [following]\n--2020-03-08 10:26:00-- https://raw.githubusercontent.com/IBM/coursera/master/hmp.parquet\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 199.232.56.133\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|199.232.56.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 932997 (911K) [application/octet-stream]\nSaving to: 'hmp.parquet'\n\n100%[======================================>] 932,997 --.-K/s in 0.04s \n\n2020-03-08 10:26:00 (20.4 MB/s) - 'hmp.parquet' saved [932997/932997]\n\n" ] ], [ [ "Given below is the feature engineering pipeline from the lecture. Please add a feature column called “features_minmax” using the MinMaxScaler.\n\nMore information can be found here:\nhttp://spark.apache.org/docs/latest/ml-features.html#minmaxscaler", "_____no_output_____" ] ], [ [ "from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler, Normalizer, MinMaxScaler\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml import Pipeline\n\n\nindexer = StringIndexer(inputCol=\"class\", outputCol=\"classIndex\")\nencoder = OneHotEncoder(inputCol=\"classIndex\", outputCol=\"categoryVec\")\nvectorAssembler = VectorAssembler(inputCols=[\"x\",\"y\",\"z\"],\n outputCol=\"features\")\nnormalizer = Normalizer(inputCol=\"features\", outputCol=\"features_norm\", p=1.0)\n\nminmaxscaler = MinMaxScaler(inputCol=\"features_norm\",outputCol=\"features_minmax\")\n\npipeline = Pipeline(stages=[indexer, encoder, vectorAssembler, normalizer,minmaxscaler])\nmodel = pipeline.fit(df)\nprediction = model.transform(df)\nprediction.show()", "+---+---+---+--------------------+-----------+----------+--------------+----------------+--------------------+--------------------+\n| x| y| z| source| class|classIndex| categoryVec| features| features_norm| features_minmax|\n+---+---+---+--------------------+-----------+----------+--------------+----------------+--------------------+--------------------+\n| 22| 49| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,49.0,35.0]|[0.20754716981132...|[0.26684636118598...|\n| 22| 49| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,49.0,35.0]|[0.20754716981132...|[0.26684636118598...|\n| 22| 52| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,52.0,35.0]|[0.20183486238532...|[0.25950196592398...|\n| 22| 52| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,52.0,35.0]|[0.20183486238532...|[0.25950196592398...|\n| 21| 52| 34|Accelerometer-201...|Brush_teeth| 
6.0|(13,[6],[1.0])|[21.0,52.0,34.0]|[0.19626168224299...|[0.25233644859813...|\n| 22| 51| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,51.0,34.0]|[0.20560747663551...|[0.26435246995994...|\n| 20| 50| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[20.0,50.0,35.0]|[0.19047619047619...|[0.24489795918367...|\n| 22| 52| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,52.0,34.0]|[0.20370370370370...|[0.26190476190476...|\n| 22| 50| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,50.0,34.0]|[0.20754716981132...|[0.26684636118598...|\n| 22| 51| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,51.0,35.0]|[0.20370370370370...|[0.26190476190476...|\n| 21| 51| 33|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[21.0,51.0,33.0]|[0.2,0.4857142857...|[0.25714285714285...|\n| 20| 50| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[20.0,50.0,34.0]|[0.19230769230769...|[0.24725274725274...|\n| 21| 49| 33|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[21.0,49.0,33.0]|[0.20388349514563...|[0.26213592233009...|\n| 21| 49| 33|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[21.0,49.0,33.0]|[0.20388349514563...|[0.26213592233009...|\n| 20| 51| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[20.0,51.0,35.0]|[0.18867924528301...|[0.24258760107816...|\n| 18| 49| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[18.0,49.0,34.0]|[0.17821782178217...|[0.22913719943422...|\n| 19| 48| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[19.0,48.0,34.0]|[0.18811881188118...|[0.24186704384724...|\n| 16| 53| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[16.0,53.0,34.0]|[0.15533980582524...|[0.19972260748959...|\n| 18| 52| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[18.0,52.0,35.0]|[0.17142857142857...|[0.22040816326530...|\n| 18| 51| 32|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[18.0,51.0,32.0]|[0.17821782178217...|[0.22913719943422...|\n+---+---+---+--------------------+-----------+----------+--------------+----------------+--------------------+--------------------+\nonly showing top 20 rows\n\n" ] ], [ [ "The difference between a transformer and an estimator is state. A transformer is stateless whereas an estimator keeps state. Therefore “VectorAsselmbler” is a transformer since it only need to read row by row. Normalizer, on the other hand need to compute statistics on the dataset before, therefore it is an estimator. An estimator has an additional “fit” function. 
“OneHotEncoder” has been deprecated in Spark 2.3, therefore please change the code below to use the OneHotEstimator instead of the “OneHotEncoder”.\n\nMore information can be found here:\nhttp://spark.apache.org/docs/latest/ml-features.html#onehotencoderestimator\n\n\n\n", "_____no_output_____" ] ], [ [ "from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler, Normalizer, MinMaxScaler, OneHotEncoderEstimator\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml import Pipeline\n\nindexer = StringIndexer(inputCol=\"class\", outputCol=\"classIndex\")\nencoder = OneHotEncoder(inputCol=\"classIndex\", outputCol=\"categoryVec\")\nvectorAssembler = VectorAssembler(inputCols=[\"x\",\"y\",\"z\"],\n outputCol=\"features\")\nnormalizer = Normalizer(inputCol=\"features\", outputCol=\"features_norm\", p=1.0)\n\n\npipeline = Pipeline(stages=[indexer, encoder, vectorAssembler, normalizer])\nmodel = pipeline.fit(df)\nprediction = model.transform(df)\nprediction.show()", "+---+---+---+--------------------+-----------+----------+--------------+----------------+--------------------+\n| x| y| z| source| class|classIndex| categoryVec| features| features_norm|\n+---+---+---+--------------------+-----------+----------+--------------+----------------+--------------------+\n| 22| 49| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,49.0,35.0]|[0.20754716981132...|\n| 22| 49| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,49.0,35.0]|[0.20754716981132...|\n| 22| 52| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,52.0,35.0]|[0.20183486238532...|\n| 22| 52| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,52.0,35.0]|[0.20183486238532...|\n| 21| 52| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[21.0,52.0,34.0]|[0.19626168224299...|\n| 22| 51| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,51.0,34.0]|[0.20560747663551...|\n| 20| 50| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[20.0,50.0,35.0]|[0.19047619047619...|\n| 22| 52| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,52.0,34.0]|[0.20370370370370...|\n| 22| 50| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,50.0,34.0]|[0.20754716981132...|\n| 22| 51| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[22.0,51.0,35.0]|[0.20370370370370...|\n| 21| 51| 33|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[21.0,51.0,33.0]|[0.2,0.4857142857...|\n| 20| 50| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[20.0,50.0,34.0]|[0.19230769230769...|\n| 21| 49| 33|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[21.0,49.0,33.0]|[0.20388349514563...|\n| 21| 49| 33|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[21.0,49.0,33.0]|[0.20388349514563...|\n| 20| 51| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[20.0,51.0,35.0]|[0.18867924528301...|\n| 18| 49| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[18.0,49.0,34.0]|[0.17821782178217...|\n| 19| 48| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[19.0,48.0,34.0]|[0.18811881188118...|\n| 16| 53| 34|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[16.0,53.0,34.0]|[0.15533980582524...|\n| 18| 52| 35|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[18.0,52.0,35.0]|[0.17142857142857...|\n| 18| 51| 32|Accelerometer-201...|Brush_teeth| 6.0|(13,[6],[1.0])|[18.0,51.0,32.0]|[0.17821782178217...|\n+---+---+---+--------------------+-----------+----------+--------------+----------------+--------------------+\nonly showing top 20 
rows\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
hexsha: ec744fd6c32826f6cc2c2746112a9155e1010380
size: 20,251
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: 18_PDEs_waves/18_PDEs_waves-students.ipynb
max_stars_repo_name: ASU-CompMethodsPhysics-PHY494/PHY494-resources-2019
max_stars_repo_head_hexsha: e6114b49d28df887abe37c8144df8f4ae8cf6419
max_stars_repo_licenses: [ "CC-BY-4.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: 18_PDEs_waves/18_PDEs_waves-students.ipynb
max_issues_repo_name: ASU-CompMethodsPhysics-PHY494/PHY494-resources-2019
max_issues_repo_head_hexsha: e6114b49d28df887abe37c8144df8f4ae8cf6419
max_issues_repo_licenses: [ "CC-BY-4.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: 18_PDEs_waves/18_PDEs_waves-students.ipynb
max_forks_repo_name: ASU-CompMethodsPhysics-PHY494/PHY494-resources-2019
max_forks_repo_head_hexsha: e6114b49d28df887abe37c8144df8f4ae8cf6419
max_forks_repo_licenses: [ "CC-BY-4.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 28.765625
max_line_length: 276
alphanum_fraction: 0.501901
[ [ [ "# 18 PDEs: Waves – Students\n\n(See *Computational Physics* Ch 21 and *Computational Modeling* Ch 6.5.)", "_____no_output_____" ], [ "## Background: waves on a string\n\nAssume a 1D string of length $L$ with mass density per unit length $\\rho$ along the $x$ direction. It is held under constant tension $T$ (force per unit length). Ignore frictional forces and the tension is so high that we can ignore sagging due to gravity.\n\n\n### 1D wave equation\nThe string is displaced in the $y$ direction from its rest position, i.e., the displacement $y(x, t)$ is a function of space $x$ and time $t$.\n\nFor small relative displacements $y(x, t)/L \\ll 1$ and therefore small slopes $\\partial y/\\partial x$ we can describe $y(x, t)$ with a *linear* equation of motion:", "_____no_output_____" ], [ "Newton's second law applied to short elements of the string with length $\\Delta x$ and mass $\\Delta m = \\rho \\Delta x$: the left hand side contains the *restoring force* that opposes the displacement, the right hand side is the acceleration of the string element:\n\n\\begin{align}\n\\sum F_{y}(x) &= \\Delta m\\, a(x, t)\\\\\nT \\sin\\theta(x+\\Delta x) - T \\sin\\theta(x) &= \\rho \\Delta x \\frac{\\partial^2 y(x, t)}{\\partial t^2}\n\\end{align}\n\nThe angle $\\theta$ measures by how much the string is bent away from the resting configuration.", "_____no_output_____" ], [ "Because we assume small relative displacements, the angles are small ($\\theta \\ll 1$) and we can make the small angle approximation\n\n$$\n\\sin\\theta \\approx \\tan\\theta = \\frac{\\partial y}{\\partial x}\n$$\n\nand hence", "_____no_output_____" ], [ "\\begin{align}\nT \\left.\\frac{\\partial y}{\\partial x}\\right|_{x+\\Delta x} - T \\left.\\frac{\\partial y}{\\partial x}\\right|_{x} &= \\rho \\Delta x \\frac{\\partial^2 y(x, t)}{\\partial t^2}\\\\\n\\frac{T \\left.\\frac{\\partial y}{\\partial x}\\right|_{x+\\Delta x} - T \\left.\\frac{\\partial y}{\\partial x}\\right|_{x}}{\\Delta x} &= \\rho \\frac{\\partial^2 y}{\\partial t^2}\n\\end{align}", "_____no_output_____" ], [ "or in the limit $\\Delta x \\rightarrow 0$ a linear hyperbolic PDE results:\n\n\\begin{gather}\n\\frac{\\partial^2 y(x, t)}{\\partial x^2} = \\frac{1}{c^2} \\frac{\\partial^2 y(x, t)}{\\partial t^2}, \\quad c = \\sqrt{\\frac{T}{\\rho}}\n\\end{gather}\n\nwhere $c$ has the dimension of a velocity. 
This is the (linear) **wave equation**.", "_____no_output_____" ], [ "### General solution: waves ", "_____no_output_____" ], [ "General solutions are propagating waves:\n\nIf $f(x)$ is a solution at $t=0$ then\n\n$$\ny_{\\mp}(x, t) = f(x \\mp ct)\n$$\n\nare also solutions at later $t > 0$.", "_____no_output_____" ], [ "Because of linearity, any linear combination is also a solution, so the most general solution contains both right and left propagating waves\n\n$$\ny(x, t) = A f(x - ct) + B g(x + ct)\n$$\n\n(If $f$ and/or $g$ are present depends on the initial conditions.)", "_____no_output_____" ], [ "In three dimensions the wave equation is\n\n$$\n\\boldsymbol{\\nabla}^2 y(\\mathbf{x}, t) - \\frac{1}{c^2} \\frac{\\partial^2 y(\\mathbf{x}, t)}{\\partial t^2} = 0\\\n$$", "_____no_output_____" ], [ "### Boundary and initial conditions ", "_____no_output_____" ], [ "* The boundary conditions could be that the ends are fixed \n\n $$y(0, t) = y(L, t) = 0$$\n \n* The *initial condition* is a shape for the string, e.g., a Gaussian at the center\n\n $$\n y(x, t=0) = g(x) = y_0 \\frac{1}{\\sqrt{2\\pi\\sigma}} \\exp\\left[-\\frac{(x - x_0)^2}{2\\sigma^2}\\right]\n $$ \n \n at time 0.\n* Because the wave equation is *second order in time* we need a second initial condition, for instance, the string is released from rest: \n\n $$\n \\frac{\\partial y(x, t=0)}{\\partial t} = 0\n $$\n\n (The derivative, i.e., the initial displacement velocity is provided.)", "_____no_output_____" ], [ "### Analytical solution\nSolve (as always) with *separation of variables*.\n\n$$\ny(x, t) = X(x) T(t)\n$$\n\nand this yields the general solution (with boundary conditions of fixed string ends and initial condition of zero velocity) as a superposition of normal modes\n\n$$\ny(x, t) = \\sum_{n=0}^{+\\infty} B_n \\sin k_n x\\, \\cos\\omega_n t,\n\\quad \\omega_n = ck_n,\\ k_n = n \\frac{2\\pi}{L} = n k_0.\n$$\n\n(The angular frequency $\\omega$ and the wave vector $k$ are determined from the boundary conditions.)", "_____no_output_____" ], [ "The coefficients $B_n$ are obtained from the initial shape:\n\n$$\ny(x, t=0) = \\sum_{n=0}^{+\\infty} B_n \\sin n k_0 x = g(x)\n$$", "_____no_output_____" ], [ "In principle one can use the fact that $\\int_0^L dx \\sin m k_0 x \\, \\sin n k_0 x = \\pi \\delta_{mn}$ (orthogonality) to calculate the coefficients:\n\n\\begin{align}\n\\int_0^L dx \\sin m k_0 x \\sum_{n=0}^{+\\infty} B_n \\sin n k_0 x &= \\int_0^L dx \\sin(m k_0 x) \\, g(x)\\\\\n\\pi \\sum_{n=0}^{+\\infty} B_n \\delta_{mn} &= \\dots \\\\\nB_m &= \\pi^{-1} \\dots\n\\end{align}\n\n(but the analytical solution is ugly and I cannot be bothered to put it down here.)", "_____no_output_____" ], [ "## Numerical solution\n\n1. discretize wave equation\n2. 
time stepping: leap frog algorithm (iterate)", "_____no_output_____" ], [ "Use the central difference approximation for the second order derivatives:\n\n\\begin{align}\n\\frac{\\partial^2 y}{\\partial t^2} &\\approx \\frac{y(x, t+\\Delta t) + y(x, t-\\Delta t) - 2y(x, t)}{\\Delta t ^2} = \\frac{y_{i, j+1} + y_{i, j-1} - 2y_{i,j}}{\\Delta t^2}\\\\\n\\frac{\\partial^2 y}{\\partial x^2} &\\approx \\frac{y(x+\\Delta x, t) + y(x-\\Delta x, t) - 2y(x, t)}{\\Delta x ^2} = \\frac{y_{i+1, j} + y_{i-1, j} - 2y_{i,j}}{\\Delta x^2}\n\\end{align}\n\nand substitute into the wave equation to yield the *discretized* wave equation:", "_____no_output_____" ], [ "$$\n\\frac{y_{i+1, j} + y_{i-1, j} - 2y_{i,j}}{\\Delta x^2} = \\frac{1}{c^2} \\frac{y_{i, j+1} + y_{i, j-1} - 2y_{i,j}}{\\Delta t^2}\n$$", "_____no_output_____" ], [ "#### Student activity: derive the finite difference version of the 1D wave equation\nRe-arrange so that the future terms $j+1$ can be calculated from the present $j$ and past $j-1$ terms:\n\n$$\n? = ?\n$$", "_____no_output_____" ], [ "Use $\\beta := \\frac{c}{\\Delta x/\\Delta t}$ to write your solution.", "_____no_output_____" ], [ "This is the time stepping algorithm for the wave equation.", "_____no_output_____" ], [ "## Numerical implementation \n", "_____no_output_____" ] ], [ [ "# if you have plotting problems, try \n# %matplotlib inline\n%matplotlib notebook\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nplt.style.use('ggplot')", "_____no_output_____" ] ], [ [ "Implement the time stepping algorithm in the code below. Look for sections `# TODO`.", "_____no_output_____" ] ], [ [ "L = 0.5 # m\nNx = 50\nNt = 100\n\nDx = L/Nx\n# TODO: choose Dt\nDt = # s\n\nrho = 1.5e-2 # kg/m\ntension = 150 # N\n\nc = np.sqrt(tension/rho)\n\n# TODO: calculate beta\nbeta = \nbeta2 = \n\nprint(\"c = {0} m/s\".format(c))\nprint(\"Dx = {0} m, Dt = {1} s, Dx/Dt = {2} m/s\".format(Dx, Dt, Dx/Dt))\nprint(\"beta = {}\".format(beta))\n\nX = np.linspace(0, L, Nx+1) # need N+1!\n\ndef gaussian(x, y0=0.05, x0=L/2, sigma=0.1*L):\n return y0/np.sqrt(2*np.pi*sigma) * np.exp(-(x-x0)**2/(2*sigma**2))\n\n# displacements at j-1, j, j+1\ny0 = np.zeros_like(X)\ny1 = np.zeros_like(y0)\ny2 = np.zeros_like(y0)\n\n# save array\ny_t = np.zeros((Nt+1, Nx+1))\n\n# boundary conditions\n# TODO: set boundary conditions\ny2[:] = y0\n\n# initial conditions: velocity 0, i.e. no difference between y0 and y1\ny0[1:-1] = y1[1:-1] = gaussian(X)[1:-1]\n\n# save initial\nt_index = 0\ny_t[t_index, :] = y0\nt_index += 1\ny_t[t_index, :] = y1\n\nfor jt in range(2, Nt):\n # TODO: time stepping algorithm\n \n t_index += 1\n y_t[t_index, :] = y2 \n print(\"Iteration {0:5d}\".format(jt), end=\"\\r\")\nelse:\n print(\"Completed {0:5d} iterations: t={1} s\".format(jt, jt*Dt))\n ", "_____no_output_____" ] ], [ [ "### 1D plot\nPlot the output in the save array `y_t`. 
Vary the time steps that you look at with `y_t[start:end]`.\n\nWe indicate time by color changing.", "_____no_output_____" ] ], [ [ "ax = plt.subplot(111)\nax.set_prop_cycle(\"color\", [plt.cm.viridis_r(i) for i in np.linspace(0, 1, len(y_t))])\nax.plot(X, y_t.T);", "_____no_output_____" ] ], [ [ "### 1D Animation\nFor 1D animation to work in a Jupyter notebook, use", "_____no_output_____" ] ], [ [ "%matplotlib notebook", "_____no_output_____" ] ], [ [ "If no animations are visible, restart kernel and execute the `%matplotlib notebook` cell as the very first one in the notebook.\n\nWe use `matplotlib.animation` to look at movies of our solution:", "_____no_output_____" ] ], [ [ "import matplotlib.animation as animation", "_____no_output_____" ] ], [ [ "The `update_wave()` function simply re-draws our image for every `frame`.", "_____no_output_____" ] ], [ [ "y_limits = 1.05*y_t.min(), 1.05*y_t.max()\n\nfig1 = plt.figure(figsize=(5,5))\nax = fig1.add_subplot(111)\nax.set_aspect(1)\n\ndef update_wave(frame, data):\n global ax, Dt, y_limits\n ax.clear()\n ax.set_xlabel(\"x (m)\")\n ax.set_ylabel(\"y (m)\")\n ax.plot(X, data[frame])\n ax.set_ylim(y_limits)\n ax.text(0.1, 0.9, \"t = {0:3.1f} ms\".format(frame*Dt*1e3), transform=ax.transAxes)\n\nwave_anim = animation.FuncAnimation(fig1, update_wave, frames=len(y_t), fargs=(y_t,), \n interval=30, blit=True, repeat_delay=100)\n", "_____no_output_____" ] ], [ [ "### 3D plot\n(Uses functions from previous lessons.)", "_____no_output_____" ] ], [ [ "def plot_y(y_t, Dt, Dx, step=1):\n T, X = np.meshgrid(range(y_t.shape[0]), range(y_t.shape[1]))\n Y = y_t.T[X, T] # intepret index 0 as \"t\" and index 1 as \"x\", but plot x along axis 1 and t along axis 2\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.plot_wireframe(X*Dx, T*Dt*step, Y)\n ax.set_ylabel(r\"time $t$ (s)\")\n ax.set_xlabel(r\"position $x$ (m)\")\n ax.set_zlabel(r\"displacement $y$ (m)\")\n fig.tight_layout()\n return ax\n\ndef plot_surf(y_t, Dt, Dx, step=1, filename=None, offset=-1, zlabel=r'displacement',\n elevation=40, azimuth=-20, cmap=plt.cm.coolwarm):\n \"\"\"Plot y_t as a 3D plot with contour plot underneath.\n \n Arguments\n ---------\n y_t : 2D array\n displacement y(t, x)\n filename : string or None, optional (default: None)\n If `None` then show the figure and return the axes object.\n If a string is given (like \"contour.png\") it will only plot \n to the filename and close the figure but return the filename.\n offset : float, optional (default: 20)\n position the 2D contour plot by offset along the Z direction\n under the minimum Z value\n zlabel : string, optional\n label for the Z axis and color scale bar\n elevation : float, optional\n choose elevation for initial viewpoint\n azimuth : float, optional\n chooze azimuth angle for initial viewpoint\n \"\"\"\n \n t = np.arange(y_t.shape[0], dtype=int)\n x = np.arange(y_t.shape[1], dtype=int)\n T, X = np.meshgrid(t, x)\n Y = y_t.T[X, T]\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n surf = ax.plot_surface(X*Dx, T*Dt*step, Y, cmap=cmap, rstride=1, cstride=1, alpha=1)\n cset = ax.contourf(X*Dx, T*Dt*step, Y, 20, zdir='z', offset=offset+Y.min(), cmap=cmap)\n\n ax.set_xlabel('x')\n ax.set_ylabel('t')\n ax.set_zlabel(zlabel)\n ax.set_zlim(offset + Y.min(), Y.max())\n \n ax.view_init(elev=elevation, azim=azimuth)\n\n cb = fig.colorbar(surf, shrink=0.5, aspect=5)\n cb.set_label(zlabel)\n \n if filename:\n fig.savefig(filename)\n plt.close(fig)\n return filename\n else:\n return ax", 
"_____no_output_____" ], [ "plot_y(y_t, Dt, Dx, step)", "_____no_output_____" ], [ "plot_surf(y_t, Dt, Dx, step, offset=0, cmap=plt.cm.coolwarm)", "_____no_output_____" ] ], [ [ "## von Neumann stability analysis: Courant condition ", "_____no_output_____" ], [ "Assume that the solutions of the discretized equation can be written as normal modes\n\n$$\ny_{m,j} = \\xi(k)^j e^{ikm\\Delta x}, \\quad t=j\\Delta t,\\ x=m\\Delta x \n$$\n\nThe time stepping algorith is stable if\n\n$$\n|\\xi(k)| < 1\n$$", "_____no_output_____" ], [ "Insert normal modes into the discretized equation \n\n\n$$\ny_{i,j+1} = 2(1 - \\beta^2)y_{i,j} - y_{i, j-1} + \\beta^2 (y_{i+1,j} + y_{i-1,j}), \\quad \n\\beta := \\frac{c}{\\Delta x/\\Delta t}\n$$\n\nand simplify (use $1-\\cos x = 2\\sin^2\\frac{x}{2}$):\n\n$$\n\\xi^2 - 2(1-2\\beta^2 s^2)\\xi + 1 = 0, \\quad s=\\sin(k\\Delta x/2)\n$$\n\nThe characteristic equation has roots\n\n$$\n\\xi_{\\pm} = 1 - 2\\beta^2 s^2 \\pm \\sqrt{(1-2\\beta^2 s^2)^2 - 1}.\n$$\n\nIt has one root for \n\n$$\n\\left|1-2\\beta^2 s^2\\right| = 1,\n$$\n\ni.e., for\n\n$$\n\\beta s = 1\n$$\n\nWe have two real roots for \n\n$$\n\\left|1-2\\beta^2 s^2\\right| < 1 \\\\\n\\beta s > 1\n$$\n\nbut one of the roots is always $|\\xi| > 1$ and hence these solutions will diverge and not be stable.\n\nFor \n\n$$\n\\left|1-2\\beta^2 s^2\\right| ≥ 1 \\\\\n\\beta s ≤ 1\n$$\n\nthe roots will be *complex conjugates of each other*\n\n$$\n\\xi_\\pm = 1 - 2\\beta^2s^2 \\pm i\\sqrt{1-(1-2\\beta^2s^2)^2}\n$$\n\nand the *magnitude*\n\n$$\n|\\xi_{\\pm}|^2 = (1 - 2\\beta^2s^2)^2 - (1-(1-2\\beta^2s^2)^2) = 1\n$$\n\nis unity: Thus the solutions will not grow and will be *stable* for\n\n$$\n\\beta s ≤ 1\\\\\n\\frac{c}{\\frac{\\Delta x}{\\Delta t}} \\sin\\frac{k \\Delta x}{2} ≤ 1\n$$\n\nAssuming the \"worst case\" for the $\\sin$ factor (namely, 1), the **condition for stability** is\n\n$$\nc ≤ \\frac{\\Delta x}{\\Delta t}\n$$\n\nor \n\n$$\n\\beta ≤ 1.\n$$\n\nThis is also known as the **Courant condition**. When written as\n\n$$\n\\Delta t ≤ \\frac{\\Delta x}{c}\n$$\n\nit means that the time step $\\Delta t$ (for a given $\\Delta x$) must be *smaller than the time that the wave takes to travel one grid step*.\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
hexsha: ec745148b6fb130d389f2a8755e7cd3b0021b7ea
size: 86,334
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: examples/kernelml-autoencoder-example.ipynb
max_stars_repo_name: Freedomtowin/kernel_optimizer
max_stars_repo_head_hexsha: 2676044e0f287cd8dda8f9f92a6d3813544965e4
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 9
max_stars_repo_stars_event_min_datetime: 2019-10-03T18:02:29.000Z
max_stars_repo_stars_event_max_datetime: 2021-08-09T09:30:33.000Z
max_issues_repo_path: examples/kernelml-autoencoder-example.ipynb
max_issues_repo_name: Freedomtowin/kernel_optimizer
max_issues_repo_head_hexsha: 2676044e0f287cd8dda8f9f92a6d3813544965e4
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2019-12-11T09:46:09.000Z
max_issues_repo_issues_event_max_datetime: 2021-06-17T00:45:16.000Z
max_forks_repo_path: examples/kernelml-autoencoder-example.ipynb
max_forks_repo_name: Freedomtowin/kernel_optimizer
max_forks_repo_head_hexsha: 2676044e0f287cd8dda8f9f92a6d3813544965e4
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 3
max_forks_repo_forks_event_min_datetime: 2020-04-18T10:41:56.000Z
max_forks_repo_forks_event_max_datetime: 2021-06-17T02:06:14.000Z
avg_line_length: 125.121739
max_line_length: 35,570
alphanum_fraction: 0.80287
[ [ [ "import pandas as pd\nimport time\nimport seaborn\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import stats\nfrom sklearn import linear_model\nimport kernelml\nimport re\n\nfrom ipyparallel import Client\nrc = Client(profile='default')\ndview = rc[:]\n\ndview.block = True\n\nwith dview.sync_imports():\n #for some reason, aliases cannot be use\n import numpy\n import scipy", "importing numpy on engine(s)\nimporting scipy on engine(s)\n" ], [ "full = pd.read_csv('DATA/hb_training.csv')\ntest = pd.read_csv('DATA/hb_testing.csv')\n\ndef change_label(x):\n if x =='s':\n return 1\n else: \n return 0\n \nfull['Label'] = full['Label'].apply(change_label)\nEventId = test['EventId']\nfull.drop(['EventId'],axis=1,inplace=True)\ntest.drop(['EventId'],axis=1,inplace=True)\nfeatures = list(full.columns[:-2])\ntarget = list(full.columns[-1:])\n\n#randomly sample and split data\nall_samples=full.index\nones = full[full[target].values==1].index\nzeros = full[full[target].values==0].index\nones_rand_sample = np.random.choice(ones, size=int(len(ones)*0.5),replace=False)\nzeros_rand_sample = np.random.choice(zeros, size=int(len(zeros)*0.5),replace=False)\nrand_sample = np.concatenate((ones_rand_sample,zeros_rand_sample))\nnp.random.shuffle(rand_sample)\n\ntest_sample = np.setdiff1d(all_samples,rand_sample)\nvalid = full.loc[test_sample,:]\ntrain = full.loc[rand_sample,:]", "_____no_output_____" ], [ "#Calculate the number of parameters and layer shapes\n\nnum_inputs = len(features)\nnum_outputs = len(features)\nlayer_sizes = [10]\nmodel_shapes = []\n\nmodel_shapes.append((num_inputs,layer_sizes[0]))\ninput_n_parameters = num_inputs*layer_sizes[0]\nN = input_n_parameters\n\nfor i in range(1,len(layer_sizes)):\n layer_n_parameters = layer_sizes[i-1]*layer_sizes[i]\n model_shapes.append((layer_sizes[i],layer_sizes[i-1]))\n N += layer_n_parameters\n\noutput_n_parameters = num_outputs*layer_sizes[-1]\nN += output_n_parameters\nmodel_shapes.append((num_outputs,layer_sizes[-1]))\nnum_parameters = N\nprint('network shapes:',model_shapes)\nprint('num of parameters',N)", "network shapes: [(30, 10), (30, 10)]\nnum of parameters 600\n" ], [ "#reshape parameter vector into list of matrices\ndef reshape_vector(w):\n reshape_w = []\n indx = 0\n for shape,num in zip(model_shape,parameter_shape):\n x = w[indx:num+indx]\n if x.size!=num:\n continue\n x = x.reshape(shape,int(num/shape))\n reshape_w.append(x)\n indx = indx+num\n extra_w = w[indx:]\n return reshape_w,extra_w\n\ndef reshape_vector(w):\n np=numpy\n reshape_w = []\n indx = 0\n for shape in model_shapes:\n num = np.prod(shape)\n x = w[indx:num+indx]\n if x.size!=num:\n continue\n x = x.reshape(shape)\n reshape_w.append(x)\n indx = indx+num\n extra_w = w[indx:].reshape(-1,1)\n return np.array(reshape_w),extra_w\n\n#Specifies the way the tensors are combined with the inputs\ndef combine_tensors(X,w_tensor):\n w_tensor,extra_w = reshape_vector(w_tensor)\n b1,a1,b2,a2 = extra_w[:4]\n pred = X.dot(w_tensor[0])\n #choose link on encoding layer\n pred = a1*(pred+b1)\n pred = pred.dot(w_tensor[1].T)\n pred = a2*(pred+b2)\n return pred", "_____no_output_____" ], [ "dview['model_shapes']=model_shapes\ndview['combine_tensors']=combine_tensors\ndview['reshape_vector']=reshape_vector", "_____no_output_____" ], [ "def autoencoder_function(X,y,w_tensor,predict=False):\n #can't be passed to parallel engines, so I just assign the aliases manually\n #this is an improvement from loading the libraries again\n np = numpy\n stats = scipy.stats\n \n # define 
the loss function between predicted output actual output\n def nn_autoencoder_loss(hypothesis,y):\n return np.sum((hypothesis-y)**2)/y.size\n \n #we cannot modify pickled memory so create a copy of the parameter vector\n w_tensor_copy = w_tensor.copy()\n pred = combine_tensors(X,w_tensor_copy)\n if predict==True:\n return pred\n loss = nn_autoencoder_loss(pred,y)\n return loss", "_____no_output_____" ], [ "X = train[features].values\ny = train[target].values\n\n\nrealizations = 20\ncycles = 5\nvolume = 40\nsimulations = 3000\nvolatility = 0.01\n\nbatch_size=500\n\nzscore = 2.0\n\nimport time\nstart = time.time()\n\nkml = kernelml.KernelML(\n prior_sampler_fcn=None,\n posterior_sampler_fcn=None,\n intermediate_sampler_fcn=None,\n mini_batch_sampler_fcn=None,\n parameter_transform_fcn=None,\n batch_size=batch_size)\n\nkml.use_ipyparallel(dview)\n\nparameter_by_run,loss_by_run = kml.optimize(X,X,loss_function=autoencoder_function,\n #add the activation function parameters + 4\n number_of_parameters=num_parameters+4,\n args=[],\n number_of_realizations=realizations,\n number_of_random_simulations=simulations,\n update_volatility = volatility,\n number_of_cycles=cycles,\n update_volume=volume,\n prior_uniform_low=-1,\n prior_uniform_high=1,\n convergence_z_score=zscore,\n plot_feedback=False,\n print_feedback=True)\nend = time.time()\n\nprint(end-start)", "('realization', 0, 'loss', 76107.195938158591, 'time', 5.942706823348999)\n('realization', 1, 'loss', 48840.295057050898, 'time', 6.122480869293213)\n('realization', 2, 'loss', 33327.403997283873, 'time', 6.742632865905762)\n('realization', 3, 'loss', 27329.353391080396, 'time', 6.610260009765625)\n('realization', 4, 'loss', 19622.446254575385, 'time', 6.733449935913086)\n('realization', 5, 'loss', 15045.266365762734, 'time', 6.165683031082153)\n('realization', 6, 'loss', 12421.891320053855, 'time', 6.550469875335693)\n('realization', 7, 'loss', 11088.197148225698, 'time', 6.800249814987183)\n('realization', 8, 'loss', 8981.2265498882734, 'time', 6.639477014541626)\n('realization', 9, 'loss', 7253.0849197167227, 'time', 6.585238933563232)\n('realization', 10, 'loss', 6438.8320106083584, 'time', 6.757697105407715)\n('realization', 11, 'loss', 5867.0427249286258, 'time', 7.00865912437439)\n('realization', 12, 'loss', 5390.2583248174869, 'time', 8.055474042892456)\n('realization', 13, 'loss', 4788.6150441189266, 'time', 7.0221710205078125)\n('realization', 14, 'loss', 9162.835117834009, 'time', 6.109021186828613)\n('realization', 15, 'loss', 4392.471742705894, 'time', 10.129889011383057)\n('realization', 16, 'loss', 3899.6516868555027, 'time', 5.998311996459961)\n('realization', 17, 'loss', 3608.3936616892697, 'time', 7.90788197517395)\n('realization', 18, 'loss', 3269.3898686461489, 'time', 6.186285972595215)\n('realization', 19, 'loss', 2836.9666005735767, 'time', 6.00260591506958)\n136.0774028301239\n" ], [ "plt.figure(figsize=(10,5))\nplt.plot(loss_by_run)\nplt.show()", "_____no_output_____" ], [ "plt.figure(figsize=(10,5))\nplt.plot(parameter_by_run)\nplt.show()", "_____no_output_____" ], [ "X = train[features].values\ny = train[target].values\nX_test = test[features].values\n# y_test = test[target].values\n\nautoencoder_SST_train = np.sum((X - np.mean(X,axis=0))**2)/X.size\nautoencoder_SST_test = np.sum((X_test - np.mean(X,axis=0))**2)/X_test.size\n\n#get model parameters of last run by interation\nkml.model.get_param_by_iter()\nkml.model.get_loss_by_iter()\n\nprint('performance by run')\nfor i in range(parameter_by_run.shape[0]):\n 
w=parameter_by_run[i].copy()\n autoencoder_SSE_train = autoencoder_function(X,X,w,model_shapes)\n autoencoder_SSE_test = autoencoder_function(X_test,X_test,w,model_shapes)\n print('iteration',i,'train rsquared',1-autoencoder_SSE_train/autoencoder_SST_train,'test rsquared',1-autoencoder_SSE_test/autoencoder_SST_test)", "performance by run\niteration 0 train rsquared 0.150194397636 test rsquared 0.147393967407\niteration 1 train rsquared 0.429166157218 test rsquared 0.427665596608\niteration 2 train rsquared 0.635475657519 test rsquared 0.634973096386\niteration 3 train rsquared 0.708687242438 test rsquared 0.708411877331\niteration 4 train rsquared 0.783977183682 test rsquared 0.7837629366\niteration 5 train rsquared 0.830015142301 test rsquared 0.829713127146\niteration 6 train rsquared 0.858136432656 test rsquared 0.857989033603\niteration 7 train rsquared 0.877759661645 test rsquared 0.877625649223\niteration 8 train rsquared 0.897650851565 test rsquared 0.897625044183\niteration 9 train rsquared 0.914282654105 test rsquared 0.9143493577\niteration 10 train rsquared 0.927670565024 test rsquared 0.927799461907\niteration 11 train rsquared 0.936352064563 test rsquared 0.936550898509\niteration 12 train rsquared 0.942844511683 test rsquared 0.943063010957\niteration 13 train rsquared 0.947848026839 test rsquared 0.94810356426\niteration 14 train rsquared 0.943355102831 test rsquared 0.943504928528\niteration 15 train rsquared 0.952367254255 test rsquared 0.952582933321\niteration 16 train rsquared 0.955000103888 test rsquared 0.955261989261\niteration 17 train rsquared 0.957989157754 test rsquared 0.958278222344\niteration 18 train rsquared 0.962427284274 test rsquared 0.962735073824\niteration 19 train rsquared 0.964250626681 test rsquared 0.964550707723\n" ], [ "import gc\ngc.collect()", "_____no_output_____" ], [ "from keras.layers import Input, Dense, Flatten\nfrom keras.models import Model, Sequential\n\ninput_data = Input(shape=(X.shape[1],))\nlayer1 = Dense(10, activation='linear')(input_data)\nlayer2 = Dense(len(features), activation='linear')(layer1)\nmodel = Model(input_data, layer2)\n\nmodel.compile(optimizer='adagrad', \n loss='mean_squared_error')\n\nstart = time.time()\nmodel.fit(X, X,\n epochs=100,\n batch_size=5000,\n shuffle=False,\n validation_data=(X_test, X_test))\nend = time.time()\nprint(end-start)", "Using TensorFlow backend.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
hexsha: ec745d1e567c936fc4b6e088e3a90669a10ddb87
size: 188,300
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Portfolio_Group_Constraints.ipynb
max_stars_repo_name: Hvass-Labs/InvestOps-Tutorials
max_stars_repo_head_hexsha: a9bde0912b109db08d65d1358efc8b4bbbec9e59
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-11-05T17:11:10.000Z
max_stars_repo_stars_event_max_datetime: 2021-11-05T17:11:10.000Z
max_issues_repo_path: Portfolio_Group_Constraints.ipynb
max_issues_repo_name: Hvass-Labs/InvestOps-Tutorials
max_issues_repo_head_hexsha: a9bde0912b109db08d65d1358efc8b4bbbec9e59
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Portfolio_Group_Constraints.ipynb
max_forks_repo_name: Hvass-Labs/InvestOps-Tutorials
max_forks_repo_head_hexsha: a9bde0912b109db08d65d1358efc8b4bbbec9e59
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2022-03-19T17:45:52.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-19T17:45:52.000Z
avg_line_length: 142.220544
max_line_length: 147,536
alphanum_fraction: 0.861407
[ [ [ "## InvestOps Tutorial - Portfolio Group Constraints\n\n[Original repository on GitHub](https://github.com/Hvass-Labs/InvestOps-Tutorials)\n\nOriginal author is [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/)\n\n----\n\n\"*How many bacon rolls you're wantings Sir, we don't have so much bacons!*\" - [Facejacker](https://www.youtube.com/watch?v=wKmhJFwwiSM)", "_____no_output_____" ], [ "## Introduction\n\nOne way of controlling risk in an investment portfolio, is to ensure that it is diversified in many different groups of assets, such as stocks, bonds, currencies and commodities, and across many different countries, industries, credit-ratings, etc.\n\nThis tutorial shows how to adjust the portfolio weights according to the groups that the assets belong to, so the entire groups of assets are within some constraints or limits. This is an easy problem if all the groups are mutually exclusive, but the problem is much more complicated when assets may belong to many overlapping groups.\n\nFor classic \"mean-variance\" portfolio optimization, the group constraints are solved as an integral part of the optimization problem. But when we are using another portfolio method such as the \"filter-diversify\" method, we need a separate method for adjusting the portfolio weights so they satisfy the group constraints. This tutorial shows how to use a very efficient algorithm for doing this.\n\nThe paper referenced below gives a detailed explanation of the algorithm and how it works.", "_____no_output_____" ], [ "## References <a id=\"refs\"></a>\n\n- M.E.H. Pedersen, \"*Portfolio Group Constraints*\", 2022. ([PDF](https://ssrn.com/abstract=4033243))", "_____no_output_____" ], [ "## Google Colab", "_____no_output_____" ] ], [ [ "# If running in Google Colab, automatically install the required\n# Python packages. This is NOT recommended on your local computer,\n# unless you have setup a Python environment for this project.\n# See the README on GitHub for detailed instructions.\nif 'google.colab' in str(get_ipython()):\n !pip install investops numpy pandas matplotlib", "_____no_output_____" ] ], [ [ "## Imports", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "# Python packages.\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "# InvestOps.\nimport investops as iv\nfrom investops.group_constraints import GroupConstraints\nfrom investops.random import (rand_normal, rand_uniform,\n rand_where, rand_groups,\n gen_asset_names, gen_group_names)", "_____no_output_____" ], [ "# InvestOps version.\niv.__version__", "_____no_output_____" ] ], [ [ "## Config", "_____no_output_____" ] ], [ [ "# Random number generator.\n# The seed makes the experiments repeatable.\nrng = np.random.default_rng(seed=80085)", "_____no_output_____" ] ], [ [ "## Small Example - Positive Weights\n\nThis small example has 3 different groups named: Group A, B, and C, which could be different asset classes (stocks, bonds, commodities), or it could be different countries or industries. We then have 4 different assets named: Asset 1, 2, 3, and 4, which could be different bonds, stocks, etc. 
In this small example all the portfolio weights are positive.", "_____no_output_____" ] ], [ [ "# The individual group-names.\nGROUP_A = 'Group A'\nGROUP_B = 'Group B'\nGROUP_C = 'Group C'\n\n# List of all the group-names.\ngroup_names = [GROUP_A, GROUP_B, GROUP_C]", "_____no_output_____" ], [ "# The individual asset-names.\nASSET_1 = 'Asset 1'\nASSET_2 = 'Asset 2'\nASSET_3 = 'Asset 3'\nASSET_4 = 'Asset 4'\n\n# List of all the asset-names.\nasset_names = [ASSET_1, ASSET_2, ASSET_3, ASSET_4]", "_____no_output_____" ], [ "# This data-structure defines the association between assets\n# and groups. It is a dictionary of lists, so it enables us to\n# lookup the list of groups that are associated with an asset-name.\nasset_to_groups = \\\n{\n # Groups that Asset 1 belongs to.\n ASSET_1: [GROUP_A],\n \n # Groups that Asset 2 belongs to.\n ASSET_2: [GROUP_A, GROUP_B],\n \n # Groups that Asset 3 belongs to.\n ASSET_3: [GROUP_B, GROUP_C],\n \n # Groups that Asset 4 belongs to.\n ASSET_4: [GROUP_C],\n}", "_____no_output_____" ], [ "# Group-limits for the POSITIVE portfolio weights.\n# This must be a Pandas Series so the groups are named properly.\ngroup_lim_pos = {GROUP_A: 0.05, GROUP_B: 0.1, GROUP_C: 0.2}\ngroup_lim_pos = pd.Series(group_lim_pos)\n\n# We do not use group-limits for the NEGATIVE portfolio weights.\ngroup_lim_neg = None", "_____no_output_____" ], [ "# Original portfolio weights found through some other process,\n# e.g. by estimating the future asset-returns and use them to\n# determine how much of the portfolio to invest in each asset.\nweights_org = {ASSET_1: 0.05, ASSET_2: 0.1, ASSET_3: 0.15, ASSET_4: 0.2}\nweights_org = pd.Series(weights_org)", "_____no_output_____" ], [ "# Initialize the solver for the Group Constraints.\n# This sets up internal data-structures for efficiently solving\n# the problem. If you later change the asset-names, group-names,\n# group-limits, or the mapping from assets to lists of groups,\n# then you must create a new instance of this solver.\ngrp = GroupConstraints(asset_names=asset_names,\n group_names=group_names,\n asset_to_groups=asset_to_groups,\n group_lim_pos=group_lim_pos,\n group_lim_neg=group_lim_neg)", "_____no_output_____" ], [ "# Calculate the adjusted weights that satisfy the group constraints.\nweights_new = grp.constrain_weights(weights_org=weights_org)\nweights_new", "_____no_output_____" ], [ "# Calculate the adjusted weights that satisfy the group constraints.\n# This logs the adjusted weights for all iterations of the algorithm.\n# Note that it is significantly slower to log all results, so this\n# should only be done for testing / debugging purposes, and not for\n# an actual trading or back-testing-system.\nweights_new_log = grp.constrain_weights(weights_org=weights_org,\n log=True)\nweights_new_log", "_____no_output_____" ], [ "# Calculate the group-ratios between the group-limits / group-sums,\n# which are less than 1.0 if the group-sums are too high so the\n# portfolio weights must be decreased. And the group-ratios are\n# greater than 1.0 if the group-sums are lower than the group-limits,\n# so the portfolio weights could be increased. 
Ideally we want all\n# the group limits to be equal or slightly above 1.0, as those weights\n# satisfy all the group constraints while being the closest possible\n# to the original portfolio weights.\ngroup_ratios_pos, group_ratios_neg = grp.group_ratios(weights=weights_new_log)\ngroup_ratios_pos", "_____no_output_____" ] ], [ [ "## Small Example - Positive & Negative Weights\n\nThis is another small example again using 3 groups and 4 assets, but this time some of the original portfolio weights are positive and some are negative.", "_____no_output_____" ] ], [ [ "# Group-limits for the POSITIVE portfolio weights.\ngroup_lim_pos = {GROUP_A: 0.05, GROUP_B: 0.1, GROUP_C: 0.2}\ngroup_lim_pos = pd.Series(group_lim_pos)\n\n# Group-limits for the NEGATIVE portfolio weights.\n# These must all be negative values. Note the -np.inf which\n# indicates that there is no negative limit for Group A.\ngroup_lim_neg = {GROUP_A: -np.inf, GROUP_B: -0.15, GROUP_C: -0.1}\ngroup_lim_neg = pd.Series(group_lim_neg)", "_____no_output_____" ], [ "# Original portfolio weights.\nweights_org = {ASSET_1: -0.05, ASSET_2: -0.1, ASSET_3: -0.15, ASSET_4: 0.2}\nweights_org = pd.Series(weights_org)", "_____no_output_____" ], [ "# Initialize the solver for the Group Constraints.\ngrp = GroupConstraints(asset_names=asset_names,\n group_names=group_names,\n asset_to_groups=asset_to_groups,\n group_lim_pos=group_lim_pos,\n group_lim_neg=group_lim_neg)", "_____no_output_____" ], [ "# Calculate the adjusted weights that satisfy the group constraints.\nweights_new = grp.constrain_weights(weights_org=weights_org)\nweights_new", "_____no_output_____" ], [ "# Calculate and log the adjusted weights satisfying the constraints.\nweights_new_log = grp.constrain_weights(weights_org=weights_org, log=True)\nweights_new_log", "_____no_output_____" ], [ "# Calculate the group-ratios between the group-limits / group-sums.\n# Note that a group-ratio of inf (infinity) means that either\n# the group-limit was inf, or the group-sum was zero. The goal\n# is still to have the non-inf group-ratios be equal to 1.0 or\n# slightly above.\ngroup_ratios_pos, group_ratios_neg = grp.group_ratios(weights=weights_new_log)\ngroup_ratios_pos", "_____no_output_____" ], [ "# Show group-ratios for the negative portfolio weights.\ngroup_ratios_neg", "_____no_output_____" ] ], [ [ "## Big Example\n\nWe will now consider a much larger example of 1000 assets and 20 groups, where we will generate random data for the portfolio.", "_____no_output_____" ] ], [ [ "# Number of assets.\nnum_assets = 1000\n\n# Number of groups.\nnum_groups = 20\n\n# Min number of groups per asset.\nmin_groups_per_asset = 1\n\n# Max number of groups per asset.\nmax_groups_per_asset = 10", "_____no_output_____" ], [ "# List of asset-names.\nasset_names = gen_asset_names(num_assets=num_assets)\n\n# List of group-names.\ngroup_names = gen_group_names(num_groups=num_groups)", "_____no_output_____" ], [ "# Generate a random data-structure for the associations between\n# assets and groups. 
This is a dict where each asset-name maps\n# to a list of group-names.\nasset_to_groups = \\\n rand_groups(rng=rng, num_assets=num_assets, num_groups=num_groups,\n max_groups_per_asset=max_groups_per_asset,\n min_groups_per_asset=min_groups_per_asset,\n asset_names=asset_names, group_names=group_names)", "_____no_output_____" ], [ "# Random POSITIVE group-limits.\ngroup_lim_pos = \\\n rand_uniform(rng=rng, index=group_names, low=0.05, high=0.2)\n\n# Random NEGATIVE group-limits.\ngroup_lim_neg = \\\n rand_uniform(rng=rng, index=group_names, low=-0.2, high=-0.05)", "_____no_output_____" ], [ "# Randomly set some of the group-limits to infinity.\nprob = 0.05\ngroup_lim_pos = rand_where(rng=rng, x=group_lim_pos, y=np.inf, prob=prob)\ngroup_lim_neg = rand_where(rng=rng, x=group_lim_neg, y=-np.inf, prob=prob)", "_____no_output_____" ], [ "# Random normal-distributed portfolio weights.\nweights_org = \\\n rand_normal(rng=rng, index=asset_names,\n mean=0.0, std=0.005, low=-1, high=1)", "_____no_output_____" ], [ "# Initialize the solver for the Group Constraints.\ngrp = GroupConstraints(asset_names=asset_names,\n group_names=group_names,\n asset_to_groups=asset_to_groups,\n group_lim_pos=group_lim_pos,\n group_lim_neg=group_lim_neg)", "_____no_output_____" ], [ "# Calculate the adjusted weights that satisfy the group constraints.\nweights_new = grp.constrain_weights(weights_org=weights_org)\nweights_new.head()", "_____no_output_____" ], [ "# Calculate and log the adjusted weights satisfying the constraints.\nweights_new_log = grp.constrain_weights(weights_org=weights_org, log=True, max_iter=20)\nweights_new_log.head()", "_____no_output_____" ] ], [ [ "We will now plot the weight-ratios and group-ratios. These show how the weight- and group-ratios evolve through the iterations of the algorithm. In the first iteration all the weights are decreased massively in order to satisfy the group-constraints, so the group-sums are all smaller than their respective group-limits. But this is an over-adjustment of the portfolio weights, so in the following iterations, the portfolio weights are gradually increased as the group-sums get closer and closer to the group-limits, while the adjusted weights must still be smaller or equal to the original weights.", "_____no_output_____" ] ], [ [ "# Create a figure with the log's weight-ratios and group-ratios.\nfig = grp.plot_log(weights_org=weights_org,\n weights_new_log=weights_new_log)\n\n# Save the figure to a file.\nfig.savefig('Portfolio Log.svg')\n\n# Show the figure here.\nfig;", "_____no_output_____" ] ], [ [ "## Time Usage\n\nWe will now see how long it takes to initialize the solver and run the algorithm. This uses the same random data from above with 1000 assets and 20 groups. So this is a very large portfolio. As can be seen, it only takes around 6 milli-seconds to initialize the solver, and then it takes around 2-3 milli-seconds to run the algorithm, or 13 milli-seconds if we want to log all the intermediate results of the algorithm.\n\nAlso note that a pure Python implementation needs around 100 milli-seconds to run the exact same algorithm that only takes 2-3 milli-seconds here, so the Numba Jit compiler provides a 40-50 times speed-up over pure Python.", "_____no_output_____" ] ], [ [ "%%timeit\n# Time-usage for initializing the solver.\nGroupConstraints(asset_names=asset_names,\n group_names=group_names,\n asset_to_groups=asset_to_groups,\n group_lim_pos=group_lim_pos,\n group_lim_neg=group_lim_neg)", "6.22 ms ± 646 µs per loop (mean ± std. dev. 
of 7 runs, 100 loops each)\n" ], [ "# Time-usage for the group-constraint algorithm.\n%timeit grp.constrain_weights(weights_org=weights_org)", "2.44 ms ± 127 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" ], [ "# Time-usage for the group-constraint algorithm with logging.\n# Note: It is not the logging that is time-consuming, but the\n# automated checking that all the intermediate results in the\n# log are valid and satisfy all the constraints.\n%timeit grp.constrain_weights(weights_org=weights_org, log=True)", "13.8 ms ± 303 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" ] ], [ [ "## Minimum Group Constraints\n\nThe algorithm only enforces upper limits for the group constraints. If you want to ensure a minimum part of your portfolio is invested in a group, then the easiest way is to set all the portfolio weights within that group to the max weight you want to allow for a single asset. The algorithm does not guarantee that the group-sum is above the lower limit because that may not be a valid solution, but the algorithm will find the best compromise.", "_____no_output_____" ], [ "## License (MIT)\n\nThis is published under the [MIT License](https://github.com/Hvass-Labs/InvestOps-Tutorials/blob/main/LICENSE) which allows very broad use for both academic and commercial purposes.\n\nYou are very welcome to modify and use this source-code in your own project. Please keep a link to the [original repository](https://github.com/Hvass-Labs/InvestOps-Tutorials).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
ec745e974a0a8a9c24515fdf543e1f3bb1f370f2
10,344
ipynb
Jupyter Notebook
Ch14 - Testing, Debugging, and Exceptions.ipynb
Lee-W/Python_Cookbook
c8896ee46bb0c00536f7132f3fec2eaf6df45667
[ "MIT" ]
null
null
null
Ch14 - Testing, Debugging, and Exceptions.ipynb
Lee-W/Python_Cookbook
c8896ee46bb0c00536f7132f3fec2eaf6df45667
[ "MIT" ]
null
null
null
Ch14 - Testing, Debugging, and Exceptions.ipynb
Lee-W/Python_Cookbook
c8896ee46bb0c00536f7132f3fec2eaf6df45667
[ "MIT" ]
null
null
null
20.771084
124
0.501353
[ [ [ "# Table of Content\n- [14.12 Debugging Basic Program Crashes](#14.12)\n- [14.13 Profiling and Timing Your Program](#14.13)\n- [14.14 Making Your Programs Run Faster](#14.14)", "_____no_output_____" ], [ "---\n## <a name=\"14.12\"></a> 14.12 Debugging Basic Program Crashes\n\n\n- Start an interactive shell as soon as a program terminates\n```sh\npython3 -i yourprogram.py\n```\n\n- print traceback", "_____no_output_____" ] ], [ [ "import traceback\nimport sys\n\ntry:\n func(arg)\nexcept Exception:\n traceback.print_exc(file=sys.stderr)", "Traceback (most recent call last):\n File \"<ipython-input-1-698f0c8e8b0e>\", line 5, in <module>\n func(arg)\nNameError: name 'func' is not defined\n" ] ], [ [ "---\n## <a name=\"14.13\"></a> 14.13 Profiling and Timing Your Program\n\n- Simply know the time of the whole program\n```sh\ntime python3 yourprogram.py\n```\n\n- Get a detail report of the program\n```sh\npython3 -m cProfile yourprograms.py\n```\n\n- Studying the performance of small code fragments", "_____no_output_____" ] ], [ [ "from timeit import timeit\n\ntimeit('math.sqrt(2)', 'import math')", "_____no_output_____" ], [ "timeit('sqrt(2)', 'from math import sqrt')", "_____no_output_____" ] ], [ [ "---\n## 14.14 Making Your Programs Run Faster\n\n### Use Function\nCode in global runs much slower than in functions due to the implementation of local versus global variables", "_____no_output_____" ] ], [ [ "%%time\n\na = 1\nfor _ in range(1000000):\n a += 1", "CPU times: user 126 ms, sys: 1.42 ms, total: 127 ms\nWall time: 138 ms\n" ], [ "%%time \ndef func():\n a = 1\n for _ in range(1000000):\n a += 1\n\nfunc()", "CPU times: user 86.3 ms, sys: 5.37 ms, total: 91.7 ms\nWall time: 130 ms\n" ] ], [ [ "### Selectively eliminate attribute Access\nThe use of dot(.) triggers methods such as `__getattribute__()` and `__getattr__()`, which often involves dict lockup", "_____no_output_____" ] ], [ [ "from timeit import timeit\n\ntimeit('math.sqrt(2)', 'import math')", "_____no_output_____" ], [ "timeit('sqrt(2)', 'from math import sqrt')", "_____no_output_____" ] ], [ [ "However, this only makes sense when the code is frequently executed. (e.g. 
loop) \nOtherwise, it might break readability.", "_____no_output_____" ], [ "### Understand locality of variables", "_____no_output_____" ] ], [ [ "import math\nfrom math import sqrt\n\ndef compute_roots_global_with_dot(nums):\n result = []\n for n in nums:\n result.append(math.sqrt(n))\n return result\n\ndef compute_roots_global(nums):\n result = []\n result_append = result.append\n for n in nums:\n result_append(sqrt(n))\n return result\n\ndef compute_roots_local(nums):\n local_sqrt = sqrt\n result = []\n result_append = result.append\n for n in nums:\n result_append(local_sqrt(n))\n return result", "_____no_output_____" ], [ "timeit('compute_roots_global_with_dot(range(10))', 'from __main__ import compute_roots_global_with_dot')", "_____no_output_____" ], [ "timeit('compute_roots_global(range(10))', 'from __main__ import compute_roots_global')", "_____no_output_____" ] ], [ [ "- The second is faster than the first one.\n - It uses sqrt instead of math.sqrt", "_____no_output_____" ] ], [ [ "timeit('compute_roots_local(range(10))', 'from __main__ import compute_roots_local')", "_____no_output_____" ] ], [ [ "\n- The third one is even faster.\n - It assigns math.sqrt to a local variable", "_____no_output_____" ], [ "In general, ***looking up a value such as self.name will be slower than accessing a local variable***", "_____no_output_____" ], [ "### Avoid gratuitous abstraction", "_____no_output_____" ] ], [ [ "class C:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n \n @property\n def y(self):\n return self._y\n \n @y.setter\n def y(self, value): \n self._y = value\n \nc = C(1, 2)\n\ntimeit('c.x', 'from __main__ import c')", "_____no_output_____" ], [ "timeit('c.y', 'from __main__ import c')", "_____no_output_____" ] ], [ [ "The setter/getter convention is not necessary in Python. \nUsing a property when it is not needed only slows the program down", "_____no_output_____" ], [ "### Use the built-in containers\n\ne.g. string, tuple, list, set, dict \nThey are all implemented in C", "_____no_output_____" ], [ "### Other Discussion\n- Before optimizing, it's usually worthwhile to speed up the algorithms first.\n- Don't worry about optimization until you need to\n - John Ousterhout: \"The best performance improvement is the transition from nonworking to the working state.\"\n\n#### Why is PyPy faster?\nPyPy analyzes the execution of the program and generates native machine code for frequently executed parts.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
ec7461a3b125b667aceef2e367444542ca16e9a2
17,055
ipynb
Jupyter Notebook
python-for-data/Ex03_Booleans_and_Conditionals.ipynb
tinyvyy/atom-assignments
fcbd7334f92534919f798cf81097d851c51ebe37
[ "MIT" ]
null
null
null
python-for-data/Ex03_Booleans_and_Conditionals.ipynb
tinyvyy/atom-assignments
fcbd7334f92534919f798cf81097d851c51ebe37
[ "MIT" ]
null
null
null
python-for-data/Ex03_Booleans_and_Conditionals.ipynb
tinyvyy/atom-assignments
fcbd7334f92534919f798cf81097d851c51ebe37
[ "MIT" ]
null
null
null
29.921053
274
0.494166
[ [ [ "<a href=\"https://colab.research.google.com/github/tinyvyy/atom-assignments/blob/main/Ex03_Booleans_and_Conditionals.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Exercise 03 - Booleans and Conditionals", "_____no_output_____" ], [ "## 1. Simple Function with Conditionals\n\nMany programming languages have [sign](https://en.wikipedia.org/wiki/Sign_function) available as a built-in function. Python does not, but we can define our own!\n\nIn the cell below, define a function called `sign` which takes a numerical argument and returns -1 if it's negative, 1 if it's positive, and 0 if it's 0.", "_____no_output_____" ] ], [ [ "# Your code goes here. Define a function called 'sign'\ndef sign (a): \n if a > 0: \n return 1\n elif a == 0:\n return 0\n else:\n return -1", "_____no_output_____" ], [ "sign(-16)", "_____no_output_____" ] ], [ [ "## 2. Singular vs Plural Nouns\n\nWe've decided to add \"print\" to our `to_smash` function from Exercise 02", "_____no_output_____" ] ], [ [ "def to_smash(total_candies):\n \"\"\"Return the number of leftover candies that must be smashed after distributing\n the given number of candies evenly between 3 friends.\n \n >>> to_smash(91)\n 1\n \"\"\"\n print(\"Splitting\", total_candies, \"candies\")\n return total_candies % 3\n\nto_smash(91)", "Splitting 91 candies\n" ] ], [ [ "What happens if we call it with `total_candies = 1`?", "_____no_output_____" ] ], [ [ "to_smash(1)", "Splitting 1 candies\n" ] ], [ [ "**Wrong grammar there!**\n\nModify the definition in the cell below to correct the grammar of our print statement.\n\n**Your Task:**\n> If there's only one candy, we should use the singular \"candy\" instead of the plural \"candies\"", "_____no_output_____" ] ], [ [ "def to_smash(total_candies):\n \"\"\"Return the number of leftover candies that must be smashed after distributing\n the given number of candies evenly between 3 friends.\n \n >>> to_smash(91)\n 1\n \"\"\"\n if total_candies == 1:\n print(\"Splitting\", total_candies, \"candy\")\n else:\n print(\"Splitting\", total_candies, \"candies\")\n return total_candies % 3\n\nto_smash(91)\nto_smash(1)", "Splitting 91 candies\nSplitting 1 candie\n" ] ], [ [ "## 3. Checking weather again\n\nIn the main lesson we talked about deciding whether we're prepared for the weather. I said that I'm safe from today's weather if...\n- I have an umbrella...\n- or if the rain isn't too heavy and I have a hood...\n- otherwise, I'm still fine unless it's raining *and* it's a workday\n\nThe function below uses our first attempt at turning this logic into a Python expression. I claimed that there was a bug in that code. Can you find it?\n\nTo prove that `prepared_for_weather` is buggy, come up with a set of inputs where it returns the wrong answer.", "_____no_output_____" ] ], [ [ "def prepared_for_weather(have_umbrella, rain_level, have_hood, is_workday):\n # Don't change this code. 
Our goal is just to find the bug, not fix it!\n return have_umbrella or rain_level < 5 and have_hood or not rain_level > 0 and is_workday\n\n# Change the values of these inputs so they represent a case where prepared_for_weather\n# returns the wrong answer.\nhave_umbrella = False\nrain_level = 5.0\nhave_hood = False\nis_workday = True \n\n# Check what the function returns given the current values of the variables above\nactual = prepared_for_weather(have_umbrella, rain_level, have_hood, is_workday)\nprint(actual)\n\n#bug: not (rain_level > 0 and is_workday)", "False\n" ] ], [ [ "## 4. Start being lazy...\n\nThe function `is_negative` below is implemented correctly \n- It returns True if the given number is negative and False otherwise.\n\nHowever, it's more verbose than it needs to be. We can actually reduce the number of lines of code in this function by *75%* while keeping the same behaviour. \n\n**Your task:**\n> See if you can come up with an equivalent body that uses just **one line** of code, and put it in the function `concise_is_negative`. (HINT: you don't even need Python's ternary syntax)", "_____no_output_____" ] ], [ [ "def is_negative(number):\n if number < 0:\n return True\n else:\n return False\n\ndef concise_is_negative(number):\n return number < 0 \n pass # Your code goes here (try to keep it to one line!)\n", "_____no_output_____" ], [ "concise_is_negative(-10)", "_____no_output_____" ] ], [ [ "## 5. Adding Toppings\n\nThe boolean variables `ketchup`, `mustard` and `onion` represent whether a customer wants a particular topping on their hot dog. We want to implement a number of boolean functions that correspond to some yes-or-no questions about the customer's order. For example:", "_____no_output_____" ] ], [ [ "def onionless(ketchup, mustard, onion):\n \"\"\"Return whether the customer doesn't want onions.\n \"\"\"\n return not onion", "_____no_output_____" ] ], [ [ "**Your task:**\n> For each of the remaining functions, fill in the body to match the English description in the docstring. ", "_____no_output_____" ] ], [ [ "def wants_all_toppings(ketchup, mustard, onion):\n \"\"\"Return whether the customer wants \"the works\" (all 3 toppings)\n \"\"\"\n return ketchup and mustard and onion\n pass\n", "_____no_output_____" ], [ "def wants_plain_hotdog(ketchup, mustard, onion):\n \"\"\"Return whether the customer wants a plain hot dog with no toppings.\n \"\"\"\n return not (ketchup and mustard and onion)\n pass\n", "_____no_output_____" ], [ "def exactly_one_sauce(ketchup, mustard, onion):\n \"\"\"Return whether the customer wants either ketchup or mustard, but not both.\n (You may be familiar with this operation under the name \"exclusive or\")\n \"\"\"\n return (ketchup and not mustard) or (not ketchup and mustard)\n pass\n", "_____no_output_____" ] ], [ [ "## 6. <span title=\"A bit spicy\" style=\"color: darkgreen \">🌶️</span>\n\nWe’ve seen that calling `bool()` on an integer returns `False` if it’s equal to 0 and `True` otherwise. What happens if we call `int()` on a bool? Try it out in the notebook cell below.\n\nCan you take advantage of this to write a succinct function that corresponds to the English sentence \"*Does the customer want exactly one topping?*\"?\n\n> *HINT*: You may have already found that `int(True)` is `1`, and `int(False)` is `0`. 
Think about what kinds of basic arithmetic operations you might want to perform on ketchup, mustard, and onion after converting them to integers.", "_____no_output_____" ] ], [ [ "def exactly_one_topping(ketchup, mustard, onion):\n \"\"\"Return whether the customer wants exactly one of the three available toppings\n on their hot dog.\n \"\"\"\n return int(ketchup) + int(mustard) + int(onion) == 1\n pass\n", "_____no_output_____" ], [ "int(True)\nint(False)", "_____no_output_____" ] ], [ [ "# Keep Going 💪", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec746c6e818ed7ed147d0f49b0c50e8f11feea73
14,779
ipynb
Jupyter Notebook
notebooks/MERFISH_Pipeline_-_U2O2_Cell_Culture_-_1_FOV.ipynb
nicopierson/starfish
7192ae2adc0669cd6ebe5e9e898e0b978d5553da
[ "MIT" ]
3
2020-09-01T12:18:20.000Z
2021-05-18T03:50:31.000Z
notebooks/MERFISH_Pipeline_-_U2O2_Cell_Culture_-_1_FOV.ipynb
nicopierson/starfish
7192ae2adc0669cd6ebe5e9e898e0b978d5553da
[ "MIT" ]
null
null
null
notebooks/MERFISH_Pipeline_-_U2O2_Cell_Culture_-_1_FOV.ipynb
nicopierson/starfish
7192ae2adc0669cd6ebe5e9e898e0b978d5553da
[ "MIT" ]
null
null
null
36.401478
643
0.639285
[ [ [ "## Reproduce Published results with Starfish\n\nThis notebook walks through a workflow that reproduces a MERFISH result for one field of view using the starfish package.", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "%matplotlib inline\n\nimport pprint\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom showit import image as show_image\n\nfrom starfish import display\nfrom starfish import data, FieldOfView\nfrom starfish.types import Features, Axes", "_____no_output_____" ], [ "# load the data from cloudfront\nuse_test_data = os.getenv(\"USE_TEST_DATA\") is not None\nexperiment = data.MERFISH(use_test_data=use_test_data)", "_____no_output_____" ] ], [ [ "Individual imaging rounds and channels can also be visualized", "_____no_output_____" ] ], [ [ "primary_image = experiment.fov().get_image(FieldOfView.PRIMARY_IMAGES)", "_____no_output_____" ], [ "# Display the data\n%gui qt5\ndisplay(primary_image)", "_____no_output_____" ] ], [ [ "## Show input file format that specifies how the tiff stack is organized\n\nThe stack contains multiple images corresponding to the channel and imaging rounds. MERFISH builds a 16 bit barcode from 8 imaging rounds, each of which measures two channels that correspond to contiguous (but not necessarily consistently ordered) bits of the barcode.\n\nThe MERFISH computational pipeline also constructs a scalar that corrects for intensity differences across each of the 16 images, e.g., one scale factor per bit position.\n\nThe stacks in this example are pre-registered using fiduciary beads.", "_____no_output_____" ] ], [ [ "pp = pprint.PrettyPrinter(indent=2)\npp.pprint(experiment._src_doc)", "_____no_output_____" ] ], [ [ "## Visualize codebook", "_____no_output_____" ], [ "The MERFISH codebook maps each barcode to a gene (or blank) feature. The codes in the MERFISH codebook are constructed from a 4-hamming error correcting code with exactly 4 \"on\" bits per barcode", "_____no_output_____" ] ], [ [ "experiment.codebook", "_____no_output_____" ] ], [ [ "## Filter and scale raw data before decoding", "_____no_output_____" ], [ "Begin filtering with a high pass filter to remove background signal.", "_____no_output_____" ] ], [ [ "from starfish.image import Filter\nghp = Filter.GaussianHighPass(sigma=3)\nhigh_passed = ghp.run(primary_image, verbose=True, in_place=False)", "_____no_output_____" ] ], [ [ "The below algorithm deconvolves out the point spread function introduced by the microcope and is specifically designed for this use case. The number of iterations is an important parameter that needs careful optimization.", "_____no_output_____" ] ], [ [ "from starfish.types import Clip\ndpsf = Filter.DeconvolvePSF(num_iter=15, sigma=2, clip_method=Clip.SCALE_BY_CHUNK)\ndeconvolved = dpsf.run(high_passed, verbose=True, in_place=False)", "_____no_output_____" ] ], [ [ "Recall that the image is pre-registered, as stated above. Despite this, individual RNA molecules may still not be perfectly aligned across imaging rounds. This is crucial in order to read out a measure of the itended barcode (across imaging rounds) in order to map it to the codebook. To solve for potential mis-alignment, the images can be blurred with a 1-pixel Gaussian kernel. 
The risk here is that this will obfuscate signals from nearby molecules.\n\nA local search in pixel space across imaging rounds can also solve this.", "_____no_output_____" ] ], [ [ "glp = Filter.GaussianLowPass(sigma=1)\nlow_passed = glp.run(deconvolved, in_place=False, verbose=True)", "_____no_output_____" ] ], [ [ "Use MERFISH-calculated size factors to scale the channels across the imaging rounds and visualize the resulting filtered and scaled images. Right now we have to extract this information from the metadata and apply this transformation manually.", "_____no_output_____" ] ], [ [ "if use_test_data:\n scale_factors = {\n (t[Axes.ROUND], t[Axes.CH]): t['scale_factor']\n for t in experiment.extras['scale_factors']\n }\nelse:\n scale_factors = {\n (t[Axes.ROUND], t[Axes.CH]): t['scale_factor']\n for index, t in primary_image.tile_metadata.iterrows()\n }", "_____no_output_____" ], [ "# this is a scaling method. It would be great to use image.apply here. It's possible, but we need to expose H & C to\n# at least we can do it with get_slice and set_slice right now.\nfrom copy import deepcopy\nscaled_image = deepcopy(low_passed)\n\nfor selector in primary_image._iter_axes():\n data = scaled_image.get_slice(selector)[0]\n scaled = data / scale_factors[selector[Axes.ROUND.value], selector[Axes.CH.value]]\n scaled_image.set_slice(selector, scaled, [Axes.ZPLANE])", "_____no_output_____" ] ], [ [ "## Use spot-detector to create 'encoder' table for standardized input to decoder\n\nEach pipeline exposes a spot detector, and this spot detector translates the filtered image into an encoded table by detecting spots. The table contains the spot_id, the corresponding intensity (v) and the channel (c), imaging round (r) of each spot.\n\nThe MERFISH pipeline merges these two steps together by finding pixel-based features, and then later collapsing these into spots and filtering out undesirable (non-spot) features.\n\nTherefore, no encoder table is generated, but a robust SpotAttribute and DecodedTable are both produced:", "_____no_output_____" ], [ "## Decode\n\nEach assay type also exposes a decoder. A decoder translates each spot (spot_id) in the encoded table into a gene that matches a barcode in the codebook. The goal is to decode and output a quality score, per spot, that describes the confidence in the decoding. Recall that in the MERFISH pipeline, each 'spot' is actually a 16 dimensional vector, one per pixel in the image. From here on, we will refer to these as pixel vectors. Once these pixel vectors are decoded into gene values, contiguous pixels that are decoded to the same gene are labeled as 'spots' via a connected components labeler. We shall refer to the latter as spots.\n\nThere are hard and soft decodings -- hard decoding is just looking for the max value in the code book. Soft decoding, by contrast, finds the closest code by distance in intensity. Because different assays each have their own intensities and error modes, we leave decoders as user-defined functions.\n\nFor MERFISH, which uses soft decoding, there are several parameters which are important to determining the result of the decoding method:\n\n### Distance threshold\nIn MERFISH, each pixel vector is a 16d vector that we want to map onto a barcode via minimum euclidean distance. Each barcode in the codebook, and each pixel vector is first mapped to the unit sphere by L2 normalization. As such, the maximum distance between a pixel vector and the nearest single-bit error barcode is 0.5176. 
As such, the decoder only accepts pixel vectors that are below this distance for assignment to a codeword in the codebook.\n\n### Magnitude threshold\nThis is a signal floor for decoding. Pixel vectors with an L2 norm below this floor are not considered for decoding.\n\n### Area threshold\nContiguous pixels that decode to the same gene are labeled as spots via connected components labeling. The minimum area of these spots is set by this parameter. The intuition is that small groups of pixel vectors that pass the distance and magnitude thresholds should probably not be trusted as genes, since the corresponding mRNA transcript would be too small to be real. This parameter can be set based on microscope resolution and signal amplification strategy.\n\n### Crop size\nThe crop size crops the image by a number of pixels large enough to eliminate parts of the image that suffer from boundary effects from both signal acquisition (e.g., FOV overlap) and image processing. Here this value is 40.\n\nGiven these thresholds, the decoder assigns each pixel vector to the closest code (minimum distance between the code and the normalized intensity vector), provided all of the above thresholds are satisfied, and then discards any resulting spots that are too small.", "_____no_output_____" ] ], [ [ "# TODO this crop should be (x, y) = (40, 40) but it was getting eaten by kwargs\nfrom starfish.spots import PixelSpotDecoder\npsd = PixelSpotDecoder.PixelSpotDecoder(\n codebook=experiment.codebook,\n metric='euclidean',\n distance_threshold=0.5176,\n magnitude_threshold=1.77e-5,\n min_area=2,\n max_area=np.inf,\n norm_order=2,\n crop_z=0,\n crop_y=0,\n crop_x=0\n)\n\ninitial_spot_intensities, prop_results = psd.run(scaled_image)\n\nspot_intensities = initial_spot_intensities.loc[initial_spot_intensities[Features.PASSES_THRESHOLDS]]", "_____no_output_____" ] ], [ [ "## Compare to results from paper\n\nThe below plot aggregates gene copy number across single cells in the field of view and compares the results to the published intensities in the MERFISH paper.\n\nTo make this match perfectly, run deconvolution 15 times instead of 14. 
As presented below, STARFISH displays a lower detection rate.", "_____no_output_____" ] ], [ [ "bench = pd.read_csv('https://d2nhj9g34unfro.cloudfront.net/MERFISH/benchmark_results.csv',\n dtype = {'barcode':object})\n\nbenchmark_counts = bench.groupby('gene')['gene'].count()\ngenes, counts = np.unique(spot_intensities[Features.AXIS][Features.TARGET], return_counts=True)\nresult_counts = pd.Series(counts, index=genes)\n\ntmp = pd.concat([result_counts, benchmark_counts], join='inner', axis=1).values\n\nr = np.corrcoef(tmp[:, 1], tmp[:, 0])[0, 1]\nx = np.linspace(50, 2000)\nf, ax = plt.subplots(figsize=(6, 6))\nax.scatter(tmp[:, 1], tmp[:, 0], 50, zorder=2)\nax.plot(x, x, '-k', zorder=1)\n\nplt.xlabel('Gene copy number Benchmark')\nplt.ylabel('Gene copy number Starfish')\nplt.xscale('log')\nplt.yscale('log')\nplt.title(f'r = {r}');", "_____no_output_____" ] ], [ [ "## Visualize results\n\nThis image applies a pseudo-color to each gene channel to visualize the position and size of all called spots in a subset of the test image", "_____no_output_____" ] ], [ [ "from scipy.stats import scoreatpercentile\nimport warnings\n\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(30, 15))\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore', FutureWarning)\n area_lookup = lambda x: 0 if x == 0 else prop_results.region_properties[x - 1].area\n vfunc = np.vectorize(area_lookup)\n mask = np.squeeze(vfunc(prop_results.label_image))\n show_image(np.squeeze(prop_results.decoded_image)*(mask > 2), cmap='nipy_spectral', ax=ax1)\n ax1.axes.set_axis_off()\n\n mp = scaled_image.max_proj(Axes.ROUND, Axes.CH, Axes.ZPLANE)\n mp_numpy = mp._squeezed_numpy(Axes.ROUND, Axes.CH, Axes.ZPLANE)\n clim = scoreatpercentile(mp_numpy, [0.5, 99.5])\n show_image(mp_numpy, clim=clim, ax=ax2)\n\n f.tight_layout()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec746ff041f56d256b46ec8bc69b34784642d0f4
17,705
ipynb
Jupyter Notebook
Updated_LSTM.ipynb
jordanlei/neural-network-autocorrect
8ab1c55e5d612f0a09819facca32621290f60498
[ "MIT" ]
null
null
null
Updated_LSTM.ipynb
jordanlei/neural-network-autocorrect
8ab1c55e5d612f0a09819facca32621290f60498
[ "MIT" ]
null
null
null
Updated_LSTM.ipynb
jordanlei/neural-network-autocorrect
8ab1c55e5d612f0a09819facca32621290f60498
[ "MIT" ]
null
null
null
17,705
17,705
0.608246
[ [ [ "# Imports and Setup", "_____no_output_____" ] ], [ [ "!git clone https://github.com/cis700/hw1-release.git\n!mv hw1-release/dills/* .\n!mv hw1-release hw1\n!pip install pymagnitude\n!pip install nltk\n!pip install symspellpy\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport torch\nimport torchvision\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport torch\nimport time\nimport nltk\nfrom nltk.metrics.distance import edit_distance, jaccard_distance\nimport threading\nimport random\nfrom nltk.stem import PorterStemmer\nfrom pymagnitude import *\nimport datetime\nimport pickle\nfrom tqdm import tqdm\nfrom symspellpy.symspellpy import SymSpell, Verbosity\nfrom pymagnitude import *\nimport ast\n\n\n\nfrom hw1.helper import Logger\n\n\n##MISSING NLTK DOWNLOAD\nnltk.download(\"brown\")", "fatal: destination path 'hw1-release' already exists and is not an empty directory.\nmv: cannot stat 'hw1-release/dills/*': No such file or directory\nmv: cannot move 'hw1-release' to 'hw1/hw1-release': Directory not empty\nRequirement already satisfied: pymagnitude in /usr/local/lib/python3.6/dist-packages (0.1.120)\nRequirement already satisfied: nltk in /usr/local/lib/python3.6/dist-packages (3.2.5)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from nltk) (1.12.0)\nRequirement already satisfied: symspellpy in /usr/local/lib/python3.6/dist-packages (6.3.8)\nRequirement already satisfied: numpy>=1.13.1 in /usr/local/lib/python3.6/dist-packages (from symspellpy) (1.16.3)\n[nltk_data] Downloading package brown to /root/nltk_data...\n[nltk_data] Package brown is already up-to-date!\n" ] ], [ [ "# Tensorboard", "_____no_output_____" ] ], [ [ "#! rm -r ./logs\nLOG_DIR = './logs'\nget_ipython().system_raw(\n 'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'\n .format(LOG_DIR)\n)\n\n!if [ -f ngrok ] ; then echo \"Ngrok already installed\" ; else wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip > /dev/null 2>&1 && unzip ngrok-stable-linux-amd64.zip > /dev/null 2>&1 ; fi\n\nget_ipython().system_raw('./ngrok http 6006 &')\n\n! 
curl -s http://localhost:4040/api/tunnels | python3 -c \\\n \"import sys, json; print('Tensorboard Link: ' +str(json.load(sys.stdin)['tunnels'][0]['public_url']))\"", "_____no_output_____" ] ], [ [ "# Google Drive", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')\nlocal_dir= 'drive/My Drive/CIS700-004/project'\n%cd 'drive/My Drive/CIS700-004/project'", "_____no_output_____" ] ], [ [ "# Data Generator", "_____no_output_____" ] ], [ [ "#initialize spell suggester\nsym_spell = SymSpell(3, 6, compact_level=0)\nsym_spell.load_dictionary(\"frequency_dictionary_en_82_765.txt\", 0, 1)\n\n#max number of candidates to consider as a replacement for a word\ncandLimit = 100\n\n#list of all parts of speech which are allowed to be replaced when part-of-speech filtering is on\nswitch_tags = [\"JJ\", \"JJR\", \"JJS\", \"NN\", \"NNS\", \"RB\", \"RBR\", \"RBS\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"]\n\n#list of all nltk pos tags (other than punctuation)\nall_pos = [\"CC\", \"DT\", \"EX\", \"IN\", \"JJ\", \"JJR\", \"JJS\", \"MD\", \"NN\", \"NNS\", \"NNP\", \"NNPS\", \"PDT\", \"POS\", \"PRP\", \"PRP$\", \"RB\", \"RBR\", \"RBS\", \"RP\", \"TO\", \"UH\", \"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\", \"WDT\", \"WP\", \"WP$\", \"WRB\"]\n\ndef corrupt(tokens, tags, dist_threshold=3, pos_filter=True):\n \"\"\"replaces (num_words) words in the (sentence) with \n similar words that are only (dist_threshold) away, \n with optional filtering by part-of-speech\"\"\"\n \n #initialize labels (1 for correct words, 0 for out-of-place word)\n labels = [1]*len(tags)\n \n #gets indices of words that can be replaced (must have suitable part-of-speech), \n #or all indices of real words (non-punctuation/symbols) if pos_filter is false\n replaceables = getReplaceables(tags, pos_filter)\n real_word = \"\"\n \n #if there are words that can be replaced, replace one\n if(len(replaceables)>0):\n replace_ind = random.choice(replaceables)\n real_word = tokens[replace_ind]\n fake_word = suggestWord(real_word, tags[replace_ind], dist_threshold, pos_filter=pos_filter)\n if(fake_word!=real_word):\n labels[replace_ind] = 0\n tokens[replace_ind] = fake_word\n return tokens, labels, real_word\n\ndef getReplaceables(pos_tags, filtering):\n \"\"\"returns indices of tokens that can be replaced (restricted set if pos_filtering is on)\"\"\"\n if(filtering):\n replaceables = [i for i,t in enumerate(pos_tags) if t in switch_tags]\n else:\n replaceables = [i for i,t in enumerate(pos_tags) if t in all_pos]\n return replaceables\n\ndef suggestWord(real_word, real_tag, e_thresh=3, pos_filter=True):\n \"\"\"suggests a word at most edit distance e_thresh away, with optional pos_filtering\n (enforces that new word has same pos as original word)\"\"\"\n \n #get similarly spelled words\n suggestions = sym_spell.lookup(real_word, Verbosity.ALL, e_thresh)\n \n #initialize list of word candidates and frequencies\n candidates = [\"\"] * len(suggestions)\n candFreq = [0]*len(suggestions)\n numSuggest = 0\n \n #loop through all word candidates\n for s in suggestions:\n \n #add the current word to the candidate list if it has correct pos, or if pos filtering is off\n tag=\"\"\n if(pos_filter):\n _, tag = nltk.pos_tag([s.term])[0]\n if((tag==real_tag or (not pos_filter)) and s.term!=real_word): \n candidates[numSuggest] = s.term\n candFreq[numSuggest] = s.count\n numSuggest+=1\n \n #stop searching if maximum number of candidates is reached\n if(numSuggest>candLimit):\n break\n \n #if there is a suitable 
replacement word, return it; otherwise, return original word\n if(numSuggest>0):\n return random.choices(candidates[0:numSuggest], weights=candFreq[0:numSuggest], k=1)[0]\n else:\n return real_word\n\n\ndef createData(output_file, num_cycles, pos_filter):\n \"\"\"samples sentences from brown corpus for num_cycles, and writes corrupted data to output_file.\"\"\"\n \n #loads brown corpus\n brown = nltk.corpus.brown\n allFiles = brown.fileids()\n \n #max number of sentences to pull from any given file at a time\n sent_per_file = 20\n \n #min sentence length to be considered valid sentence\n sent_threshold = 5\n \n with open(output_file, 'a') as f:\n for j in range(num_cycles):\n \n #randomly choose file and get pos-tagged sentences \n currentFile = random.choice(allFiles) \n sentences = brown.tagged_sents(currentFile)\n sentences = [s for s in sentences if len(s)>=sent_threshold]\n \n for i in tqdm(range(sent_per_file)):\n #randomly choose sentence, corrupt it, \n #then write sentence, label, and real word to output file\n tagged_sent = random.choice(sentences)\n tokens, tags = zip(*tagged_sent)\n tokens, tags = list(tokens), list(tags)\n new_sent, labels, real_word = corrupt(tokens, tags, pos_filter=pos_filter)\n f.write(str(new_sent)+\"\\n\"+str(labels)+\"\\n\"+real_word+\"\\n\")", "_____no_output_____" ], [ "sym_spell = SymSpell(3, 6, compact_level=0)\nsym_spell.load_dictionary(\"freqdict.pkl\", 0, 1)\n\nsuggestions = sym_spell.lookup(\"dog\", Verbosity.ALL, 3)\nprint(suggestions)\nwith open(\"freqdict.pkl\", \"r\") as f:\n i=0\n for line in f:\n print(f)", "[]\n" ], [ "##EDIT TO CORRECT FILE CONVENTIONS\ncreateData(\"train_filtered.txt\", 2000, True)\ncreateData(\"test_filtered.txt\", 2000, True)\ncreateData(\"train_unfiltered.txt\", 2000, True)\ncreateData(\"test_unfiltered.txt\", 2000, True)", "_____no_output_____" ], [ "def getDataset(filename, savename):\n \"\"\"Converts corrupted sentences to vector representations (using pymagnitude), saving to output file\"\"\"\n \n #load magnitude embeddings\n vectors = Magnitude(\"GoogleNews-vectors-negative300.magnitude\")\n \n sentences = []\n labels = []\n with open(filename, \"r\") as f:\n \n #iterates through file, converts each word in sentence to vector, \n #builds sentences and labels lists\n for line in tqdm(f):\n sentence_words = ast.literal_eval(line)\n label = ast.literal_eval(f.readline())\n real_word = f.readline()[0:-1]\n sentence_vector = []\n for word in sentence_words:\n sentence_vector.append(vectors.query(word))\n sentences.append(sentence_vector)\n labels.append(label)\n \n #saves sentence vector and label objects as pickle files\n ##EDIT TO CORRECT FILE CONVENTION\n pickle.dump(sentences, open(savename+\"sentvectors.pkl\", \"wb\"))\n pickle.dump(labels, open(savename+\"labelvectors.pkl\", \"wb\"))", "_____no_output_____" ], [ "##RUN ON EACH DATASET", "_____no_output_____" ] ], [ [ "# LSTM", "_____no_output_____" ] ], [ [ "class LSTM(nn.Module): \n def __init__(self, input_size, hidden_size, num_layers, bidirectional= False):\n super(LSTM, self).__init__()\n self.hidden_size= hidden_size\n self.num_layers= num_layers\n self.num_directions = 2 if bidirectional else 1\n self.lstm= nn.LSTM(input_size, hidden_size, \n num_layers, batch_first= True, bidirectional= bidirectional)\n self.fc= nn.Linear(hidden_size*self.num_directions, 1)\n \n #for logging\n self.step = 0\n self.epoch = 0\n \n def forward(self, x): \n h0 = torch.zeros(self.num_directions*self.num_layers, 1, self.hidden_size).cuda()\n lstm_out, _ = self.lstm(x, (h0, 
c0))\n out = lstm_out.view(-1, lstm_out.size(2))\n out = F.sigmoid(self.fc(F.relu(out)))\n return out\n\n def train(self, train_data, train_labels, max_epochs, lr= .00001):\n \n #initialize objects\n logger = Logger(\"./\"+modelname+\"_logs\")\n optimizer = torch.optim.Adam(self.parameters(), lr= lr)\n \n #BCE loss since predicting binary tag for each word\n criterion = torch.nn.BCELoss()\n \n for epoch in range(max_epochs):\n \n #randomize order of examples\n order = torch.randperm(len(train_data))\n for i in tqdm(range(len(train_data))):\n \n #read next sentence and labels from dataset\n sentence = train_data[order[i]]\n word_labels = train_labels[order[i]]\n\n #rearrange sentence and labels for feeding into network\n inputs = torch.tensor(sentence).view(1, len(sentence), -1).type(torch.FloatTensor).cuda()\n labels = torch.tensor([word_labels]).transpose(0,1).float().cuda()\n\n #backprop\n optimizer.zero_grad()\n y_pred = self.forward(inputs)\n loss = criterion(y_pred, labels)\n loss.backward()\n optimizer.step()\n\n #log loss\n logger.scalar_summary(\"loss\", loss.item(), self.step)\n logger.writer.flush()\n self.step +=1\n\n torch.save(self, modelname +\"/epoch\" + str(self.epoch))\n self.epoch +=1", "_____no_output_____" ], [ "##TRAIN LSTMS (4)", "_____no_output_____" ] ], [ [ "# Evaluation", "_____no_output_____" ] ], [ [ "def evaluate(sentence_file, text_file, output_file, model):\n results = []\n sentence_vects = pickle.load(open(sentence_file, \"rb\"))\n with open(text_file, \"r\") as f:\n i=0\n with torch.no_grad():\n \n #for each example, record the sentence, vectorized sentence, true labels, and predictions\n for line in tqdm(f):\n \n #read in example\n sentence_arr = ast.literal_eval(line)\n label = ast.literal_eval(f.readline())\n real_word = f.readline()[0:-1]\n sentence_vect = sentence_vects[i]\n \n #get model predictions\n tensor_inputs = torch.tensor(sentence_vect).view(1, len(sentence_vect), -1).type(torch.FloatTensor).cuda()\n out = model.forward(tensor_inputs)\n out = out.view(-1)\n out=out.cpu().tolist()\n \n #discretize predictions, with threshold at .5\n pred = [1 if j>.5 else 0 for j in out]\n \n #get which words were predicted to be out-of-place, and which ones actually were\n predicted_inapprop = [sentence_arr[j] for j in range(len(sentence_arr)) if pred[j]<.5]\n actual_inapprop = [sentence_arr[j] for j in range(len(sentence_arr)) if label[j]==0]\n \n #append to results list\n results.append({\"sentence\":sentence_arr, \n \"label\":label, \n \"output\":out, \n \"discrete\": pred,\n \"inapprop\":actual_inapprop, \n \"pred_inapprop\":predicted_inapprop})\n i+=1\n pickle.dump(results, open(output_file, \"wb\"))\n return", "_____no_output_____" ], [ "##RUN ON ALL MODELS, 2 DATASETS", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec747713e81725556a3f92c5ba67913118de173d
27,874
ipynb
Jupyter Notebook
05_dictionaries.ipynb
rodmorley/ElementsOfDataScience
4444a77a40d17191a43d58aad7f8dab34c2c38a1
[ "MIT" ]
1
2020-10-20T01:40:49.000Z
2020-10-20T01:40:49.000Z
05_dictionaries.ipynb
wanghao1991217/ElementsOfDataScience
5c7acc4ef8601af18a803d14a2a3b859edebfe46
[ "MIT" ]
null
null
null
05_dictionaries.ipynb
wanghao1991217/ElementsOfDataScience
5c7acc4ef8601af18a803d14a2a3b859edebfe46
[ "MIT" ]
1
2020-02-19T02:43:46.000Z
2020-02-19T02:43:46.000Z
23.54223
279
0.54976
[ [ [ "# Dictionaries\n\nElements of Data Science\n\nby [Allen Downey](https://allendowney.com)\n\n[MIT License](https://opensource.org/licenses/MIT)\n\n### Goals\n\nIn the previous notebook we used a `for` loop to read a file and count the words. In this notebook, you'll learn about a new type called a \"dictionary\", and we will use it to count the number of unique words and the number of times each one appears.\n\nAlong the way, you will also see how to use an index to select an element from a sequence (tuple, list, or array). And you will learn a little about Unicode, which is used to represent letters, numbers, and punctuation for almost every language in the world.", "_____no_output_____" ], [ "## Indexing\n\nSuppose you have a variable named `t` that refers to a list or tuple.\nYou can select an element using the bracket operator, `[]`. \n\nFor example, here's a tuple of strings:", "_____no_output_____" ] ], [ [ "t = 'zero', 'one', 'two'", "_____no_output_____" ] ], [ [ "To select the first element, we put `0` in brackets:", "_____no_output_____" ] ], [ [ "t[0]", "_____no_output_____" ] ], [ [ "To select the second element, we put `1` in brackets:", "_____no_output_____" ] ], [ [ "t[1]", "_____no_output_____" ] ], [ [ "\nTo select the third element, we put `2` in brackets:", "_____no_output_____" ] ], [ [ "t[2]", "_____no_output_____" ] ], [ [ "The number in brackets is called an \"index\" because it indicates which element we want.\n\nTuples and lists use [zero-based numbering](https://en.wikipedia.org/wiki/Zero-based_numbering); that is, the index of the first element is 0. Some other programming languages use one-based numbering. There are pros and cons of both systems.\n\nThe index in brackets can also be a variable:", "_____no_output_____" ] ], [ [ "i = 1\nt[i]", "_____no_output_____" ] ], [ [ "Or an expression with variables, values, and operators:", "_____no_output_____" ] ], [ [ "t[i+1]", "_____no_output_____" ] ], [ [ "But if the index goes past the end of the list or tuple, you get an error:", "_____no_output_____" ] ], [ [ "t[3]", "_____no_output_____" ] ], [ [ "The index has to be an integer; if it is any other type, you get an error.", "_____no_output_____" ] ], [ [ "t[1.5]", "_____no_output_____" ], [ "t['index']", "_____no_output_____" ] ], [ [ "**Exercise:** You can use negative integers as indices. Try using `-1` and `-2` as indices, and see if you can figure out what they do. ", "_____no_output_____" ] ], [ [ "# Solution goes here", "_____no_output_____" ], [ "# Solution goes here", "_____no_output_____" ] ], [ [ "## Dictionaries\n\nA dictionary is similar to a tuple or list, but in a dictionary, the index can be almost any type, not just an integer.\n\nWe can create an empty dictionary like this:", "_____no_output_____" ] ], [ [ "d = {}", "_____no_output_____" ] ], [ [ "Then we can add elements like this:", "_____no_output_____" ] ], [ [ "d['one'] = 1\nd['two'] = 2", "_____no_output_____" ] ], [ [ "If you display the dictionary, it shows each index and the corresponding value. ", "_____no_output_____" ] ], [ [ "d", "_____no_output_____" ] ], [ [ "Instead of creating an empty dictionary and then adding elements, you can create a dictionary and specify the elements at the same time:", "_____no_output_____" ] ], [ [ "d = {'one': 1, 'two': 2, 'three': 3}\nd", "_____no_output_____" ] ], [ [ "When we are talking about dictionaries, an index is usually called a \"key\". 
In this example, the keys are strings and the corresponding values are integers.\n\nA dictionary is also called a \"map\", because it represents a mapping, in the sense of a correspondence, between keys and values. So we might say that this dictionary \"maps from\" English number names to the corresponding integers.\n\nYou can use the bracket operator to select an element from a dictionary, like this:", "_____no_output_____" ] ], [ [ "d['two']", "_____no_output_____" ] ], [ [ "But don't forget the quotation marks. If you write something like this:", "_____no_output_____" ] ], [ [ "d[two]", "_____no_output_____" ] ], [ [ "Python looks for a variable named `two` and doesn't find one.", "_____no_output_____" ], [ "To check whether a particular key is in a dictionary, you can use the special word `in`:", "_____no_output_____" ] ], [ [ "'one' in d", "_____no_output_____" ], [ "'zero' in d", "_____no_output_____" ] ], [ [ "The word `in` is actually an operator in Python, so you can't use it as a variable name:", "_____no_output_____" ] ], [ [ "in = 5", "_____no_output_____" ] ], [ [ "If a key is already in a dictionary, adding it again has no effect:", "_____no_output_____" ] ], [ [ "d", "_____no_output_____" ], [ "d['one'] = 1\nd", "_____no_output_____" ] ], [ [ "But you can also change the value associated with a key:", "_____no_output_____" ] ], [ [ "d['one'] = 100\nd", "_____no_output_____" ] ], [ [ "You can loop through the keys in a dictionary like this:", "_____no_output_____" ] ], [ [ "for key in d:\n print(key)", "_____no_output_____" ] ], [ [ "If you want the keys and the values, a simple way to get them is to loop through the keys and look up the values:", "_____no_output_____" ] ], [ [ "for key in d:\n print(key, d[key])", "_____no_output_____" ] ], [ [ "Or you can loop through both at the same time, like this:", "_____no_output_____" ] ], [ [ "for key, value in d.items():\n print(key, value)", "_____no_output_____" ] ], [ [ "The `items` method loops through the key-value pairs in the dictionary; each time through the loop, they are assigned to `key` and `value`.", "_____no_output_____" ], [ "**Exercise:** Make a dictionary with the numbers `1`, `2`, and `3` as keys and strings as values. The strings should be the numbers \"one\", \"two\", and \"three\" in any language you know.\n\nWrite a loop that prints just the values from the dictionary.", "_____no_output_____" ] ], [ [ "# Solution goes here", "_____no_output_____" ], [ "# Solution goes here", "_____no_output_____" ] ], [ [ "## Unique words\n\nIn the previous notebook we downloaded *War and Peace* from [Project Gutenberg](https://www.gutenberg.org) and counted the number of lines and words.\n\nNow that we have dictionaries, we can also count the number of unique words and the number of times each one appears.\n\nFirst, let's download the book again. If you run the following cell, it checks to see whether you already have a file named `2600-0.txt`; if not, it uses `wget` to copy the file from Project Gutenberg to your computer. 
", "_____no_output_____" ] ], [ [ "import os\n\nif not os.path.exists('2600-0.txt'):\n !wget https://www.gutenberg.org/files/2600/2600-0.txt", "_____no_output_____" ] ], [ [ "Now we can read the file and count the words as we did in the previous notebook:", "_____no_output_____" ] ], [ [ "fp = open('2600-0.txt')\ncount = 0\nfor line in fp:\n count += len(line.split())\n \ncount", "_____no_output_____" ] ], [ [ "To count the number of unique words, I'll loop through the words in each line and add as keys in a dictionary:", "_____no_output_____" ] ], [ [ "fp = open('2600-0.txt')\nunique_words = {}\nfor line in fp:\n for word in line.split():\n unique_words[word] = 1", "_____no_output_____" ] ], [ [ "This is the first example we've seen with one loop inside another.\n\n* The outer loop runs through the lines in the file.\n\n* The inner loops runs through the words in each line.\n\nEach time through the inner loop, we add a word as a key in the dictionary, with the value 1. If the same word appears more than once, it gets added to the dictionary again, which has no effect. So the dictionary contains only one copy of each unique word in the file.\n\nAt the end of the loop, we can display the first 10 keys:", "_____no_output_____" ] ], [ [ "i = 0\nfor key in unique_words:\n print(key)\n i += 1\n if i == 10:\n break", "_____no_output_____" ] ], [ [ "So far, it looks like all the words in the file, in order.\n\nBut each word only appears once, so the number of keys is the number of unique words:", "_____no_output_____" ] ], [ [ "len(unique_words)", "_____no_output_____" ] ], [ [ "It looks like there are about 42,000 different words in the book, which is substantially less than the total number of words, about 560,000. \n\nBut that's not quite right, because we have not taken into account capitalization and punctuation.", "_____no_output_____" ], [ "**Exercise:** Before we deal with those problems, let's practice with \"nested loops\", that is, one loop inside another.\n\nSuppose you have a list of words, like this:", "_____no_output_____" ] ], [ [ "line = ['War', 'and', 'Peace']", "_____no_output_____" ] ], [ [ "Write a nested loop that iterates through each word in the list, and each letter in each word, and prints the letters on separate lines.", "_____no_output_____" ] ], [ [ "# Solution goes here", "_____no_output_____" ] ], [ [ "## Capitalization\n\nWhen we count unique words, we probably want to treat `The` and `the` as the same word. We can do that by converting all words to lower case, using the `lower` function:", "_____no_output_____" ] ], [ [ "word = 'The'\nword.lower()", "_____no_output_____" ], [ "word = 'the'\nword.lower()", "_____no_output_____" ] ], [ [ "`lower` creates a new string; it does not modify the original string. ", "_____no_output_____" ] ], [ [ "word = 'THE'\nword.lower()", "_____no_output_____" ], [ "word", "_____no_output_____" ] ], [ [ "However, you can assign the new string back to the existing variable, like this:", "_____no_output_____" ] ], [ [ "word = 'THE'\nword = word.lower()", "_____no_output_____" ] ], [ [ "Now if we can display the new value of `word`, we get the lowercase version:", "_____no_output_____" ] ], [ [ "word", "_____no_output_____" ] ], [ [ "**Exercise:** Modify the previous loop so it makes a lowercase version of each word before adding it to the dictionary. 
How many unique words are there, if we ignore the difference between uppercase and lowercase?", "_____no_output_____" ] ], [ [ "# Solution goes here", "_____no_output_____" ] ], [ [ "## Punctuation\n\nTo remove punctuation from the words, we can use `strip`, which removes specified characters from the beginning and end of a string. Here's an example:", "_____no_output_____" ] ], [ [ "word = 'abracadabra'\nword.strip('ab')", "_____no_output_____" ] ], [ [ "In this example, `strip` removes all instances of `a` and `b` from the beginning and end of the word, but not from the middle.\n\nBut note that it makes a new word; it doesn't modify the original:", "_____no_output_____" ] ], [ [ "word", "_____no_output_____" ] ], [ [ "To remove punctuation, we can use the `string` library, which provides a variable named `punctuation`.", "_____no_output_____" ] ], [ [ "import string\n\nstring.punctuation", "_____no_output_____" ] ], [ [ "`string.punctuation` contains the most common punctuation marks, but as we'll see, not all of them.\n\nNevertheless, we can use it to handle most cases. Here's an example:", "_____no_output_____" ] ], [ [ "line = \"It's not given to people to judge what's right or wrong.\"\n\nfor word in line.split():\n word = word.strip(string.punctuation)\n print(word)", "_____no_output_____" ] ], [ [ "Notice that `strip` does not remove the apostrophes from the middle of `It's` and `what's`, which is probably what we want.\n\nTo see how well it works, I'll select an arbitrary line from the file:", "_____no_output_____" ] ], [ [ "fp = open('2600-0.txt')\ncount = 0\nfor line in fp:\n if count == 1000:\n break\n count += 1\n \nline", "_____no_output_____" ] ], [ [ "And try to remove punctuation from the words:", "_____no_output_____" ] ], [ [ "for word in line.split():\n word = word.strip(string.punctuation)\n print(word)", "_____no_output_____" ] ], [ [ "It works pretty well, but the last word is a problem because it ends with a quotation mark that is not in `string.punctuation`. To fix this problem, we'll use the following loop, which\n\n1. Reads the file and builds a dictionary that contains all punctuation marks that appear in the book, then\n\n2. Uses the `join` function to concatenate the keys of the dictionary into a single string.\n\nYou don't have to understand everything about how it works, but you should read it and see how much you can figure out. You can read [the documentation of the `unicodedata` library here](https://docs.python.org/3/library/unicodedata.html).", "_____no_output_____" ] ], [ [ "import unicodedata\n\nfp = open('2600-0.txt')\npunc_marks = {}\nfor line in fp:\n for x in line:\n category = unicodedata.category(x)\n if category[0] == 'P':\n punc_marks[x] = 1\n \nall_punctuation = ''.join(punc_marks)\nprint(all_punctuation)", "_____no_output_____" ] ], [ [ "**Exercise:** Modify the word-counting loop from the previous section to convert words to lower case *and* strip punctuation before adding them to the dictionary. Now how many unique words are there?\n\nOptional: You might want to skip over the frontmatter and start with the text of Chapter 1, and skip over the license at the end, as we did in the previous notebook.", "_____no_output_____" ] ], [ [ "# Solution goes here", "_____no_output_____" ] ], [ [ "## Word frequencies\n\nIn the previous section we counted the number of unique words, but we might also want to know how often each word appears. 
Then we can find the most common and least common words in the book.\n\nTo count the frequency of each word, we'll make a dictionary that maps from each word to the number of times it appears.\n\nHere's an example that loops through a string and counts the number of times each letter appears.", "_____no_output_____" ] ], [ [ "line = 'If everyone fought for their own convictions there would be no war.'\n\nletter_counts = {}\nfor x in line:\n if x in letter_counts:\n letter_counts[x] += 1\n else:\n letter_counts[x] = 1\n \nletter_counts", "_____no_output_____" ] ], [ [ "The `if` statement in the previous example uses a feature we have not seen before, an `else` clause.\n\nHere's how it works.\n\n1. First, it checks whether the letter is already a key in the dictionary, `letter_counts`.\n\n2. If so, it runs the first statement, `letter_counts[x] += 1`, which increments the value associated with the letter, `x`.\n\n3. Otherwise, it runs the second statement, `letter_counts[x] = 1`, which adds `x` as a new key, with the value `1` indicating that we have seen the new letter once.\n\nThe result is a dictionary that maps from each letter to the number of times it appears.", "_____no_output_____" ], [ "To get the most common letters, we can use a `Counter`, which is similar to a dictionary. To use it, we have to import a library called `collections`: ", "_____no_output_____" ] ], [ [ "import collections", "_____no_output_____" ] ], [ [ "Then we use `collections.Counter` as a function to convert the dictionary to a `Counter` value:", "_____no_output_____" ] ], [ [ "counter = collections.Counter(letter_counts)\ntype(counter)", "_____no_output_____" ] ], [ [ "`Counter` provides a function called `most_common` we can use to get the most common characters:", "_____no_output_____" ] ], [ [ "counter.most_common(3)", "_____no_output_____" ] ], [ [ "The result is a list of tuples, where each tuple contains a character and an integer.\n\nThe most common character is a space, followed by `o` and `e`.", "_____no_output_____" ], [ "**Exercise:** Modify the loop from the previous exercise to count the frequency of the words in *War and Peace*; then print the 20 most common words and the number of times each one appears.", "_____no_output_____" ] ], [ [ "# Solution goes here", "_____no_output_____" ] ], [ [ "**Exercise:** You can run `most_common` with no value in parentheses, like this:\n\n```\nword_freq_pairs = counter.most_common()\n```\n\nThe result is a list of tuples, with one tuple for every unique word in the book. Assign the result to a variable so it doesn't get displayed. Then answer the following questions:\n\n1. How many times does the #1 ranked word appear (that is, the first element of the list)?\n\n2. How many times does the #10 ranked word appear?\n\n3. How many times does the #100 ranked word appear?\n\n4. How many times does the #1000 ranked word appear?\n\n5. How many times does the #10000 ranked word appear?\n\nDo you see a pattern in the results? We will explore this pattern more in the next notebook.", "_____no_output_____" ] ], [ [ "# Solution goes here", "_____no_output_____" ], [ "# Solution goes here", "_____no_output_____" ], [ "# Solution goes here", "_____no_output_____" ], [ "# Solution goes here", "_____no_output_____" ], [ "# Solution goes here", "_____no_output_____" ] ], [ [ "**Exercise:** Write a loop that counts how many words appear 200 times. What are they? 
How many words appear 100 times, 50 times, and 20 times?\n\n**Optional:** If you know how to define a function, write a function that takes a `Counter` and a frequency as arguments, prints all words with that frequency, and returns the number of words with that frequency.", "_____no_output_____" ] ], [ [ "# Solution goes here", "_____no_output_____" ], [ "# Solution goes here", "_____no_output_____" ], [ "# Solution goes here", "_____no_output_____" ], [ "# Solution goes here", "_____no_output_____" ], [ "# Solution goes here", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec747fa74048a413ceb618f8f5c1a4ad751aea2f
14,821
ipynb
Jupyter Notebook
notebooks/WIP_Demo_For_Eastman.ipynb
matthewjohnpayne/MPCAdvancer
1a244c4740eaff3128d9ab477519004434202796
[ "MIT" ]
null
null
null
notebooks/WIP_Demo_For_Eastman.ipynb
matthewjohnpayne/MPCAdvancer
1a244c4740eaff3128d9ab477519004434202796
[ "MIT" ]
null
null
null
notebooks/WIP_Demo_For_Eastman.ipynb
matthewjohnpayne/MPCAdvancer
1a244c4740eaff3128d9ab477519004434202796
[ "MIT" ]
null
null
null
40.056757
154
0.578706
[ [ [ "Nov 2018\nPayne\n\n - Want to create a notebook to demo to J. Eastman some of the basic functionalities that (a) he'll need, and (b) that are available\n ", "_____no_output_____" ], [ "### Standard python / jupyter imports ", "_____no_output_____" ] ], [ [ "import numpy as np\nimport os, sys\n#import astropy as AP", "_____no_output_____" ] ], [ [ "### Imports of *HIGHLY DEVELOPMENTAL* (i.e. potentially wrong/slow) packages\n - This needs to be updated / generalized to get the damn things to install from pip ...", "_____no_output_____" ] ], [ [ "# >>> pip install mpcutilities \nimport mpcutilities as MPCU\n#import mpcadvancer as MPCA", "_____no_output_____" ] ], [ [ "### Orbit specification", "_____no_output_____" ] ], [ [ "# Specify a Keplerian orbit\na,e,i,O,o,M = (2.781870259390299, 0.07692192514945771, 0.18480538653567896, 1.4012469961929193, 1.237855673926063, -1.0950384781408455)\n \n# Use kep2cartState to calculate the cartesian state from the specified elements \nCartState = kc.kep2cartState(PHYS.GMsun , a,e,i,O,o,M)\nprint(\"CartState=\", CartState.x, CartState.y, CartState.z, CartState.xd, CartState.yd, CartState.zd)\n\n# Transform it back to Keplerian \nels = kc.cart2kep(PHYS.GMsun , CartState)\n\n# Check that the numbers look ok \nassert( np.allclose( els , np.array([a,e,i,O,o,M]) )) \n", "_____no_output_____" ] ], [ [ "### Simulating Observations of an orbiting object", "_____no_output_____" ] ], [ [ "# Specify the date of the proposed observation of the object\n# - Midnight, Jan 1st, 2023, arbitrarily simulating data around this time ...\n# - Orbit times use \"TDB\": https://en.wikipedia.org/wiki/Barycentric_Dynamical_Time\nJD_TDB = 2459945.5 \n\n# It will be useful to know the approximate mean anomaly of the earth at the above time \n# - This is to prevent us from simulating observations in stupid/unphysical directions\nObservatory.getObservatoryPosition('500', JD_TDB) ##<<-- '500' is the geocenter\nearthMeanAnom = True\n\n# Specify a Cartesian state at a particular epoch\na,e,i,O,o,M = (2.781870259390299, 0.07692192514945771, 0.18480538653567896, 1.4012469961929193, 1.237855673926063, earthMeanAnom)\nCartState = kc.kep2cartState(PHYS.GMsun , a,e,i,O,o,M)\n\n# Specify where you are observing from (using observatory codes as short-hand)\nJD_UTC = 2459945.5 ## <<-- Observation taken at a time specified in UTC system: https://en.wikipedia.org/wiki/Coordinated_Universal_Time\nobsCode = '806' ## <<-- Observatory code: https://en.wikipedia.org/wiki/List_of_observatory_codes\n\n# Calculate the topocentric RA & Dec of the object at the time of observation", "_____no_output_____" ] ], [ [ "### Advance a Cartesian state to a series of time-steps", "_____no_output_____" ] ], [ [ "# Specify a Cartesian state at a particular epoch\nJD_TDB = 2459945.5, # <<-- Midnight, Jan 1st, 2023, arbitrarily simulating data around this time ...\na,e,i,O,o,M = (2.781870259390299, 0.07692192514945771, 0.18480538653567896, 1.4012469961929193, 1.237855673926063, -1.0950384781408455)\nCartState = kc.kep2cartState(PHYS.GMsun , a,e,i,O,o,M)\n\n# Specify some times of interest\ntargetTDBs = np.array(JD_TDB , JD_TDB + 10.0, 1.0)\n\n# Advance the initial cartesian state to a set of cartesian states, one at each of the times of interest\nics = ORBIT_ICs(something) ##<<-- Inputs need to be specified \na = ADVANCE(ics)\nt = a.twobody(targetTDBs)\n", "_____no_output_____" ] ], [ [ "### Simulating observations of an orbiting object at a series of time-steps", "_____no_output_____" ] ], [ [ "# Specify a 
Cartesian state at a particular epoch\n\n# Specify some times of interest\n\n# Advance the initial cartesian state to a set of cartesian states, one at each of the times of interest\n\n# Simulate the observations", "_____no_output_____" ] ], [ [ "### Define some convenience functions for generating \"realistic\" data sets", "_____no_output_____" ] ], [ [ "\ndef subsequent_new_moon_dates(JD, N):\n '''\n # Convenience function to return the dates of the N new-moons after a specified JD\n # - Used in function(s) below to generate synthetic data\n # - Totally unimportant, why did I spend any time doing this ?!?\n '''\n # When is it new moon ?\n # http://aa.usno.navy.mil/cgi-bin/aa_phases.pl?year=2018&month=11&day=12&nump=50&format=p\n JD_of_a_new_moon = 2458459.805556 # 2018 Dec 07 07:20 \n # Approx ave synodic month \n period = 29.530588 # Good enough for government work: https://en.wikipedia.org/wiki/New_moon\n # Find next new moon after input date\n Nfullphases, remainderDays = (JD - JD_of_a_new_moon ) // period , (JD - JD_of_a_new_moon ) % period\n next_new_moon = JD + (period - remainderDays)\n # Generate array of next N-new-moons\n return np.arange( next_new_moon, next_new_moon + N*period, period )\n\ndef generate_darkTime_arrays(JD_UTC, Nlunations=3):\n '''\n # Convenience function to identify the periods of \"dark-time\" (away from full moon) after a given date\n # - Used in function(s) below to generate synthetic data\n # - Totally unimportant, why did I spend any time doing this ?!?\n '''\n # Find the new moons dates after JD_UTC\n newMoonNights = subsequent_new_moon_dates(JD_UTC, Nlunations)\n\n # Get the unbroken ~22+ night span(s) of dark(ish)-time centered-on the above new moon dates\n Ndark = 2*11\n darkNightArrays = []\n for i,n in enumerate(newMoonNights[:-1]):\n tStart = int(newMoonNights[i] - Ndark/2) \n tEnd = int(newMoonNights[i] + Ndark/2)\n if tStart > JD_UTC:\n darkNightArrays.append( np.arange( tStart, tEnd ,1 ) )\n assert darkNightArrays != [], \"nothing added to darkNightArrays ... \"\n return darkNightArrays\n\ndef choose_nights_from_span(nightArray, Nnights, ArcLength):\n '''\n # Convenience function to choose randomly (with constraints) N-nights from a contoguous span \n # - Used in function(s) below to generate synthetic data\n '''\n assert Nnights <= ArcLength, \"Nnights is more than the specified ArcLength\"\n assert Nnights <= len(nightArray), \"Nnights is more than the available nights\"\n ArcLength = int(ArcLength) \n \n # First night must be within the first few (to allow arc-length constraint to be satisfied)\n firstNight = np.random.choice(nightArray[: len(nightArray)- ArcLength ])\n selectedNights = [firstNight]\n # If Nnights > 1, generate a total span that is approx equal to ArcLength \n if Nnights > 1:\n lastNight = firstNight + ArcLength \n selectedNights.append(lastNight)\n # If Nnights > 2, add in extra nights \n if Nnights > 2:\n additionalNights = np.random.choice( np.arange(firstNight + 1 , lastNight) , Nnights-2, replace=False)\n selectedNights.extend( list(additionalNights) )\n return np.sort(np.array(selectedNights))\n \ndef generate_synthetic_LSST_observations_SingleLunation(\n OrbitalCartesianState,\n t0_orb_JD_TDB = 2459945.5, # <<-- Midnight, Jan 1st, 2023, arbitrarily simulating data around this time ...\n t0_obs_JD_UTC = 2459945.5, # <<-- Midnight, Jan 1st, 2023, ... 
and assuming orbit specified around same time\n Ndets=6, # <<-- Total number of individual detections \n Nnights=3, # <<-- Total number of nights with data on them\n ArcLength=10., # <<-- Number of nights separating start of data from end of data\n obsCode='806', # <<-- Observatory code to use for LSST \n obsError=0.1, # <<-- Assumed astrometric uncertainty for LSST observations \n outlier=True): # <<-- Whether to add outlier observational errors for shits and giggles\n '''\n # Convenience function to generate synthetic observational data sets \n # LSST discovery data will be *short*: \n # - typically ~6 detections, \n # - spread across ~3 individual nights, \n # - with the span of the data (\"arc length\") being ~10 nights from start to end \n '''\n # Assertions to avoid fuck-ups due to the assumptions I make below\n assert ArcLength < 20\n assert Nnights <= ArcLength\n assert Ndets >= Nnights*2 ## <<-- LSST requires >= 2 per night\n assert Ndets <= Nnights*4 ## <<-- No good reason, it just seems unnecessary for LSST ....\n \n # Get nights away from the full moon \n # - N.B. \"[0]\" ... just selecting the first set of dark time ...\n darkNightArrays = generate_darkTime_arrays(t0_obs_JD_UTC)[0] \n \n # Choose nights, ensuring a data-span approx equal to ArcLength \n selectedNights = choose_nights_from_span(darkNightArrays, Nnights, ArcLength)\n \n # Specify number of observations on each night \n # - To simplify, am having everything have the average, then add in extras here-and-there\n aveObsPerNight, additionalObsBeyondAve = Ndets // Nnights , Ndets % Nnights\n nightsToGetAdditionalObs = np.random.choice( selectedNights , additionalObsBeyondAve, replace=False)\n \n # Choose observation times for each night\n timeRange = (0.33,0.75) ##<<-- min & max JD_UTC, corresponds to approx 8pm & 6am\n selectedObservationJDUTCs = []\n for night in selectedNights:\n startTime = np.random.uniform( timeRange[0],timeRange[1]-1.01/24.) ## <<-- leave at least an hour at the end\n endTime = np.random.uniform( startTime+1/24. 
, timeRange[1]) ## <<-- want the detections >= 1-hr apart \n if night not in nightsToGetAdditionalObs:\n selectedObservationJDUTCs.extend( [night+startTime , night+endTime])\n else:\n extraTime = np.random.uniform( startTime , endTime )\n selectedObservationJDUTCs.extend( [night+startTime , night+extraTime , night+endTime])\n print(\"selectedObservationJDUTCs\", selectedObservationJDUTCs)\n \n '''\n \n # Evolve the input OrbitalCartesianState to the required times and simulate the observations \n \n \n # Decide whether to add extra errors to a few data points because no one has a clue what error bars really mean\n if outlier:\n Noutliers = np.random.int(1,int(len(OBS)/2)) ## <<-- At least one, but less than half\n Outliers = np.random.choice( np.arange(len(OBS)) , Noutliers, replace=False)\n # Add errors\n for i,O in enumerate(OBS):\n if i in Outliers:\n factor = 2.0\n else:\n factor = 1.0\n O.RA += np.random.normal(0.0, factor*obsError)\n O.DEC += np.random.normal(0.0, factor*obsError)\n \n # Return cartesian state(s) as well as observations ...\n return OBS\n '''\n \n \n ", "_____no_output_____" ] ], [ [ "### Plot results from above convenience function", "_____no_output_____" ] ], [ [ "# Define input cartesian state \nOrbitalCartesianState = True\n\n# Generate synthetic observations \n_ = generate_synthetic_LSST_observations_SingleLunation(CartState)\n\n# Make a plot of the cartesian position(s) \n\n# Make a plot of the RA & Dec (with & without uncertainties?)", "selectedObservationJDUTCs [2459959.5102544706, 2459959.6515298695, 2459964.394071052, 2459964.530711493, 2459969.5560183674, 2459969.6396634015]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec7480ae7026125943f657b9bb4f773864268861
128,464
ipynb
Jupyter Notebook
post_analysis/jupyter_notebook/mashvsrpstbln.ipynb
NCBI-Codeathons/Domain_HMM_Boundaries
17736e1c845faedd8bc53a71f6bb280a6cf0b392
[ "MIT" ]
1
2021-03-19T19:59:49.000Z
2021-03-19T19:59:49.000Z
post_analysis/jupyter_notebook/mashvsrpstbln.ipynb
NCBI-Codeathons/Domain_HMM_Boundaries
17736e1c845faedd8bc53a71f6bb280a6cf0b392
[ "MIT" ]
null
null
null
post_analysis/jupyter_notebook/mashvsrpstbln.ipynb
NCBI-Codeathons/Domain_HMM_Boundaries
17736e1c845faedd8bc53a71f6bb280a6cf0b392
[ "MIT" ]
1
2019-11-04T20:19:14.000Z
2019-11-04T20:19:14.000Z
218.105263
44,916
0.895496
[ [ [ "import seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport numpy as np\n%matplotlib inline", "_____no_output_____" ], [ "#mashresults\nmashdic = {}\nfor i in os.listdir('mashresults'):\n dataset = i.strip().split('.')[0]\n with open('mashresults/'+ i) as f:\n for line in f:\n try:\n cdd = line.strip().split('.')[0]\n contig = '_'.join(line.strip().split()[1].split('_')[:-1])\n pval = float(line.strip().split()[3])\n if dataset in mashdic:\n if cdd not in mashdic[dataset]:\n mashdic[dataset].append(cdd)\n if dataset not in mashdic:\n mashdic[dataset] = [cdd]\n except:\n break", "_____no_output_____" ], [ "#get pssm id and wrap them around to actual accessions:\nlinkdic = {}\nwith open('virus_models.txt') as f:\n for line in f:\n acc1 = line.strip().split(' ')[0]\n pssmacc = line.strip().split()[2]\n linkdic[pssmacc] = acc1", "_____no_output_____" ], [ "#rpsblastresults\nrpsdic = {}\nfor i in os.listdir('realrpsresults'):\n dataset = i.strip().split('.')[0]\n with open('realrpsresults/' + i) as f:\n for line in f:\n accfull = line.strip().split()[1].replace('CDD:','')\n if accfull in linkdic:\n realacc = linkdic[accfull]\n else:\n realacc = 'NA'\n if dataset in rpsdic:\n rpsdic[dataset].append(realacc)\n else:\n rpsdic[dataset] = [realacc]", "_____no_output_____" ], [ "comparedic = {}\nfor i in rpsdic:\n if i in mashdic:\n lis1 = rpsdic[i]\n lis2 = mashdic[i]\n setlen = len(list(set(lis1) & set(lis2)))\n rpsun = len(lis1) - setlen\n mashun = len(lis2) - setlen\n rpshits = list(set(lis1))\n mashhits = list(set(lis2))\n mashhits_truepos = []\n for j in list(set(lis2)):\n if j in lis1:\n mashhits_truepos.append(j)\n mashhits_falseneg = []\n for j in list(set(lis1)):\n if j not in list(set(lis2)):\n mashhits_falseneg.append(j)\n totalrps = len(lis1)\n totalmash = len(lis2)\n comparedic[i] = [setlen, rpsun, -mashun, len(rpshits), len(mashhits), len(mashhits_truepos)/len(mashhits), len(mashhits_truepos)/(len(mashhits_truepos)+len(mashhits_falseneg)),totalrps, totalmash]", "_____no_output_____" ], [ "comparedf = pd.DataFrame(comparedic)\ncomparedf = comparedf.T\ncomparedf.columns = ['Union_rpsmash','unique_rps','unique_mash','total_rps','total_mash','precision_mash','recall_mash', 'totalrps', 'totalmash']\ncomparedf.head()", "_____no_output_____" ], [ "# Precision (number of true pos (by rpsbln def) / total mash hits):\ncomparedf['precision_mash'].mean()", "_____no_output_____" ], [ "comparedf['recall_mash'].mean()", "_____no_output_____" ], [ "#Total RPS hits among the 700 datasets.\ncomparedf['totalrps'].sum()", "_____no_output_____" ], [ "#Total mash hits among the 700 datasets.\ncomparedf['totalmash'].sum()", "_____no_output_____" ], [ "comparesort = comparedf.sort_values(by=['Union_rpsmash'])", "_____no_output_____" ], [ "plt.figure(figsize=(20,20))\nsns.barplot(comparesort.index,comparesort['Union_rpsmash'])", "_____no_output_____" ], [ "plt.figure(figsize=(50,20))\nb = sns.barplot(comparesort.index,comparesort['unique_rps'],color='red')\nsns.barplot(comparesort.index,comparesort['unique_mash'], color='blue')", "_____no_output_____" ], [ "comparesort['commlog'] = np.log10(comparesort['Union_rpsmash'] + 1)\ncomparesort['rpslog'] = np.log10(comparesort['unique_rps'] + 1)\ncomparesort['mashlog'] = np.log10(-comparesort['unique_mash'] + 1)\ncomparesort['mashlog'] = -comparesort['mashlog']\ncomparesort.head()", "_____no_output_____" ], [ "plt.figure(figsize=(50,20))\nb = sns.barplot(comparesort.index,comparesort['rpslog'],color='#0173b2')\nb = 
sns.barplot(comparesort.index,comparesort['mashlog'], color='#de8f05')\nplt.savefig(\"mashvsrps.png\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec74b2dbe271c17871394c45501ad76bbea9990f
3,065
ipynb
Jupyter Notebook
docs/tutorials/plotting/pairplot.ipynb
aaitbr/graspologic
35085fa4c40b37b809b6627bfa6d8310348035be
[ "MIT" ]
148
2020-09-15T21:45:51.000Z
2022-03-24T17:33:01.000Z
docs/tutorials/plotting/pairplot.ipynb
aaitbr/graspologic
35085fa4c40b37b809b6627bfa6d8310348035be
[ "MIT" ]
533
2020-09-15T18:49:00.000Z
2022-03-25T12:16:58.000Z
docs/tutorials/plotting/pairplot.ipynb
aaitbr/graspologic
35085fa4c40b37b809b6627bfa6d8310348035be
[ "MIT" ]
74
2020-09-16T02:24:23.000Z
2022-03-20T20:09:38.000Z
22.703704
138
0.531485
[ [ [ "# Pairplot: Visualizing High Dimensional Data\n\nThis example provides how to visualize high dimensional data using the pairplot.", "_____no_output_____" ] ], [ [ "import graspologic\n\nimport numpy as np\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Simulate a binary graph using stochastic block model\nThe 3-block model is defined as below:\n\n\\begin{align*}\nn &= [50, 50, 50]\\\\\nP &= \n\\begin{bmatrix}0.5 & 0.1 & 0.05 \\\\\n0.1 & 0.4 & 0.15 \\\\\n0.05 & 0.15 & 0.3\n\\end{bmatrix}\n\\end{align*}\n\nThus, the first 50 vertices belong to block 1, the second 50 vertices belong to block 2, and the last 50 vertices belong to block 3.", "_____no_output_____" ] ], [ [ "from graspologic.simulations import sbm\n\nn_communities = [50, 50, 50]\np = [[0.5, 0.1, 0.05], \n [0.1, 0.4, 0.15], \n [0.05, 0.15, 0.3],]\n\nnp.random.seed(2)\nA = sbm(n_communities, p)", "_____no_output_____" ] ], [ [ "## Embed using adjacency spectral embedding to obtain lower dimensional representation of the graph\n\nThe embedding dimension is automatically chosen. It should embed to 3 dimensions.", "_____no_output_____" ] ], [ [ "from graspologic.embed import AdjacencySpectralEmbed\n\nase = AdjacencySpectralEmbed()\nX = ase.fit_transform(A)\n\nprint(X.shape)", "_____no_output_____" ] ], [ [ "## Use pairplot to plot the embedded data\n\nFirst we generate labels that correspond to blocks. We pass the labels along with the data for pair plot.", "_____no_output_____" ] ], [ [ "from graspologic.plot import pairplot\n\nlabels = ['Block 1'] * 50 + ['Block 2'] * 50 + ['Block 3'] * 50\n\nplot = pairplot(X, labels)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec74d42e7f4939e7ca7232b83a0ad847b0275e79
2,994
ipynb
Jupyter Notebook
nbgrader/tests/nbextensions/files/submitted-grade-cell-type-changed.ipynb
dsblank/nbgrader
003a4d924b86ffbb6040a7f7d6a9862a67fb51e7
[ "BSD-3-Clause-Clear" ]
null
null
null
nbgrader/tests/nbextensions/files/submitted-grade-cell-type-changed.ipynb
dsblank/nbgrader
003a4d924b86ffbb6040a7f7d6a9862a67fb51e7
[ "BSD-3-Clause-Clear" ]
null
null
null
nbgrader/tests/nbextensions/files/submitted-grade-cell-type-changed.ipynb
dsblank/nbgrader
003a4d924b86ffbb6040a7f7d6a9862a67fb51e7
[ "BSD-3-Clause-Clear" ]
null
null
null
19.192308
52
0.518704
[ [ [ "# YOUR CODE HERE\nraise NotImplementedError()", "_____no_output_____" ], [ "print(\"Success!\")", "_____no_output_____" ] ], [ [ "assert a == 1", "_____no_output_____" ], [ "YOUR ANSWER HERE", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\nraise NotImplementedError()", "_____no_output_____" ], [ "print(\"Don't change this cell!\")", "_____no_output_____" ] ], [ [ "This cell shouldn't be changed.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec74d43a06ab19884b2ba855b9181514846f2029
10,690
ipynb
Jupyter Notebook
solutions_do_not_open/Lab_26_TF Eager Execution_solution.ipynb
differentmatt/ztdl-5-day-bootcamp
66a300ed73857dd9b4e1b2fa6aec54e1f87b6615
[ "MIT" ]
1
2018-09-23T05:40:20.000Z
2018-09-23T05:40:20.000Z
solutions_do_not_open/Lab_26_TF Eager Execution_solution.ipynb
differentmatt/ztdl-5-day-bootcamp
66a300ed73857dd9b4e1b2fa6aec54e1f87b6615
[ "MIT" ]
null
null
null
solutions_do_not_open/Lab_26_TF Eager Execution_solution.ipynb
differentmatt/ztdl-5-day-bootcamp
66a300ed73857dd9b4e1b2fa6aec54e1f87b6615
[ "MIT" ]
null
null
null
22.890792
108
0.513564
[ [ [ "# Eager Execution\n\nAdapted from: https://www.tensorflow.org/get_started/eager", "_____no_output_____" ] ], [ [ "import os\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\ntf.enable_eager_execution()", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "train_dataset_fp = '../data/iris_training.csv'", "_____no_output_____" ], [ "!head -n5 {train_dataset_fp}", "_____no_output_____" ] ], [ [ "## Csv parser", "_____no_output_____" ] ], [ [ "def parse_csv(line):\n example_defaults = [[0.], [0.], [0.], [0.], [0]]\n parsed_line = tf.decode_csv(line, example_defaults)\n features = tf.reshape(parsed_line[:-1], shape=(4,))\n label = tf.reshape(parsed_line[-1], shape=())\n return features, label", "_____no_output_____" ] ], [ [ "## Dataset API", "_____no_output_____" ] ], [ [ "train_dataset = tf.data.TextLineDataset(train_dataset_fp)\ntrain_dataset = train_dataset.skip(1)\ntrain_dataset = train_dataset.map(parse_csv)\ntrain_dataset = train_dataset.shuffle(buffer_size=1000)\ntrain_dataset = train_dataset.batch(32)", "_____no_output_____" ], [ "train_dataset", "_____no_output_____" ], [ "features, label = tfe.Iterator(train_dataset).next()", "_____no_output_____" ], [ "features", "_____no_output_____" ], [ "label", "_____no_output_____" ] ], [ [ "## Model", "_____no_output_____" ], [ "Note that the model is outputting the logits, not the softmax probabilities.", "_____no_output_____" ] ], [ [ "model = tf.keras.Sequential([\n tf.keras.layers.Dense(10, activation=\"relu\", input_shape=(4,)),\n tf.keras.layers.Dense(10, activation=\"relu\"),\n tf.keras.layers.Dense(3)\n])", "_____no_output_____" ], [ "model", "_____no_output_____" ] ], [ [ "model behaves like a function:", "_____no_output_____" ] ], [ [ "model(features)", "_____no_output_____" ] ], [ [ "In eager mode we can access the values of the weights directly:", "_____no_output_____" ] ], [ [ "for i, v in enumerate(model.variables):\n print(\"Weight shape: \", v.shape)\n print(\"Weight tensor: \", v)\n print()\n", "_____no_output_____" ] ], [ [ "## Loss\n\nLoss is sparse categorical cross entropy", "_____no_output_____" ] ], [ [ "def loss(model, x, y):\n y_ = model(x)\n return tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)", "_____no_output_____" ], [ "loss(model, features, label)", "_____no_output_____" ] ], [ [ "## Gradients\n\nIn eager mode we can evaluate the gradients", "_____no_output_____" ] ], [ [ "def grad(model, inputs, targets):\n with tfe.GradientTape() as tape:\n loss_value = loss(model, inputs, targets)\n return tape.gradient(loss_value, model.variables)", "_____no_output_____" ], [ "grads = grad(model, features, label)", "_____no_output_____" ], [ "for i, g in enumerate(grads):\n print(\"Gradient shape: \", g.shape)\n print(\"Gradient tensor: \", g)\n print()\n", "_____no_output_____" ] ], [ [ "## Optimizer\n\nLet's use simple gradient descent", "_____no_output_____" ] ], [ [ "optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)", "_____no_output_____" ] ], [ [ "## Training Loop", "_____no_output_____" ] ], [ [ "train_loss_results = []\ntrain_accuracy_results = []\n\nnum_epochs = 201\n\n# Loop over epochs\nfor epoch in range(num_epochs):\n \n # accumulators for mean loss and accuracy\n epoch_loss_avg = tfe.metrics.Mean()\n epoch_accuracy = tfe.metrics.Accuracy()\n\n # loop on dataset, for each batch:\n for x, y in tfe.Iterator(train_dataset):\n # 
Calculate gradients\n grads = grad(model, x, y)\n \n # Apply gradients to the weights\n optimizer.apply_gradients(zip(grads, model.variables),\n global_step=tf.train.get_or_create_global_step())\n\n # accumulate loss\n epoch_loss_avg(loss(model, x, y))\n \n # calculate predictions\n y_pred = tf.argmax(model(x), axis=1, output_type=tf.int32)\n # acccumulate accuracy\n epoch_accuracy(y_pred, y)\n\n # end epoch\n train_loss_results.append(epoch_loss_avg.result())\n train_accuracy_results.append(epoch_accuracy.result())\n\n if epoch % 50 == 0:\n print(\"Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}\".format(epoch,\n epoch_loss_avg.result(),\n epoch_accuracy.result()))", "_____no_output_____" ] ], [ [ "## Plot Metrics", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))\nfig.suptitle('Training Metrics')\n\naxes[0].set_ylabel(\"Loss\", fontsize=14)\naxes[0].plot(train_loss_results)\n\naxes[1].set_ylabel(\"Accuracy\", fontsize=14)\naxes[1].set_xlabel(\"Epoch\", fontsize=14)\naxes[1].plot(train_accuracy_results)\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Test", "_____no_output_____" ] ], [ [ "test_fp = '../data/iris_test.csv'", "_____no_output_____" ], [ "test_dataset = tf.data.TextLineDataset(test_fp)\ntest_dataset = test_dataset.skip(1) # skip header row\ntest_dataset = test_dataset.map(parse_csv) # parse each row with the funcition created earlier\ntest_dataset = test_dataset.shuffle(1000) # randomize\ntest_dataset = test_dataset.batch(32) # use the same batch size as the training set", "_____no_output_____" ], [ "test_accuracy = tfe.metrics.Accuracy()\n\nfor (x, y) in tfe.Iterator(test_dataset):\n prediction = tf.argmax(model(x), axis=1, output_type=tf.int32)\n test_accuracy(prediction, y)\n\nprint(\"Test set accuracy: {:.3%}\".format(test_accuracy.result()))", "_____no_output_____" ], [ "class_ids = [\"Iris setosa\", \"Iris versicolor\", \"Iris virginica\"]\n\npredict_dataset = tf.convert_to_tensor([\n [5.1, 3.3, 1.7, 0.5,],\n [5.9, 3.0, 4.2, 1.5,],\n [6.9, 3.1, 5.4, 2.1]\n])\n\npredictions = model(predict_dataset)\n\nfor i, logits in enumerate(predictions):\n class_idx = tf.argmax(logits).numpy()\n name = class_ids[class_idx]\n print(\"Example {} prediction: {}\".format(i, name))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec74d569ca928decbac918ea64041dcf4f66fb23
75,521
ipynb
Jupyter Notebook
notebooks/pandas/pydata/Comparando Pandas com SQL - Pandas.ipynb
walmirsilva/python-data-science-study
baac92c35cdf7c5b90cb5b58aa95e5776582d076
[ "MIT" ]
null
null
null
notebooks/pandas/pydata/Comparando Pandas com SQL - Pandas.ipynb
walmirsilva/python-data-science-study
baac92c35cdf7c5b90cb5b58aa95e5776582d076
[ "MIT" ]
null
null
null
notebooks/pandas/pydata/Comparando Pandas com SQL - Pandas.ipynb
walmirsilva/python-data-science-study
baac92c35cdf7c5b90cb5b58aa95e5776582d076
[ "MIT" ]
null
null
null
25.987956
107
0.326902
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "file_csv = '../data/pydata/tips.csv'", "_____no_output_____" ], [ "tips = pd.read_csv(file_csv)", "_____no_output_____" ], [ "tips.head()", "_____no_output_____" ] ], [ [ "# 1 SELECT ", "_____no_output_____" ] ], [ [ "SELECT total_bill, tip, smoker, time\nFROM tips\nLIMIT 5;", "_____no_output_____" ] ], [ [ "tips[['total_bill', 'tip', 'smoker', 'time']].head()", "_____no_output_____" ] ], [ [ "# 2 WHERE", "_____no_output_____" ] ], [ [ "SELECT * FROM tips\nWHERE time = 'Dinner'\nLIMIT 5;", "_____no_output_____" ] ], [ [ "tips[tips['time'] == 'Dinner'].head()", "_____no_output_____" ], [ "is_dinner = tips['time'] == 'Dinner'", "_____no_output_____" ], [ "is_dinner.value_counts()", "_____no_output_____" ], [ "tips[is_dinner].head()", "_____no_output_____" ] ], [ [ "## 2.1 AND / OR ", "_____no_output_____" ] ], [ [ "SELECT\n*\nFROM tips\nWHERE time = 'Dinner' AND tip > 5.00;", "_____no_output_____" ] ], [ [ "tips[(tips['time'] == 'Dinner') & (tips['tip'] > 5.00)].head()", "_____no_output_____" ] ], [ [ "SELECT\n*\nFROM tips\nWHERE size >= 5 OR total_bill > 45;", "_____no_output_____" ] ], [ [ "tips[(tips['size'] >= 5) | (tips['total_bill'] > 45)]", "_____no_output_____" ] ], [ [ "## 2.2 CHECK NULL VALUE notna() and isna()", "_____no_output_____" ] ], [ [ "frame = pd.DataFrame({\n 'col1': ['A', 'B', np.NaN, 'C', 'D'],\n 'col2': ['F', np.NaN, 'G', 'H', 'I']\n});\nframe", "_____no_output_____" ] ], [ [ "SELECT\n*\nFROM frame\nWHERE col2 IS NULL;", "_____no_output_____" ] ], [ [ "frame[frame['col2'].isna()]", "_____no_output_____" ] ], [ [ "SELECT\n*\nFROM frame\nWHERE col1 IS NOT NULL;", "_____no_output_____" ] ], [ [ "frame[frame['col1'].notna()]", "_____no_output_____" ] ], [ [ "# 3 GROUP BY ", "_____no_output_____" ] ], [ [ "SELECT sex, count( * )\nFROM tips\nGROUP BY sex;", "_____no_output_____" ] ], [ [ "tips.groupby('sex').size()", "_____no_output_____" ], [ "tips.groupby('sex').count()", "_____no_output_____" ], [ "tips.groupby('sex')['total_bill'].count()", "_____no_output_____" ] ], [ [ "## 3.1 Groupby with mutiple functions", "_____no_output_____" ] ], [ [ "SELECT day, AVG(tip), COUNT( * )\nFROM tips\nGROUP BY day;", "_____no_output_____" ] ], [ [ "tips.groupby('day').agg({\n 'tip': np.mean,\n 'day': np.size\n})", "_____no_output_____" ] ], [ [ "SELECT smoker, day, COUNT( * ), AVG(tip)\nFROM tips\nGROUP BY smoker, day;", "_____no_output_____" ] ], [ [ "tips.groupby(['smoker', 'day']).agg({\n 'tip': [np.size, np.mean]\n})", "_____no_output_____" ] ], [ [ "# 4 JOIN", "_____no_output_____" ] ], [ [ "df1 = pd.DataFrame({\n 'key': ['A', 'B', 'C', 'D'],\n 'value': np.random.randn(4)\n})", "_____no_output_____" ], [ "df2 = pd.DataFrame({\n 'key': ['B', 'D', 'D', 'E'],\n 'value': np.random.randn(4)\n})", "_____no_output_____" ] ], [ [ "## 4.1 INNER JOIN", "_____no_output_____" ] ], [ [ "SELECT\n*\nFROM df1\nINNER JOIN df2 ON df1.key = df2.key;", "_____no_output_____" ] ], [ [ "pd.merge(df1, df2, on='key')", "_____no_output_____" ], [ "indexed_df2 = df2.set_index('key')", "_____no_output_____" ], [ "pd.merge(df1, indexed_df2, left_on='key', right_index=True) # Mesma possibilidade através dos indices", "_____no_output_____" ] ], [ [ "## 4.2 LEFT OUTER JOIN", "_____no_output_____" ] ], [ [ "SELECT\n*\nFROM df1\nLEFT OUTER JOIN df2 ON df1.key = df2.key;", "_____no_output_____" ] ], [ [ "pd.merge(df1, df2, on='key', how='left')", "_____no_output_____" ] ], [ [ "## 4.3 
RIGHT OUTER JOIN", "_____no_output_____" ] ], [ [ "SELECT\n*\nFROM df1\nRIGHT OUTER JOIN df2 ON df1.key = df2.key;", "_____no_output_____" ] ], [ [ "pd.merge(df1, df2, on='key', how='right')", "_____no_output_____" ] ], [ [ "## 4.4 FULL JOIN", "_____no_output_____" ] ], [ [ "SELECT\n*\nFROM df1\nFULL OUTER JOIN df2 ON df1.key = df2.key;", "_____no_output_____" ] ], [ [ "pd.merge(df1, df2, on='key', how='outer')", "_____no_output_____" ] ], [ [ "# 5 UNION", "_____no_output_____" ], [ "## 5.1 UNION ALL", "_____no_output_____" ] ], [ [ "df1 = pd.DataFrame({\n 'city': ['Chicago', 'San Francisco', 'New York City'],\n 'rank': range(1, 4)\n})", "_____no_output_____" ], [ "df2 = pd.DataFrame({\n 'city': ['Chicago', 'Boston', 'Los Angeles'],\n 'rank': [1, 4, 5]\n})", "_____no_output_____" ] ], [ [ "SELECT city, rank\nFROM df1\nUNION ALL\nSELECT city, rank\nFROM df2;", "_____no_output_____" ] ], [ [ "pd.concat([df1, df2])", "_____no_output_____" ] ], [ [ "## 5.2 UNION", "_____no_output_____" ] ], [ [ "SELECT city, rank\nFROM df1\nUNION\nSELECT city, rank\nFROM df2;", "_____no_output_____" ] ], [ [ " pd.concat([df1, df2]).drop_duplicates() # é similar ao UNION ALL só que sem os registros duplicados", "_____no_output_____" ] ], [ [ "# 6 Pandas equivalents for some SQL analytic and aggregate functions", "_____no_output_____" ], [ "## 6.1 Top N rows with offset", "_____no_output_____" ] ], [ [ "-- MySQL\nSELECT * FROM tips\nORDER BY tip DESC\nLIMIT 10 OFFSET 5;", "_____no_output_____" ] ], [ [ "# nlargest: Obtenha as linhas de um DataFrame classificadas pelos n maiores valores de colunas.\ntips.nlargest(10+5, columns='tip').tail(10)", "_____no_output_____" ] ], [ [ "## 6.2 Top N rows per group", "_____no_output_____" ] ], [ [ "-- Oracle's ROW_NUMBER() analytic function\nSELECT * FROM (\n SELECT\n t. * ,\n ROW_NUMBER() OVER(PARTITION BY day ORDER BY total_bill DESC) AS rn\n FROM tips t\n)\nWHERE rn < 3\nORDER BY day, rn;", "_____no_output_____" ] ], [ [ "(tips.assign(rn=tips.sort_values(['total_bill'], ascending=False)\n .groupby(['day'])\n .cumcount() + 1)\n .query('rn < 3')\n .sort_values(['day','rn'])\n)", "_____no_output_____" ], [ "(tips.assign(rnk=tips.groupby(['day'])['total_bill']\n .rank(method='first', ascending=False))\n .query('rnk < 3')\n .sort_values(['day','rnk'])\n)", "_____no_output_____" ] ], [ [ "-- Oracle's RANK() analytic function\nSELECT * FROM (\nSELECT\n t. * ,\n RANK() OVER(PARTITION BY sex ORDER BY tip) AS rnk\n FROM tips t\n WHERE tip < 2\n)\nWHERE rnk < 3\nORDER BY sex, rnk;", "_____no_output_____" ] ], [ [ "(tips[tips['tip'] < 2]\n .assign(rnk_min=tips.groupby(['sex'])['tip']\n .rank(method='min'))\n .query('rnk_min < 3')\n .sort_values(['sex','rnk_min'])\n)", "_____no_output_____" ] ], [ [ "# 7 UPDATE", "_____no_output_____" ] ], [ [ "UPDATE tips\nSET tip = tip * 2\nWHERE tip < 2;", "_____no_output_____" ] ], [ [ " tips.loc[tips['tip'] < 2, 'tip'] *= 2", "_____no_output_____" ] ], [ [ "# 8 DELETE", "_____no_output_____" ] ], [ [ "DELETE FROM tips\nWHERE tip > 9;", "_____no_output_____" ] ], [ [ "tips = tips.loc[tips['tip'] <= 9]", "_____no_output_____" ] ] ]
[ "code", "markdown", "raw", "code", "markdown", "raw", "code", "markdown", "raw", "code", "raw", "code", "markdown", "code", "raw", "code", "raw", "code", "markdown", "raw", "code", "markdown", "raw", "code", "raw", "code", "markdown", "code", "markdown", "raw", "code", "markdown", "raw", "code", "markdown", "raw", "code", "markdown", "raw", "code", "markdown", "code", "raw", "code", "markdown", "raw", "code", "markdown", "raw", "code", "markdown", "raw", "code", "raw", "code", "markdown", "raw", "code", "markdown", "raw", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "code", "code", "code" ], [ "markdown" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "raw" ], [ "code", "code", "code" ], [ "markdown" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "code" ], [ "markdown", "markdown" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "code", "code" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "code" ] ]
ec74df86ffb4140f995765ceb236268f7306fbcc
29,041
ipynb
Jupyter Notebook
examples/misc/committors.ipynb
bdice/openpathsampling
8c7ab8cb1bd7f6ae388a49d441423e2332c8301b
[ "MIT" ]
64
2016-07-06T13:38:51.000Z
2022-03-30T15:58:01.000Z
examples/misc/committors.ipynb
bdice/openpathsampling
8c7ab8cb1bd7f6ae388a49d441423e2332c8301b
[ "MIT" ]
601
2016-06-13T10:22:01.000Z
2022-03-25T00:10:40.000Z
examples/misc/committors.ipynb
bdice/openpathsampling
8c7ab8cb1bd7f6ae388a49d441423e2332c8301b
[ "MIT" ]
45
2016-11-10T11:17:53.000Z
2022-02-13T11:50:26.000Z
58.432596
9,362
0.756586
[ [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport openpathsampling as paths\nimport openpathsampling.engines.toy as toys\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "# Committors and Shooting Point Analysis\n\nThere are several ways that we can calculate committors. This deals with two, in particular.\n\nFirst, we frequently want to run a committor from some specific snapshots. For example, you have a transition trajectory, and you want a sense of where the barrier is. So you select 10-20 frames from the trajectory, and run each of them 10-20 times with random velocities to see which states they end up in. The first section of this document describes how to do that process for any list of snapshots.\n\nSecond, we frequently want to plot the committor as a function of some collective variable(s). In this case, we can histogram a bunch of committor shots according to the desired CVs. And, since every shooting move is essentially a committor trial, we can histogram *all* the shooting points from a normal path sampling calculation to obtain the committor landscape. This is what we do in the second section of this document.\n\nNote that there are also tricks to get the committor based on the reweighted path ensemble, and if you're really trying to get the committor landscape in some collective variables, that will probably be the best approach. The approaches here are useful for more simple analyses and for troubleshooting while setting up path sampling simulations.", "_____no_output_____" ], [ "## Simple Committor Run\n\nThe analysis we use for the committor can be used on shooting points in general, but often we just want to run a committor calculation directly. Here we set up a simple run in a situation where, if the forward extension is chosen, we always end up in the `right` state, and if the backward extension is chosen, we always end up in the `left` state.", "_____no_output_____" ] ], [ [ "pes = toys.LinearSlope(m=[0.0], c=[0.0]) # flat line\ntopology = toys.Topology(n_spatial=1, masses=[1.0], pes=pes)\nintegrator = toys.LeapfrogVerletIntegrator(0.1)\noptions = {\n 'integ': integrator,\n 'n_frames_max': 1000,\n 'n_steps_per_frame': 1\n}\n\nengine = toys.Engine(options=options, topology=topology)\n\nsnap0 = toys.Snapshot(coordinates=np.array([[0.0]]),\n velocities=np.array([[1.0]]),\n engine=engine)\nsnap1 = toys.Snapshot(coordinates=np.array([[0.2]]),\n velocities=np.array([[1.0]]),\n engine=engine)\n\n\ncv = paths.FunctionCV(\"Id\", lambda snap : snap.coordinates[0][0])\n\n# these are our states:\nleft = paths.CVDefinedVolume(cv, float(\"-inf\"), -1.0).named(\"left\")\nright = paths.CVDefinedVolume(cv, 1.0, float(\"inf\")).named(\"right\")\n\n# set up a file for storage\nstorage = paths.Storage(\"committor_test.nc\", mode=\"w\", template=snap0)", "_____no_output_____" ] ], [ [ "In addition to the standard setup as above, we need a way to randomize the snapshots. For this simple example, we actually won't randomize them (`NoModification`), but typically we would assign totally random velocities from a Boltzmann distribution (`RandomVelocities`).", "_____no_output_____" ] ], [ [ "## more typical:\n#randomizer = paths.RandomVelocities(beta=1.0)\n## for testing purposes:\nrandomizer = paths.NoModification()", "_____no_output_____" ] ], [ [ "Now we set up the committor simulation and run it. In this example, we use a list of two snapshots. 
If you only want to do the committor from one snapshot, you don't have to wrap it in a list.", "_____no_output_____" ] ], [ [ "simulation = paths.CommittorSimulation(storage=storage,\n engine=engine,\n states=[left, right],\n randomizer=randomizer,\n initial_snapshots=[snap0, snap1])\nsimulation.run(n_per_snapshot=10)", "Working on snapshot 2 / 2; shot 10 / 10" ] ], [ [ "Now we do the analysis:", "_____no_output_____" ] ], [ [ "results = paths.ShootingPointAnalysis(steps=storage.steps, states=[left, right])\nresults[snap0]", "_____no_output_____" ], [ "# prettier printing of the same\n# first version uses number indexes to label snapshots\nresults.to_pandas()", "_____no_output_____" ], [ "# second version uses given label_function\nresults.to_pandas(label_function=cv)", "_____no_output_____" ] ], [ [ "## Analyzing shooting points from a TIS calculation\n\nThe same analysis procedure can be applied to shooting points from an arbitrary TIS simulation. ", "_____no_output_____" ] ], [ [ "store2 = paths.AnalysisStorage(\"mstis.nc\")", "_____no_output_____" ], [ "stateA = store2.volumes.find(\"A\")\nstateB = store2.volumes.find(\"B\")\nstateC = store2.volumes.find(\"C\")", "_____no_output_____" ], [ "results = paths.ShootingPointAnalysis(store2.steps, [stateA, stateB, stateC])", "_____no_output_____" ], [ "len(results)", "_____no_output_____" ], [ "len(store2.steps)", "_____no_output_____" ] ], [ [ "### Committor as a function of one variable\n\nFirst we'll histogram the committor as a function of the distance from the center of state `A`. Recall that the order parameter we used is actually the square of the distance, so we take its square root to make the scale more reasonable.", "_____no_output_____" ] ], [ [ "opA = store2.cvs['opA']\ndistA = lambda snap : np.sqrt(opA(snap))\nbins = [0.0+0.05*i for i in range(31)]", "_____no_output_____" ], [ "hist, bins = results.committor_histogram(distA, stateA, bins)\nplt.bar(left=bins[:-1], height=hist, width=[bins[i+1]-bins[i] for i in range(len(bins)-1)], log=True)\nplt.xlim(0.0, 1.6);", "_____no_output_____" ] ], [ [ "Here we've used the order parameter associated with state `A` as our collective variable. However, it is important to keep in mind that the choice of collective variable is completely arbitrary. For example, we could do the same analysis by looking at the distance from the central point `(0.0, 0.0)`. The plot would look different, since it is a different CV, but the procedure would remain the same.\n\nThis is important: because we have access to the whole snapshots, things that were not calculated during the data-generation phase can be calculated in the data-analysis phase.", "_____no_output_____" ], [ "### Committor as a function of two variables\n\nFrequently, we're interested in looking at the committor as a function of two collective variables. This also follows the same procedure, although it is important that the new hashing function must return a tuple, where previously it returned a float. (Technically, it needs to return a hashable object. Tuples are hashable; lists, for example, are not.)", "_____no_output_____" ] ], [ [ "twoD_hash = lambda snap: (snap.xyz[0][0], snap.xyz[0][1])\nbins = [-1.0+i*0.05 for i in range(41)]", "_____no_output_____" ], [ "hist, bins_x, bins_y = results.committor_histogram(twoD_hash, stateA, bins)", "_____no_output_____" ], [ "# when using pcolor, we need to transpose the histogram (hist.T)\nplt.pcolor(bins_x, bins_y, hist.T)\nplt.clim(0.0, 1.0)\nplt.colorbar();", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
ec74e965a768e25e66920d44448f4426b3ab3f83
299,702
ipynb
Jupyter Notebook
10/.ipynb_checkpoints/homework-10-schuetz-reddit-checkpoint.ipynb
raschuetz/foundations-homework
58b559556591c0ae230a94a64e4f0fa100ba1613
[ "MIT" ]
null
null
null
10/.ipynb_checkpoints/homework-10-schuetz-reddit-checkpoint.ipynb
raschuetz/foundations-homework
58b559556591c0ae230a94a64e4f0fa100ba1613
[ "MIT" ]
null
null
null
10/.ipynb_checkpoints/homework-10-schuetz-reddit-checkpoint.ipynb
raschuetz/foundations-homework
58b559556591c0ae230a94a64e4f0fa100ba1613
[ "MIT" ]
null
null
null
753.020101
144,254
0.686015
[ [ [ "# Reddit Part One: Getting Data\n\nYou're going to scrape the front page of https://www.reddit.com! Reddit is a magic land made of many many semi-independent kingdoms, called subreddits. We need to find out which are the most powerful.\n\nYou are going to scrape the front page of reddit every 4 hours, saving a CSV file that includes:\n* The title of the post\n* The number of votes it has (the number between the up and down arrows)\n* The number of comments it has\n* What subreddit it is from (e.g. /r/AskReddit, /r/todayilearned)\n* When it was posted (get a TIMESTAMP, e.g. 2016-06-22T12:33:58+00:00, not \"4 hours ago\")\n* The URL to the post itself\n* The URL of the thumbnail image associated with the post\n\nNote:\n\n<p>Ugh, reddit is horrible when it hasn't been customized to your tastes. If you would like something more exciting/less idiotic, try scraping a multireddit page - https://www.reddit.com/r/multihub/top/?sort=top&t=year - they're subreddits clustered by topics.\n\n<p>For example, you could scrape https://www.reddit.com/user/CrownReserve/m/improveyoself which is all self-improvement subreddits. You can follow the links at https://www.reddit.com/r/multihub/top/?sort=top&t=year or use the \"Find Multireddits\" link on the Multireddit page to find more.", "_____no_output_____" ] ], [ [ "from bs4 import BeautifulSoup\nimport requests\n\nuser_agent = {'User-agent': 'Mozilla/5.0'}\nhtml_str = requests.get('https://www.reddit.com/', headers = user_agent).text", "_____no_output_____" ], [ "html_str", "_____no_output_____" ], [ "document = BeautifulSoup(html_str, 'html.parser')", "_____no_output_____" ], [ "# The title of the post\n # The whole post is under `<div>` class = ' thing id-t3_4 ....'\n # <div> class = 'entry unvoted'\n # <p> class = 'title'\n # `<a>` class = 'title may-blank '\n# The number of votes it has (the number between the up and down arrows)\n # The number of votes is in <div> class = 'score unvoted'\n # sometimes this is &bull;\n# The number of comments it has\n # There's a\n # <div> class = 'entry unvoted'\n # <ul> class = 'flat-list buttons'\n # <li> class = 'first'\n # <a> class = 'bylink comments may-blank'\n# What subreddit it is from (e.g. /r/AskReddit, /r/todayilearned)\n # <div> class = 'entry unvoted'\n # <p> class='tagline'\n # <a> class = 'subreddit hover may-blank'\n# When it was posted (get a TIMESTAMP, e.g. 2016-06-22T12:33:58+00:00, not \"4 hours ago\")\n # <div> class = 'entry unvoted'\n # <p> class='tagline'\n # <time> it's actually in the tag\n# The URL to the post itself\n # This is in two places. Both inside the main <div> tag and in the same tag with the title.\n# The URL of the thumbnail image associated with the post\n # There are two thumbnail urls—the one I guess it's from orginially and the reddit thumbnail. 
Here's how to get the reddit thumbnail:\n # <a> class = 'thumbnail may-blank'\n # <img> it's actually in the tag\n# What I eventually want: \n posts_today = [\n {'title': '\"Two clowns in the same circus\" 16 x 12s oil on linen'},\n {'votes': 4246},\n {'comments': 372},\n {'subreddit': '/r/Art'},\n {'timestamp': '2016-06-22T12:33:58+00:00'},\n {'url': 'https://www.reddit.com/r/Art/comments/4pbvk5/two_clowns_in_the_same_circus_16_x_12s_oil_on/'},\n {'thumb_url': 'https://b.thumbs.redditmedia.com/p32PnbLD9t9hqvw9Q5X7eZS2tI7Ygqnh5K5MTxOERSE.jpg'}\n ]", "_____no_output_____" ], [ "import re", "_____no_output_____" ], [ "one_sibling_up = document.find_all('div', {'class': 'clearleft'})", "_____no_output_____" ], [ "# troubleshooting\ndocument", "_____no_output_____" ], [ "# because only every other clearleft has a post in it:\nposts = [tag.find_next_sibling('div') for tag in one_sibling_up if tag.find_next_sibling('div')]", "_____no_output_____" ], [ "# posts is a list\nlen(posts)\n# There are 10 more posts than show up on the homepage. Seems like the first 9 and last one aren't actual posts.", "_____no_output_____" ], [ "def title(post):\n if post.find('a', {'class': 'title may-blank '}):\n return post.find('a', {'class': 'title may-blank '}).string\n else:\n return 'NO TITLE'", "_____no_output_____" ], [ "def votes(post):\n if post.find('div', {'class': 'score unvoted'}):\n return post.find('div', {'class': 'score unvoted'}).string\n else:\n return 'NO INFO'", "_____no_output_____" ], [ "# The number of comments it has\n # There's a\n # <div> class = 'entry unvoted'\n # <ul> class = 'flat-list buttons'\n # <li> class = 'first'\n # <a> class = 'bylink comments may-blank'\n\nnum = 0\n\nfor post in posts:\n if post.find('a', {'class': 'bylink comments may-blank'}):\n print(r'\\d+', re.findall(post.find('a', {'class': 'bylink comments may-blank'})).text)\n else:\n print(0)\n num += 1\n print(num)\n print('')", "0\n1\n\n" ], [ "posts_today = []\npost_dict = {}\nfor post in posts[9:34]:\n post_dict['title'] = title(post)\n if votes(post) == 'NO INFO':\n post_dict['votes'] = votes(post)\n else:\n post_dict['votes'] = int(votes(post))\n posts_today.append(post_dict)\n post_dict = {}\n\nprint(len(posts_today))\nposts_today", "25\n" ] ], [ [ "# Reddit Part Two: Sending data\n\nYou'd like to get something in your inbox about what's happening on reddit every morning at 8:30AM. Using a mailgun.com account and their API, send an email to your email address with the the CSV you saved at 8AM attached. The title of the email should be something like \"Reddit this morning: January, 1 1970\" \n\n<p>TIP: How are you going to find that csv file? Well, think about specific the datetime stamp in the filename really needs to be.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ec74eb4f700850c9ca8e21f4dc99c5e776216c74
8,167
ipynb
Jupyter Notebook
analyse/t-SNE.ipynb
passerby233/keratitis_imb
a7fb12f39b2084b82e19da6dfe0097a6003a9ea2
[ "Apache-2.0" ]
null
null
null
analyse/t-SNE.ipynb
passerby233/keratitis_imb
a7fb12f39b2084b82e19da6dfe0097a6003a9ea2
[ "Apache-2.0" ]
null
null
null
analyse/t-SNE.ipynb
passerby233/keratitis_imb
a7fb12f39b2084b82e19da6dfe0097a6003a9ea2
[ "Apache-2.0" ]
null
null
null
35.202586
124
0.545733
[ [ [ "import sys, os\nsys.path.append('/home/ljc/keratitis')\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nimport numpy as np\nfrom sklearn import metrics\nimport torch, re\nimport torch.nn.functional as F\nfrom torchvision.models import DenseNet\nfrom torch.utils.data import DataLoader\nfrom dataset import KeratitisLabeled\n\ndef convert_ckpt(state_dict):\n pattern = re.compile(r'module.(.*)')\n for key in list(state_dict.keys()):\n res = pattern.match(key)\n if res:\n new_key = res.group(1)\n state_dict[new_key] = state_dict[key]\n del state_dict[key]\n return state_dict\n\nclass Densenet121Extractor(DenseNet):\n def __init__(self, num_classes=4):\n super().__init__(num_classes=num_classes)\n def forward(self, x):\n features = self.features(x)\n out = F.relu(features, inplace=True)\n out = F.adaptive_avg_pool2d(out, (1, 1))\n out = torch.flatten(out, 1)\n return out\n \ndef extract(model, dataloader):\n model.eval()\n device = next(model.parameters()).device\n feature_list, y_true_list = [], []\n with torch.no_grad(): \n for step, data in enumerate(dataloader):\n images, target = data\n images = images.to(device)\n features = model(images).numpy()\n feature_list.append(features)\n y_true_list.append(target.numpy())\n all_features = np.concatenate(feature_list, axis=0)\n y_true = np.concatenate(y_true_list, axis=0)\n return all_features, y_true\n\nmodel = Densenet121Extractor()\ntestset = KeratitisLabeled(mode='test', k=0)\ntrainset = KeratitisLabeled(mode='train', k=0)\ntestloader = DataLoader(testset, batch_size=400, shuffle=False,\n num_workers=16, pin_memory=True)\ntrainloader = DataLoader(trainset, batch_size=400, shuffle=False,\n num_workers=16, pin_memory=True)\nif not os.path.exists('./features'):\n os.mkdir('./features')", "_____no_output_____" ], [ "ckpt_path = '/home/ljc/keratitis/outputs/densenet121/fold_0/densenet121.pth'\nstate_dict = convert_ckpt(torch.load(ckpt_path))\nmodel.load_state_dict(state_dict)\n\nfor loader, mode in zip([testloader, trainloader], ['test', 'train']):\n all_features, y_true = extract(model, loader)\n print(all_features.shape, y_true.shape)\n data = {'features': all_features, 'label':y_true}\n file_name = f'vanilla_{mode}.npz'\n save_path = os.path.join('./features', file_name)\n np.savez(save_path, **data)", "_____no_output_____" ], [ "import numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib.patheffects as PathEffects\nimport matplotlib\nfrom sklearn.manifold import TSNE\nimport seaborn as sns\n#sns.set_style('darkgrid')\n#sns.set_palette('muted')\nsns.set_context(\"notebook\", font_scale=1.5,\n rc={\"lines.linewidth\": 2.5})\nRS = 42\n\ndef scatter(x, colors, class_to_idx, grid, size, title):\n # We choose a color palette with seaborn.\n palette = np.array(sns.color_palette(\"hls\", 4))\n\n # We create a scatter plot.\n ax = plt.subplot(grid, aspect='equal')\n sc = ax.scatter(x[:,0], x[:,1], s=size, \n c=palette[colors.astype(np.int)])\n plt.xlim(-20, 20)\n plt.ylim(-20, 20)\n #ax.axis('off')\n plt.xticks([])\n plt.yticks([])\n ax.axis('tight')\n ax.set(title=title)\n \n # We add the labels for each digit.\n txts = []\n for i in range(4):\n # Position of each label.\n xtext, ytext = np.median(x[colors == i, :], axis=0)\n txt = ax.text(xtext, ytext, class_to_idx[i], fontsize=20)\n txt.set_path_effects([\n PathEffects.Stroke(linewidth=5, foreground=\"w\"),\n PathEffects.Normal()])\n txts.append(txt)\n \ndef visualize(fpath, grid, size, title, p, e):\n class_to_idx = ['A', 'B', 'F', 'H']\n data = np.load(fpath)\n x = 
data['features']\n y = data['label']\n coordinate = TSNE(random_state=RS, perplexity=p, early_exaggeration=e).fit_transform(x)\n scatter(coordinate, y, class_to_idx, grid, size, title)", "_____no_output_____" ], [ "vanilla_list = ('./features/vanilla_test.npz', './features/vanilla_train.npz')\nfull_model_list = ('./features/full_model_test.npz', './features/full_model_train.npz')\nfor (test_file, train_file), name in zip([vanilla_list, full_model_list], ['vanilla', 'full']):\n fig = plt.figure(figsize=(11, 5))\n visualize(train_file, 122, 5, 'Training Set', 40, 15)\n visualize(test_file, 121, 20, 'Test Set', 20, 4)\n plt.show()\n fig.savefig(name +'.pdf')", "_____no_output_____" ], [ "test_list = ['./features/vanilla_test.npz', './features/self_super_test.npz', './features/full_model_test.npz']\ntrain_list = ['./features/vanilla_train.npz', './features/self_super_train.npz', './features/full_model_train.npz']\ntest_title = ['Vanilla Test', 'Self Supervise Test', 'Full Model Test']\ntrain_title = ['Vanilla Train', 'Self Supervise Train', 'Full Model Train']\nfor plotlist, mode in zip([test_list, train_list][:1], ['test', 'train'][:1]):\n f = plt.figure(figsize=(17, 5))\n for idx, plotfile in enumerate(plotlist):\n if mode == 'test':\n visualize(plotfile, 131+idx, 20, test_title[idx], 20, 4)\n else:\n visualize(plotfile, 131+idx, 16, train_title[idx], 40, 15)\n plt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ec74eed434b4dff4e69a55bab3881768572e7f25
395,405
ipynb
Jupyter Notebook
000 Projekte_Juerg/00 Direktinvestitionen/Kopien/Direktinvestitionen CH-Copy3.ipynb
JonnyGrafico/00_Git_Projekte
5df548ecd43a7071f60ce26d06eceb33f8a11815
[ "MIT" ]
null
null
null
000 Projekte_Juerg/00 Direktinvestitionen/Kopien/Direktinvestitionen CH-Copy3.ipynb
JonnyGrafico/00_Git_Projekte
5df548ecd43a7071f60ce26d06eceb33f8a11815
[ "MIT" ]
null
null
null
000 Projekte_Juerg/00 Direktinvestitionen/Kopien/Direktinvestitionen CH-Copy3.ipynb
JonnyGrafico/00_Git_Projekte
5df548ecd43a7071f60ce26d06eceb33f8a11815
[ "MIT" ]
null
null
null
55.753666
24,828
0.392238
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "%matplotlib inline", "_____no_output_____" ], [ "path = 'snb-data-fdiaustabsa-de-all-20171215_0900.xlsx'", "_____no_output_____" ], [ "df = pd.read_excel(path, na_values='-')", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.set_index('Jahre')", "_____no_output_____" ], [ "df['Jahre']", "_____no_output_____" ], [ "df['Industrie - Total']", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "df[\"Sektoren\"] = df['Sektoren'].astype('int')\ndf.dtypes", "_____no_output_____" ], [ "df[\"Industrie\"] = df['Industrie'].astype('int')\ndf.dtypes", "_____no_output_____" ], [ "df.Jahre.sort_index()", "_____no_output_____" ], [ "df.Industrie.sort_index()", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "felderliste = ['Jahre', 'Industrie']", "_____no_output_____" ], [ "df[felderliste].head(5)", "_____no_output_____" ], [ "df.plot(kind='line', x='Jahre', y='Industrie')", "_____no_output_____" ], [ "data = {'Beteiligungskapital'}\n\ndf = pd.DataFrame(data, index = ['Kapitalart', 'Selektoren und Branchen','1998', '1999', '2000', '2001', '2001', '2002',\n '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011',\n '2012', '2013', '2014', '2015', '2016'])\ndf\n", "_____no_output_____" ], [ "df.Beteiligungskapital.value_counts().sort_index()", "_____no_output_____" ], [ "data = {'Beteiligungskapital ohne reinvestierte Erträge': [2]}\n\n\ndf = pd.DataFrame(data, index = ['Kapitalart', 'Selektoren und Branchen','1998', '1999', '2000', '2001', '2001', '2002',\n '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011',\n '2012', '2013', '2014', '2015', '2016'])\ndf\n\n\n\n\n\ndf_irn = df[data]\ndf_irn.head(3)\n\n\ndf_irn.data.value_counts().sort_index().plot()", "_____no_output_____" ], [ "df.set_index('Jahre')", "_____no_output_____" ], [ "felderliste = ['Jahre', 'Beteiligungskapital ohne reinvestierte Erträge', 'Beteiligungskapital ohne reinvestierte Erträge.1',\n 'Beteiligungskapital ohne reinvestierte Erträge.2', 'Beteiligungskapital ohne reinvestierte Erträge.3']", "_____no_output_____" ], [ "felderliste = ['Jahre', 'Beteiligungskapital ohne reinvestierte Erträge', 'Beteiligungskapital ohne reinvestierte Erträge.1',\n 'Beteiligungskapital ohne reinvestierte Erträge.2', 'Beteiligungskapital ohne reinvestierte Erträge.3']", "_____no_output_____" ], [ "df[felderliste].head(5)", "_____no_output_____" ], [ "df = pd.DataFrame({'Kapitalart': ['Selektoren und Branchen','1998', '1999', '2000', '2001', '2001', '2002',\n '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011',\n '2012', '2013', '2014', '2015', '2016'],\n 'Beteiligungskapital ohne reinvestierte Erträge': ['6374']})\n \n\n\n\n\ndf.columns = ['Jahre',]\ndf\n\n\n\n\ndata = {'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'], \n 'year': [2012, 2012, 2013, 2014, 2014], \n 'reports': [4, 24, 31, 2, 3]}\ndf = pd.DataFrame(data, index = ['Cochice', 'Pima', 'Santa Cruz', 'Maricopa', 'Yuma'])\ndf", "_____no_output_____" ], [ "df.drop([1])", "_____no_output_____" ], [ "df_kapital..value_counts().sort_index().plot()", "_____no_output_____" ], [ "df_irn = df[df.Beteiligungskapital ohne reinvestierte Erträge.1 == 'IRN']\ndf_irn.head(3)", "_____no_output_____" ], [ "data = {'Beteiligungskapital ohne reinvestierte Erträge': ['']}\ndf = pd.DataFrame(data, index = ['Kapitalart', 'Sektoren und Branchen', 'Leerzeile',\n '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2205',\n '2006', '2007', '2008', '2009', '2010', 
'2011', '2012',\n '2013', '2014', '2015', '2016'])\ndf", "_____no_output_____" ], [ "df.drop(['Leerzeile'])", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "data = {}\ndf = pd.DataFrame(data, index = ['Kapitalart', 'Sektoren und Branchen', 'Leerzeile',\n '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2205',\n '2006', '2007', '2008', '2009', '2010', '2011', '2012',\n '2013', '2014', '2015', '2016'])\ndf\n\n\n'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'], \n 'year': [2012, 2012, 2013, 2014, 2014], \n 'reports': [4, 24, 31, 2, 3]", "_____no_output_____" ], [ "data = {'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'], \n 'year': [2012, 2012, 2013, 2014, 2014], \n 'reports': [4, 24, 31, 2, 3]}\n\ndf = pd.DataFrame(data, index = ['Cochice', 'Pima', 'Santa Cruz', 'Maricopa', 'Yuma'])\ndf", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "df.set_index('Kapitalart')", "_____no_output_____" ], [ "df = df.transpose()", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.set_index('Kapitalart')", "_____no_output_____" ], [ "data = ()", "_____no_output_____" ], [ "# setting first name as index column \ndata.set_index([\"Kapitalart\"], inplace = True, \n append = True, drop = True)", "_____no_output_____" ], [ "df.set_index(['1998', '1999', '2000', '2001',\n '2002', '2003', '2004', '2005', '2006',\n '2007', '2008', '2009', '2010'])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec751968b7c6eb0def315eb6863092fbb6ad4e13
117,667
ipynb
Jupyter Notebook
scripts/dysfunctional.ipynb
tetyanaloskutova/dysfunctionalbo
7a2bda9c0d437263c38e72806ee91ad09a123ba6
[ "MIT" ]
1
2022-02-21T16:31:04.000Z
2022-02-21T16:31:04.000Z
scripts/dysfunctional.ipynb
tetyanaloskutova/dysfunctionalbo
7a2bda9c0d437263c38e72806ee91ad09a123ba6
[ "MIT" ]
11
2020-01-17T09:54:41.000Z
2022-02-10T10:23:32.000Z
scripts/dysfunctional.ipynb
tetyanaloskutova/dysfunctionalbott
7a2bda9c0d437263c38e72806ee91ad09a123ba6
[ "MIT" ]
null
null
null
34.415619
1,857
0.522678
[ [ [ "from simpleneighbors import SimpleNeighbors\nimport spacy\nnlp = spacy.load('en_core_web_lg')\nfilename1 = r\"C:\\conda\\dysfunctionalbott\\data\\Wait.txt\"\ncutoff = 500000\ntext1 = open(filename1).read()[:cutoff]\nfilename2 = r\"C:\\conda\\dysfunctionalbott\\data\\gotg2_processed.txt\"\ntext2 = open(filename2).read()[:cutoff]\ndoc = nlp(text1+text2, \n disable=['tagger'])\n\nimport numpy as np\n\ndef concatenate_vectors(seq):\n return np.concatenate(np.array([w.vector for w in seq]), axis=0)\n\n\nconcatenate_vectors(nlp(\"hello there\")).shape\n\n\n\n\n\n", "_____no_output_____" ], [ "n = 3\nnns = SimpleNeighbors(n*300)\nfor seq in doc.sents:\n seq = [item for item in seq if item.is_alpha]\n for i in range(len(seq)-n):\n mean = concatenate_vectors(seq[i:i+n])\n next_item = seq[i+n].text\n nns.add_one(next_item, mean)\nnns.build()", "_____no_output_____" ], [ "import random\nrandom.randint(1,101)\nrandom.randint(0,len(vec))", "_____no_output_____" ], [ "tagger = spacy.load('en_core_web_sm')", "_____no_output_____" ], [ "count_sentenses = 0\n \nstarts = [\"I have never\",\n 'I was a'\n ,'We have come'\n ,'Needless to say'\n ,'Under international law'\n ,'I frankly did'\n ,'These were my'\n ,'You could see'\n ,'I truly enjoyed'\n ,'Safe to come'\n ,'I am pleased'\n ,'And she had'\n ,'I will buy'\n ,'Most nations opted'\n ,'The most impressive']\n\n\n\n", "_____no_output_____" ], [ "\n", "_____no_output_____" ], [ "start=''\nnew_text=''", "_____no_output_____" ], [ "start = starts[count_sentenses]\nnew_text += start", "_____no_output_____" ], [ "while count_sentenses < 14:\n vec = nns.nearest(concatenate_vectors(nlp(start)))\n ix=random.randint(0,len(vec)-1)\n \n start = start + ' ' + vec[ix]\n start = ' '.join(start.split()[-3:])\n \n new_text += (' ' + vec[ix])\n\n word = tagger(vec[ix])\n\n pos = [str(p.pos_) for p in word]\n if 'NOUN' in pos:\n count_sentenses += 1\n start = starts[count_sentenses]\n new_text += ('. ' +start)\n", "_____no_output_____" ], [ "new_text", "_____no_output_____" ], [ "new_text", "_____no_output_____" ] ], [ [ "## Correct the sentences", "_____no_output_____" ] ], [ [ "from spacy import displacy\n", "_____no_output_____" ], [ "new_text", "_____no_output_____" ], [ "new_text2", "_____no_output_____" ], [ "for sent in new_text_tokened.doc.sents:\n sent = tagger(sent.text)\n #displacy.serve(sent, style=\"dep\")\n for token in sent.doc:\n print(token.text, token.dep_,\n token.shape_, token.is_alpha, token.is_stop)", "I nsubj X True True\nhave aux xxxx True True\nnever neg xxxx True True\nsailed ROOT xxxx True False\nknown xcomp xxxx True False\na det x True True\nworrying dobj xxxx True False\nand cc xxx True True\nto aux xx True True\nease conj xxxx True False\n. punct . False False\nI nsubj X True True\nwas ROOT xxx True True\na det x True True\nFederal attr Xxxxx True False\nin prep xx True True\nall det xxx True True\nwaste pobj xxxx True False\n. punct . False False\nWe nsubj Xx True True\nhave aux xxxx True True\ncome ROOT xxxx True False\nback advmod xxxx True True\nfour nummod xxxx True True\nwrong dobj xxxx True False\nto aux xx True True\nmeet advcl xxxx True False\non prt xx True True\nwith prep xxxx True True\nwe pobj xx True True\nof prep xx True True\nit pobj xx True True\nand cc xxx True True\nof conj xx True True\nodor pobj xxxx True False\n. punct . 
False False\nNeedless ROOT Xxxxx True False\nto aux xx True True\nsay xcomp xxx True True\nonly advmod xxxx True True\nmaybe advmod xxxx True False\nthey nsubj xxxx True True\ndid aux xxx True True\nirritated ccomp xxxx True False\nat prep xx True True\nus pobj xx True True\nat prep xx True True\nGerman amod Xxxxx True False\ntime pobj xxxx True False\n. punct . False False\nUnder prep Xxxxx True True\ninternational amod xxxx True False\nlaw pobj xxx True False\nso advmod xx True True\ndoes aux xxxx True True\nhe nsubjpass xx True True\ngot auxpass xxx True False\nlost ROOT xxxx True False\na det x True True\nmile dobj xxxx True False\n. punct . False False\nI nsubj X True True\nfrankly advmod xxxx True False\ndid ROOT xxx True True\nan det xx True True\nwe compound xx True True\nriver dobj xxxx True False\n. punct . False False\nThese nsubj Xxxxx True True\nwere ROOT xxxx True True\nmy attr xx True True\nof prep xx True True\ntown pobj xxxx True False\n. punct . False False\nYou nsubj Xxx True True\ncould aux xxxx True True\nsee ROOT xxx True True\na det x True True\nsteam dobj xxxx True False\n. punct . False False\nI nsubj X True True\ntruly advmod xxxx True False\nenjoyed ROOT xxxx True False\nto aux xx True True\nbe xcomp xx True True\nquick acomp xxxx True False\nof prep xx True True\nit pobj xx True True\nto aux xx True True\nsummon xcomp xxxx True False\n. punct . False False\nSafe ROOT Xxxx True False\nto aux xx True True\ncome xcomp xxxx True False\non prt xx True True\nand cc xxx True True\nprognosticate conj xxxx True False\n. punct . False False\nI nsubj X True True\nam ROOT xx True True\npleased acomp xxxx True False\ninsure dep xxxx True False\nall appos xxx True True\nas prep xx True True\nit pobj xx True True\nas mark xx True True\nhe nsubj xx True True\nfelt advcl xxxx True False\na det x True True\njudge compound xxxx True False\ndeal dobj xxxx True False\n. punct . False False\nAnd cc Xxx True True\nshe nsubj xxx True True\nhad aux xxx True True\nkept ROOT xxxx True False\naway advmod xxxx True False\nfor prep xxx True True\nthe det xxx True True\ntwo nummod xxx True True\nsimple pobj xxxx True False\nout prt xxx True True\nwe nsubj xx True True\nwill aux xxxx True True\ntake ccomp xxxx True True\nyears dobj xxxx True False\n. punct . False False\nI nsubj X True True\nwill aux xxxx True True\nbuy ROOT xxx True False\nand cc xxx True True\nreign conj xxxx True False\n. punct . False False\nMost amod Xxxx True True\nnations nsubj xxxx True False\nopted csubjpass xxxx True False\nbe auxpass xx True True\nimagined ROOT xxxx True False\nat prep xx True True\nthe det xxx True True\npoodle amod xxxx True False\nCHAPTER pobj XXXX True False\n. punct . 
False False\nThe det Xxx True True\nmost advmod xxxx True True\nimpressive ROOT xxxx True False\n" ], [ "for token in doc:\n print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,\n token.shape_, token.is_alpha, token.is_stop)", "Skip Skip ROOT Xxxx True False\nto to prep xx True True\nmain main amod xxxx True False\ncontent content pobj xxxx True False\n\n \n SPACE _SP \n False False\nweb web dobj xxx True False\n\n \n SPACE _SP \n False False\ntexts text npadvmod xxxx True False\n\n \n SPACE _SP \n False False\nmovies movie conj xxxx True False\n\n \n SPACE _SP \n False False\naudio audio nmod xxxx True False\n\n \n SPACE _SP \n False False\nsoftware software appos xxxx True False\n\n \n SPACE _SP \n False False\nimage image appos xxxx True False\n\n \n SPACE _SP \n False False\nlogosearch logosearch npadvmod xxxx True False\n\n \n SPACE _SP \n False False\nSearch Search npadvmod Xxxxx True False\n\n \n SPACE _SP \n False False\nupload upload xcomp xxxx True False\n\n \n SPACE _SP \n False False\nUPLOAD UPLOAD nmod XXXX True False\n\n \n SPACE _SP \n False False\nperson person dobj xxxx True False\n\n \n SPACE _SP \n False False\nSIGN SIGN appos XXXX True False\nIN IN prep XX True True\n\n \n SPACE _SP \n False False\nABOUT ABOUT nmod XXXX True True\n\n \n \n \n \n \n False False\nCONTACT CONTACT nsubj XXXX True False\n\n \n \n \n \n \n False False\nBLOG BLOG ROOT XXXX True False\n\n \n \n \n \n \n False False\nPROJECTS PROJECTS ROOT XXXX True False\n\n \n \n \n \n \n False False\nHELP HELP nsubj XXXX True False\n\n \n \n \n \n \n False False\nDONATE DONATE ROOT XXXX True False\n\n \n \n \n \n \n False False\nJOBS JOBS dobj XXXX True False\n\n \n \n \n \n \n False False\nVOLUNTEER VOLUNTEER ROOT XXXX True False\n\n \n \n \n \n \n False False\nPEOPLE PEOPLE ROOT XXXX True False\n\n \n SPACE _SP \n False False\nFull Full amod Xxxx True True\ntext text ROOT xxxx True False\nof of prep xx True True\n\" \" punct \" False False\nThree Three nummod Xxxxx True True\nmen man pobj xxx True False\nin in prep xx True True\na a det x True True\nboat boat pobj xxxx True False\n: : punct : False False\n( ( punct ( False False\nto to aux xx True True\nsay say parataxis xxx True True\nnothing nothing dobj xxxx True True\nof of prep xx True True\nthe the det xxx True True\ndog dog pobj xxx True False\n) ) punct ) False False\n\" \" punct \" False False\n\n \n SPACE _SP \n False False\nSee See pcomp Xxx True True\nother other amod xxxx True True\nformats format dobj xxxx True False\n\n \n SPACE _SP \n False False\nTHE THE det XXX True True\nLIBRARY LIBRARY appos XXXX True False\n\n\n \n\n \n\n False False\nOF OF prep XX True True\n\n\n \n\n \n\n False False\nTHE THE det XXX True True\nUNIVERSITY UNIVERSITY ROOT XXXX True False\n\n \n SPACE _SP \n False False\nOF OF prep XX True True\nCALIFORNIA CALIFORNIA pobj XXXX True False\n\n\n \n\n \n\n False False\nLOS LOS compound XXX True False\nANGELES ANGELES compound XXXX True False\n\n\n\n\n \n\n\n\n \n\n\n\n False False\nGIFT GIFT appos XXXX True False\n\n\n \n\n \n\n False False\nFrom From ROOT Xxxx True True\nthe the det xxx True True\nLibrary Library nmod Xxxxx True False\nof of prep xx True True\n\n\n \n\n \n\n False False\nHenry Henry compound Xxxxx True False\nGoldman Goldman pobj Xxxxx True False\n, , punct , False False\nPh.D. Ph.D. appos Xx.X. 
False False\n\n\n \n\n \n\n False False\n1886 1886 punct dddd False False\n- - punct - False False\n1972 1972 prep dddd False False\n\n\n\n\n \n\n\n\n \n\n\n\n False False\nTHREE THREE nummod XXXX True True\nMEN MEN ROOT XXX True False\nIN IN prep XX True True\nA A det X True True\nBOAT BOAT pobj XXXX True False\n\n\n\n\n \n\n\n\n \n\n\n\n False False\n( ( punct ( False False\nTO TO aux XX True True\nSAY SAY ROOT XXX True True\nNOTHING NOTHING dobj XXXX True True\nOF OF prep XX True True\nTHE THE det XXX True True\nDOG DOG pobj XXX True False\n) ) punct ) False False\n\n\n\n\n \n\n\n\n \n\n\n\n False False\nBY BY prep XX True True\n\n \n SPACE _SP \n False False\nJEROME JEROME compound XXXX True False\nK. K. compound X. False False\nJEROME JEROME ROOT XXXX True False\n\n\n \n\n \n\n False False\nAUTHOR AUTHOR appos XXXX True False\nOF OF prep XX True True\n\n \n SPACE _SP \n False False\n\" \" punct \" False False\nRLE RLE compound XXX True False\nTHOUGHTS THOUGHTS ROOT XXXX True False\nOF OF prep XX True True\nAN AN det XX True True\nIDLE IDLE compound XXXX True False\nFELLOW FELLOW pobj XXXX True False\n, , punct , False False\n\" \" punct \" False False\n\" \" punct \" False False\nSTAGE STAGE compound XXXX True False\nLAND LAND appos XXXX True False\n, , punct , False False\n\" \" punct \" False False\nETC ETC compound XXX True False\n\n\n\n\n \n\n\n\n \n\n\n\n False False\nILLUSTRA ILLUSTRA compound XXXX True False\nTIONS TIONS appos XXXX True False\nBY BY prep XX True True\nA. A. compound X. False False\nFREDERICS FREDERICS pobj XXXX True False\n\n\n\n\n\n \n\n\n\n\n \n\n\n\n False False\nNEW NEW compound XXX True False\nYORK YORK compound XXXX True False\n\n\n \n\n \n\n False False\nHENRY HENRY compound XXXX True False\nHOLT HOLT conj XXXX True False\nAND AND cc XXX True True\nCOMPANY COMPANY conj XXXX True False\n\n \n SPACE _SP \n False False\n1890 1890 npadvmod dddd False False\n\n\n\n\n \n\n\n\n \n\n\n\n False False\nAnnex Annex nmod Xxxxx True False\n\n\n\n\n \n\n\n\n \n\n\n\n False False\nPREFACE PREFACE nmod XXXX True False\n\n\n\n\n \n\n\n\n \n\n\n\n False False\nr r compound x True False\nl l nmod x True False\nlie lie nmod xxx True False\nchief chief amod xxxx True False\nbeauty beauty nsubj xxxx True False\nof of prep xx True True\nthis this det xxxx True True\nbook book pobj xxxx True False\nlies lie ROOT xxxx True False\nnot not neg xxx True True\nso so advmod xx True True\nmuch much advmod xxxx True True\nin in prep xx True True\n\n \n SPACE _SP \n False False\nits its poss xxx True True\nliteral literal amod xxxx True False\n\\ \\ compound \\ False False\nst\\h\\ st\\h\\ ROOT xx\\x\\ False False\nor or cc xx True True\nin in conj xx True True\nthe the det xxx True True\nextent extent amod xxxx True False\nanJ anJ compound xxX True False\nusefulness usefulness pobj xxxx True False\nof of prep xx True True\nthe the det xxx True True\n\n \n SPACE _SP \n False False\ninformation information pobj xxxx True False\nit it nsubj xx True True\nconveys convey relcl xxxx True False\n, , punct , False False\nas a prep xx True True\nin in prep xx True True\nits its poss xxx True True\nsin sin pobj xxx True False\n: : punct : False False\np!e p!e amod x!x False False\ntruthfulness truthfulness appos xxxx True False\n. . punct . 
False False\n\n \n SPACE _SP \n False False\nli li ROOT xx True False\n* * punct * False False\npages page nsubj xxxx True False\nform form ROOT xxxx True False\nthe the det xxx True True\nrecord record dobj xxxx True False\nof of prep xx True True\nevents event pobj xxxx True False\nthat that nsubj xxxx True True\nreally really advmod xxxx True True\nhap- hap- relcl xxx- False False\n\n \n SPACE _SP \n False False\npened pened " ] ], [ [ "### Do not allow of the same in a row - did not work", "_____no_output_____" ] ], [ [ "new_text_1 = ''", "_____no_output_____" ], [ "for sent in new_text_tokened.doc.sents:\n sent = tagger(sent.text)\n #\n last_dep_ = ''\n for token in sent.doc:\n if last_dep_ == token.dep_:\n if sum(required_sents.values()) > 2:\n new_text_1 += '. '\n break\n last_dep_ = token.dep_ \n new_text_1 += (' ' + token.text)\n if token.dep_ in required_sents:\n required_sents[token.dep_] += 1\n \n \n print(token.text, token.dep_,\n token.shape_, token.is_alpha, token.is_stop)", "I nsubj X True True\nhave aux xxxx True True\nnever neg xxxx True True\nsailed ROOT xxxx True False\nknown xcomp xxxx True False\na det x True True\nworrying dobj xxxx True False\nand cc xxx True True\nto aux xx True True\nease conj xxxx True False\n. punct . False False\nI nsubj X True True\nwas ROOT xxx True True\na det x True True\nFederal attr Xxxxx True False\nin prep xx True True\nall det xxx True True\nwaste pobj xxxx True False\n. punct . False False\nWe nsubj Xx True True\nhave aux xxxx True True\ncome ROOT xxxx True False\nback advmod xxxx True True\nfour nummod xxxx True True\nwrong dobj xxxx True False\nto aux xx True True\nmeet advcl xxxx True False\non prt xx True True\nwith prep xxxx True True\nwe pobj xx True True\nof prep xx True True\nit pobj xx True True\nand cc xxx True True\nof conj xx True True\nodor pobj xxxx True False\n. punct . False False\nNeedless ROOT Xxxxx True False\nto aux xx True True\nsay xcomp xxx True True\nonly advmod xxxx True True\nUnder prep Xxxxx True True\ninternational amod xxxx True False\nlaw pobj xxx True False\nso advmod xx True True\ndoes aux xxxx True True\nhe nsubjpass xx True True\ngot auxpass xxx True False\nlost ROOT xxxx True False\na det x True True\nmile dobj xxxx True False\n. punct . False False\nI nsubj X True True\nfrankly advmod xxxx True False\ndid ROOT xxx True True\nan det xx True True\nwe compound xx True True\nriver dobj xxxx True False\n. punct . False False\nThese nsubj Xxxxx True True\nwere ROOT xxxx True True\nmy attr xx True True\nof prep xx True True\ntown pobj xxxx True False\n. punct . False False\nYou nsubj Xxx True True\ncould aux xxxx True True\nsee ROOT xxx True True\na det x True True\nsteam dobj xxxx True False\n. punct . False False\nI nsubj X True True\ntruly advmod xxxx True False\nenjoyed ROOT xxxx True False\nto aux xx True True\nbe xcomp xx True True\nquick acomp xxxx True False\nof prep xx True True\nit pobj xx True True\nto aux xx True True\nsummon xcomp xxxx True False\n. punct . False False\nSafe ROOT Xxxx True False\nto aux xx True True\ncome xcomp xxxx True False\non prt xx True True\nand cc xxx True True\nprognosticate conj xxxx True False\n. punct . False False\nI nsubj X True True\nam ROOT xx True True\npleased acomp xxxx True False\ninsure dep xxxx True False\nall appos xxx True True\nas prep xx True True\nit pobj xx True True\nas mark xx True True\nhe nsubj xx True True\nfelt advcl xxxx True False\na det x True True\njudge compound xxxx True False\ndeal dobj xxxx True False\n. punct . 
False False\nAnd cc Xxx True True\nshe nsubj xxx True True\nhad aux xxx True True\nkept ROOT xxxx True False\naway advmod xxxx True False\nfor prep xxx True True\nthe det xxx True True\ntwo nummod xxx True True\nsimple pobj xxxx True False\nout prt xxx True True\nwe nsubj xx True True\nwill aux xxxx True True\ntake ccomp xxxx True True\nyears dobj xxxx True False\n. punct . False False\nI nsubj X True True\nwill aux xxxx True True\nbuy ROOT xxx True False\nand cc xxx True True\nreign conj xxxx True False\n. punct . False False\nMost amod Xxxx True True\nnations nsubj xxxx True False\nopted csubjpass xxxx True False\nbe auxpass xx True True\nimagined ROOT xxxx True False\nat prep xx True True\nthe det xxx True True\npoodle amod xxxx True False\nCHAPTER pobj XXXX True False\n. punct . False False\nThe det Xxx True True\nmost advmod xxxx True True\nimpressive ROOT xxxx True False\n" ], [ "new_text_1", "_____no_output_____" ] ], [ [ "## how does it look", "_____no_output_____" ] ], [ [ "from itertools import islice, count\n", "_____no_output_____" ], [ "sent = next(islice(new_text_tokened.doc.sents, 2, 2+1))\ndisplacy.render(sent, style=\"dep\")", "_____no_output_____" ] ], [ [ "## Simple sentences are better: remove clauses", "_____no_output_____" ] ], [ [ "for token in sent:\n print(token.text, token.dep_, token.head.text, token.head.pos_,\n [child for child in token.children])", "_____no_output_____" ] ], [ [ "### if a child token becomes parent, drop that part", "_____no_output_____" ] ], [ [ "def less_dependencies(new_text) :\n if len(new_text.split(' ')) < 9: \n return new_text\n new_text_tokened = tagger(new_text)\n new_text2 = ''\n children_set = set()\n for sent in new_text_tokened.doc.sents:\n for token in sent:\n try:\n first_item = next(token.children) \n children_set.update([child.text for child in token.children]) \n children_set.update([token.text]) \n break\n except StopIteration:\n pass\n for token in sent:\n if token.text not in children_set:\n print('----------------------')\n break\n new_text2 += (' ' +token.text)\n #print(token.text, token.dep_, token.head.text, token.head.pos_,\n # [child for child in token.children])\n new_text2 += '. 
'\n return new_text2", "_____no_output_____" ], [ "len(new_text.split(' '))", "_____no_output_____" ], [ "new_text='And she had kept away for the two simple out we will take years'", "_____no_output_____" ], [ "less_dependencies(new_text)", "----------------------\n" ] ], [ [ "# Answer questions", "_____no_output_____" ] ], [ [ "questions = [\"What have you done?\",\n 'Who are you?'\n ,'Where are you from?'\n ,'Why is it dark?'\n ,'Why are you sad?'\n ,'When is it too late?'\n ,'What is the purpose?'\n ,'Are we there yet?'\n ,'What do you do?'\n ,'How are you?'\n ,'What are the unwritten rules of where you work?'\n ]\n\n", "_____no_output_____" ] ], [ [ "### Do replacements (rule-based) ", "_____no_output_____" ] ], [ [ "def select_we_are():\n starts = ['I am desperately'\n , 'Sometimes, I am'\n , 'Strangely enough, I feel'\n , 'I used to be'\n , 'I found myself'\n ,'I think therefore']\n ix=random.randint(0,len(starts)-1)\n \n return starts[ix]", "_____no_output_____" ], [ "def select_because():\n starts = ['Because of this, '\n , 'Because I am worth it,'\n , 'Because I could not stop'\n , 'Because so much'\n , 'Because your best days'\n ]\n ix=random.randint(0,len(starts)-1)\n \n return starts[ix]", "_____no_output_____" ], [ "def select_how():\n starts = ['Desperately trying to'\n ,'Needless to say'\n ,'Everything functioned exactly'\n ,'Fortunately the officials'\n ,'Safe to come'\n ,'Under international law']\n ix=random.randint(0,len(starts)-1)\n \n return starts[ix]", "_____no_output_____" ], [ "def select_what():\n starts = [\"I have never\",\n 'I was a'\n ,'We have come'\n ,'I frankly didn\\’t'\n ,'These were random'\n ,'You could see'\n ,'I truly enjoyed'\n ,'I am pleased to'\n ,'Most nations opted'\n ,'The most impressive']\n ix=random.randint(0,len(starts)-1)\n \n return starts[ix]", "_____no_output_____" ], [ "def select_location():\n starts = ['In the woods'\n , 'Over the shoulder'\n , 'Over there in'\n , 'In your dreams'\n , 'In the desert'\n ,'Nowhere at all']\n ix=random.randint(0,len(starts)-1)\n \n return starts[ix]", "_____no_output_____" ], [ "def prepare_start_of_response(question):\n if 'you' in question.lower() and 'are' in question.lower():\n return select_we_are()\n elif 'where' in question.lower():\n return select_location()\n elif 'why' in question.lower():\n return select_because()\n elif 'how' in question.lower():\n return select_how()\n elif 'what' in question.lower():\n return select_what()\n else:\n return select_what()", "_____no_output_____" ], [ "for q in questions:\n print(prepare_start_of_response(q))", "The most impressive\nI am desperately\nSometimes, I am\nBecause I could not stop\nSometimes, I am\nThe most impressive\nMost nations opted\nWe have come\nI am pleased to\nI think therefore\nI am desperately\n" ], [ "def strip_punct(start):\n return start.replace(',','').replace('.','').replace('?','').replace('!','')", "_____no_output_____" ], [ "max_sentences = 1\ndef generate_qa(q):\n start=''\n new_text=''\n count_sentenses = 0\n start = prepare_start_of_response(q)\n \n new_text += start\n start = ' '.join(start.split()[-3:])\n while count_sentenses < max_sentences:\n \n vec = nns.nearest(concatenate_vectors(nlp(strip_punct(start))))\n ix=random.randint(0,len(vec)-1)\n\n start = start + ' ' + vec[ix]\n #print(vec[ix])\n start = ' '.join(start.split()[-3:])\n\n new_text += (' ' + vec[ix])\n c=[1 if c=='.' 
else 0 for c in new_text]\n if sum(c) >1:\n break\n #print(new_text)\n word = tagger(vec[ix])\n\n pos = [str(p.pos_) for p in word]\n if ('NOUN' in pos) or 'PRONOUN' in pos:\n count_sentenses += 1\n #start = ''#prepare_start_of_response(q)\n start = starts[random.randint(0,len(starts)-1)]\n \n new_text += ('. ' +start)\n return less_dependencies(new_text)\n #return new_text", "_____no_output_____" ], [ "for q in questions:\n print(q)\n print(generate_qa(q))\n print('-----------------------------------')", "What have you done?\n----------------------\n----------------------\n These were random easy. Needless. \n-----------------------------------\nWho are you?\n----------------------\n I used. I am pleased. \n-----------------------------------\nWhere are you from?\n----------------------\n I am desperately gon. We have come. \n-----------------------------------\nWhy is it dark?\n----------------------\n Because I could not stop happen. I will buy. \n-----------------------------------\nWhy are you sad?\nI am desperately sex. The most impressive\n-----------------------------------\nWhen is it too late?\n----------------------\n These were. We have come. \n-----------------------------------\nWhat is the purpose?\n----------------------\n Most nations. I frankly did. \n-----------------------------------\nAre we there yet?\n----------------------\n These were random. I was a. \n-----------------------------------\nWhat do you do?\n----------------------\n I have never felt good worse worse. I truly enjoyed. \n-----------------------------------\nHow are you?\n----------------------\n I found. And she had. \n-----------------------------------\nWhat are the unwritten rules of where you work?\nI am desperately your ass. Most nations opted\n-----------------------------------\n" ], [ "generate_qa('What do you know?')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec751aa7ff744cbbced23aabdeead11e684c974f
136,577
ipynb
Jupyter Notebook
Problem-Statement in Linear Regression-Cricket-Temp.ipynb
KarinkiManikanta/Pandas
0af6dbb8553e3a3abd24ff0001cc18f9da557cb4
[ "MIT" ]
null
null
null
Problem-Statement in Linear Regression-Cricket-Temp.ipynb
KarinkiManikanta/Pandas
0af6dbb8553e3a3abd24ff0001cc18f9da557cb4
[ "MIT" ]
null
null
null
Problem-Statement in Linear Regression-Cricket-Temp.ipynb
KarinkiManikanta/Pandas
0af6dbb8553e3a3abd24ff0001cc18f9da557cb4
[ "MIT" ]
1
2020-04-17T17:51:35.000Z
2020-04-17T17:51:35.000Z
143.765263
34,612
0.867533
[ [ [ "import numpy as np\nimport pandas as pd\nimport pandas_profiling \nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "# Problem Statement: We have predict the temparature in the ground weather we can play the match are not when the temparature in high and performance Exploratory Data Analysis (EDA) for each column ", "_____no_output_____" ] ], [ [ "data=pd.read_csv('https://raw.githubusercontent.com/KarinkiManikanta/Data-Sets-For-Machine-Learnig-and-Data-Science/master/DataSets/Regression_Data_cricket_chirps.csv')\ndata", "_____no_output_____" ], [ "plt.plot(data)", "_____no_output_____" ], [ "data.isna()", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "data.tail()", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ], [ "data.ndim", "_____no_output_____" ], [ "data.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 15 entries, 0 to 14\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Chirps/Minute 15 non-null float64\n 1 Temperature 15 non-null float64\ndtypes: float64(2)\nmemory usage: 368.0 bytes\n" ], [ "data.describe()", "_____no_output_____" ], [ "x=data['Chirps/Minute']\ny=data['Temperature']", "_____no_output_____" ], [ "slope,intercept,r_value,p_value,std_error=stats.linregress(x,y)", "_____no_output_____" ], [ "print(\"slope of the equation is:{}\".format(slope))", "slope of the equation is:3.291094089026233\n" ], [ "print(\"intercept of the equation is:{}\".format(intercept))", "intercept of the equation is:25.232313104083133\n" ], [ "print(\"r_square is:{}\".format(r_value**2))", "r_square is:0.6974651446012033\n" ], [ "print(\"p_value is:{}\".format(p_value))", "p_value is:0.00010667185597430323\n" ], [ "print(\"standerd error is:{}\".format(std_error))", "standerd error is:0.6011669223418112\n" ], [ "plt.figure(figsize=(16,10))\nplt.scatter(x,y)\nplt.plot(x,intercept+slope*x,'r')\nplt.title(\"linear regression line for Chirps/Minute and Temperature \")\nplt.xlabel(\"Chirps/Minute \")\nplt.ylabel(\"Temperature with respective to Chirps/Minute and Temperature\")", "_____no_output_____" ], [ "sns.pairplot(data)", "_____no_output_____" ], [ "sns.kdeplot(data['Chirps/Minute'])", "_____no_output_____" ], [ "sns.kdeplot(data['Temperature'])", "_____no_output_____" ], [ "sns.heatmap(data.corr())", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec751c8b5e15f54e0c7e03f363bd366c51d49d68
119,218
ipynb
Jupyter Notebook
in_progress/old/Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.ipynb
Steve-Hawk/nrpytutorial
42d7450dba8bf43aa9c2d8f38f85f18803de69b7
[ "BSD-2-Clause" ]
1
2020-06-09T16:16:21.000Z
2020-06-09T16:16:21.000Z
in_progress/old/Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.ipynb
Steve-Hawk/nrpytutorial
42d7450dba8bf43aa9c2d8f38f85f18803de69b7
[ "BSD-2-Clause" ]
null
null
null
in_progress/old/Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.ipynb
Steve-Hawk/nrpytutorial
42d7450dba8bf43aa9c2d8f38f85f18803de69b7
[ "BSD-2-Clause" ]
1
2021-03-02T12:51:56.000Z
2021-03-02T12:51:56.000Z
51.387069
655
0.57774
[ [ [ "<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-59152712-8\"></script>\n<script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-59152712-8');\n</script>\n\n# $\\texttt{GiRaFFE}$: Solving GRFFE equations at a higher Finite Differencing order\n\n## Authors: Zach Etienne & Patrick Nelson\n### Formatting improvements courtesy Brandon Clark\n\n## GiRaFFE_HO_v2 is an experiment that omits the analytic derivatives of $\\partial_j T^j_{{\\rm EM} i}$. \n\n### Note that, for full compatibility with .par files, running this notebook will <font color = 'red'>overwrite</font> the thorn GiraFFE_HO\n\n[comment]: <> (Notebook Status and Validation Notes: TODO)\n\n\n### NRPy+ Source Code for this module: [GiRaFFE_HO/GiRaFFE_Higher_Order_v2.py](../edit/GiRaFFE_HO/GiRaFFE_Higher_Order_v2.py) [\\[tutorial\\]](Tutorial-GiRaFFE_Higher_Order_v2.ipynb) Constructs GRFFE evolution equations and initial data as SymPy expressions.\n\n\n\n## Introduction:\n\nThis module focuses on using the equations developed in the [Tutorial-GiRaFFE_Higher_Order_v2](Tutorial-GiRaFFE_Higher_Order_v2.ipynb) tutorial notebook to build an Einstein Toolkit (ETK) thorn to solve the GRFFE equations in Cartesian coordinates. This tutorial will focus on implementing the time evolution aspects; others can be contructed to set up specific initial data.\n\nWhen interfaced properly with the ETK, this module will propagate the initial data for $\\tilde{S}_i$, $A_i$, and $\\sqrt{\\gamma} \\Phi$, defined in the last tutorial, forward in time by integrating the equations for $\\partial_t \\tilde{S}_i$, $\\partial_t A_i$ and $\\partial_t [\\sqrt{\\gamma} \\Phi]$ subject to spatial boundary conditions. The time evolution itself is handled by the $\\text{MoL}$ (Method of Lines) thorn in the $\\text{CactusNumerical}$ arrangement, and the boundary conditions by the $\\text{Boundary}$ thorn in the $\\text{CactusBase}$ arrangement. \n\nSimilar to the other ETK modules we have built, we will construct the WaveToyNRPy module in two steps.\n\n1. Call on NRPy+ to convert the SymPy expressions for the evolution equations into one C-code kernel.\n1. Write the C code and linkages to the Einstein Toolkit infrastructure (i.e., the .ccl files) to complete this Einstein Toolkit module.", "_____no_output_____" ], [ "<a id='toc'></a>\n\n# Table of Contents\n$$\\label{toc}$$ \n\nThis notebook is organized as follows\n\n1. [Step 1](#initializenrpy): Call on NRPy+ to convert the SymPy expression for the evolution equations and A-to-B into one C-code kernel for each\n 1. [Step 1.a](#import): Import NRPy+ core modules and the `GiRaFFE_Higher_Order_v2` module\n 1. [Step 1.b](#ccode_output): Create the C code output kernel \n 1. [Step 1.c](#ccode_write): Write C code to files\n 1. [Step 1.d](#a2bdriver): The A-to-B driver\n1. [Step 2](#etk): Interfacing with the Einstein Toolkit\n 1. [Step 2.a](#einstein_c): Constructing the Einstein Toolkit C-code calling functions that include the C code kernels\n 1. [Step 2.b](#conservative2primitive): The Conservative to Primitive Solver\n 1. [Step 2.b.i](#old_giraffe): Including `GiRaFFE_headers.h` from old version of GiRaFFE\n 1. [Step 2.b.ii](#compute_conservatives): Writing `compute_conservatives_FFE.C`\n 1. [Step 2.c](#grmhd): GRMHD variables provided by HydroBase\n 1. [Step 2.d](#timelevel): Copying initial data to additional timelevels\n 1. 
[Step 2.e](#cclfiles): CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure\n 1. [Step2.e.i](#interface): `interface.ccl`\n 1. [Step2.e.ii](#param): `param.ccl`\n 1. [Step2.e.iiii](#schedule): `schedule.ccl`\n 1. [Step 2.f](#einstein_list): Add the C code to the Einstein Toolkit compilation list\n1. [Step 3](#latex_pdf_output): Output this notebook to $\\LaTeX$-formatted PDF file", "_____no_output_____" ], [ "<a id='initializenrpy'></a>\n\n# Step 1: Call on NRPy+ to convert the SymPy expressions for the evolution equations and A-to-B into one C-code kernel for each \\[Back to [top](#toc)\\]\n$$\\label{initializenrpy}$$\n\n\n", "_____no_output_____" ], [ "<a id='import'></a>\n\n## Step 1.a: Import NRPy+ core modules and the `GiRaFFE_Higher_Order_v2` module \\[Back to [top](#toc)\\]\n$$\\label{import}$$\n\nWe start by importing the core NRPy+ modules we need and setting commonly used parameters. Since we are writing an ETK thorn, we'll need to set `\"grid::GridFuncMemAccess\"` to `\"ETK\"`. We will then import the [GiRaFFE_Higher_Order.py](../edit/GiRaFFE_HO/GiRaFFE_Higher_Order.py) module we previously created and run its main function to generate the SymPy for the expressions we want.", "_____no_output_____" ] ], [ [ "# Step 0: Add NRPy's directory to the path\n# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory\nimport os,sys\nnrpy_dir_path = os.path.join(\"..\")\nif nrpy_dir_path not in sys.path:\n sys.path.append(nrpy_dir_path)\n\nimport NRPy_param_funcs as par\nimport indexedexp as ixp\nimport grid as gri\nimport finite_difference as fin\nfrom outputC import *\nimport loop\n\n#Step 0: Set the spatial dimension parameter to 3.\npar.set_parval_from_str(\"grid::DIM\", 3)\nDIM = par.parval_from_str(\"grid::DIM\")\npar.set_parval_from_str(\"grid::GridFuncMemAccess\",\"ETK\")\n\n# Step 1c: Call the GiRaFFE_Higher_Order() function from within the\n# GiRaFFE_HO/GiRaFFE_Higher_Order.py module.\nimport GiRaFFE_HO.GiRaFFE_Higher_Order_v2 as gho\ngho.GiRaFFE_Higher_Order_v2()\n\n# Step 1: Set the finite differencing order to 4.\npar.set_parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\", 2)\n", "_____no_output_____" ] ], [ [ "<a id='ccode_output'></a>\n\n## Step 1.b: Create the C code output kernel \\[Back to [top](#toc)\\]\n$$\\label{ccode_output}$$\n\nTo do this, for each header file we need, we will set up an array of `lhrh` objects representing the gridfunctions to print. We will then pass that array to `FD_outputC()` to get a string of optimized C code. 
Next, we use the loop function to add code to the kernel so that is applied to the whole ETK grid.\n", "_____no_output_____" ] ], [ [ "# Step 2: Create the C code output kernel.\nPrereqs_to_print = [\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"AevolParen\"),rhs=gho.AevolParen),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"PevolParenU0\"),rhs=gho.PevolParenU[0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"PevolParenU1\"),rhs=gho.PevolParenU[1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"PevolParenU2\"),rhs=gho.PevolParenU[2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"SevolParenUD00\"),rhs=gho.SevolParenUD[0][0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"SevolParenUD01\"),rhs=gho.SevolParenUD[0][1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"SevolParenUD02\"),rhs=gho.SevolParenUD[0][2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"SevolParenUD10\"),rhs=gho.SevolParenUD[1][0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"SevolParenUD11\"),rhs=gho.SevolParenUD[1][1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"SevolParenUD12\"),rhs=gho.SevolParenUD[1][2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"SevolParenUD20\"),rhs=gho.SevolParenUD[2][0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"SevolParenUD21\"),rhs=gho.SevolParenUD[2][1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"SevolParenUD22\"),rhs=gho.SevolParenUD[2][2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU00\"),rhs=gho.gammaUU[0][0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU01\"),rhs=gho.gammaUU[0][1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU02\"),rhs=gho.gammaUU[0][2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU11\"),rhs=gho.gammaUU[1][1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU12\"),rhs=gho.gammaUU[1][2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU22\"),rhs=gho.gammaUU[2][2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammadet\"),rhs=gho.gammadet),\\\n ]\n\nmetric_quantities_to_print = [\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU00\"),rhs=gho.gammaUU[0][0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU01\"),rhs=gho.gammaUU[0][1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU02\"),rhs=gho.gammaUU[0][2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU11\"),rhs=gho.gammaUU[1][1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU12\"),rhs=gho.gammaUU[1][2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammaUU22\"),rhs=gho.gammaUU[2][2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"gammadet\"),rhs=gho.gammadet),\\\n ]\n\n# To best format this for the ETK, we'll need to register these gridfunctions.\nStilde_rhsD = ixp.register_gridfunctions_for_single_rank1(\"AUX\",\"Stilde_rhsD\")\nA_rhsD = ixp.register_gridfunctions_for_single_rank1(\"AUX\",\"A_rhsD\")\npsi6Phi_rhs = gri.register_gridfunctions(\"AUX\",\"psi6Phi_rhs\")\nConservs_to_print = [\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"Stilde_rhsD0\"),rhs=gho.Stilde_rhsD[0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"Stilde_rhsD1\"),rhs=gho.Stilde_rhsD[1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"Stilde_rhsD2\"),rhs=gho.Stilde_rhsD[2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"A_rhsD0\"),rhs=gho.A_rhsD[0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"A_rhsD1\"),rhs=gho.A_rhsD[1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"A_rhsD2\"),rhs=gho.A_rhsD[2]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"psi6Phi_rhs\"),rhs=gho.psi6Phi_rhs),\\\n ]\n\nPrereqs_CKernel = fin.FD_outputC(\"returnstring\",Prereqs_to_print,params=\"outCverbose=False\")\n#Prereqs_CKernel = \"const double u0 = u0GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)];\\n\" + Prereqs_CKernel\nmetric_quantities_CKernel = 
fin.FD_outputC(\"returnstring\",metric_quantities_to_print,params=\"outCverbose=False\")\nConservs_CKernel = fin.FD_outputC(\"returnstring\",Conservs_to_print,params=\"outCverbose=False\")\n#Conservs_CKernel = \"const double u0 = u0GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)];\\n\" + Conservs_CKernel\n\nPrereqs_looped = loop.loop([\"i2\",\"i1\",\"i0\"],[\"0\",\"0\",\"0\"],\\\n [\"cctk_lsh[2]\",\"cctk_lsh[1]\",\"cctk_lsh[0]\"],\\\n [\"1\",\"1\",\"1\"],[\"#pragma omp parallel for\",\"\",\"\"],\"\",\\\n Prereqs_CKernel.replace(\"time\",\"cctk_time\"))\n\nmetric_quantities_looped = loop.loop([\"i2\",\"i1\",\"i0\"],[\"0\",\"0\",\"0\"],\\\n [\"cctk_lsh[2]\",\"cctk_lsh[1]\",\"cctk_lsh[0]\"],\\\n [\"1\",\"1\",\"1\"],[\"#pragma omp parallel for\",\"\",\"\"],\"\",\\\n metric_quantities_CKernel.replace(\"time\",\"cctk_time\"))\n\nConservs_looped = loop.loop([\"i2\",\"i1\",\"i0\"],[\"cctk_nghostzones[2]\",\"cctk_nghostzones[1]\",\"cctk_nghostzones[0]\"],\\\n [\"cctk_lsh[2]-cctk_nghostzones[2]\",\"cctk_lsh[1]-cctk_nghostzones[1]\",\\\n \"cctk_lsh[0]-cctk_nghostzones[0]\"],\\\n [\"1\",\"1\",\"1\"],[\"#pragma omp parallel for\",\"\",\"\"],\"\",\\\n Conservs_CKernel.replace(\"time\",\"cctk_time\"))\n", "_____no_output_____" ] ], [ [ "<a id='ccode_write'></a>\n\n## Step 1.c: Write C code to files \\[Back to [top](#toc)\\]\n$$\\label{ccode_write}$$\n\nHere, we write our C code to files after first creating appropriate directories. Note that we also import [u0_smallb_Poynting__Cartesian.py](../edit/u0_smallb_Poynting__Cartesian/u0_smallb_Poynting__Cartesian.py) for its `computeu0_Cfunction`.", "_____no_output_____" ] ], [ [ "# Step 3: Create directories for the thorn if they don't exist.\n!mkdir GiRaFFE_HO 2>/dev/null # 2>/dev/null: Don't throw an error if the directory already exists.\n!mkdir GiRaFFE_HO/src 2>/dev/null # 2>/dev/null: Don't throw an error if the directory already exists.\n\n# Step 4: Write the C code kernel to file.\nwith open(\"GiRaFFE_HO/src/Prereqs.h\", \"w\") as file:\n file.write(str(Prereqs_looped))\n\nwith open(\"GiRaFFE_HO/src/metric_quantities.h\", \"w\") as file:\n file.write(str(metric_quantities_looped))\n\nwith open(\"GiRaFFE_HO/src/Conservs.h\", \"w\") as file:\n file.write(str(Conservs_looped))\n\n# Step 5: Import the function to calculate u0 and write it to a file.\nimport u0_smallb_Poynting__Cartesian.u0_smallb_Poynting__Cartesian as u0etc\n#u0etc.compute_u0_smallb_Poynting__Cartesian(gammaDD,betaU,alpha,ValenciavU,BU)\n\nwith open(\"GiRaFFE_HO/src/computeu0_Cfunction.h\", \"w\") as file:\n file.write(u0etc.computeu0_Cfunction)\n", "_____no_output_____" ] ], [ [ "<a id='a2bdriver'></a>\n\n## Steps 1.d: The A-to-B driver \\[Back to [top](#toc)\\]\n$$\\label{a2bdriver}$$\n\nWe will also need a routine to compute new Valencia 3-velocities at each timestep using a conservative-to-primitive solver. Since we need $v^i_{(n)}$ everywhere, this will require us to compute $B^i$ everywhere. However, $B^i = \\epsilon^{ijk} \\partial_j A_k$ requires derivatives of $A_i$, so getting $B^i$ will require some finesse. A chief aspect of this will require using lower-order finite differencing in the ghost zones. To that end, we will create header files for each finite differencing order $\\leq 10$, as well as upwinded- and downwinded-derivatives at 2nd order. 
These will let us compute the derivative at the outermost gridpoints.", "_____no_output_____" ] ], [ [ "# Step 6: The A-to-B driver\n\n# Step 6a: Import the Levi-Civita symbol and build the corresponding tensor.\n# We already have a handy function to define the Levi-Civita symbol in WeylScalars\nimport WeylScal4NRPy.WeylScalars_Cartesian as weyl\nLeviCivitaDDD = weyl.define_LeviCivitaSymbol_rank3()\nLeviCivitaUUU = ixp.zerorank3()\nfor i in range(DIM):\n for j in range(DIM):\n for k in range(DIM):\n LCijk = LeviCivitaDDD[i][j][k]\n #LeviCivitaDDD[i][j][k] = LCijk * sp.sqrt(gho.gammadet)\n LeviCivitaUUU[i][j][k] = LCijk / sp.sqrt(gho.gammadet)\n\nAD_dD = ixp.declarerank2(\"AD_dD\",\"nosym\")\nBU = ixp.zerorank1() # BU is already registered as a gridfunction, but we need to zero its values and declare it in this scope.\n# Step 6b: We can use this function to compactly reset to expressions to print at each FD order.\ndef set_BU_to_print():\n return [lhrh(lhs=gri.gfaccess(\"out_gfs\",\"BU0\"),rhs=BU[0]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"BU1\"),rhs=BU[1]),\\\n lhrh(lhs=gri.gfaccess(\"out_gfs\",\"BU2\"),rhs=BU[2])] \n\nfor i in range(DIM):\n for j in range(DIM):\n for k in range(DIM):\n BU[i] += LeviCivitaUUU[i][j][k] * AD_dD[k][j]\n\n# Step 6c: We'll lower the FD order at each stage and write to a new file.\npar.set_parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\", 10)\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_10.h\",set_BU_to_print(),params=\"outCverbose=False\")\n\npar.set_parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\", 8)\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_8.h\",set_BU_to_print(),params=\"outCverbose=False\")\n\npar.set_parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\", 6)\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_6.h\",set_BU_to_print(),params=\"outCverbose=False\")\n\npar.set_parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\", 4)\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_4.h\",set_BU_to_print(),params=\"outCverbose=False\")\n\npar.set_parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\", 2)\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_2.h\",set_BU_to_print(),params=\"outCverbose=False\")\n\n# Step 6c: For the outermost points, we'll need a separate file for each face. 
\n# These will correspond to an upwinded and a downwinded file for each direction.\nAD_ddnD = ixp.declarerank2(\"AD_ddnD\",\"nosym\")\nfor i in range(DIM):\n BU[i] = 0\n for j in range(DIM):\n for k in range(DIM):\n if j is 0:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_ddnD[k][j]\n else:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_dD[k][j]\n\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_2x0D.h\",set_BU_to_print(),params=\"outCverbose=False\")\n\nAD_dupD = ixp.declarerank2(\"AD_dupD\",\"nosym\")\nfor i in range(DIM):\n BU[i] = 0\n for j in range(DIM):\n for k in range(DIM):\n if j is 0:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_dupD[k][j]\n else:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_dD[k][j]\n\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_2x0U.h\",set_BU_to_print(),params=\"outCverbose=False\")\n\nfor i in range(DIM):\n BU[i] = 0\n for j in range(DIM):\n for k in range(DIM):\n if j is 1:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_ddnD[k][j]\n else:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_dD[k][j]\n\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_2x1D.h\",set_BU_to_print(),params=\"outCverbose=False\")\nfor i in range(DIM):\n BU[i] = 0\n for j in range(DIM):\n for k in range(DIM):\n if j is 1:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_dupD[k][j]\n else:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_dD[k][j]\n\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_2x1U.h\",set_BU_to_print(),params=\"outCverbose=False\")\n\nfor i in range(DIM):\n BU[i] = 0\n for j in range(DIM):\n for k in range(DIM):\n if j is 2:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_ddnD[k][j]\n else:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_dD[k][j]\n\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_2x2D.h\",set_BU_to_print(),params=\"outCverbose=False\")\nfor i in range(DIM):\n BU[i] = 0\n for j in range(DIM):\n for k in range(DIM):\n if j is 2:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_dupD[k][j]\n else:\n BU[i] += LeviCivitaUUU[i][j][k] * AD_dD[k][j]\n\nfin.FD_outputC(\"GiRaFFE_HO/src/B_from_A_2x2U.h\",set_BU_to_print(),params=\"outCverbose=False\")\n", "Wrote to file \"GiRaFFE_HO/src/B_from_A_10.h\"\nWrote to file \"GiRaFFE_HO/src/B_from_A_8.h\"\nWrote to file \"GiRaFFE_HO/src/B_from_A_6.h\"\nWrote to file \"GiRaFFE_HO/src/B_from_A_4.h\"\nWrote to file \"GiRaFFE_HO/src/B_from_A_2.h\"\nWrote to file \"GiRaFFE_HO/src/B_from_A_2x0D.h\"\nWrote to file \"GiRaFFE_HO/src/B_from_A_2x0U.h\"\nWrote to file \"GiRaFFE_HO/src/B_from_A_2x1D.h\"\nWrote to file \"GiRaFFE_HO/src/B_from_A_2x1U.h\"\nWrote to file \"GiRaFFE_HO/src/B_from_A_2x2D.h\"\nWrote to file \"GiRaFFE_HO/src/B_from_A_2x2U.h\"\n" ] ], [ [ "<a id='etk'></a>\n\n# Step 2: Interfacing with the Einstein Toolkit \\[Back to [top](#toc)\\]\n$$\\label{etk}$$\n", "_____no_output_____" ], [ "<a id='einstein_c'></a>\n\n## Step 2.a: Constructing the Einstein Toolkit C-code calling functions that include the C code kernels. \\[Back to [top](#toc)\\]\n$$\\label{einstein_c}$$\n\nNow that we have generated the C code kernel `GiRaFFE_RHSs.h` and the parameters file `NRPy_params.h`, we will need to write C code to make use of these files. To do this, we can simply follow the example within the [IDScalarWaveNRPy tutorial notebook](Tutorial-ETK_thorn-IDScalarWaveNRPy.ipynb). Functions defined by these files will be called by the Einstein Toolkit scheduler (specified in `schedule.ccl` below).\n\nAlso, we will write the logic that determines which files are called where in order to calculate $B^i$ here. \n\n1. Take the primary finite differencing order $N$ from the `param.ccl` file. Fill in the interior points with the corresponding FD order. \n1. 
Then, for each face, at $0+{\\rm cctk\\_nghostzones[face]}-1$ and ${\\rm cctk\\_lsh[face]}-{\\rm cctk\\_nghostzones[face]}+1$, calculate $B^i$ at order $N-2$\n1. Continue moving outwards: at the points $0+{\\rm cctk\\_nghostzones[face]}-p$ and ${\\rm cctk\\_lsh[face]}-{\\rm cctk\\_nghostzones[face]}+p$, calculate B at order $N-2p$.\n1. When ${\\rm cctk\\_nghostzones[face]}-p = 0$, use the upwinding and downwinding derivatives for the appropriate face.", "_____no_output_____" ] ], [ [ "%%writefile GiRaFFE_HO/src/GiRaFFE.c\n#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#include \"cctk.h\"\n#include \"cctk_Arguments.h\"\n#include \"cctk_Parameters.h\"\n#include \"Symmetry.h\"\n\nconst int MAXFACE = -1;\nconst int NUL = +0;\nconst int MINFACE = +1;\n\n/* TODO: Start using this to directly interface with HydroBase!\n*CCTK_REAL *Bvec0,*Bvec1,*Bvec2;\n*Bvec0 = &Bvec[0*cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]];\n*Bvec1 = &Bvec[1*cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]];\n*Bvec2 = &Bvec[2*cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]];\n*/\n\n// Declare boundary condition FACE_UPDATE function,\n// which fills in the ghost zones with successively\n// lower order finite differencing\nvoid AtoB(const cGH* restrict const cctkGH,const int ORDER, \n const CCTK_REAL *AD0GF,const CCTK_REAL *AD1GF,const CCTK_REAL *AD2GF,\n CCTK_REAL *BU0GF,CCTK_REAL *BU1GF,CCTK_REAL *BU2GF,\n const int i0min, const int i0max, \n const int i1min, const int i1max, \n const int i2min, const int i2max, \n const int FACEX0, const int FACEX1, const int FACEX2) {\n \n DECLARE_CCTK_ARGUMENTS;\n DECLARE_CCTK_PARAMETERS;\n\n const CCTK_REAL invdx0 = 1.0 / (CCTK_DELTA_SPACE(0));\n const CCTK_REAL invdx1 = 1.0 / (CCTK_DELTA_SPACE(1));\n const CCTK_REAL invdx2 = 1.0 / (CCTK_DELTA_SPACE(2));\n const CCTK_REAL *gammaDD00GF = gxx;\n const CCTK_REAL *gammaDD01GF = gxy;\n const CCTK_REAL *gammaDD02GF = gxz;\n const CCTK_REAL *gammaDD11GF = gyy;\n const CCTK_REAL *gammaDD12GF = gyz;\n const CCTK_REAL *gammaDD22GF = gzz;\n\n if(ORDER==8) {\n printf(\"Computing A to B with Order = 8...\\n\");\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {\n #include \"B_from_A_8.h\"\n if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {\n printf(\"i0,i1,i2 = %d,%d,%d\\n\",i0,i1,i2);\n }\n }\n } else if(ORDER==6) {\n printf(\"Computing A to B with Order = 6...\\n\");\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {\n #include \"B_from_A_6.h\"\n if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {\n printf(\"i0,i1,i2 = %d,%d,%d\\n\",i0,i1,i2);\n }\n }\n } else if(ORDER==4) {\n printf(\"Computing A to B with Order = 4...\\n\");\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {\n #include \"B_from_A_4.h\"\n if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {\n printf(\"i0,i1,i2 = %d,%d,%d\\n\",i0,i1,i2);\n }\n }\n } else if(ORDER==2) {\n printf(\"Computing A to B with Order = 2...\\n\");\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {\n #include \"B_from_A_2.h\"\n if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {\n printf(\"i0,i1,i2 = %d,%d,%d\\n\",i0,i1,i2);\n }\n } \n } else if(ORDER==0) {\n if(FACEX0==MAXFACE) {\n printf(\"Computing A to B at x = 
max...\\n\");\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {\n #include \"B_from_A_2x0D.h\"\n if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {\n printf(\"i0,i1,i2 = %d,%d,%d\\n\",i0,i1,i2);\n }\n }\n } else if(FACEX0==MINFACE) {\n printf(\"Computing A to B at x = min...\\n\");\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {\n #include \"B_from_A_2x0U.h\"\n if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {\n printf(\"i0,i1,i2 = %d,%d,%d\\n\",i0,i1,i2);\n }\n }\n } else if(FACEX1==MAXFACE) {\n printf(\"Computing A to B at y = max...\\n\");\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {\n #include \"B_from_A_2x1D.h\"\n if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {\n printf(\"i0,i1,i2 = %d,%d,%d\\n\",i0,i1,i2);\n }\n }\n } else if(FACEX1==MINFACE) {\n printf(\"Computing A to B at y = min...\\n\");\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {\n #include \"B_from_A_2x1U.h\"\n if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {\n printf(\"i0,i1,i2 = %d,%d,%d\\n\",i0,i1,i2);\n }\n }\n } else if(FACEX2==MAXFACE) {\n printf(\"Computing A to B at z = max...\\n\");\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {\n #include \"B_from_A_2x2D.h\"\n if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {\n printf(\"i0,i1,i2 = %d,%d,%d\\n\",i0,i1,i2);\n }\n }\n } else if(FACEX2==MINFACE) {\n printf(\"Computing A to B at z = min...\\n\");\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {\n #include \"B_from_A_2x2U.h\"\n if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {\n printf(\"i0,i1,i2 = %d,%d,%d\\n\",i0,i1,i2);\n }\n }\n } else {\n printf(\"ERROR. FACEX parameters not set properly.\\n\");\n exit(1);\n }\n } else {\n printf(\"ERROR. 
ORDER = %d not supported!\\n\",ORDER);\n exit(1);\n }\n}\n\nvoid driver_A_to_B(CCTK_ARGUMENTS) {\n \n DECLARE_CCTK_ARGUMENTS;\n DECLARE_CCTK_PARAMETERS;\n \n const int *NG = cctk_nghostzones;\n const int *Nx = cctk_lsh;\n CCTK_INT ORDER = NG[0]*2;\n for(int ii=0;ii<cctk_lsh[2]*cctk_lsh[1]*cctk_lsh[0];ii++) {\n BU0[ii] = 1.0 / 0.0;\n BU1[ii] = 1.0 / 0.0;\n BU2[ii] = 1.0 / 0.0;\n }\n\n printf(\"Starting A to B driver with Order = %d...\\n\",ORDER);\n AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, NG[0],Nx[0]-NG[0],NG[1],Nx[1]-NG[1],NG[2],Nx[2]-NG[2], NUL,NUL,NUL);\n int imin[3] = { NG[0], NG[1], NG[2] };\n int imax[3] = { Nx[0]-NG[0], Nx[1]-NG[1], Nx[2]-NG[2] };\n while(ORDER>0) {\n // After updating each face, adjust imin[] and imax[] \n // to reflect the newly-updated face extents.\n ORDER -= 2;\n AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); \n if(ORDER!=0) imin[0]--;\n AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); \n if(ORDER!=0) imax[0]++;\n\n AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); \n if(ORDER!=0) imin[1]--;\n AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); \n if(ORDER!=0) imax[1]++;\n\n AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); \n if(ORDER!=0) imin[2]--;\n AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); \n if(ORDER!=0) imax[2]++;\n }\n}\n\nvoid GiRaFFE_HO_calc_prereqs(const cGH* restrict const cctkGH,const int *cctk_lsh,const int *cctk_nghostzones,\n const CCTK_REAL invdx0,const CCTK_REAL invdx1,const CCTK_REAL invdx2,\n const CCTK_REAL *alphaGF, const CCTK_REAL *betaU0GF, const CCTK_REAL *betaU1GF, const CCTK_REAL *betaU2GF,\n const CCTK_REAL *gammaDD00GF,const CCTK_REAL *gammaDD01GF,const CCTK_REAL *gammaDD02GF,const CCTK_REAL *gammaDD11GF,const CCTK_REAL *gammaDD12GF,const CCTK_REAL *gammaDD22GF,\n const CCTK_REAL *ValenciavU0GF, const CCTK_REAL *ValenciavU1GF, const CCTK_REAL *ValenciavU2GF, const CCTK_REAL *u4upperZeroGF,\n const CCTK_REAL *AD0GF,const CCTK_REAL *AD1GF,const CCTK_REAL *AD2GF,const CCTK_REAL *psi6PhiGF,\n const CCTK_REAL *BU0GF,const CCTK_REAL *BU1GF,const CCTK_REAL *BU2GF,\n CCTK_REAL *gammaUU00GF,CCTK_REAL *gammaUU01GF,CCTK_REAL *gammaUU02GF,CCTK_REAL *gammaUU11GF,CCTK_REAL *gammaUU12GF,CCTK_REAL *gammaUU22GF,CCTK_REAL *gammadetGF,\n CCTK_REAL *SevolParenUD00GF,CCTK_REAL *SevolParenUD01GF,CCTK_REAL *SevolParenUD02GF,\n CCTK_REAL *SevolParenUD10GF,CCTK_REAL *SevolParenUD11GF,CCTK_REAL *SevolParenUD12GF,\n CCTK_REAL *SevolParenUD20GF,CCTK_REAL *SevolParenUD21GF,CCTK_REAL *SevolParenUD22GF,\n CCTK_REAL *AevolParenGF,CCTK_REAL *PevolParenU0GF,CCTK_REAL *PevolParenU1GF,CCTK_REAL *PevolParenU2GF) {\n\n DECLARE_CCTK_PARAMETERS;\n\n#include \"Prereqs.h\" \n\n}\n\nvoid GiRaFFE_HO_calc_rhs(const cGH* restrict const cctkGH,const int *cctk_lsh,const int *cctk_nghostzones,\n const CCTK_REAL invdx0,const CCTK_REAL invdx1,const CCTK_REAL invdx2,\n const CCTK_REAL *alphaGF, const CCTK_REAL *betaU0GF, const CCTK_REAL *betaU1GF, const CCTK_REAL *betaU2GF,\n const CCTK_REAL *gammaDD00GF,const CCTK_REAL *gammaDD01GF,const CCTK_REAL *gammaDD02GF,const CCTK_REAL *gammaDD11GF,const CCTK_REAL *gammaDD12GF,const CCTK_REAL *gammaDD22GF,\n const CCTK_REAL *gammaUU00GF,const 
CCTK_REAL *gammaUU01GF,const CCTK_REAL *gammaUU02GF,const CCTK_REAL *gammaUU11GF,const CCTK_REAL *gammaUU12GF,const CCTK_REAL *gammaUU22GF,const CCTK_REAL *gammadetGF,\n const CCTK_REAL *ValenciavU0GF, const CCTK_REAL *ValenciavU1GF, const CCTK_REAL *ValenciavU2GF, const CCTK_REAL *u4upperZeroGF,\n const CCTK_REAL *AD0GF,const CCTK_REAL *AD1GF,const CCTK_REAL *AD2GF,const CCTK_REAL *psi6PhiGF,\n const CCTK_REAL *BU0GF,const CCTK_REAL *BU1GF,const CCTK_REAL *BU2GF,\n const CCTK_REAL *SevolParenUD00GF,const CCTK_REAL *SevolParenUD01GF,const CCTK_REAL *SevolParenUD02GF,\n const CCTK_REAL *SevolParenUD10GF,const CCTK_REAL *SevolParenUD11GF,const CCTK_REAL *SevolParenUD12GF,\n const CCTK_REAL *SevolParenUD20GF,const CCTK_REAL *SevolParenUD21GF,const CCTK_REAL *SevolParenUD22GF,\n const CCTK_REAL *AevolParenGF,const CCTK_REAL *PevolParenU0GF,const CCTK_REAL *PevolParenU1GF,const CCTK_REAL *PevolParenU2GF,\n CCTK_REAL *Stilde_rhsD0GF, CCTK_REAL *Stilde_rhsD1GF, CCTK_REAL *Stilde_rhsD2GF,\n CCTK_REAL *A_rhsD0GF, CCTK_REAL *A_rhsD1GF, CCTK_REAL *A_rhsD2GF, CCTK_REAL *psi6Phi_rhsGF) {\n DECLARE_CCTK_PARAMETERS;\n\n#include \"Conservs.h\"\n\n}\n\nvoid calc_u0(const CCTK_REAL alpha,const CCTK_INT idx,\n const CCTK_REAL gammaDD00,const CCTK_REAL gammaDD01,const CCTK_REAL gammaDD02,const CCTK_REAL gammaDD11,const CCTK_REAL gammaDD12,const CCTK_REAL gammaDD22,\n CCTK_REAL *ValenciavU0GF,CCTK_REAL *ValenciavU1GF,CCTK_REAL *ValenciavU2GF,CCTK_REAL *u0GF)\n{\n DECLARE_CCTK_PARAMETERS;\n CCTK_REAL u0;\n CCTK_REAL ValenciavU0 = ValenciavU0GF[idx];\n CCTK_REAL ValenciavU1 = ValenciavU1GF[idx];\n CCTK_REAL ValenciavU2 = ValenciavU2GF[idx];\n\n#include \"computeu0_Cfunction.h\"\n\n u0GF[idx] = u0;\n ValenciavU0GF[idx] = ValenciavU0;\n ValenciavU1GF[idx] = ValenciavU1;\n ValenciavU2GF[idx] = ValenciavU2;\n}\n\nvoid GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS(CCTK_ARGUMENTS) {\n DECLARE_CCTK_ARGUMENTS;\n DECLARE_CCTK_PARAMETERS;\n \n const CCTK_REAL invdx0 = 1.0 / (CCTK_DELTA_SPACE(0));\n const CCTK_REAL invdx1 = 1.0 / (CCTK_DELTA_SPACE(1));\n const CCTK_REAL invdx2 = 1.0 / (CCTK_DELTA_SPACE(2));\n \n#pragma omp parallel for\n for(int i2=0; i2<cctk_lsh[2]; i2++) {\n for(int i1=0; i1<cctk_lsh[1]; i1++) {\n for(int i0=0; i0<cctk_lsh[0]; i0++) {\n const CCTK_INT idx = CCTK_GFINDEX3D(cctkGH, i0,i1,i2);\n calc_u0(alp[idx],idx,\n gxx[idx],gxy[idx],gxz[idx],gyy[idx],gyz[idx],gzz[idx],\n ValenciavU0,ValenciavU1,ValenciavU2,u4upperZero);\n }\n }\n }\n\n GiRaFFE_HO_calc_prereqs(cctkGH,cctk_lsh,cctk_nghostzones,\n invdx0, invdx1, invdx2,\n alp, betax, betay, betaz,\n gxx, gxy, gxz, gyy, gyz, gzz,\n ValenciavU0, ValenciavU1, ValenciavU2, u4upperZero,\n AD0, AD1, AD2, psi6Phi,\n BU0, BU1, BU2,\n gammaUU00,gammaUU01,gammaUU02,gammaUU11,gammaUU12,gammaUU22,gammadet,\n SevolParenUD00, SevolParenUD01, SevolParenUD02,\n SevolParenUD10, SevolParenUD11, SevolParenUD12,\n SevolParenUD20, SevolParenUD21, SevolParenUD22,\n AevolParen, PevolParenU0, PevolParenU1, PevolParenU2);\n}\n\nvoid GiRaFFE_HO_set_rhs(CCTK_ARGUMENTS) {\n DECLARE_CCTK_ARGUMENTS;\n DECLARE_CCTK_PARAMETERS;\n \n const CCTK_REAL invdx0 = 1.0 / (CCTK_DELTA_SPACE(0));\n const CCTK_REAL invdx1 = 1.0 / (CCTK_DELTA_SPACE(1));\n const CCTK_REAL invdx2 = 1.0 / (CCTK_DELTA_SPACE(2));\n \n GiRaFFE_HO_calc_rhs(cctkGH,cctk_lsh,cctk_nghostzones,\n invdx0, invdx1, invdx2,\n alp, betax, betay, betaz,\n gxx, gxy, gxz, gyy, gyz, gzz,\n gammaUU00,gammaUU01,gammaUU02,gammaUU11,gammaUU12,gammaUU22,gammadet,\n ValenciavU0, ValenciavU1, ValenciavU2, u4upperZero,\n AD0, AD1, AD2, 
psi6Phi,\n BU0, BU1, BU2,\n SevolParenUD00, SevolParenUD01, SevolParenUD02,\n SevolParenUD10, SevolParenUD11, SevolParenUD12,\n SevolParenUD20, SevolParenUD21, SevolParenUD22,\n AevolParen, PevolParenU0, PevolParenU1, PevolParenU2,\n Stilde_rhsD0, Stilde_rhsD1, Stilde_rhsD2,\n A_rhsD0, A_rhsD1, A_rhsD2, psi6Phi_rhs);\n}\n\n/* Boundary Condition code adapted from WaveToyC thorn in ETK, implementing built-in\n * ETK BC functionality\n */\nvoid GiRaFFE_HO_SelectBCs(CCTK_ARGUMENTS)\n{\n DECLARE_CCTK_ARGUMENTS;\n DECLARE_CCTK_PARAMETERS;\n\n const char *bctype;\n\n\n bctype = NULL;\n if (CCTK_EQUALS(bound,\"flat\") || CCTK_EQUALS(bound,\"static\") ||\n CCTK_EQUALS(bound,\"radiation\") || CCTK_EQUALS(bound,\"robin\") ||\n CCTK_EQUALS(bound,\"none\"))\n {\n bctype = bound;\n }\n else if (CCTK_EQUALS(bound,\"zero\"))\n {\n bctype = \"scalar\";\n }\n\n /* Uses all default arguments, so invalid table handle -1 can be passed */\n if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,\n \"GiRaFFE_HO::StildeD0\", bctype) < 0)\n {\n CCTK_WARN (0, \"GiRaFFE_HO_Boundaries: Error selecting boundary condition\");\n }\n if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,\n \"GiRaFFE_HO::StildeD1\", bctype) < 0)\n {\n CCTK_WARN (0, \"GiRaFFE_HO_Boundaries: Error selecting boundary condition\");\n }\n if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,\n \"GiRaFFE_HO::StildeD2\", bctype) < 0)\n {\n CCTK_WARN (0, \"GiRaFFE_HO_Boundaries: Error selecting boundary condition\");\n }\n if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,\n \"GiRaFFE_HO::AD0\", bctype) < 0)\n {\n CCTK_WARN (0, \"GiRaFFE_HO_Boundaries: Error selecting boundary condition\");\n }\n if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,\n \"GiRaFFE_HO::AD1\", bctype) < 0)\n {\n CCTK_WARN (0, \"GiRaFFE_HO_Boundaries: Error selecting boundary condition\");\n }\n if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,\n \"GiRaFFE_HO::AD2\", bctype) < 0)\n {\n CCTK_WARN (0, \"GiRaFFE_HO_Boundaries: Error selecting boundary condition\");\n }\n if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,\n \"GiRaFFE_HO::psi6Phi\", bctype) < 0)\n {\n CCTK_WARN (0, \"GiRaFFE_HO_Boundaries: Error selecting boundary condition\");\n }\n}\n\nvoid GiRaFFE_HO_InitSymBound(CCTK_ARGUMENTS)\n{\n DECLARE_CCTK_ARGUMENTS;\n \n int sym[3];\n\n sym[0] = 1;\n sym[1] = 1;\n sym[2] = 1;\n\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::uU0\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::uU1\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::uU2\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::u0alpha\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::alpsqrtgam\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::AevolParen\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::PevolParenU0\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::PevolParenU1\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::PevolParenU2\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::StildeD0\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::StildeD1\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::StildeD2\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::AD0\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::AD1\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::AD2\");\n SetCartSymVN(cctkGH, sym,\"GiRaFFE_HO::psi6Phi\");\n\n return;\n}\n\nvoid GiRaFFE_HO_RegisterVars(CCTK_ARGUMENTS)\n{\n DECLARE_CCTK_ARGUMENTS;\n DECLARE_CCTK_PARAMETERS;\n \n CCTK_INT ierr CCTK_ATTRIBUTE_UNUSED = 0;\n /* Register all the evolved grid functions with MoL */\n ierr += 
MoLRegisterEvolved(CCTK_VarIndex(\"GiRaFFE_HO::StildeD0\"), CCTK_VarIndex(\"GiRaFFE_HO::Stilde_rhsD0\"));\n ierr += MoLRegisterEvolved(CCTK_VarIndex(\"GiRaFFE_HO::StildeD1\"), CCTK_VarIndex(\"GiRaFFE_HO::Stilde_rhsD1\"));\n ierr += MoLRegisterEvolved(CCTK_VarIndex(\"GiRaFFE_HO::StildeD2\"), CCTK_VarIndex(\"GiRaFFE_HO::Stilde_rhsD2\"));\n ierr += MoLRegisterEvolved(CCTK_VarIndex(\"GiRaFFE_HO::AD0\"), CCTK_VarIndex(\"GiRaFFE_HO::A_rhsD0\"));\n ierr += MoLRegisterEvolved(CCTK_VarIndex(\"GiRaFFE_HO::AD1\"), CCTK_VarIndex(\"GiRaFFE_HO::A_rhsD1\"));\n ierr += MoLRegisterEvolved(CCTK_VarIndex(\"GiRaFFE_HO::AD2\"), CCTK_VarIndex(\"GiRaFFE_HO::A_rhsD2\"));\n ierr += MoLRegisterEvolved(CCTK_VarIndex(\"GiRaFFE_HO::psi6Phi\"), CCTK_VarIndex(\"GiRaFFE_HO::psi6Phi_rhs\"));\n /* Register all the evolved Array functions with MoL */\n return;\n}", "Overwriting GiRaFFE_HO/src/GiRaFFE.c\n" ] ], [ [ "<a id='conservative2primitive'></a>\n\n## Step 2.b: The Conservative to Primitive Solver \\[Back to [top](#toc)\\]\n$$\\label{conservative2primitive}$$\n\nWe will also need to use the conservative to primitive solver from the old version of $\\texttt{GiRaFFE}$, included here for convenience. It is slightly modified to use our new variable names and to otherwise work with the slightly different scheduler. It also computes the inverse and determinant of the three metric.", "_____no_output_____" ] ], [ [ "%%writefile GiRaFFE_HO/src/driver_conserv_to_prims_FFE.C\n/* We evolve forward in time a set of functions called the \n * \"conservative variables\" (magnetic field and Poynting vector), \n * and any time the conserv's are updated, we must recover the \n * primitive variables (velocities), before reconstructing & evaluating \n * the RHSs of the MHD equations again. \n *\n * This file contains the routine for this algebraic calculation. \n * The velocity is calculated with formula (85), arXiv:1310.3274v2\n * $v^i = 4 \\pi \\alpha \\gamma^{ij} {\\tilde S}_j \\gamma{-1/2} B^{-2} - \\beta^i$ \n * The force-free condition: $B^2>E^2$ is checked before computing the velocity.\n * and after imposing the constraint ${\\tilde B}^i {\\tilde S}_i = 0$\n \n * The procedure is as described in arXiv:1310.3274v2: \n * 1. ${\\tilde S}_i ->{\\tilde S}_i - ({\\tilde S}_j {\\tilde B}^j) {\\tilde B}^i/{\\tilde B}^2$\n * 2. $f = \\sqrt{(1-\\gamma_{max}^{-2}){\\tilde B}^4/(16 \\pi^2 \\gamma {\\tilde S}^2)}$ \n * 3. ${\\tilde S}_i -> {\\tilde S}_i min(1,f)\n * 4. $v^i = 4 \\pi \\alpha \\gamma^{ij} {\\tilde S}_j \\gamma{-1/2} B^{-2} - \\beta^i$\n * 5. 
${\\tilde n}_i v^i = 0$\n *\n * All equations are from: http://arxiv.org/pdf/1310.3274.pdf (v2)\n * */\n\n#include \"cctk.h\"\n#include <iostream>\n#include <iomanip>\n#include <fstream>\n#include <sys/time.h>\n#include <cmath>\n#include <ctime>\n#include <cstdlib>\n#include \"cctk_Arguments.h\"\n#include \"cctk_Parameters.h\"\n#include \"Symmetry.h\"\n\n#ifndef M_PI\n#define M_PI 3.141592653589793238463\n#endif\n\n#include \"GiRaFFE_headers.h\"\n//#include \"inlined_functions.C\"\n\nvoid GiRaFFE_HO_update_metric_det_inverse(const cGH* restrict const cctkGH,const int *cctk_lsh,const int *cctk_nghostzones,\n const CCTK_REAL *gammaDD00GF,const CCTK_REAL *gammaDD01GF,const CCTK_REAL *gammaDD02GF,const CCTK_REAL *gammaDD11GF,const CCTK_REAL *gammaDD12GF,const CCTK_REAL *gammaDD22GF,\n CCTK_REAL *gammaUU00GF,CCTK_REAL *gammaUU01GF,CCTK_REAL *gammaUU02GF,CCTK_REAL *gammaUU11GF,CCTK_REAL *gammaUU12GF,CCTK_REAL *gammaUU22GF,CCTK_REAL *gammadetGF) {\n\n#include \"metric_quantities.h\"\n\n}\n\nextern \"C\" void GiRaFFE_HO_conserv_to_prims_FFE(CCTK_ARGUMENTS) {\n printf(\"Starting conservative-to-primitive solver...\\n\");\n DECLARE_CCTK_ARGUMENTS;\n DECLARE_CCTK_PARAMETERS;\n\n // We use proper C++ here, for file I/O later.\n using namespace std;\n\n const int imin=0,jmin=0,kmin=0;\n const int imax=cctk_lsh[0],jmax=cctk_lsh[1],kmax=cctk_lsh[2];\n \n const CCTK_REAL dz = CCTK_DELTA_SPACE(2);\n\n CCTK_REAL error_int_numer=0,error_int_denom=0;\n\n CCTK_INT num_vel_limits=0,num_vel_nulls_current_sheet=0;\n\n GiRaFFE_HO_update_metric_det_inverse(cctkGH,cctk_lsh,cctk_nghostzones,\n gxx, gxy, gxz, gyy, gyz, gzz,\n gammaUU00,gammaUU01,gammaUU02,gammaUU11,gammaUU12,gammaUU22,gammadet);\n\n#pragma omp parallel for reduction(+:error_int_numer,error_int_denom,num_vel_limits,num_vel_nulls_current_sheet) schedule(static)\n for(int k=kmin;k<kmax;k++)\n for(int j=jmin;j<jmax;j++)\n for(int i=imin;i<imax;i++) {\n const int index = CCTK_GFINDEX3D(cctkGH,i,j,k);\n \n const CCTK_REAL rL = r[index];\n if(rL>min_radius_inside_of_which_conserv_to_prims_FFE_and_FFE_evolution_is_DISABLED) {\n\n const CCTK_REAL sqrtg = sqrt(gammadet[index]); // Determinant of 3-metric\n\n // \\gamma_{ij}, computed from \\tilde{\\gamma}_{ij}\n const CCTK_REAL gxxL = gxx[index];\n const CCTK_REAL gxyL = gxy[index];\n const CCTK_REAL gxzL = gxz[index];\n const CCTK_REAL gyyL = gyy[index];\n const CCTK_REAL gyzL = gyz[index];\n const CCTK_REAL gzzL = gzz[index];\n\n // \\gamma^{ij} = psim4 * \\tilde{\\gamma}^{ij}\n const CCTK_REAL gupxxL = gammaUU00[index];\n const CCTK_REAL gupxyL = gammaUU01[index];\n const CCTK_REAL gupxzL = gammaUU02[index];\n const CCTK_REAL gupyyL = gammaUU11[index];\n const CCTK_REAL gupyzL = gammaUU12[index];\n const CCTK_REAL gupzzL = gammaUU22[index];\n\n // Read in magnetic field and momentum variables once from memory, since memory access is expensive:\n const CCTK_REAL BU0L = BU0[index];\n const CCTK_REAL BU1L = BU1[index];\n const CCTK_REAL BU2L = BU2[index];\n\n // End of page 7 on http://arxiv.org/pdf/1310.3274.pdf\n const CCTK_REAL BtildexL = BU0L*sqrtg;\n const CCTK_REAL BtildeyL = BU1L*sqrtg;\n const CCTK_REAL BtildezL = BU2L*sqrtg;\n\n const CCTK_REAL Btilde_xL = gxxL*BtildexL + gxyL*BtildeyL + gxzL*BtildezL;\n const CCTK_REAL Btilde_yL = gxyL*BtildexL + gyyL*BtildeyL + gyzL*BtildezL;\n const CCTK_REAL Btilde_zL = gxzL*BtildexL + gyzL*BtildeyL + gzzL*BtildezL;\n\n CCTK_REAL StildeD0L = StildeD0[index];\n CCTK_REAL StildeD1L = StildeD1[index];\n CCTK_REAL StildeD2L = StildeD2[index];\n\n if(i==114 && j==114 && 
k==114) {\n printf(\"gupxxL = %.15e\\nStildeD0L = %.15e\\ngupxyL = %.15e\\nStildeD1L = %.15e\\ngupxzL = %.15e\\nStildeD2L = %.15e\\n\",gupxxL,StildeD0L,gupxyL,StildeD1L,gupxzL,StildeD2L);\n }\n\n const CCTK_REAL StildeD0_orig = StildeD0L;\n const CCTK_REAL StildeD1_orig = StildeD1L;\n const CCTK_REAL StildeD2_orig = StildeD2L;\n\n const CCTK_REAL ValenciavU0_orig = ValenciavU0[index];\n const CCTK_REAL ValenciavU1_orig = ValenciavU1[index];\n const CCTK_REAL ValenciavU2_orig = ValenciavU2[index];\n\n //const CCTK_REAL alpL = alp[index];\n //const CCTK_REAL fourpialpha = 4.0*M_PI*alpL;\n const CCTK_REAL fourpi = 4.0*M_PI;\n\n //const CCTK_REAL betaxL = betax[index];\n //const CCTK_REAL betayL = betay[index];\n //const CCTK_REAL betazL = betaz[index];\n\n //* 1. Just below Eq 90: Enforce orthogonality of B^i & S^i, so that B^i S_i = 0\n //* Correction ${\\tilde S}_i ->{\\tilde S}_i - ({\\tilde S}_j {\\tilde B}^j) {\\tilde B}_i/{\\tilde B}^2$\n //* NOTICE THAT THE {\\tilde B}_i IS LOWERED, AS IT SHOULD BE. THIS IS A TYPO IN PASCHALIDIS ET AL.\n\n // First compute Btilde^i Stilde_i:\n const CCTK_REAL BtildeiSt_i = StildeD0L*BtildexL + StildeD1L*BtildeyL + StildeD2L*BtildezL;\n //printf(\"xterm = %f ; yterm = %f ; zterm = %f\\n\",StildeD0L*BtildexL,StildeD1L*BtildeyL,StildeD2L*BtildezL);\n\n // Then compute (Btilde)^2\n const CCTK_REAL Btilde2 = gxxL*BtildexL*BtildexL + gyyL*BtildeyL*BtildeyL + gzzL*BtildezL*BtildezL\n + 2.0*(gxyL*BtildexL*BtildeyL + gxzL*BtildexL*BtildezL + gyzL*BtildeyL*BtildezL);\n\n#define APPLY_GRFFE_FIXES\n\n // Now apply constraint: Stilde_i = Stilde_i - (Btilde^i Stilde_i) / (Btilde)^2\n#ifdef APPLY_GRFFE_FIXES\n StildeD0L -= BtildeiSt_i*Btilde_xL/Btilde2;\n StildeD1L -= BtildeiSt_i*Btilde_yL/Btilde2;\n StildeD2L -= BtildeiSt_i*Btilde_zL/Btilde2;\n //printf(\"BtildeiSt_i = %f ; Btilde2 = %f\\n\",BtildeiSt_i,Btilde2);\n#endif\n // Now that tildeS_i has been fixed, let's compute tildeS^i:\n CCTK_REAL mhd_st_upx = gupxxL*StildeD0L + gupxyL*StildeD1L + gupxzL*StildeD2L;\n CCTK_REAL mhd_st_upy = gupxyL*StildeD0L + gupyyL*StildeD1L + gupyzL*StildeD2L;\n CCTK_REAL mhd_st_upz = gupxzL*StildeD0L + gupyzL*StildeD1L + gupzzL*StildeD2L;\n\n // Just below Eq. 86 in http://arxiv.org/pdf/1310.3274.pdf:\n CCTK_REAL St2 = StildeD0L*mhd_st_upx + StildeD1L*mhd_st_upy + StildeD2L*mhd_st_upz;\n\n //* 2. Eq. 92: Factor $f = \\sqrt{(1-\\gamma_{max}^{-2}){\\tilde B}^4/(16 \\pi^2 \\gamma {\\tilde S}^2)}$ \n\n#ifdef APPLY_GRFFE_FIXES\n const CCTK_REAL gmax = GAMMA_SPEED_LIMIT;\n if(St2 > (1.0 - 1.0/(gmax*gmax))*Btilde2*Btilde2/ (16.0*M_PI*M_PI*sqrtg*sqrtg)) {\n const CCTK_REAL fact = sqrt((1.0 - 1.0/(gmax*gmax))/St2)*Btilde2/(4.0*M_PI*sqrtg);\n\n //* 3. 
${\\tilde S}_i -> {\\tilde S}_i min(1,f)\n StildeD0L *= MIN(1.0,fact);\n StildeD1L *= MIN(1.0,fact);\n StildeD2L *= MIN(1.0,fact);\n\n // Recompute S^i\n mhd_st_upx = gupxxL*StildeD0L + gupxyL*StildeD1L + gupxzL*StildeD2L;\n mhd_st_upy = gupxyL*StildeD0L + gupyyL*StildeD1L + gupyzL*StildeD2L;\n mhd_st_upz = gupxzL*StildeD0L + gupyzL*StildeD1L + gupzzL*StildeD2L;\n /*\n printf(\"%e %e %e | %e %e %e | %e %e %e | oldgamma: %e %e should be > %e vfix\\n\",x[index],y[index],z[index],\n BU0L,BU1L,BU2L,\n St2,(1.0 - 1.0/(gmax*gmax))*Btilde2*Btilde2/ (16.0*M_PI*M_PI*sqrtg*sqrtg),gmax,\n sqrt(Btilde2 / (Btilde2 - 16*M_PI*M_PI*sqrtg*sqrtg * St2 / Btilde2) ) , Btilde2,16*M_PI*M_PI*sqrtg*sqrtg * St2 / Btilde2 );\n //exit(1);\n */\n // Recompute Stilde^2:\n St2 = StildeD0L*mhd_st_upx + StildeD1L*mhd_st_upy + StildeD2L*mhd_st_upz;\n\n if( St2 >= Btilde2*Btilde2/ (16.0*M_PI*M_PI*sqrtg*sqrtg) ) {\n printf(\"ERROR: Velocity cap fix wasn't effective; still have B^2 > E^2\\n\"); exit(1);\n }\n num_vel_limits++;\n }\n#endif\n\n //* 4. Eq. 85: $v^i = 4 pi \\alpha \\gamma^{ij} {\\tilde S}_j \\gamma{-1/2} B^{-2} - \\beta^i$: \n\n // See, e.g., Eq 71 in http://arxiv.org/pdf/1310.3274.pdf\n // ... or end of page 7 on http://arxiv.org/pdf/1310.3274.pdf:\n const CCTK_REAL B2 = Btilde2/(sqrtg*sqrtg);\n /* \n Eq. 75: \n v^i = \\alpha \\gamma^{ij} S_j / \\mathcal{B}^2 - \\beta^i\n Eq. 7: \\mathcal{B}^{\\mu} = B^{\\mu}/\\sqrt{4 \\pi}\n -> v^i = 4 \\pi \\alpha \\gamma^{ij} S_j / B^2 - \\beta^i\n Eq. 79: \\tilde{S_i} = \\sqrt{\\gamma} S_i\n -> v^i = 4 \\pi \\alpha \\gamma^{ij} \\tilde{S}_j / (\\sqrt{\\gamma} B^2) - \\beta^i\n */\n // Modified from the original GiRaFFE to use Valencia, not drift velocity\n const CCTK_REAL ValenciavU0L = fourpi*mhd_st_upx/(sqrtg*B2);\n const CCTK_REAL ValenciavU1L = fourpi*mhd_st_upy/(sqrtg*B2);\n /* ValenciavU2L not necessarily const! See below. */\n CCTK_REAL ValenciavU2L = fourpi*mhd_st_upz/(sqrtg*B2);\n \n //* 5. Eq. 94: ${\\tilde n}_i v^i = 0$ in the current sheet region\n // n^i is defined as the normal from the current sheet, which lies in the \n // xy-plane (z=0). So n = (0,0,1) \n#ifdef APPLY_GRFFE_FIXES\n if(current_sheet_null_v) {\n CCTK_REAL zL = z[index];\n if (fabs(zL) <= (4.0 + 1.0e-2)*dz ) {\n //ValenciavU2L = 0.0;\n ValenciavU2L = - (ValenciavU0L*gxzL + ValenciavU1L*gyzL) / gzzL;\n // FIXME: This is probably not right, but also definitely not the problem. \n \n // ValenciavU2L reset: TYPICALLY WOULD RESET CONSERVATIVES TO BE CONSISTENT. 
LET'S NOT DO THAT, TO AVOID MESSING UP B-FIELDS\n\n if(1==1) {\n CCTK_REAL PRIMS[MAXNUMVARS];\n int ww=0;\n PRIMS[ww] = ValenciavU0L; ww++;\n PRIMS[ww] = ValenciavU1L; ww++;\n PRIMS[ww] = ValenciavU2L; ww++;\n PRIMS[ww] = BU0L; ww++;\n PRIMS[ww] = BU1L; ww++;\n PRIMS[ww] = BU2L; ww++;\n\n CCTK_REAL METRIC[NUMVARS_FOR_METRIC],dummy=0;\n ww=0;\n // FIXME: NECESSARY?\n //psi_bssn[index] = exp(phi[index]);\n METRIC[ww] = (1.0/12.0) * log(gammadet[index]);ww++;\n METRIC[ww] = dummy; ww++; // Don't need to set psi.\n METRIC[ww] = gxx[index]; ww++;\n METRIC[ww] = gxy[index]; ww++;\n METRIC[ww] = gxz[index]; ww++;\n METRIC[ww] = gyy[index]; ww++;\n METRIC[ww] = gyz[index]; ww++;\n METRIC[ww] = gzz[index]; ww++;\n METRIC[ww] = alp[index]-1; ww++;\n METRIC[ww] = betax[index]; ww++;\n METRIC[ww] = betay[index]; ww++;\n METRIC[ww] = betaz[index]; ww++;\n METRIC[ww] = gammaUU00[index]; ww++;\n METRIC[ww] = gammaUU01[index]; ww++;\n METRIC[ww] = gammaUU02[index]; ww++;\n METRIC[ww] = gammaUU11[index]; ww++;\n METRIC[ww] = gammaUU12[index]; ww++;\n METRIC[ww] = gammaUU22[index]; ww++;\n\n CCTK_REAL CONSERVS[NUM_CONSERVS] = {0.0, 0.0, 0.0}; // 3 conservative variables: Stilde_x, Stilde_y, Stilde_z\n GiRaFFE_HO_compute_conservatives(PRIMS,METRIC, CONSERVS);\n\n StildeD0L = CONSERVS[STILDEX];\n StildeD1L = CONSERVS[STILDEY];\n StildeD2L = CONSERVS[STILDEZ];\n }\n num_vel_nulls_current_sheet++;\n }\n }\n#endif\n ValenciavU0[index] = ValenciavU0L;\n ValenciavU1[index] = ValenciavU1L; \n ValenciavU2[index] = ValenciavU2L; \n \n //Now we compute the difference between original & new conservatives, for diagnostic purposes:\n //error_int_numer += fabs(StildeD0L - StildeD0_orig) + fabs(StildeD1L - StildeD1_orig) + fabs(StildeD2L - StildeD2_orig);\n //error_int_denom += fabs(StildeD0_orig) + fabs(StildeD1_orig) + fabs(StildeD2_orig);\n /*\n if(fabs(ValenciavU0_orig) > 1e-13 && fabs(ValenciavU0L-ValenciavU0_orig)/ValenciavU0_orig > 1e-2) printf(\"BAD ValenciavU0: %e %e | %e %e %e\\n\",ValenciavU0L,ValenciavU0_orig,x[index],y[index],z[index]);\n if(fabs(ValenciavU1_orig) > 1e-13 && fabs(ValenciavU1L-ValenciavU1_orig)/ValenciavU1_orig > 1e-2) printf(\"BAD ValenciavU1: %e %e | %e %e %e\\n\",ValenciavU1L,ValenciavU1_orig,x[index],y[index],z[index]);\n if(fabs(ValenciavU2_orig) > 1e-13 && fabs(ValenciavU2L-ValenciavU2_orig)/ValenciavU2_orig > 1e-2) printf(\"BAD ValenciavU2: %e %e | %e %e %e\\n\",ValenciavU2L,ValenciavU2_orig,x[index],y[index],z[index]);\n */\n error_int_numer += fabs(ValenciavU0L - ValenciavU0_orig) + fabs(ValenciavU1L - ValenciavU1_orig) + fabs(ValenciavU2L - ValenciavU2_orig);\n error_int_denom += fabs(ValenciavU0_orig) + fabs(ValenciavU1_orig) + fabs(ValenciavU2_orig);\n \n\n\n StildeD0[index] = StildeD0L;\n StildeD1[index] = StildeD1L;\n StildeD2[index] = StildeD2L;\n }\n }\n\n if (cctk_time==0) {\n /* Write the initial data to separate, dedicated gridfunctions:\n These will be accessed later by VolumeIntegrals_GRMHD to assess convergence. 
*/ \n #pragma omp parallel for\n for(int i2=0; i2<cctk_lsh[2]; i2++) {\n for(int i1=0; i1<cctk_lsh[1]; i1++) {\n for(int i0=0; i0<cctk_lsh[0]; i0++) {\n CCTK_INT idx3 = CCTK_GFINDEX3D(cctkGH, i0,i1,i2);\n AD0_init[idx3] = AD0[idx3];\n AD1_init[idx3] = AD1[idx3];\n AD2_init[idx3] = AD2[idx3];\n psi6Phi_init[idx3] = psi6Phi[idx3];\n CCTK_REAL lapse = alp[idx3];\n CCTK_REAL betaxL = betax[idx3];\n CCTK_REAL betayL = betay[idx3];\n CCTK_REAL betazL = betaz[idx3];\n ValenciavU0_init[idx3] = lapse*ValenciavU0[idx3]-betaxL;\n ValenciavU1_init[idx3] = lapse*ValenciavU1[idx3]-betayL;\n ValenciavU2_init[idx3] = lapse*ValenciavU2[idx3]-betazL;\n BU0_init[idx3] = StildeD0[idx3];\n BU1_init[idx3] = StildeD1[idx3];\n BU2_init[idx3] = StildeD2[idx3];\n }\n }\n }\n }\n CCTK_VInfo(CCTK_THORNSTRING,\"FFEC2P: Lev: %d NumPts= %d | Error: %.3e, ErrDenom: %.3e, v_limits: %d / %d = %.3e, v_nulls: %d / %d = %.3e\",\n (int)GetRefinementLevel(cctkGH),\n cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2],\n error_int_numer/(error_int_denom+1e-300),error_int_denom,\n /**/ num_vel_limits, cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2],\n (CCTK_REAL)num_vel_limits/((CCTK_REAL)cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]),\n /**/ num_vel_nulls_current_sheet, cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2],\n (CCTK_REAL)num_vel_nulls_current_sheet/((CCTK_REAL)cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]));\n}", "Overwriting GiRaFFE_HO/src/driver_conserv_to_prims_FFE.C\n" ] ], [ [ "<a id='old_giraffe'></a>\n\n### Step 2.b.i: Including `GiRaFFE_headers.h` from old version of GiRaFFE \\[Back to [top](#toc)\\]\n$$\\label{old_giraffe}$$\n\nWe will also include `GiRaFFE_headers.h` from the old version of GiRaFFE, which defines constants on which our conservative-to-primitive solver depends.", "_____no_output_____" ] ], [ [ "%%writefile GiRaFFE_HO/src/GiRaFFE_headers.h\n// To safeguard against double-including this header file:\n#ifndef GIRAFFE_HEADERS_H_\n#define GIRAFFE_HEADERS_H_\n\n#define MIN(a,b) ( ((a) < (b)) ? (a) : (b) )\n#define MAX(a,b) ( ((a) > (b)) ? (a) : (b) )\n#define SQR(x) ((x) * (x))\n#define ONE_OVER_SQRT_4PI 0.282094791773878143474039725780\n\n#define VERR_DEF_PARAMS __LINE__, __FILE__, CCTK_THORNSTRING\n\n// The order here MATTERS, as we assume that GUPXX+1=GUPYY, etc.\nstatic const int PHI=0,PSI=1,GXX=2,GXY=3,GXZ=4,GYY=5,GYZ=6,GZZ=7,\n LAPM1=8,SHIFTX=9,SHIFTY=10,SHIFTZ=11,GUPXX=12,GUPYY=13,GUPZZ=14,\n NUMVARS_FOR_METRIC_FACEVALS=15; //<-- Be _sure_ to set this correctly, or you'll have memory access bugs!\n\n// These are not used for facevals in the reconstruction step, but boy are they useful anyway. \nstatic const int GUPXY=15,GUPXZ=16,GUPYZ=17,\n NUMVARS_FOR_METRIC=18; //<-- Be _sure_ to set this correctly, or you'll have memory access bugs!\n\n// The order here MATTERS, and must be consistent with the order in the in_prims[] array in driver_evaluate_FFE_rhs.C.\nstatic const int VX=0,VY=1,VZ=2,\n BX_CENTER=3,BY_CENTER=4,BZ_CENTER=5,BX_STAGGER=6,BY_STAGGER=7,BZ_STAGGER=8,\n VXR=9,VYR=10,VZR=11,VXL=12,VYL=13,VZL=14,MAXNUMVARS=15; //<-- Be _sure_ to define MAXNUMVARS appropriately!\n\nstatic const int UT=0,UX=1,UY=2,UZ=3;\n\n// The \"I\" suffix denotes interpolation. In other words, these\n// definitions are used for interpolation ONLY. 
The order here\n// matters as well!\nstatic const int SHIFTXI=0,SHIFTYI=1,SHIFTZI=2,GUPXXI=3,GUPXYI=4,GUPXZI=5,GUPYYI=6,GUPYZI=7,GUPZZI=8,\n PSII=9,LAPM1I=10,A_XI=11,A_YI=12,A_ZI=13,LAPSE_PSI2I=14,LAPSE_OVER_PSI6I=15,MAXNUMINTERP=16;\n\n// Again, the order here MATTERS, since we assume in the code that, e.g., smallb[0]=b^t, smallb[3]=b^z, etc.\nstatic const int SMALLBT=0,SMALLBX=1,SMALLBY=2,SMALLBZ=3,SMALLB2=4,NUMVARS_SMALLB=5;\n\n// Again, the order here MATTERS, since we assume in the code that, CONSERV[STILDEX+1] = \\tilde{S}_y\nstatic const int STILDEX=0,STILDEY=1,STILDEZ=2,NUM_CONSERVS=3;\n\nstatic const int LAPSE=0,PSI2=1,PSI4=2,PSI6=3,PSIM4=4,LAPSEINV=5,NUMVARS_METRIC_AUX=6;\n#define SET_LAPSE_PSI4(array_name,METRIC) { \\\n array_name[LAPSE] = METRIC[LAPM1]+1.0; \\\n array_name[PSI2] = exp(2.0*METRIC[PHI]); \\\n array_name[PSI4] = SQR(array_name[PSI2]); \\\n array_name[PSI6] = array_name[PSI4]*array_name[PSI2]; \\\n array_name[PSIM4] = 1.0/array_name[PSI4]; \\\n array_name[LAPSEINV] = 1.0/array_name[LAPSE]; \\\n }\n\n// Keeping track of ghostzones between routines is a nightmare, so\n// we instead attach ghostzone info to each gridfunction and set\n// the ghostzone information correctly within each routine.\nstruct gf_and_gz_struct {\n CCTK_REAL *gf;\n int gz_lo[4],gz_hi[4];\n};\n\nstruct output_stats {\n int font_fixed,vel_limited,failure_checker;\n long n_iter;\n};\n\n\n// FIXME: For cosmetic purposes, we might want to make everything either zero-offset or one-offset, instead of a mixture.\nconst int kronecker_delta[4][3] = { { 0,0,0 },\n { 1,0,0 },\n { 0,1,0 },\n { 0,0,1 } };\n\n/* PUBLIC FUNCTIONS, USED OUTSIDE GiRaFFE AS WELL */\nvoid GiRaFFE_HO_compute_conservatives(const CCTK_REAL *PRIMS, const CCTK_REAL *METRIC, CCTK_REAL *CONSERVS);\n#include \"compute_conservatives_FFE.C\"\n\nvoid GiRaFFE_set_symmetry_gzs_staggered(const cGH *cctkGH, const int *cctk_lsh,const CCTK_REAL *X,const CCTK_REAL *Y,const CCTK_REAL *Z, CCTK_REAL *gridfunc,\n const CCTK_REAL *gridfunc_syms,const int stagger_x,const int stagger_y,const int stagger_z);\n\n#endif // GIRAFFE_HEADERS_H", "Overwriting GiRaFFE_HO/src/GiRaFFE_headers.h\n" ] ], [ [ "<a id='compute_conservatives'></a>\n\n### Step 2.b.ii: Writing `compute_conservatives_FFE.C` \\[Back to [top](#toc)\\]\n$$\\label{compute_conservatives}$$\n\nThe conservative to primitive solver will also depend on the function provided by `compute_conservatives_FFE.C`.", "_____no_output_____" ] ], [ [ "%%writefile GiRaFFE_HO/src/compute_conservatives_FFE.C\nvoid GiRaFFE_HO_compute_conservatives(const CCTK_REAL *PRIMS, const CCTK_REAL *METRIC, CCTK_REAL *CONSERVS) {\n const CCTK_REAL psi_bssnL = exp(METRIC[PHI]);\n const CCTK_REAL psi2 = psi_bssnL*psi_bssnL;\n const CCTK_REAL psi4 = psi2*psi2;\n const CCTK_REAL sqrtg = psi4*psi2;\n\n // \\gamma_{ij}, computed from \\tilde{\\gamma}_{ij}\n const CCTK_REAL gxxL = psi4*METRIC[GXX];\n const CCTK_REAL gxyL = psi4*METRIC[GXY];\n const CCTK_REAL gxzL = psi4*METRIC[GXZ];\n const CCTK_REAL gyyL = psi4*METRIC[GYY];\n const CCTK_REAL gyzL = psi4*METRIC[GYZ];\n const CCTK_REAL gzzL = psi4*METRIC[GZZ];\n\n // Read in magnetic field and momentum variables once from memory, since memory access is expensive:\n const CCTK_REAL BxL = PRIMS[BX_CENTER];\n const CCTK_REAL ByL = PRIMS[BY_CENTER];\n const CCTK_REAL BzL = PRIMS[BZ_CENTER];\n\n const CCTK_REAL vxL = PRIMS[VX];\n const CCTK_REAL vyL = PRIMS[VY];\n const CCTK_REAL vzL = PRIMS[VZ];\n\n //const CCTK_REAL fourpialpha_inv = 1.0/( 4.0*M_PI*(METRIC[LAPM1] + 1.0) );\n const 
CCTK_REAL fourpi_inv = 1.0/( 4.0*M_PI );\n\n //const CCTK_REAL betaxL = METRIC[SHIFTX];\n //const CCTK_REAL betayL = METRIC[SHIFTY];\n //const CCTK_REAL betazL = METRIC[SHIFTZ];\n\n const CCTK_REAL B2 = gxxL*BxL*BxL + gyyL*ByL*ByL + gzzL*BzL*BzL\n + 2.0*(gxyL*BxL*ByL + gxzL*BxL*BzL + gyzL*ByL*BzL);\n\n\n // NOTE: SIGNIFICANTLY MODIFIED FROM ILLINOISGRMHD VERSION:\n // velocities in GiRaFFE are defined to be \"drift\" velocity.\n // cf. Eqs 47 and 85 in http://arxiv.org/pdf/1310.3274.pdf \n // Modified again from the original GiRaFFE to use Valencia velocity\n\n const CCTK_REAL v_xL = gxxL*vxL + gxyL*vyL + gxzL*vzL;\n const CCTK_REAL v_yL = gxyL*vxL + gyyL*vyL + gyzL*vzL;\n const CCTK_REAL v_zL = gxzL*vxL + gyzL*vyL + gzzL*vzL;\n \n /*\n * Comments:\n * Eq. 85 in https://arxiv.org/pdf/1310.3274.pdf:\n * v^i = 4 pi alpha * (gamma^{ij} tilde{S}_j) / (sqrtgamma * B^2) - beta^i\n * which implies that\n * (v^i + beta^i)*(sqrtgamma * B^2)/(4 pi alpha) = gamma^{ij} tilde{S}_j\n * Multiply both sides by gamma_{ik}:\n * gamma_{ik} (v^i + beta^i)*(sqrtgamma * B^2)/(4 pi alpha) = gamma_{ik} gamma^{ij} tilde{S}_j\n * \n * -> tilde{S}_k = gamma_{ik} (v^i + beta^i)*(sqrtgamma * B^2)/(4 pi alpha)\n */\n\n CONSERVS[STILDEX] = v_xL * sqrtg * B2 * fourpi_inv;\n CONSERVS[STILDEY] = v_yL * sqrtg * B2 * fourpi_inv;\n CONSERVS[STILDEZ] = v_zL * sqrtg * B2 * fourpi_inv;\n}", "Overwriting GiRaFFE_HO/src/compute_conservatives_FFE.C\n" ] ], [ [ "<a id='grmhd'></a>\n\n## Step 2.c: GRMHD variables provided by HydroBase \\[Back to [top](#toc)\\]\n$$\\label{grmhd}$$\n\nThis thorn should also use the common GRMHD variables provided by HydroBase, to allow it to interact easily with other MHD thorns. To that end, we will need to read in the common MHD variables at the beginning of our evolution, and then write the new values at the end of our evolution.", "_____no_output_____" ] ], [ [ "%%writefile GiRaFFE_HO/src/GiRaFFE_HydroBase_conversion.c\n#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#include \"cctk.h\"\n#include \"cctk_Arguments.h\"\n#include \"cctk_Parameters.h\"\n#include \"Symmetry.h\"\n\nvoid HydroBase_to_GiRaFFE(CCTK_ARGUMENTS) {\n /* BUi <- Bvec[i]\n * ADi <- Avec[i]\n * ValenciavUi <- vel[i]\n */\n DECLARE_CCTK_PARAMETERS;\n DECLARE_CCTK_ARGUMENTS;\n\n CCTK_INT idx3;\n CCTK_INT idx4[3];\n#pragma omp parallel for\n for(int i2=0; i2<cctk_lsh[2]; i2++) {\n for(int i1=0; i1<cctk_lsh[1]; i1++) {\n for(int i0=0; i0<cctk_lsh[0]; i0++) {\n idx3 = CCTK_GFINDEX3D(cctkGH, i0,i1,i2);\n idx4[0] = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,0);\n idx4[1] = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,1);\n idx4[2] = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,2);\n BU0[idx3] = Bvec[idx4[0]];\n BU1[idx3] = Bvec[idx4[1]];\n BU2[idx3] = Bvec[idx4[2]];\n AD0[idx3] = Avec[idx4[0]];\n AD1[idx3] = Avec[idx4[1]];\n AD2[idx3] = Avec[idx4[2]];\n ValenciavU0[idx3] = vel[idx4[0]];\n ValenciavU1[idx3] = vel[idx4[1]];\n ValenciavU2[idx3] = vel[idx4[2]];\n // We don't set Phi, because it is always set to zero in GiRaFFE ID.\n }\n }\n }\n}\n\nvoid GiRaFFE_to_HydroBase(CCTK_ARGUMENTS) {\n /* Bvec[i] <- BUi\n * Avec[i] <- ADi\n * vel[i] <- ValenciavUi\n */\n DECLARE_CCTK_PARAMETERS;\n DECLARE_CCTK_ARGUMENTS;\n \n CCTK_INT idx3;\n CCTK_INT idx4[3];\n#pragma omp parallel for\n for(int i2=0; i2<cctk_lsh[2]; i2++) {\n for(int i1=0; i1<cctk_lsh[1]; i1++) {\n for(int i0=0; i0<cctk_lsh[0]; i0++) {\n idx3 = CCTK_GFINDEX3D(cctkGH, i0,i1,i2);\n idx4[0] = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,0);\n idx4[1] = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,1);\n idx4[2] = CCTK_GFINDEX4D(cctkGH, 
i0,i1,i2,2);\n Bvec[idx4[0]] = BU0[idx3];\n Bvec[idx4[1]] = BU1[idx3];\n Bvec[idx4[2]] = BU2[idx3];\n Avec[idx4[0]] = AD0[idx3];\n Avec[idx4[1]] = AD1[idx3];\n Avec[idx4[2]] = AD2[idx3];\n vel[idx4[0]] = ValenciavU0[idx3];\n vel[idx4[1]] = ValenciavU1[idx3];\n vel[idx4[2]] = ValenciavU2[idx3];\n // We don't set Phi, because it is always set to zero in GiRaFFE ID thorns.\n }\n }\n }\n}", "Overwriting GiRaFFE_HO/src/GiRaFFE_HydroBase_conversion.c\n" ] ], [ [ "<a id='timelevel'></a>\n\n## Step 2.d: Copying initial data to additional timelevels \\[Back to [top](#toc)\\]\n$$\\label{timelevel}$$\n\nSince the ETK thorn Boundary will access all three timelevels for our evolved variables, we will need to make sure that those timelevels actually contain data at time zero. So, we will copy initial data to both other timelevels.", "_____no_output_____" ] ], [ [ "%%writefile GiRaFFE_HO/src/postpostinitial__copy_timelevels.c\n//-------------------------------------------------\n// Stuff to run right after initial data is set up\n//-------------------------------------------------\n\n#include \"cctk.h\"\n//#include <cstdio>\n//#include <cstdlib>\n#include \"cctk_Arguments.h\"\n#include \"cctk_Functions.h\"\n#include \"cctk_Parameters.h\"\n#include \"Symmetry.h\"\n//#include \"GiRaFFE_headers.h\"\n\n//extern \"C\" \nvoid GiRaFFE_HO_PostPostInitial__Copy_Timelevels(CCTK_ARGUMENTS) {\n DECLARE_CCTK_ARGUMENTS;\n DECLARE_CCTK_PARAMETERS;\n\n\n //------------------------------------------------------------------\n // FILL _p AND _p_p TIMELEVELS. Probably don't need to do this if \n // Carpet::init_fill_timelevels=yes and\n // MoL::initial_data_is_crap = yes\n // NOTE: We don't fill metric data here.\n // FIXME: Do we really need this?\n if(cctk_time==0) {\n#pragma omp parallel for\n for(int k=0;k<cctk_lsh[2];k++) for(int j=0;j<cctk_lsh[1];j++) for(int i=0;i<cctk_lsh[0];i++) {\n const int index = CCTK_GFINDEX3D(cctkGH,i,j,k);\n\n StildeD0_p[index] = StildeD0[index]; \n StildeD1_p[index] = StildeD1[index];\n StildeD2_p[index] = StildeD2[index];\n\n psi6Phi_p[index] = psi6Phi[index];\n AD0_p[index] = AD0[index]; \n AD1_p[index] = AD1[index];\n AD2_p[index] = AD2[index];\n\n StildeD0_p_p[index] = StildeD0[index]; \n StildeD1_p_p[index] = StildeD1[index];\n StildeD2_p_p[index] = StildeD2[index];\n\n psi6Phi_p_p[index] = psi6Phi[index];\n AD0_p_p[index] = AD0[index]; \n AD1_p_p[index] = AD1[index];\n AD2_p_p[index] = AD2[index];\n }\n }\n}\n", "Overwriting GiRaFFE_HO/src/postpostinitial__copy_timelevels.c\n" ] ], [ [ "<a id='cclfiles'></a>\n\n## Step 2.e: CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure \\[Back to [top](#toc)\\]\n$$\\label{cclfiles}$$\n\nWriting a module (\"thorn\") within the Einstein Toolkit requires that three \"ccl\" files be constructed, all in the root directory of the thorn:", "_____no_output_____" ], [ "<a id='interface'></a>\n\n### Step 2.e.i: `interface.ccl` \\[Back to [top](#toc)\\]\n$$\\label{interface}$$\n\n1. `interface.ccl`: defines the gridfunction groups needed, and provides keywords denoting what this thorn provides and what it should inherit from other thorns. This file governs the interaction between this thorn and others; more information can be found in the [official Einstein Toolkit documentation](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-178000D2.2). \nWith \"implements\", we give our thorn its unique name. 
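(In this thorn that unique name is \`GiRaFFE_HO\`, matching the \`implements: GiRaFFE_HO\` line at the top of the file below.) 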
By \"inheriting\" other thorns, we tell the Toolkit that we will rely on variables and functions that exist and are declared \"public\" within those thorns. Then, we tell the toolkit that we want the gridfunctions $A_i$, $\\tilde{S}_i$, and $\\sqrt{\\gamma}\\Phi$ to be visible to other thorns by using the keyword \"public\". ", "_____no_output_____" ] ], [ [ "%%writefile GiRaFFE_HO/interface.ccl\nimplements: GiRaFFE_HO\n\ninherits: admbase HydroBase Boundary grid Tmunubase\n\nUSES INCLUDE: loopcontrol.h\nUSES INCLUDE: Symmetry.h\nUSES INCLUDE: Boundary.h\n \nCCTK_INT FUNCTION MoLRegisterEvolved(CCTK_INT IN EvolvedIndex, CCTK_INT IN RHSIndex)\nUSES FUNCTION MoLRegisterEvolved\n\nCCTK_INT FUNCTION GetBoundarySpecification(CCTK_INT IN size, CCTK_INT OUT ARRAY nboundaryzones, CCTK_INT OUT ARRAY is_internal, CCTK_INT OUT ARRAY is_staggered, CCTK_INT OUT ARRAY shiftout)\nUSES FUNCTION GetBoundarySpecification\n\nCCTK_INT FUNCTION SymmetryTableHandleForGrid(CCTK_POINTER_TO_CONST IN cctkGH)\nUSES FUNCTION SymmetryTableHandleForGrid\n\nCCTK_INT FUNCTION Boundary_SelectGroupForBC(CCTK_POINTER_TO_CONST IN GH, CCTK_INT IN faces, CCTK_INT IN boundary_width, CCTK_INT IN table_handle, CCTK_STRING IN group_name, CCTK_STRING IN bc_name)\nUSES FUNCTION Boundary_SelectGroupForBC\n\nCCTK_INT FUNCTION Boundary_SelectVarForBC(CCTK_POINTER_TO_CONST IN GH, CCTK_INT IN faces, CCTK_INT IN boundary_width, CCTK_INT IN table_handle, CCTK_STRING IN var_name, CCTK_STRING IN bc_name)\nUSES FUNCTION Boundary_SelectVarForBC\n\npublic:\ncctk_real GiRaFFE_aux type = GF Timelevels=1 tags='prolongation=\"none\"'\n{\n SevolParenUD00,SevolParenUD01,SevolParenUD02,\n SevolParenUD10,SevolParenUD11,SevolParenUD12,\n SevolParenUD20,SevolParenUD21,SevolParenUD22,\n AevolParen,PevolParenU0,PevolParenU1,PevolParenU2,\n gammaUU00,gammaUU01,gammaUU02,gammaUU11,gammaUU12,gammaUU22,gammadet\n} \"The evolved scalar fields\"\n\npublic:\ncctk_real GiRaFFE_Bs type = GF Timelevels=1 tags='InterpNumTimelevels=1 prolongation=\"none\"'\n{\n BU0,BU1,BU2\n} \"The B field\"\n\npublic:\ncctk_real GiRaFFE_Vs type = GF Timelevels=1 tags='InterpNumTimelevels=1 prolongation=\"none\"'\n{\n u4upperZero,ValenciavU0,ValenciavU1,ValenciavU2\n} \"The zeroth component of the four velocity and the Valencia 3-velocity\"\n\npublic:\ncctk_real GiRaFFE_rhs type = GF Timelevels=1 tags='prolongation=\"none\" Checkpoint=\"no\"'\n{\n Stilde_rhsD0,Stilde_rhsD1,Stilde_rhsD2,A_rhsD0,A_rhsD1,A_rhsD2,psi6Phi_rhs\n} \"The evolved scalar fields\"\n\npublic:\ncctk_real GiRaFFE_vars type = GF Timelevels=3 tags='prolongation=\"none\"'\n{\n StildeD0,StildeD1,StildeD2,AD0,AD1,AD2,psi6Phi\n} \"The evolved scalar fields\"\n\npublic:\ncctk_real GiRaFFEfood_init type = GF Timelevels=1 tags='prolongation=\"none\"'\n{\n AD0_init,AD1_init,AD2_init,psi6Phi_init,ValenciavU0_init,ValenciavU1_init,ValenciavU2_init,BU0_init,BU1_init,BU2_init\n} \"Stores the initial data for later debugging\"\n\n#########################################\n### Aliased functions from Carpet ###\n#########################################\n\nCCTK_INT FUNCTION \\\n GetRefinementLevel \\\n (CCTK_POINTER_TO_CONST IN cctkGH)\nUSES FUNCTION GetRefinementLevel", "Overwriting GiRaFFE_HO/interface.ccl\n" ] ], [ [ "<a id='param'></a>\n\n### Step 2.e.ii: `param.ccl` \\[Back to [top](#toc)\\]\n$$\\label{param}$$\n\n2. `param.ccl`: specifies free parameters within the thorn, enabling them to be set at runtime. It is required to provide allowed ranges and default values for each parameter. 
More information on this file's syntax can be found in the [official Einstein Toolkit documentation](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-183000D2.3). A number of parameters are defined, and more parameters can be easily added in later versions. We also set the number of timelevels we will store in memory.", "_____no_output_____" ] ], [ [ "%%writefile GiRaFFE_HO/param.ccl\nshares: MethodOfLines\n\nUSES CCTK_INT MoL_Num_Evolved_Vars\nUSES CCTK_INT MoL_Num_ArrayEvolved_Vars\n\nrestricted:\nCCTK_INT GiRaFFE_HO_MaxNumEvolvedVars \"Number of evolved variables used by this thorn\" ACCUMULATOR-BASE=MethodofLines::MoL_Num_Evolved_Vars STEERABLE=RECOVER\n{\n 7:7 :: \"Number of evolved variables used by this thorn\"\n} 7\n\nrestricted:\nCCTK_INT GiRaFFE_HO_MaxNumArrayEvolvedVars \"Number of Array evolved variables used by this thorn\" ACCUMULATOR-BASE=MethodofLines::MoL_Num_ArrayEvolved_Vars STEERABLE=RECOVER\n{\n 0:0 :: \"Number of Array evolved variables used by this thorn\"\n} 0\n\nrestricted:\nKEYWORD bound \"Type of boundary condition to use\"\n{\n \"flat\" :: \"Flat (von Neumann, n grad phi = 0) boundary condition\"\n \"static\" :: \"Static (Dirichlet, dphi/dt=0) boundary condition\"\n \"radiation\" :: \"Radiation boundary condition\"\n \"robin\" :: \"Robin (phi(r) = C/r) boundary condition\"\n \"zero\" :: \"Zero (Dirichlet, phi=0) boundary condition\"\n \"none\" :: \"Apply no boundary condition\"\n} \"radiation\"\n\nrestricted:\nCCTK_INT timelevels \"Number of active timelevels\" STEERABLE=RECOVER\n{\n 0:3 :: \"\"\n} 3\n\nrestricted:\nCCTK_REAL xi \"The damping factor for the psi6Phi evolution equation\"\n{\n *:* :: \"The damping factor for the psi6Phi evolution equation\"\n} 0.0\n\n# SPEED LIMIT: Set maximum relativistic gamma factor\n# \nREAL GAMMA_SPEED_LIMIT \"Maximum relativistic gamma factor. Note the default is much higher than IllinoisGRMHD. (GRFFE can handle higher Lorentz factors)\"\n{\n 1:* :: \"Positive > 1, though you'll likely have troubles far above 2000.\"\n} 2000.0\n\nREAL min_radius_inside_of_which_conserv_to_prims_FFE_and_FFE_evolution_is_DISABLED \"As parameter suggests, this is the minimum radius inside of which the conservatives-to-primitives solver is disabled. In the Aligned Rotator test, this should be set equal to R_NS_aligned_rotator.\" STEERABLE=ALWAYS\n{\n -1. :: \"disable the conservative-to-primitive solver modification\"\n (0:* :: \"any positive value\"\n} -1.\n\n# Set the drift velocity perpendicular to the current sheet to zero.\nBOOLEAN current_sheet_null_v \"Shall we null the velocity normal to the current sheet?\"\n{\n} \"no\" #Necessary for the split monopole\n", "Overwriting GiRaFFE_HO/param.ccl\n" ] ], [ [ "<a id='schedule'></a>\n\n### Step 2.e.iii: `schedule.ccl` \\[Back to [top](#toc)\\]\n$$\\label{schedule}$$\n\n3. `schedule.ccl`: allocates storage for gridfunctions, defines how the thorn's functions should be scheduled in a broader simulation, and specifies the regions of memory written to or read from gridfunctions. `schedule.ccl`'s official documentation may be found [here](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-186000D2.4). \n\nFor clarity, we will outline the desired schedule we wish to create in the toolkit:\n\n0. **GiRaFFEfood_HO**\n 1. **GiRaFFE_ExactWaldID**\n 1. Sets up the vector potential and initial Valencia 3-velocity.\n 1. Reads: gammaDD02, gammaDD12, gammaDD22 (Everywhere)\n 1. Writes: AD0, AD1, AD2, ValenciavU0, ValenciavU1, ValenciavU2 (Everywhere)\n 1. **driver_A_to_B**\n 1. 
Computes the magnetic field from the vector potential everywhere (this function is from **GiRaFFE_HO**).\n 1. Reads: gammaDD00, gammaDD01, gammaDD02, gammaDD11, gammaDD12, gammaDD22, AD0, AD1, AD2 (Everywhere)\n 1. Writes: BU0, BU1, BU2 (Everywhere)\n 1. **StildeD_from_initial_data**\n 1. Computes the initial Poynting flux from the initial magnetic field and Valencia 3-velocity data.\n 1. Reads: gammaDD00, gammaDD01, gammaDD02, gammaDD11, gammaDD12, gammaDD22, BU0, BU1, BU2, ValenciavU0, ValenciavU1, ValenciavU2 (Everywhere)\n 1. Writes: StildeD0, StildeD1, StildeD2 (Interior)\n0. **GiRaFFE_HO**\n 1. **HydroBase_to_GiRaFFE**\n 1. Reads data from **HydroBase** variables into **GiRaFFE_HO** variables.\n 1. Reads: Avec, Bvec, vel (from **HydroBase**) (Everywhere)\n 1. Writes: AD0, AD1, AD2, BU0, BU1, BU2, ValenciavU0, ValenciavU1, ValenciavU2 (Everywhere)\n 1. **GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS**\n 1. Sets auxiliary gridfunctions that will need to be finite-differenced for the right-hand sides.\n 1. Reads: alpha, betaU0, betaU1, betaU2, gammaDD00, gammaDD01, gammaDD02, gammaDD11, gammaDD12, gammaDD22, ValenciavU0, ValenciavU1, ValenciavU2, AD0, AD1, AD2, psi6Phi (Everywhere)\n 1. Writes: uU0, uU1, uU2, u0alpha, alpsqrtgam, AevolParen, PevolParenU0, PevolParenU1, PevolParenU2, gammaUU00, gammaUU01, gammaUU02, gammaUU11, gammaUU12, gammaUU22, gammadet, u4upperZero (Everywhere)\n 1. **GiRaFFE_HO_set_rhs**\n 1. Sets the RHSs for the ETK's MoL solver.\n 1. Reads: alpha, betaU0, betaU1, betaU2, gammaDD00, gammaDD01, gammaDD02, gammaDD11, gammaDD12, gammaDD22, ValenciavU0, ValenciavU1, ValenciavU2, AD0, AD1, AD2, BU0, BU1, BU2, uU0, uU1, uU2, u0alpha, alpsqrtgam, AevolParen, PevolParenU0, PevolParenU1, PevolParenU2, gammaUU00, gammaUU01, gammaUU02, gammaUU11, gammaUU12, gammaUU22, gammadet (Everywhere)\n 1. Writes: Stilde_rhsD0, Stilde_rhsD1, Stilde_rhsD2, A_rhsD0, A_rhsD1, A_rhsD2, psi6Phi_rhs (Interior)\n 1. **GiRaFFE_HO_SelectBCs**\n 1. Apply boundary conditions. \n 1. Reads: StildeD0, StildeD1, StildeD2, AD0, AD1, AD2, psi6Phi (Interior)\n 1. Writes: StildeD0, StildeD1, StildeD2, AD0, AD1, AD2, psi6Phi (Boundaries)\n 1. **driver_A_to_B**\n 1. Computes the magnetic field from the vector potential everywhere.\n 1. Reads: gammaDD00, gammaDD01, gammaDD02, gammaDD11, gammaDD12, gammaDD22, AD0, AD1, AD2 (Everywhere)\n 1. Writes:BU0, BU1, BU2 (Everywhere)\n 1. **GiRaFFE_to_HydroBase**\n 1. Reads data from **GiRaFFE_HO** variables into **HydroBase** variables.\n 1. Reads: AD0, AD1, AD2, BU0, BU1, BU2, ValenciavU0, ValenciavU1, ValenciavU2 (Everywhere)\n 1. Writes: Avec, Bvec, vel (from HydroBase) (Everywhere)\n\nWe first assign storage for both scalar gridfunctions, and then specify the standardized ETK \"scheduling bins\" in which we want each of our thorn's functions to run.\n", "_____no_output_____" ] ], [ [ "%%writefile GiRaFFE_HO/schedule.ccl\nSTORAGE: GiRaFFE_rhs[1]\nSTORAGE: GiRaFFE_vars[3]\nSTORAGE: GiRaFFE_aux[1]\nSTORAGE: GiRaFFE_Bs[1]\nSTORAGE: GiRaFFE_Vs[1]\nSTORAGE: GiRaFFEfood_init[1]\n\nSTORAGE: HydroBase::rho[1],HydroBase::press[1],HydroBase::eps[1],HydroBase::vel[1],HydroBase::Bvec[1],HydroBase::Avec[1],HydroBase::Aphi[1]\n\n# POSTPOSTINITIAL\nschedule GROUP GiRaFFE_PostPostInitial at CCTK_POSTPOSTINITIAL before MoL_PostStep after HydroBase_Con2Prim\n{\n} \"HydroBase_Con2Prim in CCTK_POSTPOSTINITIAL sets conserv to prim then outer boundaries (OBs, which are technically disabled). 
The post OB SYNCs actually reprolongate the conservative variables, making cons and prims INCONSISTENT. So here we redo the con2prim, avoiding the SYNC afterward, then copy the result to other timelevels\"\n\n#schedule GiRaFFE_HO_InitSymBound at BASEGRID\n#{\n# LANG: C\n# OPTIONS: global\n#} \"Schedule symmetries\"\n\n# Sets the gridfunctions that are needed for RHS; GiRaFFE_HO_set_rhs will need these to be set so it can finite-difference them\nschedule GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS as GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS IN MoL_CalcRHS\n{\n LANG: C\n READS: admbase::alp(Everywhere)\n READS: admbase::betax(Everywhere)\n READS: admbase::betay(Everywhere)\n READS: admbase::betaz(Everywhere)\n READS: admbase::gxx(Everywhere)\n READS: admbase::gxy(Everywhere)\n READS: admbase::gxz(Everywhere)\n READS: admbase::gyy(Everywhere)\n READS: admbase::gyz(Everywhere)\n READS: admbase::gzz(Everywhere)\n READS: GiRaFFE_HO::ValenciavU0(Everywhere)\n READS: GiRaFFE_HO::ValenciavU1(Everywhere)\n READS: GiRaFFE_HO::ValenciavU2(Everywhere)\n READS: GiRaFFE_HO::AD0(Everywhere)\n READS: GiRaFFE_HO::AD1(Everywhere)\n READS: GiRaFFE_HO::AD2(Everywhere)\n READS: GiRaFFE_HO::psi6Phi(Everywhere)\n WRITES: GiRaFFE_HO::uU0(Everywhere)\n WRITES: GiRaFFE_HO::uU1(Everywhere)\n WRITES: GiRaFFE_HO::uU2(Everywhere)\n WRITES: GiRaFFE_HO::u0alpha(Everywhere)\n WRITES: GiRaFFE_HO::alpsqrtgam(Everywhere)\n WRITES: GiRaFFE_HO::AevolParen(Everywhere)\n WRITES: GiRaFFE_HO::PevolParenU0(Everywhere)\n WRITES: GiRaFFE_HO::PevolParenU1(Everywhere)\n WRITES: GiRaFFE_HO::PevolParenU2(Everywhere)\n WRITES: GiRaFFE_HO::gammaUU00(Everywhere)\n WRITES: GiRaFFE_HO::gammaUU01(Everywhere)\n WRITES: GiRaFFE_HO::gammaUU02(Everywhere)\n WRITES: GiRaFFE_HO::gammaUU11(Everywhere)\n WRITES: GiRaFFE_HO::gammaUU12(Everywhere)\n WRITES: GiRaFFE_HO::gammaUU22(Everywhere)\n WRITES: GiRaFFE_HO::gammadet(Everywhere)\n WRITES: GiRaFFE_HO::u4upperZero(Everywhere)\n SYNC: GiRaFFE_aux\n} \"Sets prerequisite quantities for the GiRaFFE right-hand sides\"\n\nschedule GiRaFFE_HO_set_rhs as GiRaFFE_HO_Evolution IN MoL_CalcRHS after GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS\n{\n LANG: C\n READS: admbase::alp(Everywhere)\n READS: admbase::betax(Everywhere)\n READS: admbase::betay(Everywhere)\n READS: admbase::betaz(Everywhere)\n READS: admbase::gxx(Everywhere)\n READS: admbase::gxy(Everywhere)\n READS: admbase::gxz(Everywhere)\n READS: admbase::gyy(Everywhere)\n READS: admbase::gyz(Everywhere)\n READS: admbase::gzz(Everywhere)\n READS: GiRaFFE_HO::ValenciavU0(Everywhere)\n READS: GiRaFFE_HO::ValenciavU1(Everywhere)\n READS: GiRaFFE_HO::ValenciavU2(Everywhere)\n READS: GiRaFFE_HO::AD0(Everywhere)\n READS: GiRaFFE_HO::AD1(Everywhere)\n READS: GiRaFFE_HO::AD2(Everywhere)\n READS: GiRaFFE_HO::BU0(Everywhere)\n READS: GiRaFFE_HO::BU1(Everywhere)\n READS: GiRaFFE_HO::BU2(Everywhere)\n READS: GiRaFFE_HO::uU0(Everywhere)\n READS: GiRaFFE_HO::uU1(Everywhere)\n READS: GiRaFFE_HO::uU2(Everywhere)\n READS: GiRaFFE_HO::u0alpha(Everywhere)\n READS: GiRaFFE_HO::alpsqrtgam(Everywhere)\n READS: GiRaFFE_HO::AevolParen(Everywhere)\n READS: GiRaFFE_HO::PevolParenU0(Everywhere)\n READS: GiRaFFE_HO::PevolParenU1(Everywhere)\n READS: GiRaFFE_HO::PevolParenU2(Everywhere)\n READS: GiRaFFE_HO::gammaUU00(Everywhere)\n READS: GiRaFFE_HO::gammaUU01(Everywhere)\n READS: GiRaFFE_HO::gammaUU02(Everywhere)\n READS: GiRaFFE_HO::gammaUU11(Everywhere)\n READS: GiRaFFE_HO::gammaUU12(Everywhere)\n READS: GiRaFFE_HO::gammaUU22(Everywhere)\n READS: 
GiRaFFE_HO::gammadet(Everywhere)\n WRITES: GiRaFFE_HO::Stilde_rhsD0(Interior)\n WRITES: GiRaFFE_HO::Stilde_rhsD1(Interior)\n WRITES: GiRaFFE_HO::Stilde_rhsD2(Interior)\n WRITES: GiRaFFE_HO::A_rhsD0(Interior)\n WRITES: GiRaFFE_HO::A_rhsD1(Interior)\n WRITES: GiRaFFE_HO::A_rhsD2(Interior)\n WRITES: GiRaFFE_HO::psi6Phi_rhs(Interior)\n} \"Sets the GiRaFFE right-hand sides\"\n\nschedule GiRaFFE_HO_SelectBCs in MoL_PostStep\n{\n LANG: C\n OPTIONS: level\n SYNC: GiRaFFE_vars\n} \"Boundaries of GiRaFFE equations\"\n\nschedule GROUP ApplyBCs as GiRaFFE_HO_ApplyBCs in MoL_PostStep after GiRaFFE_HO_SelectBCs\n{\n READS: GiRaFFE_HO::AD0(Interior)\n READS: GiRaFFE_HO::AD1(Interior)\n READS: GiRaFFE_HO::AD2(Interior)\n READS: GiRaFFE_HO::psi6Phi(Interior)\n READS: GiRaFFE_HO::ValenciavU0(Interior)\n READS: GiRaFFE_HO::ValenciavU1(Interior)\n READS: GiRaFFE_HO::ValenciavU2(Interior)\n READS: GiRaFFE_HO::StildeD0(Interior)\n READS: GiRaFFE_HO::StildeD1(Interior)\n READS: GiRaFFE_HO::StildeD2(Interior)\n WRITES: GiRaFFE_HO::AD0(Boundary)\n WRITES: GiRaFFE_HO::AD1(Boundary)\n WRITES: GiRaFFE_HO::AD2(Boundary)\n WRITES: GiRaFFE_HO::psi6Phi(Boundary)\n WRITES: GiRaFFE_HO::ValenciavU0(Boundary)\n WRITES: GiRaFFE_HO::ValenciavU1(Boundary)\n WRITES: GiRaFFE_HO::ValenciavU2(Boundary)\n WRITES: GiRaFFE_HO::StildeD0(Boundary)\n WRITES: GiRaFFE_HO::StildeD1(Boundary)\n WRITES: GiRaFFE_HO::StildeD2(Boundary)\n} \"Apply boundary conditions\"\n\n\nschedule GROUP ApplyBCs as GiRaFFE_HO_ApplyBCs at POSTRESTRICT\n{\n} \"Apply boundary conditions\"\n\nschedule driver_A_to_B as driver_A_to_B in HydroBase_Boundaries\n{\n LANG: C\n READS: admbase::gxx(Everywhere)\n READS: admbase::gxy(Everywhere)\n READS: admbase::gxz(Everywhere)\n READS: admbase::gyy(Everywhere)\n READS: admbase::gyz(Everywhere)\n READS: admbase::gzz(Everywhere)\n READS: GiRaFFE_HO::AD0(Everywhere)\n READS: GiRaFFE_HO::AD1(Everywhere)\n READS: GiRaFFE_HO::AD2(Everywhere)\n WRITES: GiRaFFE_HO::BU0(Everywhere)\n WRITES: GiRaFFE_HO::BU1(Everywhere)\n WRITES: GiRaFFE_HO::BU2(Everywhere)\n} \"Calculates the B-field from the vector potential\"\n\nschedule GiRaFFE_HO_RegisterVars in MoL_Register\n{\n LANG: C\n OPTIONS: meta\n} \"Register Variables for MoL\"\n\n# Nontrivial primitives solve, for vx,vy,vz:\n#schedule GiRaFFE_HO_conserv_to_prims_FFE in MoL_CalcRHS after HydroBase_to_GiRaFFE before GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS\n#{\n# LANG: C\n# READS: admbase::gxx(Everywhere)\n# READS: admbase::gxy(Everywhere)\n# READS: admbase::gxz(Everywhere)\n# READS: admbase::gyy(Everywhere)\n# READS: admbase::gyz(Everywhere)\n# READS: admbase::gzz(Everywhere)\n# READS: GiRaFFE_HO::BU0(Everywhere)\n# READS: GiRaFFE_HO::BU1(Everywhere)\n# READS: GiRaFFE_HO::BU2(Everywhere)\n# READS: GiRaFFE_HO::ValenciavU0(Everywhere)\n# READS: GiRaFFE_HO::ValenciavU1(Everywhere)\n# READS: GiRaFFE_HO::ValenciavU2(Everywhere)\n# READS: GiRaFFE_HO::StildeD0(Everywhere)\n# READS: GiRaFFE_HO::StildeD1(Everywhere)\n# READS: GiRaFFE_HO::StildeD2(Everywhere)\n# WRITES: GiRaFFE_HO::ValenciavU0(Everywhere)\n# WRITES: GiRaFFE_HO::ValenciavU1(Everywhere)\n# WRITES: GiRaFFE_HO::ValenciavU2(Everywhere)\n# WRITES: GiRaFFE_HO::StildeD0(Everywhere)\n# WRITES: GiRaFFE_HO::StildeD1(Everywhere)\n# WRITES: GiRaFFE_HO::StildeD2(Everywhere)\n#} \"Applies the FFE condition B^2>E^2 and recomputes the velocities\"\n\n# Schedule this AFTER the evolution as well.\nschedule GiRaFFE_HO_conserv_to_prims_FFE in HydroBase_Boundaries AFTER driver_A_to_B\n{\n LANG: C\n READS: admbase::gxx(Everywhere)\n READS: 
admbase::gxy(Everywhere)\n READS: admbase::gxz(Everywhere)\n READS: admbase::gyy(Everywhere)\n READS: admbase::gyz(Everywhere)\n READS: admbase::gzz(Everywhere)\n READS: GiRaFFE_HO::BU0(Everywhere)\n READS: GiRaFFE_HO::BU1(Everywhere)\n READS: GiRaFFE_HO::BU2(Everywhere)\n READS: GiRaFFE_HO::ValenciavU0(Everywhere)\n READS: GiRaFFE_HO::ValenciavU1(Everywhere)\n READS: GiRaFFE_HO::ValenciavU2(Everywhere)\n READS: GiRaFFE_HO::StildeD0(Everywhere)\n READS: GiRaFFE_HO::StildeD1(Everywhere)\n READS: GiRaFFE_HO::StildeD2(Everywhere)\n WRITES: GiRaFFE_HO::ValenciavU0(Everywhere)\n WRITES: GiRaFFE_HO::ValenciavU1(Everywhere)\n WRITES: GiRaFFE_HO::ValenciavU2(Everywhere)\n WRITES: GiRaFFE_HO::StildeD0(Everywhere)\n WRITES: GiRaFFE_HO::StildeD1(Everywhere)\n WRITES: GiRaFFE_HO::StildeD2(Everywhere)\n} \"Applies the FFE condition B^2>E^2 and recomputes the velocities\"\n\nschedule HydroBase_to_GiRaFFE IN MoL_CalcRHS before GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS\n{\n LANG: C\n READS: HydroBase::Avec(Everywhere)\n READS: HydroBase::Bvec(Everywhere)\n READS: HydroBase::vel(Everywhere)\n WRITES: GiRaFFE_HO::BU0(Everywhere)\n WRITES: GiRaFFE_HO::BU1(Everywhere)\n WRITES: GiRaFFE_HO::BU2(Everywhere)\n WRITES: GiRaFFE_HO::AD0(Everywhere)\n WRITES: GiRaFFE_HO::AD1(Everywhere)\n WRITES: GiRaFFE_HO::AD2(Everywhere)\n WRITES: GiRaFFE_HO::ValenciavU0(Everywhere)\n WRITES: GiRaFFE_HO::ValenciavU1(Everywhere)\n WRITES: GiRaFFE_HO::ValenciavU2(Everywhere)\n} \"Converts the HydroBase variables to GiRaFFE variables\"\n\nschedule GiRaFFE_to_HydroBase AT CCTK_ANALYSIS AFTER ML_BSSN_evolCalcGroup\n{\n LANG: C\n READS: GiRaFFE_HO::BU0(Everywhere)\n READS: GiRaFFE_HO::BU1(Everywhere)\n READS: GiRaFFE_HO::BU2(Everywhere)\n READS: GiRaFFE_HO::AD0(Everywhere)\n READS: GiRaFFE_HO::AD1(Everywhere)\n READS: GiRaFFE_HO::AD2(Everywhere)\n READS: GiRaFFE_HO::ValenciavU0(Everywhere)\n READS: GiRaFFE_HO::ValenciavU1(Everywhere)\n READS: GiRaFFE_HO::ValenciavU2(Everywhere)\n WRITES: HydroBase::Avec(Everywhere)\n WRITES: HydroBase::Bvec(Everywhere)\n WRITES: HydroBase::vel(Everywhere)\n} \"Converts the GiRaFFE variables to HydroBase variables\"\n\n############################################################\n# Schedule Blocks that are run BEFORE the evolution to finish setting up initial data:\n\nschedule driver_A_to_B IN GiRaFFE_ID_Converter as initial_driver_A_to_B before first_initialdata\n{\n LANG: C\n READS: admbase::gxx(Everywhere)\n READS: admbase::gxy(Everywhere)\n READS: admbase::gxz(Everywhere)\n READS: admbase::gyy(Everywhere)\n READS: admbase::gyz(Everywhere)\n READS: admbase::gzz(Everywhere)\n READS: GiRaFFE_HO::AD0(Everywhere)\n READS: GiRaFFE_HO::AD1(Everywhere)\n READS: GiRaFFE_HO::AD2(Everywhere)\n WRITES: GiRaFFE_HO::BU0(Everywhere)\n WRITES: GiRaFFE_HO::BU1(Everywhere)\n WRITES: GiRaFFE_HO::BU2(Everywhere)\n} \"Calculates the B-field from the vector potential\"\n\nschedule GiRaFFE_HO_conserv_to_prims_FFE in GiRaFFE_ID_Converter after first_initialdata\n{\n LANG: C\n READS: admbase::gxx(Everywhere)\n READS: admbase::gxy(Everywhere)\n READS: admbase::gxz(Everywhere)\n READS: admbase::gyy(Everywhere)\n READS: admbase::gyz(Everywhere)\n READS: admbase::gzz(Everywhere)\n READS: GiRaFFE_HO::BU0(Everywhere)\n READS: GiRaFFE_HO::BU1(Everywhere)\n READS: GiRaFFE_HO::BU2(Everywhere)\n READS: GiRaFFE_HO::ValenciavU0(Everywhere)\n READS: GiRaFFE_HO::ValenciavU1(Everywhere)\n READS: GiRaFFE_HO::ValenciavU2(Everywhere)\n READS: GiRaFFE_HO::StildeD0(Everywhere)\n READS: GiRaFFE_HO::StildeD1(Everywhere)\n READS: 
GiRaFFE_HO::StildeD2(Everywhere)\n WRITES: GiRaFFE_HO::ValenciavU0(Everywhere)\n WRITES: GiRaFFE_HO::ValenciavU1(Everywhere)\n WRITES: GiRaFFE_HO::ValenciavU2(Everywhere)\n WRITES: GiRaFFE_HO::StildeD0(Everywhere)\n WRITES: GiRaFFE_HO::StildeD1(Everywhere)\n WRITES: GiRaFFE_HO::StildeD2(Everywhere)\n} \"Applies the FFE condition B^2>E^2 and recomputes the velocities\"\n\n# Copy data to other timelevels.\n#schedule GiRaFFE_HO_PostPostInitial__Copy_Timelevels in GiRaFFE_PostPostInitial as mhdpostid after initial_driver_A_to_B# after p2c\nschedule GiRaFFE_HO_PostPostInitial__Copy_Timelevels in SetTmunu as mhdpostid# after initial_driver_A_to_B \n{\n READS: GiRaFFE_HO::AD0(Everywhere)\n READS: GiRaFFE_HO::AD1(Everywhere)\n READS: GiRaFFE_HO::AD2(Everywhere)\n READS: GiRaFFE_HO::psi6Phi(Everywhere)\n READS: GiRaFFE_HO::ValenciavU0(Everywhere)\n READS: GiRaFFE_HO::ValenciavU1(Everywhere)\n READS: GiRaFFE_HO::ValenciavU2(Everywhere)\n READS: GiRaFFE_HO::StildeD0(Everywhere)\n READS: GiRaFFE_HO::StildeD1(Everywhere)\n READS: GiRaFFE_HO::StildeD2(Everywhere)\n LANG: C\n} \"Compute post-initialdata quantities\"\n# FIXME: This is getting run too many times, even during the evolution!", "Overwriting GiRaFFE_HO/schedule.ccl\n" ] ], [ [ "This yields the following, as output by the toolkit itself, with some extra formatting:\n\n 1. GiRaFFE_HO::GiRaFFE_HO_RegisterVars: [meta] Register Variables for MoL\n 1. GROUP GiRaFFE_Initial: Schedule GiRaFFE functions in HydroBase_Initial\n 1. GiRaFFEfood_HO::GiRaFFE_Food: Initial data for GiRaFFE\n 1. GiRaFFE_HO::initial_driver_A_to_B: Calculates the B-field from the vector potential\n 1. GiRaFFE_HO::mhdpostid: Compute post-initialdata quantities\n 1. GROUP GiRaFFE_ID_Converter: Translate ET-generated, HydroBase-compatible initial data and convert into variables used by GiRaFFE\n 1. GiRaFFEfood_HO::first_initialdata: [local] Convert HydroBase initial data (ID) to ID that GiRaFFE can read.\n 1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations\n 1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions\n 1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential\n 1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations\n 1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions\n 1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential\n 1. GROUP GiRaFFE_PostPostInitial: HydroBase_Con2Prim in CCTK_POSTPOSTINITIAL sets conserv to prim then outer boundaries (OBs, which are technically disabled). The post OB SYNCs actually reprolongate the conservative variables, making cons and prims INCONSISTENT. So here we redo the con2prim, avoiding the SYNC afterward, then copy the result to other timelevels\n 1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations\n 1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions\n 1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential\n 1. GiRaFFE_HO::GiRaFFE_to_HydroBase: Converts the GiRaFFE variables to HydroBase variables\n 1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations\n 1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions\n 1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential\n 1. GiRaFFE_HO::HydroBase_to_GiRaFFE: Converts the HydroBase variables to GiRaFFE variables\n 1. GiRaFFE_HO::GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS: Sets prerequisite quantities for the GiRaFFE right-hand sides\n 1. 
GiRaFFE_HO::GiRaFFE_HO_Evolution: Sets the GiRaFFE right-hand sides\n 1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations\n 1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions\n 1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential\n 1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions\n 1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations\n 1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions\n 1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential\n 1. GiRaFFE_HO::GiRaFFE_to_HydroBase: Converts the GiRaFFE variables to HydroBase variables\n 1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations\n 1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions\n 1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential", "_____no_output_____" ], [ "<a id='einstein_list'></a>\n\n## Step 2.f: Add the C file to Einstein Toolkit compilation list \\[Back to [top](#toc)\\]\n\nWe will also need `make.code.defn`, which indicates the list of files that need to be compiled. \n$$\\label{einstein_list}$$", "_____no_output_____" ] ], [ [ "%%writefile GiRaFFE_HO/src/make.code.defn\nSRCS = GiRaFFE.c driver_conserv_to_prims_FFE.C \\\n GiRaFFE_HydroBase_conversion.c \\\n postpostinitial__copy_timelevels.c", "Overwriting GiRaFFE_HO/src/make.code.defn\n" ] ], [ [ "<a id='latex_pdf_output'></a>\n\n# Step 3: Output this notebook to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename\n[Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.pdf](Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)", "_____no_output_____" ] ], [ [ "!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.ipynb\n!pdflatex -interaction=batchmode Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.tex\n!pdflatex -interaction=batchmode Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.tex\n!pdflatex -interaction=batchmode Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.tex\n!rm -f Tut*.out Tut*.aux Tut*.log", "[NbConvertApp] Converting notebook Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.ipynb to latex\n[NbConvertApp] Writing 292250 bytes to Tutorial-ETK_thorn-GiRaFFE_Higher_Order_v2.tex\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec75257eee8aebf064776503047e710f4e6e8644
334,091
ipynb
Jupyter Notebook
2020.1/Grupo 5/code/Mercado_financeiro_acoes_mineracao.ipynb
prof-alexandre-maciel/mineracao-de-dados
1bff364077f3d423e51d88f3b9cdb1765ab34023
[ "MIT" ]
1
2021-12-14T22:42:29.000Z
2021-12-14T22:42:29.000Z
2020.1/Grupo 5/code/Mercado_financeiro_acoes_mineracao.ipynb
prof-alexandre-maciel/mineracao-de-dados
1bff364077f3d423e51d88f3b9cdb1765ab34023
[ "MIT" ]
null
null
null
2020.1/Grupo 5/code/Mercado_financeiro_acoes_mineracao.ipynb
prof-alexandre-maciel/mineracao-de-dados
1bff364077f3d423e51d88f3b9cdb1765ab34023
[ "MIT" ]
null
null
null
81.366537
167,456
0.696523
[ [ [ "# **Instalação e importação de bibliotecas** ", "_____no_output_____" ] ], [ [ "#Instalar bibliotecas\n!pip install fundamentus\n!pip install yfinance\n!pip install dtale\n!pip install technical_indicators_lib\n!pip install ta\n!pip install yfinance\n!pip install technical_indicators_lib\n!pip install bioinfokit", "Requirement already satisfied: fundamentus in c:\\users\\kelly\\anaconda3\\lib\\site-packages (0.2.0)\nRequirement already satisfied: requests>=2.25.1 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from fundamentus) (2.25.1)\nRequirement already satisfied: requests-cache>=0.5.2 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from fundamentus) (0.6.3)\nRequirement already satisfied: pandas>=1.1.5 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from fundamentus) (1.2.4)\nRequirement already satisfied: lxml>=4.6.2 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from fundamentus) (4.6.3)\nRequirement already satisfied: tabulate>=0.8.7 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from fundamentus) (0.8.9)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests>=2.25.1->fundamentus) (1.25.9)\nRequirement already satisfied: chardet<5,>=3.0.2 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests>=2.25.1->fundamentus) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests>=2.25.1->fundamentus) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests>=2.25.1->fundamentus) (2020.6.20)\nRequirement already satisfied: itsdangerous in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests-cache>=0.5.2->fundamentus) (1.1.0)\nRequirement already satisfied: url-normalize>=1.4 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests-cache>=0.5.2->fundamentus) (1.4.3)\nRequirement already satisfied: pytz>=2017.3 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from pandas>=1.1.5->fundamentus) (2020.1)\nRequirement already satisfied: python-dateutil>=2.7.3 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from pandas>=1.1.5->fundamentus) (2.8.1)\nRequirement already satisfied: numpy>=1.16.5 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from pandas>=1.1.5->fundamentus) (1.18.5)\nRequirement already satisfied: six in c:\\users\\kelly\\appdata\\roaming\\python\\python38\\site-packages (from url-normalize>=1.4->requests-cache>=0.5.2->fundamentus) (1.13.0)\nRequirement already satisfied: yfinance in c:\\users\\kelly\\anaconda3\\lib\\site-packages (0.1.59)\nRequirement already satisfied: multitasking>=0.0.7 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from yfinance) (0.0.9)\nRequirement already satisfied: requests>=2.20 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from yfinance) (2.25.1)\nRequirement already satisfied: lxml>=4.5.1 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from yfinance) (4.6.3)\nRequirement already satisfied: numpy>=1.15 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from yfinance) (1.18.5)\nRequirement already satisfied: pandas>=0.24 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from yfinance) (1.2.4)\nRequirement already satisfied: certifi>=2017.4.17 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests>=2.20->yfinance) (2020.6.20)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests>=2.20->yfinance) 
(1.25.9)\nRequirement already satisfied: chardet<5,>=3.0.2 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests>=2.20->yfinance) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests>=2.20->yfinance) (2.10)\nRequirement already satisfied: pytz>=2017.3 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from pandas>=0.24->yfinance) (2020.1)\nRequirement already satisfied: python-dateutil>=2.7.3 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from pandas>=0.24->yfinance) (2.8.1)\nRequirement already satisfied: six>=1.5 in c:\\users\\kelly\\appdata\\roaming\\python\\python38\\site-packages (from python-dateutil>=2.7.3->pandas>=0.24->yfinance) (1.13.0)\nRequirement already satisfied: dtale in c:\\users\\kelly\\anaconda3\\lib\\site-packages (1.45.0)\nRequirement already satisfied: squarify in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.4.3)\nRequirement already satisfied: Flask>=1.0 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (1.1.2)\nRequirement already satisfied: openpyxl; python_version >= \"3.0\" in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (3.0.4)\nRequirement already satisfied: Flask-Compress in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (1.9.0)\nRequirement already satisfied: missingno in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.4.2)\nRequirement already satisfied: seaborn; python_version >= \"3.6\" in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.10.1)\nRequirement already satisfied: flask-ngrok; python_version > \"3.0\" in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.0.25)\nRequirement already satisfied: ppscore; python_version >= \"3.6\" in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (1.2.0)\nRequirement already satisfied: xarray; python_version >= \"3.0\" in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.18.0)\nRequirement already satisfied: future>=0.14.0 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.18.2)\nRequirement already satisfied: dash-daq in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.5.0)\nRequirement already satisfied: requests in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (2.25.1)\nRequirement already satisfied: et-xmlfile; python_version >= \"3.6\" in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (1.0.1)\nRequirement already satisfied: dash-colorscales in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.0.4)\nRequirement already satisfied: itsdangerous in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (1.1.0)\nRequirement already satisfied: six in c:\\users\\kelly\\appdata\\roaming\\python\\python38\\site-packages (from dtale) (1.13.0)\nRequirement already satisfied: xlrd in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (1.2.0)\nRequirement already satisfied: strsimpy in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.2.0)\nRequirement already satisfied: kaleido; python_version > \"3.6\" in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.2.1)\nRequirement already satisfied: plotly>=4.9.0 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (4.14.3)\nRequirement already satisfied: statsmodels; python_version > \"3.0\" in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.11.1)\nRequirement already satisfied: lz4; python_version > \"3.0\" in 
c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (3.1.3)\nRequirement already satisfied: dash>=1.5.0 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (1.20.0)\nRequirement already satisfied: scipy in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (1.5.0)\nRequirement already satisfied: dash-bootstrap-components; python_version > \"3.0\" in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.12.2)\nRequirement already satisfied: scikit-learn in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (0.23.1)\nRequirement already satisfied: pandas in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (1.2.4)\nRequirement already satisfied: networkx; python_version >= \"3.0\" in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dtale) (2.4)\nRequirement already satisfied: click>=5.1 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from Flask>=1.0->dtale) (7.1.2)\nRequirement already satisfied: Jinja2>=2.10.1 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from Flask>=1.0->dtale) (2.11.2)\nRequirement already satisfied: Werkzeug>=0.15 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from Flask>=1.0->dtale) (1.0.1)\nRequirement already satisfied: jdcal in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from openpyxl; python_version >= \"3.0\"->dtale) (1.4.1)\nRequirement already satisfied: brotli in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from Flask-Compress->dtale) (1.0.9)\nRequirement already satisfied: numpy in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from missingno->dtale) (1.18.5)\nRequirement already satisfied: matplotlib in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from missingno->dtale) (3.2.2)\nRequirement already satisfied: setuptools>=40.4 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from xarray; python_version >= \"3.0\"->dtale) (49.2.0.post20200714)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests->dtale) (1.25.9)\nRequirement already satisfied: certifi>=2017.4.17 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests->dtale) (2020.6.20)\nRequirement already satisfied: idna<3,>=2.5 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests->dtale) (2.10)\nRequirement already satisfied: chardet<5,>=3.0.2 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from requests->dtale) (3.0.4)\nRequirement already satisfied: retrying>=1.3.3 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from plotly>=4.9.0->dtale) (1.3.3)\nRequirement already satisfied: patsy>=0.5 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from statsmodels; python_version > \"3.0\"->dtale) (0.5.1)\nRequirement already satisfied: dash-core-components==1.16.0 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dash>=1.5.0->dtale) (1.16.0)\nRequirement already satisfied: dash-html-components==1.1.3 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dash>=1.5.0->dtale) (1.1.3)\nRequirement already satisfied: dash-table==4.11.3 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dash>=1.5.0->dtale) (4.11.3)\nRequirement already satisfied: dash-renderer==1.9.1 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from dash>=1.5.0->dtale) (1.9.1)\nRequirement already satisfied: joblib>=0.11 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from scikit-learn->dtale) (0.16.0)\nRequirement already satisfied: threadpoolctl>=2.0.0 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from scikit-learn->dtale) 
(2.1.0)\nRequirement already satisfied: pytz>=2017.3 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from pandas->dtale) (2020.1)\nRequirement already satisfied: python-dateutil>=2.7.3 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from pandas->dtale) (2.8.1)\nRequirement already satisfied: decorator>=4.3.0 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from networkx; python_version >= \"3.0\"->dtale) (4.4.2)\nRequirement already satisfied: MarkupSafe>=0.23 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from Jinja2>=2.10.1->Flask>=1.0->dtale) (1.1.1)\nRequirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from matplotlib->missingno->dtale) (1.2.0)\nRequirement already satisfied: cycler>=0.10 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from matplotlib->missingno->dtale) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in c:\\users\\kelly\\anaconda3\\lib\\site-packages (from matplotlib->missingno->dtale) (2.4.7)\n" ], [ "#Importar pacotes\nimport fundamentus\nimport yfinance as yf\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport dtale\nimport dtale.app as dtale_app\nfrom sklearn.manifold import TSNE\nimport seaborn as sns \nfrom bioinfokit.visuz import cluster\nfrom sklearn.cluster import DBSCAN\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom ta import add_all_ta_features\nimport technical_indicators_lib as ti\nfrom sklearn import preprocessing\nfrom ta.utils import dropna\nimport time\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import auc\nfrom scipy.stats import wilcoxon, friedmanchisquare, rankdata\nfrom scipy.stats import f_oneway\nimport warnings\nfrom IPython.display import Image\nfrom sklearn.cluster import KMeans #para usar o KMeans\nfrom sklearn.metrics import davies_bouldin_score, silhouette_score\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn import preprocessing\nfrom keras.models import Sequential\nfrom sklearn.metrics import mean_squared_error\nimport math\nfrom pandas import read_csv\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import f_regression\nfrom numpy import array\nfrom tensorflow.keras.layers import Dense\nfrom sklearn.metrics import r2_score", "2021-05-13 12:09:46,688 [logging.log_init] INFO: LOGLEVEL=INFO\n" ] ], [ [ "# **Importar dados**", "_____no_output_____" ] ], [ [ "info_papel_fit_no_missing_table = pd.read_csv('info_papel_fit_no_missing_table.csv')\ndf2 = info_papel_fit_no_missing_table\ndf2", "_____no_output_____" ] ], [ [ "# Pré- processamento dos dados", "_____no_output_____" ] ], [ [ "# Extrair indicadores fundamentalistas filtrados com os stakeholders\ndf_papeis_fit_new = df2[['Cres_Rec_5a', 'Div_Br_Patrim',\n 'Div_Bruta', 'Div_Liquida', 'Div_Yield', 
'EV_EBIT', 'EV_EBITDA', 'LPA',\n 'Marg_Bruta', 'Marg_Liquida', 'PEBIT', 'PL', 'PVP', 'ROE', 'ROIC',\n 'Valor_de_mercado', 'VPA']]\n\n# Normalização\ndf_papeis_fit_norm = (df_papeis_fit_new-df_papeis_fit_new.min())/(df_papeis_fit_new.max()-df_papeis_fit_new.min())\ndf_papeis_fit_norm.fillna(value = 0, inplace = True)\n\n\ndf_papeis_fit_norm", "_____no_output_____" ] ], [ [ "# Análise descritiva dos dados", "_____no_output_____" ] ], [ [ "# **Análise Descritiva dos dados**\n\ndtale_app.USE_NGROK = True\n\nd_tale_page = dtale.show(df_papeis_fit_norm, ignore_duplicate=True)\nd_tale_page", "_____no_output_____" ] ], [ [ "# Clusterização ", "_____no_output_____" ] ], [ [ "####### Para muitas colunas\npca_scores = PCA().fit_transform(df_papeis_fit_norm)\n\n# create a dataframe of pca_scores\ndf_pc = pd.DataFrame(pca_scores)\n\ntsne_em = TSNE(n_components=2, perplexity=40.0, early_exaggeration=12, n_iter=2500, learning_rate=368, verbose=1).fit_transform(df_pc)\n\nget_clusters = DBSCAN(eps=2.5, min_samples=10).fit_predict(tsne_em)\nset(get_clusters)\n\n# get t-SNE plot with colors assigned to each cluster\ncluster.tsneplot(score=tsne_em, colorlist=get_clusters, \n colordot=('#713e5a', '#63a375', '#edc79b', '#d57a66', '#ca6680', '#395B50', '#92AFD7', '#b0413e', '#4381c1', '#736ced', '#631a86', '#de541e', '#022b3a', '#000000'), \n legendpos='upper right', legendanchor=(1.15, 1))", "[t-SNE] Computing 121 nearest neighbors...\n[t-SNE] Indexed 378 samples in 0.001s...\n[t-SNE] Computed neighbors for 378 samples in 0.014s...\n[t-SNE] Computed conditional probabilities for sample 378 / 378\n[t-SNE] Mean sigma: 0.032090\n[t-SNE] KL divergence after 250 iterations with early exaggeration: 60.704971\n[t-SNE] KL divergence after 2000 iterations: 0.406670\n" ] ], [ [ "# Resultados da Clusterização DBSCAN", "_____no_output_____" ] ], [ [ "'''\nParâmetros\n* Número mínimo de amostras: 10\n* Distância Máxima: 3\n* Métrica: Euclidiana\n* Taxa de aprendizado: 368\n* N de Interações: 2500\n'''\nImage(\"tsne_2d.png\", width=1000, height=600)", "_____no_output_____" ], [ "# Variação dos parâmetros para testes\n'''\nParâmetros\n* Taxa de aprendizado: 468\n* 2500 interações\n* Número mínimo de amostras: 10\n* Distância Máxima: 3\n* Métrica: Euclidiana\n'''\n#Image(\"468ninter2500.png\", width=1000, height=600)\n\n'''\nParâmetros:\n* Métrica: CityBlock\n* Taxa de Aprendizado: 368\n* N de Interações: 2000\n* Número mínimo de amostras: 10\n* Distância Máxima: 3\n'''\n#Image(\"cityblock.png\", width=1000, height=600)\n \n'''\nParâmetros:\n* Taxa de Aprendizado: 368\n* N de Interações: 2000\n* Métrica Euclidiana\n* Número min de amostras: 15\n* Distância Máxima: 4\n'''\n#Image(\"15samples4eps.png\", width=1000, height=600)\n\n'''\nParâmetros:\n* Número mínimo de amostras: 10\n* Distância Máxima: 3\n* Métrica: Euclidiana\n* Taxa de Aprendizado: 300\n* Numero de Interações: 1000\n'''\n#Image(\"learning_rate300ninter1000.png\", width=1000, height=600)\n\n\n# Recolocando colunas indicando papel, setor, subsetor e cotação.\nCategorias = pd.DataFrame({'Categorias': get_clusters})\ndf2['Categorias']= get_clusters\ndf2[['Categorias','Papel', 'Setor','Subsetor','Cotacao']].sort_values('Categorias')\n\n# Renomear os clusters\nRuido = df2.loc[df2['Categorias'] == -1]\nCat_A = df2.loc[df2['Categorias'] == 0]\nCat_B = df2.loc[df2['Categorias'] == 1]\nCat_C = df2.loc[df2['Categorias'] == 2]\nCat_D = df2.loc[df2['Categorias'] == 3]\nCat_E = df2.loc[df2['Categorias'] == 4]\n\nruido = pd.DataFrame({'Categoria': 'Ruido', 
'Papeis':Ruido.Papel.to_list(), 'Subsetor':Ruido.Subsetor.to_list()})\ncat_a = pd.DataFrame({'Categoria': 'A', 'Papeis':Cat_A.Papel.to_list(), 'Subsetor':Cat_A.Subsetor.to_list()})\ncat_b = pd.DataFrame({'Categoria': 'B', 'Papeis':Cat_B.Papel.to_list(), 'Subsetor':Cat_B.Subsetor.to_list()})\ncat_c = pd.DataFrame({'Categoria': 'C', 'Papeis':Cat_C.Papel.to_list(), 'Subsetor':Cat_C.Subsetor.to_list()})\ncat_d = pd.DataFrame({'Categoria': 'D', 'Papeis':Cat_D.Papel.to_list(), 'Subsetor':Cat_D.Subsetor.to_list()})\ncat_e = pd.DataFrame({'Categoria': 'E', 'Papeis':Cat_E.Papel.to_list(), 'Subsetor':Cat_E.Subsetor.to_list()})\n\nprint(ruido.shape)\nprint(cat_a.shape)\nprint(cat_b.shape)\nprint(cat_c.shape)\nprint(cat_d.shape)\nprint(cat_e.shape)", "(48, 3)\n(30, 3)\n(254, 3)\n(18, 3)\n(17, 3)\n(11, 3)\n" ] ], [ [ "# **Regressão e ranking de indicadores**", "_____no_output_____" ] ], [ [ "#prediction multiple steps\n# split a multivariate sequence into samples\ndef split_sequences(sequences, n_steps_in, n_steps_out):\n\tX, y = list(), list()\n\tfor i in range(len(sequences)):\n\t\t# find the end of this pattern\n\t\tend_ix = i + n_steps_in\n\t\tout_end_ix = end_ix + n_steps_out-1\n\t\t# check if we are beyond the dataset\n\t\tif out_end_ix > len(sequences):\n\t\t\tbreak\n\t\t# gather input and output parts of the pattern\n\t\tseq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1:out_end_ix, -1]\n\t\tX.append(seq_x)\n\t\ty.append(seq_y)\n\treturn array(X), array(y)", "_____no_output_____" ], [ "# Função de importação de séries históricas e cálculo de indicadores EMAs, SMAs, ATR, CCI, ROC, RSI e WCL\ndef import_and_prepare_paper(paper, columns_name=None, verbose=False):\n temp_df = yf.download(f\"{paper}.SA\")\n print(f\"Papel: {paper}\")\n temp_df = temp_df.reset_index()\n temp_df = temp_df[['Open', 'High', 'Low', 'Adj Close', 'Volume', 'Close']]\n df = temp_df\n\n lista = [10,20,50,100,200]\n for x in lista:\n df[f'EMA_{x}'] = df.Close.ewm(span=x).mean().fillna(0)\n df[f'SMA_{x}'] = ti.sma.get_value_list(df.Close ,x)\n df['ATR'] = ti.atr.get_value_list(df.High,df.Low,df.Close)\n df['CCI'] = ti.cci.get_value_list(df.High,df.Low,df.Close)\n df['ROC'] = ti.roc.get_value_list(df.Close)\n df['RSI'] = ti.rsi.get_value_list(df.Close)\n df['WCL'] = ti.wcl.get_value_list(df.High, df.Low, df.Close)\n\n with pd.option_context('mode.use_inf_as_null', True):\n df = df.dropna()\n\n df_aux = df.drop(['Close'],axis=1)\n df_aux['Close'] = temp_df[['Close']]\n temp_df = df_aux\n\n if verbose:\n print(df)\n min_max_scaler = preprocessing.MinMaxScaler(feature_range = (0,1))\n df = min_max_scaler.fit_transform(df_aux)\n if verbose:\n print(df)\n\n return df, temp_df", "_____no_output_____" ], [ "# Processo de treinamento, teste, definição dos modelos de regressão e criação da base final \n# para observação e extração de informações\n\nmetrics = {}\ncount = 63\npapeis = info_papel_fit_no_missing_table.Papel\n\n# Percorre todos os 378 papéis\nfor a in papeis:\n df, temp_df = import_and_prepare_paper(a)\n df1=df\n train_size = int(len(df1) * 0.70)\n test_size = len(df1) - train_size\n train, test = df1[0:train_size,:], df1[train_size:len(df1),:]\n # choose a number of time steps\n n_steps_in, n_steps_out = 10, 1\n # convert into input/output\n x_train1, y_train = split_sequences(train, n_steps_in, n_steps_out)\n x_test1, y_test = split_sequences(test, n_steps_in, n_steps_out)\n # flatten input\n n_input = x_train1.shape[1] * x_train1.shape[2]\n x_train = x_train1.reshape((x_train1.shape[0], n_input))\n #definir 
modelo\n model = Sequential()\n model.add(Dense(300, activation='relu', input_dim=n_input))\n model.add(Dense(n_steps_out))\n model.compile(optimizer='adam', loss='mse')\n # desfazer normalização\n #precisa rodar para cada um dos papeis 1x\n min_max_scaler1 = preprocessing.MinMaxScaler(feature_range = (0,1))\n df_suporte = min_max_scaler1.fit_transform(temp_df[['Close']])\n print(count)\n count = count - 1\n\n # Percorre o range escolhido para obter scores RMSE de regressão com todos os indicadores\n # e avaliação dos indicadores com o RandomForestRegressor\n for x in range(0,2):\n\n if f'RMSE_{x}' not in metrics:\n metrics[f'RMSE_{x}'] = list()\n \n # treinamento\n model.fit(x_train, y_train, epochs=500, verbose=0)\n\n # flatten input\n n_input_test = x_test1.shape[1] * x_test1.shape[2]\n x_test = x_test1.reshape((x_test1.shape[0], n_input_test))\n\n trainPredict = model.predict(x_train)\n testPredict = model.predict(x_test)\n\n # invert predictions\n train_Predict = min_max_scaler1.inverse_transform(trainPredict)\n trainY = min_max_scaler1.inverse_transform(y_train)\n test_Predict = min_max_scaler1.inverse_transform(testPredict)\n # print(test_Predict)\n testY = min_max_scaler1.inverse_transform(y_test)\n\n # calculate root mean squared error\n testScore = math.sqrt(mean_squared_error(testY[0], test_Predict[0,:]))\n print('Test Score: %.2f RMSE' % (testScore))\n \n #salvar metrica\n metrics[f'RMSE_{x}'].append(testScore)\n\n # fit random forest model\n model1 = RandomForestRegressor(n_estimators=500, random_state=1)\n model1.fit(x_train[:,0:20], y_train.ravel())\n\n # plot importance scores\n names = temp_df.columns.values[0:-1]\n ticks = [i for i in range(len(names))]\n '''\n plt.figure(figsize=(20, 6))\n plt.bar(ticks, model1.feature_importances_)\n plt.xticks(ticks, names)\n plt.show()\n '''\n #seleciona % das melhores features \n array_ = np.array(model1.feature_importances_)\n ind = array_.argsort()[-5:][::-1]\n columns= temp_df.columns.values[0:-1][ind]\n columns =columns.tolist()\n print(columns)\n\n # get filtered dataframe\n df2 = temp_df.filter(columns, axis=1)\n df2['Close'] = temp_df['Close']\n\n #run new prediction with the filtered dataframe\n min_max_scaler2 = preprocessing.MinMaxScaler(feature_range = (0,1))\n df3 = min_max_scaler2.fit_transform(df2)\n train_size = int(len(df3) * 0.70)\n test_size = len(df3) - train_size\n train, test = df3[0:train_size,:], df3[train_size:len(df3),:]\n\n # convert into input/output\n x_train1, y_train = split_sequences(train, n_steps_in, n_steps_out)\n x_test1, y_test = split_sequences(test, n_steps_in, n_steps_out)\n # flatten input\n n_input = x_train1.shape[1] * x_train1.shape[2]\n x_train = x_train1.reshape((x_train1.shape[0], n_input))\n #definir modelo\n\n model3 = Sequential()\n model3.add(Dense(300, activation='relu', input_dim=n_input))\n model3.add(Dense(n_steps_out))\n model3.compile(optimizer='adam', loss='mse')\n\n min_max_scaler3 = preprocessing.MinMaxScaler(feature_range = (0,1))\n df_suporte = min_max_scaler3.fit_transform(temp_df[['Close']])\n\n # Verifica se determinada coluna de indicador técnico já existe na tabela, \n # e marca 1 se estiver no top 5 (25%) do papel e 0 caso contrário.\n for col in temp_df.columns:\n if col not in metrics:\n metrics[col] = list()\n if col in columns:\n metrics[col].append(1)\n else:\n metrics[col].append(0)\n\n # Percorre o range escolhido para obter scores RMSE de regressão com somente os 5 (25%) indicadores\n # melhores avaliados pelo RandomForestRegressor\n for x in range(0,2):\n if 
f'RMSE2_{x}' not in metrics:\n metrics[f'RMSE2_{x}'] = list()\n\n # treinamento\n model3.fit(x_train, y_train, epochs=500, verbose=0)\n\n # flatten input\n n_input_test = x_test1.shape[1] * x_test1.shape[2]\n x_test = x_test1.reshape((x_test1.shape[0], n_input_test))\n\n trainPredict2 = model3.predict(x_train)\n testPredict2 = model3.predict(x_test)\n \n # invert predictions\n test_Predict2 = min_max_scaler3.inverse_transform(testPredict2)\n testY2 = min_max_scaler3.inverse_transform(y_test)\n\n # calculate root mean squared error\n testScore = math.sqrt(mean_squared_error(testY2[0], test_Predict2[0,:]))\n print('Test Score: %.2f RMSE' % (testScore))\n metrics[f'RMSE2_{x}'].append(testScore)", "2021-05-12 00:30:30,401 [utils._init_num_threads] INFO: NumExpr defaulting to 2 threads.\n" ], [ "# Extrai o dicionário com os resultados e armazena em um arquivo csv.\ndf_metrics = pd.DataFrame.from_dict(metrics)\ndf_metrics.insert(0, 'Papel', papeis.to_list())\n#df_metrics.to_csv('/metrics.csv')", "_____no_output_____" ], [ "complete_df = pd.read_csv('info_papel_fit_no_missing_table.csv')\n\n# A regressão foi realizada separadamente em diferentes plataformas paralelamente, por conta do alto tempo de processamento\n# para 378 papéis.\n\n# Unir todos os arquivos processados paralelamente.\n\ndf1 = complete_df.loc[0:50]\nmetrics_df1 = pd.read_csv('/content/drive/My Drive/Master/MD/Data_Finance/metrics_0_50.csv')\nmetrics_df1['Setor'] = df1['Setor'].to_list()\nmetrics_df1['Subsetor'] = df1['Subsetor'].to_list()\n\ndf2 = complete_df.loc[51:99]\nmetrics_df2 = pd.read_csv('/content/drive/My Drive/Master/MD/Data_Finance/metrics_51_100.csv')\nmetrics_df2 = metrics_df2[:len(metrics_df2)-1]\nmetrics_df2['Papel'] = df2['Papel'].to_list()\nmetrics_df2['Setor'] = df2['Setor'].to_list()\nmetrics_df2['Subsetor'] = df2['Subsetor'].to_list()\n\ndf3 = complete_df.loc[100:126]\nmetrics_df3 = pd.read_csv('/content/drive/My Drive/Master/MD/Data_Finance/metrics_100_127.csv')\nmetrics_df3 = metrics_df3[:len(metrics_df3)-1]\nmetrics_df3['Papel'] = df3['Papel'].to_list()\nmetrics_df3['Setor'] = df3['Setor'].to_list()\nmetrics_df3['Subsetor'] = df3['Subsetor'].to_list()\n\ndf4 = complete_df.loc[127:190]\nmetrics_df4 = pd.read_csv('/content/drive/My Drive/Master/MD/Data_Finance/metrics_127_190.csv')\nmetrics_df4['Papel'] = df4['Papel'].to_list()\nmetrics_df4['Setor'] = df4['Setor'].to_list()\nmetrics_df4['Subsetor'] = df4['Subsetor'].to_list()\n\ndf5 = complete_df.loc[191:252]\nmetrics_df5 = pd.read_csv('/content/drive/My Drive/Master/MD/Data_Finance/metrics_191_252.csv')\nmetrics_df5['Papel'] = df5['Papel'].to_list()\nmetrics_df5['Setor'] = df5['Setor'].to_list()\nmetrics_df5['Subsetor'] = df5['Subsetor'].to_list()\n\ndf6 = complete_df.loc[253:337]\nmetrics_df6 = pd.read_csv('/content/drive/My Drive/Master/MD/Data_Finance/metrics_253_337.csv')\nmetrics_df6['Papel'] = df6['Papel'].to_list()\nmetrics_df6['Setor'] = df6['Setor'].to_list()\nmetrics_df6['Subsetor'] = df6['Subsetor'].to_list()\n\ndf7 = complete_df.loc[338:378]\nmetrics_df7 = pd.read_csv('/content/drive/My Drive/Master/MD/Data_Finance/metrics_338_378.csv')\nmetrics_df7['Papel'] = df7['Papel'].to_list()\nmetrics_df7['Setor'] = df7['Setor'].to_list()\nmetrics_df7['Subsetor'] = df7['Subsetor'].to_list()\n\nmetrics_df = metrics_df1\nmetrics_df = metrics_df.append(metrics_df2)\nmetrics_df = metrics_df.append(metrics_df3)\nmetrics_df = metrics_df.append(metrics_df4)\nmetrics_df = metrics_df.append(metrics_df5)\nmetrics_df = metrics_df.append(metrics_df6)\nmetrics_df = 
metrics_df.append(metrics_df7)\n\nmetrics_df = metrics_df.set_index('Papel')\nmetrics_df = metrics_df.reset_index()\n\n#Armazena os resultados em um arquivo\n#metrics_df.to_csv('metrics_df_all.csv')\n\nmetrics_df.sort_values(by=['Setor'])\n\n# Tratamento para visualização.\nindicators_by_section_df1 = indicators_by_section_df[['nPapeis','Open', 'High', 'Low', 'Adj Close', 'Volume', 'EMA_10', 'SMA_10', 'EMA_20', 'SMA_20', 'EMA_50', 'SMA_50', 'EMA_100', 'SMA_100', 'EMA_200', 'SMA_200', 'ATR', 'CCI', 'ROC', 'RSI', 'WCL', 'Close']]\nindicators_by_section_df1.sort_values(by=['nPapeis','Open', 'High', 'Low', 'Adj Close', 'Volume', 'EMA_10', 'SMA_10', 'EMA_20', 'SMA_20', 'EMA_50', 'SMA_50', 'EMA_100', 'SMA_100', 'EMA_200', 'SMA_200', 'ATR', 'CCI', 'ROC', 'RSI', 'WCL', 'Close'], inplace=True, ascending=False)\n", "_____no_output_____" ], [ "# Agrupamento por setor\nmetrics_df = pd.read_csv('metrics_df_all_cat_cluster.csv')\n\nindicators_by_section_df2 = metrics_df[['Papel','Setor']].groupby('Setor').count()\nindicators_by_section_df = metrics_df.groupby('Setor').sum()\nindicators_by_section_df['nPapeis'] = indicators_by_section_df2['Papel']\n\nindicators_by_section_df1 = indicators_by_section_df[['nPapeis','Open', 'High', 'Low', 'Adj Close', 'Volume', 'EMA_10', 'SMA_10', 'EMA_20', 'SMA_20', 'EMA_50', 'SMA_50', 'EMA_100', 'SMA_100', 'EMA_200', 'SMA_200', 'ATR', 'CCI', 'ROC', 'RSI', 'WCL', 'Close']]\nindicators_by_section_df1.sort_values(by=['nPapeis','Open', 'High', 'Low', 'Adj Close', 'Volume', 'EMA_10', 'SMA_10', 'EMA_20', 'SMA_20', 'EMA_50', 'SMA_50', 'EMA_100', 'SMA_100', 'EMA_200', 'SMA_200', 'ATR', 'CCI', 'ROC', 'RSI', 'WCL', 'Close'], inplace=True, ascending=False)\n", "<ipython-input-24-9606c2d87c2a>:9: SettingWithCopyWarning:\n\n\nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n\n" ], [ "# Agrupamento por cluster\nmetrics_df = pd.read_csv('metrics_df_all_cat_cluster.csv')\n\nindicators_by_section_df2 = metrics_df[['Papel','Cat_Cluster']].groupby('Cat_Cluster').count()\nindicators_by_section_df = metrics_df.groupby('Cat_Cluster').sum()\nindicators_by_section_df['nPapeis'] = indicators_by_section_df2['Papel']\n\nindicators_by_section_df3 = indicators_by_section_df[['nPapeis','Open', 'High', 'Low', 'Adj Close', 'Volume', 'EMA_10', 'SMA_10', 'EMA_20', 'SMA_20', 'EMA_50', 'SMA_50', 'EMA_100', 'SMA_100', 'EMA_200', 'SMA_200', 'ATR', 'CCI', 'ROC', 'RSI', 'WCL', 'Close']]\nindicators_by_section_df3.sort_values(by=['nPapeis','Open', 'High', 'Low', 'Adj Close', 'Volume', 'EMA_10', 'SMA_10', 'EMA_20', 'SMA_20', 'EMA_50', 'SMA_50', 'EMA_100', 'SMA_100', 'EMA_200', 'SMA_200', 'ATR', 'CCI', 'ROC', 'RSI', 'WCL', 'Close'], inplace=True, ascending=False)\n", "<ipython-input-22-dfe7ab0564f5>:11: SettingWithCopyWarning:\n\n\nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n\n" ] ], [ [ "# Resultados das regressões e do ranking", "_____no_output_____" ] ], [ [ "metrics_df_all = pd.read_csv('metrics_df_all.csv')\n\n# Resultado total das regressões para cada papel, com os RMSE por rodada RMSE_X para todos os indicadores\n# e RMSE2_X para o top 5 (25%) de indicadores. 
Mais os indicadores usados na etapa 2 de regressão.\nmetrics_df_all", "_____no_output_____" ], [ "# Agrupamento por Cluster\nindicators_by_section_df3", "_____no_output_____" ], [ "# Agrupamento por Setor\nindicators_by_section_df1", "_____no_output_____" ], [ "# Bloco para rodar no Colab\n#dtale_app.USE_NGROK = True\n\n# Bloco para rodar no Jupyter\ndtale_app.USE_NGROK = False\n\ndescribe = indicators_by_section_df3.iloc[:,6:]\n#describe = indicators_by_section_df1.iloc[3:7,6:]\n\n# Utilização da biblioteca dTale para analise dos resultados.\nd_tale_page = dtale.show(describe, ignore_duplicate=True)\nd_tale_page", "2021-05-13 12:10:55,352 - INFO - NumExpr defaulting to 8 threads.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
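A note on the windowing step used in the regression cells of the record above: `split_sequences` slides a window of `n_steps_in` rows over the series, keeps every column except the last as features, and takes the last column (the Close price) at the end of the window as the target. A minimal standalone sketch of that behaviour — the toy array values here are invented purely for illustration and are not taken from the notebook's data:

```python
# Toy demonstration of the split_sequences windowing described above.
# The 6x3 array is invented for illustration: 2 feature columns + 1 target column.
import numpy as np

def split_sequences(sequences, n_steps_in, n_steps_out):
    X, y = [], []
    for i in range(len(sequences)):
        end_ix = i + n_steps_in
        out_end_ix = end_ix + n_steps_out - 1
        if out_end_ix > len(sequences):
            break
        X.append(sequences[i:end_ix, :-1])              # feature columns over the window
        y.append(sequences[end_ix - 1:out_end_ix, -1])  # target column at the window's end
    return np.array(X), np.array(y)

data = np.arange(18, dtype=float).reshape(6, 3)
X, y = split_sequences(data, n_steps_in=3, n_steps_out=1)
print(X.shape, y.shape)  # (4, 3, 2) (4, 1)
```

Each (3, 2) window is then flattened to a length-6 vector before being fed to the dense Keras model, which is what the notebook's `x_train1.reshape((x_train1.shape[0], n_input))` step does.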
ec75292cf81ddb23c99ddc288842c4cc60d299f3
40,451
ipynb
Jupyter Notebook
nbs/interpret_compare-models.ipynb
rsomani95/fastai2_extensions
200838de99dfe8846ba105e6a42c6183836c2f9d
[ "Apache-2.0" ]
13
2021-01-26T00:06:35.000Z
2021-09-16T23:07:52.000Z
nbs/interpret_compare-models.ipynb
rsomani95/fastai2_extensions
200838de99dfe8846ba105e6a42c6183836c2f9d
[ "Apache-2.0" ]
6
2021-01-20T13:44:29.000Z
2022-01-25T05:47:19.000Z
nbs/interpret_compare-models.ipynb
rsomani95/fastai2_extensions
200838de99dfe8846ba105e6a42c6183836c2f9d
[ "Apache-2.0" ]
1
2021-06-19T20:00:38.000Z
2021-06-19T20:00:38.000Z
114.917614
29,268
0.851203
[ [ [ "#default_exp interpret.compare", "_____no_output_____" ] ], [ [ "# Compare Models", "_____no_output_____" ], [ "> This module offers functions to compare two or more models' predictions. More specifically, you can easily extract the filenames of the images that all your models agree on.\n\n<br>\n<br>", "_____no_output_____" ] ], [ [ "#export\ntry:\n from fastai.vision.all import *\n from fastai.metrics import *\nexcept:\n from fastai2.vision.all import *\n from fastai2.metrics import *\nfrom typing import Collection, Tuple, List, Callable", "_____no_output_____" ], [ "#hide\nfrom torchvision.models import mobilenet_v2\n\npath_data = Path('/Users/rahulsomani/Desktop/shot-lighting-cast')\n\ndls1 = ImageDataLoaders.from_folder(path_data/'train', valid_pct=0.2, seed=42,\n item_tfms = Resize(size=224, method=ResizeMethod.Squish))\ndls2 = ImageDataLoaders.from_folder(path_data/'train', valid_pct=0.15, seed=42,\n item_tfms = Resize(size=224, method=ResizeMethod.Squish))\ndls3 = ImageDataLoaders.from_folder(path_data/'train', valid_pct=0.10, seed=42,\n item_tfms = Resize(size=224, method=ResizeMethod.Squish))\n\n\n\nclass ApplyPILFilter(RandTransform):pass\nlearn1 = load_learner(path_data/'fastai2-110-epoch-model.pkl');\nlearn1.dls = dls1\n\nlearn2 = load_learner(path_data/'fastai2-110-epoch-model.pkl');\nlearn2.dls = dls2\n\nlearn3 = load_learner(path_data/'fastai2-110-epoch-model.pkl');\nlearn3.dls = dls3\n", "/Users/rahulsomani/anaconda3/lib/python3.7/site-packages/torch/serialization.py:657: SourceChangeWarning: source code of class 'torch.nn.modules.linear.Linear' has changed. you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes.\n warnings.warn(msg, SourceChangeWarning)\n/Users/rahulsomani/anaconda3/lib/python3.7/site-packages/torch/serialization.py:657: SourceChangeWarning: source code of class 'torch.nn.modules.activation.ReLU' has changed. you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes.\n warnings.warn(msg, SourceChangeWarning)\n/Users/rahulsomani/anaconda3/lib/python3.7/site-packages/torch/serialization.py:657: SourceChangeWarning: source code of class 'torch.nn.modules.conv.Conv2d' has changed. 
you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes.\n warnings.warn(msg, SourceChangeWarning)\n" ], [ "#export\ndef intersection(sets:Collection):\n '`set.intersection` for a list of sets of any size'\n if len(sets) == 1: return sets\n res = sets[0]\n for x in sets[1:]: res=res.intersection(x)\n return res", "_____no_output_____" ], [ "sets = [set(('a','b','c','e')), set(('a','b','c','d')), set(('c','d','e'))]\nintersection(sets)", "_____no_output_____" ], [ "#export\nfrom matplotlib_venn import venn2, venn3\n\ndef compare_venn(interps:Collection,\n conf_level:Union[int,float,tuple],\n title=None, mode=('accurate','inaccurate'),\n return_fig=False, return_common=True,\n figsize=(10,6), set_color='tomato'):\n \"\"\"\n Compute the agreement between 2 or more models' predictions\n\n If you only input 2-3 models in `interps`, then you also get\n a venn diagram to visualise the agreement between these models\n\n Key Arguments\n =============\n * interps: a list or tuple of `ClassificationInterpretationEx` objects\n * mode: either 'accurate' or 'inaccurate' to filter predictions\n * conf_level: a single number or a tuple of (min,max) to filter the prediction confidence\n \"\"\"\n assert len(interps) > 1, 'Enter 2 or more models to compare'\n ### Extract fnames per model per label\n fnames_dict = defaultdict()\n for i,interp in enumerate(interps):\n name = f\"interp{i+1}\"\n fnames_dict[name] = {}.fromkeys(interp.dl.vocab)\n for label in fnames_dict[name].keys():\n fnames_dict[name][label] = interp.get_fnames(label, mode, conf_level)\n\n if len(interps) <= 3:\n ### Prepare sets of filenames per label for venn diagrams\n sets_dict = defaultdict(list)\n for interp in fnames_dict.values():\n for label,fnames in interp.items():\n sets_dict[label].append(set(fnames))\n\n ### Plot venn diagrams\n fig, axes = plt.subplots(nrows=1, ncols=interps[0].dl.c, figsize=figsize)\n for i,(label,sets) in enumerate(sets_dict.items()):\n set_labels = [f\"Model {j+1}\" for j in range(len(interps))]\n set_colors = [set_color] * len(interps)\n axes[i].set_title(label)\n if len(interps) == 2:\n venn2(sets, set_labels=set_labels, ax=axes[i], set_colors=set_colors)\n if len(interps) == 3:\n venn3(sets, set_labels=set_labels, ax=axes[i], set_colors=set_colors)\n\n ## Plot titles and subtitles\n if title is not None: fig.suptitle(title)\n else:\n if isinstance(conf_level, tuple): filler = f'Between {conf_level[0]}-{conf_level[1]}'\n else:\n if mode == 'accurate': filler = f'Above {conf_level}'\n else: filler = f'Below {conf_level}'\n if mode == 'accurate': fig.suptitle(f'Model Agreement - {mode.capitalize()} {filler} % Confidence')\n if mode == 'inaccurate': fig.suptitle(f'Model Agreement - {mode.capitalize()} {filler} % Confidence')\n plt.subplots_adjust(top = 1.1, bottom=0.01, hspace=0.25, wspace=0.1)\n else:\n fig=None\n\n if return_common:\n res_set = {label:intersection(sets_dict[label]) for label in interps[0].dl.vocab}\n\n if return_fig and return_common: return (fig, res_set)\n if return_fig and not return_common: return fig\n if return_common and not return_fig: return res_set", "_____no_output_____" ], [ "interp1 = ClassificationInterpretationEx.from_learner(learn1)\ninterp2 = ClassificationInterpretationEx.from_learner(learn2)\ninterp3 = 
ClassificationInterpretationEx.from_learner(learn3)\ninterp1.compute_label_confidence()\ninterp2.compute_label_confidence()\ninterp3.compute_label_confidence()", "_____no_output_____" ], [ "%%capture\nfig,common_labels = compare_venn(\n conf_level=(0,99), interps=[interp1,interp2],\n mode='accurate',\n return_common=True, return_fig=True,\n set_color='tomato'\n)", "_____no_output_____" ], [ "fig", "_____no_output_____" ], [ "common_labels.keys()", "_____no_output_____" ], [ "print(len(common_labels['shot_lighting_cast_hard']))\nprint(len(common_labels['shot_lighting_cast_soft']))", "23\n185\n" ], [ "#hide\nfrom nbdev.export import notebook2script\nnotebook2script()", "Converted 00_core.ipynb.\nConverted 01-classification-interpretation.ipynb.\nConverted 02-compare-models.ipynb.\nConverted index.ipynb.\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
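Stripped of the fastai and matplotlib-venn machinery, the model agreement that `compare_venn` returns in the record above reduces to a per-label set intersection of filenames. A miniature sketch — the labels and filenames below are invented for illustration only:

```python
# Invented filenames/labels illustrating the per-label agreement idea:
# each model contributes the set of files it got right at the chosen confidence,
# and the common result is the intersection of those sets, label by label.
preds_model1 = {'hard': {'a.jpg', 'b.jpg'}, 'soft': {'c.jpg', 'd.jpg', 'e.jpg'}}
preds_model2 = {'hard': {'b.jpg', 'f.jpg'}, 'soft': {'c.jpg', 'e.jpg'}}
preds_model3 = {'hard': {'a.jpg', 'b.jpg'}, 'soft': {'e.jpg', 'g.jpg'}}

models = [preds_model1, preds_model2, preds_model3]
common = {label: set.intersection(*(m[label] for m in models)) for label in preds_model1}
print(common)  # {'hard': {'b.jpg'}, 'soft': {'e.jpg'}}
```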
ec75376011b245f106dfcc9d4dd32e625bc18551
15,935
ipynb
Jupyter Notebook
ml-regression/week3-4/.ipynb_checkpoints/week-3-polynomial-regression-assignment-checkpoint.ipynb
isendel/machine-learning
0f1e8ef4f0961be73b4ec0d42c8927fd61fd7fc5
[ "Apache-2.0" ]
1
2017-03-09T13:08:23.000Z
2017-03-09T13:08:23.000Z
ml-regression/week3-4/.ipynb_checkpoints/week-3-polynomial-regression-assignment-checkpoint.ipynb
isendel/machine-learning
0f1e8ef4f0961be73b4ec0d42c8927fd61fd7fc5
[ "Apache-2.0" ]
null
null
null
ml-regression/week3-4/.ipynb_checkpoints/week-3-polynomial-regression-assignment-checkpoint.ipynb
isendel/machine-learning
0f1e8ef4f0961be73b4ec0d42c8927fd61fd7fc5
[ "Apache-2.0" ]
null
null
null
27.239316
363
0.586194
[ [ [ "# Regression Week 3: Assessing Fit (polynomial regression)", "_____no_output_____" ], [ "In this notebook you will compare different regression models in order to assess which model fits best. We will be using polynomial regression as a means to examine this topic. In particular you will:\n* Write a function to take an SArray and a degree and return an SFrame where each column is the SArray to a polynomial value up to the total degree e.g. degree = 3 then column 1 is the SArray column 2 is the SArray squared and column 3 is the SArray cubed\n* Use matplotlib to visualize polynomial regressions\n* Use matplotlib to visualize the same polynomial degree on different subsets of the data\n* Use a validation set to select a polynomial degree\n* Assess the final fit using test data\n\nWe will continue to use the House data from previous notebooks.", "_____no_output_____" ], [ "# Fire up graphlab create", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "Next we're going to write a polynomial function that takes an SArray and a maximal degree and returns an SFrame with columns containing the SArray to all the powers up to the maximal degree.\n\nThe easiest way to apply a power to an SArray is to use the .apply() and lambda x: functions. \nFor example to take the example array and compute the third power we can do as follows: (note running this cell the first time may take longer than expected since it loads graphlab)", "_____no_output_____" ] ], [ [ "tmp = np.array([1., 2., 3.])\ntmp_cubed = tmp**3\nprint(tmp)\nprint(tmp_cubed)", "[ 1. 2. 3.]\n[ 1. 8. 27.]\n" ] ], [ [ "We can create an empty SFrame using graphlab.SFrame() and then add any columns to it with ex_sframe['column_name'] = value. For example we create an empty SFrame and make the column 'power_1' to be the first power of tmp (i.e. tmp itself).", "_____no_output_____" ] ], [ [ "ex_dataframe = pd.DataFrame()\nex_dataframe['power_1'] = tmp\nprint(ex_dataframe)", " power_1\n0 1\n1 2\n2 3\n" ] ], [ [ "# Polynomial_sframe function", "_____no_output_____" ], [ "Using the hints above complete the following function to create an SFrame consisting of the powers of an SArray up to a specific degree:", "_____no_output_____" ] ], [ [ "def polynomial_sframe(feature, degree):\n # assume that degree >= 1\n # initialize the SFrame:\n poly_sframe = \n # and set poly_sframe['power_1'] equal to the passed feature\n\n # first check if degree > 1\n if degree > 1:\n # then loop over the remaining degrees:\n # range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree\n for power in range(2, degree+1): \n # first we'll give the column a name:\n name = 'power_' + str(power)\n # then assign poly_sframe[name] to the appropriate power of feature\n\n return poly_sframe", "_____no_output_____" ] ], [ [ "To test your function consider the smaller tmp variable and what you would expect the outcome of the following call:", "_____no_output_____" ] ], [ [ "print polynomial_sframe(tmp, 3)", "_____no_output_____" ] ], [ [ "# Visualizing polynomial regression", "_____no_output_____" ], [ "Let's use matplotlib to visualize what a polynomial regression looks like on some real data.", "_____no_output_____" ] ], [ [ "sales = graphlab.SFrame('kc_house_data.gl/')", "_____no_output_____" ] ], [ [ "As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. 
For houses with identical square footage, we break the tie by their prices.", "_____no_output_____" ] ], [ [ "sales = sales.sort(['sqft_living', 'price'])", "_____no_output_____" ] ], [ [ "Let's start with a degree 1 polynomial using 'sqft_living' (i.e. a line) to predict 'price' and plot what it looks like.", "_____no_output_____" ] ], [ [ "poly1_data = polynomial_sframe(sales['sqft_living'], 1)\npoly1_data['price'] = sales['price'] # add price to the data since it's the target", "_____no_output_____" ] ], [ [ "NOTE: for all the models in this notebook use validation_set = None to ensure that all results are consistent across users.", "_____no_output_____" ] ], [ [ "model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_1'], validation_set = None)", "_____no_output_____" ], [ "#let's take a look at the weights before we plot\nmodel1.get(\"coefficients\")", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "plt.plot(poly1_data['power_1'],poly1_data['price'],'.',\n poly1_data['power_1'], model1.predict(poly1_data),'-')", "_____no_output_____" ] ], [ [ "Let's unpack that plt.plot() command. The first pair of SArrays we passed are the 1st power of sqft and the actual price we then ask it to print these as dots '.'. The next pair we pass is the 1st power of sqft and the predicted values from the linear model. We ask these to be plotted as a line '-'. \n\nWe can see, not surprisingly, that the predicted values all fall on a line, specifically the one with slope 280 and intercept -43579. What if we wanted to plot a second degree polynomial?", "_____no_output_____" ] ], [ [ "poly2_data = polynomial_sframe(sales['sqft_living'], 2)\nmy_features = poly2_data.column_names() # get the name of the features\npoly2_data['price'] = sales['price'] # add price to the data since it's the target\nmodel2 = graphlab.linear_regression.create(poly2_data, target = 'price', features = my_features, validation_set = None)", "_____no_output_____" ], [ "model2.get(\"coefficients\")", "_____no_output_____" ], [ "plt.plot(poly2_data['power_1'],poly2_data['price'],'.',\n poly2_data['power_1'], model2.predict(poly2_data),'-')", "_____no_output_____" ] ], [ [ "The resulting model looks like half a parabola. Try on your own to see what the cubic looks like:", "_____no_output_____" ], [ "Now try a 15th degree polynomial:", "_____no_output_____" ], [ "What do you think of the 15th degree polynomial? Do you think this is appropriate? If we were to change the data do you think you'd get pretty much the same curve? Let's take a look.", "_____no_output_____" ], [ "# Changing the data and re-learning", "_____no_output_____" ], [ "We're going to split the sales data into four subsets of roughly equal size. Then you will estimate a 15th degree polynomial model on all four subsets of the data. Print the coefficients (you should use .print_rows(num_rows = 16) to view all of them) and plot the resulting fit (as we did above). The quiz will ask you some questions about these results.\n\nTo split the sales data into four subsets, we perform the following steps:\n* First split sales into 2 subsets with `.random_split(0.5, seed=0)`. \n* Next split the resulting subsets into 2 more subsets each. Use `.random_split(0.5, seed=0)`.\n\nWe set `seed=0` in these steps so that different users get consistent results.\nYou should end up with 4 subsets (`set_1`, `set_2`, `set_3`, `set_4`) of approximately equal size. 
", "_____no_output_____" ], [ "Fit a 15th degree polynomial on set_1, set_2, set_3, and set_4 using sqft_living to predict prices. Print the coefficients and make a plot of the resulting model.", "_____no_output_____" ], [ "Some questions you will be asked on your quiz:\n\n**Quiz Question: Is the sign (positive or negative) for power_15 the same in all four models?**\n\n**Quiz Question: (True/False) the plotted fitted lines look the same in all four plots**", "_____no_output_____" ], [ "# Selecting a Polynomial Degree", "_____no_output_____" ], [ "Whenever we have a \"magic\" parameter like the degree of the polynomial there is one well-known way to select these parameters: validation set. (We will explore another approach in week 4).\n\nWe split the sales dataset 3-way into training set, test set, and validation set as follows:\n\n* Split our sales data into 2 sets: `training_and_validation` and `testing`. Use `random_split(0.9, seed=1)`.\n* Further split our training data into two sets: `training` and `validation`. Use `random_split(0.5, seed=1)`.\n\nAgain, we set `seed=1` to obtain consistent results for different users.", "_____no_output_____" ], [ "Next you should write a loop that does the following:\n* For degree in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] (to get this in python type range(1, 15+1))\n * Build an SFrame of polynomial data of train_data['sqft_living'] at the current degree\n * hint: my_features = poly_data.column_names() gives you a list e.g. ['power_1', 'power_2', 'power_3'] which you might find useful for graphlab.linear_regression.create( features = my_features)\n * Add train_data['price'] to the polynomial SFrame\n * Learn a polynomial regression model to sqft vs price with that degree on TRAIN data\n * Compute the RSS on VALIDATION data (here you will want to use .predict()) for that degree and you will need to make a polynmial SFrame using validation data.\n* Report which degree had the lowest RSS on validation data (remember python indexes from 0)\n\n(Note you can turn off the print out of linear_regression.create() with verbose = False)", "_____no_output_____" ], [ "**Quiz Question: Which degree (1, 2, …, 15) had the lowest RSS on Validation data?**", "_____no_output_____" ], [ "Now that you have chosen the degree of your polynomial using validation data, compute the RSS of this model on TEST data. Report the RSS on your quiz.", "_____no_output_____" ], [ "**Quiz Question: what is the RSS on TEST data for the model with the degree selected from Validation data?**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec754ce470e7c819eff0637b53e298b758933036
527,905
ipynb
Jupyter Notebook
07/.ipynb_checkpoints/07 - Introduction to Pandas (complete)-checkpoint.ipynb
barjacks/foundations-homework
f2c4ac9c3123df7ee936303927b48c89671ba4c5
[ "MIT" ]
null
null
null
07/.ipynb_checkpoints/07 - Introduction to Pandas (complete)-checkpoint.ipynb
barjacks/foundations-homework
f2c4ac9c3123df7ee936303927b48c89671ba4c5
[ "MIT" ]
null
null
null
07/.ipynb_checkpoints/07 - Introduction to Pandas (complete)-checkpoint.ipynb
barjacks/foundations-homework
f2c4ac9c3123df7ee936303927b48c89671ba4c5
[ "MIT" ]
null
null
null
51.05958
22,910
0.526032
[ [ [ "# An Introduction to `pandas`\n\nPandas! They are adorable animals. You might think they are [the worst animal ever](https://www.reddit.com/r/todayilearned/comments/3azkqx/til_naturalist_chris_packham_said_he_would_eat/cshqy9y) but that is not true. You might sometimes think `pandas` is the worst library every, and that is only *kind of* true.\n\nThe important thing is **use the right tool for the job**. `pandas` is good for some stuff, SQL is good for some stuff, writing raw Python is good for some stuff. You'll figure it out as you go along.\n\nNow let's start coding. Hopefully you did `pip install pandas` before you started up this notebook.", "_____no_output_____" ] ], [ [ "# import pandas, but call it pd. Why? Because that's What People Do.\nimport pandas as pd", "_____no_output_____" ] ], [ [ "When you import pandas, you use `import pandas as pd`. That means instead of typing `pandas` in your code you'll type `pd`.\n\nYou don't *have* to, but every other person on the planet will be doing it, so you might as well.", "_____no_output_____" ], [ "Now we're going to read in a file. Our file is called `NBA-Census-10.14.2013.csv` because we're **sports moguls**. `pandas` can `read_` different types of files, so try to figure it out by typing `pd.read_` and hitting tab for autocomplete.", "_____no_output_____" ] ], [ [ "# We're going to call this df, which means \"data frame\"\n# It isn't in UTF-8 (I saved it from my mac!) so we need to set the encoding\ndf = pd.read_csv(\"NBA-Census-10.14.2013.csv\", encoding='mac_roman')", "_____no_output_____" ] ], [ [ "**A dataframe is basically a spreadsheet**, except it lives in the world of Python or the statistical programming language R. They can't call it a spreadsheet because then people would think those programmers used Excel, which would make them boring and normal and they'd have to wear a tie every day.\n\n# Selecting rows\n\nNow let's look at our data, since that's what data is for", "_____no_output_____" ] ], [ [ "# Let's look at all of it\ndf", "_____no_output_____" ] ], [ [ "If we scroll we can see all of it. But maybe we don't want to see all of it. Maybe we hate scrolling?", "_____no_output_____" ] ], [ [ "# Look at the first few rows\ndf.head()", "_____no_output_____" ] ], [ [ "...but maybe we want to see more than a measly five results?", "_____no_output_____" ] ], [ [ "# Let's look at MORE of the first few rows\ndf.head(10)", "_____no_output_____" ] ], [ [ "But maybe we want to make a basketball joke and see the **final four?**", "_____no_output_____" ] ], [ [ "# Let's look at the final few rows\ndf.tail(4)", "_____no_output_____" ] ], [ [ "So yes, `head` and `tail` work kind of like the terminal commands. That's nice, I guess.\n\nBut maybe we're incredibly demanding (which we are) and we want, say, **the 6th through the 8th row** (which we do). Don't worry (which I know you were), we can do that, too.", "_____no_output_____" ] ], [ [ "# Show the 6th through the 8th rows\ndf[5:8]", "_____no_output_____" ] ], [ [ "It's kind of like an array, right? Except where in an array we'd say `df[0]` this time we need to give it two numbers, the start and the end.\n\n# Selecting columns\n\nBut jeez, my eyes don't want to go that far over the data. 
I only want to see, uh, name and age.", "_____no_output_____" ] ], [ [ "# Get the names of the columns, just because\ndf.columns", "_____no_output_____" ], [ "# If we want to be \"correct\" we add .values on the end of it\ndf.columns.values", "_____no_output_____" ], [ "# Select only name and age\ncolumns_to_show = ['Name', 'Age']\ndf[columns_to_show]", "_____no_output_____" ], [ "# Combing that with .head() to see not-so-many rows\ncolumns_to_show = ['Name', 'Age']\ndf[columns_to_show].head()", "_____no_output_____" ], [ "# We can also do this all in one line, even though it starts looking ugly\n# (unlike the cute bears pandas looks ugly pretty often)\ndf[['Name', 'Age']].head()", "_____no_output_____" ] ], [ [ "**NOTE:** That was not `df['Name', 'Age']`, it was `df[['Name', 'Age]]`. You'll definitely type it wrong all of the time. When things break with pandas it's probably because you forgot to put in a million brackets.", "_____no_output_____" ], [ "# Describing your data\n\nA powerful tool of pandas is being able to select a portion of your data, *because who ordered all that data anyway*.", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ] ], [ [ "I want to know how **many people are in each position**. Luckily, pandas can tell me!", "_____no_output_____" ] ], [ [ "# Grab the POS column, and count the different values in it.\ndf['POS'].value_counts()", "_____no_output_____" ] ], [ [ "**Now that was a little weird, yes** - we used `df['POS']` instead of `df[['POS']]` when viewing the data's details.\n\nBut now I'm curious about numbers: **how old is everyone?** Maybe we could, I don't know, get some statistics about age? Some statistics to **describe** age?", "_____no_output_____" ] ], [ [ "# Summary statistics for Age\ndf['Age'].describe()", "_____no_output_____" ], [ "# That's pretty good. Does it work for everything? How about the money?\ndf['2013 $'].describe()", "_____no_output_____" ] ], [ [ "Unfortunately because that has dollar signs and commas it's thought of as a string. **We'll fix it in a second,** but let's try describing one more thing.", "_____no_output_____" ] ], [ [ "# Doing more describing\ndf['Ht (In.)'].describe()", "_____no_output_____" ] ], [ [ "That's stupid, though, what's an inch even look like? What's 80 inches? I don't have a clue. If only there were some wa to manipulate our data.\n\n# Manipulating data\n\nOh wait there is, HA HA HA.", "_____no_output_____" ] ], [ [ "# Take another look at our inches, but only the first few\ndf['Ht (In.)'].head()", "_____no_output_____" ], [ "# Divide those inches by 12\ndf['Ht (In.)'].head() / 12", "_____no_output_____" ], [ "# Let's divide ALL of them by 12\nfeet = df['Ht (In.)'] / 12\nfeet", "_____no_output_____" ], [ "# Can we get statistics on those?\nfeet.describe()", "_____no_output_____" ], [ "# Let's look at our original data again\ndf.head(2)", "_____no_output_____" ] ], [ [ "Okay that was nice but unfortunately we can't do anything with it. It's just sitting there, separate from our data. If this were normal code we could do `blahblah['feet'] = blahblah['Ht (In.)'] / 12`, but since this is pandas, we can't. Right? **Right?**", "_____no_output_____" ] ], [ [ "# Store a new column\ndf['feet'] = df['Ht (In.)'] / 12\ndf.head()", "_____no_output_____" ] ], [ [ "That's cool, maybe we could do the same thing with their salary? 
Take out the $ and the , and convert it to an integer?", "_____no_output_____" ] ], [ [ "# Can't just use .replace\ndf['2013 $'].head().replace(\"$\",\"\")", "_____no_output_____" ], [ "# Need to use this weird .str thing\ndf['2013 $'].head().str.replace(\"$\",\"\")", "_____no_output_____" ], [ "# Can't just immediately replace the , either\ndf['2013 $'].head().str.replace(\"$\",\"\").replace(\",\",\"\")", "_____no_output_____" ], [ "# Need to use the .str thing before EVERY string method\ndf['2013 $'].head().str.replace(\"$\",\"\").str.replace(\",\",\"\")", "_____no_output_____" ], [ "# Describe still doesn't work.\ndf['2013 $'].head().str.replace(\"$\",\"\").str.replace(\",\",\"\").describe()", "_____no_output_____" ], [ "# Let's convert it to an integer using .astype(int) before we describe it\ndf['2013 $'].head().str.replace(\"$\",\"\").str.replace(\",\",\"\").astype(int).describe()", "_____no_output_____" ], [ "df['2013 $'].head().str.replace(\"$\",\"\").str.replace(\",\",\"\").astype(int)", "_____no_output_____" ], [ "# Maybe we can just make them millions?\ndf['2013 $'].head().str.replace(\"$\",\"\").str.replace(\",\",\"\").astype(int) / 1000000", "_____no_output_____" ], [ "# Unfortunately one is \"n/a\" which is going to break our code, so we can make n/a be 0\ndf['2013 $'].str.replace(\"$\",\"\").str.replace(\",\",\"\").str.replace(\"n/a\", \"0\").astype(int) / 1000000", "_____no_output_____" ], [ "# Remove the .head() piece and save it back into the dataframe\ndf['millions'] = df['2013 $'].str.replace(\"$\",\"\").str.replace(\",\",\"\").str.replace(\"n/a\",\"0\").astype(int) / 1000000\ndf.head()", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ] ], [ [ "The average basketball player makes 3.8 million dollars and is a little over six and a half feet tall.\n\nBut who cares about those guys? I don't care about those guys. They're boring. I want the real rich guys!\n\n# Sorting and sub-selecting", "_____no_output_____" ] ], [ [ "# This is just the first few guys in the dataset. Can we order it?\ndf.head(3)", "_____no_output_____" ], [ "# Let's try to sort them\ndf.sort_values(by='millions').head(3)", "_____no_output_____" ] ], [ [ "Those guys are making nothing! If only there were a way to sort from high to low, a.k.a. descending instead of ascending.", "_____no_output_____" ] ], [ [ "# It isn't descending = True, unfortunately\ndf.sort_values(by='millions', ascending=False).head(3)", "_____no_output_____" ], [ "# We can use this to find the oldest guys in the league\ndf.sort_values(by='Age', ascending=False).head(3)", "_____no_output_____" ], [ "# Or the youngest, by taking out 'ascending=False'\ndf.sort_values(by='Age').head(3)", "_____no_output_____" ] ], [ [ "But sometimes instead of just looking at them, I want to do stuff with them. Play some games with them! Dunk on them~ `describe` them! And we don't want to dunk on everyone, only the players above 7 feet tall.\n\nFirst, we need to check out **boolean things.**", "_____no_output_____" ] ], [ [ "# Get a big long list of True and False for every single row.\ndf['feet'] > 7", "_____no_output_____" ], [ "# We could use value counts if we wanted\nabove_seven_feet = df['feet'] > 7\nabove_seven_feet.value_counts()", "_____no_output_____" ], [ "# But we can also apply this to every single row to say whether YES we want it or NO we don't\ndf['feet'].head() > 7", "_____no_output_____" ], [ "# Instead of putting column names inside of the brackets, we instead\n# put the True/False statements. 
It will only return the players above \n# seven feet tall\ndf[df['feet'] > 7]", "_____no_output_____" ], [ "# Or only the guards\ndf[df['POS'] == 'G']", "_____no_output_____" ], [ "# Or only the guards who make more than 15 million\ndf[(df['POS'] == 'G') & (df['millions'] > 15)]", "_____no_output_____" ], [ "# It might be easier to break down the booleans into separate variables\nis_guard = df['POS'] == 'G'\nmore_than_fifteen_million = df['millions'] > 15\ndf[is_guard & more_than_fifteen_million]", "_____no_output_____" ], [ "# We can save this stuff\nshort_players = df[df['feet'] < 6.5]\nshort_players", "_____no_output_____" ], [ "short_players.describe()", "_____no_output_____" ], [ "# Maybe we can compare them to taller players?\ndf[df['feet'] >= 6.5].describe()", "_____no_output_____" ] ], [ [ "# Drawing pictures\n\nOkay okay enough code and enough stupid numbers. I'm visual. I want graphics. **Okay?????** Okay.", "_____no_output_____" ] ], [ [ "df['Age'].head()", "_____no_output_____" ], [ "# This will scream we don't have matplotlib.\ndf['Age'].hist()", "_____no_output_____" ] ], [ [ "`matplotlib` is a graphing library. It's the Python way to make graphs!", "_____no_output_____" ] ], [ [ "!pip install matplotlib", "Collecting matplotlib\n Using cached matplotlib-1.5.1-cp34-cp34m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl\nCollecting cycler (from matplotlib)\n Using cached cycler-0.10.0-py2.py3-none-any.whl\nRequirement already satisfied (use --upgrade to upgrade): numpy>=1.6 in /Users/soma/.virtualenvs/pandas-intro/lib/python3.4/site-packages (from matplotlib)\nRequirement already satisfied (use --upgrade to upgrade): pytz in /Users/soma/.virtualenvs/pandas-intro/lib/python3.4/site-packages (from matplotlib)\nRequirement already satisfied (use --upgrade to upgrade): python-dateutil in /Users/soma/.virtualenvs/pandas-intro/lib/python3.4/site-packages (from matplotlib)\nCollecting pyparsing!=2.0.0,!=2.0.4,>=1.5.6 (from matplotlib)\n Using cached pyparsing-2.1.4-py2.py3-none-any.whl\nRequirement already satisfied (use --upgrade to upgrade): six in /Users/soma/.virtualenvs/pandas-intro/lib/python3.4/site-packages (from cycler->matplotlib)\nInstalling collected packages: cycler, pyparsing, matplotlib\nSuccessfully installed cycler-0.10.0 matplotlib-1.5.1 pyparsing-2.1.4\n" ], [ "# this will open up a weird window that won't do anything\ndf['Age'].hist()", "_____no_output_____" ], [ "# So instead you run this code\n%matplotlib inline", "_____no_output_____" ], [ "df['Age'].hist()", "_____no_output_____" ] ], [ [ "But that's ugly. There's a thing called ``ggplot`` for R that looks nice. We want to look nice. We want to look like ``ggplot``.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nplt.style.available", "_____no_output_____" ], [ "plt.style.use('ggplot')", "_____no_output_____" ], [ "df['Age'].hist()", "_____no_output_____" ], [ "plt.style.use('seaborn-deep')\ndf['Age'].hist()", "_____no_output_____" ], [ "plt.style.use('fivethirtyeight')\ndf['Age'].hist()", "_____no_output_____" ] ], [ [ "That might look better with a little more customization. So let's customize it.", "_____no_output_____" ] ], [ [ "# Pass in all sorts of stuff!\n# Most from http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.hist.html\n# .range() is a matplotlib thing\ndf['Age'].hist(bins=20, xlabelsize=10, ylabelsize=10, range=(0,40))", "_____no_output_____" ] ], [ [ "I want more graphics! 
**Do tall people make more money?!?!**", "_____no_output_____" ] ], [ [ "df.plot(kind='scatter', x='feet', y='millions')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "# How does experience relate with the amount of money they're making?\ndf.plot(kind='scatter', x='EXP', y='millions')", "_____no_output_____" ], [ "# At least we can assume height and weight are related\ndf.plot(kind='scatter', x='WT', y='feet')", "_____no_output_____" ], [ "# At least we can assume height and weight are related\n# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html\ndf.plot(kind='scatter', x='WT', y='feet', xlim=(100,300), ylim=(5.5, 8))", "_____no_output_____" ], [ "plt.style.use('ggplot')", "_____no_output_____" ], [ "df.plot(kind='scatter', x='WT', y='feet', xlim=(100,300), ylim=(5.5, 8))", "_____no_output_____" ], [ "# We can also use plt separately\n# It's SIMILAR but TOTALLY DIFFERENT\ncenters = df[df['POS'] == 'C']\nguards = df[df['POS'] == 'G']\nforwards = df[df['POS'] == 'F']\nplt.scatter(y=centers[\"feet\"], x=centers[\"WT\"], c='c', alpha=0.75, marker='x')\nplt.scatter(y=guards[\"feet\"], x=guards[\"WT\"], c='y', alpha=0.75, marker='o')\nplt.scatter(y=forwards[\"feet\"], x=forwards[\"WT\"], c='m', alpha=0.75, marker='v')\nplt.xlim(100,300)\nplt.ylim(5.5,8)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec75720f6886853530dbf6612fe2cbe4e5090ad9
4,475
ipynb
Jupyter Notebook
ml/Neural Network - Feedforward.ipynb
disooqi/jupyter-notebooks
48c9e6cc4b446f87f09baa5c6ae3c4caf1482c1e
[ "MIT" ]
null
null
null
ml/Neural Network - Feedforward.ipynb
disooqi/jupyter-notebooks
48c9e6cc4b446f87f09baa5c6ae3c4caf1482c1e
[ "MIT" ]
null
null
null
ml/Neural Network - Feedforward.ipynb
disooqi/jupyter-notebooks
48c9e6cc4b446f87f09baa5c6ae3c4caf1482c1e
[ "MIT" ]
null
null
null
22.831633
226
0.512849
[ [ [ "%matplotlib inline\n\nimport numpy as np\nfrom numpy import array, arange, cos, exp, pi, zeros, column_stack, ones, newaxis, log, dot, append, zeros_like\nfrom numpy.random import permutation, shuffle\nfrom scipy.io import loadmat\nfrom scipy.optimize import minimize, fmin_bfgs\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.figure import Figure", "_____no_output_____" ], [ "# Setup the parameters you will use for this exercise\ninput_layer_size = 400; # 20x20 Input Images of Digits\nhidden_layer_size = 25; # 25 hidden units\nnum_labels = 10; # 10 labels, from 1 to 10 ", "_____no_output_____" ] ], [ [ "### Logistic regression cannot form more complex hypotheses as it is only a linear classifier (You could add more features (such as polynomial features) to logistic regression, but that can be very expensive to train.)\n", "_____no_output_____" ] ], [ [ "handwritten_digits = loadmat('ex3data1.mat')\nhandwritten_digits.keys()", "_____no_output_____" ], [ "features = handwritten_digits['X']\n# X = column_stack((ones((features.shape[0],1)), features))\nm, n = features.shape\n\ny = handwritten_digits['y']\nfeatures.shape, y.shape", "_____no_output_____" ], [ "# Loading Saved Neural Network Parameters ...\nweight = loadmat('ex3weights.mat')\nweight.keys()", "_____no_output_____" ], [ "def sigmoid(z): \n return 1/(1+exp(-z))\n\n\ndef predict_from_three_layer_NN(Theta1, Theta2, X):\n m, _ = X.shape\n A_1 = np.c_[ones((m)), X] # (5000, 400)\n \n Z_2 = Theta1.dot(A_1.T) # (25, 401) * (401, 5000)\n A_tmp = sigmoid(Z_2).T # (5000, 25) \n A_2 = np.c_[(ones((m)), A_tmp)] # (5000, 26) \n \n Z_3 = Theta2.dot(A_2.T) # (10, 26) * (26, 5000) \n A_3 = sigmoid(Z_3).T # (5000, 10)\n \n return A_3\n\npred = predict_from_three_layer_NN(weight['Theta1'], weight['Theta2'], features)", "_____no_output_____" ], [ "print pred.argmax(axis=1)[:10]\npred.shape", "[9 9 9 9 9 9 9 9 9 9]\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec7590106eaad4db2ad7183f9e857300970bedf4
100,530
ipynb
Jupyter Notebook
Using_CNN_from_Scratch.ipynb
adityajn105/CatvsDog-TransferLearning-VGG
17508f01b8094c6fe76c2b5805ae4987768fde26
[ "MIT" ]
null
null
null
Using_CNN_from_Scratch.ipynb
adityajn105/CatvsDog-TransferLearning-VGG
17508f01b8094c6fe76c2b5805ae4987768fde26
[ "MIT" ]
null
null
null
Using_CNN_from_Scratch.ipynb
adityajn105/CatvsDog-TransferLearning-VGG
17508f01b8094c6fe76c2b5805ae4987768fde26
[ "MIT" ]
null
null
null
144.025788
70,262
0.816761
[ [ [ "!wget https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip\n!unzip -q kagglecatsanddogs_3367a.zip", "--2019-05-11 19:02:34-- https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip\nResolving download.microsoft.com (download.microsoft.com)... 184.26.80.188, 2600:1409:12:285::e59, 2600:1409:12:282::e59\nConnecting to download.microsoft.com (download.microsoft.com)|184.26.80.188|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 824894548 (787M) [application/octet-stream]\nSaving to: ‘kagglecatsanddogs_3367a.zip’\n\nkagglecatsanddogs_3 100%[===================>] 786.68M 127MB/s in 6.2s \n\n2019-05-11 19:02:40 (127 MB/s) - ‘kagglecatsanddogs_3367a.zip’ saved [824894548/824894548]\n\n" ], [ "import os\nimport numpy as np\nimport shutil\nimport glob\nimport warnings\nwarnings.filterwarnings('ignore')\n\ncat_files = os.listdir('PetImages/Cat')\ndog_files = os.listdir('PetImages/Dog')\n\nfor cat in cat_files:\n src = os.path.join('PetImages/Cat',cat)\n dst = os.path.join('PetImages/Cat','cat_'+cat)\n os.rename( src,dst )\n\nfor dog in dog_files:\n src = os.path.join('PetImages/Dog',dog)\n dst = os.path.join('PetImages/Dog','dog_'+dog)\n os.rename( src , dst )\n \n\ncat_files = glob.glob('PetImages/Cat/*')\ndog_files = glob.glob('PetImages/Dog/*')\n\nprint(len(cat_files),len(dog_files))\n\ncat_train = np.random.choice(cat_files, size=3000, replace=False)\ndog_train = np.random.choice(dog_files, size=3000, replace=False)\ncat_files = list(set(cat_files) - set(cat_train))\ndog_files = list(set(dog_files) - set(dog_train))\n\ncat_val = np.random.choice(cat_files, size=1000, replace=False)\ndog_val = np.random.choice(dog_files, size=1000, replace=False)\ncat_files = list(set(cat_files) - set(cat_val))\ndog_files = list(set(dog_files) - set(dog_val))\n\ncat_test = np.random.choice(cat_files, size=1000, replace=False)\ndog_test = np.random.choice(dog_files, size=1000, replace=False)\n\nprint('Cat datasets:', cat_train.shape, cat_val.shape, cat_test.shape)\nprint('Dog datasets:', dog_train.shape, dog_val.shape, dog_test.shape)\n#rm -r PetImages/ kagglecatsanddogs_3367a.zip readme\\[1\\].txt MSR-LA\\ -\\ 3467.docx", "12501 12501\nCat datasets: (3000,) (1000,) (1000,)\nDog datasets: (3000,) (1000,) (1000,)\n" ] ], [ [ "### Splitting Train, Validation, Test Data", "_____no_output_____" ] ], [ [ "train_dir = 'training_data'\nval_dir = 'validation_data'\ntest_dir = 'test_data'\n\ntrain_files = np.concatenate([cat_train, dog_train])\nvalidate_files = np.concatenate([cat_val, dog_val])\ntest_files = np.concatenate([cat_test, dog_test])\n\nos.mkdir(train_dir) if not os.path.isdir(train_dir) else None\nos.mkdir(val_dir) if not os.path.isdir(val_dir) else None\nos.mkdir(test_dir) if not os.path.isdir(test_dir) else None\n\nfor fn in train_files:\n shutil.copy(fn, train_dir)\n\nfor fn in validate_files:\n shutil.copy(fn, val_dir)\n \nfor fn in test_files:\n shutil.copy(fn, test_dir)\n#!rm -r test_data/ training_data/ validation_data/", "_____no_output_____" ], [ "from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img\n\nIMG_DIM = (150,150)\n\n\ntrain_files = glob.glob('training_data/*')\ntrain_imgs = [];train_labels = []\nfor file in train_files:\n try:\n train_imgs.append( img_to_array(load_img( file,target_size=IMG_DIM )) )\n train_labels.append(file.split('/')[1].split('_')[0])\n except:\n pass\ntrain_imgs = 
np.array(train_imgs)\n\nvalidation_files = glob.glob('validation_data/*')\nvalidation_imgs = [];validation_labels = []\nfor file in validation_files:\n try:\n validation_imgs.append( img_to_array(load_img( file,target_size=IMG_DIM )) )\n validation_labels.append(file.split('/')[1].split('_')[0])\n except:\n pass\ntrain_imgs = np.array(train_imgs)\nvalidation_imgs = np.array(validation_imgs)\n\n\nprint('Train dataset shape:', train_imgs.shape, \n '\\tValidation dataset shape:', validation_imgs.shape)", "Using TensorFlow backend.\n" ], [ "# encode text category labels\nfrom sklearn.preprocessing import LabelEncoder\n\nle = LabelEncoder()\nle.fit(train_labels)\ntrain_labels_enc = le.transform(train_labels)\nvalidation_labels_enc = le.transform(validation_labels)", "_____no_output_____" ] ], [ [ "### Image Augmentation", "_____no_output_____" ] ], [ [ "train_datagen = ImageDataGenerator(rescale=1./255, \n zoom_range=0.3, \n rotation_range=50,\n width_shift_range=0.2, \n height_shift_range=0.2, \n shear_range=0.2, \n horizontal_flip=True, \n fill_mode='nearest')\nval_datagen = ImageDataGenerator(rescale=1./255)\ntrain_generator = train_datagen.flow(train_imgs, train_labels_enc, batch_size=30)\nval_generator = val_datagen.flow(validation_imgs, validation_labels_enc, batch_size=20)", "_____no_output_____" ] ], [ [ "### Keras Model", "_____no_output_____" ] ], [ [ "from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Input\nfrom keras.models import Model\nfrom keras import optimizers\n\ninput_shape = (150, 150, 3)\n\ninput_l = Input((150,150,3))\n\nl1_conv = Conv2D(16, kernel_size=(3, 3), activation='relu')(input_l)\nl1_pool = MaxPooling2D(pool_size=(2, 2))(l1_conv)\n\nl2_conv = Conv2D(64, kernel_size=(3, 3), activation='relu')(l1_pool)\nl2_pool = MaxPooling2D(pool_size=(2, 2))(l2_conv)\n\nl3_conv = Conv2D(128, kernel_size=(3, 3), activation='relu')(l2_pool)\nl3_pool = MaxPooling2D(pool_size=(2, 2))(l3_conv)\n\nl4 = Flatten()(l3_pool)\nl4_dropout = Dropout(0.3)(l4)\n\nl5 = Dense(512, activation='relu')(l4_dropout)\nl5_dropout = Dropout(0.3)(l5)\n\noutput = Dense(1, activation='sigmoid')(l5_dropout)\n\n\nmodel = Model(input_l, output)\n\nmodel.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(), metrics=['accuracy'])\n\nmodel.summary()", "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. 
Rate should be set to `rate = 1 - keep_prob`.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) (None, 150, 150, 3) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 148, 148, 16) 448 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 74, 74, 16) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 72, 72, 64) 9280 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 36, 36, 64) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 34, 34, 128) 73856 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 17, 17, 128) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 36992) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 36992) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 512) 18940416 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 1) 513 \n=================================================================\nTotal params: 19,024,513\nTrainable params: 19,024,513\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=100,\n validation_data=val_generator, validation_steps=50, \n verbose=2) ", "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\nEpoch 1/100\n - 17s - loss: 0.8622 - acc: 0.5310 - val_loss: 0.6788 - val_acc: 0.5240\nEpoch 2/100\n - 13s - loss: 0.6918 - acc: 0.5479 - val_loss: 0.6593 - val_acc: 0.5730\nEpoch 3/100\n - 13s - loss: 0.6772 - acc: 0.5933 - val_loss: 0.5895 - val_acc: 0.6900\nEpoch 4/100\n - 13s - loss: 0.6583 - acc: 0.6218 - val_loss: 0.6041 - val_acc: 0.6760\nEpoch 5/100\n - 13s - loss: 0.6430 - acc: 0.6293 - val_loss: 0.5745 - val_acc: 0.6990\nEpoch 6/100\n - 13s - loss: 0.6190 - acc: 0.6515 - val_loss: 0.5982 - val_acc: 0.6750\nEpoch 7/100\n - 14s - loss: 0.6172 - acc: 0.6640 - val_loss: 0.5325 - val_acc: 0.7120\nEpoch 8/100\n - 13s - loss: 0.6209 - acc: 0.6582 - val_loss: 0.5567 - val_acc: 0.7320\nEpoch 9/100\n - 13s - loss: 0.6211 - acc: 0.6560 - val_loss: 0.5010 - val_acc: 0.7410\nEpoch 10/100\n - 13s - loss: 0.5903 - acc: 0.6806 - val_loss: 0.5094 - val_acc: 0.7390\nEpoch 11/100\n - 13s - loss: 0.6082 - acc: 0.6700 - val_loss: 0.5429 - val_acc: 0.7120\nEpoch 12/100\n - 13s - loss: 0.5843 - acc: 0.6872 - val_loss: 0.5129 - val_acc: 0.7510\nEpoch 13/100\n - 14s - loss: 0.5839 - acc: 0.6810 - val_loss: 0.4997 - val_acc: 0.7640\nEpoch 14/100\n - 13s - loss: 0.5899 - acc: 0.6836 - val_loss: 0.5882 - val_acc: 0.7130\nEpoch 15/100\n - 13s - loss: 0.5805 - acc: 0.7000 - val_loss: 0.7053 - val_acc: 0.6550\nEpoch 16/100\n - 13s - loss: 0.5820 - acc: 0.6992 - val_loss: 0.4845 - val_acc: 0.7650\nEpoch 17/100\n - 13s - loss: 0.5752 - acc: 0.7017 
- val_loss: 0.5363 - val_acc: 0.7270\nEpoch 18/100\n - 13s - loss: 0.5657 - acc: 0.7129 - val_loss: 0.4494 - val_acc: 0.7830\nEpoch 19/100\n - 14s - loss: 0.5787 - acc: 0.6917 - val_loss: 0.5283 - val_acc: 0.7290\nEpoch 20/100\n - 14s - loss: 0.5658 - acc: 0.7089 - val_loss: 0.4581 - val_acc: 0.7770\nEpoch 21/100\n - 14s - loss: 0.5553 - acc: 0.7147 - val_loss: 0.6111 - val_acc: 0.6850\nEpoch 22/100\n - 14s - loss: 0.5580 - acc: 0.7242 - val_loss: 0.5054 - val_acc: 0.7480\nEpoch 23/100\n - 13s - loss: 0.5588 - acc: 0.7100 - val_loss: 0.4720 - val_acc: 0.7730\nEpoch 24/100\n - 13s - loss: 0.5505 - acc: 0.7212 - val_loss: 0.5123 - val_acc: 0.7470\nEpoch 25/100\n - 15s - loss: 0.5712 - acc: 0.7073 - val_loss: 0.4787 - val_acc: 0.7610\nEpoch 26/100\n - 13s - loss: 0.5408 - acc: 0.7316 - val_loss: 0.4961 - val_acc: 0.7530\nEpoch 27/100\n - 13s - loss: 0.5300 - acc: 0.7393 - val_loss: 0.4686 - val_acc: 0.7830\nEpoch 28/100\n - 13s - loss: 0.5505 - acc: 0.7199 - val_loss: 0.5242 - val_acc: 0.7460\nEpoch 29/100\n - 13s - loss: 0.5271 - acc: 0.7457 - val_loss: 0.4545 - val_acc: 0.7900\nEpoch 30/100\n - 13s - loss: 0.5384 - acc: 0.7260 - val_loss: 0.4610 - val_acc: 0.7870\nEpoch 31/100\n - 14s - loss: 0.5223 - acc: 0.7367 - val_loss: 0.4424 - val_acc: 0.7960\nEpoch 32/100\n - 13s - loss: 0.5512 - acc: 0.7213 - val_loss: 0.4779 - val_acc: 0.7780\nEpoch 33/100\n - 13s - loss: 0.5463 - acc: 0.7433 - val_loss: 0.4244 - val_acc: 0.8020\nEpoch 34/100\n - 13s - loss: 0.5385 - acc: 0.7302 - val_loss: 0.5564 - val_acc: 0.7410\nEpoch 35/100\n - 13s - loss: 0.5427 - acc: 0.7340 - val_loss: 0.8722 - val_acc: 0.6250\nEpoch 36/100\n - 13s - loss: 0.5305 - acc: 0.7402 - val_loss: 0.4386 - val_acc: 0.8110\nEpoch 37/100\n - 14s - loss: 0.5184 - acc: 0.7457 - val_loss: 0.4450 - val_acc: 0.7910\nEpoch 38/100\n - 13s - loss: 0.5441 - acc: 0.7332 - val_loss: 0.4206 - val_acc: 0.8070\nEpoch 39/100\n - 13s - loss: 0.5285 - acc: 0.7407 - val_loss: 0.4497 - val_acc: 0.7910\nEpoch 40/100\n - 13s - loss: 0.5281 - acc: 0.7376 - val_loss: 0.4420 - val_acc: 0.8000\nEpoch 41/100\n - 13s - loss: 0.5231 - acc: 0.7410 - val_loss: 0.4106 - val_acc: 0.8160\nEpoch 42/100\n - 13s - loss: 0.5220 - acc: 0.7436 - val_loss: 0.5312 - val_acc: 0.7590\nEpoch 43/100\n - 15s - loss: 0.5149 - acc: 0.7517 - val_loss: 0.4484 - val_acc: 0.7830\nEpoch 44/100\n - 14s - loss: 0.5223 - acc: 0.7432 - val_loss: 0.4120 - val_acc: 0.8120\nEpoch 45/100\n - 14s - loss: 0.5195 - acc: 0.7400 - val_loss: 0.4429 - val_acc: 0.7960\nEpoch 46/100\n - 13s - loss: 0.5246 - acc: 0.7466 - val_loss: 0.4041 - val_acc: 0.8090\nEpoch 47/100\n - 13s - loss: 0.5075 - acc: 0.7553 - val_loss: 0.4265 - val_acc: 0.8250\nEpoch 48/100\n - 13s - loss: 0.5156 - acc: 0.7583 - val_loss: 0.4795 - val_acc: 0.7750\nEpoch 49/100\n - 14s - loss: 0.5120 - acc: 0.7517 - val_loss: 0.4283 - val_acc: 0.8020\nEpoch 50/100\n - 13s - loss: 0.5255 - acc: 0.7409 - val_loss: 0.4847 - val_acc: 0.7830\nEpoch 51/100\n - 13s - loss: 0.5193 - acc: 0.7490 - val_loss: 0.4578 - val_acc: 0.7940\nEpoch 52/100\n - 13s - loss: 0.5122 - acc: 0.7469 - val_loss: 0.4558 - val_acc: 0.8020\nEpoch 53/100\n - 13s - loss: 0.5049 - acc: 0.7603 - val_loss: 0.4554 - val_acc: 0.7800\nEpoch 54/100\n - 13s - loss: 0.5097 - acc: 0.7569 - val_loss: 0.4381 - val_acc: 0.7890\nEpoch 55/100\n - 15s - loss: 0.5139 - acc: 0.7637 - val_loss: 0.4017 - val_acc: 0.8140\nEpoch 56/100\n - 13s - loss: 0.5099 - acc: 0.7560 - val_loss: 0.4347 - val_acc: 0.7940\nEpoch 57/100\n - 13s - loss: 0.4980 - acc: 0.7530 - val_loss: 0.4394 - val_acc: 
0.7860\nEpoch 58/100\n - 13s - loss: 0.5130 - acc: 0.7589 - val_loss: 0.4424 - val_acc: 0.7960\nEpoch 59/100\n - 13s - loss: 0.4981 - acc: 0.7733 - val_loss: 0.4552 - val_acc: 0.7710\nEpoch 60/100\n - 13s - loss: 0.4991 - acc: 0.7599 - val_loss: 0.4227 - val_acc: 0.8080\nEpoch 61/100\n - 15s - loss: 0.4875 - acc: 0.7707 - val_loss: 0.4253 - val_acc: 0.8080\nEpoch 62/100\n - 13s - loss: 0.4981 - acc: 0.7632 - val_loss: 0.5508 - val_acc: 0.7510\nEpoch 63/100\n - 13s - loss: 0.5062 - acc: 0.7603 - val_loss: 0.4149 - val_acc: 0.8180\nEpoch 64/100\n - 13s - loss: 0.5008 - acc: 0.7655 - val_loss: 0.3925 - val_acc: 0.8360\nEpoch 65/100\n - 14s - loss: 0.4924 - acc: 0.7760 - val_loss: 0.4087 - val_acc: 0.8190\nEpoch 66/100\n - 13s - loss: 0.4925 - acc: 0.7642 - val_loss: 0.4290 - val_acc: 0.8010\nEpoch 67/100\n - 15s - loss: 0.4722 - acc: 0.7770 - val_loss: 0.3828 - val_acc: 0.8220\nEpoch 68/100\n - 14s - loss: 0.5055 - acc: 0.7606 - val_loss: 0.4122 - val_acc: 0.8120\nEpoch 69/100\n - 13s - loss: 0.4900 - acc: 0.7737 - val_loss: 0.4063 - val_acc: 0.8290\nEpoch 70/100\n - 13s - loss: 0.4993 - acc: 0.7636 - val_loss: 0.4151 - val_acc: 0.8010\nEpoch 71/100\n - 13s - loss: 0.5020 - acc: 0.7690 - val_loss: 0.4158 - val_acc: 0.7990\nEpoch 72/100\n - 13s - loss: 0.4955 - acc: 0.7653 - val_loss: 0.4049 - val_acc: 0.8310\nEpoch 73/100\n - 14s - loss: 0.4823 - acc: 0.7830 - val_loss: 0.4336 - val_acc: 0.8050\nEpoch 74/100\n - 13s - loss: 0.4804 - acc: 0.7819 - val_loss: 0.3934 - val_acc: 0.8180\nEpoch 75/100\n - 13s - loss: 0.5065 - acc: 0.7643 - val_loss: 0.4974 - val_acc: 0.7810\nEpoch 76/100\n - 13s - loss: 0.4888 - acc: 0.7779 - val_loss: 0.4695 - val_acc: 0.7960\nEpoch 77/100\n - 13s - loss: 0.4895 - acc: 0.7730 - val_loss: 0.4161 - val_acc: 0.8160\nEpoch 78/100\n - 13s - loss: 0.4820 - acc: 0.7790 - val_loss: 0.3894 - val_acc: 0.8220\nEpoch 79/100\n - 14s - loss: 0.4907 - acc: 0.7703 - val_loss: 0.3769 - val_acc: 0.8350\nEpoch 80/100\n - 13s - loss: 0.4794 - acc: 0.7779 - val_loss: 0.4107 - val_acc: 0.8180\nEpoch 81/100\n - 13s - loss: 0.4800 - acc: 0.7823 - val_loss: 0.4767 - val_acc: 0.7850\nEpoch 82/100\n - 13s - loss: 0.4937 - acc: 0.7699 - val_loss: 0.4065 - val_acc: 0.8190\nEpoch 83/100\n - 13s - loss: 0.4650 - acc: 0.7860 - val_loss: 0.3732 - val_acc: 0.8380\nEpoch 84/100\n - 13s - loss: 0.4981 - acc: 0.7692 - val_loss: 0.4256 - val_acc: 0.8300\nEpoch 85/100\n - 14s - loss: 0.5049 - acc: 0.7670 - val_loss: 0.3864 - val_acc: 0.8290\nEpoch 86/100\n - 13s - loss: 0.4730 - acc: 0.7830 - val_loss: 0.4212 - val_acc: 0.8200\nEpoch 87/100\n - 13s - loss: 0.4796 - acc: 0.7730 - val_loss: 0.4310 - val_acc: 0.8160\nEpoch 88/100\n - 14s - loss: 0.4944 - acc: 0.7702 - val_loss: 0.3845 - val_acc: 0.8290\nEpoch 89/100\n - 13s - loss: 0.4922 - acc: 0.7790 - val_loss: 0.3772 - val_acc: 0.8360\nEpoch 90/100\n - 13s - loss: 0.4921 - acc: 0.7816 - val_loss: 0.3902 - val_acc: 0.8320\nEpoch 91/100\n - 15s - loss: 0.4711 - acc: 0.7860 - val_loss: 0.4081 - val_acc: 0.8130\nEpoch 92/100\n - 13s - loss: 0.5064 - acc: 0.7709 - val_loss: 0.3651 - val_acc: 0.8450\nEpoch 93/100\n - 13s - loss: 0.4929 - acc: 0.7687 - val_loss: 0.3481 - val_acc: 0.8420\nEpoch 94/100\n - 13s - loss: 0.4720 - acc: 0.7796 - val_loss: 0.4129 - val_acc: 0.8060\nEpoch 95/100\n - 13s - loss: 0.4835 - acc: 0.7820 - val_loss: 0.5525 - val_acc: 0.7710\nEpoch 96/100\n - 13s - loss: 0.4742 - acc: 0.7709 - val_loss: 0.4102 - val_acc: 0.8290\nEpoch 97/100\n - 15s - loss: 0.4675 - acc: 0.7807 - val_loss: 0.5125 - val_acc: 0.7810\nEpoch 98/100\n - 13s - 
loss: 0.4692 - acc: 0.7786 - val_loss: 0.3939 - val_acc: 0.8340\nEpoch 99/100\n - 13s - loss: 0.4961 - acc: 0.7790 - val_loss: 0.4290 - val_acc: 0.8240\nEpoch 100/100\n - 13s - loss: 0.4662 - acc: 0.7876 - val_loss: 0.4970 - val_acc: 0.7820\n" ] ], [ [ "### Model Performance", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))\nt = f.suptitle('Basic CNN Performance', fontsize=12)\nf.subplots_adjust(top=0.85, wspace=0.3)\n\nepoch_list = list(range(1,101))\nax1.plot(epoch_list, history.history['acc'], label='Train Accuracy')\nax1.plot(epoch_list, history.history['val_acc'], label='Validation Accuracy')\nax1.set_xticks(np.arange(0, 101, 5))\nax1.set_ylabel('Accuracy Value')\nax1.set_xlabel('Epoch')\nax1.set_title('Accuracy')\nl1 = ax1.legend(loc=\"best\")\n\nax2.plot(epoch_list, history.history['loss'], label='Train Loss')\nax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss')\nax2.set_xticks(np.arange(0, 101, 5))\nax2.set_ylabel('Loss Value')\nax2.set_xlabel('Epoch')\nax2.set_title('Loss')\nl2 = ax2.legend(loc=\"best\")", "_____no_output_____" ], [ "if not os.path.exists('saved_models'): os.mkdir('saved_models')\nmodel.save('saved_models/cnn_scratch_dogvscat.h5')", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec759e92ae95fa04930edbaaad1a086b6f10fd79
273,128
ipynb
Jupyter Notebook
survey_results_analysis.ipynb
VickieL/stackoverflow_survey_analysis
05a9dc25878eee21d3e80b7d77432af59079ff34
[ "FTL", "CNRI-Python" ]
1
2022-03-16T14:00:47.000Z
2022-03-16T14:00:47.000Z
survey_results_analysis.ipynb
VickieL/stackoverflow_survey_analysis
05a9dc25878eee21d3e80b7d77432af59079ff34
[ "FTL", "CNRI-Python" ]
null
null
null
survey_results_analysis.ipynb
VickieL/stackoverflow_survey_analysis
05a9dc25878eee21d3e80b7d77432af59079ff34
[ "FTL", "CNRI-Python" ]
null
null
null
80.926815
41,178
0.673168
[ [ [ "#### Survy result analysis\n\nIn this notebook, I'm exploring the Questions I'm interested in according to the survey results. ", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\n\n### Read the survey raw data from csv files. \n### We don't use the schema file in later analysis, so let's don't read schema file here.\n\ndf = pd.read_csv('./survey_results_public.csv')\n# schema = pd.read_csv('./survey_results_schema.csv')\n\n### take a look of the data to see if it load successfully. \ndf.head()", "_____no_output_____" ] ], [ [ "## 1. Does developer's interest/preference impact their job satisfaction or career satisfaction?", "_____no_output_____" ], [ "### Business understanding:\nThis question is aim to analyze if there is correlation between developer's interest/preference and job satisfaction or career satisfaction. If there is correlation, we can build prediction model later to predict job satisfaction or career satisfaction. But if not, that's also a finding from the data. ", "_____no_output_____" ], [ "### Data understanding:\nAccording to the schema file, job satisfaction or career satisfaction refers to these columns:\n'CareerSatisfaction', 'JobSatisfaction'\n\ndeveloper's interest/preference refers to these columns:\n'ProblemSolving',\n 'BuildingThings',\n 'LearningNewTech',\n 'BoringDetails',\n 'JobSecurity',\n 'DiversityImportant',\n 'AnnoyingUI',\n 'FriendsDevelopers',\n 'RightWrongWay',\n 'UnderstandComputers',\n 'SeriousWork',\n 'InvestTimeTools',\n 'WorkPayCare',\n 'KinshipDevelopers',\n 'ChallengeMyself',\n 'CompetePeers',\n 'ChangeWorld'\n", "_____no_output_____" ] ], [ [ "### Define a list of column names which related to developer's interest/preference:\ninterest_column_list = ['ProblemSolving',\n 'BuildingThings',\n 'LearningNewTech',\n 'BoringDetails',\n 'JobSecurity',\n 'DiversityImportant',\n 'AnnoyingUI',\n 'FriendsDevelopers',\n 'RightWrongWay',\n 'UnderstandComputers',\n 'SeriousWork',\n 'InvestTimeTools',\n 'WorkPayCare',\n 'KinshipDevelopers',\n 'ChallengeMyself',\n 'CompetePeers',\n 'ChangeWorld']\n\n### View these columns to see their values\ndf[interest_column_list].head()\n", "_____no_output_____" ] ], [ [ "From the table above, we can see these columns are all text type, and with same context:\n\"Strongly disagree\"~\"Strongly agree\"\n\n### Data preparation:\n\nTo analyze the correlation, we need to transfer categorized variables into numbers.\nAll developer's interest/preference survey answers can be transferred into 1-5 levels. ", "_____no_output_____" ] ], [ [ "### Let's define a dictionary to convert answers from text to numbers. 
\n\nagree_level_dic = {'Strongly disagree':1, 'Disagree':2, 'Somewhat agree':3, 'Agree':4, 'Strongly agree':5}\n\n\n### The next step is convert all answers from text(\"Strongly disagree\"~\"Strongly agree\") to numbers(1~5).\n### We cannot do that one by one, so we need a function to do the convert\n### Define a function that convert agree level to 1-5 number, using the dictionary we defined just now.\n\ndef agree_level_convert(x):\n if x in agree_level_dic.keys():\n return agree_level_dic[x]\n else:\n return x\n\n\n### Now, we can do the converting work, by using a for loop, to convert each interest related column value \n### from text(\"Strongly disagree\"~\"Strongly agree\") to numbers(1~5).\n\nfor i in interest_column_list:\n df.loc[:, [i]] = df[i].apply(agree_level_convert)\n ### print out each column to see if convert works\n print(df[i].head())\n", "0 5.0\n1 NaN\n2 5.0\n3 5.0\n4 NaN\nName: ProblemSolving, dtype: float64\n0 5.0\n1 NaN\n2 5.0\n3 5.0\n4 NaN\nName: BuildingThings, dtype: float64\n0 4.0\n1 NaN\n2 5.0\n3 5.0\n4 NaN\nName: LearningNewTech, dtype: float64\n0 2.0\n1 NaN\n2 3.0\n3 2.0\n4 NaN\nName: BoringDetails, dtype: float64\n0 5.0\n1 NaN\n2 4.0\n3 3.0\n4 NaN\nName: JobSecurity, dtype: float64\n0 4.0\n1 NaN\n2 5.0\n3 4.0\n4 NaN\nName: DiversityImportant, dtype: float64\n0 4.0\n1 NaN\n2 4.0\n3 4.0\n4 NaN\nName: AnnoyingUI, dtype: float64\n0 2.0\n1 NaN\n2 3.0\n3 4.0\n4 NaN\nName: FriendsDevelopers, dtype: float64\n0 3.0\n1 NaN\n2 2.0\n3 3.0\n4 NaN\nName: RightWrongWay, dtype: float64\n0 2.0\n1 NaN\n2 2.0\n3 1.0\n4 NaN\nName: UnderstandComputers, dtype: float64\n0 5.0\n1 NaN\n2 4.0\n3 5.0\n4 NaN\nName: SeriousWork, dtype: float64\n0 5.0\n1 NaN\n2 3.0\n3 4.0\n4 NaN\nName: InvestTimeTools, dtype: float64\n0 1.0\n1 NaN\n2 2.0\n3 2.0\n4 NaN\nName: WorkPayCare, dtype: float64\n0 4.0\n1 NaN\n2 3.0\n3 5.0\n4 NaN\nName: KinshipDevelopers, dtype: float64\n0 4.0\n1 NaN\n2 4.0\n3 5.0\n4 NaN\nName: ChallengeMyself, dtype: float64\n0 2.0\n1 NaN\n2 2.0\n3 3.0\n4 NaN\nName: CompetePeers, dtype: float64\n0 4.0\n1 NaN\n2 4.0\n3 4.0\n4 NaN\nName: ChangeWorld, dtype: float64\n" ], [ "df[interest_column_list].head()", "_____no_output_____" ] ], [ [ "### Data Modeling\nNow all columns we want to analyze for Question 1 are number type. We can calculate the correlation now.", "_____no_output_____" ] ], [ [ "### Pick out the columns names we want to analyze for Question 1\n### StackOverflowSatisfaction is not related to the question, but just curious about the result, so also put it in.\n\nsat_interest_columns = df[['CareerSatisfaction', 'JobSatisfaction', 'StackOverflowSatisfaction',\n 'ProblemSolving',\n 'BuildingThings',\n 'LearningNewTech',\n 'BoringDetails',\n 'JobSecurity',\n 'DiversityImportant',\n 'AnnoyingUI',\n 'FriendsDevelopers',\n 'RightWrongWay',\n 'UnderstandComputers',\n 'SeriousWork',\n 'InvestTimeTools',\n 'WorkPayCare',\n 'KinshipDevelopers',\n 'ChallengeMyself',\n 'CompetePeers',\n 'ChangeWorld',]]\n\n### whether these variable has correlation with CareerSatisfaction or JobSatisfaction\nsat_interest_columns.corr()\n\n\n### the heatmap is not readable, so I comment this line.\n#sns.heatmap(sa_sub.corr(), annot=True, fmt=\".2f\") ", "_____no_output_____" ] ], [ [ "### Evaluate the Results\nFrom the table above, we can see there is no correlation between CareerSatisfaction/JobSatisfaction and those 17 questions about developers interest. So we cannot predict CareerSatisfaction/JobSatisfaction by using those factors.", "_____no_output_____" ], [ "## 2. 
What are developer's preferred, what are not?", "_____no_output_____" ], [ "### Business understanding:\nThis question is aim to analyze what developer preferred or interested in? what are not?\n\n### Data understanding:\nFrom the survey list, we can see the columns we used in Queston 1 can also help to answer this question: 'ProblemSolving', 'BuildingThings', 'LearningNewTech', 'BoringDetails', 'JobSecurity', 'DiversityImportant', 'AnnoyingUI', 'FriendsDevelopers', 'RightWrongWay', 'UnderstandComputers', 'SeriousWork', 'InvestTimeTools', 'WorkPayCare', 'KinshipDevelopers', 'ChallengeMyself', 'CompetePeers', 'ChangeWorld'.\n\nTake 'ProblemSolving' for example, the survey question is : \"I love solving problems\". This can tell whether developer love solving problems. \n\n### Prepare Data:\nwe already converted all answers from text(\"Strongly disagree\"~\"Strongly agree\") to numbers(1~5) in Question 1 part, so we can just re-use the data. ", "_____no_output_____" ] ], [ [ "df[interest_column_list].head()", "_____no_output_____" ], [ "### let's drop the rows which are all N/A, they're not helpful for the calculation. \ninterest_dropna = df.loc[:, interest_column_list].dropna(how='all', axis=0)\ninterest_dropna.head()", "_____no_output_____" ] ], [ [ "### Data modeling:\nIn this Question, we can calculate the average score each column got, then we can see which got higher score, which got lower score, to represent developer's preferrence.", "_____no_output_____" ] ], [ [ "### Calculate the average score for each column, and show it in a bar chart from highest to lowest.\ninterest_dropna.mean().sort_values().plot.barh()\nplt.show()", "_____no_output_____" ] ], [ [ "### Evaluate the Results:\nThe higher the number is, the more developers agree on these items. The data tells us most developers love solving problems. Other than that, they also like Building things, Learning new technologies, challenge myself. AnnoyingUI score is also high, which means developers also care about software’s UI. And most developers take their work very seriously.\n\nMeanwhile, developers are not afraid about details, and they don’t like competing with peers. This might indicate that most developers not care about competition, and they’re not aggressive. WorkPayCare has the lowest score, which means developers care about what they are working on, instead of the payment itself.", "_____no_output_____" ], [ "## 3. What does developer care about when accessing a potential job?", "_____no_output_____" ], [ "### Business understanding:\nThis question is aim to analyze when developers are seeking for a new job, what they care about? salary? location? 
let’s see how data tells us.\n\n### Data understanding:\nFrom the survey list, we can see there are couple of questions asked:\"When you're assessing potential jobs to apply to, how important are each of the following to you?\"\nI think these columns can help me answer this business question:\n'AssessJobIndustry',\n 'AssessJobRole',\n 'AssessJobExp',\n 'AssessJobDept',\n 'AssessJobTech',\n 'AssessJobProjects',\n 'AssessJobCompensation',\n 'AssessJobOffice',\n 'AssessJobCommute',\n 'AssessJobRemote',\n 'AssessJobLeaders',\n 'AssessJobProfDevel',\n 'AssessJobDiversity',\n 'AssessJobProduct',\n 'AssessJobFinances'\n\n", "_____no_output_____" ] ], [ [ "### Define a list of column names:\naccess_column_list = ['AssessJobIndustry',\n 'AssessJobRole',\n 'AssessJobExp',\n 'AssessJobDept',\n 'AssessJobTech',\n 'AssessJobProjects',\n 'AssessJobCompensation',\n 'AssessJobOffice',\n 'AssessJobCommute',\n 'AssessJobRemote',\n 'AssessJobLeaders',\n 'AssessJobProfDevel',\n 'AssessJobDiversity',\n 'AssessJobProduct',\n 'AssessJobFinances']\n\n### View these columns to see their values\ndf[access_column_list].head()", "_____no_output_____" ] ], [ [ "From the table above, we can see these columns are all text type, and with same context:\nNot at all important\" ~ \"Very important\"\n\nSimilar like the interest columns, we need to convert answers from text to numbers. \nThis can be also convert to 1 ~ 5 levels.\n\n### Prepare Data:", "_____no_output_____" ] ], [ [ "### Let's define a dictionary to convert answers from text to numbers. \n\nimportant_level_dic = {'Not at all important':1, 'Not very important':2, 'Somewhat important':3, 'Important':4, 'Very important':5}\n\n\n### The next step is convert all columns from text(\"Not at all important\" ~ \"Very important\") to numbers(1~5).\n### We cannot do that one by one, so we need a function to do the convert\n### Define a function that convert important level to 1-5 number, using the dictionary we defined just now.\n\ndef important_level_convert(x):\n if x in important_level_dic.keys():\n return important_level_dic[x]\n else:\n return x\n\n\n### Now, we can do the converting work, by using a for loop, to convert each column value \n### from text(\"Not at all important\" ~ \"Very important\") to numbers(1~5).\n\nfor i in access_column_list:\n df.loc[:, [i]] = df[i].apply(important_level_convert)\n ### print out each column to see if convert works for testing\n # print(df[i].head())\n\nprint(\"finished\")", "finished\n" ], [ "### Check these columns to see if they are converted successfully.\ndf[access_column_list].head()", "_____no_output_____" ], [ "### Then, let's drop the rows which are all N/A, they're not helpful for the calculation.\naccess_dropna = df.loc[:, access_column_list].dropna(how='all', axis=0)\naccess_dropna.head()", "_____no_output_____" ] ], [ [ "### Data Modeling:\nNow all columns are numberic, we can calculate the average score each column got, then we can see which got higher score, which got lower score, to represent developer care about when accessing a potential job.", "_____no_output_____" ] ], [ [ "### Calculate the average score for each column, and show it in a bar chart from highest to lowest.\naccess_dropna.mean().sort_values().plot.barh()\nplt.show()", "_____no_output_____" ] ], [ [ "### Evaluate the Results:\nOpportunities for professional development is the most important thing developer care about. Other than that, developers also care about compensation and benefits, office environment, and job technologies (i.e. 
languages, frameworks). The amount of time spend commuting and How projects are managed got about 4.0 score on average, they are also considered by developers usually.", "_____no_output_____" ], [ "## 4. What is important in Globex's hiring process?", "_____no_output_____" ], [ "### Business understanding:\nThis question is aim to analyze what high-tech company care when hiring developers?\n\n### Data understanding:\nFrom the survey list, we can see there are couple of questions asked:\"Congratulations! You’ve just been put in charge of technical recruiting at Globex, a multinational high- tech firm. This job comes with a corner office, and you have an experienced staff of recruiters at your disposal. They want to know what they should prioritize when recruiting software developers. How important should each of the following be in Globex’s hiring process?\"\n\nI think these columns can help me answer this business question:\n'ImportantHiringAlgorithms',\n 'ImportantHiringTechExp',\n 'ImportantHiringCommunication',\n 'ImportantHiringOpenSource',\n 'ImportantHiringPMExp',\n 'ImportantHiringCompanies',\n 'ImportantHiringTitles',\n 'ImportantHiringEducation',\n 'ImportantHiringRep',\n 'ImportantHiringGettingThingsDone',\n 'EducationImportant'", "_____no_output_____" ] ], [ [ "### To analyze this question, we need to first define which columns are surveys related to the Question.\n### Define a list of column names:\nimportant_hire_list = ['ImportantHiringAlgorithms',\n 'ImportantHiringTechExp',\n 'ImportantHiringCommunication',\n 'ImportantHiringOpenSource',\n 'ImportantHiringPMExp',\n 'ImportantHiringCompanies',\n 'ImportantHiringTitles',\n 'ImportantHiringEducation',\n 'ImportantHiringRep',\n 'ImportantHiringGettingThingsDone',\n 'EducationImportant']\n\n\n### View these columns to see their values\ndf[important_hire_list].head()", "_____no_output_____" ] ], [ [ "From the table above, we can see these columns are all text type, and with same context:\nNot at all important\" ~ \"Very important\"\n\nSimilar like the access columns in Question 3, we can use the same function to convert answers from text to numbers. \nThis can be also convert to 1 ~ 5 levels.\n\n### Prepare Data:", "_____no_output_____" ] ], [ [ "### Now, we can do the converting work, by using a for loop, to convert each column value \n### from text(\"Not at all important\" ~ \"Very important\") to numbers(1~5).\n\nfor i in important_hire_list:\n df.loc[:, [i]] = df[i].apply(important_level_convert)\n ### print out each column to see if convert works for testing\n #cprint(df[i].head())\n\nprint(\"finished\")", "finished\n" ], [ "### Check these columns to see if they are converted successfully.\ndf[important_hire_list].head()", "_____no_output_____" ], [ "### Then, let's drop the rows which are all N/A, they're not helpful for the calculation.\nhire_dropna = df.loc[:, important_hire_list].dropna(how='all', axis=0)\nhire_dropna.head()", "_____no_output_____" ] ], [ [ "### Data Modeling:\nNow all columns are numberic, we can calculate the average score to rank the columns.", "_____no_output_____" ] ], [ [ "### Calculate the average score for each column, and show it in a bar chart from highest to lowest.\nhire_dropna.mean().sort_values().plot.barh()\nplt.show()", "_____no_output_____" ] ], [ [ "### Evaluate the Results:\nSurprisedly, the most important thing is not developer’s tech skill, but Communication skills! We can see how important the communication is even you’re a software developer. 
Track record of getting things done, Knowledge of algorithms and data structures, and Experience with specific tools are the next most important things. Of course they are: these are the core hard skills for developers. Formal schooling and education is somewhat important; a higher degree might be a plus when applying for a job.", "_____no_output_____" ], [ "## 5. When do most developers prefer to start work?", "_____no_output_____" ], [ "### Business understanding:\nWe know developers often work very late at night, and some even say they have more ideas or think more clearly at night. So if they could freely choose when to start work, what would they choose?\n\n### Data understanding:\nFrom the survey list, we can see there is a question asked: \"Suppose you could choose your own working hours for an 8-hour day. What time would you start work for the day? Please adjust the slider to the hour nearest your ideal start time. The box next to the slider will display your selection using a 24-hour clock\"\n\nThe column for this question is named [WorkStart], so we can analyze the data in this column to find the result.\n\n### Prepare data:", "_____no_output_____" ] ], [ [ "### Let's see how many types of answers there are, and how many responses each type received:\ndf.groupby(['WorkStart'])['WorkStart'].count().sort_values()", "_____no_output_____" ] ], [ [ "Looks like we don't need to process the data any further; we can use the aggregated counts directly for visualization.\n\n### Data modeling:", "_____no_output_____" ] ], [ [ "### The raw counts above are hard to read at a glance, so let's show the same data in a chart\ndf.groupby(['WorkStart'])['WorkStart'].count().sort_values().plot.barh()\nplt.show()", "_____no_output_____" ] ], [ [ "### Evaluate the Results:\nUnexpectedly, very few developers choose to start at night. Most developers still want to start work in the morning, around 8:00am to 10:00am. It seems most developers still want to keep a normal schedule :)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec75aaed0c99d8f3e2fe2e1f29d47474e0c90886
984
ipynb
Jupyter Notebook
basic/input_age.ipynb
eric999j/Udemy_Python_Hand_On
7a985b3e2c9adfd3648d240af56ac00bb916c3ad
[ "Apache-2.0" ]
1
2020-12-31T18:03:34.000Z
2020-12-31T18:03:34.000Z
basic/input_age.ipynb
cntfk2017/Udemy_Python_Hand_On
52f2a5585bfdea95d893f961c8c21844072e93c7
[ "Apache-2.0" ]
null
null
null
basic/input_age.ipynb
cntfk2017/Udemy_Python_Hand_On
52f2a5585bfdea95d893f961c8c21844072e93c7
[ "Apache-2.0" ]
2
2019-09-23T14:26:48.000Z
2020-05-25T07:09:26.000Z
24
105
0.490854
[ [ [ "# ---------------------------------------------------------------\n# python best courses https://courses.tanpham.org/\n# ---------------------------------------------------------------\n# Create a program that asks the user to enter their name and their age.\n# Print out a message addressed to them that tells them the year that they will turn 100 years old.", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
ec75afa101aa850556cf1284658ba693aadfead2
37,916
ipynb
Jupyter Notebook
credit_risk_ensemble.ipynb
Drakeblaze10/Credit_Risk_Analysis
34b4ad1709a248c2bddb5a252080d488fb3e3c32
[ "Apache-2.0" ]
null
null
null
credit_risk_ensemble.ipynb
Drakeblaze10/Credit_Risk_Analysis
34b4ad1709a248c2bddb5a252080d488fb3e3c32
[ "Apache-2.0" ]
null
null
null
credit_risk_ensemble.ipynb
Drakeblaze10/Credit_Risk_Analysis
34b4ad1709a248c2bddb5a252080d488fb3e3c32
[ "Apache-2.0" ]
null
null
null
36.527938
284
0.437019
[ [ [ "import warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom collections import Counter", "_____no_output_____" ], [ "from sklearn.metrics import balanced_accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom imblearn.metrics import classification_report_imbalanced", "_____no_output_____" ] ], [ [ "# Read the CSV and Perform Basic Data Cleaning", "_____no_output_____" ] ], [ [ "# https://help.lendingclub.com/hc/en-us/articles/215488038-What-do-the-different-Note-statuses-mean-\n\ncolumns = [\n \"loan_amnt\", \"int_rate\", \"installment\", \"home_ownership\",\n \"annual_inc\", \"verification_status\", \"issue_d\", \"loan_status\",\n \"pymnt_plan\", \"dti\", \"delinq_2yrs\", \"inq_last_6mths\",\n \"open_acc\", \"pub_rec\", \"revol_bal\", \"total_acc\",\n \"initial_list_status\", \"out_prncp\", \"out_prncp_inv\", \"total_pymnt\",\n \"total_pymnt_inv\", \"total_rec_prncp\", \"total_rec_int\", \"total_rec_late_fee\",\n \"recoveries\", \"collection_recovery_fee\", \"last_pymnt_amnt\", \"next_pymnt_d\",\n \"collections_12_mths_ex_med\", \"policy_code\", \"application_type\", \"acc_now_delinq\",\n \"tot_coll_amt\", \"tot_cur_bal\", \"open_acc_6m\", \"open_act_il\",\n \"open_il_12m\", \"open_il_24m\", \"mths_since_rcnt_il\", \"total_bal_il\",\n \"il_util\", \"open_rv_12m\", \"open_rv_24m\", \"max_bal_bc\",\n \"all_util\", \"total_rev_hi_lim\", \"inq_fi\", \"total_cu_tl\",\n \"inq_last_12m\", \"acc_open_past_24mths\", \"avg_cur_bal\", \"bc_open_to_buy\",\n \"bc_util\", \"chargeoff_within_12_mths\", \"delinq_amnt\", \"mo_sin_old_il_acct\",\n \"mo_sin_old_rev_tl_op\", \"mo_sin_rcnt_rev_tl_op\", \"mo_sin_rcnt_tl\", \"mort_acc\",\n \"mths_since_recent_bc\", \"mths_since_recent_inq\", \"num_accts_ever_120_pd\", \"num_actv_bc_tl\",\n \"num_actv_rev_tl\", \"num_bc_sats\", \"num_bc_tl\", \"num_il_tl\",\n \"num_op_rev_tl\", \"num_rev_accts\", \"num_rev_tl_bal_gt_0\",\n \"num_sats\", \"num_tl_120dpd_2m\", \"num_tl_30dpd\", \"num_tl_90g_dpd_24m\",\n \"num_tl_op_past_12m\", \"pct_tl_nvr_dlq\", \"percent_bc_gt_75\", \"pub_rec_bankruptcies\",\n \"tax_liens\", \"tot_hi_cred_lim\", \"total_bal_ex_mort\", \"total_bc_limit\",\n \"total_il_high_credit_limit\", \"hardship_flag\", \"debt_settlement_flag\"\n]\n\ntarget = [\"loan_status\"]", "_____no_output_____" ], [ "# Load the data\nfile_path = Path('./Resources/LoanStats_2019Q1.csv')\ndf = pd.read_csv(file_path, skiprows=1)[:-2]\ndf = df.loc[:, columns].copy()\n\n# Drop the null columns where all values are null\ndf = df.dropna(axis='columns', how='all')\n\n# Drop the null rows\ndf = df.dropna()\n\n# Remove the `Issued` loan status\nissued_mask = df['loan_status'] != 'Issued'\ndf = df.loc[issued_mask]\n\n# convert interest rate to numerical\ndf['int_rate'] = df['int_rate'].str.replace('%', '')\ndf['int_rate'] = df['int_rate'].astype('float') / 100\n\n\n# Convert the target column values to low_risk and high_risk based on their values\nx = {'Current': 'low_risk'} \ndf = df.replace(x)\n\nx = dict.fromkeys(['Late (31-120 days)', 'Late (16-30 days)', 'Default', 'In Grace Period'], 'high_risk') \ndf = df.replace(x)\n\ndf.reset_index(inplace=True, drop=True)\n\ndf.head()", "_____no_output_____" ] ], [ [ "# Split the Data into Training and Testing", "_____no_output_____" ] ], [ [ "# Create our features\nX = df.drop('loan_status', axis = 1)\nX = pd.get_dummies(X)\n\n# Create our target\ny = df['loan_status']", "_____no_output_____" ], [ "X.describe()", 
"_____no_output_____" ], [ "# Check the balance of our target values\ny.value_counts()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, stratify=y)", "_____no_output_____" ] ], [ [ "# Ensemble Learners\n\nIn this section, you will compare two ensemble algorithms to determine which algorithm results in the best performance. You will train a Balanced Random Forest Classifier and an Easy Ensemble AdaBoost classifier . For each algorithm, be sure to complete the folliowing steps:\n\n1. Train the model using the training data. \n2. Calculate the balanced accuracy score from sklearn.metrics.\n3. Print the confusion matrix from sklearn.metrics.\n4. Generate a classication report using the `imbalanced_classification_report` from imbalanced-learn.\n5. For the Balanced Random Forest Classifier onely, print the feature importance sorted in descending order (most important feature to least important) along with the feature score\n\nNote: Use a random state of 1 for each algorithm to ensure consistency between tests", "_____no_output_____" ], [ "### Balanced Random Forest Classifier", "_____no_output_____" ] ], [ [ "# Resample the training data with the BalancedRandomForestClassifier\nfrom imblearn.ensemble import BalancedRandomForestClassifier\nresample = BalancedRandomForestClassifier(n_estimators=100, random_state = 1)\nresample = resample.fit(X_train, y_train)", "_____no_output_____" ], [ "# Calculated the balanced accuracy score\ny_pred = resample.predict(X_test)\nbalanced_accuracy_score(y_test, y_pred)", "_____no_output_____" ], [ "# Display the confusion matrix\nmatrix = confusion_matrix(y_test, y_pred)\nprint(matrix)", "[[ 58 29]\n [ 1560 15558]]\n" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred))", " pre rec spe f1 geo iba sup\n\n high_risk 0.04 0.67 0.91 0.07 0.78 0.59 87\n low_risk 1.00 0.91 0.67 0.95 0.78 0.62 17118\n\navg / total 0.99 0.91 0.67 0.95 0.78 0.62 17205\n\n" ], [ "# List the features sorted in descending order by feature importance\nfeature = sorted(zip(resample.feature_importances_, X.columns), reverse=True)\nfeature", "_____no_output_____" ] ], [ [ "### Easy Ensemble AdaBoost Classifier", "_____no_output_____" ] ], [ [ "# Train the EasyEnsembleClassifier\nfrom imblearn.ensemble import EasyEnsembleClassifier\neasy_ensemble = EasyEnsembleClassifier(n_estimators = 100, random_state = 1)\neasy_ensemble = easy_ensemble.fit(X_train, y_train)", "_____no_output_____" ], [ "# Calculated the balanced accuracy score\ny_pred = easy_ensemble.predict(X_test)\nbalanced_accuracy_score(y_test, y_pred)", "_____no_output_____" ], [ "# Display the confusion matrix\nmatrix = confusion_matrix(y_test, y_pred)\nprint(matrix)", "[[ 79 8]\n [ 979 16139]]\n" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred))", " pre rec spe f1 geo iba sup\n\n high_risk 0.07 0.91 0.94 0.14 0.93 0.85 87\n low_risk 1.00 0.94 0.91 0.97 0.93 0.86 17118\n\navg / total 0.99 0.94 0.91 0.97 0.93 0.86 17205\n\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec75b6f61016b7ac288032860a13a85e765eb21a
17,257
ipynb
Jupyter Notebook
Neural Networks from Scratch.ipynb
Yasir323/ML-algorithms-from-scratch
e6198b38ed4169d24a6cfb623761d73faf56db52
[ "MIT" ]
null
null
null
Neural Networks from Scratch.ipynb
Yasir323/ML-algorithms-from-scratch
e6198b38ed4169d24a6cfb623761d73faf56db52
[ "MIT" ]
null
null
null
Neural Networks from Scratch.ipynb
Yasir323/ML-algorithms-from-scratch
e6198b38ed4169d24a6cfb623761d73faf56db52
[ "MIT" ]
null
null
null
26.713622
410
0.594889
[ [ [ "import numpy as np", "_____no_output_____" ], [ "input_vector = np.array([1.72, 1.23])\nweights_1 = np.array([1.26, 0])\nweights_2 = np.array([2.17, 0.32])", "_____no_output_____" ], [ "dot1 = input_vector@weights_1", "_____no_output_____" ], [ "print(f\"The dot product is: {dot1}\")", "The dot product is: 2.1672\n" ], [ "dot2 = np.dot(input_vector, weights_2)", "_____no_output_____" ], [ "print(f\"The dot product is: {dot2}\")", "The dot product is: 4.1259999999999994\n" ] ], [ [ "Working with neural networks consists of doing operations with vectors. You represent the vectors as multidimensional arrays. Vectors are useful in deep learning mainly because of one particular operation: the dot product. The dot product of two vectors tells you how similar they are in terms of direction and is scaled by the magnitude of the two vectors.", "_____no_output_____" ], [ "## Making predictions", "_____no_output_____" ], [ "If you add more layers but keep using only linear operations, then adding more layers would have no effect because each layer will always have some correlation with the input of the previous layer. ", "_____no_output_____" ], [ "What you want is to find an operation that makes the middle layers sometimes correlate with an input and sometimes not correlate.\n\nYou can achieve this behavior by using nonlinear functions. These nonlinear functions are called activation functions. There are many types of activation functions. The ReLU (rectified linear unit), for example, is a function that converts all negative numbers to zero. This means that the network can “turn off” a weight if it’s negative, adding nonlinearity.", "_____no_output_____" ], [ "The network you’re building will use the **sigmoid activation function**.", "_____no_output_____" ], [ "<img src=\"https://robocrop.realpython.net/?url=https%3A//files.realpython.com/media/sigmoid_function.f966c820f8c3.png&w=578&sig=559e58b0e39bc1d37841223862ceabbd6ae8be22\">", "_____no_output_____" ], [ "Probability functions give you the probability of occurrence for possible outcomes of an event. The only two possible outputs of the dataset are 0 and 1, and the Bernoulli distribution is a distribution that has two possible outcomes as well. The sigmoid function is a good choice if your problem follows the Bernoulli distribution, so that’s why you’re using it in the last layer of your neural network.", "_____no_output_____" ], [ "Since the function limits the output to a range of 0 to 1, you’ll use it to predict probabilities. If the output is greater than 0.5, then you’ll say the prediction is 1. If it’s below 0.5, then you’ll say the prediction is 0. 
This is the flow of the computations inside the network you’re building:", "_____no_output_____" ], [ "<img src=\"https://robocrop.realpython.net/?url=https%3A//files.realpython.com/media/network_architecture.406cfcc68417.png&w=700&sig=ce1ed03252df1cdbaa626424ffbb9084ab2b7b5e\">", "_____no_output_____" ] ], [ [ "input_vector = np.array([1.66, 1.56])\nweights_1 = np.array([1.45, -0.66])\nbias = np.array([0.0])", "_____no_output_____" ], [ "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "_____no_output_____" ], [ "weights1 = np.concatenate((bias, weights_1))\nweights1", "_____no_output_____" ], [ "def make_prediction(input_vector, weights, bias):\n layer_1 = np.dot(input_vector, weights) + bias\n layer_2 = sigmoid(layer_1)\n return layer_2", "_____no_output_____" ], [ "prediction = make_prediction(input_vector, weights_1, bias)\nprediction", "_____no_output_____" ], [ "def predict_class(prediction):\n if prediction > 0.5:\n return 1\n return 0", "_____no_output_____" ], [ "predict_class(prediction)", "_____no_output_____" ], [ "input_vector = np.array([2, 1.5])\nprediction = make_prediction(input_vector, weights_1, bias)\nprediction", "_____no_output_____" ], [ "predict_class(prediction)", "_____no_output_____" ] ], [ [ "## Training", "_____no_output_____" ], [ "To adjust the weights, you’ll use the gradient descent and backpropagation algorithms. Gradient descent is applied to find the direction and the rate to update the parameters. ", "_____no_output_____" ], [ "### Find the error", "_____no_output_____" ], [ "To understand the magnitude of the error, you need to choose a way to measure it. The function used to measure the error is called the cost function, or loss function. In this tutorial, you’ll use the **mean squared error (MSE)** as your cost function.", "_____no_output_____" ] ], [ [ "target = 0", "_____no_output_____" ], [ "mse = np.square(prediction - target)\nmse", "_____no_output_____" ] ], [ [ "### Reducing the error", "_____no_output_____" ], [ "The goal is to change the weights and bias variables so you can reduce the error. To understand how this works, you’ll change only the weights variable and leave the bias fixed for now. You can also get rid of the sigmoid function and use only the result of layer_1. ", "_____no_output_____" ], [ "You compute the MSE by doing error = np.square(prediction - target). If you treat (prediction - target) as a single variable x, then you have error = np.square(x), which is a quadratic function. Here’s how the function looks if you plot it:", "_____no_output_____" ], [ "<img src=\"https://robocrop.realpython.net/?url=https%3A//files.realpython.com/media/quatratic_function.002729dea332.png&w=578&sig=1df4f5711e982f821d54ab9634ac28bd9cd0312d\">", "_____no_output_____" ], [ "The error is given by the y-axis. If you’re in point A and want to reduce the error toward 0, then you need to bring the x value down. On the other hand, if you’re in point B and want to reduce the error, then you need to bring the x value up. To know which direction you should go to reduce the error, you’ll use the derivative. A derivative explains exactly how a pattern will change.\n\nAnother word for the derivative is gradient. Gradient descent is the name of the algorithm used to find the direction and the rate to update the network parameters. 
", "_____no_output_____" ] ], [ [ "derivative_of_mse = 2 * (prediction - target)\nderivative_of_mse", "_____no_output_____" ] ], [ [ "### Update the weights", "_____no_output_____" ] ], [ [ "weights_1 = weights_1 - derivative_of_mse if derivative_of_mse >= 0 else weights_1 + derivative_of_mse", "_____no_output_____" ], [ "prediction = make_prediction(input_vector, weights_1, bias)\nprediction", "_____no_output_____" ], [ "error = (prediction - target) ** 2\nerror", "_____no_output_____" ] ], [ [ "The error dropped down to almost 0!", "_____no_output_____" ], [ "In this example, the derivative result was small, but there are some cases where the derivative result is too high. Take the image of the quadratic function as an example. High increments aren’t ideal because you could keep going from point A straight to point B, never getting close to zero. To cope with that, you update the weights with a fraction of the derivative result.\n\nTo define a fraction for updating the weights, you use the alpha parameter, also called the learning rate.", "_____no_output_____" ], [ "Now you want to know how to change weights_1 and bias to reduce the error. You already saw that you can use derivatives for this, but instead of a function with only a sum inside, now you have a function that produces its result using other functions.\n\nSince now you have this function composition, to take the derivative of the error concerning the parameters, you’ll need to use the chain rule from calculus. With the chain rule, you take the partial derivatives of each function, evaluate them, and multiply all the partial derivatives to get the derivative you want. ", "_____no_output_____" ], [ "<img src=\"https://robocrop.realpython.net/?url=https%3A//files.realpython.com/media/partial_derivative_weights_2.c792633559c3.png&w=750&sig=77881b051de83d0af835c87b5abc82dbd340bef5\">", "_____no_output_____" ], [ "### Adjusting the parameters with Backpropagation", "_____no_output_____" ], [ "You want to take the derivative of the error function with respect to the bias, derror_dbias. Then you’ll keep going backward, taking the partial derivatives until you find the bias variable.\n\nSince you are starting from the end and going backward, you first need to take the partial derivative of the error with respect to the prediction. That’s the derror_dprediction in the image below:", "_____no_output_____" ], [ "<img src=\"https://robocrop.realpython.net/?url=https%3A//files.realpython.com/media/partial_derivative_bias_2.177c16a60b9d.png&w=750&sig=72cd2e7882a87d1ef09e678d9cfa9517e0ae63c8\">", "_____no_output_____" ], [ "The function that produces the error is a square function, and the derivative of this function is 2 * x, as you saw earlier. You applied the first partial derivative (derror_dprediction) and still didn’t get to the bias, so you need to take another step back and take the derivative of the prediction with respect to the previous layer, dprediction_dlayer1.\n\nThe prediction is the result of the sigmoid function. You can take the derivative of the sigmoid function by multiplying sigmoid(x) and 1 - sigmoid(x). This derivative formula is very handy because you can use the sigmoid result that has already been computed to compute the derivative of it. You then take this partial derivative and continue going backward.\n\nNow you’ll take the derivative of layer_1 with respect to the bias. There it is—you finally got to it! 
The bias variable is an independent variable, so the result after applying the power rule is 1.", "_____no_output_____" ] ], [ [ "def dsigmoid(x):\n return sigmoid(x) * (1 - sigmoid(x))", "_____no_output_____" ], [ "derror_dprediction = 2 * (prediction - target)", "_____no_output_____" ], [ "layer_1 = np.dot(input_vector, weights_1) + bias", "_____no_output_____" ], [ "dprediction_dlayer1 = dsigmoid(layer_1)", "_____no_output_____" ], [ "dlayer1_dbias = 1", "_____no_output_____" ], [ "derror_dbias = (derror_dprediction * dprediction_dlayer1 * dlayer1_dbias)", "_____no_output_____" ] ], [ [ "Follow the same process to update the weights", "_____no_output_____" ] ], [ [ "dlayer1_dweights = input_vector", "_____no_output_____" ], [ "derror_dweights = (derror_dprediction * dprediction_dlayer1 * dlayer1_dweights)", "_____no_output_____" ] ], [ [ "**Reference:** https://realpython.com/python-ai-neural-network/#wrapping-the-inputs-of-the-neural-network-with-numpy", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec75daf53b89062a7112915bebecb9ee80ebc435
131,286
ipynb
Jupyter Notebook
nbs/05_projections/010-multiplier-phenomexcan.ipynb
greenelab/phenoplier
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
[ "BSD-2-Clause-Patent" ]
3
2021-08-17T21:59:19.000Z
2022-03-08T15:46:24.000Z
nbs/05_projections/010-multiplier-phenomexcan.ipynb
greenelab/phenoplier
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
[ "BSD-2-Clause-Patent" ]
4
2021-08-04T13:57:24.000Z
2021-10-11T14:57:15.000Z
nbs/05_projections/010-multiplier-phenomexcan.ipynb
greenelab/phenoplier
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
[ "BSD-2-Clause-Patent" ]
null
null
null
38.164535
158
0.391367
[ [ [ "# Description", "_____no_output_____" ], [ "It projects the PhenomeXcan results (S-MultiXcan) into the MultiPLIER latent space.\nBefore projecting, repeated gene symbols as well as genes with NaN are removed.", "_____no_output_____" ], [ "# Modules loading", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "from pathlib import Path\n\nfrom IPython.display import display\nimport pandas as pd\n\nimport rpy2.robjects as ro\nfrom rpy2.robjects import pandas2ri\nfrom rpy2.robjects.conversion import localconverter\n\nimport conf\nfrom entity import Gene\nfrom data.cache import read_data\nfrom multiplier import MultiplierProjection", "_____no_output_____" ], [ "readRDS = ro.r[\"readRDS\"]", "_____no_output_____" ], [ "saveRDS = ro.r[\"saveRDS\"]", "_____no_output_____" ] ], [ [ "# Settings", "_____no_output_____" ] ], [ [ "RESULTS_PROJ_OUTPUT_DIR = Path(conf.RESULTS[\"PROJECTIONS_DIR\"])\nRESULTS_PROJ_OUTPUT_DIR.mkdir(parents=True, exist_ok=True)\n\ndisplay(RESULTS_PROJ_OUTPUT_DIR)", "_____no_output_____" ] ], [ [ "# Load PhenomeXcan data (S-MultiXcan)", "_____no_output_____" ] ], [ [ "smultixcan_results_filename = conf.PHENOMEXCAN[\"SMULTIXCAN_MASHR_ZSCORES_FILE\"]\n\ndisplay(smultixcan_results_filename)", "_____no_output_____" ], [ "results_filename_stem = smultixcan_results_filename.stem\ndisplay(results_filename_stem)", "_____no_output_____" ], [ "smultixcan_results = pd.read_pickle(smultixcan_results_filename)", "_____no_output_____" ], [ "smultixcan_results.shape", "_____no_output_____" ], [ "smultixcan_results.head()", "_____no_output_____" ] ], [ [ "## Gene IDs to Gene names", "_____no_output_____" ] ], [ [ "smultixcan_results = smultixcan_results.rename(index=Gene.GENE_ID_TO_NAME_MAP)", "_____no_output_____" ], [ "smultixcan_results.shape", "_____no_output_____" ], [ "smultixcan_results.head()", "_____no_output_____" ] ], [ [ "## Remove duplicated gene entries", "_____no_output_____" ] ], [ [ "smultixcan_results.index[smultixcan_results.index.duplicated(keep=\"first\")]", "_____no_output_____" ], [ "smultixcan_results = smultixcan_results.loc[\n ~smultixcan_results.index.duplicated(keep=\"first\")\n]", "_____no_output_____" ], [ "smultixcan_results.shape", "_____no_output_____" ] ], [ [ "## Remove NaN values", "_____no_output_____" ], [ "**TODO**: it might be better to try to impute these values", "_____no_output_____" ] ], [ [ "smultixcan_results = smultixcan_results.dropna(how=\"any\")", "_____no_output_____" ], [ "smultixcan_results.shape", "_____no_output_____" ] ], [ [ "# Project S-MultiXcan data into MultiPLIER latent space", "_____no_output_____" ] ], [ [ "mproj = MultiplierProjection()", "_____no_output_____" ], [ "smultixcan_into_multiplier = mproj.transform(smultixcan_results)", "_____no_output_____" ], [ "smultixcan_into_multiplier.shape", "_____no_output_____" ], [ "smultixcan_into_multiplier.head()", "_____no_output_____" ] ], [ [ "# Quick analysis", "_____no_output_____" ] ], [ [ "(smultixcan_into_multiplier.loc[\"LV603\"].sort_values(ascending=False).head(20))", "_____no_output_____" ], [ "(smultixcan_into_multiplier.loc[\"LV136\"].sort_values(ascending=False).head(20))", "_____no_output_____" ], [ "(smultixcan_into_multiplier.loc[\"LV844\"].sort_values(ascending=False).head(20))", "_____no_output_____" ] ], [ [ "# Save", "_____no_output_____" ] ], [ [ "output_file = Path(\n RESULTS_PROJ_OUTPUT_DIR, f\"projection-{results_filename_stem}.pkl\"\n).resolve()\n\ndisplay(output_file)", "_____no_output_____" ], [ 
"smultixcan_into_multiplier.to_pickle(output_file)", "_____no_output_____" ] ], [ [ "## RDS format", "_____no_output_____" ] ], [ [ "output_rds_file = output_file.with_suffix(\".rds\")\ndisplay(output_rds_file)", "_____no_output_____" ], [ "with localconverter(ro.default_converter + pandas2ri.converter):\n data_r = ro.conversion.py2rpy(smultixcan_into_multiplier)", "_____no_output_____" ], [ "data_r", "_____no_output_____" ], [ "saveRDS(data_r, str(output_rds_file))", "_____no_output_____" ], [ "# testing: load the rds file again\ndata_r = readRDS(str(output_rds_file))", "_____no_output_____" ], [ "with localconverter(ro.default_converter + pandas2ri.converter):\n data_again = ro.conversion.rpy2py(data_r)\n# data_again.index = data_again.index.astype(int)", "_____no_output_____" ], [ "data_again.shape", "_____no_output_____" ], [ "data_again.head()", "_____no_output_____" ], [ "pd.testing.assert_frame_equal(\n smultixcan_into_multiplier,\n data_again,\n check_names=False,\n check_exact=True,\n # rtol=0.0,\n # atol=1e-50,\n # check_dtype=False,\n)", "_____no_output_____" ] ], [ [ "## Text format", "_____no_output_____" ] ], [ [ "# tsv format\noutput_text_file = output_file.with_suffix(\".tsv.gz\")\ndisplay(output_text_file)", "_____no_output_____" ], [ "smultixcan_into_multiplier.to_csv(\n output_text_file, sep=\"\\t\", index=True, float_format=\"%.5e\"\n)", "_____no_output_____" ], [ "# testing\n# data2 = data.copy()\n# data2.index = list(range(0, data2.shape[0]))\n\ndata_again = pd.read_csv(output_text_file, sep=\"\\t\", index_col=0)\n\n# data_again.index = list(data_again.index)\n# data_again[\"part_k\"] = data_again[\"part_k\"].astype(float)", "_____no_output_____" ], [ "data_again.shape", "_____no_output_____" ], [ "data_again.head()", "_____no_output_____" ], [ "pd.testing.assert_frame_equal(\n smultixcan_into_multiplier,\n data_again,\n check_exact=False,\n rtol=0.0,\n atol=5e-5,\n)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
ec75ebc772ef065b97b29e96a09864f0a4e21913
7,242
ipynb
Jupyter Notebook
notebook/Test_for_experiment_one.ipynb
HarikaYadavalli/IronTeam
c93bae560f1ba37c2a28188124036badd9e93c6f
[ "BSD-3-Clause" ]
null
null
null
notebook/Test_for_experiment_one.ipynb
HarikaYadavalli/IronTeam
c93bae560f1ba37c2a28188124036badd9e93c6f
[ "BSD-3-Clause" ]
null
null
null
notebook/Test_for_experiment_one.ipynb
HarikaYadavalli/IronTeam
c93bae560f1ba37c2a28188124036badd9e93c6f
[ "BSD-3-Clause" ]
null
null
null
27.96139
116
0.502209
[ [ [ "import argparse\nimport time\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim.lr_scheduler as lr_scheduler\n", "_____no_output_____" ], [ "import os\nimport torch\n\nfrom collections import Counter\n\n\nclass Dictionary(object):\n def __init__(self):\n self.word2idx = {}\n self.idx2word = []\n self.counter = Counter()\n self.total = 0\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n token_id = self.word2idx[word]\n self.counter[token_id] += 1\n self.total += 1\n return self.word2idx[word]\n\n def __len__(self):\n return len(self.idx2word)\n\n\nclass Corpus(object):\n def __init__(self, path):\n self.dictionary = Dictionary()\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n\n return ids\n", "_____no_output_____" ], [ "def batchify(data, bsz, args):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = data.size(0) // bsz\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * bsz)\n # Evenly divide the data across the bsz batches.\n data = data.view(bsz, -1).t().contiguous()\n #if args.cuda:\n #data = data.cuda()\n return data", "_____no_output_____" ], [ "def evaluate(data_source, batch_size=10):\n # Turn on evaluation mode which disables dropout.\n model.eval()\n if args.model == 'QRNN': model.reset()\n total_loss = 0\n ntokens = len(corpus.dictionary)\n hidden = model.init_hidden(batch_size)\n for i in range(0, data_source.size(0) - 1, args.bptt):\n #print('eval:',i)\n #print('before get_batch, max_memory_reserved():',torch.cuda.max_memory_reserved()/1024/1024)\n data, targets = get_batch(data_source, i, args, evaluation=True)\n #print('after get_batch, max_memory_reserved():',torch.cuda.max_memory_reserved()/1024/1024)\n output, hidden = model(data, hidden)\n total_loss += len(data) * criterion(model.decoder.weight, model.decoder.bias, output, targets).data\n hidden = repackage_hidden(hidden)\n return total_loss.item() / len(data_source)", "_____no_output_____" ], [ "#print('Loading cached dataset...')\n#corpus = torch.load('corpus.148650ff682fa3f76e78c18d7d6d5bd6.data')\n\nprint('Producing dataset...')\ncorpus = Corpus('data/penn/')\n#torch.save(corpus, fn)", "Producing dataset...\n" ], [ "test_batch_size = 1\ntrain_batch_size = 20\n#args = {'cuda':True}\nclass arg1:\n cuda = True\n model = 'LSTM'\n bptt = 70\nargs = arg1()\ntest_data = batchify(corpus.test, test_batch_size, args)\ntrain_data = batchify(corpus.train, train_batch_size, args)", "_____no_output_____" ], [ "print('The size of test data = ', test_data.shape)\nprint('The size of train data = ',train_data.shape)", "The size of test data = torch.Size([82430, 1])\nThe size of train data = torch.Size([46479, 20])\n" ], [ "def model_load(fn):\n global model, 
criterion, optimizer\n    #if args.philly:\n    #fn = os.path.join(os.environ['PT_OUTPUT_DIR'], fn)\n    #fn = '1591009087197056.pt'\n    with open(fn, 'rb') as f:\n        model, criterion, optimizer = torch.load(f, map_location=torch.device('cpu'))", "_____no_output_____" ], [ "model_load('model/model-{}'.format(105))", "_____no_output_____" ], [ "test_loss = evaluate(test_data, test_batch_size)", "_____no_output_____" ], [ "# inspect the test loss computed above\ntest_loss", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec75fe16937b89000edbe194ac2e5de8e7ad853b
80,382
ipynb
Jupyter Notebook
notebooks/coursera-DataAnalysiswithPython/importingDatasets.ipynb
dguardia/pyhtonForDataScience-David
1c846554baecf2dc072aa95686a7839b467f6e11
[ "MIT" ]
1
2019-09-10T18:19:35.000Z
2019-09-10T18:19:35.000Z
notebooks/coursera-DataAnalysiswithPython/importingDatasets.ipynb
dguardia/pyhtonForDataScience-David
1c846554baecf2dc072aa95686a7839b467f6e11
[ "MIT" ]
null
null
null
notebooks/coursera-DataAnalysiswithPython/importingDatasets.ipynb
dguardia/pyhtonForDataScience-David
1c846554baecf2dc072aa95686a7839b467f6e11
[ "MIT" ]
null
null
null
48.983547
1,571
0.388097
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "url =\"https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data\"", "_____no_output_____" ], [ "df = pd.read_csv(url, header=None)", "_____no_output_____" ], [ "df.head(10)", "_____no_output_____" ], [ "headers = ['symboling', 'normalized-losses', 'make', 'fuel-type', 'aspiration', 'num-of-doors', 'body-style', 'drive-wheels', 'engine-location', 'wheel-base', 'length', 'width', 'height', 'curb-weight', 'engine-type', 'num-of-cylinders', 'engine-size', 'fuel-system', 'bore', 'stroke', 'compression-ratio', 'horsepower', 'peak-rpm', 'city-mpg', 'highway-mpg', 'price']", "_____no_output_____" ], [ "headers", "_____no_output_____" ], [ "df.columns = headers", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "print(path)", "None\n" ], [ "path = '/notebooks/dataframes/automobile.csv'", "_____no_output_____" ], [ "df.to_csv(path)", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "df.describe(include=\"all\")", "_____no_output_____" ], [ "df.info", "_____no_output_____" ], [ "# 0 is row\n# 1 is column\n# df.dropna(axis=0)\n\nmean = df[\"normalized-losses\"].mean()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7603ed2eb34e0388d78c141fd279cda35bfa94
625,389
ipynb
Jupyter Notebook
explore.ipynb
Brandon-Martinez27/couples_who_sleep_alone
e211d15750d43af62ee6f2a59f277024398363c0
[ "CNRI-Python" ]
null
null
null
explore.ipynb
Brandon-Martinez27/couples_who_sleep_alone
e211d15750d43af62ee6f2a59f277024398363c0
[ "CNRI-Python" ]
null
null
null
explore.ipynb
Brandon-Martinez27/couples_who_sleep_alone
e211d15750d43af62ee6f2a59f277024398363c0
[ "CNRI-Python" ]
null
null
null
333.718783
103,708
0.916733
[ [ [ "# Initial Hypotheses\n\n- Do couples in longer relationships sleep in separate beds?\n- Do older couples end up sleeping in separate beds?\n- Are people with higher degrees sleeping separately?\n- Which census region are couples more likely to sleep seperately?\n- **BONUS**: Are people with stressful jobs sleeping separately? *Note: need to do a lot of cleaning for the occupation category. May require NLP to categorize 'Other' reponses. Will do after project is done if time permits*", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy.stats as stats\n\nfrom wrangle import wrangle_data, encode_cat_vars, split_data\n\nplt.rc('figure', figsize=(9, 7))\n\nplt.rc('font', size=13)", "_____no_output_____" ], [ "train, validate, test = split_data(wrangle_data())", "_____no_output_____" ], [ "train.head()", "_____no_output_____" ], [ "train.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 557 entries, 97 to 160\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 rel_length 557 non-null object\n 1 separate_bed 557 non-null int64 \n 2 occupation 557 non-null object\n 3 age 557 non-null object\n 4 education 557 non-null object\n 5 location 557 non-null object\n 6 married 557 non-null uint8 \n 7 male 557 non-null uint8 \ndtypes: int64(1), object(5), uint8(2)\nmemory usage: 31.5+ KB\n" ] ], [ [ "# EDA", "_____no_output_____" ] ], [ [ "def plot_counts(df, col_a, col_b, title):\n plt.rc('figure', figsize=(16, 9))\n plt.subplot(211)\n\n sns.countplot(x = col_a, data=df, palette='Purples', ec='black', linewidth=2)\n\n plt.title(title)\n plt.xlabel('')\n plt.ylabel('')\n\n plt.subplot(212)\n\n # use hue to add another dimension to your viz\n sns.countplot(x = col_a, hue = col_b, data=df, palette='Blues', ec='black', linewidth=2)\n\n plt.legend()\n plt.tight_layout()\n plt.show()", "_____no_output_____" ] ], [ [ "### Target: Separate Beds", "_____no_output_____" ] ], [ [ "train.separate_bed.value_counts(normalize=True)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\"><b>Summary</b>: About 24% of couples sleep separately according the the data*</div>", "_____no_output_____" ], [ "> **Do couples in longer relationships sleep in separate beds?**", "_____no_output_____" ] ], [ [ "plot_counts(train, 'rel_length', 'separate_bed', 'How Does the Relationship Length Relate to Sleeping Situation?')", "_____no_output_____" ] ], [ [ "#### $Chi^2$ Test", "_____no_output_____" ] ], [ [ "# crosstab of target by age\nlen_obs = pd.crosstab(train.separate_bed, \n train.rel_length)\nlen_obs", "_____no_output_____" ], [ "# Chi^2 test returns 4 variables\nchi2, p, degf, expected = stats.chi2_contingency(len_obs)\n\nnull_hypothesis = \"Relationship Length and Sleeping Sitation are independent\"\n\n# significance\nalpha = .05\n\nif p < alpha:\n print(\"We reject the hypothesis that\", null_hypothesis)\nelse:\n print(\"We fail to reject the null hypothesis that\", null_hypothesis)\n\nprint(p)", "We fail to reject the null hypothesis that Relationship Length and Sleeping Sitation are independent\n0.31523451425551374\n" ], [ "train.groupby('rel_length').separate_bed.mean().plot.bar(ec='black', \n fc='dodgerblue', width=.9)\n\n# plt.hlines(height, starts, ends) (is like y, xmin, xmax)\nplt.hlines(train.separate_bed.mean(), *plt.xlim(), ls='--', color='grey', \n label='average separate bed 
rate')\n\nplt.xticks(rotation=0)\nplt.xlabel('')\nplt.ylabel('% Separate')\nplt.title('Rate of Separate Beds by Relationship Length')\n\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n <b>Summary</b>: These relationship length groups surpass the mean for the population for couples in separate beds:\n <li>11-15 years</li>\n <li>16-20 years</li>\n <li>20+ years</li>\n $Chi^2$ Test determined that these are independent.\n </div>", "_____no_output_____" ], [ "> **Do older couples end up sleeping in separate beds?**", "_____no_output_____" ] ], [ [ "plot_counts(train, 'age', 'separate_bed', 'How Does Age Group Relate to Sleeping Situation?')", "_____no_output_____" ], [ "train.groupby('age').separate_bed.mean().plot.bar(ec='black', \n fc='green', width=.9)\n\n# plt.hlines(height, starts, ends) (is like y, xmin, xmax)\nplt.hlines(train.separate_bed.mean(), *plt.xlim(), ls='--', color='grey', \n label='average separate bed rate')\n\nplt.xticks(rotation=0)\nplt.xlabel('')\nplt.ylabel('% Separate')\nplt.title('Rate of Separate Beds by Age')\n\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "#### $Chi^2$ Test", "_____no_output_____" ] ], [ [ "# crosstab of target by age\nage_obs = pd.crosstab(train.separate_bed, train.age)\nage_obs", "_____no_output_____" ], [ "# Chi^2 test returns 4 variables\nchi2, p, degf, expected = stats.chi2_contingency(age_obs)\n\nnull_hypothesis = \"Age and Sleeping Sitation are independent\"\n\n# significance\nalpha = .05\n\nif p < alpha:\n print(\"We reject the hypothesis that\", null_hypothesis)\nelse:\n print(\"We fail to reject the null hypothesis that\", null_hypothesis)\n\nprint(p)", "We fail to reject the null hypothesis that Age and Sleeping Sitation are independent\n0.11975865217615421\n" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n <b>Summary</b>: These age groups surpass the mean for the population for couples in separate beds:\n <li>30-44</li>\n <li>60+</li>\n $Chi^2$ Test determined that these are independent.\n </div>", "_____no_output_____" ], [ "> **Are people with higher degrees sleeping separately?**", "_____no_output_____" ] ], [ [ "plot_counts(train, 'education', 'separate_bed', 'How Does the Education Level Relate to Sleeping Situation?')", "_____no_output_____" ], [ "train.groupby('education').separate_bed.mean().plot.bar(ec='black', \n fc='orange', width=.9)\n\n# plt.hlines(height, starts, ends) (is like y, xmin, xmax)\nplt.hlines(train.separate_bed.mean(), *plt.xlim(), ls='--', color='grey', \n label='average separate bed rate')\n\nplt.xticks(rotation=0)\nplt.xlabel('')\nplt.ylabel('% Separate')\nplt.title('Rate of Separate Beds by Education')\n\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "#### $Chi^2$ Test", "_____no_output_____" ] ], [ [ "# crosstab of target by age\nedu_obs = pd.crosstab(train.separate_bed, train.education)\nedu_obs", "_____no_output_____" ], [ "# Chi^2 test returns 4 variables\nchi2, p, degf, expected = stats.chi2_contingency(edu_obs)\n\nnull_hypothesis = \"Education Level and Sleeping Sitation are independent\"\n\n# significance\nalpha = .05\n\nif p < alpha:\n print(\"We reject the hypothesis that\", null_hypothesis)\nelse:\n print(\"We fail to reject the null hypothesis that\", null_hypothesis)\n\nprint(p)", "We fail to reject the null hypothesis that Education Level and Sleeping Sitation are independent\n0.6275542881050433\n" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n <b>Summary</b>: These education level groups 
surpass the mean for the population for couples in separate beds:\n <li>High School or less</li>\n <li>Bachelor's degree</li>\n $Chi^2$ Test determined that these are independent.\n </div>", "_____no_output_____" ], [ "> **Which census region are couples more likely to sleep seperately?**", "_____no_output_____" ] ], [ [ "plot_counts(train, 'location', 'separate_bed', 'How Does the Census Region Relate to Sleeping Situation?')", "_____no_output_____" ], [ "train.groupby('location').separate_bed.mean().plot.bar(ec='maroon', \n fc='pink', width=.9)\n\n# plt.hlines(height, starts, ends) (is like y, xmin, xmax)\nplt.hlines(train.separate_bed.mean(), *plt.xlim(), ls='--', \n color='grey', label='average separate bed rate')\n\nplt.xticks(rotation=0)\nplt.xlabel('')\nplt.ylabel('% Separate')\nplt.title('Rate of Separate Beds by location')\n\nplt.tight_layout()\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "#### $Chi^2$ Test", "_____no_output_____" ] ], [ [ "# crosstab of target by age\nloc_obs = pd.crosstab(train.separate_bed, train.location)\nloc_obs", "_____no_output_____" ], [ "# Chi^2 test returns 4 variables\nchi2, p, degf, expected = stats.chi2_contingency(loc_obs)\n\nnull_hypothesis = \"Education Level and Sleeping Sitation are independent\"\n\n# significance\nalpha = .05\n\nif p < alpha:\n print(\"We reject the hypothesis that\", null_hypothesis)\nelse:\n print(\"We fail to reject the null hypothesis that\", null_hypothesis)\n\nprint(p)", "We fail to reject the null hypothesis that Education Level and Sleeping Sitation are independent\n0.37352490745037314\n" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n <b>Summary</b>: These census location groups surpass the mean for the population for couples in separate beds:\n <li>East South Central</li>\n <li>South Atlantic</li>\n <li>Mountain</li>\n $Chi^2$ Test determined that these are independent.\n </div>", "_____no_output_____" ], [ "> **Are there any combinations of features of couples that are more likely to sleep apart?**", "_____no_output_____" ] ], [ [ "#train.groupby(['age', 'education']).separate_bed.mean()\nplt.rc('figure', figsize=(12, 4))\nctab = pd.crosstab(train['age'], train.education, values=train.separate_bed, aggfunc='mean')\nsns.heatmap(ctab, annot=True, cmap='Purples', fmt='.1%')\n\nplt.title('What was the Rate for Age by Education Level?')\nplt.xticks(rotation=20)\nplt.yticks(rotation=45)\nplt.ylabel('')\nplt.xlabel('')\n\nplt.show()", "_____no_output_____" ], [ "#train.groupby(['location', 'education']).separate_bed.mean()\nplt.rc('figure', figsize=(16, 9))\n\nctab = pd.crosstab(train['location'], train.education, values=train.separate_bed, aggfunc='mean')\nsns.heatmap(ctab, annot=True, cmap='Greens', fmt='.1%')\n\nplt.title('What was the Rate for Age by Relationship Length?')\nplt.xlabel('')\nplt.ylabel('')\n\nplt.show()", "_____no_output_____" ], [ "#train.groupby(['location', 'rel_length']).separate_bed.mean()\n\nctab = pd.crosstab(train['location'], train.rel_length, values=train.separate_bed, aggfunc='mean')\nsns.heatmap(ctab, annot=True, cmap='rocket_r', fmt='.1%')\n\nplt.title('What was the Rate for Location by Relationship Length?')\nplt.xlabel('')\nplt.ylabel('')\n\nplt.show()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n <b>Summary</b>: These combination of groups significantly surpass the mean for the population for couples in separate beds:\n <li>Region: Mountain, Relation Length: 11-15 years = 80%</li>\n <li>Region: East South Central, Relation Length: 
6-10 years = 75%</li>\n <li>Age: 30-44, Education: HS degree or less = 66.7%</li>\n <li>Region: East South Central & Pacfic, Education: HS degree or less = 60%</li>\n <li>Region: Mid Atlantic, Relation Length: 16-20 years = 60%</li>\n These combinations may be worth looking into further.\n </div>", "_____no_output_____" ], [ "# Feature Engineering", "_____no_output_____" ] ], [ [ "df = wrangle_data()", "_____no_output_____" ], [ "og_cols = df.columns.to_list()\nog_cols", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df['long_term'] = np.where(\n (df['rel_length'] == '11-15 years') | (df['rel_length'] == '16-20 years'), \n 1, \n 0)\n\ndf['young'] = np.where(\n (df['age'] == '18-29'), \n 1, \n 0)\n\ndf['hs_or_less'] = np.where(\n (df['education'] == 'HS degree or less'), \n 1, \n 0)\n\ndf['regional'] = np.where(\n ((df['location'] == 'East South Central') | \n (df['location'] == 'Mountain') |\n (df['location'] == 'South Atlantic')), \n 1, \n 0)\n\ndf['esc_610'] = np.where(\n ((df['location'] == 'East South Central') & \n (df['rel_length'] == '6-10 years')), \n 1, \n 0)\n\ndf['ma_1620'] = np.where(\n ((df['location'] == 'Middle Atlantic') & \n (df['rel_length'] == '16-20 years')), \n 1, \n 0)\n\ndf['mt_1115'] = np.where(\n ((df['location'] == 'Mountain') & \n (df['rel_length'] == '11-15 years')), \n 1, \n 0)\n\ndf['midage_hs'] = np.where(\n ((df['age'] == '30-44') & \n (df['education'] == 'HS degree or less')), \n 1, \n 0)\n\ndf['esc_bac'] = np.where(\n ((df['location'] == 'East South Central') & \n (df['education'] == 'Bachelor degree')), \n 1, \n 0)\n\ndf['pac_hs'] = np.where(\n ((df['location'] == 'Pacific') & \n (df['education'] == 'HS degree or less')), \n 1,\n 0)", "_____no_output_____" ], [ "df[['location', 'education', 'pac_hs']].sample(10)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "plt.rc('figure', figsize=(16, 12))\n\nplt.subplot(224)\ntrain.groupby('education').separate_bed.mean().plot.bar(ec='black', \n fc='orange', width=.9)\n\n# plt.hlines(height, starts, ends) (is like y, xmin, xmax)\nplt.hlines(train.separate_bed.mean(), *plt.xlim(), ls='--', color='grey', \n label='average separate bed rate')\n\nplt.xticks(rotation=25)\nplt.xlabel('')\nplt.ylabel('')\nplt.title('Education')\n\nplt.legend()\n\n\nplt.subplot(223)\ntrain.groupby('location').separate_bed.mean().plot.bar(ec='maroon', \n fc='pink', width=.9)\n\n# plt.hlines(height, starts, ends) (is like y, xmin, xmax)\nplt.hlines(train.separate_bed.mean(), *plt.xlim(), ls='--', \n color='grey', label='average separate bed rate')\n\nplt.xticks(rotation=25)\nplt.xlabel('')\nplt.ylabel('% Separate')\nplt.title('Census Region')\n\nplt.tight_layout()\nplt.legend()\n\nplt.subplot(221)\ntrain.groupby('rel_length').separate_bed.mean().plot.bar(ec='black', \n fc='dodgerblue', width=.9)\n\n# plt.hlines(height, starts, ends) (is like y, xmin, xmax)\nplt.hlines(train.separate_bed.mean(), *plt.xlim(), ls='--', color='grey', \n label='average separate bed rate')\n\nplt.xticks(rotation=0)\nplt.xlabel('')\nplt.ylabel('% Separate')\nplt.title('Rate of Separate Beds by Relationship Length')\n\nplt.legend()\n\n\nplt.subplot(222)\ntrain.groupby('age').separate_bed.mean().plot.bar(ec='black', \n fc='green', width=.9)\n\n# plt.hlines(height, starts, ends) (is like y, xmin, xmax)\nplt.hlines(train.separate_bed.mean(), *plt.xlim(), ls='--', color='grey', \n label='average separate bed rate')\n\nplt.xticks(rotation=0)\nplt.xlabel('')\nplt.ylabel('')\nplt.title('Age 
Group')\n\nplt.legend()\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec7621ef85431cf8ce39834e41ca7933d9cbe8e3
14,708
ipynb
Jupyter Notebook
main.ipynb
sulydeni/trueFilm
69273f1d185f1111b7cad9dead992e31586c0d0e
[ "MIT" ]
null
null
null
main.ipynb
sulydeni/trueFilm
69273f1d185f1111b7cad9dead992e31586c0d0e
[ "MIT" ]
null
null
null
main.ipynb
sulydeni/trueFilm
69273f1d185f1111b7cad9dead992e31586c0d0e
[ "MIT" ]
null
null
null
37.424936
175
0.565679
[ [ [ "from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import regexp_extract, col,when,udf,trim,rand\nfrom pyspark.sql import functions as F\nfrom pyspark.sql.types import DoubleType,IntegerType\n\nfrom sqlalchemy import create_engine\nimport psycopg2 as pg\n\nimport pandas as pd\n\nimport os\n\nos.environ[\"PYSPARK_PYTHON\"]=\"/usr/bin/python3\"\n\n\n#initiate spark session and load xml plugin (https://github.com/databricks/spark-xml) jar\n#https://repo1.maven.org/maven2/com/databricks/spark-xml_2.11/0.5.0/spark-xml_2.11-0.5.0.jar\nspark = SparkSession \\\n .builder \\\n .config(\"spark.jars\", \"spark-xml_2.11-0.5.0.jar\") \\\n .getOrCreate()", "_____no_output_____" ], [ "#load the movies csv\nmovies_metadata=spark.read.csv(\"movies_metadata.csv\",header=True,quote=\"\\\"\",escape=\"\\\"\")\n\n\n#clean non digit values in revenue and budget columns\nmovies_metadata=movies_metadata.withColumn(\"revenue\",\n when(col(\"revenue\").rlike('[\\d]+'), col(\"revenue\")).otherwise(None).cast(DoubleType())\n ).withColumn(\"budget\",\n when(col(\"budget\").rlike('[\\d]+'), col(\"budget\")).otherwise(None).cast(DoubleType())\n ).withColumn(\"release_year\",\n trim(regexp_extract(col(\"release_date\"), '([\\d]{4})\\-', 1)).cast(IntegerType())\n )\n\n\nmovies_metadata.printSchema()", "root\n |-- adult: string (nullable = true)\n |-- belongs_to_collection: string (nullable = true)\n |-- budget: double (nullable = true)\n |-- genres: string (nullable = true)\n |-- homepage: string (nullable = true)\n |-- id: string (nullable = true)\n |-- imdb_id: string (nullable = true)\n |-- original_language: string (nullable = true)\n |-- original_title: string (nullable = true)\n |-- overview: string (nullable = true)\n |-- popularity: string (nullable = true)\n |-- poster_path: string (nullable = true)\n |-- production_companies: string (nullable = true)\n |-- production_countries: string (nullable = true)\n |-- release_date: string (nullable = true)\n |-- revenue: double (nullable = true)\n |-- runtime: string (nullable = true)\n |-- spoken_languages: string (nullable = true)\n |-- status: string (nullable = true)\n |-- tagline: string (nullable = true)\n |-- title: string (nullable = true)\n |-- video: string (nullable = true)\n |-- vote_average: string (nullable = true)\n |-- vote_count: string (nullable = true)\n |-- release_year: integer (nullable = true)\n\n" ], [ "# write a custom user defined funtion to write string ratio\n@udf\ndef calculate_string_ratio(a, b):\n a_,b_=a,b\n \n if (a in (None,0)) or (b in (None,0)):\n return None\n \n while(b):\n a, b = b, a % b\n \n return f\"{int(a_/a)}:{int(b_/a)}\"\n\n#create new columns for string_ratio and float ratio\nmovies_metadata=movies_metadata\\\n .withColumn(\"string_ratio\", calculate_string_ratio(col(\"budget\"),col(\"revenue\")))\\\n .withColumn(\"ratio\", (col(\"budget\")/col(\"revenue\")) )", "_____no_output_____" ], [ "#read wikipedia xml as spark dataframe using the spark-xml plugin\nwikipedia_abstract_df=spark.read.format('xml').options(rowTag='doc').load('enwiki-latest-abstract.xml')\nwikipedia_abstract_df.printSchema()", "root\n |-- abstract: string (nullable = true)\n |-- links: string (nullable = true)\n |-- title: string (nullable = true)\n |-- url: string (nullable = true)\n\n" ], [ "#All titles start with \"Wikipedia:\" prefix and some contain parenthesis with further category or year info\n#clean title is the title without Wikipedia: prefix\n#clean_title_no_brackets is the title without Wikipedia: prefix and without the 
extra info parenthesis ()\nclean_movie_abs=wikipedia_abstract_df.withColumn('clean_title', regexp_extract(col('title'), '([^:]+):(.*)', 2))\\\n .withColumn('clean_title_no_brackets', trim(regexp_extract(col('title'), '([^:]+):([^(]+)(.*)', 2)))\n\n#some parenthesis (1968 film) or (film)\n#we can estimate the year and category from the text in the parenthesis with regex. estimated_year and estimated_type columns\nclean_movie_abs=clean_movie_abs\\\n .withColumn('estimated_year', when(col('clean_title').rlike('.*\\([\\d]{4}[^\\)]*\\)$'), \\\n trim(regexp_extract(col('clean_title'), '.*\\(([\\d]{4})[^\\)]*\\)$', 1)) ).otherwise(None)\n )\\\n .withColumn('estimated_type',when(col('clean_title').rlike('.*\\(([\\d]{4})?[^\\)]*\\)$'),\\\n trim(regexp_extract(col('clean_title'), '.*\\(([\\d]{4}[\\s]+|)([^\\)]*)\\)$', 2)) ).otherwise(None) \\\n )\n#filter wikipedia dataframe to only include titles that are identical to the movie titles in imdb dataset\nclean_movie_abs=clean_movie_abs.where(\n col(\"clean_title_no_brackets\").isin(movies_metadata.select(\"title\").toPandas()[\"title\"].tolist())\n )\nclean_movie_abs.printSchema()", "root\n |-- abstract: string (nullable = true)\n |-- links: string (nullable = true)\n |-- title: string (nullable = true)\n |-- url: string (nullable = true)\n |-- clean_title: string (nullable = true)\n |-- clean_title_no_brackets: string (nullable = true)\n |-- estimated_year: string (nullable = true)\n |-- estimated_type: string (nullable = true)\n\n" ], [ "#right join the movies and the filtered wikiepdia datasets on cleaned title (wikipedia) and imdb dataset title column\n#only movies with revenue and budgets can have ratios so we use right join to movies dataset\nmovies_metadata_with_wiki_links=clean_movie_abs.drop('title')\\\n.join(movies_metadata, clean_movie_abs.clean_title_no_brackets == movies_metadata.title,how=\"right\")\nmovies_metadata_with_wiki_links.printSchema()", "root\n |-- abstract: string (nullable = true)\n |-- links: string (nullable = true)\n |-- url: string (nullable = true)\n |-- clean_title: string (nullable = true)\n |-- clean_title_no_brackets: string (nullable = true)\n |-- estimated_year: string (nullable = true)\n |-- estimated_type: string (nullable = true)\n |-- adult: string (nullable = true)\n |-- belongs_to_collection: string (nullable = true)\n |-- budget: double (nullable = true)\n |-- genres: string (nullable = true)\n |-- homepage: string (nullable = true)\n |-- id: string (nullable = true)\n |-- imdb_id: string (nullable = true)\n |-- original_language: string (nullable = true)\n |-- original_title: string (nullable = true)\n |-- overview: string (nullable = true)\n |-- popularity: string (nullable = true)\n |-- poster_path: string (nullable = true)\n |-- production_companies: string (nullable = true)\n |-- production_countries: string (nullable = true)\n |-- release_date: string (nullable = true)\n |-- revenue: double (nullable = true)\n |-- runtime: string (nullable = true)\n |-- spoken_languages: string (nullable = true)\n |-- status: string (nullable = true)\n |-- tagline: string (nullable = true)\n |-- title: string (nullable = true)\n |-- video: string (nullable = true)\n |-- vote_average: string (nullable = true)\n |-- vote_count: string (nullable = true)\n |-- release_year: integer (nullable = true)\n |-- string_ratio: string (nullable = true)\n |-- ratio: double (nullable = true)\n\n" ], [ "#there will be duplicate titles where one movie is matched to many wikiepdia articles\n#we create a prioritisation strategy to 
prioritise titles that have the year mentioned and \"film\" category (2) \n#then film category without a year (1) finally the rest (0)\n\n#afterward we sort the records by title and priority (descending order) then we deduplicate by id\n\nmovies_metadata_with_wiki_links_test=movies_metadata_with_wiki_links.withColumn('match_priority',\n when(\n (col(\"estimated_year\").isNotNull())\n & (col(\"estimated_type\").isNotNull())\n & (col('release_year')==col(\"estimated_year\")) \n & (col(\"estimated_type\")==\"film\")\n ,2).when(\n (col(\"estimated_year\").isNull())\n & (col(\"estimated_type\").isNotNull())\n & (col(\"estimated_type\")==\"film\")\n ,1). \n otherwise(0) \\\n ).orderBy([\"clean_title_no_brackets\",\"match_priority\"], ascending=False).drop_duplicates(subset=['id'])", "_____no_output_____" ], [ "#write a test sample to be tested in excel\ntaste=movies_metadata_with_wiki_links_test.sample(False, 0.1).limit(5000)\\\n.toPandas().to_csv('movies_metadata_with_wiki_links_test_5000.csv')", "_____no_output_____" ], [ "#get the top 1000 ratios and rename the columns\n#keep movies with more than 1000 USD budgets and revenue. Budget and revenue columns contain numbers\n#representing 0s, multiples K or Mil numbers.\nmovies_metadata_with_wiki_links_test=movies_metadata_with_wiki_links_test.orderBy(\"ratio\", ascending=False).limit(1000).\\\nselect(\"title\",\"budget\",\"release_year\",\"revenue\",\"vote_average\",\"ratio\",\\\n\"production_companies\",\"url\",\"abstract\").\\\nwhere((col(\"revenue\")>=1000) & (col(\"budget\")>=1000)).\\\ntoDF('title', 'budget','year','revenue','rating','ratio','production_company','wikipedia_link','wikipedia_abstract')", "_____no_output_____" ], [ "#extract dataframe to pandas\nmovies_metadata_with_wiki_links_final=movies_metadata_with_wiki_links_test.toPandas()\n\nmovies_metadata_with_wiki_links_final.head()", "_____no_output_____" ], [ "#connect to the SQL(postgres) docker image and create engine then a connection\nengine = create_engine('postgresql+psycopg2://postgres:docker@localhost:5432/postgres')\nconnection = engine.raw_connection()", "_____no_output_____" ], [ "#input the top 10000 movies into postgres database postgres and under a table named 'highest_budget_revenues_ratio_movies'\nmovies_metadata_with_wiki_links_final.to_sql('highest_budget_revenues_ratio_movies', engine,index=False, if_exists='replace')", "_____no_output_____" ] ], [ [ "## Testing the Top 1000 Movies List\n\nYou can run the cells below independently of the above steps. Only run these steps after inserting the 1000 movie via the above cell or through running the shell scrips.", "_____no_output_____" ] ], [ [ "from sqlalchemy import create_engine\nimport psycopg2 as pg\n\nimport pandas as pd\n\n#connect to the SQL(postgres) docker image and create engine then a connection\nengine = create_engine('postgresql+psycopg2://postgres:docker@localhost:5432/postgres')\nconnection = engine.raw_connection()", "_____no_output_____" ], [ "df=pd.read_sql(\"select * from highest_budget_revenues_ratio_movies\", connection)\ndf.head()", "_____no_output_____" ], [ "df.to_csv(\"test.csv\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec76328ed01e1bd1946767e564a9955b0f3ecd7e
7,104
ipynb
Jupyter Notebook
notebooks/45_train_test_split.ipynb
rootsdev/nama
1210a24e8ee7689619e800653bd11341d667462d
[ "MIT" ]
null
null
null
notebooks/45_train_test_split.ipynb
rootsdev/nama
1210a24e8ee7689619e800653bd11341d667462d
[ "MIT" ]
8
2021-10-16T19:24:20.000Z
2021-11-25T02:28:32.000Z
notebooks/45_train_test_split.ipynb
rootsdev/nama
1210a24e8ee7689619e800653bd11341d667462d
[ "MIT" ]
null
null
null
26.507463
149
0.560529
[ [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "# Split input pairs into train and test sets", "_____no_output_____" ] ], [ [ "from collections import namedtuple\nimport wandb\n\nfrom src.data.familysearch import train_test_split_on_frequency\nfrom src.data.utils import load_dataset\nfrom src.models.utils import add_padding", "_____no_output_____" ], [ "given_surname = \"given\"\nConfig = namedtuple(\"Config\", \"in_path train_path test_path threshold\")\nconfig = Config(\n in_path=f\"s3://familysearch-names/processed/tree-hr-{given_surname}-similar.csv.gz\",\n train_path=f\"s3://familysearch-names/processed/tree-hr-{given_surname}-train-unfiltered.csv.gz\",\n test_path=f\"s3://familysearch-names/processed/tree-hr-{given_surname}-test.csv.gz\",\n threshold=0.5\n)", "_____no_output_____" ], [ "wandb.init(\n project=\"nama\",\n entity=\"nama\",\n name=\"45_train_test_split\",\n group=given_surname,\n notes=\"\",\n config=config._asdict()\n)", "_____no_output_____" ], [ "train_test_split_on_frequency(config.in_path, config.train_path, config.test_path, config.threshold)", "_____no_output_____" ], [ "input_names_train, weighted_actual_names_train, candidate_names_train = \\\n load_dataset(config.train_path)\ninput_names_test, weighted_actual_names_test, candidate_names_test = \\\n load_dataset(config.test_path)", "_____no_output_____" ], [ "vocab = set(input_names_train).union(set(candidate_names_train))\nprint(len(vocab))", "_____no_output_____" ], [ "# check test set is correct\nn_zero = n_one = n_two = 0\nfor input_name, wans in zip(input_names_test, weighted_actual_names_test):\n for actual_name, _, _ in wans:\n if input_name in vocab and actual_name in vocab and input_name != actual_name:\n n_two += 1\n elif input_name in vocab or actual_name in vocab:\n n_one += 1\n else:\n n_zero += 1\nprint(\"two names in vocab (should not be possible)\", n_two)\nprint(\"one name in vocab\", n_one)\nprint(\"zero names in vocab\", n_zero)", "_____no_output_____" ], [ "print(\"train input names (name1), weighted actual (name1 -> [name2, weighted_count, co_occurrence], candidate names (name2)\")\nprint(\"name1\", len(input_names_train))\nprint(\"weighted actual - should be same as name1\", len(weighted_actual_names_train))\nprint(\"number of actuals\", sum(len(wa) for wa in weighted_actual_names_train))\nprint(\"name2\", len(candidate_names_train))\nprint(\"total unique names\", len(set(input_names_train).union(set(candidate_names_train))))", "_____no_output_____" ], [ "print(\"test out-of-vocab: input names (name1), weighted actual (name1 -> [name2, weighted_count, co_occurrence], candidate names (name2)\")\nprint(\"name1\", len(input_names_test))\nprint(\"weighted actual - should be same as name1\", len(weighted_actual_names_test))\nprint(\"number of actuals\", sum(len(wa) for wa in weighted_actual_names_test))\nprint(\"name2\", len(candidate_names_test))\nprint(\"total unique names\", len(set(input_names_test).union(set(candidate_names_test))))", "_____no_output_____" ] ], [ [ "### Probe datasets", "_____no_output_____" ] ], [ [ "def print_weighted_actual_names(label, weighted_actual_names, max=0):\n print(label)\n print(\"total\", len(weighted_actual_names))\n if 0 < max < len(weighted_actual_names):\n weighted_actual_names = weighted_actual_names[:max]\n for wan in weighted_actual_names:\n print(\" \", wan)\n\nprobe_name = add_padding(\"jones\" if given_surname == \"surname\" else \"richard\")\nprint(\"total weight\", sum(wc for _, wc, _ in 
weighted_actual_names_train[input_names_train.index(probe_name)]))\nprint_weighted_actual_names(\"train\", weighted_actual_names_train[input_names_train.index(probe_name)], 20)\nprint(\"total weight\", sum(wc for _, wc, _ in weighted_actual_names_test[input_names_test.index(probe_name)]))\nprint_weighted_actual_names(\"test\", weighted_actual_names_test[input_names_test.index(probe_name)], 20)", "_____no_output_____" ], [ "wandb.finish()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec763ed4cdd6f0ccf647f981e77e4e3a4335ae47
229,792
ipynb
Jupyter Notebook
ipynb/labeling_prep/extract_data_for_annotation.ipynb
IBPA/FoodAtlas
0a431f0a391adaa8984b380f3f6f7189f27b9311
[ "Apache-2.0" ]
1
2022-02-07T10:04:35.000Z
2022-02-07T10:04:35.000Z
ipynb/labeling_prep/extract_data_for_annotation.ipynb
IBPA/FoodAtlas
0a431f0a391adaa8984b380f3f6f7189f27b9311
[ "Apache-2.0" ]
null
null
null
ipynb/labeling_prep/extract_data_for_annotation.ipynb
IBPA/FoodAtlas
0a431f0a391adaa8984b380f3f6f7189f27b9311
[ "Apache-2.0" ]
null
null
null
25.127611
1,658
0.347379
[ [ [ "%load_ext nb_black\n%load_ext autoreload\n%autoreload 2\n\nfrom typing import Union, Tuple, List\nimport pandas as pd\nimport numpy as np\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tinydb import TinyDB, Query\nfrom IPython.core.display import display, HTML\nfrom unidecode import unidecode\nimport sys\n\nsys.path.append(\"../src/\")\nfrom food_ke.postprocess_utils import (\n standardize_table_df,\n pop_subheaders,\n reset_columns,\n reset_index,\n _is_numeric_cell_value,\n _split_ct_plus_var_cell_single,\n _is_ct_plus_var,\n)\n\n\nfrom food_ke.ner import doi_to_filename as ner_doi_to_filename\nfrom food_ke.preprocess_utils import (\n _get_ents,\n _get_entity_result,\n _get_clean_soup,\n _get_pd_table,\n preprocess_table,\n)\nfrom food_ke.abbreviations import (\n doi_to_filename as abbrev_doi_to_filename,\n _get_abbrev_out,\n)\nfrom food_ke.stubs import DOCUMENT_DB_DIR, DATA_DIR\nfrom scinthesis.utils import xpath_soup\n\nimport os\nfrom tqdm import tqdm\nfrom pathlib import Path\n\nfrom scinthesis.labelstudio.converters import LSConverter\n\nimport spacy", "_____no_output_____" ] ], [ [ "NER imports", "_____no_output_____" ] ], [ [ "from food_ke.stubs import (\n FOODBERT_URL,\n CHEMDATAEXTRACTOR_URL,\n SPACY_URL,\n NER_DATA_DIR,\n NER_RESOLVED_DIR,\n SERVICES_URL,\n)\nimport requests\nfrom tqdm import tqdm", "_____no_output_____" ], [ "db = TinyDB(DATA_DIR / \"document_db/scraped_articles.json\")\n\nfull_data = pd.read_csv(DATA_DIR / \"full_data.csv\")\nfull_data = full_data.where(full_data[\"Publication Year\"] > 2000)\nfull_data = full_data.dropna(subset=[\"PMID\"])\nfull_data.DOI = \"https://doi.org/\" + full_data.DOI", "_____no_output_____" ] ], [ [ "### Save article entities to file", "_____no_output_____" ] ], [ [ "converter = LSConverter(from_name=\"label\", to_name=\"fulltext\")", "_____no_output_____" ], [ "def _get_model_ents(model_name: str, texts: List[str]):\n response = requests.post(\n \"http://127.0.0.1:8000/\" + model_name,\n data=json.dumps(texts),\n headers={\"Content-type\": \"application/json\"},\n )\n ents = response.json()\n return ents", "_____no_output_____" ], [ "ner_db = TinyDB(DATA_DIR / \"document_db/ner_raw/ner_raw_db.json\")\nEnt = Query()", "_____no_output_____" ], [ "def _filter_texts(ner_db: TinyDB, model: str, doi: str, xpaths: List[str]):\n search_docs = ner_db.search(\n (Ent.model == model) & (Ent.doi == doi) & (Ent.xpath.one_of(xpaths))\n )\n xpaths_exclude = set([doc[\"xpath\"] for doc in search_docs])\n xpaths_include = []\n texts_include = []\n\n for (xpath, text) in zip(xpaths, texts):\n if xpath not in xpaths_exclude:\n # if not _is_numeric_cell_value(text):\n xpaths_include.append(xpath)\n texts_include.append(text)\n\n return texts_include, xpaths_include", "_____no_output_____" ], [ "def _get_table_elements(table):\n return (\n table.find_all(\"td\") + table.find_all(\"th\") + table.parent.parent.find_all(\"p\")\n )", "_____no_output_____" ] ], [ [ "### Get individual NER model predictions", "_____no_output_____" ] ], [ [ "ner_db.truncate()", "_____no_output_____" ], [ "out_dfs = []\nabrv_tables = []\npercent_cols_cont_abrv_list = []\ntable_contains_abrv = []\n\nfor article in tqdm(list(iter(db))[0:5]):\n doi = article[\"doi\"]\n soup = _get_clean_soup(article[\"html\"])\n ents = {}\n\n for i, table in enumerate(list(set(soup.find_all(\"table\")))):\n tabxpath = xpath_soup(table)\n table_df = _get_pd_table(str(table))\n\n if table_df is not None:\n table_df = preprocess_table(table_df)\n elements = 
_get_table_elements(table)\n\n texts = [x.text for x in elements]\n xpaths = [xpath_soup(el) for el in elements]\n\n for model in [\"foodbert\", \"spacy\", \"chemdataextractor\"]:\n # texts_include, xpaths_include = _filter_texts(\n # ner_db, model, doi, xpaths\n # )\n texts_include, xpaths_include = texts, xpaths\n if texts_include:\n ents = _get_model_ents(model, texts_include)\n for (text, xpath, ent) in zip(texts_include, xpaths_include, ents):\n ner_db.insert(\n {\n \"model\": model,\n \"doi\": doi,\n \"xpath\": xpath,\n \"text\": text,\n \"ents\": ent,\n }\n )", "100%|██████████| 5/5 [03:43<00:00, 44.66s/it]\n" ] ], [ [ "### Resolve predictions", "_____no_output_____" ] ], [ [ "entity_type_mapping = {\n \"UNITS\": \"Units\",\n \"BASIS\": \"Basis\",\n \"STATISTIC\": \"Statistic\",\n \"REPLICATES\": \"Number of Replictes\",\n \"NORP\": \"Food Origin\",\n \"GPE\": \"Food Origin\",\n}", "_____no_output_____" ], [ "ents = list(iter(ner_db))\nfrom food_ke.ner import resolve_ner_conflicts\nfrom itertools import chain", "_____no_output_____" ], [ "from collections import defaultdict", "_____no_output_____" ] ], [ [ "### TODO this can be parallelized", "_____no_output_____" ] ], [ [ "resolved_ents_all = defaultdict(lambda: [])\n\nfor (doi, xpath), d in tqdm(pd.DataFrame(ents).groupby([\"doi\", \"xpath\"])):\n df = d.set_index(\"model\")\n foodbert_ents = df.loc[\"foodbert\", \"ents\"]\n spacy_ents = df.loc[\"spacy\", \"ents\"]\n\n cde_ents = df.loc[\"chemdataextractor\", \"ents\"]\n\n if isinstance(foodbert_ents, pd.Series):\n foodbert_ents = foodbert_ents.sum()\n\n if isinstance(spacy_ents, pd.Series):\n spacy_ents = spacy_ents.sum()\n\n spacy_ents = [x for x in spacy_ents if x[\"type\"] in entity_type_mapping.keys()]\n\n if isinstance(cde_ents, pd.Series):\n cde_ents = cde_ents.sum()\n\n resolved_ents = resolve_ner_conflicts(\n foodbert_ents, spacy_ents, cde_ents, converter, xpath\n )\n resolved_ents_all[doi] += resolved_ents", "\r 0%| | 0/1874 [00:00<?, ?it/s]" ], [ "for k, v in resolved_ents_all.items():\n fname = ner_doi_to_filename(k)\n json.dump(v, open(fname, \"w\"))", "/Users/gabe/aifs_ke/food_ke/../data/document_db/ner_raw/10.1021-jf000728z.json\n/Users/gabe/aifs_ke/food_ke/../data/document_db/ner_raw/10.1021-jf000892m.json\n/Users/gabe/aifs_ke/food_ke/../data/document_db/ner_raw/10.1021-jf001509g.json\n/Users/gabe/aifs_ke/food_ke/../data/document_db/ner_raw/10.1093-jn-131.3.972S.json\n" ] ], [ [ "### Build full article json objects for annotation", "_____no_output_____" ] ], [ [ "out = []\nfor article in list(iter(db))[0:5]:\n soup = _get_clean_soup(article[\"html\"])\n doi = article[\"doi\"]\n fulltext_html = str(soup)\n\n # abbreviations\n abbrev_out = _get_abbrev_out(doi)\n # ner\n try:\n entity_result = _get_entity_result(doi)\n except:\n continue\n\n outdata = {\n \"fulltext_html\": fulltext_html,\n \"goldstandard_html\": \"\", # TODO\n \"doi\": doi,\n }\n outdata.update(abbrev_out)\n\n if outdata is not None:\n outitem = {\n \"data\": outdata,\n \"predictions\": [\n {\n \"result\": entity_result,\n \"score\": 1.0,\n \"model_version\": \"model_version_null\",\n }\n ],\n }\n\n out.append(outitem)\n\njson.dump(out, open(DATA_DIR / \"labeling_input/fulltext_for_annotation.json\", \"w\"))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
ec7647a7b81e150af7e1ac4f460ce69f674c80ff
359,365
ipynb
Jupyter Notebook
geospatial-analysis/exercise/your-first-map.ipynb
hmdprs/data-scientist
0eccf20a809cd5239843ccfbf75a34d03a95a2f9
[ "MIT" ]
5
2020-08-08T11:41:04.000Z
2021-05-29T19:41:05.000Z
geospatial-analysis/exercise/your-first-map.ipynb
hmdprs/data-scientist
0eccf20a809cd5239843ccfbf75a34d03a95a2f9
[ "MIT" ]
null
null
null
geospatial-analysis/exercise/your-first-map.ipynb
hmdprs/data-scientist
0eccf20a809cd5239843ccfbf75a34d03a95a2f9
[ "MIT" ]
null
null
null
469.144909
169,708
0.93604
[ [ [ "**[Geospatial Analysis Home Page](https://www.kaggle.com/learn/geospatial-analysis)**\n\n---\n", "_____no_output_____" ], [ "# Introduction\n\n[Kiva.org](https://www.kiva.org/) is an online crowdfunding platform extending financial services to poor people around the world. Kiva lenders have provided over $1 billion dollars in loans to over 2 million people.\n\n<center>\n<img src=\"https://i.imgur.com/2G8C53X.png\" width=\"500\"><br/>\n</center>\n\nKiva reaches some of the most remote places in the world through their global network of \"Field Partners\". These partners are local organizations working in communities to vet borrowers, provide services, and administer loans.\n\nIn this exercise, you'll investigate Kiva loans in the Philippines. Can you identify regions that might be outside of Kiva's current network, in order to identify opportunities for recruiting new Field Partners?\n\nTo get started, run the code cell below to set up our feedback system.", "_____no_output_____" ] ], [ [ "from learntools.core import binder\nbinder.bind(globals())\nfrom learntools.geospatial.ex1 import *\nprint(\"Setup is completed.\")\n\nimport geopandas as gpd", "Setup is completed.\n" ] ], [ [ "## 1. Get the data.\n\nUse the next cell to load the shapefile located at `loans_filepath` to create a GeoDataFrame `world_loans`. ", "_____no_output_____" ] ], [ [ "loans_filepath = \"../input/geospatial-learn-course-data/kiva_loans/kiva_loans/kiva_loans.shp\"\n\n# load the data\nworld_loans = gpd.read_file(loans_filepath)\n\n# check your answer\nq_1.check()\n\n# uncomment to view the first five rows of the data\nworld_loans.head()", "_____no_output_____" ], [ "# lines below will give you a hint or solution code\n# q_1.hint()\n# q_1.solution()", "_____no_output_____" ] ], [ [ "<hr/>\n\n## 2. Plot the data.\n\nRun the next code cell without changes to load a GeoDataFrame `world` containing country boundaries.", "_____no_output_____" ] ], [ [ "# This dataset is provided in GeoPandas\nworld_filepath = gpd.datasets.get_path('naturalearth_lowres')\nworld = gpd.read_file(world_filepath)\nworld.head()", "_____no_output_____" ] ], [ [ "Use the `world` and `world_loans` GeoDataFrames to visualize Kiva loan locations across the world.", "_____no_output_____" ] ], [ [ "# define a base map with county boundaries\nax = world.plot(figsize=(20,20), color='whitesmoke', linestyle=':', edgecolor='lightgray')\n\n# add loans to the base map\nworld_loans.plot(ax=ax, markersize=2)\n\n# uncomment to see a hint\n# q_2.hint()", "_____no_output_____" ], [ "# Get credit for your work after you have created a map\nq_2.check()\n\n# uncomment to see our solution (your code may look different!)\n# q_2.solution()", "_____no_output_____" ] ], [ [ "<hr/>\n\n## 3. Select loans based in the Philippines.\n\nNext, you'll focus on loans that are based in the Philippines. Use the next code cell to create a GeoDataFrame `PHL_loans` which contains all rows from `world_loans` with loans that are based in the Philippines.", "_____no_output_____" ] ], [ [ "PHL_loans = world_loans[(world_loans['country'] == 'Philippines')]\n\n# check your answer\nq_3.check()", "_____no_output_____" ], [ "# lines below will give you a hint or solution code\n# q_3.hint()\n# q_3.solution()", "_____no_output_____" ] ], [ [ "<hr/>\n\n## 4. 
Understand loans in the Philippines.\n\nRun the next code cell without changes to load a GeoDataFrame `PHL` containing boundaries for all islands in the Philippines.", "_____no_output_____" ] ], [ [ "# Load a KML file containing island boundaries\ngpd.io.file.fiona.drvsupport.supported_drivers['KML'] = 'rw'\nPHL = gpd.read_file(\"../input/geospatial-learn-course-data/Philippines_AL258.kml\", driver='KML')\nPHL.head()", "_____no_output_____" ] ], [ [ "Use the `PHL` and `PHL_loans` GeoDataFrames to visualize loans in the Philippines.", "_____no_output_____" ] ], [ [ "# define a base map with county boundaries\nax_ph = PHL.plot(figsize=(20,20), color='whitesmoke', linestyle=':', edgecolor='lightgray')\n\n# add loans to the base map\nPHL_loans.plot(ax=ax_ph, markersize=2)\n\n# Uncomment to see a hint\n#q_4.a.hint()", "_____no_output_____" ], [ "# Get credit for your work after you have created a map\nq_4.a.check()\n\n# Uncomment to see our solution (your code may look different!)\n# q_4.a.solution()", "_____no_output_____" ] ], [ [ "Can you identify any islands where it might be useful to recruit new Field Partners? Do any islands currently look outside of Kiva's reach?\n\nYou might find [this map](https://bit.ly/2U2G7x7) useful to answer the question.", "_____no_output_____" ] ], [ [ "# View the solution (Run this code cell to receive credit!)\nq_4.b.solution()", "_____no_output_____" ] ], [ [ "# Keep going\n\nContinue to learn about **[coordinate reference systems](https://www.kaggle.com/alexisbcook/coordinate-reference-systems)**.", "_____no_output_____" ], [ "---\n**[Geospatial Analysis Home Page](https://www.kaggle.com/learn/geospatial-analysis)**\n\n\n\n\n\n*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum) to chat with other Learners.*", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec765ce591525cec61f81090c3abee081d0e96f1
5,963
ipynb
Jupyter Notebook
dataFrame.ipynb
spu-bigdataanalytics-201/welcome-GulkhanAnassova
c18b65372417b72aac2f689e439ec46dbec541cf
[ "MIT" ]
null
null
null
dataFrame.ipynb
spu-bigdataanalytics-201/welcome-GulkhanAnassova
c18b65372417b72aac2f689e439ec46dbec541cf
[ "MIT" ]
null
null
null
dataFrame.ipynb
spu-bigdataanalytics-201/welcome-GulkhanAnassova
c18b65372417b72aac2f689e439ec46dbec541cf
[ "MIT" ]
null
null
null
19.051118
222
0.427637
[ [ [ "import pandas as pd\nimport numpy as np\n\ndf2 = pd.DataFrame({'A': 1.,\n 'B': pd.Timestamp('20130102'),\n 'C': pd.Series(1, index=list(range(4)), dtype='float32'),\n 'D': np.array([3] * 4, dtype='int32'),\n 'E': pd.Categorical([\"test\", \"train\", \"test\", \"train\"]),\n 'F': 'foo'})", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nlabels = ['a', 'b', 'c']\nmy_data = [10,20,30]\narr = np.array(my_data)\nd = {'a':10, 'b':20, 'c':30}\n", "_____no_output_____" ], [ "pd.Series(arr,labels)", "_____no_output_____" ], [ "pd.Series(d)", "_____no_output_____" ], [ "d", "_____no_output_____" ], [ "pd.Series(arr)", "_____no_output_____" ], [ "ser1 = pd.Series([1,2,3,4],['USA','Germany','USSR','Japan'])", "_____no_output_____" ], [ "ser1", "_____no_output_____" ], [ "ser2 = pd.Series([1,2,5,4],['USA','Germany','Italy','Japan'])", "_____no_output_____" ], [ "ser2", "_____no_output_____" ], [ "ser1['USA']", "_____no_output_____" ], [ "ser3 = pd.Series(labels)", "_____no_output_____" ], [ "ser3[0]", "_____no_output_____" ], [ "ser1", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec766c2aced89e577f43bc5f9b46c9cd5cf62a90
15,492
ipynb
Jupyter Notebook
arl-python/examples/arl/imaging-bags.ipynb
Song655/arlo
cee1613d4a2b2e1263da9d5b4b9930eef569509c
[ "Apache-2.0" ]
1
2019-10-18T13:11:01.000Z
2019-10-18T13:11:01.000Z
arl-python/examples/arl/imaging-bags.ipynb
Song655/arlo
cee1613d4a2b2e1263da9d5b4b9930eef569509c
[ "Apache-2.0" ]
1
2019-01-28T23:07:32.000Z
2019-01-28T23:07:32.000Z
arl-python/examples/arl/imaging-bags.ipynb
Song655/arlo
cee1613d4a2b2e1263da9d5b4b9930eef569509c
[ "Apache-2.0" ]
5
2018-03-27T03:30:34.000Z
2019-10-18T13:05:37.000Z
33.460043
271
0.580235
[ [ [ "# Dask bag-based imaging demonstration\n\nThis notebook explores the use of dask bags for parallelisation. For the most part we work with the bags directly. Much of this can be hidden in standard functions.\n\nSee imaging-dask notebook for processing with dask delayed", "_____no_output_____" ], [ "We create the visibility and fill in values with the transform of a number of point sources. ", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport os\nimport sys\n\nfrom dask import delayed, bag\nfrom distributed import Client\n\nresults_dir = './results'\nos.makedirs(results_dir, exist_ok=True)\n\nfrom matplotlib import pylab\n\npylab.rcParams['figure.figsize'] = (12.0, 12.0)\npylab.rcParams['image.cmap'] = 'rainbow'\n\nimport numpy\n\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nfrom astropy.wcs.utils import pixel_to_skycoord\n\nfrom matplotlib import pyplot as plt\n\nfrom arl.calibration.operations import apply_gaintable\nfrom arl.data.polarisation import PolarisationFrame\nfrom arl.visibility.base import create_visibility, copy_visibility\nfrom arl.visibility.operations import concatenate_visibility\nfrom arl.skycomponent.operations import create_skycomponent\nfrom arl.image.operations import show_image, qa_image, create_empty_image_like,\\\n pad_image\nfrom arl.image.deconvolution import deconvolve_cube, restore_cube\nfrom arl.util.testing_support import create_named_configuration, create_test_image\nfrom arl.imaging import create_image_from_visibility, predict_skycomponent_visibility, \\\n advise_wide_field, predict_2d, invert_2d, normalize_sumwt\nfrom arl.imaging.wstack import predict_wstack_single, invert_wstack_single\nfrom arl.imaging.timeslice import predict_timeslice_single, invert_timeslice_single\nfrom arl.visibility.gather_scatter import visibility_gather_w, visibility_scatter_w\nfrom arl.visibility.gather_scatter import visibility_gather_time, visibility_scatter_time\nfrom arl.imaging.weighting import weight_visibility\nfrom arl.graphs.dask_init import get_dask_Client\nfrom arl.pipelines.graphs import create_continuum_imaging_pipeline_graph\nfrom arl.graphs.bags import safe_invert_list, safe_predict_list, sum_invert_bag_results, deconvolve_bag\n\nimport logging\n\nlog = logging.getLogger()\nlog.setLevel(logging.INFO)\nlog.addHandler(logging.StreamHandler(sys.stdout))", "_____no_output_____" ] ], [ [ "Define a function to create the visibilities", "_____no_output_____" ] ], [ [ "def ingest_visibility(freq=1e8, chan_width=1e6, reffrequency=[1e8], npixel=512,\n init=False):\n lowcore = create_named_configuration('LOWBD2-CORE')\n times = numpy.linspace(-numpy.pi / 4, numpy.pi / 4, 7)\n frequency = numpy.array([freq])\n channel_bandwidth = numpy.array([chan_width])\n\n phasecentre = SkyCoord(\n ra=+15.0 * u.deg, dec=-26.7 * u.deg, frame='icrs', equinox='J2000')\n vt = create_visibility(\n lowcore,\n times,\n frequency,\n channel_bandwidth=channel_bandwidth,\n weight=1.0,\n phasecentre=phasecentre,\n polarisation_frame=PolarisationFrame(\"stokesI\"))\n if init:\n cellsize = 0.001\n model = create_image_from_visibility(\n vt,\n npixel=npixel,\n cellsize=cellsize,\n npol=1,\n frequency=reffrequency,\n polarisation_frame=PolarisationFrame(\"stokesI\"))\n flux = numpy.array([[100.0]])\n facets = 4\n\n spacing_pixels = npixel // facets\n spacing = 180.0 * cellsize * spacing_pixels / numpy.pi\n centers = -1.5, -0.5, +0.5, +1.5\n comps = list()\n for iy in centers:\n for ix in centers:\n pra = int(round(npixel // 2 + ix * spacing_pixels - 1))\n 
pdec = int(round(npixel // 2 + iy * spacing_pixels - 1))\n sc = pixel_to_skycoord(pra, pdec, model.wcs)\n comps.append(\n create_skycomponent(\n flux=flux,\n frequency=reffrequency,\n direction=sc,\n polarisation_frame=PolarisationFrame(\"stokesI\")))\n predict_skycomponent_visibility(vt, comps)\n\n return vt", "_____no_output_____" ] ], [ [ "Now make seven of these spanning 800MHz to 1200MHz and put them into a Dask bag.", "_____no_output_____" ] ], [ [ "nfreqwin=7\nvis_bag=bag.from_sequence([ingest_visibility(freq) \n for freq in numpy.linspace(0.8e8,1.2e8,nfreqwin)])\nprint(vis_bag)", "_____no_output_____" ] ], [ [ "We need to compute the bag in order to use it. First we just need a representative data set to calculate imaging parameters.", "_____no_output_____" ] ], [ [ "npixel=512\nfacets=4\ndef get_LSM(vt, cellsize=0.001, reffrequency=[1e8], npixel=512):\n model = pad_image(create_test_image(vt, cellsize=cellsize, frequency=reffrequency, \n phasecentre=vt.phasecentre,\n polarisation_frame=PolarisationFrame(\"stokesI\")),\n shape=[1, 1, 512, 512])\n return model\n\nvis_bag = list(vis_bag)\nmodel = get_LSM(vis_bag[0])\nadvice=advise_wide_field(vis_bag[0], guard_band_image=4.0)\nvis_slices=11", "_____no_output_____" ] ], [ [ "Now we can set up the prediction of the visibility from the model. We scatter over w and then apply the wstack for a single w plane. Then we concatenate the visibilities back together.\n\nTo save recomputing this, we compute it now and place it into another bag of the same name.", "_____no_output_____" ] ], [ [ "vis_bag=bag.from_sequence([ingest_visibility(freq) \n for freq in numpy.linspace(0.8e8,1.2e8,nfreqwin)])\\\n .map(visibility_scatter_w, vis_slices=vis_slices)\\\n .map(safe_predict_list, model, predict=predict_wstack_single)\\\n .map(concatenate_visibility)\n \nvis_bag=bag.from_sequence(vis_bag.compute())", "_____no_output_____" ] ], [ [ "Check out the visibility function. To get the result out of the bag, we do need to compute it but this time it's just a lookup.", "_____no_output_____" ] ], [ [ "vt = vis_bag.compute()[0]\n\n# To check that we got the prediction right, plot the amplitude of the visibility.\nuvdist=numpy.sqrt(vt.data['uvw'][:,0]**2+vt.data['uvw'][:,1]**2)\nplt.clf()\nplt.plot(uvdist, numpy.abs(vt.data['vis']), '.')\nplt.xlabel('uvdist')\nplt.ylabel('Amp Visibility')\nplt.show()", "_____no_output_____" ] ], [ [ "Now we can make the dirty images. As before we will scatter each of the 7 frequency windows (patitions) over w, giving a 2 level nested structure. We make a separate image for each frequency window. The image resolution noticeably improves for the high frequencies.", "_____no_output_____" ] ], [ [ "dirty_bag=vis_bag\\\n .map(visibility_scatter_w, vis_slices=vis_slices)\\\n .map(safe_invert_list, model, invert_wstack_single, dopsf=False, normalize=True)\\\n .map(sum_invert_bag_results)\ndirty_bag=bag.from_sequence(dirty_bag.compute())\n\npsf_bag=vis_bag\\\n .map(visibility_scatter_w, vis_slices=vis_slices)\\\n .map(safe_invert_list, model, invert_wstack_single, dopsf=True, normalize=True)\\\n .map(sum_invert_bag_results)\n \npsf_bag=bag.from_sequence(psf_bag.compute())\n \nfor i, dirty in enumerate(dirty_bag.compute()):\n print(qa_image(dirty[0], context='dirty'))\n fig = show_image(dirty[0], title='Dirty image %d, weight %.3f' \n % (i, dirty[1]))\n plt.show()", "_____no_output_____" ] ], [ [ "In the next step all these seven images will be deconvolved in parallel. 
In this case we again need to zip the dirty and psf images and then use a simple adapter function.", "_____no_output_____" ] ], [ [ "def bag_deconvolve(dirty_psf_zip, **kwargs):\n result = deconvolve_cube(dirty_psf_zip[0][0], dirty_psf_zip[1][0], **kwargs)\n return result[0]\n\ncomp_bag=bag.zip(dirty_bag, psf_bag).map(bag_deconvolve, niter=1000, threshold=0.001, \n fracthresh=0.01, window_shape='quarter',\n gain=0.7, scales=[0, 3, 10, 30])\n\ncomp = comp_bag.compute()\nfig=show_image(comp[0])\n\ncomp_bag=bag.from_sequence(comp)", "_____no_output_____" ] ], [ [ "Now we can calculate the model and residual visibility. To calculate the residual visibility, we will zip the original and model visibilities together and map our adapter across the zipped bag.", "_____no_output_____" ] ], [ [ "model_vis_bag=vis_bag\\\n .map(visibility_scatter_w, vis_slices=101)\\\n .map(safe_predict_list, comp_bag, predict=predict_wstack_single)\\\n .map(concatenate_visibility)\n \nmodel_vis_bag = bag.from_sequence(model_vis_bag.compute())\n\ndef subtract_vis(vis_model_zip):\n residual_vis = copy_visibility(vis_model_zip[0])\n residual_vis.data['vis'] -= vis_model_zip[1].data['vis']\n return residual_vis\n\nresidual_vis_bag = bag.zip(vis_bag, model_vis_bag)\\\n .map(subtract_vis)\n \nresidual_vis_bag=bag.from_sequence(residual_vis_bag.compute())\n \novt = vis_bag.compute()[0]\nvt = residual_vis_bag.compute()[0]\n\n# To check that we got the prediction right, plot the amplitude of the visibility.\nuvdist=numpy.sqrt(vt.data['uvw'][:,0]**2+vt.data['uvw'][:,1]**2)\nplt.clf()\nplt.plot(uvdist, numpy.abs(ovt.data['vis']), '.', color='b')\nplt.plot(uvdist, numpy.abs(vt.data['vis']), '.', color='r')\nplt.xlabel('uvdist')\nplt.ylabel('Amp Visibility')\nplt.show()", "_____no_output_____" ] ], [ [ "Now we can restore the images", "_____no_output_____" ] ], [ [ "residual_bag=residual_vis_bag\\\n .map(visibility_scatter_w, vis_slices=11)\\\n .map(safe_invert_list, model, invert_wstack_single, dopsf=False, normalize=True)\\\n .map(sum_invert_bag_results)\n \nresidual_bag=bag.from_sequence(residual_bag.compute())\n\ndef bag_restore(cpr_zip, **kwargs):\n return restore_cube(cpr_zip[0], cpr_zip[1][0], cpr_zip[2][0], **kwargs)\n\nrestore_bag = bag.zip(comp_bag, psf_bag, residual_bag)\\\n .map(bag_restore)\n\nfor i, restored in enumerate(restore_bag.compute()):\n fig = show_image(restored, title='Restored image %d' %i)\n plt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec7679e2c9faa82b2fe960db2e63b72051500281
33,279
ipynb
Jupyter Notebook
azure_code/Training_Testing_MultiK_RF-Copy1.ipynb
hemanthme22/Zero-Shot-Learning
23dc8bf82013e57e2b1365bdb19326307a679fb0
[ "MIT" ]
null
null
null
azure_code/Training_Testing_MultiK_RF-Copy1.ipynb
hemanthme22/Zero-Shot-Learning
23dc8bf82013e57e2b1365bdb19326307a679fb0
[ "MIT" ]
null
null
null
azure_code/Training_Testing_MultiK_RF-Copy1.ipynb
hemanthme22/Zero-Shot-Learning
23dc8bf82013e57e2b1365bdb19326307a679fb0
[ "MIT" ]
1
2021-03-27T11:13:52.000Z
2021-03-27T11:13:52.000Z
60.950549
1,814
0.634274
[ [ [ "# Training classifiers for each values of K using saved features", "_____no_output_____" ] ], [ [ "import os \nimport numpy as np \nimport pickle\nimport pandas as pd\nimport gc\n\n\ndata_dir = os.path.join(os.getcwd(),'BlobStorage')\n\ntrain_data_df = pd.read_pickle(data_dir+'/train_data_features_df.pkl')\nval_data_df = pd.read_pickle(data_dir+'/val_data_features_df.pkl')", "_____no_output_____" ], [ "#Combining train and val data\ntrain_val_data_df = pd.concat([train_data_df,val_data_df])", "_____no_output_____" ], [ "#Reading Test data\ntest_data_df = pd.read_pickle(data_dir+'/test_data_features_df.pkl')\nX_test = test_data_df.img_features.apply(pd.Series)\n#y_test = test_data_df['class_name'].astype('category')", "_____no_output_____" ], [ "print(train_data_df.shape)\nprint(val_data_df.shape)\nprint(test_data_df.shape)", "_____no_output_____" ], [ "#Training a classifier for each value of K.\nfrom sklearn.ensemble import RandomForestClassifier\n\nf = open(\"fasttext/clusterCenters.txt\",'r')\n\nlines = f.readlines()\n\nfor line in lines[12:]:\n \n line = line.split()\n modelName = line[0]\n classesNow = line[1:]\n print(modelName)\n \n #Subsetting dataframe for only the classes being used now.\n train_now_df = train_val_data_df[train_val_data_df['class_name'].isin(classesNow)]\n \n X_train_val = train_now_df.img_features.apply(pd.Series)\n y_train_val = train_now_df['class_name'].astype('category')\n\n #training randomforest\n mdl_rf = RandomForestClassifier(n_estimators=700,random_state=0,verbose=1,n_jobs=-1, min_samples_split= 2, min_samples_leaf= 1, max_features= 'auto', max_depth= 40, bootstrap= False)\n \n clf_fit = mdl_rf.fit(X_train_val, y_train_val)\n\n #Saving baseline model\n #pickle.dump(clf_fit, open('trained_models/'+ modelName + '.sav', 'wb'))\n \n # evaluate the model on test data\n yhat_clf = clf_fit.predict(X_test)\n\n pred_df = pd.DataFrame(data=yhat_clf, index=test_data_df['image_paths'], columns=['max_prob'])\n pred_df.to_pickle('predictions/'+modelName+'.pkl') \n \n #Finding prob predictions for all classes\n yhat_clf_prob = clf_fit.predict_proba(X_test)\n \n pred_df = pd.DataFrame(data=yhat_clf_prob, index=test_data_df['image_paths'], columns=clf_fit.classes_)\n pred_df.to_pickle('predictions/all_categories/'+modelName+'.pkl') \n \n del clf_fit,train_now_df,X_train_val,y_train_val\n gc.collect()\n \nf.close()", "model70\n" ] ], [ [ "# Using Trained classifiers to predict on test data for each K. 
Saving predictions", "_____no_output_____" ] ], [ [ "import os \nimport numpy as np \nimport pickle\nimport pandas as pd\n\ndata_dir = os.path.join(os.getcwd(),'BlobStorage')\n\ntest_data_df = pd.read_pickle(data_dir+'/test_data_features_df.pkl')", "_____no_output_____" ], [ "X_test = test_data_df.img_features.apply(pd.Series)\ny_test = test_data_df['class_name'].astype('category')", "_____no_output_____" ], [ "f = open(\"fasttext/clusterCenters.txt\",'r')\n\nlines = f.readlines()\n\nfor line in lines:\n line = line.split()\n modelName = line[0]\n classesNow = line[1:]\n print(modelName)\n \n clf_fit = pickle.load(open('trained_models/'+ modelName + '.sav', 'rb')) \n \n # evaluate the model on test data\n yhat_clf = clf_fit.predict(X_test)\n \n pred_df = pd.DataFrame(data=yhat_clf, index=test_data_df['image_paths'], columns=['max_prob'])\n pred_df.to_pickle('predictions/'+modelName+'.pkl') \n \n #Finding prob predictions for all classes\n yhat_clf_prob = clf_fit.predict_proba(X_test)\n \n pred_df = pd.DataFrame(data=yhat_clf_prob, index=test_data_df['image_paths'], columns=clf_fit.classes_)\n pred_df.to_pickle('predictions/all_categories/'+modelName+'.pkl') \n\nf.close()", "_____no_output_____" ] ], [ [ "# Generating close word dict from FastText for each K", "_____no_output_____" ] ], [ [ "#Finding closest words to top predictions on testing set\nimport math\nimport pickle\nfrom scipy.spatial import distance\n#from itertools import islice\n\n#def take(n, iterable):\n# \"Return first n items of the iterable as a list\"\n# return list(islice(iterable, n))\n\ndef scipy_distance(v, u):\n return distance.euclidean(v, u)\n\n#Reading the fasttext dictionary populated at clustering phase\nfastext_dict = pickle.load(open(\"fasttext/fastext_dict.pkl\",\"rb\"))\nprint(len(fastext_dict))\n#print(fastext_dict.keys())\n#print(fastext_dict['car'])\n\n#total_classes = 379\n\ndict_keys = list(fastext_dict.keys())", "_____no_output_____" ], [ "#Generating the close words dictionary for all dictionary keys\n\ncloseWords_Count = 6\n \ncloseWord_dict = {}\n \nfor word in dict_keys:\n distance_dict = {}\n \n for fast_word in dict_keys:\n dist = scipy_distance(fastext_dict[word],fastext_dict[fast_word])\n distance_dict[fast_word] = dist\n \n #sorted_distace_dict = {k: v for k, v in sorted(distance_dict.items(), key=lambda item: item[1],reverse = True)[:closeWords_Count+1]}\n closeWords_dict = {k: v for k, v in sorted(distance_dict.items(), key=lambda item: item[1])[:closeWords_Count]}\n \n closeWord_dict[word] = list(closeWords_dict.keys())\n \npickle.dump(closeWord_dict, open('close_word_dict/closeWord_dict.pkl', 'wb'))", "_____no_output_____" ], [ "#Generating the close words dictionary for each model\n\ncloseWords_Count = 6\n\nf = open(\"fasttext/clusterCenters.txt\",'r')\n\nlines = f.readlines()\n\nfor line in lines:\n \n line = line.split()\n modelName = line[0]\n print(modelName)\n classesNow = line[1:]\n \n closeWord_dict = {}\n \n for word in classesNow:\n distance_dict = {}\n \n for fast_word in dict_keys:\n dist = scipy_distance(fastext_dict[word],fastext_dict[fast_word])\n distance_dict[fast_word] = dist\n \n #sorted_distace_dict = {k: v for k, v in sorted(distance_dict.items(), key=lambda item: item[1],reverse = True)[:closeWords_Count+1]}\n closeWords_dict = {k: v for k, v in sorted(distance_dict.items(), key=lambda item: item[1])[:closeWords_Count]}\n \n closeWord_dict[word] = list(closeWords_dict.keys())\n \n pickle.dump(closeWord_dict, open('close_word_dict/'+ modelName + '_closeWord_dict.pkl', 
'wb'))\n \n #pred_df = pd.read_csv('predictions/'+modelName+'.txt', header=True, index=True, sep=',')\nf.close()", "_____no_output_____" ] ], [ [ "# Running final predictions from classifier and close word dict", "_____no_output_____" ] ], [ [ "import os \nimport numpy as np \nimport pickle\nimport pandas as pd\n\ndata_dir = os.path.join(os.getcwd(),'BlobStorage')\n\ntest_data_df = pd.read_pickle(data_dir+'/test_data_features_df.pkl')\ny_test_df = pd.DataFrame(test_data_df.set_index('image_paths').class_name)\n\ncloseWord_dict = pickle.load(open('close_word_dict/closeWord_dict.pkl',\"rb\"))", "_____no_output_____" ], [ "#Running final predictions for top 3 predictions from classifier\nh = open(\"Kmodels_final_accuracy.txt\", \"w\")\n\nf = open(\"fasttext/clusterCenters.txt\",'r')\n\nlines = f.readlines()\n\nfor line in lines[0:12]:\n \n line = line.split()\n modelName = line[0]\n print(modelName)\n \n #Reading the predictions for each model\n pred_df = pd.read_pickle('predictions/all_categories/'+modelName+'.pkl')\n \n #Finding top 3 predictions\n top_n_predictions = np.argsort(pred_df.values, axis = 1)[:,-3:]\n #then find the associated code for each prediction\n top_class = pred_df.columns[top_n_predictions]\n top_class_df = pd.DataFrame(data=top_class,columns=['top1','top2','top3'],index = pred_df.index)\n\n results = pd.merge(y_test_df, top_class_df, left_index=True, right_index=True)\n \n #closeWord_dict = pickle.load(open('close_word_dict/'+ modelName + '_closeWord_dict.pkl',\"rb\"))\n \n results['guesses_1'] = results['top1'].map(closeWord_dict)\n results['guesses_2'] = results['top2'].map(closeWord_dict)\n results['guesses_3'] = results['top3'].map(closeWord_dict)\n \n pred_check = []\n \n #pred_df['pred_check'] = np.where(pred_df['actual_label'] in pred_df['guesses'],1,0)\n for index,row in results.iterrows():\n if (row['class_name'] in row['guesses_1']) or (row['class_name'] in row['guesses_2']) or (row['class_name'] in row['guesses_3']):\n pred_check.append(1)\n else:\n pred_check.append(0)\n \n results['pred_check'] = pred_check\n \n total_right = results['pred_check'].sum()\n total_rows = len(pred_df)\n accuracy = round(total_right/total_rows,4)\n \n h.write(str(modelName) + ',' + str(accuracy) + '\\n')\n \nf.close()\nh.close() ", "model10\nmodel15\nmodel20\nmodel25\nmodel30\nmodel35\nmodel40\nmodel45\nmodel50\nmodel55\nmodel60\nmodel65\n" ], [ "#Running final predictions for single predictions\nh = open(\"Kmodels_singlePred_final_accuracy.txt\", \"w\")\n\nf = open(\"fasttext/clusterCenters.txt\",'r')\n\nlines = f.readlines()\n\nfor line in lines:\n \n line = line.split()\n modelName = line[0]\n print(modelName)\n \n #Reading the predictions for each model\n pred_df = pd.read_pickle('predictions/'+modelName+'.pkl')\n\n results = pd.merge(y_test_df, pred_df, left_index=True, right_index=True)\n \n closeWord_dict = pickle.load(open('close_word_dict/'+ modelName + '_closeWord_dict.pkl',\"rb\"))\n \n results['guesses'] = results['max_prob'].map(closeWord_dict)\n \n pred_check = []\n \n #pred_df['pred_check'] = np.where(pred_df['actual_label'] in pred_df['guesses'],1,0)\n for index,row in results.iterrows():\n if row['class_name'] in row['guesses']:\n pred_check.append(1)\n else:\n pred_check.append(0)\n \n results['pred_check'] = pred_check\n \n total_right = results['pred_check'].sum()\n total_rows = len(pred_df)\n accuracy = round(total_right/total_rows,4)\n \n h.write(str(modelName) + ',' + str(accuracy) + '\\n')\n \nf.close()\nh.close() ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec768724029a21edc636bb2539fc4df09e074b84
893
ipynb
Jupyter Notebook
pset_challenging_ext/exercises/nb/p40.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
5
2019-04-08T20:05:37.000Z
2019-12-04T20:48:45.000Z
pset_challenging_ext/exercises/nb/p40.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
8
2019-04-15T15:16:05.000Z
2022-02-12T10:33:32.000Z
pset_challenging_ext/exercises/nb/p40.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
2
2019-04-10T00:14:42.000Z
2020-02-26T20:35:21.000Z
30.793103
210
0.599104
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec7694cb2053786b04a35fffcc2b235987134683
3,061
ipynb
Jupyter Notebook
notebooks/data-distributions/gaussian-1d.ipynb
MinhPhys/RoarkVoila
1b8a5861412f63a3a701ee43c8d625f740c2bccf
[ "MIT" ]
1
2022-02-22T01:18:30.000Z
2022-02-22T01:18:30.000Z
notebooks/data-distributions/gaussian-1d.ipynb
MinhPhys/RoarkVoila
1b8a5861412f63a3a701ee43c8d625f740c2bccf
[ "MIT" ]
null
null
null
notebooks/data-distributions/gaussian-1d.ipynb
MinhPhys/RoarkVoila
1b8a5861412f63a3a701ee43c8d625f740c2bccf
[ "MIT" ]
null
null
null
24.685484
122
0.552761
[ [ [ "In this notebook, we'll look at the impact of location, $\\mu$ and scale, $\\sigma$ on the plot of gaussian density.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom scipy.stats import norm\n\nfrom ipywidgets import FloatSlider, HBox, VBox\nimport bqplot.pyplot as plt", "_____no_output_____" ], [ "x = np.linspace(-10, 10, 200)\ny = norm.pdf(x)\n\n# plot the gaussian density\ntitle_tmpl = 'Gaussian Density (mu = {} and sigma = {})'\npdf_fig = plt.figure(title=title_tmpl.format(0, 1))\npdf_fig.layout.width = '800px'\npdf_fig.layout.height = '600px'\npdf_line = plt.plot(x, y, 'm', stroke_width=3)", "_____no_output_____" ], [ "# use two sliders to represent mu and sigma\nmu_slider = FloatSlider(description='$\\mu$', value=0, min=-5, max=5, step=.1)\nsigma_slider = FloatSlider(description='$\\sigma$', value=1, min=0.1, max=5, step=.1)\n\nslider_layout = HBox([mu_slider, sigma_slider])", "_____no_output_____" ], [ "def update_density(change):\n new_mu = mu_slider.value\n new_sigma = sigma_slider.value\n pdf_line.y = norm.pdf(x, new_mu, new_sigma)\n pdf_fig.title = title_tmpl.format(new_mu, new_sigma)\n\n# register the above callback with the 'value' trait of the sliders\nfor slider in [mu_slider, sigma_slider]:\n slider.observe(update_density, 'value')", "_____no_output_____" ], [ "VBox([pdf_fig, slider_layout])", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec769cbfac90ffb816b86e0646e18d4829a28e1e
2,357
ipynb
Jupyter Notebook
260-stone-game.ipynb
arkeros/projecteuler
c95db97583034af8fc61d5786692d82eabe50c12
[ "MIT" ]
2
2017-02-19T12:37:13.000Z
2021-01-19T04:58:09.000Z
260-stone-game.ipynb
arkeros/projecteuler
c95db97583034af8fc61d5786692d82eabe50c12
[ "MIT" ]
null
null
null
260-stone-game.ipynb
arkeros/projecteuler
c95db97583034af8fc61d5786692d82eabe50c12
[ "MIT" ]
4
2018-01-05T14:29:09.000Z
2020-01-27T13:37:40.000Z
38.016129
206
0.584641
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec76bd96b4f82fafb0a46bdfbac05b4641150301
6,747
ipynb
Jupyter Notebook
Notebooks/Image Classification - Parameter Optimalization.ipynb
StijnBosch/5850266-GIMA-Thesis
6051303bdb5eb84e481955547920f7e0757868a6
[ "CNRI-Python" ]
null
null
null
Notebooks/Image Classification - Parameter Optimalization.ipynb
StijnBosch/5850266-GIMA-Thesis
6051303bdb5eb84e481955547920f7e0757868a6
[ "CNRI-Python" ]
null
null
null
Notebooks/Image Classification - Parameter Optimalization.ipynb
StijnBosch/5850266-GIMA-Thesis
6051303bdb5eb84e481955547920f7e0757868a6
[ "CNRI-Python" ]
null
null
null
31.976303
96
0.504965
[ [ [ "#import necessary modules\nimport numpy as np\nimport rasterio as rs\nimport matplotlib.pyplot as plt\nimport earthpy.spatial as es\nimport earthpy.plot as ep\nfrom numpy import loadtxt\nfrom osgeo import gdal, gdalconst\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import decomposition\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn import svm", "_____no_output_____" ], [ "#Set imagery as variables\nasi = #'Path_to_SingleDateImage_Amsterdam.tif'\nami = #'Path_to_MedianImage_Amsterdam.tif'\nmsi = #'Path_to_SingleDateImage_Milano.tif'\nmmi = #'Path_to_MedianImage_Milano.tif'\nbsi = #'Path_to_SingleDateImage_Budapest.tif'\nbmi = #'Path_to_MedianImage_Budapest.tif'\nfilist = [asi, ami, msi, mmi, bsi, bmi]", "_____no_output_____" ], [ "def read_data(inras):\n # Read data\n img = gdal.Open(inras, gdal.GA_ReadOnly) \n bands = [img.GetRasterBand(i).ReadAsArray() for i in range(1, img.RasterCount + 1)]\n img = np.array(bands)\n img = img[0:12,:,:]\n img = np.transpose(img, [1, 2, 0])\n img_rgbnir = img[:,:,[3,2,1,7]]\n return img_rgbnir\n\n#Import Feature extraction\nNDWI = loadtxt(#'Path_to_NDWI.csv', delimiter=',')\nNDVI = loadtxt(#'Path_to_NDVI.csv', delimiter=',')\nEMP = np.load(#'Path_to_EMP.npy')\n\nGLCM_load = loadtxt(#'Path_to_GLCM.csv', delimiter=',')\nimg = read_data(#imgvariable)\nnr, nc, nb = img.shape\nnf = 16\nGLCM = GLCM_load.reshape(nr,nc,nf)\nwhere_are_NaNs = isnan(GLCM)\nGLCM[where_are_NaNs] = 0", "_____no_output_____" ], [ "%%time\n#IF\nestimators = [50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150]\nthreshold = [0.3, 0.4, 0.5, 0.6, 0.7]\nAOscores = {}\nPscores = {}\nRscores ={}\nFscores = {}\nAUCscores = {}\n\n\nfor e in estimators:\n for t in threshold:\n x = str(e+t)\n img = read_data_EMP(asi)\n stack = np.concatenate((img, EMPas, GLCMas), axis=2)\n stacked = np.dstack((stack, NDVIas, NDWIas))\n #Setup for IF and OCSVM\n X = stacked\n Ytr1 = Twita\n nr,nc,nb = X.shape\n ns = nr*nc\n X = X.reshape((ns,nb))\n Ytr = Ytr1.reshape((ns,))\n ind = np.where(Ytr > 0)\n Xtr = X[ind[0],:]\n Ytr = Ytr[ind[0]]\n standard_scaler = StandardScaler()\n Xtr = standard_scaler.fit_transform(Xtr) \n X = standard_scaler.transform(X)\n model = IsolationForest(n_estimators = e)\n model.fit(Xtr)\n yhat = model.score_samples(X)\n yhat1 = (yhat-min(yhat))/(max(yhat)-min(yhat))\n class_map1 = np.reshape(yhat1,(nr,nc))\n class_map = np.where(class_map1>t, 1, 0)\n class_map = class_map.flatten()\n pred = class_map\n true1 = Val30ma\n true2 = Val2GAIAa\n true3 = Val3GHS50a\n cfm1 = confusion_matrix(true1, pred)\n TN1,FP1,FN1,TP1= cfm1.ravel()\n cfm2 = confusion_matrix(true2, pred)\n TN2,FP2,FN2,TP2= cfm2.ravel()\n cfm3 = confusion_matrix(true3, pred)\n TN3,FP3,FN3,TP3= cfm3.ravel()\n\n AO = (((TP1+TN1)/(TP1+TN1+FP1+FN1))+\n ((TP2+TN2)/(TP2+TN2+FP2+FN2))+\n ((TP3+TN3)/(TP3+TN3+FP3+FN3)))/3\n AOscores[x] = AO\n \n P = ((TP1/(TP1+FP1))+\n (TP2/(TP2+FP2))+\n (TP3/(TP3+FP3)))/3\n Pscores[x] = P\n \n R = ((TP1/(TP1+FN1))+\n (TP2/(TP2+FN2))+\n (TP3/(TP3+FN3)))/3\n Rscores[x] = R\n \n F = (((2*TP1)/(2*TP1+FP1+FN1))+\n ((2*TP2)/(2*TP2+FP2+FN2))+\n ((2*TP3)/(2*TP3+FP3+FN3)))/3\n Fscores[x] = F\n \n auc1 = roc_auc_score(true1,pred)\n auc2 = roc_auc_score(true2,pred)\n auc3 = roc_auc_score(true3,pred)\n score = ((auc1+auc2+auc3)/3)\n AUCscores[x] = score", "_____no_output_____" ], [ "#Amsterdam\nall_values = AOscores.values()\nmax_value = max(all_values)\nmax_key = max(AOscores, key=AOscores.get)\nprint('OA' + str(max_key) + str(max_value))\n\nall_values1 = 
Pscores.values()\nmax_value1 = max(all_values1)\nmax_key1 = max(Pscores, key=Pscores.get)\nprint('P' + str(max_key1) + str(max_value1))\n\nall_values2 = Rscores.values()\nmax_value2 = max(all_values2)\nmax_key2 = max(Rscores, key=Rscores.get)\nprint('R' + str(max_key2) + str(max_value2))\n\nall_values3 = Fscores.values()\nmax_value3 = max(all_values3)\nmax_key3 = max(Fscores, key=Fscores.get)\nprint('F' + str(max_key3) + str(max_value3))\n\nall_values4 = AUCscores.values()\nmax_value4 = max(all_values4)\nmax_key4 = max(AUCscores, key=AUCscores.get)\nprint('AUC' + str(max_key4) + str(max_value4))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ec76ce79a33027eb6d70c79f33d3e68fdfa9d9f6
7,126
ipynb
Jupyter Notebook
code_notebooks/1.1-quantecon/pandas/index.ipynb
jacqueshbartlett/data-science-methods
d745c8e5001dcd534596393d7a8040f9b0f669d7
[ "MIT" ]
2
2020-03-04T09:51:31.000Z
2021-07-04T09:03:57.000Z
code_notebooks/1.1-quantecon/pandas/index.ipynb
jacqueshbartlett/data-science-methods
d745c8e5001dcd534596393d7a8040f9b0f669d7
[ "MIT" ]
6
2020-03-03T06:15:07.000Z
2022-03-12T00:17:32.000Z
code_notebooks/1.1-quantecon/pandas/index.ipynb
jacqueshbartlett/data-science-methods
d745c8e5001dcd534596393d7a8040f9b0f669d7
[ "MIT" ]
2
2020-04-02T09:34:12.000Z
2020-05-24T18:50:15.000Z
47.506667
133
0.643699
[ [ [ "# pandas\n\nThis section of the workshop covers data ingestion, cleaning,\nmanipulation, analysis, and visualization in Python.\n\nWe build on the skills learned in the [Python\nfundamentals](../python_fundamentals/index.ipynb) section and teach the\n[pandas](https://pandas.pydata.org) library.\n\nAt the end of this section, you will be able to:\n\n- Access data stored in a variety of formats \n- Combine multiple datasets based on observations that link them\n together \n- Perform custom operations on tables of data \n- Use the split-apply-combine method for analyzing sub-groups of data \n- Automate static analysis on changing data \n- Produce publication quality visualizations \n\n\nIn the end, our goal with this section is to provide you the\nnecessary skills to – at a minimum – **immediately** replicate your current\ndata analysis workflow in Python with no loss of total (computer +\nhuman) time.\n\nThis is a lower bound on the benefits you should expect to receive by\nstudying this section.\n\nThe expression “practice makes perfect” is especially true here.\n\nAs you work with these tools, both the time to write and the time to run\nyour programs will fall dramatically.\n\n<div class=\"toctree\">\n\n- [Introduction](intro.ipynb)\n - [pandas](intro.ipynb#pandas)\n - [Series](intro.ipynb#series)\n - [DataFrame](intro.ipynb#dataframe)\n - [Data Types](intro.ipynb#data-types)\n - [Changing DataFrames](intro.ipynb#changing-dataframes)\n - [Exercises](intro.ipynb#exercises)\n- [Basic Functionality](basics.ipynb)\n - [State Unemployment Data](basics.ipynb#state-unemployment-data)\n - [Dates in pandas](basics.ipynb#dates-in-pandas)\n - [DataFrame Aggregations](basics.ipynb#dataframe-aggregations)\n - [Transforms](basics.ipynb#transforms)\n - [Boolean Selection](basics.ipynb#boolean-selection)\n - [Exercises](basics.ipynb#exercises)\n- [The Index](the_index.ipynb)\n - [So What is this Index?](the_index.ipynb#so-what-is-this-index)\n - [Setting the Index](the_index.ipynb#setting-the-index)\n - [Re-setting the Index](the_index.ipynb#re-setting-the-index)\n - [Choose the Index Carefully](the_index.ipynb#choose-the-index-carefully)\n - [Exercises](the_index.ipynb#exercises)\n- [Storage Formats](storage_formats.ipynb)\n - [File Formats](storage_formats.ipynb#file-formats)\n - [Writing DataFrames](storage_formats.ipynb#writing-dataframes)\n - [Reading Files into DataFrames](storage_formats.ipynb#reading-files-into-dataframes)\n - [Practice](storage_formats.ipynb#practice)\n- [Cleaning Data](data_clean.ipynb)\n - [Cleaning Data](data_clean.ipynb#id1)\n - [String Methods](data_clean.ipynb#string-methods)\n - [Type Conversions](data_clean.ipynb#type-conversions)\n - [Missing Data](data_clean.ipynb#missing-data)\n - [Case Study](data_clean.ipynb#case-study)\n - [Appendix: Performance of `.str` Methods](data_clean.ipynb#appendix-performance-of-str-methods)\n - [Exercises](data_clean.ipynb#exercises)\n- [Reshape](reshape.ipynb)\n - [Tidy Data](reshape.ipynb#tidy-data)\n - [Reshaping your Data](reshape.ipynb#reshaping-your-data)\n - [Long vs Wide](reshape.ipynb#long-vs-wide)\n - [`set_index`, `reset_index`, and Transpose](reshape.ipynb#set-index-reset-index-and-transpose)\n - [`stack` and `unstack`](reshape.ipynb#stack-and-unstack)\n - [`melt`](reshape.ipynb#melt)\n - [`pivot` and `pivot_table`](reshape.ipynb#pivot-and-pivot-table)\n - [Visualizing Reshaping](reshape.ipynb#visualizing-reshaping)\n - [Exercises](reshape.ipynb#exercises)\n- [Merge](merge.ipynb)\n - [Combining 
Datasets](merge.ipynb#combining-datasets)\n - [`pd.concat`](merge.ipynb#pd-concat)\n - [`pd.merge`](merge.ipynb#pd-merge)\n - [Arguments to `merge`](merge.ipynb#arguments-to-merge)\n - [`df.join`](merge.ipynb#df-join)\n - [Case Study](merge.ipynb#case-study)\n - [Extra Example: Airline Delays](merge.ipynb#extra-example-airline-delays)\n - [Visualizing Merge Operations](merge.ipynb#visualizing-merge-operations)\n - [Exercises](merge.ipynb#exercises)\n- [GroupBy](groupby.ipynb)\n - [Split-Apply-Combine](groupby.ipynb#split-apply-combine)\n - [Case Study: Airline Delays](groupby.ipynb#case-study-airline-delays)\n - [Exercise: Cohort Analysis using Shopify Data](groupby.ipynb#exercise-cohort-analysis-using-shopify-data)\n - [Exercises](groupby.ipynb#exercises)\n- [Time series](timeseries.ipynb)\n - [Intro](timeseries.ipynb#intro)\n - [Parsing Strings as Dates](timeseries.ipynb#parsing-strings-as-dates)\n - [Date Formatting](timeseries.ipynb#date-formatting)\n - [Extracting Data](timeseries.ipynb#extracting-data)\n - [Accessing Date Properties](timeseries.ipynb#accessing-date-properties)\n - [Leads and Lags: `df.shift`](timeseries.ipynb#leads-and-lags-df-shift)\n - [Rolling Computations: `.rolling`](timeseries.ipynb#rolling-computations-rolling)\n - [Changing Frequencies: `.resample`](timeseries.ipynb#changing-frequencies-resample)\n - [Optional: API keys](timeseries.ipynb#optional-api-keys)\n - [Exercises](timeseries.ipynb#exercises)\n- [Intermediate Plotting](matplotlib.ipynb)\n - [Introduction](matplotlib.ipynb#introduction)\n - [The Want Operator: Replicate a Professional Figure](matplotlib.ipynb#the-want-operator-replicate-a-professional-figure)\n - [Data](matplotlib.ipynb#data)\n - [Warmup](matplotlib.ipynb#warmup)\n - [Data Cleaning](matplotlib.ipynb#data-cleaning)\n - [Constructing the Plot](matplotlib.ipynb#constructing-the-plot)\n - [Saving the Figure](matplotlib.ipynb#saving-the-figure)\n - [Exercises](matplotlib.ipynb#exercises)", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
ec76d14f4ab452a8023dc8c8c67e071020649581
10,738
ipynb
Jupyter Notebook
msi_model.ipynb
mdanilevicz/maize_early_yield_prediction
e1090e6555a544a13bec19c974d628efccbcbeca
[ "MIT" ]
1
2021-11-20T00:08:40.000Z
2021-11-20T00:08:40.000Z
msi_model.ipynb
hulaba/maize_early_yield_prediction
e1090e6555a544a13bec19c974d628efccbcbeca
[ "MIT" ]
null
null
null
msi_model.ipynb
hulaba/maize_early_yield_prediction
e1090e6555a544a13bec19c974d628efccbcbeca
[ "MIT" ]
4
2021-07-22T07:12:30.000Z
2022-03-10T17:29:48.000Z
30.856322
165
0.570963
[ [ [ "# Deep learning model based on spectral feature data\n\nThis nb shows the code used to run the spectral deep learning model.\n\nThe data used in this nb was prepared on \"msi_processing.ipynb\"", "_____no_output_____" ], [ "This motebook has the code to run the spectral module individually, it employs fastai and pytorch libraries to create to load the data and train the model.\n\nThe custom functions were created for this project, they are based on fastai and pytorch forum discussions on how to use multispectral images in deep learning.", "_____no_output_____" ] ], [ [ "# Load libraries\n\n%reload_ext autoreload\n%autoreload 2\n%matplotlib inline\n\n# Import libraries\nfrom fastai.vision.all import *\nimport torch\nfrom ipywidgets import IntProgress\nfrom glob import glob\n\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.model_selection import StratifiedKFold\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport os\nimport cv2\n\n# Custom functions\nfrom msi_utils import *\nfrom fold_utils import *\n\n\n# Check that you are using gpu, if available\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\ndevice", "_____no_output_____" ], [ "# Path to where the images are located\npath = Path('/data/g2f_data/input_data/')", "_____no_output_____" ] ], [ [ "## Reference Tables", "_____no_output_____" ] ], [ [ "df_test = pd.read_csv('/data/fielddata/df_test.csv')\ndf_train_val = pd.read_csv('/data/fielddata/df_train_val.csv')", "_____no_output_____" ], [ "# Use random splitter function from fastai\nsplitter = RandomSplitter(seed=42)\nsplits = splitter(range_of(df_train_val))\nsplits", "_____no_output_____" ] ], [ [ "## Kfold validation", "_____no_output_____" ] ], [ [ "# KFOLD VALIDATION\nval_loss = []\nrmse_kfold = []\nrmse_pct_kfold =[]\nr2_kfold=[]\n\nkfold_preds = pd.DataFrame(columns=['predictions', 'target_yield'])\nsplit_list = kfold_splitter(df=df_train_val)\n\n# Callbacks\ncsvlogger = CSVLogger('/data/results/spectral_5fold_metrics.csv', append=True)\nearly_stopping = EarlyStoppingCallback(monitor='valid_loss', patience=5, min_delta=0.01)\ncbs = [csvlogger, early_stopping]\n \nfor i in range(5):\n getter = get_fold(split_list, fold=i)\n # Call MSI dataloader\n msi_fold = DataBlock(blocks = (MSITensorBlock, RegressionBlock),\n get_items = get_npy,\n get_y = get_y,\n splitter = getter)\n msi_dl = msi_fold.dataloaders(df_train_val, bs=8) \n \n # Learner\n model_msi = xresnet18(n_out=1, c_in=13, pretrained=False, sa=True, p=0.5, ndim=2)\n learn_msi = Learner(msi_dl, \n model_msi,\n opt_func=Adam, \n loss_func=root_mean_squared_error,\n metrics=[rmse, R2Score()])\n \n # Disable Fastai progress bar (optional but cleaner)\n with learn_msi.no_bar()and learn_msi.no_logging():\n learn_msi.fit_one_cycle(100, 1e-3, cbs=cbs)\n \n df_ymin, df_ymax = df_train_val['Yield'].min(), df_train_val['Yield'].max()\n val_loss_k, rmse_k, r2score_k = learn_msi.validate()\n val_loss.append(val_loss_k)\n rmse_kfold.append(rmse_k)\n rmse_pct_kfold.append(((rmse_k/(df_ymax - df_ymin))*100))\n r2_kfold.append(r2score_k)\n \n # Extract the predictions and save in vis_results\n ypred, yval = learn_msi.get_preds()\n\n pn = msi_dl.valid_ds.items\n images_id = []\n for i in range(len(pn)):\n name = pn[i].stem\n images_id.append(name)\n \n vis_df = pd.DataFrame()\n vis_df['items'] = images_id\n vis_df['items'] = vis_df['items'].str.replace('id_', '')\n vis_df['predictions'] = ypred.flatten()\n vis_df['target_yield'] = yval\n vis_df = 
vis_df.merge(df_train_val, how='left', left_on='items', right_on='Barcode')\n \n kfold_preds = kfold_preds.append(vis_df)\n\n# Allows you to save the predictions performed on each kfold, and then calculate the desired metrics\nkfold_preds.to_csv('/data/results/spectral_5fold_predictions.csv')", "_____no_output_____" ], [ "# Stratified kfold with emb_ps, ps and wd for around 18 epochs with early stopping\nd ={\"validation loss\":val_loss, \"rmse\": rmse_kfold, \"rmse %\": rmse_pct_kfold, \"r2score\":r2_kfold}\n\nfastkfold = pd.DataFrame(data=d)\nfastkfold['rmse %'] = fastkfold['rmse %'].apply(lambda x: np.mean(x))\nfastkfold.to_csv('/data/results/spetral_5fold_summary_metrics.csv', index=False)\nfastkfold", "_____no_output_____" ] ], [ [ "## Test predictions", "_____no_output_____" ] ], [ [ "data_load = DataBlock(blocks = (MSITensorBlock, RegressionBlock),\n get_items = get_npy,\n get_y = get_y,\n splitter = splitter)\n \nmsi_dls = data_load.dataloaders(df_train_val, bs=8)\nmodel_msi = xresnet18(n_out=1, c_in=13, pretrained=False, sa=True, p=0.5, ndim=2)\nearly_stopping = EarlyStoppingCallback(monitor='valid_loss', patience=3, min_delta=0.01)\n\nlearn_msi = Learner(msi_dls, \n model_msi,\n opt_func=Adam, \n loss_func=root_mean_squared_error,\n metrics=[rmse, R2Score()])", "_____no_output_____" ], [ "learn_msi.fit_one_cycle(100, 1e-3,cbs=early_stopping)", "_____no_output_____" ], [ "# Export and save the model\nlearn_msi.save('/data/model_weights/xresnet18_model')\n\n# If you want to load the model use the command below\n# learn_msi.load('/nbs_dir/g2f/model_weights/VIS_resnet18_v5_8epochs_earlystop')", "_____no_output_____" ] ], [ [ "The prediction of the test (holdout) dataset is made in two parts.", "_____no_output_____" ] ], [ [ "# Part 1- Load the test set\ntest_dls = data_load.dataloaders(df_test)\nlearn_msi.dls.loaders.append(msi_dls.test_dl(test_dls[0].items, with_labels=True))\ndl_testing = learn_msi.dls.test_dl(test_dls[0].items, with_labels=True)\npredicts, targets = learn_msi.get_preds(dl=dl_testing)\nfi = dl_testing.items\n\nimages_id = []\n# Get the items idx\nfor i in range(len(fi)):\n name = fi[i].stem\n images_id.append(name)\n\ntest_results = pd.DataFrame()\ntest_results['Items'] = images_id\ntest_results['Items'] = test_results['Items'].str.replace('id_', '')\ntest_results['Predictions'] = predicts.flatten().tolist()\ntest_results['Target_yield'] = targets\ntest_results = test_results.merge(df_test, how='left', left_on='Items', right_on='Barcode')\n", "_____no_output_____" ], [ "# Part 2 - Repeat the step above with the second hald of the holdout dataset\nlearn_msi.dls.loaders.append(msi_dls.test_dl(test_dls[1].items, with_labels=True))\ndl_testing = learn_msi.dls.test_dl(test_dls[1].items, with_labels=True)\npredicts, targets = learn_msi.get_preds(dl=dl_testing)\nfi = dl_testing.items\n\nimages_id = []\n# Get the items idx\nfor i in range(len(fi)):\n name = fi[i].stem\n images_id.append(name)\n\ntest_results1 = pd.DataFrame()\ntest_results1['Items'] = images_id\ntest_results1['Items'] = test_results1['Items'].str.replace('id_', '')\ntest_results1['Predictions'] = predicts.flatten().tolist()\ntest_results1['Target_yield'] = targets\ntest_results1 = test_results1.merge(df_test, how='left', left_on='Items', right_on='Barcode')", "_____no_output_____" ], [ "test_df = test_results.append(test_results1)\ntest_df.to_csv('/data/results/spetral_prediction_on_holdout_dataset.csv', index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec76d40852d1ec86fa6246609c570962f113aea5
52,158
ipynb
Jupyter Notebook
container_files/demos/Image Processing with Convolutions.ipynb
mldbai/mldb
0554aa390a563a6294ecc841f8026a88139c3041
[ "Apache-2.0" ]
665
2015-12-09T17:00:14.000Z
2022-03-25T07:46:46.000Z
container_files/demos/Image Processing with Convolutions.ipynb
mldbai/mldb
0554aa390a563a6294ecc841f8026a88139c3041
[ "Apache-2.0" ]
797
2015-12-09T19:48:19.000Z
2022-03-07T02:19:47.000Z
container_files/demos/Image Processing with Convolutions.ipynb
mldbai/mldb
0554aa390a563a6294ecc841f8026a88139c3041
[ "Apache-2.0" ]
103
2015-12-25T04:39:29.000Z
2022-02-03T02:55:22.000Z
106.881148
12,314
0.800548
[ [ [ "# Image Processing with Convolutions\n\nIn image processing, most image filters and image transformation use convolutions. Convolutions modify the original matrix of pixels through a pointwise multiplication with a kernel or filter matrix. Wikipedia describes <a href=\"https://en.wikipedia.org/wiki/Kernel_(image_processing)\">convolutions on images</a> as:\n\n> Convolution is the process of multiplying each element of the image with its local neighbors, weighted by the kernel. For example, if we have two three-by-three matrices, one a kernel, and the other an image piece, convolution is the process of flipping both the rows and columns of the kernel and then multiplying locationally similar entries and summing. The [2,2] element of the resulting image would be a weighted combination of all the entries of the image matrix, with weights given by the kernel: \n\n> ![Image of convolution](https://wikimedia.org/api/rest_v1/media/math/render/svg/1a5bdd585d515770c888ea5b4ea07a7a5166cc8d)\n\nAmongst the suite of applications of convolutions, image blurring and sharpening as well as edge detection are the most common. In this demo, we will use MLDB query to efficiently transform images. To do so, we will use the [MNIST database of handwriten digits](http://yann.lecun.com/exdb/mnist/).\n\nIn this demo, we will use the [jseval](../../../../doc/#builtin/sql/ValueExpression.md.html#jseval) function to execute JavaScript code inline with SQL, and the [SQL Expression Function](../../../../doc/#builtin/functions/SqlExpressionFunction.md.html) to persist and reuse the same JavaScript code.", "_____no_output_____" ], [ "The notebook cells below use `pymldb`'s `Connection` class to make [REST API](../../../../doc/#builtin/WorkingWithRest.md.html) calls. You can check out the [Using `pymldb` Tutorial](../../../../doc/nblink.html#_tutorials/Using pymldb Tutorial) for more details.", "_____no_output_____" ] ], [ [ "from pymldb import Connection\nmldb = Connection()", "_____no_output_____" ] ], [ [ "... And other Python librairies", "_____no_output_____" ] ], [ [ "import random\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom IPython.display import display, Latex\nfrom ipywidgets import widgets, interact", "_____no_output_____" ] ], [ [ "## Loading the data\n\nA pickled version of the dataset is available on the [deeplearning.net website](http://deeplearning.net/tutorial/gettingstarted.html).\n\nThe dataset has been unpickled and saved in a public Amazon's S3 cloud storage. Check out MLDB's [Protocol Handlers](../../../../doc/#builtin/Url.md.html) for Files and URLS for more details on loading remote ressources.", "_____no_output_____" ] ], [ [ "data_url_mnist = 'file://mldb/mldb_test_data/digits_data.csv.gz'\n\nprint mldb.put('/v1/procedures/import_digits_mnist', {\n \"type\":\"import.text\",\n \"params\": {\n \"dataFileUrl\": data_url_mnist,\n \"outputDataset\": \"digits_mnist\",\n \"select\": \"{* EXCLUDING(\\\"785\\\")} AS *, \\\"785\\\" AS label\",\n \"runOnCreation\": True,\n }\n})", "<Response [201]>\n" ] ], [ [ "## Taking a random image and starting image manipulation\n\nSimilarly to the first few steps in the [Real-Time Digits Recognizer](../../../../doc/nblink.html#_demos/Real-Time%20Digits%20Recognizer.ipynb) demo, we will display random MNIST digits from the test set. 
At each refresh, we get a randomly selected row using the [`sample` function in a SQL From Expression](../../../../doc/#builtin/sql/FromExpression.md.html).", "_____no_output_____" ] ], [ [ "data = mldb.query(\"\"\"\n SELECT * EXCLUDING(label) \n FROM sample(\n (select * from digits_mnist where rowHash() % 5 = 0),\n {rows: 1}\n )\n\"\"\")\n\nimage = data.as_matrix().reshape(28, 28)\nplt.imshow(image)\nplt.gray()", "_____no_output_____" ] ], [ [ "## Defining the convolution function\n\nA discrete convolution can be defined mathematically as:\n\n$newPixel[i,j] = \\sum_{y=i}^{i+r}\\sum_{x=j}^{j+r}oldPixel[x,y] \\cdot weight[x,y]$\n\nwhere the $weight[]$ matrix (see 'kernelDict' dictionary in a couple of cells below) defines the type of image manipulation and $r$ is the area of effect. Imagine a \"square box\" centered at the pixel that you want to transform. The kernel weighted sum of \"old pixels\" in the \"square box\" gives you a \"new pixel\".\n\nAs seen in the code below, each new pixel in the convolved picture is the weighted sum of the the pixel and its neighboring pixels where the weights are the values in the kernel matrix. \n\nDoing convolutions with custom function of type [SQL Expression Function](../../../../doc/#builtin/functions/SqlExpressionFunction.md.html) and [jseval](../../../../doc/#builtin/sql/ValueExpression.md.html) for inline definition of functions using Javascript allows us to process large amounts of data using the optimizations inherent to MLDB. Convolutions are typically very time consuming operations with \n$O(n\\cdot r^2)$ complexity in this case where n is the number of features and r is the radius (i.e. neighboring pixels).\n\nThere were two steps to creating the function below:\n* JsConvolutionExpr used a jseval built-in function where all the logic resides\n* A 'convolution' SQL Expression Function is created, allowing us to call 'convolution' with a simple [mldb.query](../../../../doc/#builtin/sql/Sql.md.html)", "_____no_output_____" ] ], [ [ "# JavaScript code loosely based on Ivan Kuckir's blog post: http://blog.ivank.net/fastest-gaussian-blur.html\n\ndef create_convolution():\n \n JsConvolutionExpr = \"\"\"\n jseval('\n var row_val = val;\n var dim = Math.sqrt(row_val.length);\n var radius = Math.sqrt(kernel.length);\n \n \n /*************************************\n ******** Function Definition *********\n **************************************/\n\n // input 1D list, output 1D list, pixel matrix dimensions\n function convolution(inList, outList, width, height, radius) {\n\n for (var i = 0; i < height; i++)\n for (var j = 0; j < width; j++) {\n var newPixel = 0;\n var indexW = 0;\n \n for (var yr = i; yr < i + radius; yr++)\n for (var xr = j; xr < j + radius; xr++) {\n \n var y = Math.min(height - 1, Math.max(0, yr));\n var x = Math.min(width - 1, Math.max(0, xr));\n \n newPixel = newPixel + inList[y * width + x] * weights[indexW];\n indexW ++;\n }\n \n new_value = newPixel;\n outList[i * width + j] = new_value;\n }\n return outList;\n } // End of convolution\n \n //Assuring that the 1d row is in the right order\n function arrangeMatrix(inList) {\n \n var length = inList.length;\n var data = new Array(length);\n for (var i = 0; i < length; i++) {\n data[parseInt(inList[i][0][0])] = inList[i][1];\n }\n return data\n }\n \n /*************************************\n ********** Using Functions ***********\n **************************************/\n \n var weights = arrangeMatrix(kernel); // filter matrix\n var matrix = arrangeMatrix(row_val); // my picture\n 
var convolvedMatrix = [];\n \n convolution(matrix, convolvedMatrix, dim, dim, radius);\n\n return convolvedMatrix;',\n 'val, kernel', \n valueExpr, kernel\n ) AS *\n \"\"\"\n\n\n print mldb.put(\"/v1/functions/convolution\", {\n \"type\": \"sql.expression\",\n \"params\": {\n \"expression\": JsConvolutionExpr,\n \"prepared\": True\n }\n })\n \ncreate_convolution()", "<Response [201]>\n" ] ], [ [ "This function will used in the interactive menu in the next section. We will take the image of the digit that we have seen before and apply different filters. You will need to load the cells in this notebook to make it work.", "_____no_output_____" ], [ "## Using the convolution function\n\nWe will first define the type filters or kernels that we want to try.", "_____no_output_____" ] ], [ [ "kernelDict = {\n 'Right Sobel': [-1, 0, 1, -2, 0, 2, -1, 0, 1], \n 'Detect Edges': [1, 1, 1, 1, -8, 1, 1, 1, 1],\n 'Sharpen': [0, -1, 0, -1, 5, -1, 0, -1, 0],\n 'Box Blur': [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1],\n 'Approximated Gaussian Blur': [0.0625, 0.125, 0.0625, 0.125, 0.25, 0.125, 0.0625, 0.125, 0.0625]\n}", "_____no_output_____" ], [ "def convolutionFunc(image_processing):\n \n SQL_Expr = \"\"\"\n SELECT convolution({\n valueExpr: %(data)s,\n kernel: %(kernel)s\n }) AS *\n \"\"\" % {\n \"data\": data.values[0].tolist(),\n \"kernel\": kernelDict[image_processing]\n }\n\n convolvedData = mldb.query(SQL_Expr)\n image = convolvedData.as_matrix().reshape(28, 28)\n plt.imshow(image)", "_____no_output_____" ], [ "options=('Right Sobel', 'Detect Edges', 'Sharpen', 'Box Blur', 'Approximated Gaussian Blur')\ninteract(convolutionFunc, image_processing=kernelDict.keys(), );\nprint \"Choose an image processing option from the drop-down menu\"", "Choose an image processing option from the drop-down menu\n" ] ], [ [ "I found the 'Detect Edges' convolution particularly useful when training image recognition models. This can be useful in many Machine Vision applications.", "_____no_output_____" ], [ "## Convolutions with MLDB's TensorFlow plug-in\n\nNot everyone will want to code their own convolutions from scratch (such as with the `create_convolution()` function above). In fact, given the myriad of tools available, it may save you time and effort to use external librairies. MLDB has integrated the TensorFlow Open Source Library for Machine Intelligence allowing us to leverage some of the great Computer Vision APIs and GPU accelaration that it offers. Let's get started with the same images as before.\n\nFirst, I reshape my image and kernel lists into 4D tensors in the NHWC tensor format. 
Then, I use the [`tf_Conv2D`](../../../../doc/builtin/sql/ValueExpression.md.html#builtinfunctions), the TensorFlow operator that is exposed as an MLDB built-in function directly in SQL.", "_____no_output_____" ] ], [ [ "data_ = data.values[0].reshape(1, 28, 28, 1).tolist() \n# image input must be a [batch, in_height, in_width, in_channels] shaped tensor", "_____no_output_____" ], [ "def TensorFlowConvolution(image_processing):\n \n kernel = np.asarray(kernelDict[image_processing]).reshape(3, 3, 1, 1).tolist() \n # kernel must be a [filter_height, filter_width, in_channels, out_channels] shaped tensor\n strides = [ 1, 1, 1, 1]\n SQL_Expr = \"\"\"\n SELECT tf_Conv2D(\n {input: %(data)s, filter: %(kernel)s}, \n {T: { type: 'DT_FLOAT'}, padding: 'SAME', strides: %(strides)s })\n AS *\n \"\"\" % {\n \"data\": data_,\n \"kernel\": kernel,\n \"strides\": strides\n }\n \n convolvedData = mldb.query(SQL_Expr)\n image = convolvedData.as_matrix().reshape(28, 28)\n plt.imshow(image)", "_____no_output_____" ], [ "options=('Right Sobel', 'Detect Edges', 'Sharpen', 'Box Blur', 'Approximated Gaussian Blur')\ninteract(TensorFlowConvolution, image_processing=kernelDict.keys(), );\nprint \"Choose an image processing option from the drop-down menu\"", "Choose an image processing option from the drop-down menu\n" ] ], [ [ "Here are a few definitions:\n* Batch: The data is sometimes split into batches to parallelize Image Processing.\n* Strides: Stride is a step that the \"square box\" (as described in 'Defining the Convolution Function' section above) will take. A stride has shape [stride_batch, stride_width, stride_height, stride_channel] so [1, 1, 1, 1] will shift the box one pixel at the time for each batch and each channel.\n* Padding: As you may have noticed, sometimes the \"square box\" has elements outside the picture (i.e. for pixels at the boundries of the image). The 'SAME' padding allows the convolution algorithm to go beyond picture borders. Pixels in the padding area will typically be zero and the output image will have the same size as the input image. 'VALID' padding does not allow the \"square box\" to go beyond the picture boundries. In this case, the output picture size will be smaller. For moreinformation, see [this article](http://radio.feld.cvut.cz/matlab/toolbox/images/linfilt4.html).\n* Channel: we have only one channel here because it is grayscale. For images with colors (i.e. RGB), we have 3 channels. See ImageMagick's [Color Basics and Channels](http://www.imagemagick.org/Usage/color_basics/) article for more information.", "_____no_output_____" ], [ "## Where to next?\n\nNow you can move on to the [Real-Time Digits Recognizer](../../../../doc/nblink.html#_demos/Real-Time Digits Recognizer) demo where we'll show the machine learning steps to follow to build *MLPaint*, the real-time digits recognizer plugin.\n\nOtherwise, check out the other [Tutorials and Demos](../../../../doc/#builtin/Demos.md.html).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
ec76d7075dca3322fe71337135f1fb81b8010220
81,771
ipynb
Jupyter Notebook
teaching_material/session_2/session_2_slides.ipynb
tlh957/DO2021
20c615451240a80bc5a2100e15828dfc4163fd49
[ "MIT" ]
20
2021-09-08T12:14:32.000Z
2021-11-19T11:57:39.000Z
teaching_material/session_2/session_2_slides.ipynb
tlh957/DO2021
20c615451240a80bc5a2100e15828dfc4163fd49
[ "MIT" ]
10
2021-08-12T14:41:18.000Z
2021-11-27T12:41:34.000Z
teaching_material/session_2/session_2_slides.ipynb
tlh957/DO2021
20c615451240a80bc5a2100e15828dfc4163fd49
[ "MIT" ]
20
2021-09-12T22:13:22.000Z
2021-12-07T19:27:05.000Z
26.609502
182
0.367844
[ [ [ "# Session 2: Data Structuring 1\n\n*Nicklas Johansen*", "_____no_output_____" ], [ "## Agenda\n\nIn this session, we will work with `pandas` and how to structure your data.\n\n- Tidy Data\n- numpy & pandas modules\n- pandas series\n- pandas DataFrames\n- selecting data\n- indexing & renaming\n\n\nNB\n- Download this file to your computer\n- Rename the file\n- Run code together with Nicklas", "_____no_output_____" ], [ "## Why We Structure Data\n\n### Motivation\n*Why do we want to learn data structuring?*\n\n- Data rarely comes in the form of our model. We need to 'wrangle' our data.\n- Someone has to do this\n- You need to understand how data was prepared to avoid misconclusions\n\n### Tidy Data\n\nGood discussion [here](https://cran.r-project.org/web/packages/tidyr/vignettes/tidy-data.html). The fundamentals:\n- Every column is a variable.\n- Every row is an observation.\n- Every cell is a single value.\n\n<center><img src='https://raw.githubusercontent.com/abjer/sds2017/master/slides/figures/tidy.png'></center>", "_____no_output_____" ], [ "# Numpy and Pandas", "_____no_output_____" ] ], [ [ "# Loading packages\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Numpy Overview\n*What is the [`numpy`](http://www.numpy.org/) module?*\n\n`numpy` is a Python module / library / package\n- fast and versatile for manipulating arrays\n- linear algebra tools available\n- used in some machine learning and statistics packages\n\nExample of creating an array similar to a 2x2 matrix:", "_____no_output_____" ] ], [ [ "table = [[1,2],[3,4]]\narr = np.array(table)\narr", "_____no_output_____" ] ], [ [ "## Pandas Motivation\n*Why use Pandas?*\n\nIt is built on numpy:\n- Simplicity: Pandas is built with Python's simplicity \n- Powerful and fast tools for manipulating data from numpy\n\nImproves on numpy:\n- Clarity, flexibility by using labels (keys)\n- Introduces lots of new, useful tools for data analysis (more on this)\n\nNote: Much more similar to common software for data manipulation like, say, Stata\n\n\n#### Pandas Popularity\n\n<center><img src='https://www.sqlshack.com/wp-content/uploads/2020/08/pandas-in-python-popularity-from-stack-overflow.png' alt=\"Drawing\" style=\"width: 500px;\"/></center>\n\n", "_____no_output_____" ], [ "# DataFrames and Series", "_____no_output_____" ] ], [ [ "# Loading packages\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport requests\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "## Pandas Data Types\n*How do we work with data in Pandas?*\n\n- We use two fundamental data stuctures: \n - ``Series``, and\n - ``DataFrame``.", "_____no_output_____" ], [ "## Pandas Series (1:5)\n*What is a `Series`?*\n- A vector/list with labels for each entry. Example:", "_____no_output_____" ] ], [ [ "L = [1, 1.2, 'abc', True]\n\nmy_series = pd.Series(L)\nmy_series", "_____no_output_____" ] ], [ [ "## Pandas Series (2:5)\n*What are the components in a Series?* \n\nSeries generally consists of three components:\n- `index`: label for each observation\n- `values`: observation data\n- `dtype`: the format of the series (`object` means any data type is allowed)\n - examples are fundamental datatypes (`float`, `int`, `bool`) \n - in terms of precision: `float`>`int`>`bool`\n - this comes at a cost in the form of speed", "_____no_output_____" ], [ "## Pandas Series (3:5)\n*How do we set custom index?* \n\nIndices need not have a sequential structure. 
To see this, consider the following example", "_____no_output_____" ] ], [ [ "num_data = range(0,3) # Generate data\nnum_data", "_____no_output_____" ], [ "indices = ['B', 'C', 'A'] # Generate index names\nindices", "_____no_output_____" ], [ "# Create a pandas series from the two\nmy_series2 = pd.Series(data=num_data, index=indices) \nmy_series2", "_____no_output_____" ] ], [ [ "## Pandas Series (4:5)\n*What data structure does the pandas series remind us of?*\n\nA mix of Python list and dictionary. Consider the following simple transformation:", "_____no_output_____" ], [ "A mix of Python list and dictionary. Consider the following simple transformation:", "_____no_output_____" ] ], [ [ "my_series.to_dict()", "_____no_output_____" ] ], [ [ "*Can we also convert a dictionary to a series?*\n- Yes, we just put into the Series (class) constructor. Example:", "_____no_output_____" ] ], [ [ "d = {'yesterday': 0, 'today': 1, 'tomorrow':3} # Create some dictionary\nd", "_____no_output_____" ], [ "my_series3 = pd.Series(d) # Use the constructor\nmy_series3", "_____no_output_____" ] ], [ [ "## Pandas Series (5:5)\n*How is the series different from a dict?*\n- An important distinction: Series indices are NOT unique! Example:", "_____no_output_____" ] ], [ [ "s = pd.Series(range(3), index=['A','A', 'A']) # Create series with same indices\ns", "_____no_output_____" ], [ "print(s.index.duplicated()) # Check duplicates\n", "[False True True]\n" ], [ "print(s.to_dict()) # So translating to a dict gives...", "{'A': 2}\n" ] ], [ [ "Series are both key and index based (i.e. sequential).\n- Remember that unlike, say, lists, dictionaries are not sequential!", "_____no_output_____" ], [ "## Pandas Data Frames (1:4)\n\n*OK, so now we know what a series is. What is a `DataFrame` then?*\n- A 2d-array (matrix) with labelled columns and rows (which are called indices). Example:", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(data=[[1,2],[3,4]],columns=['A', 'B'])\ndf", "_____no_output_____" ] ], [ [ "## Pandas Data Frames (2:4)\n *How can we really think about this?*\n\nThere are at least two simple ways of seeing the pandas DataFrame:\n1. A numpy array with some additional stuff.\n2. A set of series that have been merged horizontally\n - Note that columns can have different datatypes!\n \nMost functions from `numpy` can be applied directly to Pandas. We can convert a DataFrame to a `numpy` array with `values` attribute.", "_____no_output_____" ] ], [ [ "df.values", "_____no_output_____" ] ], [ [ "*To note*: In Python we can describe it as a *list of lists* or sometimes a *dict of dicts*.", "_____no_output_____" ] ], [ [ "df.values.tolist()", "_____no_output_____" ] ], [ [ "## Pandas Data Frames (3:4)\n*How can larger pandas dataframes be built?*\n- Similar to Series, DataFrames can be built from dictionaries.\n- An important difference: When it comes to creating distinct columns, DataFrames require that each value in the -dictionary is also a dictionary. 
Example:", "_____no_output_____" ] ], [ [ "djan = {'1st': 0, '2nd': 1, '3rd':3} # Create some dictionary for january\ndfeb = {'1st': -3, '2nd': -1, '3rd':-2} # Create some dictionary for february\ndmar = {'1st': 3, '2nd': 5, '3rd':4} # Create some dictionary for march\n\nd = {'january': djan, 'february': dfeb, 'march': dmar} # Create dictionary of dictionaries\nmy_df1 = pd.DataFrame(d) # Use the constructor\nmy_df1", "_____no_output_____" ] ], [ [ "## Pandas Data Frames (4:4)\n\n*What happens if keys are not the same?*\n- No big deal...", "_____no_output_____" ] ], [ [ "djan = {'1st': 0, '2nd': 1, '3rd':3} # Create some dictionary for january\ndfeb = {'1st': -3, '2nd': -1, '3rd':-2} # Create some dictionary for february\ndmar = {'1st': 3, '2nd': 5, '4th':4} # Create some dictionary for march\n\nd = {'january': djan, 'february': dfeb, 'march': dmar} # Create dictionary of dictionaries\nmy_df2 = pd.DataFrame(d) # Use the constructor\nmy_df2", "_____no_output_____" ] ], [ [ "## Series vs DataFrames (1:2)\n*How are Series related to DataFrames?*\n- Putting it simple: Every column is a series. Example, access as key (recommended):", "_____no_output_____" ] ], [ [ "print(df['B'])", "0 2\n1 4\nName: B, dtype: int64\n" ] ], [ [ "Another option is access as object method... smart, but dangerous! Sometimes it works...", "_____no_output_____" ] ], [ [ "print(df.B)", "0 2\n1 4\nName: B, dtype: int64\n" ] ], [ [ "## Series vs DataFrames (2:2)\n*But when wouldn't this work?*\n- To illustrate, add one more column:", "_____no_output_____" ] ], [ [ "df['count'] = 5 # adding new column to df\nprint(df)", " A B count\n0 1 2 5\n1 3 4 5\n" ] ], [ [ "Now print this and see!", "_____no_output_____" ] ], [ [ "print(df.count)", "<bound method DataFrame.count of A B count\n0 1 2 5\n1 3 4 5>\n" ] ], [ [ "Clearly, the key-based option is more robust as variables named same as methods, e.g. `count`, cannot be accesed.", "_____no_output_____" ], [ "## Converting Data Types", "_____no_output_____" ], [ "The data type of a series can be converted with the **astype** method. Some examples:", "_____no_output_____" ] ], [ [ "print(my_series3)\nprint()\nprint(my_series3.astype(np.float))\nprint()\nprint(my_series3.astype(np.str))", "yesterday 0\ntoday 1\ntomorrow 3\ndtype: int64\n\nyesterday 0.0\ntoday 1.0\ntomorrow 3.0\ndtype: float64\n\nyesterday 0\ntoday 1\ntomorrow 3\ndtype: object\n" ] ], [ [ "## Indices and Column Names\n*Why don't we just use numpy arrays and matrices?*\n\n- Inspection of data is quicker\n - What was it that column 18 represented?\n- Keep track of rows after deletion\n - Again.... What was it that column 18 represented!?\n- Indices may contain fundamentally different data structures \n - e.g. 
time series (more about this later)\n - Other datatypes (spatial data $\\rightarrow$ advanced course)\n- Facilitates complex operation (next session):\n - Merging datasets\n - Split-apply-combine (operations on subsets of data)\n - Method chaining (multiple operations in sequence)", "_____no_output_____" ], [ "## Viewing Series and Dataframes\n*How can we view the contents in our dataset?*\n- We can use `print` on our dataset\n- We can visualize patterns by plotting", "_____no_output_____" ], [ "## The Head and Tail\n*But what if we have a large data set with many rows?*\n- Let's load the 'titanic' data set that comes with the *seaborn* library:", "_____no_output_____" ] ], [ [ "import seaborn as sns\ntitanic = sns.load_dataset('titanic')", "_____no_output_____" ] ], [ [ "We now select the *first* 3 rows in a the with the `head` method.", "_____no_output_____" ] ], [ [ "titanic.head(3)", "_____no_output_____" ] ], [ [ "The `tail` method selects the last observations in a DataFrame. ", "_____no_output_____" ], [ "## Row and Column Selection (1:3)\n*How can we select certain rows in a DataFrame using **keys**?*\n\nWith the `loc` attribute. Example:", "_____no_output_____" ] ], [ [ "print(titanic.loc[range(3),['survived', 'age', 'sex']])", " survived age sex\n0 0 22.0 male\n1 1 38.0 female\n2 1 26.0 female\n" ] ], [ [ "## Row and Column Selection (2:3)\n*How can we select certain rows in a DataFrame using **index integers**?* \n\nThe `iloc` method selects rows and columns for provided index integers. ", "_____no_output_____" ] ], [ [ "print(titanic.iloc[10:15,:5])", " survived pclass sex age sibsp\n10 1 3 female 4.0 1\n11 1 1 female 58.0 0\n12 0 3 male 20.0 0\n13 0 3 male 39.0 1\n14 0 3 female 14.0 0\n" ] ], [ [ "## Row and Column Selection (3:3)\n*Other things to be aware of?* \n\nWe can select rows for all columns by not specfifying columns (or specifying `:`). I.e:", "_____no_output_____" ] ], [ [ "titanic.loc[[0,1,2]]", "_____no_output_____" ] ], [ [ "We can also select certain columns by specifying column names:", "_____no_output_____" ] ], [ [ "titanic[['survived']].head(3)", "_____no_output_____" ] ], [ [ "## Modifying DataFrames\n*Why do we want to modify DataFrames?*\n\n- Because data rarely comes in the form we want it.\n", "_____no_output_____" ], [ "## Changing the Index (1:3)\n*How can we change the index of a DataFrame?*", "_____no_output_____" ] ], [ [ "my_df = pd.DataFrame([[1,2], [3,4], [5,6]], columns = ['a', 'b'], index = ['i', 'ii', 'iii'])\nmy_df", "_____no_output_____" ] ], [ [ "We change or set a DataFrame's index using its method `set_index`. Example:", "_____no_output_____" ] ], [ [ "print(my_df.set_index('a'))", " b\na \n1 2\n3 4\n5 6\n" ] ], [ [ "Clearly, doing so, we also implicitly delete the previous index.\n\nAlso, notice the level shift in *b* due to this.", "_____no_output_____" ] ], [ [ "my_df", "_____no_output_____" ] ], [ [ "## Changing the Index (2:3)\n*Is our DataFrame changed? I.e. does it have a new index?*\n- Modifying DataFrames", "_____no_output_____" ] ], [ [ "my_df", "_____no_output_____" ], [ "my_df_a = my_df.set_index('a')\nmy_df_a", "_____no_output_____" ], [ "my_df_a", "_____no_output_____" ] ], [ [ "## Changing the index (3:3)\n\nSometimes we wish to remove the index. 
This is done with the `reset_index` method:", "_____no_output_____" ] ], [ [ "print(my_df_a.reset_index()) # drop=True\nprint()\nprint(my_df_a.reset_index(drop=True)) # drop=True", " a b\n0 1 2\n1 3 4\n2 5 6\n\n b\n0 2\n1 4\n2 6\n" ] ], [ [ "By specifying the keyword `drop=True` we delete the old index.\n\n*To note:* Indices can have multiple levels, in this case `level` can be specified to delete a specific level.", "_____no_output_____" ], [ "## Changing the Column Names\n\nColumn names can simply be changed with `columns`:", "_____no_output_____" ] ], [ [ "print(my_df)\nmy_df.columns = ['A', 'B']\nprint()\nprint(my_df)", " a b\ni 1 2\nii 3 4\niii 5 6\n\n A B\ni 1 2\nii 3 4\niii 5 6\n" ] ], [ [ "DataFrame's also have the function called `rename`.", "_____no_output_____" ] ], [ [ "my_df.rename(columns={'A': 'Aa'}, inplace=True)\nprint(my_df)", " Aa B\ni 1 2\nii 3 4\niii 5 6\n" ] ], [ [ "## Changing all Column Values\n*How can we can update values in a DataFrame?*", "_____no_output_____" ] ], [ [ "print(my_df)\n\n# # set uniform value\nmy_df['B'] = 3\nprint()\nprint(my_df)\n\n# set different values\nmy_df['B'] = [2,17,0] \nprint()\nprint(my_df)", " Aa B\ni 1 2\nii 3 4\niii 5 6\n\n Aa B\ni 1 3\nii 3 3\niii 5 3\n\n Aa B\ni 1 2\nii 3 17\niii 5 0\n" ] ], [ [ "## Changing Specific Column Values\n*How can we can update values in a DataFrame?*", "_____no_output_____" ] ], [ [ "print(my_df)\n\n# loc, iloc\nmy_loc2 = ['i', 'iii']\nmy_df.loc[my_loc2, 'Aa'] = 10\n\nprint()\nprint(my_df)", " Aa B\ni 1 2\nii 3 17\niii 5 0\n\n Aa B\ni 10 2\nii 3 17\niii 10 0\n" ] ], [ [ "## Sorting Data\n\nA DataFrame can be sorted with `sort_values`; this method takes one or more columns to sort by. ", "_____no_output_____" ] ], [ [ "print(my_df.sort_values(by='Aa', ascending=True))", " Aa B\nii 3 17\ni 10 2\niii 10 0\n" ] ], [ [ "Many key word arguments are possible for sort_values, including ascending if for one or more valuable, we want descending values. 
\n\nIn addition, sorting by index is also possible with `sort_index`.", "_____no_output_____" ] ], [ [ "print(my_df.sort_index())", " Aa B\ni 10 2\nii 3 17\niii 10 0\n" ] ], [ [ "## DO2021 COHORT", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ndf = pd.read_excel ('data.xlsx', sheet_name='Complete')\ndf", "_____no_output_____" ], [ "# Hvor mange har svaret på survey?\ndf['Samlet status - Gennemført'].value_counts()", "_____no_output_____" ], [ "# Hvem har ikke svaret?\ndf[df['Samlet status - Gennemført']==0]['E-mail']", "_____no_output_____" ], [ "# Folk elsker pizza?\ndf['Pizza eller Poke Bowl?'].value_counts()", "_____no_output_____" ], [ "# Hvad er MSc vs BSc fodelingen i klassen?\ndf['Er du lige nu indskrevet på bachelor- eller kandidatstudieordning?'].value_counts()", "_____no_output_____" ], [ "To be contiued...", "_____no_output_____" ] ], [ [ "## Assignment 0\n\n- Fundamentals of Python:\n - Data types: numeric, string and boolean\n - Operators: numerical and logical\n - Sequential containers (and a tiny bit on the non-sequential)\n \n \n- Building blocks of code:\n - If-then syntax\n - Loops: for and while\n - Reuseable code: Functions, classes and modules\n\n\n- Data Structuring in Pandas\n - Constructing a pandas Series/DataFrame\n - Reading csv_files\n - Naming columns and rows\n - Selecting columns and rows\n - Numerical operations\n - Sorting data", "_____no_output_____" ], [ "## Associated Readings\n\nPDA, section 5.3: Descriptive statistics and numerical methods\n\nPDA, chapter 7:\n- Handling missing data\n- Data transformations (duplicates, dummies, binning, etc.)\n- String manipulations\n\nPDA, sections 11.1-11.2:\n- Dates and time in Python\n- Working with time series in pandas (time as index)\n\nPDA, sections 12.1, 12.3:\n- Working with categorical data in pandas\n- Method chaining\n\nPML, chapter 4, section 'Handling categorical data':\n- Encoding class labels with `LabelEncoder`\n- One-hot encoding", "_____no_output_____" ], [ "## Group Exercises\nExercises where you can practice your data wrangling skills! \nWill be uploaded after the weekly lecture.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
ec76e2d6d0fcdd9366e3d4c9a9437cff0a75511f
2,456
ipynb
Jupyter Notebook
notebook/break_nested_loops_timeit.ipynb
puyopop/python-snippets
9d70aa3b2a867dd22f5a5e6178a5c0c5081add73
[ "MIT" ]
174
2018-05-30T21:14:50.000Z
2022-03-25T07:59:37.000Z
notebook/break_nested_loops_timeit.ipynb
puyopop/python-snippets
9d70aa3b2a867dd22f5a5e6178a5c0c5081add73
[ "MIT" ]
5
2019-08-10T03:22:02.000Z
2021-07-12T20:31:17.000Z
notebook/break_nested_loops_timeit.ipynb
puyopop/python-snippets
9d70aa3b2a867dd22f5a5e6178a5c0c5081add73
[ "MIT" ]
53
2018-04-27T05:26:35.000Z
2022-03-25T07:59:37.000Z
19.648
80
0.415717
[ [ [ "import itertools", "_____no_output_____" ], [ "n = 100\nl1 = range(n)\nl2 = range(n)\nl3 = range(n)\n\nx = n - 1", "_____no_output_____" ], [ "%%timeit\nfor i in l1:\n for j in l2:\n for k in l3:\n if i == x and j == x and k == x:\n break\n else:\n continue\n break\n else:\n continue\n break", "43 ms ± 1.33 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ], [ "%%timeit\nflag = False\nfor i in l1:\n for j in l2:\n for k in l3:\n if i == x and j == x and k == x:\n flag = True\n break\n if flag:\n break\n if flag:\n break", "45.2 ms ± 3.42 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ], [ "%%timeit\nfor i, j, k in itertools.product(l1, l2, l3):\n if i == x and j == x and k == x:\n break", "55.8 ms ± 458 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ec76e80dd060de57b672624d9706c37158b001ae
45,029
ipynb
Jupyter Notebook
ipynb/11_Inference.ipynb
jungsee/Teaching
921380d5dfecee34eb9246676f0650151e96f159
[ "MIT" ]
1
2020-03-23T23:06:46.000Z
2020-03-23T23:06:46.000Z
ipynb/11_Inference.ipynb
jungsee/Teaching
921380d5dfecee34eb9246676f0650151e96f159
[ "MIT" ]
null
null
null
ipynb/11_Inference.ipynb
jungsee/Teaching
921380d5dfecee34eb9246676f0650151e96f159
[ "MIT" ]
null
null
null
25.878736
5,864
0.51489
[ [ [ "# 推論", "_____no_output_____" ], [ "If you come here without expecting Japanese, please click [Google translated version](https://translate.google.com/translate?hl=&sl=ja&tl=en&u=https%3A%2F%2Fpy4etrics.github.io%2F11_Inference.html) in English or the language of your choice.\n\n---", "_____no_output_____" ] ], [ [ "from scipy.stats import t, f\nimport numpy as np\nimport pandas as pd\nfrom statsmodels.formula.api import ols\nimport matplotlib.pyplot as plt\nimport wooldridge", "_____no_output_____" ] ], [ [ "## 説明:仮説検定", "_____no_output_____" ], [ "仮説検定は以下の手順でおこなう。\n1. 母集団のパラメータについて帰無仮説(Null Hypothesis)と対立仮説(Alternative Hypothesis)を設定する。\n * 帰無仮説は棄却するかどうかの判断の対象となる仮説(例えば,$\\hat{\\beta}_1=0$)\n * 対立仮説は帰無仮説に相反する仮説(例えば,$\\hat{\\beta}_1\\neq 0$)\n1. 適切な検定統計量を決め,判定するための有意水準を予め決める。\n * 有意水準 = 下で説明する**第一種過誤の確率**\n1. 計算した検定統計量の値と有意水準に対応する値を比較することにより,帰無仮説を棄却するかしないかを判断する。2つの可能性しかない。\n * 帰無仮説を棄却する(対立仮説を採択する)。\n * 帰無仮説を棄却できない(対立仮説を採択できない)。", "_____no_output_____" ], [ "---\n検定をおこなう際には間違いが発生する。それをまとめたのが次の表である。", "_____no_output_____" ], [ "| | 帰無仮説は真 | 帰無仮説は偽 | |\n|------------------------|--------------|--------------|---|\n| 帰無仮説を棄却できない | 正しい結論 | 第2種過誤 | |\n| 帰無仮説を棄却 | 第1種過誤 | 正しい結論 | |", "_____no_output_____" ], [ "検定を警察の行動に例えて次のように考えることもできる。\n* 帰無仮説 $\\Rightarrow$ 「無罪」\n* 帰無仮説の棄却 $\\Rightarrow$ 「逮捕」\n* 帰無仮説を棄却しない $\\Rightarrow$ 「逮捕しない」\n* 第1種過誤 $\\Rightarrow$「冤罪で逮捕」\n* 第2種過誤 $\\Rightarrow$「真犯人を取り逃がす」", "_____no_output_____" ], [ "| | 無実は真 | 無実は偽 |\n|------------------------|:------------:|:------------:|\n| 逮捕しない | 正しい結論 | 取り逃す |\n| 逮捕 | 冤罪 | 正しい結論 |", "_____no_output_____" ], [ "* 統計的優位性とは,帰無仮説が正しい場合,帰無仮説の内容が発生した確率が非常に低い場合を指す。即ち,発生イベントが非常に起こりにくく,偶然のの可能性が非常に高い場合である。\n* 第一種過誤は,帰無仮説が正しい場合に棄却すると発生する。慣例でその確率を$\\alpha$で表す。$\\alpha=0.05$がよく使われるが,これを使う場合,間違って正しい帰無仮説を棄却する確率は最大5%まで許容するという意味になる。", "_____no_output_____" ], [ "## $t$検定", "_____no_output_____" ], [ "### 説明:$t$値と$p$値", "_____no_output_____" ], [ "新たな仮定を導入する。\n\n* 仮定6:誤差項は説明変数は独立であり正規分布に従う(誤差項の正規性)\n\n$$u\\sim N\\left(0,\\sigma^2\\right)$$\n\n仮定1〜6を**Classical Linear Model (CLM) Assumptions**(CLM仮定)と呼ぶ。", "_____no_output_____" ], [ "---\nCLM仮定の下では:\n* OLS推定量は正規分布に従う。\n\n $$\n \\hat{\\beta}_j\\sim N\\left(\\beta_j,\\text{Var}\\left(\\hat{\\beta}_j\\right)\\right)\n $$\n \n* OLS推定量の標準誤差を使い標準化すると以下が成立する。\n\n $$\n \\frac{\\hat{\\beta}_j-\\beta_j}{\\text{se}\\left(\\hat{\\beta}_j\\right)}\\sim t_{n-k-1}\n $$\n \n ここで$t_{n-k-1}$は自由度$n-k-1$の$t$分布を表している。$n$は標本の大きさ,$k$は定数項以外の説明変数の数である(定数項がある場合)。", "_____no_output_____" ], [ "---\nこの関係を使うことによりOLS推定量の検定が可能となる。\n\n* 帰無仮説:$H_0:\\hat{\\beta}_j=0$\n * $H_0$が正しい場合,$t$統計量は次の分布に従う。\n \n $$\n t_{\\hat{\\beta}_j}\\equiv\\frac{\\hat{\\beta}_j}{\\text{se}\\left(\\hat{\\beta}_j\\right)}\\sim t_{n-k-1}\n $$\n \n* 対立仮説と棄却ルール($\\alpha=$有意水準):\n * 右側検定:$H_A:\\hat{\\beta}_j>0$\n \n $$\n t_{\\hat{\\beta}_j}>t_c(\\alpha)>0\\quad\\Rightarrow\\quad H_0\\text{を棄却する}\n $$\n \n * 左側検定:$H_A:\\hat{\\beta}_j<0$\n \n $$\n t_{\\hat{\\beta}_j}<t_c(\\alpha)<0\\quad\\Rightarrow\\quad H_0\\text{を棄却する}\n $$\n \n * 両側検定:$H_A:\\hat{\\beta}_j\\neq 0$\n \n $$\n \\left|t_{\\hat{\\beta}_j}\\right|>t_c(\\alpha/2)>0\\quad\\Rightarrow\\quad H_0\\text{を棄却する}\n $$\n\n\nここで,$t_c(\\alpha)$は有意水準$\\alpha$に対応する片側検定の棄却臨界値であり、$t_c(\\alpha/2)$は有意水準$\\alpha$に対応する両側検定の棄却臨界値である。", "_____no_output_____" ], [ "---\n**<$p$値>**\n\n$p$値は,帰無仮説が正しい場合に観測された事象が起こった確率である。即ち、$p$値が非常に低い場合(例えば、0.01)、帰無仮説のもとで発生した事象は非常に起こりにくく,偶然のの可能性が非常に高いことを意味する。また、第一種過誤の確率は非常に低いことも意味する。\n\n$p$値を使う場合の検定手順\n* 棄却するための有意水準 $\\alpha$を決める(例えば,0.05)\n* $p値\\leq \\alpha$の場合,$\\text{H}_0$を棄却", 
"_____no_output_____" ], [ "### $t$値と$p$値の例", "_____no_output_____" ], [ "定数項があり\n* 有意水準:$\\alpha=0.05$\n* 標本の大きさ:$n=30$\n* 説明変数の数(定数項以外):$k=5$\n\nの場合を考えよう。", "_____no_output_____" ], [ "#### $t$値", "_____no_output_____" ], [ "右側検定の場合の$t_c(0.05)$は次の値となる。", "_____no_output_____" ] ], [ [ "a = 0.05\ndof = 30-5-1\n\nt_right = t.ppf(1-a, dof) # t.ppfについてはscipy.statsを参照\nt_right", "_____no_output_____" ] ], [ [ "左側検定の場合の$t_c(0.05)$は次の値となる。", "_____no_output_____" ] ], [ [ "t_left = t.ppf(a, dof)\nt_left", "_____no_output_____" ] ], [ [ "両側検定の場合の$t_c(0.05)$は、$1-a/2$を使うと次のコードとなる。", "_____no_output_____" ] ], [ [ "t_both = t.ppf(1-a/2, dof)\nt_both", "_____no_output_____" ] ], [ [ "$a/2$を使うと次のコードで同じ値を計算できる。", "_____no_output_____" ] ], [ [ "abs(t.ppf(a/2,dof))", "_____no_output_____" ] ], [ [ "`abs()`は絶対値を計算する関数である。", "_____no_output_____" ], [ "#### $p$値", "_____no_output_____" ], [ "$p$値の計算の例として上の`t_right`、`t_left`、`t_both`を使ってみる。\n\n右側検定で$t_{\\hat{\\beta}_j}=$\n`t_right`の場合,$p$値は次の値になる。", "_____no_output_____" ] ], [ [ "1-t.cdf(t_right, dof) # t.cdfについてはscipy.statsを参照", "_____no_output_____" ] ], [ [ "左側検定で$t_{\\hat{\\beta}_j}=$\n`t_left`の場合,$p$値は次の値になる。", "_____no_output_____" ] ], [ [ "t.cdf(t_left, dof)", "_____no_output_____" ] ], [ [ "両側検定で$\\left|t_{\\hat{\\beta}_j}\\right|=$\n`t_both`の場合,$p$値は次の値になる。", "_____no_output_____" ] ], [ [ "2*( 1-t.cdf(t_both, dof) )", "_____no_output_____" ] ], [ [ "```{figure} ./images/t_p_values.jpeg\n:align: center\n\n上で計算した数値の図示(累積分布関数と確率密度関数は$t$分布)。\n```", "_____no_output_____" ], [ "### 回帰分析と$t$検定", "_____no_output_____" ], [ "`gpa1`のデータを使った回帰分析を考えよう。 ", "_____no_output_____" ] ], [ [ "gpa = wooldridge.data('gpa1')\nwooldridge.data('gpa1',description=True)", "name of dataset: gpa1\nno of variables: 29\nno of observations: 141\n\n+----------+--------------------------------+\n| variable | label |\n+----------+--------------------------------+\n| age | in years |\n| soph | =1 if sophomore |\n| junior | =1 if junior |\n| senior | =1 if senior |\n| senior5 | =1 if fifth year senior |\n| male | =1 if male |\n| campus | =1 if live on campus |\n| business | =1 if business major |\n| engineer | =1 if engineering major |\n| colGPA | MSU GPA |\n| hsGPA | high school GPA |\n| ACT | 'achievement' score |\n| job19 | =1 if job <= 19 hours |\n| job20 | =1 if job >= 20 hours |\n| drive | =1 if drive to campus |\n| bike | =1 if bicycle to campus |\n| walk | =1 if walk to campus |\n| voluntr | =1 if do volunteer work |\n| PC | =1 of pers computer at sch |\n| greek | =1 if fraternity or sorority |\n| car | =1 if own car |\n| siblings | =1 if have siblings |\n| bgfriend | =1 if boy- or girlfriend |\n| clubs | =1 if belong to MSU club |\n| skipped | avg lectures missed per week |\n| alcohol | avg # days per week drink alc. 
|\n| gradMI | =1 if Michigan high school |\n| fathcoll | =1 if father college grad |\n| mothcoll | =1 if mother college grad |\n+----------+--------------------------------+\n\nChristopher Lemmon, a former MSU undergraduate, collected these data\nfrom a survey he took of MSU students in Fall 1994.\n" ] ], [ [ "このデータセットを使い,次の問題を考える。ミシガン州立大学で学生のGPA(`colGDP`)は以下の変数とどのような関係にあるのか。\n* `hsGPA`:高校のGPA\n* `ACT`:米国大学進学適性試験\n* `skipped`:週平均の講義欠席回数", "_____no_output_____" ] ], [ [ "formula_gpa = 'colGPA ~ hsGPA + ACT + skipped'\nres_gpa = ols(formula_gpa, data=gpa).fit()", "_____no_output_____" ] ], [ [ "`res_gpa`の属性`tvalues`を使い$t$値を表示してみる。", "_____no_output_____" ] ], [ [ "res_gpa.tvalues", "_____no_output_____" ] ], [ [ "この値を1つ1つ棄却臨界値と比べるのも手間がかかるので、次のような関数を作成し、任意の棄却臨界値に基づき両側検定を行うことも簡単にできる。", "_____no_output_____" ] ], [ [ "def significance_check(res, a=0.05):\n \"\"\"\n 定数項がある場合のt検定をおこなう。\n \n 引数:\n res = statsmodelsによるOLS推定結果\n a = 有意水準(デフォルト:0.05)\n 返り値:\n 帰無仮説を棄却する場合はTrueを返す。\n \"\"\"\n \n dof = res.nobs-res.df_model-1\n result = abs(res.tvalues) > t.ppf(1-a/2, dof)\n \n return pd.Series(result, index=res.tvalues.index)", "_____no_output_____" ] ], [ [ "<コードの説明>\n> `\"\"\"`と`\"\"\"`で囲まれた行は関数の説明であり、`docstring`と呼ばれる。`docstring`は覚え書きのようなもので、何ヶ月後の自分が読んだり、他の人が読む場合に便利となる。また、`statsmodels`などのパッケージは数多くの関数を使っており、それらにも`docstring`が追加されているのが普通である。関数`help()`を使うと`docstring`が表示される。", "_____no_output_____" ] ], [ [ "help(significance_check)", "Help on function significance_check in module __main__:\n\nsignificance_check(res, a=0.05)\n 定数項がある場合のt検定をおこなう。\n \n 引数:\n res = statsmodelsによるOLS推定結果\n a = 有意水準(デフォルト:0.05)\n 返り値:\n 帰無仮説を棄却する場合はTrueを返す。\n\n" ] ], [ [ "次のコードでも同じ内容を確認できる。\n```\nsignificance_check?\n```\n\n関数を実行してみよう。", "_____no_output_____" ] ], [ [ "significance_check(res_gpa, 0.05)", "_____no_output_____" ] ], [ [ "`res_gpa`の属性`pvalues`から$p$値を表示してみよう。", "_____no_output_____" ] ], [ [ "res_gpa.pvalues", "_____no_output_____" ] ], [ [ "$t$値と$p$値を含めて表にまとめたものを表示してみる。", "_____no_output_____" ] ], [ [ "print(res_gpa.summary().tables[1])", "==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept 1.3896 0.332 4.191 0.000 0.734 2.045\nhsGPA 0.4118 0.094 4.396 0.000 0.227 0.597\nACT 0.0147 0.011 1.393 0.166 -0.006 0.036\nskipped -0.0831 0.026 -3.197 0.002 -0.135 -0.032\n==============================================================================\n" ] ], [ [ "* `coef`: OLS推定量($\\hat{\\beta}_j$)\n* `std err`: OLS推定量の標準誤差($\\text{se}(\\hat{\\beta}_j)$)\n* `t`: $t$値\n* `p`: $p$値\n* `[0.025 0.975]`: 信頼区間(次の節を参照) \n\nOLS推定量の標準誤差は属性`.bse`として取得できる。", "_____no_output_____" ] ], [ [ "res_gpa.bse", "_____no_output_____" ] ], [ [ "## 信頼区間", "_____no_output_____" ], [ "### 信頼区間の計算", "_____no_output_____" ], [ "信頼区間は以下のように定義される。\n\n$$\n\\hat{\\beta}_j\\pm t_c(a)\\cdot se\\left(\\hat{\\beta}_j\\right)\n$$\n\nここで,$a$は有意水準である。\n\n**(解釈)**\n\n標本を取りOLS推定値を計算するというプロセスを100回繰り返した場合(それが可能な場合、それぞれの標本は異なり$\\hat{\\beta}_j$も異なることになる),その内,母集団の(真の)$\\beta_j$の値が信頼区間に入るのは$(1-a)\\times 100$回である($a=0.05$の場合,95回)。", "_____no_output_____" ], [ "`gpa`の例を考える。`res_gpa`の属性から信頼区間を取得できる。", "_____no_output_____" ] ], [ [ "print(res_gpa.conf_int())", " 0 1\nIntercept 0.733930 2.045178\nhsGPA 0.226582 0.597050\nACT -0.006171 0.035612\nskipped -0.134523 -0.031703\n" ] ], [ [ "有意水準のオプション`alpha=`を指定することもできる(デフォルトは`0.05`)。有意水準が1%の場合は次のように指定する。", "_____no_output_____" ] ], [ [ 
"print(res_gpa.conf_int(alpha=0.01))", " 0 1\nIntercept 0.523472 2.255635\nhsGPA 0.167121 0.656511\nACT -0.012877 0.042318\nskipped -0.151026 -0.015200\n" ] ], [ [ "OLS推定値は1つの値を返す「点推定」だが,信頼区間は有意水準に基づく「区間推定」である。両方を確認することが大事である。例えば、`ACT`の信頼区間を考えよう。100回推定をした場合,母集団の$\\beta_{ACT}$の値は95回この区間に入ることになるが,有意水準が1%でも5%でも`0`が含まれている。0の可能性が高いことを示している。", "_____no_output_____" ], [ "### 信頼区間の図示", "_____no_output_____" ], [ "`matplotlib`の`errorbar`を使い図示する。", "_____no_output_____" ] ], [ [ "coef = res_gpa.params # 推定値\nvarname = coef.index # 変数名\nci = res_gpa.conf_int() # 信頼区間を ci に入れる\nerror = ci.iloc[:,1]-coef # 信頼区間の幅の半分\n\nplt.errorbar(x=coef[1:], y=varname[1:], xerr=error[1:], fmt='o',capsize=5) # 切片以外のエラーバー\nplt.axvline(0, color='red') # 垂直線\npass", "_____no_output_____" ] ], [ [ "`ACT`の信頼区間に`0`が含まれることが分かる。", "_____no_output_____" ], [ "## $F$検定", "_____no_output_____" ], [ "### 説明", "_____no_output_____" ], [ "回帰分析において複数のOLS推定量に何らかの制約が妥当かどうかを調べる際に使うのが$F$検定である。例えば,次の回帰式の場合\n\n$$\ny=\\beta_0+\\beta_1x_1+\\beta_2x_2+\\beta_3x_3+u\n$$\n\n制約式の例として以下が挙げられる。\n\n$\\beta_1=\\beta_2=\\beta_3=0$\n\n$\\beta_2=\\beta_3=0$\n\n$\\beta_2=2\\beta_3$", "_____no_output_____" ], [ "---\n帰無仮説と対立仮説は次のようになる。\n* 帰無仮説:$\\text{H}_0$:制約が成立する\n* 対立仮説:$\\text{H}_A$:制約は成立しない\n\n検定手順\n1. 制約がない回帰式を推定する。\n2. 制約がある回帰式を推定する。\n3. 次の式に従い$F$値を計算する\n\n $$\n F=\\frac{\\left(SSR_1-SSR_0\\right)/q}{SSR_0/(n-k-1)}\n =\\frac{\\left(R_0^2-R_1^2\\right)/q}{\\left(1-R_0^2\\right)/(n-k-1)}\n \\sim F_{n-k-1}^q\n $$\n \n 左辺の$F$は$F$値、右辺の$F_{n-k-1}^q$は自由度が$q$と$n-k-1$の$F$分布。\n\n4. 有意水準に基づく棄却臨界値と比べる(もしくは$p$値を有意水準と比べる)", "_____no_output_____" ], [ "上の式で\n* $0$:制約がない変数の添え字\n* $1$:制約がある変数の添え字\n* $SSR$:残差変動の平方和(Residual Sum of Squares)\n* $R^2$:決定係数\n* $n$:標本の大きさ\n* $k$:定数項以外の説明変数(制約がない回帰式)\n* $q$:制約式の数\n\n名称:\n* $q$を「分子の自由度」と呼ぶ\n* $n-k-1$を「分母の自由度」と呼ぶ", "_____no_output_____" ], [ "---\n**<棄却ルール>**\n\n$F>F_c(a)$ $\\Rightarrow$ $\\text{H}_0$を棄却\n\n* $F_c(a)$:棄却臨界値\n* $a$:有意水準\n\n(注意)$F$値は必ず正の値をとる。従って,$t$検定の「右片側検定」のパターンしか存在しない。", "_____no_output_____" ], [ "**<$p$値を使う場合の手順>**\n\n* 棄却するための有意水準 $a$を決める(例えば,0.05)\n* $p値<a$の場合,$\\text{H}_0$を棄却", "_____no_output_____" ], [ "### 回帰分析", "_____no_output_____" ], [ "例として米国野球選手(メジャーリーグ)に関するデータ`mlb1`を使う。", "_____no_output_____" ] ], [ [ "mlb1 = wooldridge.data('mlb1')\nwooldridge.data('mlb1',description=True)", "name of dataset: mlb1\nno of variables: 47\nno of observations: 353\n\n+----------+----------------------------+\n| variable | label |\n+----------+----------------------------+\n| salary | 1993 season salary |\n| teamsal | team payroll |\n| nl | =1 if national league |\n| years | years in major leagues |\n| games | career games played |\n| atbats | career at bats |\n| runs | career runs scored |\n| hits | career hits |\n| doubles | career doubles |\n| triples | career triples |\n| hruns | career home runs |\n| rbis | career runs batted in |\n| bavg | career batting average |\n| bb | career walks |\n| so | career strike outs |\n| sbases | career stolen bases |\n| fldperc | career fielding perc |\n| frstbase | = 1 if first base |\n| scndbase | =1 if second base |\n| shrtstop | =1 if shortstop |\n| thrdbase | =1 if third base |\n| outfield | =1 if outfield |\n| catcher | =1 if catcher |\n| yrsallst | years as all-star |\n| hispan | =1 if hispanic |\n| black | =1 if black |\n| whitepop | white pop. in city |\n| blackpop | black pop. in city |\n| hisppop | hispanic pop. 
in city |\n| pcinc | city per capita income |\n| gamesyr | games per year in league |\n| hrunsyr | home runs per year |\n| atbatsyr | at bats per year |\n| allstar | perc. of years an all-star |\n| slugavg | career slugging average |\n| rbisyr | rbis per year |\n| sbasesyr | stolen bases per year |\n| runsyr | runs scored per year |\n| percwhte | percent white in city |\n| percblck | percent black in city |\n| perchisp | percent hispanic in city |\n| blckpb | black*percblck |\n| hispph | hispan*perchisp |\n| whtepw | white*percwhte |\n| blckph | black*perchisp |\n| hisppb | hispan*percblck |\n| lsalary | log(salary) |\n+----------+----------------------------+\n\nCollected by G. Mark Holmes, a former MSU undergraduate, for a term\nproject. The salary data were obtained from the New York Times, April\n11, 1993. The baseball statistics are from The Baseball Encyclopedia,\n9th edition, and the city population figures are from the Statistical\nAbstract of the United States.\n" ] ], [ [ "このデータセットを使い,年俸(`salary`)が次の変数とどう関係するかを考える。\n* `years`:メジャーリーグでプレイした期間(年)\n* `gamesyr`:年間出場回数\n* `bavg`:通算打率\n* `hrunsyr`:1シーズンあたりホームラン数\n* `rbinsyr`:1シーズンあたり打点\n\nまず制約がない回帰分析をおこなう。", "_____no_output_____" ] ], [ [ "formula_0 = 'np.log(salary) ~ years + gamesyr + bavg + hrunsyr + rbisyr'\nres_0 = ols(formula_0, data=mlb1).fit()", "_____no_output_____" ] ], [ [ "#### 例1", "_____no_output_____" ], [ "**<制約>**\n`bavg`, `hrunsyr`, `rbisyr`は`salary`に影響を与えない。\n\n$H_0:\\;\\beta_{\\text{bavg}}=\\beta_{\\text{hrunsyr}}=\\beta_{\\text{rbisyr}}=0$\n\n$H_A:\\;H_0は成立しない$", "_____no_output_____" ], [ "制約がある場合の回帰分析", "_____no_output_____" ] ], [ [ "formula_1 = 'np.log(salary) ~ years + gamesyr'\nres_1 = ols(formula_1, data=mlb1).fit()", "_____no_output_____" ] ], [ [ "##### 手計算", "_____no_output_____" ], [ "$F$値の計算", "_____no_output_____" ] ], [ [ "# 決定係数\nr2_0 = res_0.rsquared\nr2_1 = res_1.rsquared\n\nn = res_0.nobs # 標本の大きさ\nk = res_0.df_model # 定数項以外の説明変数の数\nq = 3 # 制約式の数\n\n# F値:\nF = (r2_0 - r2_1) / (1 - r2_0) * (n - k - 1) / q\nF", "_____no_output_____" ] ], [ [ "$a=0.05$の場合の棄却臨界値", "_____no_output_____" ] ], [ [ "a = 0.05\nf.ppf(1-a, dfn=q, dfd=n-k-1) # f.ppfについてはscipy.statsを参照", "_____no_output_____" ] ], [ [ "従って$H_0$を棄却する。", "_____no_output_____" ], [ "$p$値", "_____no_output_____" ] ], [ [ "1-f.cdf(F, dfn=q, dfd=n-k-1)", "_____no_output_____" ] ], [ [ "##### メソッドを使う", "_____no_output_____" ], [ "制約式の定義", "_____no_output_____" ] ], [ [ "hypotheses = 'bavg = 0, hrunsyr = 0, rbisyr = 0'", "_____no_output_____" ] ], [ [ "`res_0`のメソッド`f_test`を使い、引数に上のセルで定義した制約式`hypotheses`を使う。", "_____no_output_____" ] ], [ [ "res_f_test = res_0.f_test(hypotheses)", "_____no_output_____" ] ], [ [ "`res_f_test`に`F`検定の結果を割り当てたが、そのメソッド`summary()`を使い結果を表示する。", "_____no_output_____" ] ], [ [ "res_f_test.summary()", "_____no_output_____" ] ], [ [ "上の結果にある記号の意味:\n* `F`:$F$値\n* `p`:$p$値\n* `df_denom`:分母の自由度\n* `df_num`:分子の自由度\n\n手計算と同じ結果である。", "_____no_output_____" ], [ "#### 例2", "_____no_output_____" ], [ "**<制約>**\n定数項以外の全ての説明変数は`salary`に影響を与えない。\n\n$H_0:\\;\\beta_{\\text{years}}=\\beta_{\\text{gamesyr}}=\\beta_{\\text{bavg}}=\\beta_{\\text{hrunsyr}}=\\beta_{\\text{rbisyr}}=0$\n\n$H_A:\\;H_0は成立しない$\n\nこの場合,`res_0`の`fvalue`から$F$値,`f_pvalue`から$p$値を属性として取得できる。", "_____no_output_____" ] ], [ [ "res_0.fvalue", "_____no_output_____" ], [ "res_0.f_pvalue", "_____no_output_____" ] ], [ [ "$p$値は非常に小さな値となっており、0.1%の有意水準でも帰無仮説を棄却できる。\n\n例2の結果はOLS推定結果のメソッド`summary()`で表示される表の中にある`F-statistic`と`Prob(F-statistic)`で確認することもできる。", "_____no_output_____" ] ], [ [ 
"print(res_0.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: np.log(salary) R-squared: 0.628\nModel: OLS Adj. R-squared: 0.622\nMethod: Least Squares F-statistic: 117.1\nDate: Tue, 02 Mar 2021 Prob (F-statistic): 2.94e-72\nTime: 20:04:07 Log-Likelihood: -385.11\nNo. Observations: 353 AIC: 782.2\nDf Residuals: 347 BIC: 805.4\nDf Model: 5 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept 11.1924 0.289 38.752 0.000 10.624 11.760\nyears 0.0689 0.012 5.684 0.000 0.045 0.093\ngamesyr 0.0126 0.003 4.742 0.000 0.007 0.018\nbavg 0.0010 0.001 0.887 0.376 -0.001 0.003\nhrunsyr 0.0144 0.016 0.899 0.369 -0.017 0.046\nrbisyr 0.0108 0.007 1.500 0.134 -0.003 0.025\n==============================================================================\nOmnibus: 6.816 Durbin-Watson: 1.265\nProb(Omnibus): 0.033 Jarque-Bera (JB): 10.197\nSkew: -0.068 Prob(JB): 0.00610\nKurtosis: 3.821 Cond. No. 2.09e+03\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n[2] The condition number is large, 2.09e+03. This might indicate that there are\nstrong multicollinearity or other numerical problems.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
ec770f3d57fe5670ddd6ea46d39f3cdecdad8669
400,628
ipynb
Jupyter Notebook
notebooks/ls-0.1-train_random_forest.ipynb
TheKaggleKings/kaggle-data-science-bowl
d21a6871d6857f3dff7409f6d4f80df51398034f
[ "FTL" ]
null
null
null
notebooks/ls-0.1-train_random_forest.ipynb
TheKaggleKings/kaggle-data-science-bowl
d21a6871d6857f3dff7409f6d4f80df51398034f
[ "FTL" ]
null
null
null
notebooks/ls-0.1-train_random_forest.ipynb
TheKaggleKings/kaggle-data-science-bowl
d21a6871d6857f3dff7409f6d4f80df51398034f
[ "FTL" ]
null
null
null
135.852153
94,016
0.819219
[ [ [ "import pandas as pd\nimport sklearn\nimport numpy as np\nfrom sklearn import preprocessing\nimport sklearn.metrics as metrics\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder", "_____no_output_____" ], [ "features_orig = pd.read_pickle(\"../data/processed/features.pkl\")\nfeatures_orig.head(2)", "_____no_output_____" ], [ "# data = pd.read_pickle(\"../data/processed/memory_optimized_data.pkl\")", "_____no_output_____" ], [ "categories_of_titles=data[['title','type','world']]\ncategories_of_titles=categories_of_titles.drop_duplicates('title')\ncategories_of_titles=categories_of_titles.set_index('title')\ncategories_of_titles", "_____no_output_____" ], [ "#Filter only for a given world\n# features=features_orig[['accuracy_group','title']+categories_of_titles[categories_of_titles['world']=='CRYSTALCAVES'].index.to_list()]\n# features=features[features['title']=='Cart Balancer (Assessment)']\n\n\nfeatures=features_orig\nfeatures", "_____no_output_____" ], [ "# Labels are the values we want to predict\nlabels = np.array(features['accuracy_group'])\n# Remove the labels from the features\n# axis 1 refers to the columns\nfeatures= features.drop(['accuracy_group','accuracy','num_correct','num_incorrect','installation_id','game_session'], axis = 1)\n\n\n# #Filter only for a given world\n# features= features.drop(['accuracy_group','title'], axis = 1)\n\n\n# # Saving feature names for later use\n# feature_list = list(features.columns)", "_____no_output_____" ], [ "features=pd.get_dummies(features, prefix=['title'], columns=['title'])", "_____no_output_____" ], [ "features.head(2)", "_____no_output_____" ], [ "# Convert to numpy array\nfeatures_copy=features.copy()\nfeatures = np.array(features)", "_____no_output_____" ], [ "# Using Skicit-learn to split data into training and testing sets\nfrom sklearn.model_selection import train_test_split\n# Split the data into training and testing sets\ntrain_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size = 0.25, random_state = 42)", "_____no_output_____" ], [ "print('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\n", "Training Features Shape: (13267, 49)\nTraining Labels Shape: (13267,)\nTesting Features Shape: (4423, 49)\nTesting Labels Shape: (4423,)\n" ], [ "# Import the model we are using\nfrom sklearn.ensemble import RandomForestRegressor\n# Instantiate model with 1000 decision trees\nrf = RandomForestRegressor(n_estimators = 1000, random_state = 42)\n# Train the model on training data\nrf.fit(train_features, train_labels);", "_____no_output_____" ], [ "# Use the forest's predict method on the test data\npredictions = rf.predict(test_features)", "_____no_output_____" ], [ "def prepare_for_comparison(predictions,labels):\n predictions_formatted=predictions.round()\n labels_formatted=labels.round()\n labels_formatted_pd=pd.DataFrame(data=labels_formatted,columns=[['labels']])\n print(labels_formatted_pd.tail())\n predictions_formatted_pd=pd.DataFrame(predictions_formatted,columns=[['predictions']])\n combined_df=pd.concat([predictions_formatted_pd,labels_formatted_pd],axis=1)\n combined_df.columns=['predictions', 'labels']\n combined_df['predictions']=combined_df['predictions'].map(lambda x : int(x))\n \n return combined_df", "_____no_output_____" ], [ "combined = prepare_for_comparison(predictions,test_labels)", " labels\n4418 
3\n4419 3\n4420 3\n4421 3\n4422 3\n" ], [ "predictions_train = rf.predict(train_features)", "_____no_output_____" ], [ "combined_train = prepare_for_comparison(predictions_train,train_labels)", " labels\n13262 2\n13263 3\n13264 3\n13265 3\n13266 3\n" ], [ "comparison=combined['predictions']==combined['labels']\nround(comparison.sum()/len(comparison),2)", "_____no_output_____" ], [ "comparison=combined_train['predictions']==combined_train['labels']\nround(comparison.sum()/len(comparison),2)", "_____no_output_____" ], [ "print(sklearn.metrics.classification_report(combined['labels'], combined['predictions']))", " precision recall f1-score support\n\n 0 0.79 0.24 0.36 1050\n 1 0.23 0.43 0.30 611\n 2 0.16 0.53 0.25 564\n 3 0.76 0.38 0.51 2198\n\n accuracy 0.37 4423\n macro avg 0.48 0.39 0.35 4423\nweighted avg 0.62 0.37 0.41 4423\n\n" ], [ "print(sklearn.metrics.classification_report(combined_train['labels'], combined_train['predictions']))", " precision recall f1-score support\n\n 0 0.98 0.50 0.66 3179\n 1 0.38 0.61 0.47 1800\n 2 0.26 0.72 0.38 1641\n 3 0.94 0.60 0.73 6647\n\n accuracy 0.59 13267\n macro avg 0.64 0.61 0.56 13267\nweighted avg 0.79 0.59 0.64 13267\n\n" ], [ "print(round(len(combined_train[combined_train['labels']==0])/len(combined_train),2))\nprint(round(len(combined_train[combined_train['labels']==1])/len(combined_train),2))\nprint(round(len(combined_train[combined_train['labels']==2])/len(combined_train),2))\nprint(round(len(combined_train[combined_train['labels']==3])/len(combined_train),2))", "0.13\n0.08\n0.11\n0.67\n" ] ], [ [ "## Visualisation and Interpretation", "_____no_output_____" ] ], [ [ "# estimator = rf.estimators_[5]\n\n# from sklearn.tree import export_graphviz\n# # Export as dot file\n# export_graphviz(estimator, out_file='tree.dot', \n# feature_names = features_copy.columns,\n# class_names = ['group_acc'],\n# rounded = True, proportion = False, \n# precision = 2, filled = True)\n# import pydot\n# import os\n# i_tree=0\n# for tree_in_forest in rf.estimators_[5:6]:\n# export_graphviz(tree_in_forest,out_file='tree.dot',\n# feature_names=features_copy.columns,\n# filled=True,\n# rounded=True)\n# (graph,) = pydot.graph_from_dot_file('tree.dot')\n# name = 'tree' + str(i_tree)\n# graph.write_png(name+ '.png')\n# os.system('dot -Tpng tree.dot -o tree.png')\n# i_tree +=1\n\n# from IPython.display import Image\n# Image(filename = 'tree.png')", "_____no_output_____" ], [ "features_copy.columns", "_____no_output_____" ], [ "\nimport matplotlib.pyplot as plt\ncol = features_copy.columns\n#modelname.feature_importance_\ny = estimator.feature_importances_\n#plot\nfig, ax = plt.subplots() \nwidth = 0.4 # the width of the bars \nind = np.arange(len(y)) # the x locations for the groups\nax.barh(ind, y, width, color='green')\nax.set_yticks(ind+width/10)\nax.set_yticklabels(col, minor=False)\nplt.title('Feature importance in RandomForest Classifier')\nplt.xlabel('Relative importance')\nplt.ylabel('feature') \nplt.figure(figsize=(20,20))\nfig.set_size_inches(20, 20, forward=True)", "_____no_output_____" ], [ "print(col)", "Index(['12 Monkeys', 'Air Show', 'All Star Sorting', 'Balancing Act',\n 'Bird Measurer (Assessment)', 'Bottle Filler (Activity)', 'Bubble Bath',\n 'Bug Measurer (Activity)', 'Cart Balancer (Assessment)',\n 'Cauldron Filler (Assessment)', 'Chest Sorter (Assessment)',\n 'Chicken Balancer (Activity)', 'Chow Time', 'Costume Box',\n 'Crystal Caves - Level 1', 'Crystal Caves - Level 2',\n 'Crystal Caves - Level 3', 'Crystals Rule', 'Dino Dive', 'Dino Drink',\n 'Egg 
Dropper (Activity)', 'Fireworks (Activity)',\n 'Flower Waterer (Activity)', 'Happy Camel', 'Heavy, Heavier, Heaviest',\n 'Honey Cake', 'Leaf Leader', 'Lifting Heavy Things',\n 'Magma Peak - Level 1', 'Magma Peak - Level 2',\n 'Mushroom Sorter (Assessment)', 'Ordering Spheres', 'Pan Balance',\n 'Pirate's Tale', 'Rulers', 'Sandcastle Builder (Activity)',\n 'Scrub-A-Dub', 'Slop Problem', 'Treasure Map',\n 'Tree Top City - Level 1', 'Tree Top City - Level 2',\n 'Tree Top City - Level 3', 'Watering Hole (Activity)',\n 'Welcome to Lost Lagoon!', 'title_Bird Measurer (Assessment)',\n 'title_Cart Balancer (Assessment)',\n 'title_Cauldron Filler (Assessment)', 'title_Chest Sorter (Assessment)',\n 'title_Mushroom Sorter (Assessment)'],\n dtype='object')\n" ], [ "results=pd.DataFrame(y,col)\nresults", "_____no_output_____" ], [ "categories_of_titles", "_____no_output_____" ], [ "results_total=results.join(categories_of_titles)\nresults_total", "_____no_output_____" ], [ "results_total[results_total['world']=='MAGMAPEAK']", "_____no_output_____" ], [ "def create_plot(world,color):\n fig, ax = plt.subplots() \n width = 0.4 # the width of the bars \n ind = np.arange(len(results_total[results_total['world']==world][0])) # the x locations for the groups\n ax.barh(ind, results_total[results_total['world']==world][0], width, color=color)\n ax.set_yticks(ind+width/10)\n ax.set_yticklabels(results_total[results_total['world']==world].index.tolist(), minor=False)\n plt.title('Feature importance in RandomForest Classifier')\n plt.xlabel('Relative importance')\n plt.ylabel('feature') \n plt.xlim(0, 0.03)\n plt.figure(figsize=(10,10))\n fig.set_size_inches(10, 10, forward=True)\n", "_____no_output_____" ], [ "create_plot('MAGMAPEAK','red')\ncreate_plot('TREETOPCITY','green')\ncreate_plot('CRYSTALCAVES','blue')", "_____no_output_____" ], [ "def create_plot_type(type_interaction,color):\n fig, ax = plt.subplots() \n width = 0.4 # the width of the bars \n ind = np.arange(len(results_total[results_total['type']==type_interaction][0])) # the x locations for the groups\n ax.barh(ind, results_total[results_total['type']==type_interaction][0], width, color=color)\n ax.set_yticks(ind+width/10)\n ax.set_yticklabels(results_total[results_total['type']==type_interaction].index.tolist(), minor=False)\n plt.title('Feature importance in RandomForest Classifier')\n plt.xlabel('Relative importance')\n plt.ylabel('feature') \n plt.xlim(0, 0.03)\n plt.figure(figsize=(10,10))\n fig.set_size_inches(10, 10, forward=True)\n", "_____no_output_____" ], [ "results_total[results_total['type']=='Activity'].index.tolist()", "_____no_output_____" ], [ "create_plot_type('Activity','red')\ncreate_plot_type('Game','green')\ncreate_plot_type('Clip','blue')\ncreate_plot_type('Assessment','pink')\n", "_____no_output_____" ] ] ]
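The importance plots above read feature_importances_ from a single tree (rf.estimators_[5]); a minimal sketch of the forest-level alternative, assuming the fitted RandomForestRegressor rf and the feature names in features_copy.columns from the cells above:

import numpy as np
import pandas as pd

# Mean decrease in impurity averaged over all trees, plus the spread across trees.
importances = pd.Series(rf.feature_importances_, index=features_copy.columns)
std = np.std([tree.feature_importances_ for tree in rf.estimators_], axis=0)
summary = pd.DataFrame({'importance': importances, 'std': std})
print(summary.sort_values('importance', ascending=False).head(15))

Averaging over the whole ensemble is usually less noisy than inspecting one tree, and the per-tree standard deviation gives a rough sense of how stable each ranking is.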
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec771c22583f863a235211c76c100fb204d8d6c0
10,228
ipynb
Jupyter Notebook
COVID19-Daily-Cases.ipynb
lgfunderburk/Test-Repository
32be453e8e3f4f50d8a8e998ed5320059972ed86
[ "MIT" ]
null
null
null
COVID19-Daily-Cases.ipynb
lgfunderburk/Test-Repository
32be453e8e3f4f50d8a8e998ed5320059972ed86
[ "MIT" ]
null
null
null
COVID19-Daily-Cases.ipynb
lgfunderburk/Test-Repository
32be453e8e3f4f50d8a8e998ed5320059972ed86
[ "MIT" ]
null
null
null
23.351598
191
0.542628
[ [ [ "# COVID 19 Daily Cases Using Open Data\n\n##### Author: Laura G. Funderburk\n##### Date: July 28 2020\n##### Last modified: July 29 2020\n\n### Intro\n\nIn this notebook I will visualize COVID 19 Daily cases in Canada and other countries using Python. \n\n### Source\n\nCOVID-19 Data Repository by the Center for Systems Science and Engineering (CSSE) at Johns Hopkins University https://github.com/CSSEGISandData/COVID-19.\n\nDong E, Du H, Gardner L. An interactive web-based dashboard to track COVID-19 in real time. Lancet Infect Dis; published online Feb 19. https://doi.org/10.1016/S1473-3099(20)30120-1.\n\nThis notebook uses an API implemented by https://github.com/CSSEGISandData/COVID-19/issues/851", "_____no_output_____" ] ], [ [ "# Importing libraries\nimport requests as r \nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n#do this if you don't have the latest pandas version\n#from pandas.io.json import json_normalize", "_____no_output_____" ] ], [ [ "Now we are going to download the data using the API. ", "_____no_output_____" ] ], [ [ "# Get API LINK with confirmed cases\nAPI_LINK= \"https://covid19api.herokuapp.com/deaths\"\n# Pull data\njson_data = r.get(API_LINK).json()", "_____no_output_____" ] ], [ [ "Once we have downloaded the data, we will format it into a pandas dataframe using the `locations` column.", "_____no_output_____" ] ], [ [ "confirmed_df = pd.json_normalize(json_data,record_path=['locations'])\n# do this if you don't have the latest pandas version\n# json_nomrmalize(json_data,record_path=['locations'])", "_____no_output_____" ] ], [ [ "We will then reset the index to the `country` column.", "_____no_output_____" ] ], [ [ "confirmed_df.set_index('country',inplace=True)", "_____no_output_____" ] ], [ [ "This is what our data looks like:", "_____no_output_____" ] ], [ [ "confirmed_df.head()", "_____no_output_____" ] ], [ [ "We next need to remove the \"coordinates\" and \"history\" prefix - this will make plotting and manipulating our data easier. \n\nWe will define a function to remove the prefix for us. ", "_____no_output_____" ] ], [ [ "# Define a function to drop the history.prefix\n# Create function drop_prefix\ndef drop_prefix(self, prefix):\n self.columns = self.columns.str.lstrip(prefix)\n return self\n\n# Call function\npd.core.frame.DataFrame.drop_prefix = drop_prefix", "_____no_output_____" ] ], [ [ "We want to clean up our data a bit - let's remove prefices, and sort by date. ", "_____no_output_____" ] ], [ [ "# Define function which removes history. 
prefix, and orders the column dates in ascending order\ndef order_dates(flat_df):\n \"\"\"This function takes as input a dataframe containing\n daily COVID 19 cases and as output generates a dataframe\n ordered by date, where prefices history and coordinated \n are removed\"\"\"\n # Drop prefix\n flat_df.drop_prefix('history.')\n flat_df.drop_prefix(\"coordinates.\")\n # Isolate dates columns\n flat_df.iloc[:,6:].columns = pd.to_datetime(flat_df.iloc[:,6:].columns)\n # Transform to datetim format\n sub = flat_df.iloc[:,6:]\n sub.columns = pd.to_datetime(sub.columns)\n # Sort\n sub2 = sub.reindex(sorted(sub.columns), axis=1)\n sub3 = flat_df.reindex(sorted(flat_df.columns),axis=1).iloc[:,-5:]\n # Concatenate\n final = pd.concat([sub2,sub3], axis=1, sort=False)\n return final", "_____no_output_____" ], [ "final = order_dates(confirmed_df)\nfinal.head()", "_____no_output_____" ], [ "condition = final.index=='US'\nfinal[condition]", "_____no_output_____" ], [ "transposed_final = final[final.index=='Canada'].set_index(\"province\").T.iloc[:-4,]", "_____no_output_____" ], [ "transposed_final.head()", "_____no_output_____" ] ], [ [ "## Visualizing Total (Cumulative) Cases per Province\n\nRun the cell below to get the provinces. ", "_____no_output_____" ] ], [ [ "transposed_final.columns", "_____no_output_____" ] ], [ [ "Select one of the provinces from the list, and enter it in the `province` variable in the code below.", "_____no_output_____" ] ], [ [ "province = \"British Columbia\"\npx.scatter(transposed_final,\\\n x=transposed_final.index,\n y=province,\n title='Cumulative Cases in ' + str(province),\n labels=\n {\"x\":\"Time (daily)\",\n str(province):\"Number of reported deaths in \"\n + str(province)})", "_____no_output_____" ] ], [ [ "## Observations\n\nWe see that for most provinces there is an increase in the cumulative number of cases between January and July 2020.", "_____no_output_____" ], [ "## Total Cumulative and Non-Cumulative Cases in the Country\n\nIn this section we will visualize cumulative and daily (non-cumulative) cases in Canada. ", "_____no_output_____" ] ], [ [ "transposed_final.head(1)", "_____no_output_____" ], [ "# Getting cumulative cases\ntransposed_final[\"TotalDailyCase\"] = transposed_final.sum(axis=1)", "_____no_output_____" ], [ "px.scatter(transposed_final,\n x=transposed_final.index,\n y=\"TotalDailyCase\",\n title=\"Total (cumulative) COVID19 Reported Deaths in Canada \",\n labels={\"x\":\"Time (daily)\",\n \"TotalDailyCase\": \"Total number of reported deaths\"})", "_____no_output_____" ] ], [ [ "We want to get more granularity with respect to our cases. \n\nLet's look at non-cumulative reported infections. \n\n___\n\n## Non-cumulative cases in Canada", "_____no_output_____" ], [ "## Final remarks\n\nWe observe a first wave of COVID 19 cases during March 2020 to June 2020, followed by what seems to be a second wave. ", "_____no_output_____" ] ] ]
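The "Non-cumulative cases in Canada" heading above has no accompanying code; a minimal sketch of one way to obtain daily counts, assuming the transposed_final frame (with its cumulative TotalDailyCase column) from the cells above and a datetime-like index:

import plotly.express as px

# Daily new deaths are the first difference of the cumulative series.
daily = transposed_final['TotalDailyCase'].diff().fillna(0)
px.bar(x=daily.index, y=daily.values,
       title='Daily (non-cumulative) COVID-19 reported deaths in Canada',
       labels={'x': 'Time (daily)', 'y': 'New reported deaths per day'})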
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
ec77271cce20186c16647786ea49b91426ad7646
362,164
ipynb
Jupyter Notebook
assignment2/BatchNormalization.ipynb
kamikat/cs231n
0346efaeddd4084d1e5370a8640a160a1cc77ea7
[ "WTFPL" ]
null
null
null
assignment2/BatchNormalization.ipynb
kamikat/cs231n
0346efaeddd4084d1e5370a8640a160a1cc77ea7
[ "WTFPL" ]
null
null
null
assignment2/BatchNormalization.ipynb
kamikat/cs231n
0346efaeddd4084d1e5370a8640a160a1cc77ea7
[ "WTFPL" ]
null
null
null
501.612188
178,084
0.925694
[ [ [ "# Batch Normalization\nOne way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3].\n\nThe idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.\n\nThe authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.\n\nIt is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. 
To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.\n\n[3] Sergey Ioffe and Christian Szegedy, \"Batch Normalization: Accelerating Deep Network Training by Reducing\nInternal Covariate Shift\", ICML 2015.", "_____no_output_____" ] ], [ [ "# As usual, a bit of setup\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cs231n.classifiers.fc_net import *\nfrom cs231n.data_utils import get_CIFAR10_data\nfrom cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array\nfrom cs231n.solver import Solver\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x, y):\n \"\"\" returns relative error \"\"\"\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))", "run the following from the cs231n directory and try again:\npython setup.py build_ext --inplace\nYou may also need to restart your iPython kernel\n" ], [ "# Load the (preprocessed) CIFAR10 data.\n\ndata = get_CIFAR10_data()\nfor k, v in data.iteritems():\n print '%s: ' % k, v.shape", "X_val: (1000, 3, 32, 32)\nX_train: (49000, 3, 32, 32)\nX_test: (1000, 3, 32, 32)\ny_val: (1000,)\ny_train: (49000,)\ny_test: (1000,)\n" ] ], [ [ "## Batch normalization: Forward\nIn the file `cs231n/layers.py`, implement the batch normalization forward pass in the function `batchnorm_forward`. Once you have done so, run the following to test your implementation.", "_____no_output_____" ] ], [ [ "# Check the training-time forward pass by checking means and variances\n# of features both before and after batch normalization\n\n# Simulate the forward pass for a two-layer network\nN, D1, D2, D3 = 200, 50, 60, 3\nX = np.random.randn(N, D1)\nW1 = np.random.randn(D1, D2)\nW2 = np.random.randn(D2, D3)\na = np.maximum(0, X.dot(W1)).dot(W2)\n\nprint 'Before batch normalization:'\nprint ' means: ', a.mean(axis=0)\nprint ' stds: ', a.std(axis=0)\n\n# Means should be close to zero and stds close to one\nprint 'After batch normalization (gamma=1, beta=0)'\na_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})\nprint ' mean: ', a_norm.mean(axis=0)\nprint ' std: ', a_norm.std(axis=0)\n\n# Now means should be close to beta and stds close to gamma\ngamma = np.asarray([1.0, 2.0, 3.0])\nbeta = np.asarray([11.0, 12.0, 13.0])\na_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})\nprint 'After batch normalization (nontrivial gamma, beta)'\nprint ' means: ', a_norm.mean(axis=0)\nprint ' stds: ', a_norm.std(axis=0)", "Before batch normalization:\n means: [-24.95082476 3.2550613 -5.38759126]\n stds: [ 33.42661165 31.58890584 34.22524032]\nAfter batch normalization (gamma=1, beta=0)\n mean: [ -1.14352972e-16 4.91273688e-17 1.11022302e-18]\n std: [ 1. 0.99999999 1. ]\nAfter batch normalization (nontrivial gamma, beta)\n means: [ 11. 12. 13.]\n stds: [ 1. 
1.99999999 2.99999999]\n" ], [ "# Check the test-time forward pass by running the training-time\n# forward pass many times to warm up the running averages, and then\n# checking the means and variances of activations after a test-time\n# forward pass.\n\nN, D1, D2, D3 = 200, 50, 60, 3\nW1 = np.random.randn(D1, D2)\nW2 = np.random.randn(D2, D3)\n\nbn_param = {'mode': 'train'}\ngamma = np.ones(D3)\nbeta = np.zeros(D3)\nfor t in xrange(50):\n X = np.random.randn(N, D1)\n a = np.maximum(0, X.dot(W1)).dot(W2)\n batchnorm_forward(a, gamma, beta, bn_param)\nbn_param['mode'] = 'test'\nX = np.random.randn(N, D1)\na = np.maximum(0, X.dot(W1)).dot(W2)\na_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)\n\n# Means should be close to zero and stds close to one, but will be\n# noisier than training-time forward passes.\nprint 'After batch normalization (test-time):'\nprint ' means: ', a_norm.mean(axis=0)\nprint ' stds: ', a_norm.std(axis=0)", "After batch normalization (test-time):\n means: [-0.15254185 -0.03694944 -0.07738367]\n stds: [ 1.07207763 1.03093424 1.05288691]\n" ] ], [ [ "## Batch Normalization: backward\nNow implement the backward pass for batch normalization in the function `batchnorm_backward`.\n\nTo derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.\n\nOnce you have finished, run the following to numerically check your backward pass.", "_____no_output_____" ] ], [ [ "# Gradient check batchnorm backward pass\n\nN, D = 4, 5\nx = 5 * np.random.randn(N, D) + 12\ngamma = np.random.randn(D)\nbeta = np.random.randn(D)\ndout = np.random.randn(N, D)\n\nbn_param = {'mode': 'train'}\nfx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]\nfg = lambda a: batchnorm_forward(x, gamma, beta, bn_param)[0]\nfb = lambda b: batchnorm_forward(x, gamma, beta, bn_param)[0]\n\ndx_num = eval_numerical_gradient_array(fx, x, dout)\nda_num = eval_numerical_gradient_array(fg, gamma, dout)\ndb_num = eval_numerical_gradient_array(fb, beta, dout)\n\n_, cache = batchnorm_forward(x, gamma, beta, bn_param)\ndx, dgamma, dbeta = batchnorm_backward(dout, cache)\nprint 'dx error: ', rel_error(dx_num, dx)\nprint 'dgamma error: ', rel_error(da_num, dgamma)\nprint 'dbeta error: ', rel_error(db_num, dbeta)", "dx error: 6.76838322071e-10\ndgamma error: 1.86190201951e-11\ndbeta error: 3.27560641853e-12\n" ] ], [ [ "## Batch Normalization: alternative backward\nIn class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper.\n\nSurprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. After doing so, implement the simplified batch normalization backward pass in the function `batchnorm_backward_alt` and compare the two implementations by running the following. 
Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.\n\nNOTE: You can still complete the rest of the assignment if you don't figure this part out, so don't worry too much if you can't get it.", "_____no_output_____" ] ], [ [ "N, D = 100, 500\nx = 5 * np.random.randn(N, D) + 12\ngamma = np.random.randn(D)\nbeta = np.random.randn(D)\ndout = np.random.randn(N, D)\n\nbn_param = {'mode': 'train'}\nout, cache = batchnorm_forward(x, gamma, beta, bn_param)\n\nt1 = time.time()\ndx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)\nt2 = time.time()\ndx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)\nt3 = time.time()\n\nprint 'dx difference: ', rel_error(dx1, dx2)\nprint 'dgamma difference: ', rel_error(dgamma1, dgamma2)\nprint 'dbeta difference: ', rel_error(dbeta1, dbeta2)\nprint 'speedup: %.2fx' % ((t2 - t1) / (t3 - t2))", "dx difference: " ] ], [ [ "## Fully Connected Nets with Batch Normalization\nNow that you have a working implementation for batch normalization, go back to your `FullyConnectedNet` in the file `cs2312n/classifiers/fc_net.py`. Modify your implementation to add batch normalization.\n\nConcretely, when the flag `use_batchnorm` is `True` in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.\n\nHINT: You might find it useful to define an additional helper layer similar to those in the file `cs231n/layer_utils.py`. If you decide to do so, do it in the file `cs231n/classifiers/fc_net.py`.", "_____no_output_____" ] ], [ [ "N, D, H1, H2, C = 2, 15, 20, 30, 10\nX = np.random.randn(N, D)\ny = np.random.randint(C, size=(N,))\n\nfor reg in [0, 3.14]:\n print 'Running check with reg = ', reg\n model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,\n reg=reg, weight_scale=5e-2, dtype=np.float64,\n use_batchnorm=True)\n\n loss, grads = model.loss(X, y)\n print 'Initial loss: ', loss\n\n for name in sorted(grads):\n f = lambda _: model.loss(X, y)[0]\n grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)\n print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))\n if reg == 0: print", " Running check with reg = 0\nInitial loss: 2.35469539618\nW1 relative error: 6.26e-04\nW2 relative error: 2.73e-04\nW3 relative error: 4.58e-10\nb1 relative error: 2.22e-03\nb2 relative error: 4.44e-08\nb3 relative error: 1.40e-10\nbeta1 relative error: 7.08e-07\nbeta2 relative error: 7.66e-09\ngamma1 relative error: 6.61e-07\ngamma2 relative error: 4.25e-09\n\nRunning check with reg = 3.14\nInitial loss: 7.08610967175\nW1 relative error: 6.79e-06\nW2 relative error: 1.09e-06\nW3 relative error: 1.79e-08\nb1 relative error: 8.88e-03\nb2 relative error: 1.11e-08\nb3 relative error: 3.64e-10\nbeta1 relative error: 1.04e-08\nbeta2 relative error: 1.66e-08\ngamma1 relative error: 8.30e-09\ngamma2 relative error: 8.71e-09\n" ] ], [ [ "# Batchnorm for deep networks\nRun the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.", "_____no_output_____" ] ], [ [ "# Try training a very deep net with batchnorm\nhidden_dims = [100, 100, 100, 100, 100]\n\nnum_train = 1000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': 
data['y_val'],\n}\n\nweight_scale = 2e-2\nbn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\nmodel = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\nbn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\nbn_solver.train()\n\nsolver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\nsolver.train()", "(Iteration 1 / 200) loss: 2.313650\n(Epoch 0 / 10) train acc: 0.142000; val_acc: 0.129000\n(Epoch 1 / 10) train acc: 0.361000; val_acc: 0.277000\n(Epoch 2 / 10) train acc: 0.449000; val_acc: 0.336000\n(Epoch 3 / 10) train acc: 0.518000; val_acc: 0.339000\n(Epoch 4 / 10) train acc: 0.578000; val_acc: 0.313000\n(Epoch 5 / 10) train acc: 0.586000; val_acc: 0.318000\n(Epoch 6 / 10) train acc: 0.684000; val_acc: 0.346000\n(Epoch 7 / 10) train acc: 0.708000; val_acc: 0.307000\n(Epoch 8 / 10) train acc: 0.743000; val_acc: 0.338000\n(Epoch 9 / 10) train acc: 0.792000; val_acc: 0.341000\n(Epoch 10 / 10) train acc: 0.796000; val_acc: 0.334000\n(Iteration 1 / 200) loss: 2.301745\n(Epoch 0 / 10) train acc: 0.102000; val_acc: 0.107000\n(Epoch 1 / 10) train acc: 0.243000; val_acc: 0.235000\n(Epoch 2 / 10) train acc: 0.307000; val_acc: 0.286000\n(Epoch 3 / 10) train acc: 0.354000; val_acc: 0.290000\n(Epoch 4 / 10) train acc: 0.421000; val_acc: 0.308000\n(Epoch 5 / 10) train acc: 0.445000; val_acc: 0.300000\n(Epoch 6 / 10) train acc: 0.465000; val_acc: 0.297000\n(Epoch 7 / 10) train acc: 0.537000; val_acc: 0.327000\n(Epoch 8 / 10) train acc: 0.527000; val_acc: 0.310000\n(Epoch 9 / 10) train acc: 0.598000; val_acc: 0.299000\n(Epoch 10 / 10) train acc: 0.602000; val_acc: 0.312000\n" ] ], [ [ "Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.", "_____no_output_____" ] ], [ [ "plt.subplot(3, 1, 1)\nplt.title('Training loss')\nplt.xlabel('Iteration')\n\nplt.subplot(3, 1, 2)\nplt.title('Training accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 3)\nplt.title('Validation accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 1)\nplt.plot(solver.loss_history, 'o', label='baseline')\nplt.plot(bn_solver.loss_history, 'o', label='batchnorm')\n\nplt.subplot(3, 1, 2)\nplt.plot(solver.train_acc_history, '-o', label='baseline')\nplt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')\n\nplt.subplot(3, 1, 3)\nplt.plot(solver.val_acc_history, '-o', label='baseline')\nplt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')\n \nfor i in [1, 2, 3]:\n plt.subplot(3, 1, i)\n plt.legend(loc='upper center', ncol=4)\nplt.gcf().set_size_inches(15, 15)\nplt.show()", "_____no_output_____" ] ], [ [ "# Batch normalization and initialization\nWe will now run a small experiment to study the interaction of batch normalization and weight initialization.\n\nThe first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. 
The second layer will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.", "_____no_output_____" ] ], [ [ "# Try training a very deep net with batchnorm\nhidden_dims = [50, 50, 50, 50, 50, 50, 50]\n\nnum_train = 1000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nbn_solvers = {}\nsolvers = {}\nweight_scales = np.logspace(-4, 0, num=20)\nfor i, weight_scale in enumerate(weight_scales):\n print 'Running weight scale %d / %d' % (i + 1, len(weight_scales))\n bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\n model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\n bn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=False, print_every=200)\n bn_solver.train()\n bn_solvers[weight_scale] = bn_solver\n\n solver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=False, print_every=200)\n solver.train()\n solvers[weight_scale] = solver", "Running weight scale 1 / 20\nRunning weight scale 2 / 20\nRunning weight scale 3 / 20\nRunning weight scale 4 / 20\nRunning weight scale 5 / 20\nRunning weight scale 6 / 20\nRunning weight scale 7 / 20\nRunning weight scale 8 / 20\nRunning weight scale 9 / 20\nRunning weight scale 10 / 20\nRunning weight scale 11 / 20\nRunning weight scale 12 / 20\nRunning weight scale 13 / 20\nRunning weight scale 14 / 20\nRunning weight scale 15 / 20\nRunning weight scale 16 / 20\n" ], [ "# Plot results of weight scale experiment\nbest_train_accs, bn_best_train_accs = [], []\nbest_val_accs, bn_best_val_accs = [], []\nfinal_train_loss, bn_final_train_loss = [], []\n\nfor ws in weight_scales:\n best_train_accs.append(max(solvers[ws].train_acc_history))\n bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))\n \n best_val_accs.append(max(solvers[ws].val_acc_history))\n bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))\n \n final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))\n bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))\n \nplt.subplot(3, 1, 1)\nplt.title('Best val accuracy vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Best val accuracy')\nplt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')\nplt.legend(ncol=2, loc='lower right')\n\nplt.subplot(3, 1, 2)\nplt.title('Best train accuracy vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Best training accuracy')\nplt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')\nplt.legend()\n\nplt.subplot(3, 1, 3)\nplt.title('Final training loss vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Final training loss')\nplt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')\nplt.legend()\n\nplt.gcf().set_size_inches(10, 15)\nplt.show()", "_____no_output_____" ] ], [ [ "# Question:\nDescribe the results of this experiment, and try to give a reason why the experiment gave the 
results that it did.", "_____no_output_____" ], [ "# Answer:\n", "_____no_output_____" ] ] ]
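A minimal NumPy sketch of the training-time forward pass described above (not the cs231n solution file; the shapes and momentum value are illustrative): per-feature minibatch statistics, normalization, the learnable gamma/beta scale-and-shift, and the running averages kept for test time.

import numpy as np

def batchnorm_forward_train(x, gamma, beta, running_mean, running_var,
                            momentum=0.9, eps=1e-5):
    mu = x.mean(axis=0)                      # per-feature minibatch mean
    var = x.var(axis=0)                      # per-feature minibatch variance
    x_hat = (x - mu) / np.sqrt(var + eps)    # zero mean, unit variance
    out = gamma * x_hat + beta               # learnable scale and shift
    # Exponential running averages, used in place of batch statistics at test time.
    running_mean = momentum * running_mean + (1 - momentum) * mu
    running_var = momentum * running_var + (1 - momentum) * var
    return out, running_mean, running_var

x = np.random.randn(64, 3)
gamma, beta = np.ones(3), np.zeros(3)
out, rm, rv = batchnorm_forward_train(x, gamma, beta, np.zeros(3), np.ones(3))
print(out.mean(axis=0), out.std(axis=0))     # approximately beta and gamma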
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
ec772c36dd27a7c207b4227e9f5d9ee5db476aac
219,256
ipynb
Jupyter Notebook
notebooks/depNeural Network-Copy1.ipynb
BeckResearchLab/homoGANize
1d33af3797eae5fb9b082c97dab572168644df0a
[ "MIT" ]
null
null
null
notebooks/depNeural Network-Copy1.ipynb
BeckResearchLab/homoGANize
1d33af3797eae5fb9b082c97dab572168644df0a
[ "MIT" ]
null
null
null
notebooks/depNeural Network-Copy1.ipynb
BeckResearchLab/homoGANize
1d33af3797eae5fb9b082c97dab572168644df0a
[ "MIT" ]
1
2019-07-02T11:25:24.000Z
2019-07-02T11:25:24.000Z
43.279905
1,800
0.414903
[ [ [ "import numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\n\n\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nfrom math import floor, ceil\nfrom pylab import rcParams\nfrom sklearn.utils import shuffle\nfrom sklearn.feature_selection import VarianceThreshold \n\n%matplotlib inline", "_____no_output_____" ], [ "biodeg = pd.read_csv('../../big datasets/featurized_biodeg.csv', low_memory=False)", "_____no_output_____" ], [ "biodeg.head()", "_____no_output_____" ], [ "def encode(series): \n return pd.get_dummies(series.astype(str))", "_____no_output_____" ], [ "train_x = biodeg[biodeg['Status'].str.contains('Train')]\ntrain_y = encode(train_x['Class'])\n\n# manipulate data type of x data to float\ntrain_x = train_x.iloc[:,4:1449]\ntrain_x.drop(train_x.select_dtypes(['object']), inplace=True, axis=1)\ntrain_x.astype(float)", "_____no_output_____" ], [ "test_x = biodeg[biodeg['Status'].str.contains('Test')]\ntest_y = encode(test_x['Class'])\n\n# manipulate data type of x data to float\ntest_x = test_x.iloc[:,4:1449]\ntest_x.drop(test_x.select_dtypes(['object']), inplace=True, axis=1)\ntest_x.astype(float)", "_____no_output_____" ], [ "constant_filter = VarianceThreshold(threshold=0)\nqconstant_filter = VarianceThreshold(threshold=0.01)", "_____no_output_____" ], [ "constant_filter.fit(train_x)\nlen(train_x.columns[constant_filter.get_support()])\nconstant_columns = [column for column in train_x.columns if column\n not in train_x.columns[constant_filter.get_support()]]\n\ntrain_x.drop(labels=constant_columns, axis=1, inplace=True)\ntest_x.drop(labels=constant_columns, axis=1, inplace=True)", "_____no_output_____" ], [ "qconstant_filter.fit(train_x)\nlen(train_x.columns[qconstant_filter.get_support()])\n\nq_constant_columns = [column for column in train_x.columns if column\n not in train_x.columns[qconstant_filter.get_support()]]\n\ntrain_x = qconstant_filter.transform(train_x)\ntest_x = qconstant_filter.transform(test_x)\n\ntrain_x.shape, test_x.shape", "_____no_output_____" ], [ "correlated_features = set()\ncorrelation_matrix = pd.DataFrame(train_x).corr()", "_____no_output_____" ], [ "for i in range(len(correlation_matrix.columns)):\n for j in range(i):\n if abs(correlation_matrix.iloc[i,j]) > 0.8:\n colname = correlation_matrix.columns[i]\n correlated_features.add(colname)", "_____no_output_____" ], [ "len(correlated_features)", "_____no_output_____" ], [ "train_x = pd.DataFrame(train_x)\ntest_x = pd.DataFrame(test_x)", "_____no_output_____" ], [ "train_x.drop(labels=correlated_features, axis=1, inplace=True)\ntest_x.drop(labels=correlated_features, axis=1, inplace=True)", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler\nxscaler = StandardScaler().fit(train_x)\ntrain_x = xscaler.transform(train_x)\ntestscaler = StandardScaler().fit(test_x)\ntest_x = testscaler.transform(test_x)\n", "_____no_output_____" ], [ "x_train = pd.DataFrame(x_train_res)\ny_train = pd.DataFrame(y_train_res)\nx_test = test_x\ny_test = test_y", "_____no_output_____" ], [ "random_state = 42\nnp.random.seed(random_state)\ntf.set_random_seed(random_state)", "_____no_output_____" ], [ "def multilayer_perceptron(x, weights, biases, keep_prob):\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n layer_1 = tf.nn.dropout(layer_1, keep_prob)\n out_layer = tf.matmul(layer_1, weights['out']) + biases['out']\n \n return out_layer\n\n", "_____no_output_____" ], [ "param_dist = {'hidden' : [10, 20, 30, 40, 50],\n 'prob' : [0.2, 0.4, 
0.6, 0.8],\n 'size': [32, 45, 60],\n 'rate' : [0.05, 0.01, 0.001, 0.001]}\n", "_____no_output_____" ], [ "def setup_model(param_dist):\n n_hidden_1 = hidden\n n_input = train_x.shape[1]\n n_classes = train_y.shape[1]\n\n weights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))\n }\n\n biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n }\n\n keep_prob = tf.placeholder(\"float\")\n\n training_epochs = 200\n display_step = 90\n batch_size = size\n\n x = tf.placeholder(\"float\", [None, n_input])\n y = tf.placeholder(\"float\", [None, n_classes])\n predictions = multilayer_perceptron(x, weights, biases, keep_prob)\n \n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y))\n optimizer = tf.train.AdamOptimizer(learning_rate=rate).minimize(cost)\n\n return\n\ndef run_model(setup_model):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n \n for epoch in range(training_epochs):\n avg_cost = 0.0\n total_batch = int(len(x_train) / batch_size)\n x_batches = np.array_split(x_train, total_batch)\n y_batches = np.array_split(y_train, total_batch)\n for i in range(total_batch):\n batch_x, batch_y = x_batches[i], y_batches[i]\n _, c = sess.run([optimizer, cost], \n feed_dict={\n x: batch_x, \n y: batch_y, \n keep_prob: prob\n })\n avg_cost += c / total_batch\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n \"{:.9f}\".format(avg_cost))\n print(\"Optimization Finished!\")\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"Accuracy:\", accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))\n \n \n confusion = tf.confusion_matrix(labels=tf.argmax(y, 1), predictions=tf.argmax(predictions, 1), num_classes=2) \n print(confusion.eval({x: x_test, y: y_test, keep_prob: 1.0}))\n \n total_error = tf.reduce_sum(tf.square(tf.subtract(y, tf.reduce_mean(y))))\n unexplained_error = tf.reduce_sum(tf.square(tf.subtract(y, predictions)))\n R_squared = tf.subtract(1.0, tf.divide(unexplained_error, total_error))\n print(R_squared.eval({x: x_test, y: y_test, keep_prob: 1.0}))\n \n return hidden, prob, size, rate", "_____no_output_____" ], [ " #### use meeeeee \nn_hidden_1 = 60\nn_input = train_x.shape[1]\nn_classes = train_y.shape[1]\n\nweights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))\n }\n\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n }\n\nkeep_prob = tf.placeholder(\"float\")\n\ntraining_epochs = 10\ndisplay_step = 1\nbatch_size = 60\n\nx = tf.placeholder(\"float\", [None, n_input])\ny = tf.placeholder(\"float\", [None, n_classes])\npredictions = multilayer_perceptron(x, weights, biases, keep_prob)\n \ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(cost)\n\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch in range(training_epochs):\n avg_cost = 0.0\n total_batch = int(len(x_train) / batch_size)\n x_batches = np.array_split(x_train, total_batch)\n y_batches = np.array_split(y_train, total_batch)\n for i in range(total_batch):\n batch_x, batch_y = x_batches[i], 
y_batches[i]\n _, c = sess.run([optimizer, cost], \n feed_dict={\n x: batch_x, \n y: batch_y, \n keep_prob: 0.35\n })\n avg_cost += c / total_batch\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n \"{:.9f}\".format(avg_cost))\n print(\"Optimization Finished!\")\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"Accuracy:\", accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))\n \n \n confusion = tf.confusion_matrix(labels=tf.argmax(y, 1), predictions=tf.argmax(predictions, 1), num_classes=2) \n print(confusion.eval({x: x_test, y: y_test, keep_prob: 1.0})) ", "_____no_output_____" ], [ "from imblearn.over_sampling import SMOTE", "_____no_output_____" ], [ "#train_x = train_x.values\ntrain_y = train_y.values\n\nsm = SMOTE(sampling_strategy='minority', random_state=12, ratio = 1.0, )\nx_train_res, y_train_res = sm.fit_sample(train_x, train_y)", "_____no_output_____" ], [ "estimator = run_model(setup_model)", "_____no_output_____" ], [ "param_dist = {'hidden' : [10, 20, 30, 40, 50],\n 'prob' : [0.2, 0.4, 0.6, 0.8],\n 'size': [32, 45, 60],\n 'rate' : [0.05, 0.01, 0.001, 0.001]}", "_____no_output_____" ], [ " = [30, 35, 40, 45, 50, 55, 60, 65, 70]\npro = [0.3, 0.35, 0.4]\nrat = [0.1]", "_____no_output_____" ], [ "acc = []\nfor hidden in hid:\n for prob in pro:\n for rate in rat:\n \n mod = []\n \n n_hidden_1 = hidden\n n_input = train_x.shape[1]\n n_classes = train_y.shape[1]\n weights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))\n }\n\n biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n }\n\n keep_prob = tf.placeholder(\"float\")\n\n training_epochs = 200\n display_step = 100\n batch_size = 60\n\n x = tf.placeholder(\"float\", [None, n_input])\n y = tf.placeholder(\"float\", [None, n_classes])\n predictions = multilayer_perceptron(x, weights, biases, keep_prob)\n \n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y))\n optimizer = tf.train.AdamOptimizer(learning_rate=rate).minimize(cost)\n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n \n for epoch in range(training_epochs):\n avg_cost = 0.0\n total_batch = int(len(x_train) / batch_size)\n x_batches = np.array_split(x_train, total_batch)\n y_batches = np.array_split(y_train, total_batch)\n for i in range(total_batch):\n batch_x, batch_y = x_batches[i], y_batches[i]\n _, c = sess.run([optimizer, cost], \n feed_dict={\n x: batch_x, \n y: batch_y, \n keep_prob: prob\n })\n avg_cost += c / total_batch\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n \"{:.9f}\".format(avg_cost))\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n mod.append(accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))\n \n \n confusion = tf.confusion_matrix(labels=tf.argmax(y, 1), predictions=tf.argmax(predictions, 1), num_classes=2) \n mod.append(confusion.eval({x: x_test, y: y_test, keep_prob: 1.0}))\n mod.append(hidden)\n mod.append(prob)\n mod.append(size)\n mod.append(rate)\n \n acc.append(mod)\n", "_____no_output_____" ], [ "sorted(acc, key=lambda x: x[0])", "_____no_output_____" ], [ "[0.7706422, array([[118, 28], [ 22, 50]]), 10, 0.7, 60, 0.1], \n[0.80733943, array([[131, 
15], [ 27, 45]]), 50, 0.4, 60, 0.1], # best learning rate: 0.1, best batch size: 60\n[0.80733943, array([[133, 13], [ 29, 43]]), 50, 0.7, 60, 0.1], # need to explore hidden layers and keep prob\n[0.8211009, array([[125, 21], [ 18, 54]]), 30, 0.7, 60, 0.1], \n[0.853211, array([[127, 19], [ 13, 59]]), 30, 0.4, 60, 0.1]", "_____no_output_____" ], [ "[0.7706422, array([[110, 36],\n [ 14, 58]]), 10, 0.7, 60, 0.1], \n[0.77522933, array([[117, 29],\n [ 20, 52]]), 30, 0.5, 60, 0.1], \n[0.77522933, array([[127, 19],\n [ 30, 42]]), 70, 0.7, 60, 0.1], \n[0.7844037, array([[131, 15],\n [ 32, 40]]), 50, 0.3, 60, 0.1], \n[0.7844037, array([[133, 13],\n [ 34, 38]]), 50, 0.7, 60, 0.1], # best 0.1 learning rate, dropout 0.5 or 0.3, best hidden unknown...\n[0.78899086, array([[129, 17],\n [ 29, 43]]), 30, 0.7, 60, 0.1], \n[0.79816514, array([[113, 33],\n [ 11, 61]]), 10, 0.5, 60, 0.1], \n[0.79816514, array([[138, 8],\n [ 36, 36]]), 50, 0.5, 60, 0.1], \n[0.8027523, array([[119, 27],\n [ 16, 56]]), 30, 0.3, 60, 0.1], \n[0.8027523, array([[137, 9],\n [ 34, 38]]), 70, 0.3, 60, 0.1], \n[0.8348624, array([[128, 18],\n [ 18, 54]]), 70, 0.5, 60, 0.1]", "_____no_output_____" ], [ " [0.83027524, array([[132, 14],\n [ 23, 49]]), 50, 0.3, 60, 0.1]] # narrow in on 50 and 0.3", "_____no_output_____" ], [ "[0.8211009, array([[126, 20],\n [ 19, 53]]), 60, 0.35, 60, 0.1], winner \nhyperparameters: 60, .35, 60, 0.1", "_____no_output_____" ], [ "from random import randint\nfrom sklearn.model_selection import RandomizedSearchCV", "_____no_output_____" ], [ "# Load the dataset\nX, Y = x_train, y_train\n\n# Create model for KerasClassifier\ndef create_model(hparams1,\n hparams2,\n hparams3,\n hparams4):\n \n n_hidden_1 = hparams1\n n_input = train_x.shape[1]\n n_classes = train_y.shape[1]\n\n weights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))\n }\n\n biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n }\n\n keep_prob = tf.placeholder(\"float\")\n\n training_epochs = 200\n display_step = 90\n batch_size = hparams3\n\n x = tf.placeholder(\"float\", [None, n_input])\n y = tf.placeholder(\"float\", [None, n_classes])\n predictions = multilayer_perceptron(x, weights, biases, keep_prob)\n \n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y))\n optimizer = tf.train.AdamOptimizer(learning_rate=hparams4).minimize(cost)\n\n return\n\nwith tf.Session() as sess:\n model = sess.run(tf.global_variables_initializer())\n\n \n for epoch in range(training_epochs):\n avg_cost = 0.0\n total_batch = int(len(x_train) / batch_size)\n x_batches = np.array_split(x_train, total_batch)\n y_batches = np.array_split(y_train, total_batch)\n for i in range(total_batch):\n batch_x, batch_y = x_batches[i], y_batches[i]\n _, c = sess.run([optimizer, cost], \n feed_dict={\n x: batch_x, \n y: batch_y, \n keep_prob: hparams2\n })\n avg_cost += c / total_batch\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n \"{:.9f}\".format(avg_cost))\n print(\"Optimization Finished!\")\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"Accuracy:\", accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))\n \n \n confusion = tf.confusion_matrix(labels=tf.argmax(y, 1), predictions=tf.argmax(predictions, 1), num_classes=2) \n print(confusion.eval({x: 
x_test, y: y_test, keep_prob: 1.0}))\n\n# Specify parameters and distributions to sample from\nhparams1 = randint(10, 100)\nhparams2 = randint(0,10)*0.1\nhparams3 = randint(30,60)\nhparams4 = [0.1, 0.05, 0.01, 0.001]\n\n# Prepare the Dict for the Search\nparam_dist = dict(hparams1=hparams1, \n hparams2=hparams2, \n hparams3=hparams3, \n hparams4=hparams4)\n\n# Search in action!\nn_iter_search = 16 # Number of parameter settings that are sampled.\nrandom_search = RandomizedSearchCV(estimator=model, \n param_distributions=param_dist,\n n_iter=n_iter_search)\nrandom_search.fit(X, Y)\n\n# Show the results\nprint(\"Best: %f using %s\" % (random_search.best_score_, random_search.best_params_))\nmeans = random_search.cv_results_['mean_test_score']\nstds = random_search.cv_results_['std_test_score']\nparams = random_search.cv_results_['params']\nfor mean, stdev, param in zip(means, stds, params):\n print(\"%f (%f) with: %r\" % (mean, stdev, param))", "_____no_output_____" ], [ "# Load the dataset\n\n# Create model for KerasClassifier\n \nn_hidden_1 = hparams1\nn_input = train_x.shape[1]\nn_classes = train_y.shape[1]\n\nweights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))\n }\n\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n }\n\nkeep_prob = tf.placeholder(\"float\")\n\ntraining_epochs = 200\ndisplay_step = 90\nbatch_size = hparams3\n\nx = tf.placeholder(\"float\", [None, n_input])\ny = tf.placeholder(\"float\", [None, n_classes])\npredictions = multilayer_perceptron(x, weights, biases, keep_prob)\n \ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=hparams4).minimize(cost)\n\nwith tf.Session() as sess:\n model = sess.run(tf.global_variables_initializer())\n\n \n for epoch in range(training_epochs):\n avg_cost = 0.0\n total_batch = int(len(x_train) / batch_size)\n x_batches = np.array_split(x_train, total_batch)\n y_batches = np.array_split(y_train, total_batch)\n for i in range(total_batch):\n batch_x, batch_y = x_batches[i], y_batches[i]\n _, c = sess.run([optimizer, cost], \n feed_dict={\n x: batch_x, \n y: batch_y, \n keep_prob: hparams2\n })\n avg_cost += c / total_batch\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n \"{:.9f}\".format(avg_cost))\n print(\"Optimization Finished!\")\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"Accuracy:\", accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))\n \n \n confusion = tf.confusion_matrix(labels=tf.argmax(y, 1), predictions=tf.argmax(predictions, 1), num_classes=2) \n print(confusion.eval({x: x_test, y: y_test, keep_prob: 1.0}))\n\n# Specify parameters and distributions to sample from\nhparams1 = randint(10, 100)\nhparams2 = randint(0,10)*0.1\nhparams3 = randint(30,60)\nhparams4 = [0.1, 0.05, 0.01, 0.001]\n\n# Prepare the Dict for the Search\nparam_dist = dict(hparams1=hparams1, \n hparams2=hparams2, \n hparams3=hparams3, \n hparams4=hparams4)\n\n# Search in action!\nn_iter_search = 16 # Number of parameter settings that are sampled.\nrandom_search = RandomizedSearchCV(estimator=model, \n param_distributions=param_dist,\n n_iter=n_iter_search)\nrandom_search.fit(X, Y)\n\n# Show the results\nprint(\"Best: %f using %s\" % (random_search.best_score_, 
random_search.best_params_))\nmeans = random_search.cv_results_['mean_test_score']\nstds = random_search.cv_results_['std_test_score']\nparams = random_search.cv_results_['params']\nfor mean, stdev, param in zip(means, stds, params):\n print(\"%f (%f) with: %r\" % (mean, stdev, param))", "_____no_output_____" ], [ "def model(X_train, Y_train, X_val, Y_val):\n \n model = Sequential()\n model.add(Dense({{choice([10, 20, 40, 104])}}))\n model.add(Activation({{choice(['relu', 'sigmoid'])}}))\n model.add(Dropout({{uniform(0, 1)}}))\n model.add(Dense({{choice([10, 20, 40, 104])}}))\n model.add(Activation({{choice(['relu', 'sigmoid'])}}))\n model.add(Dropout({{uniform(0, 1)}}))\n \n if conditional({{choice(['two', 'three'])}}) == 'three':\n model.add(Dense({{choice([10, 20, 40, 104])}}))\n model.add(Activation({{choice(['relu', 'sigmoid'])}}))\n model.add(Dropout({{uniform(0, 1)}}))\n \n model.add(Dense(10))\n model.add(Activation('softmax'))\n adam = keras.optimizers.Adam(lr={{choice([10**-3, 10**-2, 10**-1])}})\n rmsprop = keras.optimizers.RMSprop(lr={{choice([10**-3, 10**-2, 10**-1])}})\n sgd = keras.optimizers.SGD(lr={{choice([10**-3, 10**-2, 10**-1])}})\n \n choiceval = {{choice(['adam', 'sgd', 'rmsprop'])}}\n if choiceval == 'adam':\n optim = adam\n elif choiceval == 'rmsprop':\n optim = rmsprop\n else:\n optim = sgd\n \n model.compile(loss='categorical_crossentropy', metrics=['accuracy'],optimizer=optim)\n model.fit(X_train, Y_train,\n batch_size={{choice([128,256,512])}},\n nb_epoch=20,\n verbose=2,\n validation_data=(X_val, Y_val))\n score, acc = model.evaluate(X_val, Y_val, verbose=0)\n print('Test accuracy:', acc)\n return {'loss': -acc, 'status': STATUS_OK, 'model': model}", "_____no_output_____" ], [ "X_train, Y_train, X_val, Y_val = x_train, y_train, x_test, y_test", "_____no_output_____" ], [ "from hyperopt import Trials, STATUS_OK, tpe\nfrom hyperas import optim\nfrom hyperas.distributions import choice, uniform\nimport keras", "Using TensorFlow backend.\n" ], [ "data = X_train, Y_train, X_val, Y_val\nbest_run, best_model = optim.minimize(model=model,\n data=data,\n algo=tpe.suggest,\n max_evals=30,\n trials=Trials(),\n notebook_name='Neural Network-Copy1')", ">>> Imports:\n#coding=utf-8\n\ntry:\n import numpy as np\nexcept:\n pass\n\ntry:\n import pandas as pd\nexcept:\n pass\n\ntry:\n import tensorflow as tf\nexcept:\n pass\n\ntry:\n import matplotlib.pyplot as plt\nexcept:\n pass\n\ntry:\n import seaborn as sns\nexcept:\n pass\n\ntry:\n from math import floor, ceil\nexcept:\n pass\n\ntry:\n from pylab import rcParams\nexcept:\n pass\n\ntry:\n from sklearn.utils import shuffle\nexcept:\n pass\n\ntry:\n from sklearn.feature_selection import VarianceThreshold\nexcept:\n pass\n\ntry:\n from sklearn.preprocessing import StandardScaler\nexcept:\n pass\n\ntry:\n from random import randint\nexcept:\n pass\n\ntry:\n from sklearn.model_selection import RandomizedSearchCV\nexcept:\n pass\n\ntry:\n from hyperopt import Trials, STATUS_OK, tpe\nexcept:\n pass\n\ntry:\n from hyperas import optim\nexcept:\n pass\n\ntry:\n from hyperas.distributions import choice, uniform\nexcept:\n pass\n\ntry:\n import keras\nexcept:\n pass\n\n>>> Hyperas search space:\n\ndef get_space():\n return {\n 'Dense': hp.choice('Dense', [10, 20, 40, 104]),\n 'Activation': hp.choice('Activation', ['relu', 'sigmoid']),\n 'Dropout': hp.uniform('Dropout', 0, 1),\n 'Dense_1': hp.choice('Dense_1', [10, 20, 40, 104]),\n 'Activation_1': hp.choice('Activation_1', ['relu', 'sigmoid']),\n 'Dropout_1': hp.uniform('Dropout_1', 0, 
1),\n 'conditional': hp.choice('conditional', ['two', 'three']),\n 'Dense_2': hp.choice('Dense_2', [10, 20, 40, 104]),\n 'Activation_2': hp.choice('Activation_2', ['relu', 'sigmoid']),\n 'Dropout_2': hp.uniform('Dropout_2', 0, 1),\n 'lr': hp.choice('lr', [10**-3, 10**-2, 10**-1]),\n 'lr_1': hp.choice('lr_1', [10**-3, 10**-2, 10**-1]),\n 'lr_2': hp.choice('lr_2', [10**-3, 10**-2, 10**-1]),\n 'choiceval': hp.choice('choiceval', ['adam', 'sgd', 'rmsprop']),\n 'batch_size': hp.choice('batch_size', [128,256,512]),\n }\n\n" ] ], [ [ "need to make this different... this needs to have a different train/test layout because the test data is not getting called properly. try https://towardsdatascience.com/how-to-use-dataset-in-tensorflow-c758ef9e4428 this method here", "_____no_output_____" ], [ "_________________________________", "_____no_output_____" ], [ " correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"Accuracy:\", accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))\n\n\n confusion = tf.confusion_matrix(labels=tf.argmax(y, 1), predictions=tf.argmax(predictions, 1), num_classes=2) \n print(confusion.eval({x: x_test, y: y_test, keep_prob: 1.0}))", "_____no_output_____" ], [ " total_error = tf.reduce_sum(tf.square(tf.subtract(y, tf.reduce_mean(y))))\n unexplained_error = tf.reduce_sum(tf.square(tf.subtract(y, predictions)))\n R_squared = tf.subtract(1.0, tf.divide(unexplained_error, total_error))\n print(R_squared.eval({x: x_test, y: y_test, keep_prob: 1.0}))", "_____no_output_____" ], [ "sklearn naive random oversampling (imbalanced data)\nhttps://imbalanced-learn.readthedocs.io/en/stable/over_sampling.html\nhttps://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.tensorflow.balanced_batch_generator.html", "_____no_output_____" ], [ "* look for network architecture from paper that used the QSAR \n* lasso \n* fix for sparse data\n* find any columns that are uniform (or very low variation)\n* normalize \n* tensorboard\n* early stopping - ask rainie if i need help\n* put layer in after dropout\n* if oversampling, up the dropout (is there a ratio)\n* test set needs to be balanced but not oversampled \n* use their train/test split, then shuffle the data\n* combine the set up cells and do a for loop for the number of nodes (like 5-50 at 5 or 10 node increments)\n * use on waffle?\n \n", "_____no_output_____" ], [ "create environment (homoganize)\ninstall everything i need into it\n* do the wget thing", "_____no_output_____" ], [ " MSE = tf.metrics.mean_squared_error(tf.cast(y_test, tf.float32),\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n print(\"MSE:\", MSE)\n fn = tf.metrics.false_negatives(\n tf.cast(y_test, tf.float32),\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n tn = tf.metrics.true_negatives(\n tf.cast(y_test, tf.float32),\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n fp = tf.metrics.false_positives(\n tf.cast(y_test, tf.float32),\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n tp = tf.metrics.true_positives(\n tf.cast(y_test, tf.float32),\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n print(\"FN:\", fn, \"TN:\", tn, \"FP:\", fp, \"TP:\", tp)\n total_error = 
tf.reduce_sum(tf.square(tf.subtract(tf.cast(y_test, tf.float32), tf.reduce_mean(tf.cast(y_test, tf.float32)))))\n unexplained_error = tf.reduce_sum(tf.square(tf.subtract(tf.cast(y_test, tf.float32), tf.cast(predictions, tf.float32))))\n R_squared = tf.subtract(1.0, tf.divide(unexplained_error, total_error))\n print(R_squared)", "_____no_output_____" ], [ "df = pd.DataFrame({'num_legs': [2, 4, 8, 0],\n... 'num_wings': [2, 0, 0, 0],\n... 'num_specimen_seen': [10, 2, 1, 8]},\n... index=['falcon', 'dog', 'spider', 'fish'])", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df1 = df.iloc[:,1:3]\ndf1.head()", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec774ed50c1bcdbc6e6786b5e1f019ae1399a16d
1,187
ipynb
Jupyter Notebook
Python Notebooks/Nested Stats. _ Scope - Bootcamp.ipynb
Isaquehg/Scripts
d6d94e2d32171262b8286bae82ccd83b3baf30a5
[ "MIT" ]
null
null
null
Python Notebooks/Nested Stats. _ Scope - Bootcamp.ipynb
Isaquehg/Scripts
d6d94e2d32171262b8286bae82ccd83b3baf30a5
[ "MIT" ]
null
null
null
Python Notebooks/Nested Stats. _ Scope - Bootcamp.ipynb
Isaquehg/Scripts
d6d94e2d32171262b8286bae82ccd83b3baf30a5
[ "MIT" ]
null
null
null
1,187
1,187
0.638585
[ [ [ "#This is the priority order of Python´s scopes(LEGB Rules):\r\n#L: local\r\n#E: enclosing\r\n#G: global\r\n#B: built-in functions(len, max, range, etc.)\r\n#GLOBAL\r\nname = \"THIS IS A GLOBAL STR\"\r\n\r\ndef greet():\r\n \r\n #ENCLOSING\r\n name = \"Sammy\"\r\n\r\n def hello():\r\n #LOCAL\r\n name = \"i´m local\"\r\n print(\"Hello\", name)\r\n\r\n hello()\r\n\r\n#calling everything...\r\ngreet()", "Hello i´m local\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
ec775283560d1dc54169908dfa1949c364807d67
63,594
ipynb
Jupyter Notebook
examples/train_clstm_example.ipynb
cnyambura/NovoNordisk_Capstone
dba965880e5eb5a3a9edd5fcebcf11b6787e4f8c
[ "MIT" ]
null
null
null
examples/train_clstm_example.ipynb
cnyambura/NovoNordisk_Capstone
dba965880e5eb5a3a9edd5fcebcf11b6787e4f8c
[ "MIT" ]
null
null
null
examples/train_clstm_example.ipynb
cnyambura/NovoNordisk_Capstone
dba965880e5eb5a3a9edd5fcebcf11b6787e4f8c
[ "MIT" ]
null
null
null
158.985
9,692
0.874501
[ [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom keras import backend as K\nfrom ndac.data_processing import quantile_classify, encode_sequence, value_classify\nfrom ndac.predict import train_clstm", "_____no_output_____" ], [ "# read in sequence/property data\ndata = pd.read_csv('dataframes/DF_prest.csv', index_col=0)", "_____no_output_____" ] ], [ [ "# train with nucleotide seq", "_____no_output_____" ] ], [ [ "# split quantiles and encode with nucleotide sequence\ndf, hist = quantile_classify(data['conc_cf'], data['nt_seq'],\n [0.25, 0.75], drop_class=[1])\nX, y = encode_sequence(df['nt_seq'], df['class'],\n max_length=200, tag='GACAAGCTTGCGGCCGCA')\n\nnt_model = train_clstm(X, y, test_fraction=0.3, \n epochs=1)", "45206 samples input.\n11302 samples in class 0\n11301 samples in class 1\n22603 samples removed.\nWARNING:tensorflow:From /Users/joshsmith/Git/NovoNordisk_Capstone/.env/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py:497: calling conv1d (from tensorflow.python.ops.nn_ops) with data_format=NHWC is deprecated and will be removed in a future version.\nInstructions for updating:\n`NHWC` for data_format is deprecated, use `NWC` instead\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_1 (Embedding) (None, 200, 4) 296 \n_________________________________________________________________\nconv1d_1 (Conv1D) (None, 200, 128) 1664 \n_________________________________________________________________\nmax_pooling1d_1 (MaxPooling1 (None, 100, 128) 0 \n_________________________________________________________________\nlstm_1 (LSTM) (None, 100) 91600 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 101 \n=================================================================\nTotal params: 93,661\nTrainable params: 93,661\nNon-trainable params: 0\n_________________________________________________________________\nNone\nEpoch 1/1\n15677/15677 [==============================] - 24s 2ms/step - loss: 0.6856 - acc: 0.5382\nAccuracy: 62.05%\n" ], [ "# Visualize embedded sequence after training nt_model\nget_1st_layer_output = K.function([nt_model.layers[0].input, K.learning_phase()],\n [nt_model.layers[0].output])\n\nx = X[0].reshape(-1,200) # after using keras Tokenizer on X\n# output in train mode = 1\nlayer_output = get_1st_layer_output([x, 1])[0]\n\nplt.figure(figsize=(10, 1))\nplt.imshow(layer_output[0].T)", "_____no_output_____" ] ], [ [ "# train with amino acid sequence", "_____no_output_____" ] ], [ [ "df, hist = quantile_classify(data['conc_cf'], data['aa_seq'],\n [0.25, 0.75], drop_class=[1])\nX, y = encode_sequence(df['aa_seq'], df['class'],\n max_length=200)\n\naa_model = train_clstm(X, y, test_fraction=0.3, epochs=1)", "45206 samples input.\n11302 samples in class 0\n11301 samples in class 1\n22603 samples removed.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_2 (Embedding) (None, 200, 4) 84 \n_________________________________________________________________\nconv1d_2 (Conv1D) (None, 200, 128) 1664 \n_________________________________________________________________\nmax_pooling1d_2 (MaxPooling1 (None, 100, 128) 0 \n_________________________________________________________________\nlstm_2 (LSTM) (None, 100) 91600 
\n_________________________________________________________________\ndense_2 (Dense) (None, 1) 101 \n=================================================================\nTotal params: 93,449\nTrainable params: 93,449\nNon-trainable params: 0\n_________________________________________________________________\nNone\nEpoch 1/1\n15822/15822 [==============================] - 25s 2ms/step - loss: 0.6749 - acc: 0.5713\nAccuracy: 62.88%\n" ], [ "# Visualize embedded sequence after training aa_model\nget_1st_layer_output = K.function([aa_model.layers[0].input, K.learning_phase()],\n [aa_model.layers[0].output])\n\nx = X[0].reshape(-1,200) # after using keras Tokenizer on X\n# output in train mode = 1\nlayer_output = get_1st_layer_output([x, 1])[0]\n\nplt.figure(figsize=(10, 1))\nplt.imshow(layer_output[0].T)", "_____no_output_____" ] ], [ [ "# solubility multiclass", "_____no_output_____" ] ], [ [ "# read in sequence/property data\ndata = pd.read_csv('dataframes/DF_solubility.csv', index_col=0)", "_____no_output_____" ], [ "df, hist = value_classify(data['solubility_class_1M'], data['aa_seq'],\n [3.9, 4.1], drop_class=[1])\nX, y = encode_sequence(df['aa_seq'], df['class'],\n max_length=200)\naa_model = train_clstm(X, y, test_fraction=0.3, epochs=1)", "16082 samples input.\n3324 samples in class 0\n7667 samples in class 1\n5091 samples removed.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_6 (Embedding) (None, 200, 4) 84 \n_________________________________________________________________\nconv1d_6 (Conv1D) (None, 200, 128) 1664 \n_________________________________________________________________\nmax_pooling1d_6 (MaxPooling1 (None, 100, 128) 0 \n_________________________________________________________________\nlstm_6 (LSTM) (None, 100) 91600 \n_________________________________________________________________\ndense_6 (Dense) (None, 1) 101 \n=================================================================\nTotal params: 93,449\nTrainable params: 93,449\nNon-trainable params: 0\n_________________________________________________________________\nNone\nEpoch 1/1\n7693/7693 [==============================] - 13s 2ms/step - loss: 0.5822 - acc: 0.7047\nAccuracy: 76.86%\n" ], [ "df, hist = value_classify(data['solubility_class_1M'], data['aa_seq'],\n [1, 2, 3, 4])\nX, y = encode_sequence(df['aa_seq'], df['class'],\n max_length=200)\naa_model = train_clstm(X, y, test_fraction=0.3, epochs=1)", "16082 samples input.\n838 samples in class 0\n1071 samples in class 1\n1394 samples in class 2\n5112 samples in class 3\n7667 samples in class 4\n0 samples removed.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_8 (Embedding) (None, 200, 4) 84 \n_________________________________________________________________\nconv1d_8 (Conv1D) (None, 200, 128) 1664 \n_________________________________________________________________\nmax_pooling1d_8 (MaxPooling1 (None, 100, 128) 0 \n_________________________________________________________________\nlstm_8 (LSTM) (None, 100) 91600 \n_________________________________________________________________\ndense_8 (Dense) (None, 5) 505 \n=================================================================\nTotal params: 93,853\nTrainable params: 93,853\nNon-trainable params: 
0\n_________________________________________________________________\nNone\nEpoch 1/1\n11257/11257 [==============================] - 19s 2ms/step - loss: 1.2842 - acc: 0.4714\nAccuracy: 47.69%\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec776564c53bfa7470f93c5e08a55b6a01136a57
286,488
ipynb
Jupyter Notebook
tensorflow/artificial_neural_network.ipynb
juliocnsouzadev/gcp-data-engineer
c32a516440c8989f28a33234a05a02873c7fc5b8
[ "MIT" ]
null
null
null
tensorflow/artificial_neural_network.ipynb
juliocnsouzadev/gcp-data-engineer
c32a516440c8989f28a33234a05a02873c7fc5b8
[ "MIT" ]
null
null
null
tensorflow/artificial_neural_network.ipynb
juliocnsouzadev/gcp-data-engineer
c32a516440c8989f28a33234a05a02873c7fc5b8
[ "MIT" ]
null
null
null
87.157895
491
0.661906
[ [ [ "# Artificial Neural Network", "_____no_output_____" ], [ "### Importing the libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport tensorflow as tf", "_____no_output_____" ], [ "tf.__version__", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly&response_type=code\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ] ], [ [ "## Part 1 - Data Preprocessing", "_____no_output_____" ], [ "### Importing the dataset", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv('/content/drive/My Drive/Colab Notebooks/Churn_Modelling.csv')", "_____no_output_____" ] ], [ [ "## Description and Info of the Dataset", "_____no_output_____" ] ], [ [ "dataset.describe()", "_____no_output_____" ], [ "dataset.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 10000 entries, 0 to 9999\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 RowNumber 10000 non-null int64 \n 1 CustomerId 10000 non-null int64 \n 2 Surname 10000 non-null object \n 3 CreditScore 10000 non-null int64 \n 4 Geography 10000 non-null object \n 5 Gender 10000 non-null object \n 6 Age 10000 non-null int64 \n 7 Tenure 10000 non-null int64 \n 8 Balance 10000 non-null float64\n 9 NumOfProducts 10000 non-null int64 \n 10 HasCrCard 10000 non-null int64 \n 11 IsActiveMember 10000 non-null int64 \n 12 EstimatedSalary 10000 non-null float64\n 13 Exited 10000 non-null int64 \ndtypes: float64(2), int64(9), object(3)\nmemory usage: 1.1+ MB\n" ] ], [ [ "## Getting Features and Outcome", "_____no_output_____" ] ], [ [ "X = dataset.iloc[:, 3:-1].values\ny = dataset.iloc[:, -1].values", "_____no_output_____" ], [ "print(X)", "[[619 'France' 'Female' ... 1 1 101348.88]\n [608 'Spain' 'Female' ... 0 1 112542.58]\n [502 'France' 'Female' ... 1 0 113931.57]\n ...\n [709 'France' 'Female' ... 0 1 42085.58]\n [772 'Germany' 'Male' ... 1 0 92888.52]\n [792 'France' 'Female' ... 1 0 38190.78]]\n" ], [ "print(y)", "[1 0 1 ... 1 1 0]\n" ] ], [ [ "### Encoding categorical data", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "Label Encoding the \"Gender\" column", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\nX[:, 2] = le.fit_transform(X[:, 2])", "_____no_output_____" ], [ "print(X)", "[[619 'France' 0 ... 1 1 101348.88]\n [608 'Spain' 0 ... 0 1 112542.58]\n [502 'France' 0 ... 1 0 113931.57]\n ...\n [709 'France' 0 ... 0 1 42085.58]\n [772 'Germany' 1 ... 1 0 92888.52]\n [792 'France' 0 ... 1 0 38190.78]]\n" ] ], [ [ "One Hot Encoding the \"Geography\" column", "_____no_output_____" ] ], [ [ "from sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough')\nX = np.array(ct.fit_transform(X))", "_____no_output_____" ], [ "print(X)", "[[1.0 0.0 0.0 ... 1 1 101348.88]\n [0.0 0.0 1.0 ... 0 1 112542.58]\n [1.0 0.0 0.0 ... 1 0 113931.57]\n ...\n [1.0 0.0 0.0 ... 
0 1 42085.58]\n [0.0 1.0 0.0 ... 1 0 92888.52]\n [1.0 0.0 0.0 ... 1 0 38190.78]]\n" ] ], [ [ "### Splitting the dataset into the Training set and Test set", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)", "_____no_output_____" ] ], [ [ "### Feature Scaling", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)", "_____no_output_____" ] ], [ [ "## Part 2 - Building the ANN", "_____no_output_____" ], [ "### Initializing the ANN", "_____no_output_____" ] ], [ [ "ann = tf.keras.models.Sequential()", "_____no_output_____" ] ], [ [ "### Adding the input layer and the first hidden layer", "_____no_output_____" ] ], [ [ "firstLayer = tf.keras.layers.Dense(units=6, activation='relu')\nann.add(firstLayer)", "_____no_output_____" ] ], [ [ "### Adding the second hidden layer", "_____no_output_____" ] ], [ [ "\n\nsecondLayer = tf.keras.layers.Dense(units=6, activation='relu')\nann.add(secondLayer)", "_____no_output_____" ] ], [ [ "### Adding the output layer", "_____no_output_____" ] ], [ [ "outputLayer = tf.keras.layers.Dense(units=1, activation='sigmoid')\nann.add(outputLayer)", "_____no_output_____" ] ], [ [ "## Part 3 - Training the ANN", "_____no_output_____" ], [ "### Compiling the ANN", "_____no_output_____" ] ], [ [ "ann.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])", "_____no_output_____" ] ], [ [ "### Training the ANN on the Training set", "_____no_output_____" ] ], [ [ "ann.fit(X_train, y_train, batch_size = 32, epochs = 100)", "Epoch 1/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.6710 - accuracy: 0.5975\nEpoch 2/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.4807 - accuracy: 0.8006\nEpoch 3/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.4482 - accuracy: 0.8067\nEpoch 4/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.4344 - accuracy: 0.8109\nEpoch 5/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.4255 - accuracy: 0.8126\nEpoch 6/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.4178 - accuracy: 0.8185\nEpoch 7/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.4096 - accuracy: 0.8231\nEpoch 8/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.4002 - accuracy: 0.8306\nEpoch 9/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3865 - accuracy: 0.8409\nEpoch 10/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3732 - accuracy: 0.8494\nEpoch 11/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3636 - accuracy: 0.8534\nEpoch 12/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3575 - accuracy: 0.8537\nEpoch 13/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3534 - accuracy: 0.8524\nEpoch 14/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3507 - accuracy: 0.8544\nEpoch 15/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3485 - accuracy: 0.8561\nEpoch 16/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3466 - accuracy: 0.8585\nEpoch 17/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3455 - accuracy: 0.8589\nEpoch 18/100\n250/250 [==============================] - 
0s 1ms/step - loss: 0.3442 - accuracy: 0.8599\nEpoch 19/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3436 - accuracy: 0.8600\nEpoch 20/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3425 - accuracy: 0.8596\nEpoch 21/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3419 - accuracy: 0.8590\nEpoch 22/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3414 - accuracy: 0.8589\nEpoch 23/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3406 - accuracy: 0.8605\nEpoch 24/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3405 - accuracy: 0.8591\nEpoch 25/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3400 - accuracy: 0.8618\nEpoch 26/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3398 - accuracy: 0.8591\nEpoch 27/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3392 - accuracy: 0.8605\nEpoch 28/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3394 - accuracy: 0.8616\nEpoch 29/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3387 - accuracy: 0.8622\nEpoch 30/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3379 - accuracy: 0.8621\nEpoch 31/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3382 - accuracy: 0.8621\nEpoch 32/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3376 - accuracy: 0.8626\nEpoch 33/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3377 - accuracy: 0.8619\nEpoch 34/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3374 - accuracy: 0.8634\nEpoch 35/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3368 - accuracy: 0.8618\nEpoch 36/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3370 - accuracy: 0.8621\nEpoch 37/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3365 - accuracy: 0.8612\nEpoch 38/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3367 - accuracy: 0.8615\nEpoch 39/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3361 - accuracy: 0.8630\nEpoch 40/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3366 - accuracy: 0.8618\nEpoch 41/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3364 - accuracy: 0.8631\nEpoch 42/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3364 - accuracy: 0.8630\nEpoch 43/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3355 - accuracy: 0.8625\nEpoch 44/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3357 - accuracy: 0.8633\nEpoch 45/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3356 - accuracy: 0.8621\nEpoch 46/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3352 - accuracy: 0.8624\nEpoch 47/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3354 - accuracy: 0.8621\nEpoch 48/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3352 - accuracy: 0.8636\nEpoch 49/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3350 - accuracy: 0.8644\nEpoch 50/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3347 - accuracy: 0.8626\nEpoch 51/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3354 - accuracy: 0.8637\nEpoch 52/100\n250/250 [==============================] - 0s 1ms/step - 
loss: 0.3348 - accuracy: 0.8626\nEpoch 53/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3348 - accuracy: 0.8630\nEpoch 54/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3347 - accuracy: 0.8635\nEpoch 55/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3347 - accuracy: 0.8619\nEpoch 56/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3342 - accuracy: 0.8636\nEpoch 57/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3345 - accuracy: 0.8618\nEpoch 58/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3342 - accuracy: 0.8631\nEpoch 59/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3345 - accuracy: 0.8626\nEpoch 60/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3342 - accuracy: 0.8629\nEpoch 61/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3338 - accuracy: 0.8639\nEpoch 62/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3343 - accuracy: 0.8629\nEpoch 63/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3341 - accuracy: 0.8627\nEpoch 64/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3339 - accuracy: 0.8631\nEpoch 65/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3338 - accuracy: 0.8635\nEpoch 66/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3334 - accuracy: 0.8629\nEpoch 67/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3335 - accuracy: 0.8631\nEpoch 68/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3334 - accuracy: 0.8616\nEpoch 69/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3335 - accuracy: 0.8630\nEpoch 70/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3333 - accuracy: 0.8634\nEpoch 71/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3334 - accuracy: 0.8629\nEpoch 72/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3331 - accuracy: 0.8616\nEpoch 73/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3329 - accuracy: 0.8655\nEpoch 74/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3332 - accuracy: 0.8644\nEpoch 75/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3330 - accuracy: 0.8633\nEpoch 76/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3330 - accuracy: 0.8641\nEpoch 77/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3328 - accuracy: 0.8634\nEpoch 78/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3325 - accuracy: 0.8633\nEpoch 79/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3326 - accuracy: 0.8650\nEpoch 80/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3328 - accuracy: 0.8652\nEpoch 81/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3325 - accuracy: 0.8643\nEpoch 82/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3325 - accuracy: 0.8633\nEpoch 83/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3323 - accuracy: 0.8648\nEpoch 84/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3326 - accuracy: 0.8644\nEpoch 85/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3326 - accuracy: 0.8629\nEpoch 86/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3325 - 
accuracy: 0.8639\nEpoch 87/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3324 - accuracy: 0.8624\nEpoch 88/100\n250/250 [==============================] - 0s 996us/step - loss: 0.3323 - accuracy: 0.8637\nEpoch 89/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3320 - accuracy: 0.8641\nEpoch 90/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3324 - accuracy: 0.8639\nEpoch 91/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3328 - accuracy: 0.8620\nEpoch 92/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3322 - accuracy: 0.8636\nEpoch 93/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3321 - accuracy: 0.8645\nEpoch 94/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3323 - accuracy: 0.8636\nEpoch 95/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3322 - accuracy: 0.8635\nEpoch 96/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3318 - accuracy: 0.8646\nEpoch 97/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3321 - accuracy: 0.8624\nEpoch 98/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3321 - accuracy: 0.8645\nEpoch 99/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3322 - accuracy: 0.8635\nEpoch 100/100\n250/250 [==============================] - 0s 1ms/step - loss: 0.3319 - accuracy: 0.8641\n" ] ], [ [ "## Part 4 - Making the predictions and evaluating the model", "_____no_output_____" ], [ "### Predicting the result of a single observation", "_____no_output_____" ], [ "Use our ANN model to predict if the customer with the following informations will leave the bank: \n\nGeography: France\n\nCredit Score: 600\n\nGender: Male\n\nAge: 40 years old\n\nTenure: 3 years\n\nBalance: \\$ 60000\n\nNumber of Products: 2\n\nDoes this customer have a credit card? Yes\n\nIs this customer an Active Member: Yes\n\nEstimated Salary: \\$ 50000\n\nSo, should we say goodbye to that customer?", "_____no_output_____" ], [ "**Solution**", "_____no_output_____" ] ], [ [ "def showPrediction(predicition):\n shouldWeSayGoodBye = prediction > 0.5\n result = \"definitely\" if shouldWeSayGoodBye == True else \"not\"\n probability = str(100 - prediction[0] * 100)\n print(\"We should \" + result + \" say goodbye to the customer. The probality of the customer staying is \" + probability + \"%\" )", "_____no_output_____" ], [ "creditScore = 600\ngender = 1\nage = 40\ntenure = 3\nbalance = 60000\nproducts = 2\nhaveCreditCard = 1\nactiveMember = 1\nestimatedSalary = 50000\ntarget = [[1, 0, 0, creditScore, gender, age, tenure, balance, products, haveCreditCard, activeMember, estimatedSalary]]\ntarget = sc.transform(target)\nprediction = ann.predict(target)\nshowPrediction(prediction[0])", "We should not say goodbye to the customer. The probality of the customer staying is [97.89198]%\n" ] ], [ [ "Therefore, our ANN model predicts that this customer stays in the bank!\n\n**Important note 1:** Notice that the values of the features were all input in a double pair of square brackets. That's because the \"predict\" method always expects a 2D array as the format of its inputs. And putting our values into a double pair of square brackets makes the input exactly a 2D array.\n\n**Important note 2:** Notice also that the \"France\" country was not input as a string in the last column but as \"1, 0, 0\" in the first three columns. 
That's because of course the predict method expects the one-hot-encoded values of the state, and as we see in the first row of the matrix of features X, \"France\" was encoded as \"1, 0, 0\". And be careful to include these values in the first three columns, because the dummy variables are always created in the first columns.", "_____no_output_____" ], [ "### Predicting the Test set results", "_____no_output_____" ] ], [ [ "y_pred = ann.predict(X_test)\nfor prediction in y_pred:\n showPrediction(prediction)", "We should not say goodbye to the customer. The probality of the customer staying is 80.50497770309448%\nWe should not say goodbye to the customer. The probality of the customer staying is 62.02417314052582%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.7300636768341%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.30465006828308%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.93864476680756%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 14.76401686668396%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.80108535289764%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.54421973228455%\nWe should not say goodbye to the customer. The probality of the customer staying is 68.64285469055176%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 6.531107425689697%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.80670702457428%\nWe should not say goodbye to the customer. The probality of the customer staying is 69.76141929626465%\nWe should not say goodbye to the customer. The probality of the customer staying is 62.592846155166626%\nWe should not say goodbye to the customer. The probality of the customer staying is 81.97495341300964%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 38.86096477508545%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 49.980229139328%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.44027817249298%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.66922855377197%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.0052410364151%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.74150431156158%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 45.086753368377686%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.88662397861481%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.60973155498505%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.89207243919373%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.80695641040802%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.62512671947479%\nWe should not say goodbye to the customer. The probality of the customer staying is 80.59151470661163%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.26110398769379%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 73.19579422473907%\nWe should not say goodbye to the customer. The probality of the customer staying is 58.35003852844238%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.20973372459412%\nWe should not say goodbye to the customer. The probality of the customer staying is 58.481016755104065%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.90849673748016%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.67106771469116%\nWe should not say goodbye to the customer. The probality of the customer staying is 70.43450474739075%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.96666204929352%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.31466674804688%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.22414219379425%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.69320094585419%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.85921347141266%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.8770352602005%\nWe should not say goodbye to the customer. The probality of the customer staying is 70.20292580127716%\nWe should not say goodbye to the customer. The probality of the customer staying is 72.51192033290863%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.89942002296448%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 49.80195760726929%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.92211151123047%\nWe should not say goodbye to the customer. The probality of the customer staying is 70.22027373313904%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.48527991771698%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 47.56092429161072%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.62466955184937%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 2.1030187606811523%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.06582188606262%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.45235133171082%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.57778036594391%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.29725217819214%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.17794239521027%\nWe should not say goodbye to the customer. The probality of the customer staying is 58.39552283287048%\nWe should not say goodbye to the customer. The probality of the customer staying is 76.41482949256897%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 47.943103313446045%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 49.33393597602844%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.3182965517044%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 85.75194478034973%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.02392029762268%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.32320380210876%\nWe should not say goodbye to the customer. The probality of the customer staying is 63.76578211784363%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 10.491937398910522%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.83697021007538%\nWe should not say goodbye to the customer. The probality of the customer staying is 57.01393783092499%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.05349397659302%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 6.496977806091309%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 20.68612575531006%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.84485304355621%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.3284523487091%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 6.3179850578308105%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 47.01712131500244%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.85120570659637%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 28.653377294540405%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.80976974964142%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.09095358848572%\nWe should not say goodbye to the customer. The probality of the customer staying is 83.54432582855225%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 4.049384593963623%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.93851947784424%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.1550921201706%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.74746358394623%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 20.65833806991577%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.07625305652618%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.26761782169342%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.6071367263794%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 17.355847358703613%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.96651899814606%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.09703505039215%\nWe should not say goodbye to the customer. The probality of the customer staying is 60.18713414669037%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.37437331676483%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 5.089163780212402%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 99.37807619571686%\nWe should not say goodbye to the customer. The probality of the customer staying is 67.09997057914734%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.71246016025543%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.84818053245544%\nWe should not say goodbye to the customer. The probality of the customer staying is 85.32502055168152%\nWe should not say goodbye to the customer. The probality of the customer staying is 73.60375821590424%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.42607879638672%\nWe should not say goodbye to the customer. The probality of the customer staying is 66.47670865058899%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.97827899456024%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.10364615917206%\nWe should not say goodbye to the customer. The probality of the customer staying is 65.04091024398804%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.59721219539642%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.01506304740906%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.77755618095398%\nWe should not say goodbye to the customer. The probality of the customer staying is 68.1341826915741%\nWe should not say goodbye to the customer. The probality of the customer staying is 83.69051218032837%\nWe should not say goodbye to the customer. The probality of the customer staying is 67.26536452770233%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 24.71703290939331%\nWe should not say goodbye to the customer. The probality of the customer staying is 71.40157520771027%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.15902984142303%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 34.89350080490112%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.97011613845825%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.31082713603973%\nWe should not say goodbye to the customer. The probality of the customer staying is 81.82722926139832%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.38873100280762%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.4703859090805%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.95547485351562%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.65478157997131%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 35.045671463012695%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.94614100456238%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.02768433094025%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 24.57290291786194%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 45.81260681152344%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 79.36523258686066%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 37.486886978149414%\nWe should not say goodbye to the customer. The probality of the customer staying is 70.53843140602112%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.62076139450073%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 34.229397773742676%\nWe should not say goodbye to the customer. The probality of the customer staying is 78.56858670711517%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.85679185390472%\nWe should not say goodbye to the customer. The probality of the customer staying is 73.57239723205566%\nWe should not say goodbye to the customer. The probality of the customer staying is 80.26684522628784%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.11937582492828%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 27.344053983688354%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.95521998405457%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.94544875621796%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.22542071342468%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.07464754581451%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 29.90207076072693%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.52357947826385%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.90989816188812%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 41.279059648513794%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.88508296012878%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 0.04589557647705078%\nWe should not say goodbye to the customer. The probality of the customer staying is 59.75760817527771%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 7.512634992599487%\nWe should not say goodbye to the customer. The probality of the customer staying is 74.95695948600769%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.40740656852722%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.57786989212036%\nWe should not say goodbye to the customer. The probality of the customer staying is 83.21432769298553%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 4.078066349029541%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 34.23106670379639%\nWe should not say goodbye to the customer. The probality of the customer staying is 73.6816257238388%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.97772991657257%\nWe should not say goodbye to the customer. The probality of the customer staying is 61.4199161529541%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.84604978561401%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 90.67989885807037%
We should not say goodbye to the customer. The probality of the customer staying is 97.47514724731445%
We should not say goodbye to the customer. The probality of the customer staying is 98.12982082366943%
We should not say goodbye to the customer. The probality of the customer staying is 74.0021288394928%
We should not say goodbye to the customer.
[... several hundred similar lines of per-customer output omitted: each line pairs a recommendation ("We should not say goodbye to the customer." or "We should definitely say goodbye to the customer.") with the model's predicted probability that the customer stays, recommending goodbye whenever that probability falls below roughly 50% ...]
The probality of the customer staying is 79.8981100320816%\nWe should not say goodbye to the customer. The probality of the customer staying is 57.77001976966858%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 4.963397979736328%\nWe should not say goodbye to the customer. The probality of the customer staying is 67.2597885131836%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.12743949890137%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.07237386703491%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.25526463985443%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.20556139945984%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.28973305225372%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.25394451618195%\nWe should not say goodbye to the customer. The probality of the customer staying is 75.5361557006836%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 0.46335458755493164%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.23694431781769%\nWe should not say goodbye to the customer. The probality of the customer staying is 69.90686655044556%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.67357218265533%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.31136751174927%\nWe should not say goodbye to the customer. The probality of the customer staying is 57.80628025531769%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.35262203216553%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.4897643327713%\nWe should not say goodbye to the customer. The probality of the customer staying is 50.238052010536194%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.26371467113495%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.7955846786499%\nWe should not say goodbye to the customer. The probality of the customer staying is 82.92192220687866%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.4132229089737%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.21229720115662%\nWe should not say goodbye to the customer. The probality of the customer staying is 72.9623019695282%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.3674920797348%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.9098516702652%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 44.002336263656616%\nWe should not say goodbye to the customer. The probality of the customer staying is 72.32486605644226%\nWe should not say goodbye to the customer. The probality of the customer staying is 69.91883516311646%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.81566846370697%\nWe should not say goodbye to the customer. The probality of the customer staying is 61.5243136882782%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 96.86560332775116%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 22.257578372955322%\nWe should not say goodbye to the customer. The probality of the customer staying is 70.54477334022522%\nWe should not say goodbye to the customer. The probality of the customer staying is 80.18659949302673%\nWe should not say goodbye to the customer. The probality of the customer staying is 89.11269903182983%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.75055253505707%\nWe should not say goodbye to the customer. The probality of the customer staying is 83.75084400177002%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.28775548934937%\nWe should not say goodbye to the customer. The probality of the customer staying is 74.06731247901917%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.76491522789001%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 41.64159297943115%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.55709052085876%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.41228413581848%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.18327927589417%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.9595113992691%\nWe should not say goodbye to the customer. The probality of the customer staying is 72.21992611885071%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.46748495101929%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.97470664978027%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.42644035816193%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.6309140920639%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.6426899433136%\nWe should not say goodbye to the customer. The probality of the customer staying is 78.60336303710938%\nWe should not say goodbye to the customer. The probality of the customer staying is 89.14693295955658%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 43.329477310180664%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 36.64441108703613%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.47263538837433%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.33874702453613%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.84289562702179%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.1408417224884%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.3983805179596%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 40.48328995704651%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.9003517627716%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.64803147315979%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 85.29863059520721%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 0.06968975067138672%\nWe should not say goodbye to the customer. The probality of the customer staying is 65.38873314857483%\nWe should not say goodbye to the customer. The probality of the customer staying is 77.55309343338013%\nWe should not say goodbye to the customer. The probality of the customer staying is 80.13154566287994%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.14737296104431%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.22416996955872%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.88162803649902%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.36249804496765%\nWe should not say goodbye to the customer. The probality of the customer staying is 80.5359035730362%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.72135174274445%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.4815764427185%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.65908825397491%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.72067868709564%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.5453177690506%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.78104984760284%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.18742179870605%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.88425362110138%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.3005553483963%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.57819879055023%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 40.06699323654175%\nWe should not say goodbye to the customer. The probality of the customer staying is 56.61845803260803%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.4066436290741%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.61744844913483%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.99611175060272%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.84316980838776%\nWe should not say goodbye to the customer. The probality of the customer staying is 62.51609921455383%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.74473106861115%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.43614983558655%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.10610473155975%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.62856757640839%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 1.2854814529418945%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 15.245229005813599%\nWe should definitely say goodbye to the customer. 
The probality of the customer staying is 7.196664810180664%\nWe should not say goodbye to the customer. The probality of the customer staying is 62.073516845703125%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 7.897669076919556%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.67252206802368%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.39773571491241%\nWe should not say goodbye to the customer. The probality of the customer staying is 51.93544924259186%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.13437259197235%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.96900355815887%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 0.03266334533691406%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.86766171455383%\nWe should not say goodbye to the customer. The probality of the customer staying is 51.02004408836365%\nWe should not say goodbye to the customer. The probality of the customer staying is 60.72937846183777%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.35269057750702%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.19062507152557%\nWe should not say goodbye to the customer. The probality of the customer staying is 80.38730919361115%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.24374604225159%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.3786906003952%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.99942529201508%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.42991530895233%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.01972270011902%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.65056574344635%\nWe should not say goodbye to the customer. The probality of the customer staying is 52.37840116024017%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.95490002632141%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.59999024868011%\nWe should not say goodbye to the customer. The probality of the customer staying is 85.82836389541626%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.26763164997101%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.98156094551086%\nWe should not say goodbye to the customer. The probality of the customer staying is 77.39382088184357%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.70491218566895%\nWe should not say goodbye to the customer. The probality of the customer staying is 67.75743067264557%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.73666036128998%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 0.023651123046875%\nWe should not say goodbye to the customer. The probality of the customer staying is 74.57804679870605%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 85.51605641841888%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.63095235824585%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 39.01646137237549%\nWe should not say goodbye to the customer. The probality of the customer staying is 82.08984136581421%\nWe should not say goodbye to the customer. The probality of the customer staying is 75.7537454366684%\nWe should not say goodbye to the customer. The probality of the customer staying is 53.82128357887268%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.25414407253265%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.86715483665466%\nWe should not say goodbye to the customer. The probality of the customer staying is 76.60475075244904%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.19710052013397%\nWe should not say goodbye to the customer. The probality of the customer staying is 77.74291336536407%\nWe should not say goodbye to the customer. The probality of the customer staying is 83.67254436016083%\nWe should not say goodbye to the customer. The probality of the customer staying is 85.1728230714798%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.11939787864685%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.10974371433258%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 5.294251441955566%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 35.73518991470337%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.44259309768677%\nWe should not say goodbye to the customer. The probality of the customer staying is 83.39657187461853%\nWe should not say goodbye to the customer. The probality of the customer staying is 70.67665457725525%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.09917306900024%\nWe should not say goodbye to the customer. The probality of the customer staying is 74.17387962341309%\nWe should not say goodbye to the customer. The probality of the customer staying is 85.857093334198%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.3242437839508%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 39.872562885284424%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 23.82609248161316%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.13367331027985%\nWe should not say goodbye to the customer. The probality of the customer staying is 82.99099206924438%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.28925585746765%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.53179514408112%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.47681605815887%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.22914588451385%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 30.204284191131592%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 76.37809216976166%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.270299077034%\nWe should not say goodbye to the customer. The probality of the customer staying is 89.33106064796448%\nWe should not say goodbye to the customer. The probality of the customer staying is 54.76087927818298%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.70469963550568%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.4578982591629%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.39714968204498%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.93020164966583%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.35325980186462%\nWe should not say goodbye to the customer. The probality of the customer staying is 78.08383703231812%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.80334794521332%\nWe should not say goodbye to the customer. The probality of the customer staying is 75.57606399059296%\nWe should not say goodbye to the customer. The probality of the customer staying is 71.58928513526917%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.24827790260315%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.58117425441742%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.28985333442688%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.31202781200409%\nWe should not say goodbye to the customer. The probality of the customer staying is 72.03652262687683%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.54863750934601%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.25353622436523%\nWe should not say goodbye to the customer. The probality of the customer staying is 51.42445266246796%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.12448823451996%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.412424325943%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.09389436244965%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.88216209411621%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.96096086502075%\nWe should not say goodbye to the customer. The probality of the customer staying is 72.98181653022766%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 44.40373778343201%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 43.45330595970154%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.77011835575104%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.82905578613281%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.70200645923615%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.90657162666321%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.19976150989532%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 95.68409025669098%\nWe should not say goodbye to the customer. The probality of the customer staying is 52.22342014312744%\nWe should not say goodbye to the customer. The probality of the customer staying is 55.927664041519165%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.95610010623932%\nWe should not say goodbye to the customer. The probality of the customer staying is 61.57773733139038%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.71437311172485%\nWe should not say goodbye to the customer. The probality of the customer staying is 78.34751605987549%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.81423199176788%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.10250496864319%\nWe should not say goodbye to the customer. The probality of the customer staying is 75.70238709449768%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.32806491851807%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.93226420879364%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 10.210531949996948%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.35082590579987%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.2542622089386%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.83450937271118%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.43889427185059%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 4.720449447631836%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.66905784606934%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.3384519815445%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.17467474937439%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.98072624206543%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.00448024272919%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.10116195678711%\nWe should not say goodbye to the customer. The probality of the customer staying is 64.21436071395874%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.52565431594849%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.03187930583954%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 41.12975597381592%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.06960809230804%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 34.67998504638672%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.54644930362701%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 31.09433650970459%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.71299934387207%\nWe should definitely say goodbye to the customer. 
The probality of the customer staying is 34.72687005996704%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.88293325901031%\nWe should not say goodbye to the customer. The probality of the customer staying is 66.15619361400604%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.14200794696808%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.23544216156006%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.83615267276764%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.03140616416931%\nWe should not say goodbye to the customer. The probality of the customer staying is 58.20373296737671%\nWe should not say goodbye to the customer. The probality of the customer staying is 75.30670762062073%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.47849774360657%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 21.510809659957886%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.91952848434448%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.36701595783234%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.55518245697021%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.82822215557098%\nWe should not say goodbye to the customer. The probality of the customer staying is 89.93975222110748%\nWe should not say goodbye to the customer. The probality of the customer staying is 75.32261610031128%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.82847559452057%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.71796810626984%\nWe should not say goodbye to the customer. The probality of the customer staying is 83.01821947097778%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.23339748382568%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.19202411174774%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.85404574871063%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 4.598546028137207%\nWe should not say goodbye to the customer. The probality of the customer staying is 82.35042691230774%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.83962154388428%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.9547986984253%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.1943781375885%\nWe should not say goodbye to the customer. The probality of the customer staying is 80.52295446395874%\nWe should not say goodbye to the customer. The probality of the customer staying is 73.65570664405823%\nWe should not say goodbye to the customer. The probality of the customer staying is 89.21551406383514%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.24097609519958%\nWe should not say goodbye to the customer. The probality of the customer staying is 52.557921409606934%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.7533985376358%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 87.08121180534363%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 33.31737518310547%\nWe should not say goodbye to the customer. The probality of the customer staying is 68.95659565925598%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 8.822882175445557%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.596959233284%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.85010480880737%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.97702729701996%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.10833394527435%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.55302250385284%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.56205666065216%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.41955387592316%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 5.528879165649414%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.24579453468323%\nWe should not say goodbye to the customer. The probality of the customer staying is 67.47522950172424%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.91681051254272%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.60310649871826%\nWe should not say goodbye to the customer. The probality of the customer staying is 63.730788230895996%\nWe should not say goodbye to the customer. The probality of the customer staying is 62.9515141248703%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.65180695056915%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.96365189552307%\nWe should not say goodbye to the customer. The probality of the customer staying is 78.81076335906982%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.68472099304199%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.22597360610962%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.20471906661987%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.62302017211914%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.14245700836182%\nWe should not say goodbye to the customer. The probality of the customer staying is 74.62661564350128%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.68464148044586%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.36625134944916%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.11155784130096%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 13.446533679962158%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.05885243415833%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.24265730381012%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 93.46389770507812%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.96895158290863%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.68660366535187%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 17.158764600753784%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.21588468551636%\nWe should not say goodbye to the customer. The probality of the customer staying is 63.72550427913666%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.74895703792572%\nWe should not say goodbye to the customer. The probality of the customer staying is 70.99409103393555%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.06641817092896%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 48.10225963592529%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.92003321647644%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.2718756198883%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 46.112626791000366%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 0.049245357513427734%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.45904397964478%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.92118227481842%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.29271626472473%\nWe should not say goodbye to the customer. The probality of the customer staying is 70.41654288768768%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 12.179708480834961%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.13347136974335%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.76836919784546%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.87577891349792%\nWe should not say goodbye to the customer. The probality of the customer staying is 89.49224054813385%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.49576842784882%\nWe should not say goodbye to the customer. The probality of the customer staying is 78.58513295650482%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 42.49250888824463%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.3717520236969%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.79446113109589%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.40944850444794%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.64582192897797%\nWe should not say goodbye to the customer. The probality of the customer staying is 73.78851175308228%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 37.831759452819824%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.91035854816437%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 98.57450127601624%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 6.351202726364136%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.66191828250885%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 13.16913366317749%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.84675133228302%\nWe should not say goodbye to the customer. The probality of the customer staying is 69.47240829467773%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 32.448774576187134%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.49034404754639%\nWe should not say goodbye to the customer. The probality of the customer staying is 83.07076394557953%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.55851721763611%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.46379661560059%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.58577394485474%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 11.66776418685913%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 4.782974720001221%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 5.235153436660767%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.72841393947601%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.20530295372009%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.15201306343079%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.20860016345978%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.55625021457672%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.95138621330261%\nWe should not say goodbye to the customer. The probality of the customer staying is 74.93032217025757%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 32.92645812034607%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 17.212796211242676%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.06373405456543%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 34.58266258239746%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.77559566497803%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.59286749362946%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.11846458911896%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.75917887687683%\nWe should not say goodbye to the customer. The probality of the customer staying is 65.92679023742676%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.1509337425232%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.06103682518005%\nWe should definitely say goodbye to the customer. 
The probality of the customer staying is 40.960633754730225%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 10.717713832855225%\nWe should not say goodbye to the customer. The probality of the customer staying is 70.95340192317963%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.11215555667877%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.08422768115997%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.29633140563965%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.3537038564682%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.87782919406891%\nWe should not say goodbye to the customer. The probality of the customer staying is 89.14726674556732%\nWe should not say goodbye to the customer. The probality of the customer staying is 54.730433225631714%\nWe should not say goodbye to the customer. The probality of the customer staying is 82.71966874599457%\nWe should not say goodbye to the customer. The probality of the customer staying is 68.4954047203064%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.93293750286102%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.59147047996521%\nWe should not say goodbye to the customer. The probality of the customer staying is 64.77212905883789%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 37.41472363471985%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 23.172980546951294%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.074378490448%\nWe should not say goodbye to the customer. The probality of the customer staying is 62.29461431503296%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.11797046661377%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.59621214866638%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.83426451683044%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 17.780357599258423%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.17847776412964%\nWe should not say goodbye to the customer. The probality of the customer staying is 89.3256276845932%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.47826063632965%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.25788033008575%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.37854588031769%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.67454838752747%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.98081147670746%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.75797927379608%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.12402617931366%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.58108067512512%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 98.17741811275482%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.70927107334137%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.69376909732819%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 22.641336917877197%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 31.525325775146484%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.24372601509094%\nWe should not say goodbye to the customer. The probality of the customer staying is 81.45292103290558%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 0.1172482967376709%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.22993230819702%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.43909311294556%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.2491124868393%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.43164873123169%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.32897961139679%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.03002202510834%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.72378873825073%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.95090234279633%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.29192852973938%\nWe should not say goodbye to the customer. The probality of the customer staying is 76.18832290172577%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.98355984687805%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 39.91038203239441%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.72693514823914%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.68814134597778%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.18770384788513%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.95885074138641%\nWe should not say goodbye to the customer. The probality of the customer staying is 56.97525143623352%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.2779803276062%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.17953562736511%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 27.66093611717224%\nWe should not say goodbye to the customer. The probality of the customer staying is 79.10164594650269%\nWe should not say goodbye to the customer. The probality of the customer staying is 52.69518792629242%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.21968376636505%\nWe should not say goodbye to the customer. The probality of the customer staying is 74.46359395980835%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.09520137310028%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 92.57121980190277%\nWe should not say goodbye to the customer. The probality of the customer staying is 80.20748496055603%\nWe should not say goodbye to the customer. The probality of the customer staying is 89.2900824546814%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.53458905220032%\nWe should not say goodbye to the customer. The probality of the customer staying is 61.235201358795166%\nWe should not say goodbye to the customer. The probality of the customer staying is 61.066803336143494%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.43713843822479%\nWe should not say goodbye to the customer. The probality of the customer staying is 75.74550211429596%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.66395390033722%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.31611704826355%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 41.1573588848114%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 25.62328577041626%\nWe should not say goodbye to the customer. The probality of the customer staying is 56.02850317955017%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 48.91940355300903%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.4656093120575%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.49181377887726%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.67679178714752%\nWe should not say goodbye to the customer. The probality of the customer staying is 85.51479279994965%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.9599198102951%\nWe should not say goodbye to the customer. The probality of the customer staying is 60.19290387630463%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.90721988677979%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.27660405635834%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.47978842258453%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.02864384651184%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 23.907041549682617%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.4244853258133%\nWe should not say goodbye to the customer. The probality of the customer staying is 92.5495982170105%\nWe should not say goodbye to the customer. The probality of the customer staying is 91.94537401199341%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.82322335243225%\nWe should not say goodbye to the customer. The probality of the customer staying is 74.69983696937561%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.7699077129364%\nWe should not say goodbye to the customer. The probality of the customer staying is 66.85737669467926%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.37153041362762%\nWe should definitely say goodbye to the customer. 
The probability of the customer staying is 12.153220176696777%
We should not say goodbye to the customer. The probability of the customer staying is 60.363078117370605%
We should not say goodbye to the customer. The probability of the customer staying is 96.33295834064484%
We should not say goodbye to the customer. The probability of the customer staying is 50.63207149505615%
We should not say goodbye to the customer. The probability of the customer staying is 66.03088080883026%
We should definitely say goodbye to the customer. The probability of the customer staying is 49.33059215545654%
We should not say goodbye to the customer. The probability of the customer staying is 78.21727395057678%
We should not say goodbye to the customer. The probability of the customer staying is 89.12191390991211%
We should not say goodbye to the customer. The probability of the customer staying is 95.98122537136078%
We should not say goodbye to the customer. The probability of the customer staying is 92.06697940826416%
We should not say goodbye to the customer. The probability of the customer staying is 99.375519156456%
We should definitely say goodbye to the customer. The probability of the customer staying is 41.31506681442261%
We should not say goodbye to the customer. The probability of the customer staying is 69.38686668872833%
[... the same one-line prediction repeats for each remaining test-set customer ...]
We should not say goodbye to the customer.
The probality of the customer staying is 93.27188730239868%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 6.094563007354736%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.50717389583588%\nWe should not say goodbye to the customer. The probality of the customer staying is 69.54162120819092%\nWe should not say goodbye to the customer. The probality of the customer staying is 84.41679775714874%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.88769686222076%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.57201063632965%\nWe should not say goodbye to the customer. The probality of the customer staying is 82.85110890865326%\nWe should not say goodbye to the customer. The probality of the customer staying is 76.89393758773804%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.79463708400726%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.23228621482849%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 22.485744953155518%\nWe should not say goodbye to the customer. The probality of the customer staying is 52.390480041503906%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 33.35474133491516%\nWe should not say goodbye to the customer. The probality of the customer staying is 88.78561854362488%\nWe should not say goodbye to the customer. The probality of the customer staying is 55.16043305397034%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.49052023887634%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.93486785888672%\nWe should not say goodbye to the customer. The probality of the customer staying is 87.08008527755737%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.83803427219391%\nWe should not say goodbye to the customer. The probality of the customer staying is 85.14404594898224%\nWe should not say goodbye to the customer. The probality of the customer staying is 71.43588960170746%\nWe should not say goodbye to the customer. The probality of the customer staying is 95.08514702320099%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.67370128631592%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.16371881961823%\nWe should not say goodbye to the customer. The probality of the customer staying is 90.22164046764374%\nWe should definitely say goodbye to the customer. The probality of the customer staying is 18.220198154449463%\nWe should not say goodbye to the customer. The probality of the customer staying is 89.37256932258606%\nWe should not say goodbye to the customer. The probality of the customer staying is 94.39600706100464%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.38725328445435%\nWe should not say goodbye to the customer. The probality of the customer staying is 81.18544816970825%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.81031906604767%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.28489029407501%\nWe should not say goodbye to the customer. 
The probality of the customer staying is 79.17755246162415%\nWe should not say goodbye to the customer. The probality of the customer staying is 51.61032974720001%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.4313497543335%\nWe should not say goodbye to the customer. The probality of the customer staying is 93.63955557346344%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.00264036655426%\nWe should not say goodbye to the customer. The probality of the customer staying is 99.4182139635086%\nWe should not say goodbye to the customer. The probality of the customer staying is 97.75750041007996%\nWe should not say goodbye to the customer. The probality of the customer staying is 98.71193766593933%\nWe should not say goodbye to the customer. The probality of the customer staying is 96.0423469543457%\nWe should not say goodbye to the customer. The probality of the customer staying is 80.32666146755219%\nWe should not say goodbye to the customer. The probality of the customer staying is 86.17867827415466%\nWe should not say goodbye to the customer. The probality of the customer staying is 82.86837339401245%\n" ], [ "whoStays = [x for x in y_pred if x > 0.5]\nwhoLeaves = [x for x in y_pred if x <= 0.5]\ntotalStaying = len(whoStays)\ntotalLeaving = len(whoLeaves)\nprint(\"In total \" + str(totalStaying) + \" will stay and \" + str(totalLeaving) + \" will leave.\")", "In total 288 will stay and 1712 will leave.\n" ] ], [ [ "### Making the Confusion Matrix", "_____no_output_____" ] ], [ [ "from sklearn.metrics import confusion_matrix, accuracy_score\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\naccuracy_score(y_test, y_pred)", "[[1529 66]\n [ 213 192]]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
ec7766d6bcf5bd568f7fd6d4546de2fe1a3a5e63
87,787
ipynb
Jupyter Notebook
_notebooks/2019-11-28-NM.ipynb
jackhmiller/My-DS-Blog
24630bb20f0f64783d4da43285d37b748db84398
[ "Apache-2.0" ]
null
null
null
_notebooks/2019-11-28-NM.ipynb
jackhmiller/My-DS-Blog
24630bb20f0f64783d4da43285d37b748db84398
[ "Apache-2.0" ]
null
null
null
_notebooks/2019-11-28-NM.ipynb
jackhmiller/My-DS-Blog
24630bb20f0f64783d4da43285d37b748db84398
[ "Apache-2.0" ]
null
null
null
187.179104
39,698
0.88881
[ [ [ "# Accelerated Gradient Descent and Newton's Method\n> \"From scratch implementation of accelerated GD and Newton's method using a funky gamma-distributed loss function\"\n- toc: false\n- branch: master\n- badges: true\n- comments: true\n- image: images/Hess.png\n- hide: false\n- search_exclude: false", "_____no_output_____" ], [ "Nonconvex optimization problems are ubiquitous in modern machine learning. While it is NP-hard to find global minima of a nonconvex function in the worst case, in the setting of machine learning it has proved useful to consider a less stringent notion of success, namely that of convergence to a first-order stationary point. However, architectures such as deep neural networks induce optimization surfaces that can be teeming with highly suboptimal saddle points.", "_____no_output_____" ], [ "In this setting, the glaring limitation of gradient descent is that it can get stuck in flat areas or bounce around if the objective function returns noisy gradients. Therefore second-order descent methods are needed for optimization. Momentum is an approach that accelerates the progress of the search to skim across flat areas and smooth out bouncy gradients. In some cases, the acceleration of momentum can cause the search to miss or overshoot the minima at the bottom of basins or valleys. Nesterov momentum is an extension of momentum that involves calculating the decaying moving average of the gradients of projected positions in the search space rather than the actual positions themselves. This has the effect of harnessing the accelerating benefits of momentum whilst allowing the search to slow down when approaching the optima and reduce the likelihood of missing or overshooting it. Further, Nesterov’s accelerated gradient descent (AGD), an instance of the general family of “momentum methods,” provably achieves faster convergence rate than gradient descent (GD) in the convex setting.", "_____no_output_____" ], [ "$$ L(\\alpha,\\beta|y_1,...,y_n) = \\prod_{i=1}^{n}\\frac{\\Gamma (\\alpha+\\beta)}{\\Gamma(\\alpha)\\Gamma(\\beta)}y_i^{\\alpha-1}(1-y_i)^{\\beta-1} $$", "_____no_output_____" ], [ "Given the above likelihood function, where $\\Gamma$ is the gamma function, and $\\alpha$ and $\\beta$ are greater than zero. First, we convert our likelihood function to the log likelihood. 
", "_____no_output_____" ], [ "$$ log(L(\\alpha, \\beta | y_{1}, ..., y_{n})) = nlog(\\Gamma(a+b)) -nlog(\\Gamma(a)) -nlog(\\Gamma(b)) +(a-1)\\sum_{i=1}^{n}log(y_{i}) +(b-1)\\sum_{i=1}^{n}log(1-y_{i}) $$", "_____no_output_____" ], [ "And its maximum likelihood formulation.", "_____no_output_____" ], [ "$$ \\widehat{\\alpha}, \\widehat{\\beta} = \\underset{i}{argmax}(log(\\widehat{L}(\\alpha, \\beta | y_{1}, ..., y_{n}))) $$", "_____no_output_____" ], [ "And our gradient and Hessian from the log likelihood function:", "_____no_output_____" ], [ "&emsp; Gradient", "_____no_output_____" ], [ "$$ \n\\bigtriangledown log(L)) =\n\\begin{bmatrix}\nn\\psi(\\alpha+\\beta) -n\\psi(\\alpha) +\\sum_{i=1}^{n}log(y_{i})\n\\\\ \nn\\psi(\\alpha+\\beta) -n\\psi(\\beta) +\\sum_{i=1}^{n}log(1-y_{i})\n\\end{bmatrix}\n$$", "_____no_output_____" ], [ "&emsp; Hessian", "_____no_output_____" ], [ "$$ \\bigtriangledown^{2} log(L) =\n\\begin{bmatrix}\nn\\psi'(\\alpha+\\beta) -n\\psi'(\\alpha) & n\\psi'(\\alpha+\\beta)\n\\\\ \n n\\psi'(\\alpha+\\beta) & n\\psi'(\\alpha+\\beta) -n\\psi'(\\beta) \n\\end{bmatrix}\n$$", "_____no_output_____" ], [ "#### Let's code it up.\nFor digamma $\\psi(x) = \\frac{\\Gamma'(x)}{\\Gamma(x)}$ and trigamma $\\psi'(x)$, we will cheat and use built-in functions from scipy.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport math\nfrom scipy.io import loadmat\nimport seaborn as sns\nimport scipy\nfrom sklearn.model_selection import KFold, cross_val_score\nfrom random import randrange\nfrom scipy.interpolate import BSpline\nfrom scipy.ndimage import gaussian_filter\n%matplotlib inline\nfrom scipy.special import digamma, polygamma", "_____no_output_____" ] ], [ [ "Loading the data and coding our objective function, gradient, and Hessian.", "_____no_output_____" ] ], [ [ "Y = loadmat('Sample_data.mat')['y']", "_____no_output_____" ], [ "def objective(data, x):\n n = len(data)\n return n*math.log(math.gamma(x[0]+x[1]))-n*math.log(math.gamma(x[0]))-n*math.log(math.gamma(x[1])) + (x[0]-1)*sum([math.log(i) for i in data]) + (x[1]-1)*sum([math.log(1-i) for i in data]) \n\ndef gradient(data, x):\n n = len(data)\n return np.array([n* digamma(x[0]+x[1]) - n*digamma(x[0]) + sum([math.log(i) for i in data]), n* digamma(x[0]+x[1]) - n*digamma(x[1]) + sum([math.log(1-i) for i in data])]) \n\ndef Hessian(data, x):\n n = len(data)\n return np.array([[n* polygamma(1, x[0]+x[1]) - n*polygamma(1, x[0]), n* polygamma(1, x[0]+x[1])],[n* polygamma(1, x[0]+x[1]), n* polygamma(1, x[0]+x[1]) - n*polygamma(1, x[1])]])", "_____no_output_____" ] ], [ [ "&emsp; Newton's Method", "_____no_output_____" ] ], [ [ "alpha = 1 \nlam = 1e-10\nepsilon = 0.01\nx0 = np.random.rand(2)\n\nf0 = objective(Y, x0) # initialize\nfL = [f0]\ng0 = gradient(Y, x0) # initialize\ngL = [g0]\nH0 = Hessian(Y, x0) # initialize\nOmega = -np.linalg.solve(H0 + lam*np.eye(2),g0)\nstopping = np.linalg.norm(Omega)\niterations = 1\nparams = []\nwhile stopping > epsilon:\n x = x0 + alpha*Omega \n fval = objective(Y, x)\n while fval < f0: \n alpha *= 0.1\n x = x0 + alpha*Omega \n fval = objective(Y, x)\n alpha = alpha**0.5\n params.append(x)\n x0 = x\n f0 = fval\n fL.append(fval)\n g0 = gradient(Y, x0)\n Omega = -np.linalg.solve(Hessian(Y, x0)+lam*np.eye(2),g0)\n stopping = np.linalg.norm(Omega)\n iterations += 1\n\nresults = x0\niters = iterations\n\nplt.figure(figsize=(20,10))\nplt.plot(fL)\nplt.title('Newton Method')\nplt.xlabel('Number of iterations')\nplt.ylabel('Log-Likelihood')", 
"_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(15,10))\nax.plot([x[0] for x in params], lw=3, label='Alpha')\nax.plot([x[1] for x in params], lw = 3, label='Beta')\nplt.title('Alpha & Beta Values vs Number of Iterations')\nplt.xlabel('Number of iterations')\nplt.ylabel('Parameter Value')\nax.legend(loc='best')", "_____no_output_____" ], [ "print(f\" Newton's Method optimized parameter values- Alpha: {round(x0[0], 3)}, Beta: {round(x0[1], 3)}\")", " Newton's Method optimized parameter values- Alpha: 4.97, Beta: 12.173\n" ] ], [ [ "&emsp; Accelerated Gradient Descent", "_____no_output_____" ] ], [ [ "x = np.random.rand(2)\nbeta = np.random.rand(2) \nx0 = x \n\nconvergence = gradient(Y, beta)@gradient(Y, beta)\nconvergence_results = [convergence]\nloss = [objective(Y, x)]\nalpha = 0.1\nepsilon = 0.01\niterations = 1\nwhile convergence > epsilon:\n x = beta + alpha*gradient(Y, x)\n beta = x - (iterations-1)/(iterations+2)*(x-x0)\n convergence = gradient(Y, x)@gradient(Y, x)\n convergence_results.append(convergence)\n try:\n loss.append(objective(Y, x))\n except:\n pass\n x0 = x\n iterations += 1\n \nresults = x0\niters = iterations\n\nplt.figure(figsize=(20,10))\nplt.plot(loss)\nplt.title('Accelerated Gradient Descent')\nplt.xlabel('Iterations')\nplt.ylabel('Log-Likelihood')", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec77811005eef6088e68c191219ddf63a708518a
20,115
ipynb
Jupyter Notebook
Quaternion Inverse Check.ipynb
jgoppert/iekf_analysis
d41ad34b37ef2636e20680accf399ea4a9332811
[ "BSD-3-Clause" ]
5
2018-01-16T06:46:38.000Z
2019-06-19T10:17:12.000Z
Quaternion Inverse Check.ipynb
jgoppert/iekf_analysis
d41ad34b37ef2636e20680accf399ea4a9332811
[ "BSD-3-Clause" ]
null
null
null
Quaternion Inverse Check.ipynb
jgoppert/iekf_analysis
d41ad34b37ef2636e20680accf399ea4a9332811
[ "BSD-3-Clause" ]
null
null
null
66.167763
4,350
0.74263
[ [ [ "import transforms3d.quaternions as tf\nimport sympy\nsympy.init_printing()", "_____no_output_____" ], [ "a = sympy.symbols('a_1:3')\nb = sympy.symbols('b_1:3')\nc = sympy.symbols('c_1:3')\nd = sympy.symbols('d_1:3')\n\nq0 = sympy.Matrix([a[0], b[0], c[0], d[0]])\nq1 = sympy.Matrix([a[1], b[1], c[1], d[1]])", "_____no_output_____" ], [ "def qmult(q0, q1):\n res = sympy.Matrix(tf.qmult(q0, q1))\n res.simplify()\n return res\n\ndef qconj(q):\n return sympy.Matrix([q[0], -q[1], -q[2], -q[3]])\n\ndef qnorm(q):\n return sympy.sqrt(qmult(q, qconj(q))[0])", "_____no_output_____" ], [ "qmult(q0, q1)", "_____no_output_____" ], [ "qconj(q1).T", "_____no_output_____" ], [ "qnorm(q1)", "_____no_output_____" ], [ "qmult(q0, qconj(q0)/qnorm(q0)**2)", "_____no_output_____" ], [ "qmult(q0, qconj(q0)/qnorm(q0))", "_____no_output_____" ], [ "def qinv(q):\n res = qconj(q)/qnorm(q)**2\n res.simplify()\n return res", "_____no_output_____" ], [ "qmult(q0, qinv(q1))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec778c6e8808383f8e99464bf65afdce36917031
72,273
ipynb
Jupyter Notebook
legacy/4_1D_GatedCNN-Copy1.ipynb
ataraxno/weighing_dev
d4ea91645435bef2656d6ed3235888ae90d0ad59
[ "Apache-2.0" ]
null
null
null
legacy/4_1D_GatedCNN-Copy1.ipynb
ataraxno/weighing_dev
d4ea91645435bef2656d6ed3235888ae90d0ad59
[ "Apache-2.0" ]
null
null
null
legacy/4_1D_GatedCNN-Copy1.ipynb
ataraxno/weighing_dev
d4ea91645435bef2656d6ed3235888ae90d0ad59
[ "Apache-2.0" ]
null
null
null
105.662281
36,956
0.79749
[ [ [ "import pandas as pd\nimport numpy as np\nnp.set_printoptions(precision=6, suppress=True)\nfrom sklearn.utils import shuffle\n\nfrom tqdm import tqdm\n\nimport tensorflow as tf\nfrom tensorflow.keras import *\ntf.__version__", "_____no_output_____" ], [ "import matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.ticker import (LinearLocator, MultipleLocator, FormatStrFormatter)\nfrom matplotlib.dates import MONDAY\nfrom matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter\nfrom matplotlib import gridspec\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n%matplotlib inline", "_____no_output_____" ], [ "plt.rcParams['figure.figsize'] = ((8/2.54), (6/2.54))\nplt.rcParams[\"font.family\"] = \"Arial\"\nplt.rcParams[\"mathtext.default\"] = \"rm\"\nplt.rcParams.update({'font.size': 11})\nMARKER_SIZE = 15\ncmap_m = [\"#f4a6ad\", \"#f6957e\", \"#fccfa2\", \"#8de7be\", \"#86d6f2\", \"#24a9e4\", \"#b586e0\", \"#d7f293\"]\ncmap = [\"#e94d5b\", \"#ef4d28\", \"#f9a54f\", \"#25b575\", \"#1bb1e7\", \"#1477a2\", \"#a662e5\", \"#c2f442\"]\n\nplt.rcParams['axes.spines.top'] = False\n# plt.rcParams['axes.edgecolor'] = \nplt.rcParams['axes.linewidth'] = 1\nplt.rcParams['lines.linewidth'] = 1.5\nplt.rcParams['xtick.major.width'] = 1\nplt.rcParams['xtick.minor.width'] = 1\nplt.rcParams['ytick.major.width'] = 1\nplt.rcParams['ytick.minor.width'] = 1", "_____no_output_____" ], [ "tf.config.list_physical_devices('GPU')", "_____no_output_____" ], [ "gpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n print(e)", "2 Physical GPUs, 2 Logical GPUs\n" ], [ "strategy = tf.distribute.MirroredStrategy()", "INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1')\n" ] ], [ [ "# Hyperparameters", "_____no_output_____" ] ], [ [ "BEST_PATH = './models/convnet.h5'\nTRAINING_EPOCHS = 200\nLEARNING_RATE = 0.002\nEPSILON = 1e-06\nBATCH_SIZE = 16", "_____no_output_____" ] ], [ [ "# Data loading", "_____no_output_____" ] ], [ [ "l = np.load('./results/2020_S/fw_dataset.npz', allow_pickle=True)\ndata_indices = l['data_indices']\ninput_data = l['input_data']\noutput_label = l['output_label']\nINPUT_MAXS = l['INPUT_MAXS']\nINPUT_MINS = l['INPUT_MINS']\nOUTPUT_MAX = l['OUTPUT_MAX']\nOUTPUT_MIN = l['OUTPUT_MIN']", "_____no_output_____" ], [ "input_data = input_data.astype('float32')\noutput_label = output_label.astype('float32')", "_____no_output_____" ], [ "print(input_data.shape)\nprint(output_label.shape)", "(363, 144, 9)\n(363, 1)\n" ], [ "print(INPUT_MAXS)\nprint(INPUT_MINS)", "[ 42.31875 90.9425 330.1 73.335 10.006 5.057 36.58\n 50.264 19.731 ]\n[15.83875 4.4325 -0.5957 32.115 2.296 0.579 15.7 8.744\n 11.393 ]\n" ], [ "print(OUTPUT_MAX)\nprint(OUTPUT_MIN)", "2.470441467376113\n0.40038664002968494\n" ], [ "data_indices, input_data, output_label = shuffle(data_indices, input_data, output_label, random_state=3101)", "_____no_output_____" ], [ "N_TRAIN = int(input_data.shape[0]*.8)\ntrain_input = input_data[:N_TRAIN, ...]\ntrain_label = output_label[:N_TRAIN, ...]\ntrain_indices = data_indices[:N_TRAIN]\nval_input = input_data[N_TRAIN:, ...]\nval_label = 
output_label[N_TRAIN:, ...]\nval_indices = data_indices[N_TRAIN:]", "_____no_output_____" ], [ "print(f'number of training set: {train_input.shape[0]}')\nprint(f'number of validation set: {val_input.shape[0]}')", "number of training set: 290\nnumber of validation set: 73\n" ], [ "with strategy.scope():\n train_dataset = tf.data.Dataset.from_tensor_slices((train_input, train_label))\n train_dataset = train_dataset.cache().shuffle(BATCH_SIZE*10).batch(BATCH_SIZE, drop_remainder=False)\n val_dataset = tf.data.Dataset.from_tensor_slices((val_input, val_label))\n val_dataset = val_dataset.cache().shuffle(BATCH_SIZE*10).batch(BATCH_SIZE, drop_remainder=False)", "_____no_output_____" ] ], [ [ "# Model construction", "_____no_output_____" ] ], [ [ "class ResNet1D(Model):\n def __init__(self):\n super(ResNet1D, self).__init__()\n self.n = [128, 128, 256, 256, 512] # number of nodes\n self.k = [1, 5, 10, 20, 50] # kernal size\n self.s = 2 # stride (= pooling size)\n \n self.conv1_1 = layers.Conv1D(self.n[0]/4, self.k[0], kernel_initializer='glorot_normal', padding='same')\n self.conv1_2 = layers.Conv1D(self.n[0]/4, self.k[1], kernel_initializer='glorot_normal', padding='same')\n self.conv1_3 = layers.Conv1D(self.n[0]/4, self.k[2], kernel_initializer='glorot_normal', padding='same')\n self.conv1_4 = layers.Conv1D(self.n[0]/4, self.k[3], kernel_initializer='glorot_normal', padding='same')\n self.conv1_5 = layers.Conv1D(self.n[0]/4, self.k[4], kernel_initializer='glorot_normal', padding='same')\n self.batch1 = layers.BatchNormalization()\n self.activation1 = layers.Activation(tf.nn.leaky_relu)\n self.pool1 = layers.MaxPooling1D(2)\n \n self.conv2 = layers.Conv1D(self.n[1], 1, kernel_initializer='glorot_normal', padding='valid')\n self.batch2 = layers.BatchNormalization()\n self.activation2 = layers.Activation(tf.nn.leaky_relu)\n self.pool2 = layers.MaxPooling1D(2)\n \n self.conv3 = layers.Conv1D(self.n[2], 1, kernel_initializer='glorot_normal', padding='valid')\n self.batch3 = layers.BatchNormalization()\n self.activation3 = layers.Activation(tf.nn.leaky_relu)\n self.pool3 = layers.MaxPooling1D(2)\n \n self.conv4 = layers.Conv1D(self.n[3], 1, kernel_initializer='glorot_normal', padding='valid')\n self.batch4 = layers.BatchNormalization()\n self.activation4 = layers.Activation(tf.nn.leaky_relu)\n self.pool4 = layers.MaxPooling1D(2)\n \n self.output_conv = layers.Conv1D(self.n[4], 1, kernel_initializer='glorot_normal', padding='valid')\n self.gate = layers.Dense(1, activation=tf.nn.sigmoid)\n \n def call(self, inp):\n \n inp = tf.concat([self.conv1_1(inp), self.conv1_2(inp), self.conv1_3(inp), self.conv1_4(inp), self.conv1_5(inp)], -1)\n inp = self.pool1(self.activation1(self.batch1(inp)))\n \n inp = self.pool2(self.activation2(self.batch2(self.conv2(inp))))\n inp = self.pool3(self.activation3(self.batch3(self.conv3(inp))))\n inp = self.pool4(self.activation4(self.batch4(self.conv4(inp))))\n \n inp = self.gate(self.output_conv(inp))\n \n return inp", "_____no_output_____" ], [ "callbacks = tf.keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss', factor=.5, patience=2, verbose=0, mode='min',\n min_delta=0.0001, cooldown=0, min_lr=0)\n\nsave = tf.keras.callbacks.ModelCheckpoint(\n BEST_PATH, monitor='val_loss', verbose=0,\n save_best_only=True, save_weights_only=True, mode='min', save_freq='epoch')\n\nearly_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=20) ", "_____no_output_____" ], [ "with strategy.scope():\n opt = 
tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE, epsilon=EPSILON)\n model = ResNet1D()\n model.compile(optimizer=opt, loss='mae')\n model.fit(train_dataset, epochs=TRAINING_EPOCHS, validation_data=val_dataset,\n verbose=1, callbacks=[callbacks, save, early_stop]) ", "Epoch 1/200\nWARNING:tensorflow:From /home/phil/.virtualenvs/tf20/lib/python3.6/site-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.data.Iterator.get_next_as_optional()` instead.\nINFO:tensorflow:batch_all_reduce: 28 all-reduces with algorithm = nccl, num_packs = 1\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:batch_all_reduce: 28 all-reduces with algorithm = nccl, num_packs = 1\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\n19/19 [==============================] - ETA: 0s - loss: 0.2230INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\n19/19 [==============================] - 2s 119ms/step - loss: 0.2230 - val_loss: 0.2320\nEpoch 2/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.2278 - val_loss: 0.2387\nEpoch 3/200\n19/19 [==============================] - 1s 32ms/step - loss: 0.1945 - val_loss: 0.2181\nEpoch 4/200\n19/19 [==============================] - 0s 25ms/step - loss: 0.1911 - val_loss: 0.2055\nEpoch 5/200\n19/19 [==============================] - 0s 25ms/step - loss: 0.1268 - val_loss: 0.0576\nEpoch 6/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.1245 - val_loss: 0.1169\nEpoch 7/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.1080 - val_loss: 0.0588\nEpoch 8/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0910 - val_loss: 0.0675\nEpoch 9/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0957 - val_loss: 0.0802\nEpoch 10/200\n19/19 [==============================] - 0s 25ms/step - loss: 0.0765 - val_loss: 0.0446\nEpoch 11/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0741 - val_loss: 0.0450\nEpoch 12/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0710 - val_loss: 0.0754\nEpoch 13/200\n19/19 [==============================] - 
0s 22ms/step - loss: 0.0702 - val_loss: 0.0513\nEpoch 14/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0747 - val_loss: 0.0625\nEpoch 15/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0667 - val_loss: 0.0553\nEpoch 16/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0588 - val_loss: 0.0517\nEpoch 17/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0607 - val_loss: 0.0446\nEpoch 18/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0597 - val_loss: 0.0485\nEpoch 19/200\n19/19 [==============================] - 0s 25ms/step - loss: 0.0649 - val_loss: 0.0424\nEpoch 20/200\n19/19 [==============================] - 0s 25ms/step - loss: 0.0585 - val_loss: 0.0401\nEpoch 21/200\n19/19 [==============================] - 0s 25ms/step - loss: 0.0577 - val_loss: 0.0365\nEpoch 22/200\n19/19 [==============================] - 0s 25ms/step - loss: 0.0636 - val_loss: 0.0355\nEpoch 23/200\n19/19 [==============================] - 0s 26ms/step - loss: 0.0633 - val_loss: 0.0326\nEpoch 24/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0572 - val_loss: 0.0368\nEpoch 25/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0657 - val_loss: 0.0352\nEpoch 26/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0564 - val_loss: 0.0347\nEpoch 27/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0524 - val_loss: 0.0349\nEpoch 28/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0579 - val_loss: 0.0357\nEpoch 29/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0536 - val_loss: 0.0352\nEpoch 30/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0552 - val_loss: 0.0357\nEpoch 31/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0556 - val_loss: 0.0380\nEpoch 32/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0557 - val_loss: 0.0385\nEpoch 33/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0653 - val_loss: 0.0398\nEpoch 34/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0586 - val_loss: 0.0402\nEpoch 35/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0543 - val_loss: 0.0414\nEpoch 36/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0689 - val_loss: 0.0432\nEpoch 37/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0558 - val_loss: 0.0431\nEpoch 38/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0590 - val_loss: 0.0429\nEpoch 39/200\n19/19 [==============================] - 1s 32ms/step - loss: 0.0587 - val_loss: 0.0448\nEpoch 40/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0740 - val_loss: 0.0460\nEpoch 41/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0622 - val_loss: 0.0460\nEpoch 42/200\n19/19 [==============================] - 0s 22ms/step - loss: 0.0594 - val_loss: 0.0456\nEpoch 43/200\n19/19 [==============================] - 0s 23ms/step - loss: 0.0585 - val_loss: 0.0452\n" ], [ "model.load_weights(BEST_PATH)", "_____no_output_____" ], [ "model.evaluate(val_dataset)", "5/5 [==============================] - 0s 4ms/step - loss: 0.0326\n" ], [ "pred_output = model.predict(val_input)", "_____no_output_____" ], [ "pred_output = pred_output*(OUTPUT_MAX - OUTPUT_MIN) + OUTPUT_MIN\nval_label = val_label*(OUTPUT_MAX - OUTPUT_MIN) + OUTPUT_MIN", "_____no_output_____" ], [ "fig = 
plt.figure(figsize=((8.5/2.54*2), (6/2.54*2)))\nax0 = plt.subplot()\n\nax0.spines['right'].set_visible(False)\nax0.spines['left'].set_position(('outward', 5))\nax0.spines['bottom'].set_position(('outward', 5))\n\nax0.plot(val_label, pred_output, 'o', ms=5, mec='k', c=cmap[0])\n\n\nfig.tight_layout()", "_____no_output_____" ], [ "pred_df = pd.DataFrame(val_label, index=val_indices[:, 0], columns=['label'])\npred_df['pred'] = pred_output\npred_df.index = pd.DatetimeIndex(pred_df.index)\npred_df = pred_df.sort_index()", "_____no_output_____" ], [ "fig = plt.figure(figsize=((8.5/2.54*2), (6/2.54*2)))\nax0 = plt.subplot()\n\nax0.spines['right'].set_visible(False)\nax0.spines['left'].set_position(('outward', 5))\nax0.spines['bottom'].set_position(('outward', 5))\n\nax0.plot(pred_df.index, pred_df['label'], '-o', ms=5, mec='k', c=cmap[4])\nax0.plot(pred_df.index, pred_df['pred'], 'o', ms=5, mec='k', c=cmap[0])\n\n\nfig.tight_layout()", "_____no_output_____" ], [ "pred_df.to_csv('./results/model_output/ffnn_fw.csv')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec77a3a55f4a98f2d064a2e811d33049fa0b05e3
496
ipynb
Jupyter Notebook
hello.ipynb
Vikram-Singh-Yadav/object-detection-tensorflow
531443830b3f867387d43fa6d7f3ae37eb89daba
[ "MIT" ]
null
null
null
hello.ipynb
Vikram-Singh-Yadav/object-detection-tensorflow
531443830b3f867387d43fa6d7f3ae37eb89daba
[ "MIT" ]
null
null
null
hello.ipynb
Vikram-Singh-Yadav/object-detection-tensorflow
531443830b3f867387d43fa6d7f3ae37eb89daba
[ "MIT" ]
null
null
null
15.5
43
0.514113
[ [ [ "print('Hello')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
ec77b4377a519c367c9ff4628280a5fc53f96073
206,923
ipynb
Jupyter Notebook
DigitalSignalProcessing/dsp_add_whitenoise.ipynb
tam17aki/speech_process_exercise
5acca627fac3114e75749d5e272405b33eee2e04
[ "MIT" ]
74
2020-06-08T04:31:22.000Z
2022-03-21T06:51:20.000Z
DigitalSignalProcessing/dsp_add_whitenoise.ipynb
theLittleTiger/speech_process_exercise
c241becdf0ab1f79f65fc445513df3e0fe7638b7
[ "MIT" ]
null
null
null
DigitalSignalProcessing/dsp_add_whitenoise.ipynb
theLittleTiger/speech_process_exercise
c241becdf0ab1f79f65fc445513df3e0fe7638b7
[ "MIT" ]
5
2020-07-14T16:41:06.000Z
2022-03-30T23:22:03.000Z
932.085586
88,836
0.923595
[ [ [ "# 音声に白色雑音を加える", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.io import wavfile\nfrom IPython.display import Audio", "_____no_output_____" ], [ "IN_WAVE_FILE = \"in.wav\" # モノラル音声(前提)\nOUT_WAVE_FILE = \"out_whitenoise.wav\"", "_____no_output_____" ], [ "# 音声データ読み込み (fsがサンプリング周波数、dataは音声データ)\nfs, speech_data = wavfile.read(IN_WAVE_FILE)", "_____no_output_____" ], [ "# 音声データの長さ\nn_speech = len(speech_data)\n\n# 雑音だけの区間の長さ\nn_noise = 4000\n\n# 全体の長さ\nn_samples = n_noise + n_speech", "_____no_output_____" ], [ "# 白色雑音を生成\nwhite_noise = np.random.normal(scale=0.04, size=n_samples)\n\n# 2バイトのデータとして書き込むためにスケールを調整\nwhite_noise = white_noise * np.iinfo(np.int16).max\n\n# ゲインを調整\nwhite_noise = 0.5 * white_noise", "_____no_output_____" ], [ "# 白色雑音を混ぜる\nmixed_signal = white_noise # 最初に雑音を入れる\nmixed_signal[n_noise:] += speech_data # 後から音声を足す", "_____no_output_____" ], [ "# プロット枠を確保 (10がヨコのサイズ、4はタテのサイズ)\nfig = plt.figure(figsize=(12, 8))\naxes1 = fig.add_subplot(2, 1, 1)\nn_samples = len(speech_data)\ntime = np.arange(n_samples) / fs\naxes1.plot(time, speech_data) # 音声データのプロット\naxes1.set_xlabel(\"Time (sec)\") # x軸のラベル\naxes1.set_ylabel(\"Amplitude\") # y軸のラベル\naxes1.set_title(\"Original speech\")\n\naxes2 = fig.add_subplot(2, 1, 2)\nn_samples = len(mixed_signal)\ntime = np.arange(n_samples) / fs\naxes2.plot(time, mixed_signal) # 音声データのプロット\naxes2.set_xlabel(\"Time (sec)\") # x軸のラベル\naxes2.set_ylabel(\"Amplitude\") # y軸のラベル\naxes2.set_title(\"Mixed speech (original + white noise)\")\n\n# 画像を画面表示\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "## 音声の再生(オリジナル)", "_____no_output_____" ] ], [ [ "Audio(speech_data, rate=fs)", "_____no_output_____" ] ], [ [ "## 音声の再生(白色雑音入り)", "_____no_output_____" ] ], [ [ "Audio(mixed_signal, rate=fs)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec77be37fb791c5e49980f6eca820c6cb1604945
44,565
ipynb
Jupyter Notebook
Week_1.ipynb
srodriguex/coursera_data_analysis_tools
3d841a12a52e9cff8d6791874c7c0264a806141b
[ "MIT" ]
null
null
null
Week_1.ipynb
srodriguex/coursera_data_analysis_tools
3d841a12a52e9cff8d6791874c7c0264a806141b
[ "MIT" ]
null
null
null
Week_1.ipynb
srodriguex/coursera_data_analysis_tools
3d841a12a52e9cff8d6791874c7c0264a806141b
[ "MIT" ]
null
null
null
77.369792
14,876
0.737126
[ [ [ "# Week 1 Assignment: Running an analysis of variance\n\nIn this [assignment](https://www.coursera.org/learn/data-analysis-tools/peer/nhp1Q/running-an-analysis-of-variance) I've chosen the [Gapminder](./data/gapminder.csv) dataset. Looking through its [codebook](./data/GapMinderCodebook.pdf) we've decided to study two variables, incomeperperson and lifeexpectancy:\n\n\n- incomeperperson\n\n> 2010 Gross Domestic Product per capita in constant 2000 US$. The World Bank Work Development inflation but not the differences in the cost of living between countries Indicators\nhas been taken into account.\n\n\n\n- lifeexpectancy\n\n> 2011 life expectancy at birth (years). The average number of years a newborn child would live if current mortality patterns were to stay the same.\n\nWe'll be studying the relationship between the income and life expectancy. To fulfil the assignment requirements, we'll transformate the numeric variable `incomeperperson` into categorical using the [US poverty threshold](http://www.irp.wisc.edu/faqs/faq1.htm) as a guideline to divide this variable up in three values: countries wich income per capita is less than 40% of this threshold (not included) will be classified as **low income**, those between 40% (included) and the thresold (not included) will be classified as **medium income**, and those equal or above will be **high income**.\n\nThe null hypothesis $H_o$ is that the life expectancy between the countries with low, medium and high income are the same, or in other words, income is not a drive for life expectancy. The alternative $H_a$ hypothesis is that life expectancy between the classified countries are not the same.\n\n", "_____no_output_____" ] ], [ [ "# Import all ploting and scientific library,\n# and embed figures in this file.\n%pylab inline\n\n# Package to manipulate dataframes.\nimport pandas as pd\n\n# Nice looking plot functions.\nimport seaborn as sn\n\n# Read the dataset.\ndf = pd.read_csv('data/gapminder.csv')\n\n# Set the country name as the index of the dataframe.\ndf.index = df.country\n\n# This column is no longer needed.\n#del df['country']\n\n# Select only the variables we're interested.\ndf = df[['incomeperperson','lifeexpectancy']]\n\n# Convert the types properly.\ndf = df.convert_objects(convert_numeric=True)\n\n# Remove missing values.\ndf = df.dropna()\n\n\n", "Populating the interactive namespace from numpy and matplotlib\n" ] ], [ [ "## Descriptive analysis\n\nLet's take a look at the variables.", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ] ], [ [ "## Create the categorical variable\n\nWe'll create the categorical variable `income_level` based on the `incomeperperson` and the USA poverty threshold.", "_____no_output_____" ] ], [ [ "# http://www.irp.wisc.edu/faqs/faq1.htm\nincome_threshold= 11720\nincome_level = pd.cut(df.incomeperperson, \n [0, income_threshold*0.4, income_threshold, 110000 ], \n labels=['Low income', 'Medium income', 'High income'])\n", "_____no_output_____" ] ], [ [ "# http://www.irp.wisc.edu/faqs/faq1.htm\nincome_threshold= 11720\nincome_level = pd.cut(df.incomeperperson, \n [0, income_threshold, 110000 ], \n labels=['Low income', 'High income'])\n", "_____no_output_____" ] ], [ [ "Looking at the distribution of countries by the new categorical variable `income_level` in the graph below, the majority of them take low income per capita, followed by high income and medium income.", "_____no_output_____" ] ], [ [ "il = income_level.value_counts()\nf, a = 
subplots()\nf.set_size_inches(6,3)\nsn.barplot(il.values, il.index.tolist(), ax=a);\n\na.set_title('Number of countries by income_level', fontsize=14);\nyticks(fontsize=12),xticks(fontsize=12);", "_____no_output_____" ] ], [ [ "Let's save the `income_level` variable in our data frame. We must explicitly convert it to `object` because of an actual misunderstanding betwen `pandas` and `stastmodels` packages.", "_____no_output_____" ] ], [ [ "import numpy as np\ndf['income_level'] = income_level.astype(np.object)", "_____no_output_____" ] ], [ [ "Let's take a look at the population means by the `income_level` categories. The table and graph below show that the life expectancy means are diferent among the countries income levels, as our alternative hyphothesis $H_a$ states.", "_____no_output_____" ] ], [ [ "g = df.groupby('income_level')\nincome_mean = g.mean()\nincome_mean", "_____no_output_____" ], [ "sn.boxplot(df.income_level, df.lifeexpectancy);\ntitle('Life expectancy by income level groups', fontsize=14, fontweight='bold');\nxticks(fontsize=12)", "_____no_output_____" ] ], [ [ "In the next session, we'll see whether $H_o$ can be rejected or not.", "_____no_output_____" ], [ "## Calculate the F statistic\n\nThe F value calculated below shows the variability between the three groups is 64 times greater than the variability within groups. The p-value $1.02*10^{-21}$ is far below 0.05, thus we can reject $H_o$ and select $H_a$: life expectancy varies across income per capita.", "_____no_output_____" ] ], [ [ "import statsmodels.formula.api as smf\nimport statsmodels.stats.multicomp as multi \nmodel = smf.ols('lifeexpectancy ~ C(income_level)', df)\nresult = model.fit()\nresult.summary()", "_____no_output_____" ] ], [ [ "## Post hoc test\n\nFinally, we'll conduct a post hoc test to see wich group of income level is diferent from the others. By the table below we can reject the equality between all pairwise comparison. So, <b>the life expectancy mean are diferent among income_level groups<b>.", "_____no_output_____" ] ], [ [ "res = multi.pairwise_tukeyhsd(df.lifeexpectancy, df.income_level)\nprint(res.summary())\n", " Multiple Comparison of Means - Tukey HSD,FWER=0.05 \n===========================================================\n group1 group2 meandiff lower upper reject\n-----------------------------------------------------------\nHigh income Low income -15.2476 -18.6072 -11.8881 True \nHigh income Medium income -5.8696 -10.1948 -1.5444 True \n Low income Medium income 9.378 5.7743 12.9818 True \n-----------------------------------------------------------\n" ] ], [ [ "## Conclusion", "_____no_output_____" ], [ "By the F statistic and post hoc test above, we can say that the incompe per capita is related to life expectancy. Remembering that this income variable is the ratio of Gross Domestic Prodcut by population, the more productive and economic developed a country is, the more its citizens can live.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec77c3d81badb72b70a833a02f8e9ce49f14b850
8,251
ipynb
Jupyter Notebook
courses/machine_learning/deepdive/07_structured/6_deploy.ipynb
yamadatomonori/training-data-analyst
e0427bf35ffedfc6bd55ec65ebef2b39264456d1
[ "Apache-2.0" ]
58
2019-05-16T00:12:11.000Z
2022-03-14T06:12:12.000Z
courses/machine_learning/deepdive/07_structured/6_deploy.ipynb
yamadatomonori/training-data-analyst
e0427bf35ffedfc6bd55ec65ebef2b39264456d1
[ "Apache-2.0" ]
null
null
null
courses/machine_learning/deepdive/07_structured/6_deploy.ipynb
yamadatomonori/training-data-analyst
e0427bf35ffedfc6bd55ec65ebef2b39264456d1
[ "Apache-2.0" ]
46
2018-03-03T17:17:27.000Z
2022-03-24T14:56:46.000Z
30.787313
553
0.579081
[ [ [ "<h1> Deploying and predicting with model </h1>\n\nThis notebook illustrates:\n<ol>\n<li> Deploying model\n<li> Predicting with model\n</ol>", "_____no_output_____" ] ], [ [ "# change these to try this notebook out\nBUCKET = 'cloud-training-demos-ml'\nPROJECT = 'cloud-training-demos'\nREGION = 'us-central1'", "_____no_output_____" ], [ "import os\nos.environ['BUCKET'] = BUCKET\nos.environ['PROJECT'] = PROJECT\nos.environ['REGION'] = REGION", "_____no_output_____" ], [ "%%bash\nif ! gsutil ls | grep -q gs://${BUCKET}/; then\n gsutil mb -l ${REGION} gs://${BUCKET}\n gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight\nfi", "_____no_output_____" ] ], [ [ "<h2> Deploy trained model </h2>\n<p>\nDeploying the trained model to act as a REST web service is a simple gcloud call.", "_____no_output_____" ] ], [ [ "%bash\ngsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/", "gs://cloud-training-demos-ml/babyweight/trained_model/export/exporter/\ngs://cloud-training-demos-ml/babyweight/trained_model/export/exporter/1516730678/\ngs://cloud-training-demos-ml/babyweight/trained_model/export/exporter/1516731289/\ngs://cloud-training-demos-ml/babyweight/trained_model/export/exporter/1516731738/\n" ], [ "%bash\nMODEL_NAME=\"babyweight\"\nMODEL_VERSION=\"ml_on_gcp\"\nMODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)\necho \"Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes\"\n#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}\n#gcloud ml-engine models delete ${MODEL_NAME}\n#gcloud ml-engine models create ${MODEL_NAME} --regions $REGION\ngcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version 1.4", "_____no_output_____" ] ], [ [ "<h2> Use model to predict (online prediction) </h2>\n<p>\nSend a JSON request to the endpoint of the service to make it predict a baby's weight. 
The order of the responses are the order of the instances.", "_____no_output_____" ] ], [ [ "from googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\nimport json\n\ncredentials = GoogleCredentials.get_application_default()\napi = discovery.build('ml', 'v1', credentials=credentials)\n\nrequest_data = {'instances':\n [\n {\n 'is_male': 'True',\n 'mother_age': 26.0,\n 'plurality': 'Single(1)',\n 'gestation_weeks': 39\n },\n {\n 'is_male': 'False',\n 'mother_age': 29.0,\n 'plurality': 'Single(1)',\n 'gestation_weeks': 38\n },\n {\n 'is_male': 'True',\n 'mother_age': 26.0,\n 'plurality': 'Triplets(3)',\n 'gestation_weeks': 39\n },\n {\n 'is_male': 'Unknown',\n 'mother_age': 29.0,\n 'plurality': 'Multiple(2+)',\n 'gestation_weeks': 38\n },\n ]\n}\n\nparent = 'projects/%s/models/%s/versions/%s' % (PROJECT, 'babyweight', 'ml_on_gcp')\nresponse = api.projects().predict(body=request_data, name=parent).execute()\nprint \"response={0}\".format(response)", "[2018-01-23 21:50:04,882] {discovery.py:863} INFO - URL being requested: POST https://ml.googleapis.com/v1/projects/cloud-training-demos/models/babyweight/versions/ml_on_gcp:predict?alt=json\n[2018-01-23 21:50:04,883] {client.py:614} INFO - Attempting refresh to obtain initial access_token\n[2018-01-23 21:50:04,885] {client.py:903} INFO - Refreshing access_token\nresponse={u'predictions': [{u'predictions': [7.659826755523682]}, {u'predictions': [7.222918510437012]}, {u'predictions': [6.318112373352051]}, {u'predictions': [6.186887264251709]}]}\n" ] ], [ [ "The predictions for the four instances were: 7.66, 7.22, 6.32 and 6.19 pounds respectively when I ran it (your results might be different).", "_____no_output_____" ], [ "<h2> Use model to predict (batch prediction) </h2>\n<p>\nBatch prediction is commonly used when you thousands to millions of predictions.\nCreate a file withe one instance per line and submit using gcloud.\n<p>\nTODO: Note that the results are not sorted; you will have to use a key to match inputs to predictions and wrap the estimator using ```tf.contrib.estimator.forward_features```. This has not been done.", "_____no_output_____" ] ], [ [ "%writefile inputs.json\n{\"is_male\": \"True\", \"mother_age\": 26.0, \"plurality\": \"Single(1)\", \"gestation_weeks\": 39}", "Writing inputs.json\n" ], [ "%bash\nINPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json\nOUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs\ngsutil cp inputs.json $INPUT\ngsutil -m rm -rf $OUTPUT \ngcloud beta ml-engine jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \\\n --data-format=TEXT --region ${REGION} \\\n --input-paths=gs://${BUCKET}/babyweight/batchpred/inputs.json \\\n --output-path=$OUTPUT \\\n --model=babyweight --version=soln", "_____no_output_____" ] ], [ [ "Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec77c8d22244207334cbced3a4bb7508820cb9e2
865,578
ipynb
Jupyter Notebook
benchmarks/figure_forecasting_benchmarks_figures.ipynb
williamgilpin/dysts
f56fc495c5effa8bcb7002cf4c8d12931832f180
[ "Apache-2.0" ]
71
2021-09-08T15:38:41.000Z
2022-03-28T16:13:56.000Z
benchmarks/figure_forecasting_benchmarks_figures.ipynb
williamgilpin/dysts
f56fc495c5effa8bcb7002cf4c8d12931832f180
[ "Apache-2.0" ]
6
2021-10-05T00:35:02.000Z
2021-12-27T21:39:02.000Z
benchmarks/figure_forecasting_benchmarks_figures.ipynb
williamgilpin/dysts
f56fc495c5effa8bcb7002cf4c8d12931832f180
[ "Apache-2.0" ]
7
2021-11-07T03:26:56.000Z
2022-02-09T12:20:11.000Z
806.689655
95,524
0.952276
[ [ [ "import sys\nimport os\nimport json\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nsns.set_style()\nimport pandas as pd\n\n# import dysts\nfrom dysts.datasets import *\nfrom dysts.flows import *\nfrom dysts.base import *\nfrom dysts.utils import *\nfrom dysts.analysis import *\n\nimport degas as dg\ndg.set_style()\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "# Tune hyperparameters", "_____no_output_____" ] ], [ [ "%%bash\n\n# python3 find_hyperparameters.py", "Process is terminated.\n" ] ], [ [ "# Run and record forecasts on test data", "_____no_output_____" ] ], [ [ "%%bash --out output --err error\n\n# python3 compute_benchmarks.py", "Process is terminated.\n" ] ], [ [ "## Load Forecasting Benchmark Results", "_____no_output_____" ] ], [ [ "import os\nimport json\n\nGRANULARITY = 100\n\nwith open(\n os.getcwd()\n + \"/results/results_test_univariate__pts_per_period_\" + str(GRANULARITY) \n + \"__periods_12.json\",\n \"r\",\n) as file:\n all_results = json.load(file)\n\n# with open(os.getcwd() + \"/results/results_test_univariate__pts_per_period_15__periods_12_noise.json\", \"r\") as file:\n# all_results = json.load(file)\n\n\n## Convert R2 into a distance\nall_r2 = list()\nfor equation_name in all_results:\n for model_name in all_results[equation_name]:\n if model_name == \"values\":\n continue\n else:\n ## convert r2 to a pseudodistance\n all_results[equation_name][model_name][\"r2_score\"] = (\n 1 - all_results[equation_name][model_name][\"r2_score\"]\n )\n\n ## Coefficient of variation must be normed\n all_results[equation_name][model_name][\"coefficient_of_variation\"] = np.abs(\n all_results[equation_name][model_name][\"coefficient_of_variation\"]\n )\n\n ## Drop RMSE because it overlaps with MSE\n all_results[equation_name][model_name].pop(\"rmse\", None)\n\n\nfrom darts import TimeSeries\nfrom darts.metrics import mae\n\nwrap_array = lambda x: TimeSeries.from_dataframe(pd.DataFrame(np.squeeze(np.array(x))))\nmae_func = lambda x, y: mae(wrap_array(x), wrap_array(y))\n\n\n## Calculate MASE\nfor equation_name in all_results:\n baseline_onestep = mae_func(\n all_results[equation_name][\"values\"][1:],\n all_results[equation_name][\"values\"][:-1],\n )\n for model_name in all_results[equation_name]:\n if model_name == \"values\":\n continue\n else:\n mae_val = all_results[equation_name][model_name][\"mae\"]\n all_results[equation_name][model_name][\"mase\"] = mae_val / baseline_onestep\n\n## get best models\nall_best_models = list()\nfor equation_name in all_results:\n all_models = list()\n all_smapes = list()\n for model_name in all_results[equation_name]:\n if model_name != \"values\":\n all_models.append(model_name)\n all_smapes.append(all_results[equation_name][model_name][\"smape\"])\n all_best_models.append(all_models[np.argmin(all_smapes)])\n\nall_results_transposed = dict()\nfor model_name in all_results[\"Lorenz\"]:\n if model_name == \"values\":\n continue\n all_results_transposed[model_name] = dict()\nfor equation_name in all_results:\n for model_name in all_results[equation_name]:\n if model_name == \"values\":\n continue\n all_results_transposed[model_name][equation_name] = all_results[equation_name][\n model_name\n ]\n\n\nhist_values = dict()\nfor model_name in all_results_transposed:\n smape_vals = list()\n for key in all_results_transposed[model_name]:\n smape_vals.append(all_results_transposed[model_name][key][\"smape\"])\n\n hist_values[model_name] = smape_vals.copy()\n\nif GRANULARITY == 15:\n 
hist_values_low = hist_values.copy()\nelif GRANULARITY == 100:\n hist_values_high = hist_values.copy()\nelse:\n raise ValueError(\"Granularity must be 15 or 100 points per period.\")\n\n\nall_metric_names = list(all_results_transposed[\"ARIMA\"][\"Aizawa\"].keys())\nall_metric_names.remove(\"prediction\")\n\n## Compute forecast metrics\nmetric_records = dict()\nfor metric_name in all_metric_names:\n metric_records[metric_name] = list()\nfor model_name in all_results_transposed:\n for equation_name in all_results_transposed[model_name]:\n for metric_name in all_metric_names:\n metric_records[metric_name].append(\n all_results_transposed[model_name][equation_name][metric_name]\n )\ndf_metrics = pd.DataFrame(metric_records)\n\n\n# df_metrics[\"coefficient_of_variation\"] = np.abs(df_metrics[\"coefficient_of_variation\"])\n# df_metrics = df_metrics.drop('rmse', 1)", "_____no_output_____" ] ], [ [ "### Load mathematical properties", "_____no_output_____" ] ], [ [ "import dysts.flows\n\nmax_lyap = list()\nbest_scores = list()\nfor equation_name in all_results:\n \n all_scores_per_equation = list()\n for model_name in all_results[equation_name]:\n if model_name == \"values\":\n continue\n all_scores_per_equation.append(all_results[equation_name][model_name][\"smape\"])\n \n best_scores.append(min(all_scores_per_equation))\n\n eq = getattr(dysts.flows, equation_name)()\n max_lyap.append(eq.maximum_lyapunov_estimated)\n \n", "_____no_output_____" ] ], [ [ "### Correlate forecasting models", "_____no_output_____" ] ], [ [ "model_records = dict()\nfor model_name in all_results_transposed.keys():\n model_records[model_name ] = list()\nfor model_name in all_results_transposed:\n for equation_name in all_results_transposed[model_name]:\n model_records[model_name].append(all_results_transposed[model_name][equation_name][\"smape\"])\ndf_models = pd.DataFrame(model_records)\n\n\ncorr_array = np.array(df_models.corr(method='spearman'))\nnp.fill_diagonal(corr_array, np.nan)\n\ndf_models_sorted = df_models.iloc[:, np.argsort(np.nanmedian(corr_array, axis=0))[::-1]]\ndf_models_sorted = df_models.iloc[:, np.argsort(np.nanmax(corr_array, axis=0))[::-1]]\n\n# df_models_sorted = df_models.iloc[:, sort_order]\n\nax = sns.heatmap(df_models_sorted.corr(method='spearman'), cmap=\"mako\", vmin=0, vmax=1)\nax.set_aspect('equal')\nax.tick_params(axis='both', which='both', length=0)\n\n# dg.better_savefig(\"../private_writing/fig_resources/model_correlation_map.png\", dpi=600)", "_____no_output_____" ] ], [ [ "### Compare forecasting metrics", "_____no_output_____" ] ], [ [ "\ncorr_array = np.array(df_metrics.corr(method='spearman'))\nnp.fill_diagonal(corr_array, np.nan)\n\nmetric_sort_inds = np.argsort(np.nanmedian(corr_array, axis=0))[::-1]\nmetric_sort_inds = np.argsort(np.nanmax(corr_array, axis=0))[::-1]\ndf_metrics_sorted = df_metrics.iloc[:, metric_sort_inds]\n\n\nax = sns.heatmap(df_metrics_sorted.corr(method='spearman'), cmap=\"mako\", vmin=0, vmax=1)\nax.set_aspect('equal')\nax.tick_params(axis='both', which='both', length=0)\n\n# dg.better_savefig(\"../private_writing/fig_resources/metric_map.png\", dpi=600)", "_____no_output_____" ] ], [ [ "### Compare forecasting results against mathematical properties", "_____no_output_____" ] ], [ [ "chosen_metric = \"smape\"\nmodels_df = dict()\nmetrics_df = dict()\nfor equation_name in all_results:\n models_df[equation_name] = dict()\n metrics_df[equation_name] = dict()\n for model_name in all_results[equation_name]:\n if model_name == \"values\": continue\n 
models_df[equation_name][model_name] = all_results[equation_name][model_name][chosen_metric]\n \n for metric_name in all_results[equation_name][\"ARIMA\"]:\n if metric_name == \"prediction\": continue\n metrics_df[equation_name][metric_name] = all_results[equation_name][\"NBEATSModel\"][metric_name]\n \n \nmodels_df = pd.DataFrame(models_df).transpose()\nmetrics_df = pd.DataFrame(metrics_df).transpose()", "_____no_output_____" ], [ "import dysts.flows\n\nattributes = ['maximum_lyapunov_estimated', 'kaplan_yorke_dimension', 'multiscale_entropy', 'correlation_dimension']\nall_properties = dict()\nfor equation_name in get_attractor_list():\n eq = getattr(dysts.flows, equation_name)()\n attr_vals = [getattr(eq, item, None) for item in attributes]\n \n all_properties[equation_name] = dict(zip(attributes, attr_vals))\n \nall_properties = pd.DataFrame(all_properties).transpose().dropna()", "_____no_output_____" ], [ "cross_corr = pd.concat([metrics_df, all_properties], axis=1, keys=['metrics_df', 'all_properties']).corr(method=\"spearman\").loc['metrics_df', 'all_properties']\nmath_sort_inds = np.argsort(np.max(cross_corr, axis=0))[::-1]\n\nax = sns.heatmap(cross_corr.transpose().iloc[math_sort_inds, metric_sort_inds], cmap=\"mako\", vmin=0, vmax=1)\nax.set_aspect(1)\nax.tick_params(axis='both', which='both', length=0)\n\n# dg.better_savefig(\"../private_writing/fig_resources/metric_math.png\", dpi=600)", "_____no_output_____" ], [ "cross_corr = pd.concat([models_df, all_properties], axis=1, keys=['models_df', 'all_properties']).corr(method=\"spearman\").loc['models_df', 'all_properties']\nmath_sort_inds = np.argsort(np.max(cross_corr, axis=0))[::-1]\n\nax = sns.heatmap(cross_corr.transpose().iloc[math_sort_inds][df_models_sorted.columns], cmap=\"mako\", vmin=0, vmax=1)\nax.set_aspect(1)\nax.tick_params(axis='both', which='both', length=0)\n\n# dg.better_savefig(\"../private_writing/fig_resources/models_math_corr.png\", dpi=600)", "_____no_output_____" ], [ "\n## Easiest and hardest systems\nprint(\"Easiest systems: \", np.unique(models_df.idxmin(axis=0), return_counts=True))\nprint(\"Hardest systems: \", np.unique(models_df.idxmax(axis=0), return_counts=True))\n\n\nall_median_names = list()\nfor key in models_df.keys():\n all_median_names.append(models_df[models_df[key] == models_df[key].quantile(interpolation='nearest')].index[0])\n \nprint(\"Middle systems: \", np.unique(all_median_names, return_counts=True))", "Easiest systems: (array(['ArnoldWeb', 'StickSlipOscillator', 'Torus'], dtype=object), array([ 1, 5, 10]))\nHardest systems: (array(['CellularNeuralNetwork', 'Chua', 'CoevolvingPredatorPrey',\n 'DoublePendulum', 'HyperQi', 'LuChen', 'SprottTorus',\n 'TurchinHanski'], dtype=object), array([2, 1, 2, 1, 2, 1, 6, 1]))\nMiddle systems: (array(['ChenLee', 'Duffing', 'Finance', 'Halvorsen', 'HyperYan',\n 'JerkCircuit', 'LorenzStenflo', 'OscillatingFlow', 'Rossler',\n 'Rucklidge', 'SprottA', 'SprottF', 'SprottG', 'ThomasLabyrinth'],\n dtype='<U15'), array([1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1]))\n" ], [ "plt.figure()\nsol = Hadley().make_trajectory(80000, resample=True, pts_per_period=100)\nplt.plot(sol[:, 0], sol[:, 2], \".\", color=dg.blue, markersize=0.1)\ndg.fixed_aspect_ratio(1)\ndg.vanish_axes()\n# dg.better_savefig(\"../private_writing/fig_resources/lorenz.png\", dpi=600)\n", "_____no_output_____" ], [ "plt.figure()\nsol = Lorenz().make_trajectory(80000, resample=True, pts_per_period=100)\nplt.plot(sol[:, 0], sol[:, 2], \".\", color=dg.blue, 
markersize=0.1)\ndg.fixed_aspect_ratio(1)\ndg.vanish_axes()\n# dg.better_savefig(\"../private_writing/fig_resources/lorenz.png\", dpi=600)\n", "_____no_output_____" ], [ "from dysts.flows import SprottTorus, StickSlipOscillator, Torus, SprottG\n\nstyle1 = {\"color\": (0.4, 0.4, 0.4)}\n\nstyle1 = {\"color\": dg.blue}\n\nplt.figure()\nsol = Hadley().make_trajectory(40000, resample=True, pts_per_period=400)\nplt.plot(sol[:, 0], sol[:, 1], **style1)\ndg.fixed_aspect_ratio(1)\ndg.vanish_axes()\ndg.better_savefig(\"../private_writing/fig_resources/hadley2.png\", dpi=600)\n\nplt.figure()\nsol = WindmiReduced().make_trajectory(20000, resample=True, pts_per_period=400)\nplt.plot(sol[:, 0], sol[:, 1], **style1)\ndg.fixed_aspect_ratio(1)\ndg.vanish_axes()\ndg.better_savefig(\"../private_writing/fig_resources/windmi2.png\", dpi=600)\n\n\nplt.figure()\nsol = HindmarshRose().make_trajectory(30000, resample=True, pts_per_period=400)\nplt.plot(sol[:, 0], sol[:, 1], **style1)\ndg.fixed_aspect_ratio(1)\ndg.vanish_axes()\ndg.better_savefig(\"../private_writing/fig_resources/hindmarsh2.png\", dpi=600)\n\n\nplt.figure()\nsol = DoubleGyre().make_trajectory(30000, resample=True, pts_per_period=400)\nplt.plot(sol[:, 0], sol[:, 1], **style1)\ndg.fixed_aspect_ratio(1)\ndg.vanish_axes()\ndg.better_savefig(\"../private_writing/fig_resources/gyre2.png\", dpi=600)\n\nplt.figure()\nsol = SprottTorus().make_trajectory(30000, resample=True, pts_per_period=400)\nplt.plot(sol[:, 0], sol[:, 1], **style1)\ndg.fixed_aspect_ratio(1)\ndg.vanish_axes()\ndg.better_savefig(\"../private_writing/fig_resources/sprott_torus2.png\", dpi=600)\n\nplt.figure()\nsol = StickSlipOscillator().make_trajectory(20000, resample=True, pts_per_period=400)\nplt.plot(sol[:, 0], sol[:, 1], **style1)\ndg.fixed_aspect_ratio(1)\ndg.vanish_axes()\ndg.better_savefig(\"../private_writing/fig_resources/stickslip2.png\", dpi=600)\n\nplt.figure()\nsol = SprottG().make_trajectory(10000, resample=True, pts_per_period=400)\nplt.plot(sol[:, 0], sol[:, 1], **style1)\ndg.fixed_aspect_ratio(1)\ndg.vanish_axes()\ndg.better_savefig(\"../private_writing/fig_resources/sprottg2.png\", dpi=600)\n\nplt.figure()\nsol = Torus().make_trajectory(2000, resample=True, pts_per_period=400)\nplt.plot(sol[:, 0], sol[:, 1], **style1)\ndg.fixed_aspect_ratio(1)\ndg.vanish_axes()\ndg.better_savefig(\"../private_writing/fig_resources/torus2.png\", dpi=600)", "/Users/williamgilpin/miniconda3/envs/dysts/lib/python3.8/site-packages/dysts/base.py:287: UserWarning: This system has at least one unbounded variable, which has been mapped to a bounded domain. Pass argument postprocess=False in order to generate trajectories from the raw system.\n warnings.warn(\n/Users/williamgilpin/miniconda3/envs/dysts/lib/python3.8/site-packages/dysts/base.py:287: UserWarning: This system has at least one unbounded variable, which has been mapped to a bounded domain. Pass argument postprocess=False in order to generate trajectories from the raw system.\n warnings.warn(\n" ] ], [ [ "### Compare different forecasting models", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ndef mirror_df(df, mirror_val=0):\n \"\"\"\n Create a mirrored augmented dataframe. 
Used\n for setting the right boundary conditions on kernel \n density plots\n \"\"\"\n if np.isscalar(mirror_val):\n return pd.concat([df, mirror_val - df])\n else:\n all_out_df = [df]\n for val in mirror_val:\n all_out_df.append(val - df)\n return pd.concat(all_out_df)\n \ndflo = pd.DataFrame.from_dict(hist_values_low)\ndflo[\"Granularity\"] = \"Coarse\"\n\ndfhi = pd.DataFrame.from_dict(hist_values_high)\ndfhi[\"Granularity\"] = \"Fine\"\ndata = pd.merge(dflo, dfhi, how=\"outer\")\n\nall_model_names = np.array(list(hist_values_high.keys()))\nall_medians = [np.median(hist_values_high[name]) for name in hist_values_high]\nall_means = [np.mean(hist_values_high[name]) for name in hist_values_high]\nsort_order = np.argsort(all_medians)\n\ndata_long = pd.melt(data, value_vars=all_model_names[sort_order], id_vars=\"Granularity\")", "_____no_output_____" ], [ "plt.figure(figsize=(20, 10))\nax = plt.gca()\n\n\nax = sns.violinplot(\n data=data_long,\n x=\"variable\",\n y=\"value\",\n hue=\"Granularity\",\n order=all_model_names[sort_order],\n linewidth=0,\n size=3,\n alpha=0.1,\n split=True,\n scale=\"area\",\n inner=None,\n palette=\"mako\",\n)\nax = sns.pointplot(\n data=data_long,\n x=\"variable\",\n y=\"value\",\n hue=\"Granularity\",\n order=all_model_names[sort_order],\n linewidth=0,\n size=4,\n palette={\"Coarse\": \"w\", \"Fine\": \"w\"},\n join=False,\n ci=99,\n dodge=0.2,\n)\n\n\nplt.ylim([0, 200])\ndg.fixed_aspect_ratio(1 / 8)\nax.grid(False)\nax.legend_.remove()\n\n# dg.better_savefig(\"../private_writing/fig_resources/forecasting_violins.png\", dpi=600)", "_____no_output_____" ] ], [ [ "## Group models by mathematical properties", "_____no_output_____" ] ], [ [ "equation_names = list(all_results.keys())\nmodel_names = list(all_results[equation_names[0]].keys())\nmodel_names.remove(\"values\")\nscore_names = list(all_results[equation_names[0]][model_names[0]].keys())\nscore_names.remove(\"prediction\")\n\nresults_reduced = dict()\nfor equation_name in equation_names:\n results_reduced[equation_name] = dict()\n for model_name in model_names:\n val = all_results[equation_name][model_name][\"smape\"]\n results_reduced[equation_name][model_name] = val \nresults_reduced = pd.DataFrame.from_dict(results_reduced) \nresults_reduced = results_reduced.iloc[sort_order] # sort by median error", "_____no_output_____" ], [ "sort_inds = all_properties.sort_values(by=\"maximum_lyapunov_estimated\").index\n\ncolor_list = sns.color_palette(\"husl\", results_reduced.shape[0])\n\nplt.figure()\nfor clr, row in zip(color_list, results_reduced[sort_inds].to_numpy()):\n plt.semilogx(\n all_properties[\"maximum_lyapunov_estimated\"][sort_inds],\n row,\n color=clr\n )\nplt.title(\"Score versus Lyapunov exponent\")\n\n\n\n\nsort_inds = all_properties.sort_values(by=\"correlation_dimension\").index\n\nplt.figure()\nfor clr, row in zip(color_list, results_reduced[sort_inds].to_numpy()):\n plt.plot(\n all_properties[\"correlation_dimension\"][sort_inds],\n row,\n color=clr\n )\nplt.title(\"Score versus correlation dimension\")\n\n\nplt.figure()\nfor clr, row in zip(color_list, results_reduced[sort_inds].to_numpy()):\n plt.plot(\n all_properties[\"correlation_dimension\"][sort_inds],\n row,\n color=clr,\n linewidth=4\n )\nplt.legend(results_reduced.index, facecolor='white', framealpha=1, frameon=True)\nplt.title(\"Show legend\")", "_____no_output_____" ], [ "prop_name = \"correlation_dimension\"\nprop_name = \"maximum_lyapunov_estimated\"\nprop_name = \"multiscale_entropy\"\n\nall_vals = list()\nvals = 
np.array(all_properties[prop_name])\n\nall_windows = list(zip(10 * np.arange(9), 10 * np.arange(2, 11)))\n\nfor window in all_windows:\n sel_inds = all_properties[prop_name][\n all_properties[prop_name].between(\n np.percentile(vals, window[0]), np.percentile(vals, window[1])\n )\n ].index\n all_vals.append(results_reduced[sel_inds].median(axis=1))\n \nplt.figure()\nfor clr, row in zip(color_list, np.array(all_vals).T):\n plt.plot(row, color=clr)\nplt.ylim([0, 180])\ndg.better_savefig(\"../private_writing/fig_resources/\" + prop_name + \"_ranks.png\", dpi=600)", "_____no_output_____" ], [ "\n \nplt.figure()\nfor clr, row in zip(color_list, results_reduced[sort_inds].to_numpy()):\n plt.plot(\n all_properties[\"correlation_dimension\"][sort_inds],\n row,\n color=clr,\n linewidth=4\n# )\nplt.legend(results_reduced.index, facecolor='white', framealpha=1, frameon=True)\nplt.title(\"Show legend\")\n\n# dg.better_savefig(\"../private_writing/fig_resources/lyap_legend.png\", dpi=600)\n\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec77dc5f437d676e46d9f8570128dad6dd27b34b
888,509
ipynb
Jupyter Notebook
courses/dl2/wgan.ipynb
hbagchi/fastai
10befd8fb354f30dc823c434b34cdcd806baacaa
[ "Apache-2.0" ]
67
2019-05-29T18:55:20.000Z
2022-03-14T10:03:24.000Z
courses/dl2/wgan.ipynb
hbagchi/fastai
10befd8fb354f30dc823c434b34cdcd806baacaa
[ "Apache-2.0" ]
2
2021-11-10T19:44:03.000Z
2022-01-13T03:49:58.000Z
courses/dl2/wgan.ipynb
hbagchi/fastai
10befd8fb354f30dc823c434b34cdcd806baacaa
[ "Apache-2.0" ]
11
2019-01-19T08:10:46.000Z
2021-10-02T06:45:42.000Z
1,633.288603
749,776
0.960872
[ [ [ "%matplotlib inline\n%reload_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "## WGAN", "_____no_output_____" ] ], [ [ "from fastai.conv_learner import *\nfrom fastai.dataset import *\nimport gzip\ntorch.cuda.set_device(3)", "_____no_output_____" ] ], [ [ "Download the LSUN scene classification dataset bedroom category, unzip it, and convert it to jpg files (the scripts folder is here in the `dl2` folder):\n\n```\ncurl 'http://lsun.cs.princeton.edu/htbin/download.cgi?tag=latest&category=bedroom&set=train' -o bedroom.zip\nunzip bedroom.zip\npip install lmdb\npython lsun-data.py {PATH}/bedroom_train_lmdb --out_dir {PATH}/bedroom\n```\n\nThis isn't tested on Windows - if it doesn't work, you could use a Linux box to convert the files, then copy them over. Alternatively, you can download [this 20% sample](https://www.kaggle.com/jhoward/lsun_bedroom) from Kaggle datasets.", "_____no_output_____" ] ], [ [ "PATH = Path('data/lsun/')\nIMG_PATH = PATH/'bedroom'\nCSV_PATH = PATH/'files.csv'\nTMP_PATH = PATH/'tmp'\nTMP_PATH.mkdir(exist_ok=True)", "_____no_output_____" ], [ "files = PATH.glob('bedroom/**/*.jpg')\n\nwith CSV_PATH.open('w') as fo:\n for f in files: fo.write(f'{f.relative_to(IMG_PATH)},0\\n')", "_____no_output_____" ], [ "# Optional - sampling a subset of files\nCSV_PATH = PATH/'files_sample.csv'", "_____no_output_____" ], [ "files = PATH.glob('bedroom/**/*.jpg')\n\nwith CSV_PATH.open('w') as fo:\n for f in files:\n if random.random()<0.1: fo.write(f'{f.relative_to(IMG_PATH)},0\\n')", "_____no_output_____" ], [ "class ConvBlock(nn.Module):\n def __init__(self, ni, no, ks, stride, bn=True, pad=None):\n super().__init__()\n if pad is None: pad = ks//2//stride\n self.conv = nn.Conv2d(ni, no, ks, stride, padding=pad, bias=False)\n self.bn = nn.BatchNorm2d(no) if bn else None\n self.relu = nn.LeakyReLU(0.2, inplace=True)\n \n def forward(self, x):\n x = self.relu(self.conv(x))\n return self.bn(x) if self.bn else x", "_____no_output_____" ], [ "class DCGAN_D(nn.Module):\n def __init__(self, isize, nc, ndf, n_extra_layers=0):\n super().__init__()\n assert isize % 16 == 0, \"isize has to be a multiple of 16\"\n\n self.initial = ConvBlock(nc, ndf, 4, 2, bn=False)\n csize,cndf = isize/2,ndf\n self.extra = nn.Sequential(*[ConvBlock(cndf, cndf, 3, 1)\n for t in range(n_extra_layers)])\n\n pyr_layers = []\n while csize > 4:\n pyr_layers.append(ConvBlock(cndf, cndf*2, 4, 2))\n cndf *= 2; csize /= 2\n self.pyramid = nn.Sequential(*pyr_layers)\n \n self.final = nn.Conv2d(cndf, 1, 4, padding=0, bias=False)\n\n def forward(self, input):\n x = self.initial(input)\n x = self.extra(x)\n x = self.pyramid(x)\n return self.final(x).mean(0).view(1)", "_____no_output_____" ], [ "class DeconvBlock(nn.Module):\n def __init__(self, ni, no, ks, stride, pad, bn=True):\n super().__init__()\n self.conv = nn.ConvTranspose2d(ni, no, ks, stride, padding=pad, bias=False)\n self.bn = nn.BatchNorm2d(no)\n self.relu = nn.ReLU(inplace=True)\n \n def forward(self, x):\n x = self.relu(self.conv(x))\n return self.bn(x) if self.bn else x", "_____no_output_____" ], [ "class DCGAN_G(nn.Module):\n def __init__(self, isize, nz, nc, ngf, n_extra_layers=0):\n super().__init__()\n assert isize % 16 == 0, \"isize has to be a multiple of 16\"\n\n cngf, tisize = ngf//2, 4\n while tisize!=isize: cngf*=2; tisize*=2\n layers = [DeconvBlock(nz, cngf, 4, 1, 0)]\n\n csize, cndf = 4, cngf\n while csize < isize//2:\n layers.append(DeconvBlock(cngf, cngf//2, 4, 2, 1))\n cngf //= 2; csize *= 2\n\n layers += [DeconvBlock(cngf, 
cngf, 3, 1, 1) for t in range(n_extra_layers)]\n layers.append(nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))\n self.features = nn.Sequential(*layers)\n\n def forward(self, input): return F.tanh(self.features(input))", "_____no_output_____" ], [ "bs,sz,nz = 64,64,100", "_____no_output_____" ], [ "tfms = tfms_from_stats(inception_stats, sz)\nmd = ImageClassifierData.from_csv(PATH, 'bedroom', CSV_PATH, tfms=tfms, bs=128,\n skip_header=False, continuous=True)", "_____no_output_____" ], [ "md = md.resize(128)", "_____no_output_____" ], [ "x,_ = next(iter(md.val_dl))", "_____no_output_____" ], [ "plt.imshow(md.trn_ds.denorm(x)[0]);", "_____no_output_____" ], [ "netG = DCGAN_G(sz, nz, 3, 64, 1).cuda()\nnetD = DCGAN_D(sz, 3, 64, 1).cuda()", "_____no_output_____" ], [ "def create_noise(b): return V(torch.zeros(b, nz, 1, 1).normal_(0, 1))", "_____no_output_____" ], [ "preds = netG(create_noise(4))\npred_ims = md.trn_ds.denorm(preds)\n\nfig, axes = plt.subplots(2, 2, figsize=(6, 6))\nfor i,ax in enumerate(axes.flat): ax.imshow(pred_ims[i])", "_____no_output_____" ], [ "def gallery(x, nc=3):\n n,h,w,c = x.shape\n nr = n//nc\n assert n == nr*nc\n return (x.reshape(nr, nc, h, w, c)\n .swapaxes(1,2)\n .reshape(h*nr, w*nc, c))", "_____no_output_____" ], [ "optimizerD = optim.RMSprop(netD.parameters(), lr = 1e-4)\noptimizerG = optim.RMSprop(netG.parameters(), lr = 1e-4)", "_____no_output_____" ], [ "def train(niter, first=True):\n gen_iterations = 0\n for epoch in trange(niter):\n netD.train(); netG.train()\n data_iter = iter(md.trn_dl)\n i,n = 0,len(md.trn_dl)\n with tqdm(total=n) as pbar:\n while i < n:\n set_trainable(netD, True)\n set_trainable(netG, False)\n d_iters = 100 if (first and (gen_iterations < 25) or (gen_iterations % 500 == 0)) else 5\n j = 0\n while (j < d_iters) and (i < n):\n j += 1; i += 1\n for p in netD.parameters(): p.data.clamp_(-0.01, 0.01)\n real = V(next(data_iter)[0])\n real_loss = netD(real)\n fake = netG(create_noise(real.size(0)))\n fake_loss = netD(V(fake.data))\n netD.zero_grad()\n lossD = real_loss-fake_loss\n lossD.backward()\n optimizerD.step()\n pbar.update()\n\n set_trainable(netD, False)\n set_trainable(netG, True)\n netG.zero_grad()\n lossG = netD(netG(create_noise(bs))).mean(0).view(1)\n lossG.backward()\n optimizerG.step()\n gen_iterations += 1\n \n print(f'Loss_D {to_np(lossD)}; Loss_G {to_np(lossG)}; '\n f'D_real {to_np(real_loss)}; Loss_D_fake {to_np(fake_loss)}')", "_____no_output_____" ], [ "torch.backends.cudnn.benchmark=True", "_____no_output_____" ], [ "train(1, False)", " 0%| | 0/1 [00:00<?, ?it/s]\n100%|██████████| 18957/18957 [19:48<00:00, 10.74it/s]\nLoss_D [-0.67574]; Loss_G [0.08612]; D_real [-0.1782]; Loss_D_fake [0.49754]\n100%|██████████| 1/1 [19:49<00:00, 1189.02s/it]" ], [ "fixed_noise = create_noise(bs)", "_____no_output_____" ], [ "set_trainable(netD, True)\nset_trainable(netG, True)\noptimizerD = optim.RMSprop(netD.parameters(), lr = 1e-5)\noptimizerG = optim.RMSprop(netG.parameters(), lr = 1e-5)", "_____no_output_____" ], [ "train(1, False)", " 0%| | 0/1 [00:00<?, ?it/s]\n100%|██████████| 18957/18957 [23:31<00:00, 13.43it/s]\nLoss_D [-1.01657]; Loss_G [0.51333]; D_real [-0.50913]; Loss_D_fake [0.50744]\n100%|██████████| 1/1 [23:31<00:00, 1411.84s/it]" ], [ "netD.eval(); netG.eval();\nfake = netG(fixed_noise).data.cpu()\nfaked = np.clip(md.trn_ds.denorm(fake),0,1)\n\nplt.figure(figsize=(9,9))\nplt.imshow(gallery(faked, 8));", "_____no_output_____" ], [ "torch.save(netG.state_dict(), TMP_PATH/'netG_2.h5')\ntorch.save(netD.state_dict(), 
TMP_PATH/'netD_2.h5')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec77e396a82e38864bdbea6c56d9ecfe46c06b53
49,761
ipynb
Jupyter Notebook
module3-autoencoders/LS_DS_433_Autoencoders_Lecture.ipynb
mljarman/DS-Unit-4-Sprint-3-Deep-Learning
89b77aacfeddf08c0fbca6c818d13c7d3a063b44
[ "MIT" ]
null
null
null
module3-autoencoders/LS_DS_433_Autoencoders_Lecture.ipynb
mljarman/DS-Unit-4-Sprint-3-Deep-Learning
89b77aacfeddf08c0fbca6c818d13c7d3a063b44
[ "MIT" ]
null
null
null
module3-autoencoders/LS_DS_433_Autoencoders_Lecture.ipynb
mljarman/DS-Unit-4-Sprint-3-Deep-Learning
89b77aacfeddf08c0fbca6c818d13c7d3a063b44
[ "MIT" ]
null
null
null
76.910355
30,448
0.798316
[ [ [ "Lambda School Data Science\n\n*Unit 4, Sprint 3, Module 3*\n\n---", "_____no_output_____" ], [ "# Autoencoders\n\n> An autoencoder is a type of artificial neural network used to learn efficient data codings in an unsupervised manner.[1][2] The aim of an autoencoder is to learn a representation (encoding) for a set of data, typically for dimensionality reduction, by training the network to ignore signal “noise”. Along with the reduction side, a reconstructing side is learnt, where the autoencoder tries to generate from the reduced encoding a representation as close as possible to its original input, hence its name. ", "_____no_output_____" ], [ "## Learning Objectives\n*At the end of the lecture you should be to*:\n* <a href=\"#p1\">Part 1</a>: Describe the componenets of an autoencoder\n* <a href=\"#p2\">Part 2</a>: Train an autoencoder\n* <a href=\"#p3\">Part 3</a>: Apply an autoenocder to a basic information retrieval problem\n\n__Problem:__ Is it possible to automatically represent an image as a fixed-sized vector even if it isn’t labeled?\n\n__Solution:__ Use an autoencoder\n\nWhy do we need to represent an image as a fixed-sized vector do you ask? \n\n* __Information Retrieval__\n - [Reverse Image Search](https://en.wikipedia.org/wiki/Reverse_image_search)\n - [Recommendation Systems - Content Based Filtering](https://en.wikipedia.org/wiki/Recommender_system#Content-based_filtering)\n* __Dimensionality Reduction__\n - [Feature Extraction](https://www.kaggle.com/c/vsb-power-line-fault-detection/discussion/78285)\n - [Manifold Learning](https://en.wikipedia.org/wiki/Nonlinear_dimensionality_reduction)\n\nWe've already seen *representation learning* when we talked about word embedding modelings during our NLP week. Today we're going to achieve a similiar goal on images using *autoencoders*. An autoencoder is a neural network that is trained to attempt to copy its input to its output. Usually they are restricted in ways that allow them to copy only approximately. The model often learns useful properties of the data, because it is forced to prioritize which aspecs of the input should be copied. The properties of autoencoders have made them an important part of modern generative modeling approaches. Consider autoencoders a special case of feed-forward networks (the kind we've been studying); backpropagation and gradient descent still work. 
", "_____no_output_____" ], [ "# Autoencoder Architecture (Learn)\n<a id=\"p1\"></a>", "_____no_output_____" ], [ "## Overview\n\nThe *encoder* compresses the input data and the *decoder* does the reverse to produce the uncompressed version of the data to create a reconstruction of the input as accurately as possible:\n\n<img src='https://miro.medium.com/max/1400/1*[email protected]' width=800/>\n\nThe learning process gis described simply as minimizing a loss function: \n$ L(x, g(f(x))) $\n\n- $L$ is a loss function penalizing $g(f(x))$ for being dissimiliar from $x$ (such as mean squared error)\n- $f$ is the encoder function\n- $g$ is the decoder function", "_____no_output_____" ], [ "## Follow Along\n### Extremely Simple Autoencoder", "_____no_output_____" ] ], [ [ "from tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.models import Model\nimport wandb\nfrom wandb.keras import WandbCallback\n\n# this is the size of our encoded representations\nencoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats\n\n# this is our input placeholder\n\n# \"encoded\" is the encoded representation of the input\n\n# \"decoded\" is the lossy reconstruction of the input\n\n\n# this model maps an input to its reconstruction\n", "_____no_output_____" ], [ "# this model maps an input to its encoded representation\n", "_____no_output_____" ], [ "# create a placeholder for an encoded (32-dimensional) input\n\n# retrieve the last layer of the autoencoder model\n\n# create the decoder model\n", "_____no_output_____" ], [ "autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')", "_____no_output_____" ], [ "from tensorflow.keras.datasets import mnist\nimport numpy as np\n(x_train, _), (x_test, _) = mnist.load_data()", "_____no_output_____" ], [ "x_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\nx_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\nx_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))\nprint(x_train.shape)\nprint(x_test.shape)", "(60000, 784)\n(10000, 784)\n" ], [ "wandb.init(project=\"mnist_autoencoder\", entity=\"ds5\")\n\nautoencoder.fit(..., ...,\n epochs=1000,\n batch_size=256,\n shuffle=True,\n validation_data=(..., ...),\n verbose = False,\n callbacks=...)", "_____no_output_____" ], [ "# encode and decode some digits\n# note that we take them from the *test* set\n", "_____no_output_____" ], [ "# use Matplotlib (don't ask)\nimport matplotlib.pyplot as plt\n\nn = 10 # how many digits we will display\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n # display original\n ax = plt.subplot(2, n, i + 1)\n plt.imshow(x_test[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display reconstruction\n ax = plt.subplot(2, n, i + 1 + n)\n plt.imshow(decoded_imgs[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show()", "_____no_output_____" ] ], [ [ "## Challenge\n\nExpected to talk about the components of autoencoder and their purpose. ", "_____no_output_____" ], [ "# Train an Autoencoder (Learn)\n<a id=\"p2\"></a>", "_____no_output_____" ], [ "## Overview\n\nAs long as our architecture maintains an hourglass shape, we can continue to add layers and create a deeper network. 
", "_____no_output_____" ], [ "## Follow Along", "_____no_output_____" ], [ "### Deep Autoencoder", "_____no_output_____" ] ], [ [ "input_img = Input(shape=(784,))\n", "_____no_output_____" ], [ "# compile & fit model", "_____no_output_____" ], [ "# use Matplotlib (don't ask)\nimport matplotlib.pyplot as plt\n\nn = 10 # how many digits we will display\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n # display original\n ax = plt.subplot(2, n, i + 1)\n plt.imshow(x_test[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display reconstruction\n ax = plt.subplot(2, n, i + 1 + n)\n plt.imshow(decoded_imgs[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show()", "_____no_output_____" ] ], [ [ "### Convolutional autoencoder\n\n> Since our inputs are images, it makes sense to use convolutional neural networks (convnets) as encoders and decoders. In practical settings, autoencoders applied to images are always convolutional autoencoders --they simply perform much better.\n\n> Let's implement one. The encoder will consist in a stack of Conv2D and MaxPooling2D layers (max pooling being used for spatial down-sampling), while the decoder will consist in a stack of Conv2D and UpSampling2D layers.", "_____no_output_____" ] ], [ [ "from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D\nfrom keras.models import Model\nfrom keras import backend as K\n\n# Create Model \n\nautoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')", "_____no_output_____" ], [ "from keras.datasets import mnist\nimport numpy as np\n\n(x_train, _), (x_test, _) = mnist.load_data()\n\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\nx_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format\nx_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format", "_____no_output_____" ], [ "wandb.init(project=\"mnist_autoencoder\", entity=\"ds5\")\n\nautoencoder.fit(x_train, x_train,\n epochs=100,\n batch_size=256,\n shuffle=True,\n validation_data=(x_test, x_test),\n verbose=False,\n callbacks=[WandbCallback()])", "_____no_output_____" ], [ "decoded_imgs = autoencoder.predict(x_test)\n\nn = 10\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n # display original\n ax = plt.subplot(2, n, i)\n plt.imshow(x_test[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display reconstruction\n ax = plt.subplot(2, n, i + n)\n plt.imshow(decoded_imgs[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show()", "_____no_output_____" ] ], [ [ "#### Visualization of the Representations", "_____no_output_____" ] ], [ [ "encoder = Model(input_img, encoded)\nencoder.predict(x_train)\n\nn = 10\nplt.figure(figsize=(20, 8))\nfor i in range(n):\n ax = plt.subplot(1, n, i)\n plt.imshow(encoded_imgs[i].reshape(4, 4 * 8).T)\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show()", "_____no_output_____" ] ], [ [ "## Challenge\n\nYou will train an autoencoder at some point in the near future. ", "_____no_output_____" ], [ "# Information Retrieval with Autoencoders (Learn)\n<a id=\"p3\"></a>", "_____no_output_____" ], [ "## Overview\n\nA common usecase for autoencoders is for reverse image search. 
Let's try to draw an image and see what's most similar in our dataset. \n\nTo accomplish this we will need to slice our autoencoder in half to extract our reduced features. :) ", "_____no_output_____" ], [ "## Follow Along", "_____no_output_____" ] ], [ [ "encoder = Model(input_img, encoded)\nencoded_imgs = encoder.predict(x_train)", "_____no_output_____" ], [ "encoded_imgs[0].T", "_____no_output_____" ], [ "from sklearn.neighbors import NearestNeighbors\n\nnn = NearestNeighbors(n_neighbors=10, algorithm='ball_tree')\nnn.fit(encoded_imgs)", "_____no_output_____" ], [ "nn.kneighbors(...)", "_____no_output_____" ] ], [ [ "## Challenge\n\nYou should already be familiar with KNN and similarity queries, so the key component of this section is knowing what to 'slice' from your autoencoder (the encoder) to extract features from your data. ", "_____no_output_____" ], [ "# Review\n\n* <a href=\"#p1\">Part 1</a>: Describe the components of an autoencoder\n - Encoder\n - Decoder\n* <a href=\"#p2\">Part 2</a>: Train an autoencoder\n - Can do in Keras Easily\n - Can use a variety of architectures\n - Architectures must follow hourglass shape\n* <a href=\"#p3\">Part 3</a>: Apply an autoencoder to a basic information retrieval problem\n - Extract just the encoder to use for various tasks\n - AEs are good for dimensionality reduction, reverse image search, and many more things. \n", "_____no_output_____" ], [ "# Sources\n\n__References__\n- [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html)\n- [Deep Learning Cookbook](http://shop.oreilly.com/product/0636920097471.do)\n\n__Additional Material__", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
ec780c74a97a71cea9870aac1a1916757f0f9a62
823,536
ipynb
Jupyter Notebook
docs/source/customizing.ipynb
lepture/nbconvert
6870bebf4a1d86fc6eb8add1a2c1a99e7af1bf06
[ "BSD-3-Clause-Clear" ]
1
2019-09-03T01:16:03.000Z
2019-09-03T01:16:03.000Z
docs/source/customizing.ipynb
lepture/nbconvert
6870bebf4a1d86fc6eb8add1a2c1a99e7af1bf06
[ "BSD-3-Clause-Clear" ]
3
2020-03-24T17:31:36.000Z
2021-02-02T22:09:23.000Z
docs/source/customizing.ipynb
lepture/nbconvert
6870bebf4a1d86fc6eb8add1a2c1a99e7af1bf06
[ "BSD-3-Clause-Clear" ]
null
null
null
32.981017
552
0.490693
[ [ [ "# Customizing nbconvert", "_____no_output_____" ], [ "Under the hood, nbconvert uses [Jinja templates](http://jinja.pocoo.org/docs/latest/) to specify how the notebooks should be formatted. These templates can be fully customized, allowing you to use nbconvert to create notebooks in different formats with different styles as well.", "_____no_output_____" ], [ "## Converting a notebook to an (I)Python script and printing to stdout\n\nOut of the box, nbconvert can be used to convert notebooks to plain Python files. For example, the following command converts the `example.ipynb` notebook to Python and prints out the result:", "_____no_output_____" ] ], [ [ "!jupyter nbconvert --to python 'example.ipynb' --stdout", "[NbConvertApp] Converting notebook example.ipynb to python\n\n# coding: utf-8\n\n# # Example notebook\n\n# ### Markdown cells\n# \n# This is an example notebook that can be converted with `nbconvert` to different formats. This is an example of a markdown cell.\n\n# ### LaTeX Equations\n# \n# Here is an equation:\n# \n# $$\n# y = \\sin(x)\n# $$\n\n# ### Code cells\n\n# In[1]:\n\n\nprint(\"This is a code cell that produces some output\")\n\n\n# ### Inline figures\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.ion()\n\nx = np.linspace(0, 2 * np.pi, 100)\ny = np.sin(x)\nplt.plot(x, y)\n\n" ] ], [ [ "From the code, you can see that non-code cells are also exported. If you wanted to change that behaviour, you would first look to nbconvert [configuration options page](./config_options.rst) to see if there is an option available that can give you your desired behaviour. \n\nIn this case, if you wanted to remove code cells from the output, you could use the `TemplateExporter.exclude_markdown` traitlet directly, as below. ", "_____no_output_____" ] ], [ [ "!jupyter nbconvert --to python 'example.ipynb' --stdout --TemplateExporter.exclude_markdown=True", "[NbConvertApp] Converting notebook example.ipynb to python\n\n# coding: utf-8\n\n# In[1]:\n\n\nprint(\"This is a code cell that produces some output\")\n\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.ion()\n\nx = np.linspace(0, 2 * np.pi, 100)\ny = np.sin(x)\nplt.plot(x, y)\n\n" ] ], [ [ "## Custom Templates \n\nAs mentioned above, if you want to change this behavior, you can use a custom template. The custom template inherits from the Python template and overwrites the markdown blocks so that they are empty. \n\nBelow is an example of a custom template, which we write to a file called `simplepython.tpl`. 
This template removes markdown cells from the output, and also changes how the execution count numbers are formatted:", "_____no_output_____" ] ], [ [ "%%writefile simplepython.tpl\n\n{% extends 'python.tpl'%}\n\n## remove markdown cells\n{% block markdowncell %}\n{% endblock markdowncell %}\n\n## change the appearance of execution count\n{% block in_prompt %}\n# [{{ cell.execution_count if cell.execution_count else ' ' }}]:\n{% endblock in_prompt %}", "Overwriting simplepython.tpl\n" ] ], [ [ "Using this template, we see that the resulting Python code does not contain anything that was previously in a markdown cell, and only displays execution counts (i.e., `[#]:` not `In[#]:`):", "_____no_output_____" ] ], [ [ "!jupyter nbconvert --to python 'example.ipynb' --stdout --template=simplepython.tpl", "[NbConvertApp] Converting notebook example.ipynb to python\n\n\n# coding: utf-8\n\n# [1]:\n\nprint(\"This is a code cell that produces some output\")\n\n\n# [1]:\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.ion()\n\nx = np.linspace(0, 2 * np.pi, 100)\ny = np.sin(x)\nplt.plot(x, y)\n\n" ] ], [ [ "### Saving Custom Templates\n\nBy default, nbconvert finds templates from a few locations.\n\nThe recommended place to save custom templates, so that they are globally accessible to nbconvert, is your jupyter data directories:\n\n- share/jupyter\n - nbconvert\n - templates\n - html\n - latex\n\nThe HTML and LaTeX/PDF exporters will search the html and latex subdirectories for templates, respectively.\n\nTo find your jupyter configuration directory you can use:\n\n```python\nfrom jupyter_core.paths import jupyter_path\nprint(jupyter_path('nbconvert','templates'))\n```\n\nAdditionally,\n\n```python\nTemplateExporter.template_path=['.']\n```\n\ndefines an additional list of paths that nbconvert can look for user defined templates. It defaults to searching for custom templates in the current working directory and can be changed through configuration options.", "_____no_output_____" ], [ "## Template structure\n\nNbconvert templates consist of a set of nested blocks. When defining a new\ntemplate, you extend an existing template by overriding some of the blocks.\n\nAll the templates shipped in nbconvert have the basic structure described here,\nthough some may define additional blocks.", "_____no_output_____" ] ], [ [ "from IPython.display import HTML, display\nwith open('template_structure.html') as f:\n display(HTML(f.read()))", "_____no_output_____" ] ], [ [ "### A few gotchas\n\nJinja uses `%`, `{`, and `}` for syntax by default which does not play nicely with LaTeX. In LaTeX, we have the following replacements:\n\n| Syntax | Default | LaTeX |\n|----------|---------|---------|\n| block | {% %} | ((* *)) |\n| variable | {{ }} | ((( ))) |\n| comment | {# #} | ((= =)) |", "_____no_output_____" ], [ "## Templates using cell tags\n\nThe notebook file format supports attaching arbitrary JSON metadata to each cell. In addition, every cell has a special `tags` metadata field that accepts a list of strings that indicate the cell's tags. To apply these, go to the `View → CellToolbar → Tags` option which will create a Tag editor at the top of every cell. \n\nFirst choose a notebook you want to convert to html, and apply the tags: `\"Easy\"`, `\"Medium\"`, or \n`\"Hard\"`. 
\n\nWith this in place, the notebook can be converted using a custom template.\n\nDesign your template in the cells provided below.\n\nHint: tags are located at `cell.metadata.tags`, the following Python code collects the value of the tag: \n\n```python\ncell['metadata'].get('tags', [])\n```\n\nWhich you can then use inside a Jinja template as in the following:", "_____no_output_____" ] ], [ [ "%%writefile mytemplate.tpl\n\n{% extends 'full.tpl'%}\n{% block any_cell %}\n{% if 'Hard' in cell['metadata'].get('tags', []) %}\n <div style=\"border:thin solid red\">\n {{ super() }}\n </div>\n{% elif 'Medium' in cell['metadata'].get('tags', []) %}\n <div style=\"border:thin solid orange\">\n {{ super() }}\n </div>\n{% elif 'Easy' in cell['metadata'].get('tags', []) %}\n <div style=\"border:thin solid green\">\n {{ super() }}\n </div>\n{% else %}\n {{ super() }}\n{% endif %}\n{% endblock any_cell %}", "Overwriting mytemplate.tpl\n" ] ], [ [ "Now, if we collect the result of using nbconvert with this template, and display the resulting html, we see the following:", "_____no_output_____" ] ], [ [ "example = !jupyter nbconvert --to html 'example.ipynb' --template=mytemplate.tpl --stdout\nexample = example[3:] # have to remove the first three lines which are not proper html\nfrom IPython.display import HTML, display\ndisplay(HTML('\\n'.join(example))) ", "_____no_output_____" ] ], [ [ "## Templates using custom cell metadata \n\nWe demonstrated [above](#Templates-using-cell-tags) how to use cell tags in a template to apply custom styling to a notebook. But remember, the notebook file format supports attaching _arbitrary_ JSON metadata to each cell, not only cell tags. \nHere, we describe an exercise for using an `example.difficulty` metadata field (rather than cell tags) to do the same as before (to mark up different cells as being \"Easy\", \"Medium\" or \"Hard\").\n\n### How to edit cell metadata\n\nTo edit the cell metadata from within the notebook, go to the menu item: `View → Cell Toolbar → Edit Metadata`. This will bring up a toolbar above each cell with a button that says \"Edit Metadata\". Click this button, and a field will pop up in which you will directly edit the cell metadata JSON. \n\n**NB**: Because it is JSON, you will need to ensure that what you write is valid JSON. \n\n### Template challenges: dealing with missing custom metadata fields\n\nOne of the challenges of dealing with custom metadata is to handle the case where the metadata is not present on every cell. This can get somewhat tricky because of JSON objects tendency to be deeply nested coupled with Python's (and therefore Jinja's) approach to calling into dictionaries. Specifically, the following code will error:\n\n```python\nfoo = {}\nfoo[\"bar\"]\n```\n\nAccordingly, it is better to use the [`{}.get` method](https://docs.python.org/3.6/library/stdtypes.html#dict.get) which allows you to set a default value to return if no key is found as the second argument. \n\nHint: if your metadata items are located at `cell.metadata.example.difficulty`, the following Python code would get the value defaulting to an empty string (`''`) if nothing is found:\n\n```python\ncell['metadata'].get('example', {}).get('difficulty', '')\n```", "_____no_output_____" ], [ "### Exercise: Write a template for handling custom metadata\nNow, write a template that will look for `Easy`, `Medium` and `Hard` metadata values for the `cell.metadata.example.difficulty` field and wrap them in a div with a green, orange, or red thin solid border (respectively). 
\n\n**NB**: This is the same design and logic as used in the previous cell tag example.\n\n#### How to get `example.ipynb`\n\nWe have provided an example file in `example.ipynb` in the nbconvert documentation that has already been marked up with both tags and the above metadata for you to test with. You can get it from [this link to the raw file]( https://raw.githubusercontent.com/jupyter/nbconvert/master/docs/source/example.ipynb) or by cloning the repository [from GitHub](https://github.com/jupyter/nbconvert) and navingating to `nbconvert/docs/source/example.ipynb`. \n\n#### Convert `example.ipynb` using cell tags \n\nFirst, make sure that you can reproduce the previous result using the cell tags template that we have provided above. \n\n**Easy**: If you want to make it easy on yourself, create a new file `my_template.tpl` in the same directory as `example.ipynb` and copy the contents of the cell we use to write `mytemplate.tpl` to the file system. \n\nThen run `jupyter nbconvert --to html 'example.ipynb' --template=mytemplate.tpl` and see if your \n\n**Moderate**: If you want more of a challenge, try recreating the jinja template by modifying the following jinja template file:\n\n```python\n{% extends 'full.tpl'%}\n{% block any_cell %}\n <div style=\"border:thin solid red\">\n {{ super() }}\n </div>\n{% endblock any_cell %}\n```\n\n**Hard**: If you want even more of a challenge, try recreating the jinja template from scratch. \n\n#### Write your template\n\nOnce you've done at least the **Easy** version of the previous step, try modifying your template to use `cell.metadata.example.difficulty` fields rather than cell tags. \n\n#### Convert `example.ipynb` with formatting from custom metadata\n\nOnce you've written your template, try converting `example.ipynb` using the following command (making sure that `your_template.tpl` is in your local directory where you are running the command):\n\n```bash\njupyter nbconvert --to html 'example.ipynb' --template=your_template.tpl --stdout\n```\n\nThe resulting display should pick out different cells to be bordered with green, orange, or red.\n\nIf you do that successfullly, the resulting html document should look like the following cell's contents: ", "_____no_output_____" ], [ "<html>\n<head><meta charset=\"utf-8\" />\n<title>example</title><script src=\"https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js\"></script>\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js\"></script>\n\n<style type=\"text/css\">\n /*!\n*\n* Twitter Bootstrap\n*\n*/\n/*!\n * Bootstrap v3.3.7 (http://getbootstrap.com)\n * Copyright 2011-2016 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\nhtml {\n font-family: sans-serif;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n}\nbody {\n margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n display: inline-block;\n vertical-align: baseline;\n}\naudio:not([controls]) {\n display: none;\n height: 0;\n}\n[hidden],\ntemplate {\n display: none;\n}\na {\n background-color: transparent;\n}\na:active,\na:hover {\n outline: 0;\n}\nabbr[title] {\n border-bottom: 1px dotted;\n}\nb,\nstrong {\n font-weight: bold;\n}\ndfn {\n font-style: italic;\n}\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\nmark {\n background: #ff0;\n color: #000;\n}\nsmall {\n font-size: 80%;\n}\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\nsup {\n top: -0.5em;\n}\nsub {\n bottom: -0.25em;\n}\nimg {\n border: 0;\n}\nsvg:not(:root) {\n overflow: hidden;\n}\nfigure {\n margin: 1em 40px;\n}\nhr {\n box-sizing: content-box;\n height: 0;\n}\npre {\n overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n font-family: monospace, monospace;\n font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n color: inherit;\n font: inherit;\n margin: 0;\n}\nbutton {\n overflow: visible;\n}\nbutton,\nselect {\n text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n -webkit-appearance: button;\n cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n cursor: default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n border: 0;\n padding: 0;\n}\ninput {\n line-height: normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n box-sizing: border-box;\n padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\ninput[type=\"search\"] {\n -webkit-appearance: textfield;\n box-sizing: content-box;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\nfieldset {\n border: 1px solid #c0c0c0;\n margin: 0 2px;\n padding: 0.35em 0.625em 0.75em;\n}\nlegend {\n border: 0;\n padding: 0;\n}\ntextarea {\n overflow: auto;\n}\noptgroup {\n font-weight: bold;\n}\ntable {\n border-collapse: collapse;\n border-spacing: 0;\n}\ntd,\nth {\n padding: 0;\n}\n/*! 
0 3px 5px rgba(0, 0, 0, 0.125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn .caret {\n margin-left: 0;\n}\n.btn-lg .caret {\n border-width: 5px 5px 0;\n border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n display: block;\n float: none;\n width: 100%;\n max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n margin-top: -1px;\n margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n border-top-right-radius: 2px;\n border-top-left-radius: 2px;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n border-top-right-radius: 0;\n border-top-left-radius: 0;\n border-bottom-right-radius: 2px;\n border-bottom-left-radius: 2px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n border-top-right-radius: 0;\n border-top-left-radius: 0;\n}\n.btn-group-justified {\n display: table;\n width: 100%;\n table-layout: fixed;\n border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n float: none;\n display: table-cell;\n width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n position: absolute;\n clip: rect(0, 0, 0, 0);\n pointer-events: none;\n}\n.input-group {\n position: relative;\n display: table;\n border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n float: none;\n padding-left: 0;\n padding-right: 0;\n}\n.input-group .form-control {\n position: relative;\n z-index: 2;\n float: left;\n width: 100%;\n margin-bottom: 0;\n}\n.input-group .form-control:focus {\n z-index: 3;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n height: 45px;\n padding: 10px 16px;\n font-size: 17px;\n line-height: 1.3333333;\n border-radius: 3px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n height: 45px;\n line-height: 45px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n height: auto;\n}\n.input-group-sm > 
.form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 1px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n height: 30px;\n line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n width: 1%;\n white-space: nowrap;\n vertical-align: middle;\n}\n.input-group-addon {\n padding: 6px 12px;\n font-size: 13px;\n font-weight: normal;\n line-height: 1;\n color: #555555;\n text-align: center;\n background-color: #eeeeee;\n border: 1px solid #ccc;\n border-radius: 2px;\n}\n.input-group-addon.input-sm {\n padding: 5px 10px;\n font-size: 12px;\n border-radius: 1px;\n}\n.input-group-addon.input-lg {\n padding: 10px 16px;\n font-size: 17px;\n border-radius: 3px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n border-bottom-right-radius: 0;\n border-top-right-radius: 0;\n}\n.input-group-addon:first-child {\n border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n border-bottom-left-radius: 0;\n border-top-left-radius: 0;\n}\n.input-group-addon:last-child {\n border-left: 0;\n}\n.input-group-btn {\n position: relative;\n font-size: 0;\n white-space: nowrap;\n}\n.input-group-btn > .btn {\n position: relative;\n}\n.input-group-btn > .btn + .btn {\n margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n z-index: 2;\n margin-left: -1px;\n}\n.nav {\n margin-bottom: 0;\n padding-left: 0;\n list-style: none;\n}\n.nav > li {\n position: relative;\n display: block;\n}\n.nav > li > a {\n position: relative;\n display: block;\n padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n text-decoration: none;\n background-color: #eeeeee;\n}\n.nav > li.disabled > a {\n color: #777777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n color: #777777;\n text-decoration: 
none;\n background-color: transparent;\n cursor: not-allowed;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n background-color: #eeeeee;\n border-color: #337ab7;\n}\n.nav .nav-divider {\n height: 1px;\n margin: 8px 0;\n overflow: hidden;\n background-color: #e5e5e5;\n}\n.nav > li > a > img {\n max-width: none;\n}\n.nav-tabs {\n border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n float: left;\n margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n margin-right: 2px;\n line-height: 1.42857143;\n border: 1px solid transparent;\n border-radius: 2px 2px 0 0;\n}\n.nav-tabs > li > a:hover {\n border-color: #eeeeee #eeeeee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n color: #555555;\n background-color: #fff;\n border: 1px solid #ddd;\n border-bottom-color: transparent;\n cursor: default;\n}\n.nav-tabs.nav-justified {\n width: 100%;\n border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n float: none;\n}\n.nav-tabs.nav-justified > li > a {\n text-align: center;\n margin-bottom: 5px;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n}\n@media (min-width: 768px) {\n .nav-tabs.nav-justified > li {\n display: table-cell;\n width: 1%;\n }\n .nav-tabs.nav-justified > li > a {\n margin-bottom: 0;\n }\n}\n.nav-tabs.nav-justified > li > a {\n margin-right: 0;\n border-radius: 2px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n .nav-tabs.nav-justified > li > a {\n border-bottom: 1px solid #ddd;\n border-radius: 2px 2px 0 0;\n }\n .nav-tabs.nav-justified > .active > a,\n .nav-tabs.nav-justified > .active > a:hover,\n .nav-tabs.nav-justified > .active > a:focus {\n border-bottom-color: #fff;\n }\n}\n.nav-pills > li {\n float: left;\n}\n.nav-pills > li > a {\n border-radius: 2px;\n}\n.nav-pills > li + li {\n margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n color: #fff;\n background-color: #337ab7;\n}\n.nav-stacked > li {\n float: none;\n}\n.nav-stacked > li + li {\n margin-top: 2px;\n margin-left: 0;\n}\n.nav-justified {\n width: 100%;\n}\n.nav-justified > li {\n float: none;\n}\n.nav-justified > li > a {\n text-align: center;\n margin-bottom: 5px;\n}\n.nav-justified > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n}\n@media (min-width: 768px) {\n .nav-justified > li {\n display: table-cell;\n width: 1%;\n }\n .nav-justified > li > a {\n margin-bottom: 0;\n }\n}\n.nav-tabs-justified {\n border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n margin-right: 0;\n border-radius: 2px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n .nav-tabs-justified > li > a {\n border-bottom: 1px solid #ddd;\n border-radius: 2px 2px 0 0;\n }\n .nav-tabs-justified > .active > a,\n .nav-tabs-justified > .active > a:hover,\n .nav-tabs-justified > .active > a:focus {\n border-bottom-color: #fff;\n }\n}\n.tab-content > .tab-pane {\n display: none;\n}\n.tab-content > .active {\n display: block;\n}\n.nav-tabs .dropdown-menu {\n margin-top: -1px;\n border-top-right-radius: 0;\n border-top-left-radius: 0;\n}\n.navbar {\n position: relative;\n min-height: 30px;\n margin-bottom: 18px;\n border: 1px solid transparent;\n}\n@media (min-width: 541px) {\n .navbar {\n 
border-radius: 2px;\n }\n}\n@media (min-width: 541px) {\n .navbar-header {\n float: left;\n }\n}\n.navbar-collapse {\n overflow-x: visible;\n padding-right: 0px;\n padding-left: 0px;\n border-top: 1px solid transparent;\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1);\n -webkit-overflow-scrolling: touch;\n}\n.navbar-collapse.in {\n overflow-y: auto;\n}\n@media (min-width: 541px) {\n .navbar-collapse {\n width: auto;\n border-top: 0;\n box-shadow: none;\n }\n .navbar-collapse.collapse {\n display: block !important;\n height: auto !important;\n padding-bottom: 0;\n overflow: visible !important;\n }\n .navbar-collapse.in {\n overflow-y: visible;\n }\n .navbar-fixed-top .navbar-collapse,\n .navbar-static-top .navbar-collapse,\n .navbar-fixed-bottom .navbar-collapse {\n padding-left: 0;\n padding-right: 0;\n }\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n max-height: 340px;\n}\n@media (max-device-width: 540px) and (orientation: landscape) {\n .navbar-fixed-top .navbar-collapse,\n .navbar-fixed-bottom .navbar-collapse {\n max-height: 200px;\n }\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n margin-right: 0px;\n margin-left: 0px;\n}\n@media (min-width: 541px) {\n .container > .navbar-header,\n .container-fluid > .navbar-header,\n .container > .navbar-collapse,\n .container-fluid > .navbar-collapse {\n margin-right: 0;\n margin-left: 0;\n }\n}\n.navbar-static-top {\n z-index: 1000;\n border-width: 0 0 1px;\n}\n@media (min-width: 541px) {\n .navbar-static-top {\n border-radius: 0;\n }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n position: fixed;\n right: 0;\n left: 0;\n z-index: 1030;\n}\n@media (min-width: 541px) {\n .navbar-fixed-top,\n .navbar-fixed-bottom {\n border-radius: 0;\n }\n}\n.navbar-fixed-top {\n top: 0;\n border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n bottom: 0;\n margin-bottom: 0;\n border-width: 1px 0 0;\n}\n.navbar-brand {\n float: left;\n padding: 6px 0px;\n font-size: 17px;\n line-height: 18px;\n height: 30px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n text-decoration: none;\n}\n.navbar-brand > img {\n display: block;\n}\n@media (min-width: 541px) {\n .navbar > .container .navbar-brand,\n .navbar > .container-fluid .navbar-brand {\n margin-left: 0px;\n }\n}\n.navbar-toggle {\n position: relative;\n float: right;\n margin-right: 0px;\n padding: 9px 10px;\n margin-top: -2px;\n margin-bottom: -2px;\n background-color: transparent;\n background-image: none;\n border: 1px solid transparent;\n border-radius: 2px;\n}\n.navbar-toggle:focus {\n outline: 0;\n}\n.navbar-toggle .icon-bar {\n display: block;\n width: 22px;\n height: 2px;\n border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n margin-top: 4px;\n}\n@media (min-width: 541px) {\n .navbar-toggle {\n display: none;\n }\n}\n.navbar-nav {\n margin: 3px 0px;\n}\n.navbar-nav > li > a {\n padding-top: 10px;\n padding-bottom: 10px;\n line-height: 18px;\n}\n@media (max-width: 540px) {\n .navbar-nav .open .dropdown-menu {\n position: static;\n float: none;\n width: auto;\n margin-top: 0;\n background-color: transparent;\n border: 0;\n box-shadow: none;\n }\n .navbar-nav .open .dropdown-menu > li > a,\n .navbar-nav .open .dropdown-menu .dropdown-header {\n padding: 5px 15px 5px 25px;\n }\n .navbar-nav .open .dropdown-menu > li > a {\n line-height: 18px;\n }\n .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-nav .open .dropdown-menu > li > a:focus {\n background-image: 
none;\n }\n}\n@media (min-width: 541px) {\n .navbar-nav {\n float: left;\n margin: 0;\n }\n .navbar-nav > li {\n float: left;\n }\n .navbar-nav > li > a {\n padding-top: 6px;\n padding-bottom: 6px;\n }\n}\n.navbar-form {\n margin-left: 0px;\n margin-right: 0px;\n padding: 10px 0px;\n border-top: 1px solid transparent;\n border-bottom: 1px solid transparent;\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n margin-top: -1px;\n margin-bottom: -1px;\n}\n@media (min-width: 768px) {\n .navbar-form .form-group {\n display: inline-block;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .form-control {\n display: inline-block;\n width: auto;\n vertical-align: middle;\n }\n .navbar-form .form-control-static {\n display: inline-block;\n }\n .navbar-form .input-group {\n display: inline-table;\n vertical-align: middle;\n }\n .navbar-form .input-group .input-group-addon,\n .navbar-form .input-group .input-group-btn,\n .navbar-form .input-group .form-control {\n width: auto;\n }\n .navbar-form .input-group > .form-control {\n width: 100%;\n }\n .navbar-form .control-label {\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .radio,\n .navbar-form .checkbox {\n display: inline-block;\n margin-top: 0;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .radio label,\n .navbar-form .checkbox label {\n padding-left: 0;\n }\n .navbar-form .radio input[type=\"radio\"],\n .navbar-form .checkbox input[type=\"checkbox\"] {\n position: relative;\n margin-left: 0;\n }\n .navbar-form .has-feedback .form-control-feedback {\n top: 0;\n }\n}\n@media (max-width: 540px) {\n .navbar-form .form-group {\n margin-bottom: 5px;\n }\n .navbar-form .form-group:last-child {\n margin-bottom: 0;\n }\n}\n@media (min-width: 541px) {\n .navbar-form {\n width: auto;\n border: 0;\n margin-left: 0;\n margin-right: 0;\n padding-top: 0;\n padding-bottom: 0;\n -webkit-box-shadow: none;\n box-shadow: none;\n }\n}\n.navbar-nav > li > .dropdown-menu {\n margin-top: 0;\n border-top-right-radius: 0;\n border-top-left-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n margin-bottom: 0;\n border-top-right-radius: 2px;\n border-top-left-radius: 2px;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.navbar-btn {\n margin-top: -1px;\n margin-bottom: -1px;\n}\n.navbar-btn.btn-sm {\n margin-top: 0px;\n margin-bottom: 0px;\n}\n.navbar-btn.btn-xs {\n margin-top: 4px;\n margin-bottom: 4px;\n}\n.navbar-text {\n margin-top: 6px;\n margin-bottom: 6px;\n}\n@media (min-width: 541px) {\n .navbar-text {\n float: left;\n margin-left: 0px;\n margin-right: 0px;\n }\n}\n@media (min-width: 541px) {\n .navbar-left {\n float: left !important;\n float: left;\n }\n .navbar-right {\n float: right !important;\n float: right;\n margin-right: 0px;\n }\n .navbar-right ~ .navbar-right {\n margin-right: 0;\n }\n}\n.navbar-default {\n background-color: #f8f8f8;\n border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n color: #5e5e5e;\n background-color: transparent;\n}\n.navbar-default .navbar-text {\n color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n color: #333;\n background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > 
a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n color: #555;\n background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default .navbar-nav > .disabled > a:focus {\n color: #ccc;\n background-color: transparent;\n}\n.navbar-default .navbar-toggle {\n border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n border-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n background-color: #e7e7e7;\n color: #555;\n}\n@media (max-width: 540px) {\n .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n color: #777;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n color: #333;\n background-color: transparent;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #555;\n background-color: #e7e7e7;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n color: #ccc;\n background-color: transparent;\n }\n}\n.navbar-default .navbar-link {\n color: #777;\n}\n.navbar-default .navbar-link:hover {\n color: #333;\n}\n.navbar-default .btn-link {\n color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n color: #ccc;\n}\n.navbar-inverse {\n background-color: #222;\n border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n color: #fff;\n background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n color: #fff;\n background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n color: #fff;\n background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n color: #444;\n background-color: transparent;\n}\n.navbar-inverse .navbar-toggle {\n border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n border-color: #101010;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n background-color: 
#080808;\n color: #fff;\n}\n@media (max-width: 540px) {\n .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n border-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n background-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n color: #9d9d9d;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n color: #fff;\n background-color: transparent;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #fff;\n background-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n color: #444;\n background-color: transparent;\n }\n}\n.navbar-inverse .navbar-link {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n color: #fff;\n}\n.navbar-inverse .btn-link {\n color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n color: #444;\n}\n.breadcrumb {\n padding: 8px 15px;\n margin-bottom: 18px;\n list-style: none;\n background-color: #f5f5f5;\n border-radius: 2px;\n}\n.breadcrumb > li {\n display: inline-block;\n}\n.breadcrumb > li + li:before {\n content: \"/\\00a0\";\n padding: 0 5px;\n color: #5e5e5e;\n}\n.breadcrumb > .active {\n color: #777777;\n}\n.pagination {\n display: inline-block;\n padding-left: 0;\n margin: 18px 0;\n border-radius: 2px;\n}\n.pagination > li {\n display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n position: relative;\n float: left;\n padding: 6px 12px;\n line-height: 1.42857143;\n text-decoration: none;\n color: #337ab7;\n background-color: #fff;\n border: 1px solid #ddd;\n margin-left: -1px;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n margin-left: 0;\n border-bottom-left-radius: 2px;\n border-top-left-radius: 2px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n border-bottom-right-radius: 2px;\n border-top-right-radius: 2px;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n z-index: 2;\n color: #23527c;\n background-color: #eeeeee;\n border-color: #ddd;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n z-index: 3;\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n cursor: default;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n color: #777777;\n background-color: #fff;\n border-color: #ddd;\n cursor: not-allowed;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n padding: 10px 16px;\n font-size: 17px;\n line-height: 1.3333333;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child 
> span {\n border-bottom-left-radius: 3px;\n border-top-left-radius: 3px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n border-bottom-right-radius: 3px;\n border-top-right-radius: 3px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n border-bottom-left-radius: 1px;\n border-top-left-radius: 1px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n border-bottom-right-radius: 1px;\n border-top-right-radius: 1px;\n}\n.pager {\n padding-left: 0;\n margin: 18px 0;\n list-style: none;\n text-align: center;\n}\n.pager li {\n display: inline;\n}\n.pager li > a,\n.pager li > span {\n display: inline-block;\n padding: 5px 14px;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n text-decoration: none;\n background-color: #eeeeee;\n}\n.pager .next > a,\n.pager .next > span {\n float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n color: #777777;\n background-color: #fff;\n cursor: not-allowed;\n}\n.label {\n display: inline;\n padding: .2em .6em .3em;\n font-size: 75%;\n font-weight: bold;\n line-height: 1;\n color: #fff;\n text-align: center;\n white-space: nowrap;\n vertical-align: baseline;\n border-radius: .25em;\n}\na.label:hover,\na.label:focus {\n color: #fff;\n text-decoration: none;\n cursor: pointer;\n}\n.label:empty {\n display: none;\n}\n.btn .label {\n position: relative;\n top: -1px;\n}\n.label-default {\n background-color: #777777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n background-color: #5e5e5e;\n}\n.label-primary {\n background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n background-color: #286090;\n}\n.label-success {\n background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n background-color: #449d44;\n}\n.label-info {\n background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n background-color: #31b0d5;\n}\n.label-warning {\n background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n background-color: #ec971f;\n}\n.label-danger {\n background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n background-color: #c9302c;\n}\n.badge {\n display: inline-block;\n min-width: 10px;\n padding: 3px 7px;\n font-size: 12px;\n font-weight: bold;\n color: #fff;\n line-height: 1;\n vertical-align: middle;\n white-space: nowrap;\n text-align: center;\n background-color: #777777;\n border-radius: 10px;\n}\n.badge:empty {\n display: none;\n}\n.btn .badge {\n position: relative;\n top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n top: 0;\n padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n color: #fff;\n text-decoration: none;\n cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.list-group-item > .badge {\n float: right;\n}\n.list-group-item > .badge + .badge {\n margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n margin-left: 3px;\n}\n.jumbotron {\n padding-top: 30px;\n padding-bottom: 30px;\n margin-bottom: 30px;\n color: inherit;\n background-color: #eeeeee;\n}\n.jumbotron 
h1,\n.jumbotron .h1 {\n color: inherit;\n}\n.jumbotron p {\n margin-bottom: 15px;\n font-size: 20px;\n font-weight: 200;\n}\n.jumbotron > hr {\n border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n border-radius: 3px;\n padding-left: 0px;\n padding-right: 0px;\n}\n.jumbotron .container {\n max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n .jumbotron {\n padding-top: 48px;\n padding-bottom: 48px;\n }\n .container .jumbotron,\n .container-fluid .jumbotron {\n padding-left: 60px;\n padding-right: 60px;\n }\n .jumbotron h1,\n .jumbotron .h1 {\n font-size: 59px;\n }\n}\n.thumbnail {\n display: block;\n padding: 4px;\n margin-bottom: 18px;\n line-height: 1.42857143;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 2px;\n -webkit-transition: border 0.2s ease-in-out;\n -o-transition: border 0.2s ease-in-out;\n transition: border 0.2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n margin-left: auto;\n margin-right: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n border-color: #337ab7;\n}\n.thumbnail .caption {\n padding: 9px;\n color: #000;\n}\n.alert {\n padding: 15px;\n margin-bottom: 18px;\n border: 1px solid transparent;\n border-radius: 2px;\n}\n.alert h4 {\n margin-top: 0;\n color: inherit;\n}\n.alert .alert-link {\n font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n margin-bottom: 0;\n}\n.alert > p + p {\n margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n position: relative;\n top: -2px;\n right: -21px;\n color: inherit;\n}\n.alert-success {\n background-color: #dff0d8;\n border-color: #d6e9c6;\n color: #3c763d;\n}\n.alert-success hr {\n border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n color: #2b542c;\n}\n.alert-info {\n background-color: #d9edf7;\n border-color: #bce8f1;\n color: #31708f;\n}\n.alert-info hr {\n border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n color: #245269;\n}\n.alert-warning {\n background-color: #fcf8e3;\n border-color: #faebcc;\n color: #8a6d3b;\n}\n.alert-warning hr {\n border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n color: #66512c;\n}\n.alert-danger {\n background-color: #f2dede;\n border-color: #ebccd1;\n color: #a94442;\n}\n.alert-danger hr {\n border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n@keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n.progress {\n overflow: hidden;\n height: 18px;\n margin-bottom: 18px;\n background-color: #f5f5f5;\n border-radius: 2px;\n -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n}\n.progress-bar {\n float: left;\n width: 0%;\n height: 100%;\n font-size: 12px;\n line-height: 18px;\n color: #fff;\n text-align: center;\n background-color: #337ab7;\n -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n -webkit-transition: width 0.6s ease;\n -o-transition: width 0.6s ease;\n transition: width 0.6s ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, 
transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n -webkit-animation: progress-bar-stripes 2s linear infinite;\n -o-animation: progress-bar-stripes 2s linear infinite;\n animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.media 
{\n margin-top: 15px;\n}\n.media:first-child {\n margin-top: 0;\n}\n.media,\n.media-body {\n zoom: 1;\n overflow: hidden;\n}\n.media-body {\n width: 10000px;\n}\n.media-object {\n display: block;\n}\n.media-object.img-thumbnail {\n max-width: none;\n}\n.media-right,\n.media > .pull-right {\n padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n display: table-cell;\n vertical-align: top;\n}\n.media-middle {\n vertical-align: middle;\n}\n.media-bottom {\n vertical-align: bottom;\n}\n.media-heading {\n margin-top: 0;\n margin-bottom: 5px;\n}\n.media-list {\n padding-left: 0;\n list-style: none;\n}\n.list-group {\n margin-bottom: 20px;\n padding-left: 0;\n}\n.list-group-item {\n position: relative;\n display: block;\n padding: 10px 15px;\n margin-bottom: -1px;\n background-color: #fff;\n border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n border-top-right-radius: 2px;\n border-top-left-radius: 2px;\n}\n.list-group-item:last-child {\n margin-bottom: 0;\n border-bottom-right-radius: 2px;\n border-bottom-left-radius: 2px;\n}\na.list-group-item,\nbutton.list-group-item {\n color: #555;\n}\na.list-group-item .list-group-item-heading,\nbutton.list-group-item .list-group-item-heading {\n color: #333;\n}\na.list-group-item:hover,\nbutton.list-group-item:hover,\na.list-group-item:focus,\nbutton.list-group-item:focus {\n text-decoration: none;\n color: #555;\n background-color: #f5f5f5;\n}\nbutton.list-group-item {\n width: 100%;\n text-align: left;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n background-color: #eeeeee;\n color: #777777;\n cursor: not-allowed;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n color: #777777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n z-index: 2;\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n color: #c7ddef;\n}\n.list-group-item-success {\n color: #3c763d;\n background-color: #dff0d8;\n}\na.list-group-item-success,\nbutton.list-group-item-success {\n color: #3c763d;\n}\na.list-group-item-success .list-group-item-heading,\nbutton.list-group-item-success .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-success:hover,\nbutton.list-group-item-success:hover,\na.list-group-item-success:focus,\nbutton.list-group-item-success:focus {\n color: #3c763d;\n background-color: 
#d0e9c6;\n}\na.list-group-item-success.active,\nbutton.list-group-item-success.active,\na.list-group-item-success.active:hover,\nbutton.list-group-item-success.active:hover,\na.list-group-item-success.active:focus,\nbutton.list-group-item-success.active:focus {\n color: #fff;\n background-color: #3c763d;\n border-color: #3c763d;\n}\n.list-group-item-info {\n color: #31708f;\n background-color: #d9edf7;\n}\na.list-group-item-info,\nbutton.list-group-item-info {\n color: #31708f;\n}\na.list-group-item-info .list-group-item-heading,\nbutton.list-group-item-info .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-info:hover,\nbutton.list-group-item-info:hover,\na.list-group-item-info:focus,\nbutton.list-group-item-info:focus {\n color: #31708f;\n background-color: #c4e3f3;\n}\na.list-group-item-info.active,\nbutton.list-group-item-info.active,\na.list-group-item-info.active:hover,\nbutton.list-group-item-info.active:hover,\na.list-group-item-info.active:focus,\nbutton.list-group-item-info.active:focus {\n color: #fff;\n background-color: #31708f;\n border-color: #31708f;\n}\n.list-group-item-warning {\n color: #8a6d3b;\n background-color: #fcf8e3;\n}\na.list-group-item-warning,\nbutton.list-group-item-warning {\n color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading,\nbutton.list-group-item-warning .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-warning:hover,\nbutton.list-group-item-warning:hover,\na.list-group-item-warning:focus,\nbutton.list-group-item-warning:focus {\n color: #8a6d3b;\n background-color: #faf2cc;\n}\na.list-group-item-warning.active,\nbutton.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\nbutton.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus,\nbutton.list-group-item-warning.active:focus {\n color: #fff;\n background-color: #8a6d3b;\n border-color: #8a6d3b;\n}\n.list-group-item-danger {\n color: #a94442;\n background-color: #f2dede;\n}\na.list-group-item-danger,\nbutton.list-group-item-danger {\n color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading,\nbutton.list-group-item-danger .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-danger:hover,\nbutton.list-group-item-danger:hover,\na.list-group-item-danger:focus,\nbutton.list-group-item-danger:focus {\n color: #a94442;\n background-color: #ebcccc;\n}\na.list-group-item-danger.active,\nbutton.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\nbutton.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus,\nbutton.list-group-item-danger.active:focus {\n color: #fff;\n background-color: #a94442;\n border-color: #a94442;\n}\n.list-group-item-heading {\n margin-top: 0;\n margin-bottom: 5px;\n}\n.list-group-item-text {\n margin-bottom: 0;\n line-height: 1.3;\n}\n.panel {\n margin-bottom: 18px;\n background-color: #fff;\n border: 1px solid transparent;\n border-radius: 2px;\n -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.panel-body {\n padding: 15px;\n}\n.panel-heading {\n padding: 10px 15px;\n border-bottom: 1px solid transparent;\n border-top-right-radius: 1px;\n border-top-left-radius: 1px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n color: inherit;\n}\n.panel-title {\n margin-top: 0;\n margin-bottom: 0;\n font-size: 15px;\n color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n color: 
inherit;\n}\n.panel-footer {\n padding: 10px 15px;\n background-color: #f5f5f5;\n border-top: 1px solid #ddd;\n border-bottom-right-radius: 1px;\n border-bottom-left-radius: 1px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n border-width: 1px 0;\n border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n border-top: 0;\n border-top-right-radius: 1px;\n border-top-left-radius: 1px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n border-bottom: 0;\n border-bottom-right-radius: 1px;\n border-bottom-left-radius: 1px;\n}\n.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {\n border-top-right-radius: 0;\n border-top-left-radius: 0;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n border-top-width: 0;\n}\n.list-group + .panel-footer {\n border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n padding-left: 15px;\n padding-right: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n border-top-right-radius: 1px;\n border-top-left-radius: 1px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n border-top-left-radius: 1px;\n border-top-right-radius: 1px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n border-top-left-radius: 1px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > 
.table:first-child > tbody:first-child > tr:first-child th:last-child {\n border-top-right-radius: 1px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n border-bottom-right-radius: 1px;\n border-bottom-left-radius: 1px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n border-bottom-left-radius: 1px;\n border-bottom-right-radius: 1px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n border-bottom-left-radius: 1px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n border-bottom-right-radius: 1px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > 
.table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n border-bottom: 0;\n}\n.panel > .table-responsive {\n border: 0;\n margin-bottom: 0;\n}\n.panel-group {\n margin-bottom: 18px;\n}\n.panel-group .panel {\n margin-bottom: 0;\n border-radius: 2px;\n}\n.panel-group .panel + .panel {\n margin-top: 5px;\n}\n.panel-group .panel-heading {\n border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n border-bottom: 1px solid #ddd;\n}\n.panel-default {\n border-color: #ddd;\n}\n.panel-default > .panel-heading {\n color: #333333;\n background-color: #f5f5f5;\n border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n color: #f5f5f5;\n background-color: #333333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #ddd;\n}\n.panel-primary {\n border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #337ab7;\n}\n.panel-primary > .panel-heading .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #337ab7;\n}\n.panel-success {\n border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n 
20px;\n}\n.dirty-indicator.pull-left {\n margin-right: .3em;\n}\n.dirty-indicator.pull-right {\n margin-left: .3em;\n}\n.dirty-indicator-dirty {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n width: 20px;\n}\n.dirty-indicator-dirty.pull-left {\n margin-right: .3em;\n}\n.dirty-indicator-dirty.pull-right {\n margin-left: .3em;\n}\n.dirty-indicator-clean {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n width: 20px;\n}\n.dirty-indicator-clean.pull-left {\n margin-right: .3em;\n}\n.dirty-indicator-clean.pull-right {\n margin-left: .3em;\n}\n.dirty-indicator-clean:before {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n content: \"\\f00c\";\n}\n.dirty-indicator-clean:before.pull-left {\n margin-right: .3em;\n}\n.dirty-indicator-clean:before.pull-right {\n margin-left: .3em;\n}\n#filename {\n font-size: 16pt;\n display: table;\n padding: 0px 5px;\n}\n#current-mode {\n padding-left: 5px;\n padding-right: 5px;\n}\n#texteditor-backdrop {\n padding-top: 20px;\n padding-bottom: 20px;\n}\n@media not print {\n #texteditor-backdrop {\n background-color: #EEE;\n }\n}\n@media print {\n #texteditor-backdrop #texteditor-container .CodeMirror-gutter,\n #texteditor-backdrop #texteditor-container .CodeMirror-gutters {\n background-color: #fff;\n }\n}\n@media not print {\n #texteditor-backdrop #texteditor-container .CodeMirror-gutter,\n #texteditor-backdrop #texteditor-container .CodeMirror-gutters {\n background-color: #fff;\n }\n}\n@media not print {\n #texteditor-backdrop #texteditor-container {\n padding: 0px;\n background-color: #fff;\n -webkit-box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.2);\n box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.2);\n }\n}\n/*!\n*\n* IPython notebook\n*\n*/\n/* CSS font colors for translated ANSI colors. 
*/\n.ansibold {\n font-weight: bold;\n}\n/* use dark versions for foreground, to improve visibility */\n.ansiblack {\n color: black;\n}\n.ansired {\n color: darkred;\n}\n.ansigreen {\n color: darkgreen;\n}\n.ansiyellow {\n color: #c4a000;\n}\n.ansiblue {\n color: darkblue;\n}\n.ansipurple {\n color: darkviolet;\n}\n.ansicyan {\n color: steelblue;\n}\n.ansigray {\n color: gray;\n}\n/* and light for background, for the same reason */\n.ansibgblack {\n background-color: black;\n}\n.ansibgred {\n background-color: red;\n}\n.ansibggreen {\n background-color: green;\n}\n.ansibgyellow {\n background-color: yellow;\n}\n.ansibgblue {\n background-color: blue;\n}\n.ansibgpurple {\n background-color: magenta;\n}\n.ansibgcyan {\n background-color: cyan;\n}\n.ansibggray {\n background-color: gray;\n}\ndiv.cell {\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: vertical;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: vertical;\n -moz-box-align: stretch;\n display: box;\n box-orient: vertical;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: column;\n align-items: stretch;\n border-radius: 2px;\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n border-width: 1px;\n border-style: solid;\n border-color: transparent;\n width: 100%;\n padding: 5px;\n /* This acts as a spacer between cells, that is outside the border */\n margin: 0px;\n outline: none;\n border-left-width: 1px;\n padding-left: 5px;\n background: linear-gradient(to right, transparent -40px, transparent 1px, transparent 1px, transparent 100%);\n}\ndiv.cell.jupyter-soft-selected {\n border-left-color: #90CAF9;\n border-left-color: #E3F2FD;\n border-left-width: 1px;\n padding-left: 5px;\n border-right-color: #E3F2FD;\n border-right-width: 1px;\n background: #E3F2FD;\n}\n@media print {\n div.cell.jupyter-soft-selected {\n border-color: transparent;\n }\n}\ndiv.cell.selected {\n border-color: #ababab;\n border-left-width: 0px;\n padding-left: 6px;\n background: linear-gradient(to right, #42A5F5 -40px, #42A5F5 5px, transparent 5px, transparent 100%);\n}\n@media print {\n div.cell.selected {\n border-color: transparent;\n }\n}\ndiv.cell.selected.jupyter-soft-selected {\n border-left-width: 0;\n padding-left: 6px;\n background: linear-gradient(to right, #42A5F5 -40px, #42A5F5 7px, #E3F2FD 7px, #E3F2FD 100%);\n}\n.edit_mode div.cell.selected {\n border-color: #66BB6A;\n border-left-width: 0px;\n padding-left: 6px;\n background: linear-gradient(to right, #66BB6A -40px, #66BB6A 5px, transparent 5px, transparent 100%);\n}\n@media print {\n .edit_mode div.cell.selected {\n border-color: transparent;\n }\n}\n.prompt {\n /* This needs to be wide enough for 3 digit prompt numbers: In[100]: */\n min-width: 14ex;\n /* This padding is tuned to match the padding on the CodeMirror editor. 
*/\n padding: 0.4em;\n margin: 0px;\n font-family: monospace;\n text-align: right;\n /* This has to match that of the the CodeMirror class line-height below */\n line-height: 1.21429em;\n /* Don't highlight prompt number selection */\n -webkit-touch-callout: none;\n -webkit-user-select: none;\n -khtml-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n /* Use default cursor */\n cursor: default;\n}\n@media (max-width: 540px) {\n .prompt {\n text-align: left;\n }\n}\ndiv.inner_cell {\n min-width: 0;\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: vertical;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: vertical;\n -moz-box-align: stretch;\n display: box;\n box-orient: vertical;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: column;\n align-items: stretch;\n /* Old browsers */\n -webkit-box-flex: 1;\n -moz-box-flex: 1;\n box-flex: 1;\n /* Modern browsers */\n flex: 1;\n}\n/* input_area and input_prompt must match in top border and margin for alignment */\ndiv.input_area {\n border: 1px solid #cfcfcf;\n border-radius: 2px;\n background: #f7f7f7;\n line-height: 1.21429em;\n}\n/* This is needed so that empty prompt areas can collapse to zero height when there\n is no content in the output_subarea and the prompt. The main purpose of this is\n to make sure that empty JavaScript output_subareas have no height. */\ndiv.prompt:empty {\n padding-top: 0;\n padding-bottom: 0;\n}\ndiv.unrecognized_cell {\n padding: 5px 5px 5px 0px;\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: horizontal;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: horizontal;\n -moz-box-align: stretch;\n display: box;\n box-orient: horizontal;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: row;\n align-items: stretch;\n}\ndiv.unrecognized_cell .inner_cell {\n border-radius: 2px;\n padding: 5px;\n font-weight: bold;\n color: red;\n border: 1px solid #cfcfcf;\n background: #eaeaea;\n}\ndiv.unrecognized_cell .inner_cell a {\n color: inherit;\n text-decoration: none;\n}\ndiv.unrecognized_cell .inner_cell a:hover {\n color: inherit;\n text-decoration: none;\n}\n@media (max-width: 540px) {\n div.unrecognized_cell > div.prompt {\n display: none;\n }\n}\ndiv.code_cell {\n /* avoid page breaking on code cells when printing */\n}\n@media print {\n div.code_cell {\n page-break-inside: avoid;\n }\n}\n/* any special styling for code cells that are currently running goes here */\ndiv.input {\n page-break-inside: avoid;\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: horizontal;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: horizontal;\n -moz-box-align: stretch;\n display: box;\n box-orient: horizontal;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: row;\n align-items: stretch;\n}\n@media (max-width: 540px) {\n div.input {\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: vertical;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: vertical;\n -moz-box-align: stretch;\n display: box;\n box-orient: vertical;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: column;\n align-items: stretch;\n }\n}\n/* input_area and input_prompt must match in top border and margin for alignment */\ndiv.input_prompt {\n color: #303F9F;\n border-top: 1px solid transparent;\n}\ndiv.input_area > div.highlight {\n margin: 0.4em;\n border: 
none;\n padding: 0px;\n background-color: transparent;\n}\ndiv.input_area > div.highlight > pre {\n margin: 0px;\n border: none;\n padding: 0px;\n background-color: transparent;\n}\n/* The following gets added to the <head> if it is detected that the user has a\n * monospace font with inconsistent normal/bold/italic height. See\n * notebookmain.js. Such fonts will have keywords vertically offset with\n * respect to the rest of the text. The user should select a better font.\n * See: https://github.com/ipython/ipython/issues/1503\n *\n * .CodeMirror span {\n * vertical-align: bottom;\n * }\n */\n.CodeMirror {\n line-height: 1.21429em;\n /* Changed from 1em to our global default */\n font-size: 14px;\n height: auto;\n /* Changed to auto to autogrow */\n background: none;\n /* Changed from white to allow our bg to show through */\n}\n.CodeMirror-scroll {\n /* The CodeMirror docs are a bit fuzzy on if overflow-y should be hidden or visible.*/\n /* We have found that if it is visible, vertical scrollbars appear with font size changes.*/\n overflow-y: hidden;\n overflow-x: auto;\n}\n.CodeMirror-lines {\n /* In CM2, this used to be 0.4em, but in CM3 it went to 4px. We need the em value because */\n /* we have set a different line-height and want this to scale with that. */\n padding: 0.4em;\n}\n.CodeMirror-linenumber {\n padding: 0 8px 0 4px;\n}\n.CodeMirror-gutters {\n border-bottom-left-radius: 2px;\n border-top-left-radius: 2px;\n}\n.CodeMirror pre {\n /* In CM3 this went to 4px from 0 in CM2. We need the 0 value because of how we size */\n /* .CodeMirror-lines */\n padding: 0;\n border: 0;\n border-radius: 0;\n}\n/*\n\nOriginal style from softwaremaniacs.org (c) Ivan Sagalaev <[email protected]>\nAdapted from GitHub theme\n\n*/\n.highlight-base {\n color: #000;\n}\n.highlight-variable {\n color: #000;\n}\n.highlight-variable-2 {\n color: #1a1a1a;\n}\n.highlight-variable-3 {\n color: #333333;\n}\n.highlight-string {\n color: #BA2121;\n}\n.highlight-comment {\n color: #408080;\n font-style: italic;\n}\n.highlight-number {\n color: #080;\n}\n.highlight-atom {\n color: #88F;\n}\n.highlight-keyword {\n color: #008000;\n font-weight: bold;\n}\n.highlight-builtin {\n color: #008000;\n}\n.highlight-error {\n color: #f00;\n}\n.highlight-operator {\n color: #AA22FF;\n font-weight: bold;\n}\n.highlight-meta {\n color: #AA22FF;\n}\n/* previously not defined, copying from default codemirror */\n.highlight-def {\n color: #00f;\n}\n.highlight-string-2 {\n color: #f50;\n}\n.highlight-qualifier {\n color: #555;\n}\n.highlight-bracket {\n color: #997;\n}\n.highlight-tag {\n color: #170;\n}\n.highlight-attribute {\n color: #00c;\n}\n.highlight-header {\n color: blue;\n}\n.highlight-quote {\n color: #090;\n}\n.highlight-link {\n color: #00c;\n}\n/* apply the same style to codemirror */\n.cm-s-ipython span.cm-keyword {\n color: #008000;\n font-weight: bold;\n}\n.cm-s-ipython span.cm-atom {\n color: #88F;\n}\n.cm-s-ipython span.cm-number {\n color: #080;\n}\n.cm-s-ipython span.cm-def {\n color: #00f;\n}\n.cm-s-ipython span.cm-variable {\n color: #000;\n}\n.cm-s-ipython span.cm-operator {\n color: #AA22FF;\n font-weight: bold;\n}\n.cm-s-ipython span.cm-variable-2 {\n color: #1a1a1a;\n}\n.cm-s-ipython span.cm-variable-3 {\n color: #333333;\n}\n.cm-s-ipython span.cm-comment {\n color: #408080;\n font-style: italic;\n}\n.cm-s-ipython span.cm-string {\n color: #BA2121;\n}\n.cm-s-ipython span.cm-string-2 {\n color: #f50;\n}\n.cm-s-ipython span.cm-meta {\n color: #AA22FF;\n}\n.cm-s-ipython span.cm-qualifier {\n color: 
#555;\n}\n.cm-s-ipython span.cm-builtin {\n color: #008000;\n}\n.cm-s-ipython span.cm-bracket {\n color: #997;\n}\n.cm-s-ipython span.cm-tag {\n color: #170;\n}\n.cm-s-ipython span.cm-attribute {\n color: #00c;\n}\n.cm-s-ipython span.cm-header {\n color: blue;\n}\n.cm-s-ipython span.cm-quote {\n color: #090;\n}\n.cm-s-ipython span.cm-link {\n color: #00c;\n}\n.cm-s-ipython span.cm-error {\n color: #f00;\n}\n.cm-s-ipython span.cm-tab {\n background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAMCAYAAAAkuj5RAAAAAXNSR0IArs4c6QAAAGFJREFUSMft1LsRQFAQheHPowAKoACx3IgEKtaEHujDjORSgWTH/ZOdnZOcM/sgk/kFFWY0qV8foQwS4MKBCS3qR6ixBJvElOobYAtivseIE120FaowJPN75GMu8j/LfMwNjh4HUpwg4LUAAAAASUVORK5CYII=);\n background-position: right;\n background-repeat: no-repeat;\n}\ndiv.output_wrapper {\n /* this position must be relative to enable descendents to be absolute within it */\n position: relative;\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: vertical;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: vertical;\n -moz-box-align: stretch;\n display: box;\n box-orient: vertical;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: column;\n align-items: stretch;\n z-index: 1;\n}\n/* class for the output area when it should be height-limited */\ndiv.output_scroll {\n /* ideally, this would be max-height, but FF barfs all over that */\n height: 24em;\n /* FF needs this *and the wrapper* to specify full width, or it will shrinkwrap */\n width: 100%;\n overflow: auto;\n border-radius: 2px;\n -webkit-box-shadow: inset 0 2px 8px rgba(0, 0, 0, 0.8);\n box-shadow: inset 0 2px 8px rgba(0, 0, 0, 0.8);\n display: block;\n}\n/* output div while it is collapsed */\ndiv.output_collapsed {\n margin: 0px;\n padding: 0px;\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: vertical;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: vertical;\n -moz-box-align: stretch;\n display: box;\n box-orient: vertical;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: column;\n align-items: stretch;\n}\ndiv.out_prompt_overlay {\n height: 100%;\n padding: 0px 0.4em;\n position: absolute;\n border-radius: 2px;\n}\ndiv.out_prompt_overlay:hover {\n /* use inner shadow to get border that is computed the same on WebKit/FF */\n -webkit-box-shadow: inset 0 0 1px #000;\n box-shadow: inset 0 0 1px #000;\n background: rgba(240, 240, 240, 0.5);\n}\ndiv.output_prompt {\n color: #D84315;\n}\n/* This class is the outer container of all output sections. 
*/\ndiv.output_area {\n padding: 0px;\n page-break-inside: avoid;\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: horizontal;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: horizontal;\n -moz-box-align: stretch;\n display: box;\n box-orient: horizontal;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: row;\n align-items: stretch;\n}\ndiv.output_area .MathJax_Display {\n text-align: left !important;\n}\ndiv.output_area .rendered_html table {\n margin-left: 0;\n margin-right: 0;\n}\ndiv.output_area .rendered_html img {\n margin-left: 0;\n margin-right: 0;\n}\ndiv.output_area img,\ndiv.output_area svg {\n max-width: 100%;\n height: auto;\n}\ndiv.output_area img.unconfined,\ndiv.output_area svg.unconfined {\n max-width: none;\n}\n/* This is needed to protect the pre formating from global settings such\n as that of bootstrap */\n.output {\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: vertical;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: vertical;\n -moz-box-align: stretch;\n display: box;\n box-orient: vertical;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: column;\n align-items: stretch;\n}\n@media (max-width: 540px) {\n div.output_area {\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: vertical;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: vertical;\n -moz-box-align: stretch;\n display: box;\n box-orient: vertical;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: column;\n align-items: stretch;\n }\n}\ndiv.output_area pre {\n margin: 0;\n padding: 0;\n border: 0;\n vertical-align: baseline;\n color: black;\n background-color: transparent;\n border-radius: 0;\n}\n/* This class is for the output subarea inside the output_area and after\n the prompt div. */\ndiv.output_subarea {\n overflow-x: auto;\n padding: 0.4em;\n /* Old browsers */\n -webkit-box-flex: 1;\n -moz-box-flex: 1;\n box-flex: 1;\n /* Modern browsers */\n flex: 1;\n max-width: calc(100% - 14ex);\n}\ndiv.output_scroll div.output_subarea {\n overflow-x: visible;\n}\n/* The rest of the output_* classes are for special styling of the different\n output types */\n/* all text output has this class: */\ndiv.output_text {\n text-align: left;\n color: #000;\n /* This has to match that of the the CodeMirror class line-height below */\n line-height: 1.21429em;\n}\n/* stdout/stderr are 'text' as well as 'stream', but execute_result/error are *not* streams */\ndiv.output_stderr {\n background: #fdd;\n /* very light red background for stderr */\n}\ndiv.output_latex {\n text-align: left;\n}\n/* Empty output_javascript divs should have no height */\ndiv.output_javascript:empty {\n padding: 0;\n}\n.js-error {\n color: darkred;\n}\n/* raw_input styles */\ndiv.raw_input_container {\n line-height: 1.21429em;\n padding-top: 5px;\n}\npre.raw_input_prompt {\n /* nothing needed here. 
*/\n}\ninput.raw_input {\n font-family: monospace;\n font-size: inherit;\n color: inherit;\n width: auto;\n /* make sure input baseline aligns with prompt */\n vertical-align: baseline;\n /* padding + margin = 0.5em between prompt and cursor */\n padding: 0em 0.25em;\n margin: 0em 0.25em;\n}\ninput.raw_input:focus {\n box-shadow: none;\n}\np.p-space {\n margin-bottom: 10px;\n}\ndiv.output_unrecognized {\n padding: 5px;\n font-weight: bold;\n color: red;\n}\ndiv.output_unrecognized a {\n color: inherit;\n text-decoration: none;\n}\ndiv.output_unrecognized a:hover {\n color: inherit;\n text-decoration: none;\n}\n.rendered_html {\n color: #000;\n /* any extras will just be numbers: */\n}\n.rendered_html em {\n font-style: italic;\n}\n.rendered_html strong {\n font-weight: bold;\n}\n.rendered_html u {\n text-decoration: underline;\n}\n.rendered_html :link {\n text-decoration: underline;\n}\n.rendered_html :visited {\n text-decoration: underline;\n}\n.rendered_html h1 {\n font-size: 185.7%;\n margin: 1.08em 0 0 0;\n font-weight: bold;\n line-height: 1.0;\n}\n.rendered_html h2 {\n font-size: 157.1%;\n margin: 1.27em 0 0 0;\n font-weight: bold;\n line-height: 1.0;\n}\n.rendered_html h3 {\n font-size: 128.6%;\n margin: 1.55em 0 0 0;\n font-weight: bold;\n line-height: 1.0;\n}\n.rendered_html h4 {\n font-size: 100%;\n margin: 2em 0 0 0;\n font-weight: bold;\n line-height: 1.0;\n}\n.rendered_html h5 {\n font-size: 100%;\n margin: 2em 0 0 0;\n font-weight: bold;\n line-height: 1.0;\n font-style: italic;\n}\n.rendered_html h6 {\n font-size: 100%;\n margin: 2em 0 0 0;\n font-weight: bold;\n line-height: 1.0;\n font-style: italic;\n}\n.rendered_html h1:first-child {\n margin-top: 0.538em;\n}\n.rendered_html h2:first-child {\n margin-top: 0.636em;\n}\n.rendered_html h3:first-child {\n margin-top: 0.777em;\n}\n.rendered_html h4:first-child {\n margin-top: 1em;\n}\n.rendered_html h5:first-child {\n margin-top: 1em;\n}\n.rendered_html h6:first-child {\n margin-top: 1em;\n}\n.rendered_html ul {\n list-style: disc;\n margin: 0em 2em;\n padding-left: 0px;\n}\n.rendered_html ul ul {\n list-style: square;\n margin: 0em 2em;\n}\n.rendered_html ul ul ul {\n list-style: circle;\n margin: 0em 2em;\n}\n.rendered_html ol {\n list-style: decimal;\n margin: 0em 2em;\n padding-left: 0px;\n}\n.rendered_html ol ol {\n list-style: upper-alpha;\n margin: 0em 2em;\n}\n.rendered_html ol ol ol {\n list-style: lower-alpha;\n margin: 0em 2em;\n}\n.rendered_html ol ol ol ol {\n list-style: lower-roman;\n margin: 0em 2em;\n}\n.rendered_html ol ol ol ol ol {\n list-style: decimal;\n margin: 0em 2em;\n}\n.rendered_html * + ul {\n margin-top: 1em;\n}\n.rendered_html * + ol {\n margin-top: 1em;\n}\n.rendered_html hr {\n color: black;\n background-color: black;\n}\n.rendered_html pre {\n margin: 1em 2em;\n}\n.rendered_html pre,\n.rendered_html code {\n border: 0;\n background-color: #fff;\n color: #000;\n font-size: 100%;\n padding: 0px;\n}\n.rendered_html blockquote {\n margin: 1em 2em;\n}\n.rendered_html table {\n margin-left: auto;\n margin-right: auto;\n border: 1px solid black;\n border-collapse: collapse;\n}\n.rendered_html tr,\n.rendered_html th,\n.rendered_html td {\n border: 1px solid black;\n border-collapse: collapse;\n margin: 1em 2em;\n}\n.rendered_html td,\n.rendered_html th {\n text-align: left;\n vertical-align: middle;\n padding: 4px;\n}\n.rendered_html th {\n font-weight: bold;\n}\n.rendered_html * + table {\n margin-top: 1em;\n}\n.rendered_html p {\n text-align: left;\n}\n.rendered_html * + p {\n margin-top: 
1em;\n}\n.rendered_html img {\n display: block;\n margin-left: auto;\n margin-right: auto;\n}\n.rendered_html * + img {\n margin-top: 1em;\n}\n.rendered_html img,\n.rendered_html svg {\n max-width: 100%;\n height: auto;\n}\n.rendered_html img.unconfined,\n.rendered_html svg.unconfined {\n max-width: none;\n}\ndiv.text_cell {\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: horizontal;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: horizontal;\n -moz-box-align: stretch;\n display: box;\n box-orient: horizontal;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: row;\n align-items: stretch;\n}\n@media (max-width: 540px) {\n div.text_cell > div.prompt {\n display: none;\n }\n}\ndiv.text_cell_render {\n /*font-family: \"Helvetica Neue\", Arial, Helvetica, Geneva, sans-serif;*/\n outline: none;\n resize: none;\n width: inherit;\n border-style: none;\n padding: 0.5em 0.5em 0.5em 0.4em;\n color: #000;\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n}\na.anchor-link:link {\n text-decoration: none;\n padding: 0px 20px;\n visibility: hidden;\n}\nh1:hover .anchor-link,\nh2:hover .anchor-link,\nh3:hover .anchor-link,\nh4:hover .anchor-link,\nh5:hover .anchor-link,\nh6:hover .anchor-link {\n visibility: visible;\n}\n.text_cell.rendered .input_area {\n display: none;\n}\n.text_cell.rendered .rendered_html {\n overflow-x: auto;\n overflow-y: hidden;\n}\n.text_cell.unrendered .text_cell_render {\n display: none;\n}\n.cm-header-1,\n.cm-header-2,\n.cm-header-3,\n.cm-header-4,\n.cm-header-5,\n.cm-header-6 {\n font-weight: bold;\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n}\n.cm-header-1 {\n font-size: 185.7%;\n}\n.cm-header-2 {\n font-size: 157.1%;\n}\n.cm-header-3 {\n font-size: 128.6%;\n}\n.cm-header-4 {\n font-size: 110%;\n}\n.cm-header-5 {\n font-size: 100%;\n font-style: italic;\n}\n.cm-header-6 {\n font-size: 100%;\n font-style: italic;\n}\n/*!\n*\n* IPython notebook webapp\n*\n*/\n@media (max-width: 767px) {\n .notebook_app {\n padding-left: 0px;\n padding-right: 0px;\n }\n}\n#ipython-main-app {\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n height: 100%;\n}\ndiv#notebook_panel {\n margin: 0px;\n padding: 0px;\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n height: 100%;\n}\ndiv#notebook {\n font-size: 14px;\n line-height: 20px;\n overflow-y: hidden;\n overflow-x: auto;\n width: 100%;\n /* This spaces the page away from the edge of the notebook area */\n padding-top: 20px;\n margin: 0px;\n outline: none;\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n min-height: 100%;\n}\n@media not print {\n #notebook-container {\n padding: 15px;\n background-color: #fff;\n min-height: 0;\n -webkit-box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.2);\n box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.2);\n }\n}\n@media print {\n #notebook-container {\n width: 100%;\n }\n}\ndiv.ui-widget-content {\n border: 1px solid #ababab;\n outline: none;\n}\npre.dialog {\n background-color: #f7f7f7;\n border: 1px solid #ddd;\n border-radius: 2px;\n padding: 0.4em;\n padding-left: 2em;\n}\np.dialog {\n padding: 0.2em;\n}\n/* Word-wrap output correctly. This is the CSS3 spelling, though Firefox seems\n to not honor it correctly. 
Webkit browsers (Chrome, rekonq, Safari) do.\n */\npre,\ncode,\nkbd,\nsamp {\n white-space: pre-wrap;\n}\n#fonttest {\n font-family: monospace;\n}\np {\n margin-bottom: 0;\n}\n.end_space {\n min-height: 100px;\n transition: height .2s ease;\n}\n.notebook_app > #header {\n -webkit-box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.2);\n box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.2);\n}\n@media not print {\n .notebook_app {\n background-color: #EEE;\n }\n}\nkbd {\n border-style: solid;\n border-width: 1px;\n box-shadow: none;\n margin: 2px;\n padding-left: 2px;\n padding-right: 2px;\n padding-top: 1px;\n padding-bottom: 1px;\n}\n/* CSS for the cell toolbar */\n.celltoolbar {\n border: thin solid #CFCFCF;\n border-bottom: none;\n background: #EEE;\n border-radius: 2px 2px 0px 0px;\n width: 100%;\n height: 29px;\n padding-right: 4px;\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: horizontal;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: horizontal;\n -moz-box-align: stretch;\n display: box;\n box-orient: horizontal;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: row;\n align-items: stretch;\n /* Old browsers */\n -webkit-box-pack: end;\n -moz-box-pack: end;\n box-pack: end;\n /* Modern browsers */\n justify-content: flex-end;\n display: -webkit-flex;\n}\n@media print {\n .celltoolbar {\n display: none;\n }\n}\n.ctb_hideshow {\n display: none;\n vertical-align: bottom;\n}\n/* ctb_show is added to the ctb_hideshow div to show the cell toolbar.\n Cell toolbars are only shown when the ctb_global_show class is also set.\n*/\n.ctb_global_show .ctb_show.ctb_hideshow {\n display: block;\n}\n.ctb_global_show .ctb_show + .input_area,\n.ctb_global_show .ctb_show + div.text_cell_input,\n.ctb_global_show .ctb_show ~ div.text_cell_render {\n border-top-right-radius: 0px;\n border-top-left-radius: 0px;\n}\n.ctb_global_show .ctb_show ~ div.text_cell_render {\n border: 1px solid #cfcfcf;\n}\n.celltoolbar {\n font-size: 87%;\n padding-top: 3px;\n}\n.celltoolbar select {\n display: block;\n width: 100%;\n height: 32px;\n padding: 6px 12px;\n font-size: 13px;\n line-height: 1.42857143;\n color: #555555;\n background-color: #fff;\n background-image: none;\n border: 1px solid #ccc;\n border-radius: 2px;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 1px;\n width: inherit;\n font-size: inherit;\n height: 22px;\n padding: 0px;\n display: inline-block;\n}\n.celltoolbar select:focus {\n border-color: #66afe9;\n outline: 0;\n -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6);\n box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, 0.6);\n}\n.celltoolbar select::-moz-placeholder {\n color: #999;\n opacity: 1;\n}\n.celltoolbar select:-ms-input-placeholder {\n color: #999;\n}\n.celltoolbar select::-webkit-input-placeholder {\n color: #999;\n}\n.celltoolbar select::-ms-expand {\n border: 0;\n background-color: transparent;\n}\n.celltoolbar select[disabled],\n.celltoolbar select[readonly],\nfieldset[disabled] .celltoolbar select {\n background-color: #eeeeee;\n opacity: 1;\n}\n.celltoolbar 
select[disabled],\nfieldset[disabled] .celltoolbar select {\n cursor: not-allowed;\n}\ntextarea.celltoolbar select {\n height: auto;\n}\nselect.celltoolbar select {\n height: 30px;\n line-height: 30px;\n}\ntextarea.celltoolbar select,\nselect[multiple].celltoolbar select {\n height: auto;\n}\n.celltoolbar label {\n margin-left: 5px;\n margin-right: 5px;\n}\n.completions {\n position: absolute;\n z-index: 110;\n overflow: hidden;\n border: 1px solid #ababab;\n border-radius: 2px;\n -webkit-box-shadow: 0px 6px 10px -1px #adadad;\n box-shadow: 0px 6px 10px -1px #adadad;\n line-height: 1;\n}\n.completions select {\n background: white;\n outline: none;\n border: none;\n padding: 0px;\n margin: 0px;\n overflow: auto;\n font-family: monospace;\n font-size: 110%;\n color: #000;\n width: auto;\n}\n.completions select option.context {\n color: #286090;\n}\n#kernel_logo_widget {\n float: right !important;\n float: right;\n}\n#kernel_logo_widget .current_kernel_logo {\n display: none;\n margin-top: -1px;\n margin-bottom: -1px;\n width: 32px;\n height: 32px;\n}\n#menubar {\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n margin-top: 1px;\n}\n#menubar .navbar {\n border-top: 1px;\n border-radius: 0px 0px 2px 2px;\n margin-bottom: 0px;\n}\n#menubar .navbar-toggle {\n float: left;\n padding-top: 7px;\n padding-bottom: 7px;\n border: none;\n}\n#menubar .navbar-collapse {\n clear: left;\n}\n.nav-wrapper {\n border-bottom: 1px solid #e7e7e7;\n}\ni.menu-icon {\n padding-top: 4px;\n}\nul#help_menu li a {\n overflow: hidden;\n padding-right: 2.2em;\n}\nul#help_menu li a i {\n margin-right: -1.2em;\n}\n.dropdown-submenu {\n position: relative;\n}\n.dropdown-submenu > .dropdown-menu {\n top: 0;\n left: 100%;\n margin-top: -6px;\n margin-left: -1px;\n}\n.dropdown-submenu:hover > .dropdown-menu {\n display: block;\n}\n.dropdown-submenu > a:after {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n display: block;\n content: \"\\f0da\";\n float: right;\n color: #333333;\n margin-top: 2px;\n margin-right: -10px;\n}\n.dropdown-submenu > a:after.pull-left {\n margin-right: .3em;\n}\n.dropdown-submenu > a:after.pull-right {\n margin-left: .3em;\n}\n.dropdown-submenu:hover > a:after {\n color: #262626;\n}\n.dropdown-submenu.pull-left {\n float: none;\n}\n.dropdown-submenu.pull-left > .dropdown-menu {\n left: -100%;\n margin-left: 10px;\n}\n#notification_area {\n float: right !important;\n float: right;\n z-index: 10;\n}\n.indicator_area {\n float: right !important;\n float: right;\n color: #777;\n margin-left: 5px;\n margin-right: 5px;\n width: 11px;\n z-index: 10;\n text-align: center;\n width: auto;\n}\n#kernel_indicator {\n float: right !important;\n float: right;\n color: #777;\n margin-left: 5px;\n margin-right: 5px;\n width: 11px;\n z-index: 10;\n text-align: center;\n width: auto;\n border-left: 1px solid;\n}\n#kernel_indicator .kernel_indicator_name {\n padding-left: 5px;\n padding-right: 5px;\n}\n#modal_indicator {\n float: right !important;\n float: right;\n color: #777;\n margin-left: 5px;\n margin-right: 5px;\n width: 11px;\n z-index: 10;\n text-align: center;\n width: auto;\n}\n#readonly-indicator {\n float: right !important;\n float: right;\n color: #777;\n margin-left: 5px;\n margin-right: 5px;\n width: 11px;\n z-index: 10;\n text-align: center;\n width: auto;\n margin-top: 2px;\n margin-bottom: 0px;\n margin-left: 
0px;\n margin-right: 0px;\n display: none;\n}\n.modal_indicator:before {\n width: 1.28571429em;\n text-align: center;\n}\n.edit_mode .modal_indicator:before {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n content: \"\\f040\";\n}\n.edit_mode .modal_indicator:before.pull-left {\n margin-right: .3em;\n}\n.edit_mode .modal_indicator:before.pull-right {\n margin-left: .3em;\n}\n.command_mode .modal_indicator:before {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n content: ' ';\n}\n.command_mode .modal_indicator:before.pull-left {\n margin-right: .3em;\n}\n.command_mode .modal_indicator:before.pull-right {\n margin-left: .3em;\n}\n.kernel_idle_icon:before {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n content: \"\\f10c\";\n}\n.kernel_idle_icon:before.pull-left {\n margin-right: .3em;\n}\n.kernel_idle_icon:before.pull-right {\n margin-left: .3em;\n}\n.kernel_busy_icon:before {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n content: \"\\f111\";\n}\n.kernel_busy_icon:before.pull-left {\n margin-right: .3em;\n}\n.kernel_busy_icon:before.pull-right {\n margin-left: .3em;\n}\n.kernel_dead_icon:before {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n content: \"\\f1e2\";\n}\n.kernel_dead_icon:before.pull-left {\n margin-right: .3em;\n}\n.kernel_dead_icon:before.pull-right {\n margin-left: .3em;\n}\n.kernel_disconnected_icon:before {\n display: inline-block;\n font: normal normal normal 14px/1 FontAwesome;\n font-size: inherit;\n text-rendering: auto;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n content: \"\\f127\";\n}\n.kernel_disconnected_icon:before.pull-left {\n margin-right: .3em;\n}\n.kernel_disconnected_icon:before.pull-right {\n margin-left: .3em;\n}\n.notification_widget {\n color: #777;\n z-index: 10;\n background: rgba(240, 240, 240, 0.5);\n margin-right: 4px;\n color: #333;\n background-color: #fff;\n border-color: #ccc;\n}\n.notification_widget:focus,\n.notification_widget.focus {\n color: #333;\n background-color: #e6e6e6;\n border-color: #8c8c8c;\n}\n.notification_widget:hover {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\n.notification_widget:active,\n.notification_widget.active,\n.open > .dropdown-toggle.notification_widget {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\n.notification_widget:active:hover,\n.notification_widget.active:hover,\n.open > .dropdown-toggle.notification_widget:hover,\n.notification_widget:active:focus,\n.notification_widget.active:focus,\n.open > .dropdown-toggle.notification_widget:focus,\n.notification_widget:active.focus,\n.notification_widget.active.focus,\n.open > .dropdown-toggle.notification_widget.focus {\n color: #333;\n background-color: #d4d4d4;\n border-color: 
#8c8c8c;\n}\n.notification_widget:active,\n.notification_widget.active,\n.open > .dropdown-toggle.notification_widget {\n background-image: none;\n}\n.notification_widget.disabled:hover,\n.notification_widget[disabled]:hover,\nfieldset[disabled] .notification_widget:hover,\n.notification_widget.disabled:focus,\n.notification_widget[disabled]:focus,\nfieldset[disabled] .notification_widget:focus,\n.notification_widget.disabled.focus,\n.notification_widget[disabled].focus,\nfieldset[disabled] .notification_widget.focus {\n background-color: #fff;\n border-color: #ccc;\n}\n.notification_widget .badge {\n color: #fff;\n background-color: #333;\n}\n.notification_widget.warning {\n color: #fff;\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.notification_widget.warning:focus,\n.notification_widget.warning.focus {\n color: #fff;\n background-color: #ec971f;\n border-color: #985f0d;\n}\n.notification_widget.warning:hover {\n color: #fff;\n background-color: #ec971f;\n border-color: #d58512;\n}\n.notification_widget.warning:active,\n.notification_widget.warning.active,\n.open > .dropdown-toggle.notification_widget.warning {\n color: #fff;\n background-color: #ec971f;\n border-color: #d58512;\n}\n.notification_widget.warning:active:hover,\n.notification_widget.warning.active:hover,\n.open > .dropdown-toggle.notification_widget.warning:hover,\n.notification_widget.warning:active:focus,\n.notification_widget.warning.active:focus,\n.open > .dropdown-toggle.notification_widget.warning:focus,\n.notification_widget.warning:active.focus,\n.notification_widget.warning.active.focus,\n.open > .dropdown-toggle.notification_widget.warning.focus {\n color: #fff;\n background-color: #d58512;\n border-color: #985f0d;\n}\n.notification_widget.warning:active,\n.notification_widget.warning.active,\n.open > .dropdown-toggle.notification_widget.warning {\n background-image: none;\n}\n.notification_widget.warning.disabled:hover,\n.notification_widget.warning[disabled]:hover,\nfieldset[disabled] .notification_widget.warning:hover,\n.notification_widget.warning.disabled:focus,\n.notification_widget.warning[disabled]:focus,\nfieldset[disabled] .notification_widget.warning:focus,\n.notification_widget.warning.disabled.focus,\n.notification_widget.warning[disabled].focus,\nfieldset[disabled] .notification_widget.warning.focus {\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.notification_widget.warning .badge {\n color: #f0ad4e;\n background-color: #fff;\n}\n.notification_widget.success {\n color: #fff;\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.notification_widget.success:focus,\n.notification_widget.success.focus {\n color: #fff;\n background-color: #449d44;\n border-color: #255625;\n}\n.notification_widget.success:hover {\n color: #fff;\n background-color: #449d44;\n border-color: #398439;\n}\n.notification_widget.success:active,\n.notification_widget.success.active,\n.open > .dropdown-toggle.notification_widget.success {\n color: #fff;\n background-color: #449d44;\n border-color: #398439;\n}\n.notification_widget.success:active:hover,\n.notification_widget.success.active:hover,\n.open > .dropdown-toggle.notification_widget.success:hover,\n.notification_widget.success:active:focus,\n.notification_widget.success.active:focus,\n.open > .dropdown-toggle.notification_widget.success:focus,\n.notification_widget.success:active.focus,\n.notification_widget.success.active.focus,\n.open > .dropdown-toggle.notification_widget.success.focus {\n color: #fff;\n background-color: #398439;\n 
border-color: #255625;\n}\n.notification_widget.success:active,\n.notification_widget.success.active,\n.open > .dropdown-toggle.notification_widget.success {\n background-image: none;\n}\n.notification_widget.success.disabled:hover,\n.notification_widget.success[disabled]:hover,\nfieldset[disabled] .notification_widget.success:hover,\n.notification_widget.success.disabled:focus,\n.notification_widget.success[disabled]:focus,\nfieldset[disabled] .notification_widget.success:focus,\n.notification_widget.success.disabled.focus,\n.notification_widget.success[disabled].focus,\nfieldset[disabled] .notification_widget.success.focus {\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.notification_widget.success .badge {\n color: #5cb85c;\n background-color: #fff;\n}\n.notification_widget.info {\n color: #fff;\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.notification_widget.info:focus,\n.notification_widget.info.focus {\n color: #fff;\n background-color: #31b0d5;\n border-color: #1b6d85;\n}\n.notification_widget.info:hover {\n color: #fff;\n background-color: #31b0d5;\n border-color: #269abc;\n}\n.notification_widget.info:active,\n.notification_widget.info.active,\n.open > .dropdown-toggle.notification_widget.info {\n color: #fff;\n background-color: #31b0d5;\n border-color: #269abc;\n}\n.notification_widget.info:active:hover,\n.notification_widget.info.active:hover,\n.open > .dropdown-toggle.notification_widget.info:hover,\n.notification_widget.info:active:focus,\n.notification_widget.info.active:focus,\n.open > .dropdown-toggle.notification_widget.info:focus,\n.notification_widget.info:active.focus,\n.notification_widget.info.active.focus,\n.open > .dropdown-toggle.notification_widget.info.focus {\n color: #fff;\n background-color: #269abc;\n border-color: #1b6d85;\n}\n.notification_widget.info:active,\n.notification_widget.info.active,\n.open > .dropdown-toggle.notification_widget.info {\n background-image: none;\n}\n.notification_widget.info.disabled:hover,\n.notification_widget.info[disabled]:hover,\nfieldset[disabled] .notification_widget.info:hover,\n.notification_widget.info.disabled:focus,\n.notification_widget.info[disabled]:focus,\nfieldset[disabled] .notification_widget.info:focus,\n.notification_widget.info.disabled.focus,\n.notification_widget.info[disabled].focus,\nfieldset[disabled] .notification_widget.info.focus {\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.notification_widget.info .badge {\n color: #5bc0de;\n background-color: #fff;\n}\n.notification_widget.danger {\n color: #fff;\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.notification_widget.danger:focus,\n.notification_widget.danger.focus {\n color: #fff;\n background-color: #c9302c;\n border-color: #761c19;\n}\n.notification_widget.danger:hover {\n color: #fff;\n background-color: #c9302c;\n border-color: #ac2925;\n}\n.notification_widget.danger:active,\n.notification_widget.danger.active,\n.open > .dropdown-toggle.notification_widget.danger {\n color: #fff;\n background-color: #c9302c;\n border-color: #ac2925;\n}\n.notification_widget.danger:active:hover,\n.notification_widget.danger.active:hover,\n.open > .dropdown-toggle.notification_widget.danger:hover,\n.notification_widget.danger:active:focus,\n.notification_widget.danger.active:focus,\n.open > .dropdown-toggle.notification_widget.danger:focus,\n.notification_widget.danger:active.focus,\n.notification_widget.danger.active.focus,\n.open > .dropdown-toggle.notification_widget.danger.focus {\n color: #fff;\n 
background-color: #ac2925;\n border-color: #761c19;\n}\n.notification_widget.danger:active,\n.notification_widget.danger.active,\n.open > .dropdown-toggle.notification_widget.danger {\n background-image: none;\n}\n.notification_widget.danger.disabled:hover,\n.notification_widget.danger[disabled]:hover,\nfieldset[disabled] .notification_widget.danger:hover,\n.notification_widget.danger.disabled:focus,\n.notification_widget.danger[disabled]:focus,\nfieldset[disabled] .notification_widget.danger:focus,\n.notification_widget.danger.disabled.focus,\n.notification_widget.danger[disabled].focus,\nfieldset[disabled] .notification_widget.danger.focus {\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.notification_widget.danger .badge {\n color: #d9534f;\n background-color: #fff;\n}\ndiv#pager {\n background-color: #fff;\n font-size: 14px;\n line-height: 20px;\n overflow: hidden;\n display: none;\n position: fixed;\n bottom: 0px;\n width: 100%;\n max-height: 50%;\n padding-top: 8px;\n -webkit-box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.2);\n box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.2);\n /* Display over codemirror */\n z-index: 100;\n /* Hack which prevents jquery ui resizable from changing top. */\n top: auto !important;\n}\ndiv#pager pre {\n line-height: 1.21429em;\n color: #000;\n background-color: #f7f7f7;\n padding: 0.4em;\n}\ndiv#pager #pager-button-area {\n position: absolute;\n top: 8px;\n right: 20px;\n}\ndiv#pager #pager-contents {\n position: relative;\n overflow: auto;\n width: 100%;\n height: 100%;\n}\ndiv#pager #pager-contents #pager-container {\n position: relative;\n padding: 15px 0px;\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n}\ndiv#pager .ui-resizable-handle {\n top: 0px;\n height: 8px;\n background: #f7f7f7;\n border-top: 1px solid #cfcfcf;\n border-bottom: 1px solid #cfcfcf;\n /* This injects handle bars (a short, wide = symbol) for \n the resize handle. 
*/\n}\ndiv#pager .ui-resizable-handle::after {\n content: '';\n top: 2px;\n left: 50%;\n height: 3px;\n width: 30px;\n margin-left: -15px;\n position: absolute;\n border-top: 1px solid #cfcfcf;\n}\n.quickhelp {\n /* Old browsers */\n display: -webkit-box;\n -webkit-box-orient: horizontal;\n -webkit-box-align: stretch;\n display: -moz-box;\n -moz-box-orient: horizontal;\n -moz-box-align: stretch;\n display: box;\n box-orient: horizontal;\n box-align: stretch;\n /* Modern browsers */\n display: flex;\n flex-direction: row;\n align-items: stretch;\n line-height: 1.8em;\n}\n.shortcut_key {\n display: inline-block;\n width: 21ex;\n text-align: right;\n font-family: monospace;\n}\n.shortcut_descr {\n display: inline-block;\n /* Old browsers */\n -webkit-box-flex: 1;\n -moz-box-flex: 1;\n box-flex: 1;\n /* Modern browsers */\n flex: 1;\n}\nspan.save_widget {\n margin-top: 6px;\n}\nspan.save_widget span.filename {\n height: 1em;\n line-height: 1em;\n padding: 3px;\n margin-left: 16px;\n border: none;\n font-size: 146.5%;\n border-radius: 2px;\n}\nspan.save_widget span.filename:hover {\n background-color: #e6e6e6;\n}\nspan.checkpoint_status,\nspan.autosave_status {\n font-size: small;\n}\n@media (max-width: 767px) {\n span.save_widget {\n font-size: small;\n }\n span.checkpoint_status,\n span.autosave_status {\n display: none;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n span.checkpoint_status {\n display: none;\n }\n span.autosave_status {\n font-size: x-small;\n }\n}\n.toolbar {\n padding: 0px;\n margin-left: -5px;\n margin-top: 2px;\n margin-bottom: 5px;\n box-sizing: border-box;\n -moz-box-sizing: border-box;\n -webkit-box-sizing: border-box;\n}\n.toolbar select,\n.toolbar label {\n width: auto;\n vertical-align: middle;\n margin-right: 2px;\n margin-bottom: 0px;\n display: inline;\n font-size: 92%;\n margin-left: 0.3em;\n margin-right: 0.3em;\n padding: 0px;\n padding-top: 3px;\n}\n.toolbar .btn {\n padding: 2px 8px;\n}\n.toolbar .btn-group {\n margin-top: 0px;\n margin-left: 5px;\n}\n#maintoolbar {\n margin-bottom: -3px;\n margin-top: -8px;\n border: 0px;\n min-height: 27px;\n margin-left: 0px;\n padding-top: 11px;\n padding-bottom: 3px;\n}\n#maintoolbar .navbar-text {\n float: none;\n vertical-align: middle;\n text-align: right;\n margin-left: 5px;\n margin-right: 0px;\n margin-top: 0px;\n}\n.select-xs {\n height: 24px;\n}\n.pulse,\n.dropdown-menu > li > a.pulse,\nli.pulse > a.dropdown-toggle,\nli.pulse.open > a.dropdown-toggle {\n background-color: #F37626;\n color: white;\n}\n/**\n * Primary styles\n *\n * Author: Jupyter Development Team\n */\n/** WARNING IF YOU ARE EDITTING THIS FILE, if this is a .css file, It has a lot\n * of chance of beeing generated from the ../less/[samename].less file, you can\n * try to get back the less file by reverting somme commit in history\n **/\n/*\n * We'll try to get something pretty, so we\n * have some strange css to have the scroll bar on\n * the left with fix button on the top right of the tooltip\n */\n@-moz-keyframes fadeOut {\n from {\n opacity: 1;\n }\n to {\n opacity: 0;\n }\n}\n@-webkit-keyframes fadeOut {\n from {\n opacity: 1;\n }\n to {\n opacity: 0;\n }\n}\n@-moz-keyframes fadeIn {\n from {\n opacity: 0;\n }\n to {\n opacity: 1;\n }\n}\n@-webkit-keyframes fadeIn {\n from {\n opacity: 0;\n }\n to {\n opacity: 1;\n }\n}\n/*properties of tooltip after \"expand\"*/\n.bigtooltip {\n overflow: auto;\n height: 200px;\n -webkit-transition-property: height;\n -webkit-transition-duration: 500ms;\n -moz-transition-property: height;\n 
-moz-transition-duration: 500ms;\n transition-property: height;\n transition-duration: 500ms;\n}\n/*properties of tooltip before \"expand\"*/\n.smalltooltip {\n -webkit-transition-property: height;\n -webkit-transition-duration: 500ms;\n -moz-transition-property: height;\n -moz-transition-duration: 500ms;\n transition-property: height;\n transition-duration: 500ms;\n text-overflow: ellipsis;\n overflow: hidden;\n height: 80px;\n}\n.tooltipbuttons {\n position: absolute;\n padding-right: 15px;\n top: 0px;\n right: 0px;\n}\n.tooltiptext {\n /*avoid the button to overlap on some docstring*/\n padding-right: 30px;\n}\n.ipython_tooltip {\n max-width: 700px;\n /*fade-in animation when inserted*/\n -webkit-animation: fadeOut 400ms;\n -moz-animation: fadeOut 400ms;\n animation: fadeOut 400ms;\n -webkit-animation: fadeIn 400ms;\n -moz-animation: fadeIn 400ms;\n animation: fadeIn 400ms;\n vertical-align: middle;\n background-color: #f7f7f7;\n overflow: visible;\n border: #ababab 1px solid;\n outline: none;\n padding: 3px;\n margin: 0px;\n padding-left: 7px;\n font-family: monospace;\n min-height: 50px;\n -moz-box-shadow: 0px 6px 10px -1px #adadad;\n -webkit-box-shadow: 0px 6px 10px -1px #adadad;\n box-shadow: 0px 6px 10px -1px #adadad;\n border-radius: 2px;\n position: absolute;\n z-index: 1000;\n}\n.ipython_tooltip a {\n float: right;\n}\n.ipython_tooltip .tooltiptext pre {\n border: 0;\n border-radius: 0;\n font-size: 100%;\n background-color: #f7f7f7;\n}\n.pretooltiparrow {\n left: 0px;\n margin: 0px;\n top: -16px;\n width: 40px;\n height: 16px;\n overflow: hidden;\n position: absolute;\n}\n.pretooltiparrow:before {\n background-color: #f7f7f7;\n border: 1px #ababab solid;\n z-index: 11;\n content: \"\";\n position: absolute;\n left: 15px;\n top: 10px;\n width: 25px;\n height: 25px;\n -webkit-transform: rotate(45deg);\n -moz-transform: rotate(45deg);\n -ms-transform: rotate(45deg);\n -o-transform: rotate(45deg);\n}\nul.typeahead-list i {\n margin-left: -10px;\n width: 18px;\n}\nul.typeahead-list {\n max-height: 80vh;\n overflow: auto;\n}\nul.typeahead-list > li > a {\n /** Firefox bug **/\n /* see https://github.com/jupyter/notebook/issues/559 */\n white-space: normal;\n}\n.cmd-palette .modal-body {\n padding: 7px;\n}\n.cmd-palette form {\n background: white;\n}\n.cmd-palette input {\n outline: none;\n}\n.no-shortcut {\n display: none;\n}\n.command-shortcut:before {\n content: \"(command)\";\n padding-right: 3px;\n color: #777777;\n}\n.edit-shortcut:before {\n content: \"(edit)\";\n padding-right: 3px;\n color: #777777;\n}\n#find-and-replace #replace-preview .match,\n#find-and-replace #replace-preview .insert {\n background-color: #BBDEFB;\n border-color: #90CAF9;\n border-style: solid;\n border-width: 1px;\n border-radius: 0px;\n}\n#find-and-replace #replace-preview .replace .match {\n background-color: #FFCDD2;\n border-color: #EF9A9A;\n border-radius: 0px;\n}\n#find-and-replace #replace-preview .replace .insert {\n background-color: #C8E6C9;\n border-color: #A5D6A7;\n border-radius: 0px;\n}\n#find-and-replace #replace-preview {\n max-height: 60vh;\n overflow: auto;\n}\n#find-and-replace #replace-preview pre {\n padding: 5px 10px;\n}\n.terminal-app {\n background: #EEE;\n}\n.terminal-app #header {\n background: #fff;\n -webkit-box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.2);\n box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.2);\n}\n.terminal-app .terminal {\n width: 100%;\n float: left;\n font-family: monospace;\n color: white;\n background: black;\n padding: 0.4em;\n border-radius: 2px;\n 
-webkit-box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.4);\n box-shadow: 0px 0px 12px 1px rgba(87, 87, 87, 0.4);\n}\n.terminal-app .terminal,\n.terminal-app .terminal dummy-screen {\n line-height: 1em;\n font-size: 14px;\n}\n.terminal-app .terminal .xterm-rows {\n padding: 10px;\n}\n.terminal-app .terminal-cursor {\n color: black;\n background: white;\n}\n.terminal-app #terminado-container {\n margin-top: 20px;\n}\n/*# sourceMappingURL=style.min.css.map */\n </style>\n<style type=\"text/css\">\n .highlight .hll { background-color: #ffffcc }\n.highlight { background: #f8f8f8; }\n.highlight .c { color: #408080; font-style: italic } /* Comment */\n.highlight .err { border: 1px solid #FF0000 } /* Error */\n.highlight .k { color: #008000; font-weight: bold } /* Keyword */\n.highlight .o { color: #666666 } /* Operator */\n.highlight .ch { color: #408080; font-style: italic } /* Comment.Hashbang */\n.highlight .cm { color: #408080; font-style: italic } /* Comment.Multiline */\n.highlight .cp { color: #BC7A00 } /* Comment.Preproc */\n.highlight .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */\n.highlight .c1 { color: #408080; font-style: italic } /* Comment.Single */\n.highlight .cs { color: #408080; font-style: italic } /* Comment.Special */\n.highlight .gd { color: #A00000 } /* Generic.Deleted */\n.highlight .ge { font-style: italic } /* Generic.Emph */\n.highlight .gr { color: #FF0000 } /* Generic.Error */\n.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */\n.highlight .gi { color: #00A000 } /* Generic.Inserted */\n.highlight .go { color: #888888 } /* Generic.Output */\n.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */\n.highlight .gs { font-weight: bold } /* Generic.Strong */\n.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */\n.highlight .gt { color: #0044DD } /* Generic.Traceback */\n.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */\n.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */\n.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */\n.highlight .kp { color: #008000 } /* Keyword.Pseudo */\n.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */\n.highlight .kt { color: #B00040 } /* Keyword.Type */\n.highlight .m { color: #666666 } /* Literal.Number */\n.highlight .s { color: #BA2121 } /* Literal.String */\n.highlight .na { color: #7D9029 } /* Name.Attribute */\n.highlight .nb { color: #008000 } /* Name.Builtin */\n.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */\n.highlight .no { color: #880000 } /* Name.Constant */\n.highlight .nd { color: #AA22FF } /* Name.Decorator */\n.highlight .ni { color: #999999; font-weight: bold } /* Name.Entity */\n.highlight .ne { color: #D2413A; font-weight: bold } /* Name.Exception */\n.highlight .nf { color: #0000FF } /* Name.Function */\n.highlight .nl { color: #A0A000 } /* Name.Label */\n.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */\n.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */\n.highlight .nv { color: #19177C } /* Name.Variable */\n.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */\n.highlight .w { color: #bbbbbb } /* Text.Whitespace */\n.highlight .mb { color: #666666 } /* Literal.Number.Bin */\n.highlight .mf { color: #666666 } /* Literal.Number.Float */\n.highlight .mh { color: #666666 } /* Literal.Number.Hex */\n.highlight .mi { color: #666666 } /* 
Literal.Number.Integer */\n.highlight .mo { color: #666666 } /* Literal.Number.Oct */\n.highlight .sa { color: #BA2121 } /* Literal.String.Affix */\n.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */\n.highlight .sc { color: #BA2121 } /* Literal.String.Char */\n.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */\n.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */\n.highlight .s2 { color: #BA2121 } /* Literal.String.Double */\n.highlight .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */\n.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */\n.highlight .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */\n.highlight .sx { color: #008000 } /* Literal.String.Other */\n.highlight .sr { color: #BB6688 } /* Literal.String.Regex */\n.highlight .s1 { color: #BA2121 } /* Literal.String.Single */\n.highlight .ss { color: #19177C } /* Literal.String.Symbol */\n.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */\n.highlight .fm { color: #0000FF } /* Name.Function.Magic */\n.highlight .vc { color: #19177C } /* Name.Variable.Class */\n.highlight .vg { color: #19177C } /* Name.Variable.Global */\n.highlight .vi { color: #19177C } /* Name.Variable.Instance */\n.highlight .vm { color: #19177C } /* Name.Variable.Magic */\n.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */\n </style>\n<style type=\"text/css\">\n \n/* Temporary definitions which will become obsolete with Notebook release 5.0 */\n.ansi-black-fg { color: #3E424D; }\n.ansi-black-bg { background-color: #3E424D; }\n.ansi-black-intense-fg { color: #282C36; }\n.ansi-black-intense-bg { background-color: #282C36; }\n.ansi-red-fg { color: #E75C58; }\n.ansi-red-bg { background-color: #E75C58; }\n.ansi-red-intense-fg { color: #B22B31; }\n.ansi-red-intense-bg { background-color: #B22B31; }\n.ansi-green-fg { color: #00A250; }\n.ansi-green-bg { background-color: #00A250; }\n.ansi-green-intense-fg { color: #007427; }\n.ansi-green-intense-bg { background-color: #007427; }\n.ansi-yellow-fg { color: #DDB62B; }\n.ansi-yellow-bg { background-color: #DDB62B; }\n.ansi-yellow-intense-fg { color: #B27D12; }\n.ansi-yellow-intense-bg { background-color: #B27D12; }\n.ansi-blue-fg { color: #208FFB; }\n.ansi-blue-bg { background-color: #208FFB; }\n.ansi-blue-intense-fg { color: #0065CA; }\n.ansi-blue-intense-bg { background-color: #0065CA; }\n.ansi-magenta-fg { color: #D160C4; }\n.ansi-magenta-bg { background-color: #D160C4; }\n.ansi-magenta-intense-fg { color: #A03196; }\n.ansi-magenta-intense-bg { background-color: #A03196; }\n.ansi-cyan-fg { color: #60C6C8; }\n.ansi-cyan-bg { background-color: #60C6C8; }\n.ansi-cyan-intense-fg { color: #258F8F; }\n.ansi-cyan-intense-bg { background-color: #258F8F; }\n.ansi-white-fg { color: #C5C1B4; }\n.ansi-white-bg { background-color: #C5C1B4; }\n.ansi-white-intense-fg { color: #A1A6B2; }\n.ansi-white-intense-bg { background-color: #A1A6B2; }\n\n.ansi-bold { font-weight: bold; }\n\n </style>\n\n\n<style type=\"text/css\">\n/* Overrides of notebook CSS for static HTML export */\nbody {\n overflow: visible;\n padding: 8px;\n}\n\ndiv#notebook {\n overflow: visible;\n border-top: none;\n}\n\n@media print {\n div.cell {\n display: block;\n page-break-inside: avoid;\n } \n div.output_wrapper { \n display: block;\n page-break-inside: avoid; \n }\n div.output { \n display: block;\n page-break-inside: avoid; \n }\n}\n</style>\n\n<!-- Custom stylesheet, it must be in the same directory as the html file -->\n<link 
rel=\"stylesheet\" href=\"custom.css\">\n\n<!-- Loading mathjax macro -->\n<!-- Load mathjax -->\n <script src=\"https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS_HTML\"></script>\n <!-- MathJax configuration -->\n <script type=\"text/x-mathjax-config\">\n MathJax.Hub.Config({\n tex2jax: {\n inlineMath: [ ['$','$'], [\"\\\\(\",\"\\\\)\"] ],\n displayMath: [ ['$$','$$'], [\"\\\\[\",\"\\\\]\"] ],\n processEscapes: true,\n processEnvironments: true\n },\n // Center justify equations in code and markdown cells. Elsewhere\n // we use CSS to left justify single line equations in code cells.\n displayAlign: 'center',\n \"HTML-CSS\": {\n styles: {'.MathJax_Display': {\"margin\": 0}},\n linebreaks: { automatic: true }\n }\n });\n </script>\n <!-- End of mathjax configuration --></head>\n<body>\n <div tabindex=\"-1\" id=\"notebook\" class=\"border-box-sizing\">\n <div class=\"container\" id=\"notebook-container\">\n\n\n <div style=\"border:thin solid red\">\n \n<div class=\"cell border-box-sizing text_cell rendered\">\n<div class=\"prompt input_prompt\">\n</div>\n<div class=\"inner_cell\">\n<div class=\"text_cell_render border-box-sizing rendered_html\">\n<h1 id=\"Example-notebook\">Example notebook<a class=\"anchor-link\" href=\"#Example-notebook\">&#182;</a></h1>\n</div>\n</div>\n</div>\n </div>\n\n\n\n \n<div class=\"cell border-box-sizing text_cell rendered\">\n<div class=\"prompt input_prompt\">\n</div>\n<div class=\"inner_cell\">\n<div class=\"text_cell_render border-box-sizing rendered_html\">\n<h3 id=\"Markdown-cells\">Markdown cells<a class=\"anchor-link\" href=\"#Markdown-cells\">&#182;</a></h3><p>This is an example notebook that can be converted with <code>nbconvert</code> to different formats. This is an example of a markdown cell.</p>\n\n</div>\n</div>\n</div>\n\n\n\n \n<div class=\"cell border-box-sizing text_cell rendered\">\n<div class=\"prompt input_prompt\">\n</div>\n<div class=\"inner_cell\">\n<div class=\"text_cell_render border-box-sizing rendered_html\">\n<h3 id=\"LaTeX-Equations\">LaTeX Equations<a class=\"anchor-link\" href=\"#LaTeX-Equations\">&#182;</a></h3><p>Here is an equation:</p>\n$$\ny = \\sin(x)\n$$\n</div>\n</div>\n</div>\n\n\n\n <div style=\"border:thin solid green\">\n \n<div class=\"cell border-box-sizing text_cell rendered\">\n<div class=\"prompt input_prompt\">\n</div>\n<div class=\"inner_cell\">\n<div class=\"text_cell_render border-box-sizing rendered_html\">\n<h3 id=\"Code-cells\">Code cells<a class=\"anchor-link\" href=\"#Code-cells\">&#182;</a></h3>\n</div>\n</div>\n</div>\n </div>\n\n\n\n \n<div class=\"cell border-box-sizing code_cell rendered\">\n<div class=\"input\">\n<div class=\"prompt input_prompt\">In&nbsp;[1]:</div>\n<div class=\"inner_cell\">\n <div class=\"input_area\">\n<div class=\" highlight hl-ipython3\"><pre><span></span><span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s2\">&quot;This is a code cell that produces some output&quot;</span><span class=\"p\">)</span>\n</pre></div>\n\n</div>\n</div>\n</div>\n\n<div class=\"output_wrapper\">\n<div class=\"output\">\n\n\n<div class=\"output_area\">\n\n<div class=\"prompt\"></div>\n\n\n<div class=\"output_subarea output_stream output_stdout output_text\">\n<pre>This is a code cell that produces some output\n</pre>\n</div>\n</div>\n\n</div>\n</div>\n\n</div>\n\n\n\n \n<div class=\"cell border-box-sizing text_cell rendered\">\n<div class=\"prompt input_prompt\">\n</div>\n<div class=\"inner_cell\">\n<div class=\"text_cell_render border-box-sizing 
rendered_html\">\n<h3 id=\"Inline-figures\">Inline figures<a class=\"anchor-link\" href=\"#Inline-figures\">&#182;</a></h3>\n</div>\n</div>\n</div>\n\n\n\n <div style=\"border:thin solid orange\">\n \n<div class=\"cell border-box-sizing code_cell rendered\">\n<div class=\"input\">\n<div class=\"prompt input_prompt\">In&nbsp;[1]:</div>\n<div class=\"inner_cell\">\n <div class=\"input_area\">\n<div class=\" highlight hl-ipython3\"><pre><span></span><span class=\"kn\">import</span> <span class=\"nn\">matplotlib.pyplot</span> <span class=\"k\">as</span> <span class=\"nn\">plt</span>\n<span class=\"kn\">import</span> <span class=\"nn\">numpy</span> <span class=\"k\">as</span> <span class=\"nn\">np</span>\n<span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">ion</span><span class=\"p\">()</span>\n\n<span class=\"n\">x</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">linspace</span><span class=\"p\">(</span><span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">2</span> <span class=\"o\">*</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">pi</span><span class=\"p\">,</span> <span class=\"mi\">100</span><span class=\"p\">)</span>\n<span class=\"n\">y</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">sin</span><span class=\"p\">(</span><span class=\"n\">x</span><span class=\"p\">)</span>\n<span class=\"n\">plt</span><span class=\"o\">.</span><span class=\"n\">plot</span><span class=\"p\">(</span><span class=\"n\">x</span><span class=\"p\">,</span> <span class=\"n\">y</span><span class=\"p\">)</span>\n</pre></div>\n\n</div>\n</div>\n</div>\n\n<div class=\"output_wrapper\">\n<div class=\"output\">\n\n\n<div class=\"output_area\">\n\n<div class=\"prompt output_prompt\">Out[1]:</div>\n\n\n\n\n<div class=\"output_text output_subarea output_execute_result\">\n<pre>[&lt;matplotlib.lines.Line2D at 0x1111b2160&gt;]</pre>\n</div>\n\n</div>\n\n<div class=\"output_area\">\n\n<div class=\"prompt\"></div>\n\n\n\n\n<div class=\"output_png output_subarea \">\n<img 
src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYYAAAD8CAYAAABzTgP2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xd4lfX9//HnO5sMEkLCyoAAYW9iUHAwBSeKC6yKOHBb\na2vFr7Zaq63WVlHEgThwax1AFWWjKCIEZEPIYCSsJISRQfbn90cO/pKYkHFOcp/xflzXuXLOfe47\n5xVa88rnXh8xxqCUUkqd5mV1AKWUUs5Fi0EppVQ1WgxKKaWq0WJQSilVjRaDUkqparQYlFJKVaPF\noJRSqhotBqWUUtVoMSillKrGx+oATREREWG6dOlidQyllHIpGzZsyDHGRNa3nksWQ5cuXUhKSrI6\nhlJKuRQR2deQ9XRXklJKqWq0GJRSSlWjxaCUUqoaLQallFLVaDEopZSqxiHFICJviUiWiGyr430R\nkZdEJFVEtojIkCrvTRWRFNtjqiPyKKWUajpHjRjeASac4f2LgHjbYzrwKoCIhAOPA8OAROBxEWnj\noExKKaWawCHXMRhjvheRLmdYZSLwrqmcR3StiISJSEdgJLDUGJMLICJLqSyYjxyRSzVOQXEZqVn5\npGXnc7ywlOKyCorLymnl60271v60Cwmga2QQHUNbWR1VKdWMWuoCtyggo8rrTNuyupb/hohMp3K0\nQWxsbPOk9DCnSspZm36UlclZfLc7m31HCxu0XafQAAZ3bsOIbhFc3L8DYYF+zZxUKdWSWqoYpJZl\n5gzLf7vQmDnAHICEhIRa11ENs+vwSd79aR/zfzlAYUnliGB4t7ZcMzSa7u1C6N4umIhgP/x9vPHz\n8aKwpIysvGKOnCwi+XAeG/YdY8O+Y3y95RCPL9zGBT3acfXQaC7s0x4vr9r+J1VKuZKWKoZMIKbK\n62jgoG35yBrLV7VQJo+zbk8u/1mSzM97cvH38eKygZ24fGAnEuPCCfD1rnO7kABfQgJ86RYZzPBu\nEUwbEYcxhu0HT7Jg0wEWbj7Isp1H6BYZxN0ju3P5oE74eusJb0q5Kqnc7e+Ab1R5jOErY0y/Wt67\nBLgXuJjKA80vGWMSbQefNwCnz1LaCAw9fcyhLgkJCUbvldRwKUfyePbbXSzbmUX71v7cMiKOaxNi\naBPkmF1A5RWGRVsPMXtlKrsO59GlbSCPX9aXUb3aOeT7K6UcQ0Q2GGMS6lvPISMGEfmIyr/8I0Qk\nk8ozjXwBjDGvAYuoLIVUoBCYZnsvV0T+Dqy3fasn6ysF1XBFpeW8uDyF179LI8jPh4fG9+SWEXG0\n8qt7dNAU3l7CZQM7cemAjizfmcU/vtnJtHfWM75ve/56WV+iwvRgtVKuxGEjhpakI4b6bco4zkP/\n3UxKVj7XJkQz46LehDtohFCfkrIK5v6QzqzlqYjAkxP7cdWQKET0+INSVmroiEF3BLsZYwyvrkpj\n0is/kl9cxjvTzuJfVw9ssVIA8PPx4u6R3Vn64Pn0jwrlT//dzB8+2UReUWmLZVBKNZ1Lzsegapdf\nXMZD/93MN9sOc8mAjvxzUn9aB/halie6TSAf3n42s1emMnPZbn7JOM7cmxKIbx9iWSalVP10xOAm\n9uYUMPHlH1iy4wiPXtybl6cMtrQUTvP2Eu4fE88nd5xDYUk5k15Zw+qUbKtjKaXOQIvBDWzNPMFV\nr64ht6CE925N5Pbzuzrd/vyzuoQz/54RRLVpxc1vr+f9tQ2aSEopZQEtBhf3Q0oOk+f8RICvN5/d\nNZzh3SKsjlSnqLBWfHbXcC7oEclj87fx4rIUXPHkB6XcnRaDC/t22yGmvbOOmPBAvrh7ON0ig62O\nVK9gfx/euCmBq4ZE88Ky3fxrcbKWg1JORg8+u6gl2w9z74e/MCA6lLenJRLayvrjCQ3l7SU8d/UA\nAny9eHVVGkWl5fz10j5Ot/tLKU+lxeCCVuw6wj0fbqRvVCjzbkkkxAkOMjeWl5fw1BX98Pfx5q0f\n9+DjJfzfxb21HJRyAloMLmZ1SjZ3vreRXh1a866LlsJpIsJfLu1NeUUFb6zeQ1igH/eM6m51LKU8\nnhaDC9l24AR3vreBrpFBvHera+0+qouI8PhlfTlxqpTnFicT2sqXG87ubHUspTyaFoOLyMgtZNo7\n6wlt5cu8WxLdag4ELy/huWsGkldUxl8WbCMi2I8J/TpaHUspj6VnJbmA44Ul3Pz2OopLy5l3SyLt\nWwdYHcnhfL29mP27IQyOCeOBTzaxJfO41ZGU8lhaDE6utLyCO9/fQEbuKd5w89tJBPh6M+emBCKC\n/bltXhKHTpyyOpJSHkmLwck9/fVO1qbn8sxV/RnWta3VcZpdRLA/b049i8KScm59J4mC4jKrIynl\ncbQYnNinSRm8s2Yvt50bx6Qh0VbHaTE9O4Qw6/rB7Dp8kj9/vkUvgFOqhTmkGERkgogki0iqiMyo\n5f0XRGST7bFbRI5Xea+8ynsLHZHHHWzcf4zHvtzGefERzLiol9VxWtyonu340/iefL3lEG//uNfq\nOEp5FLvPShIRb2A2MI7KOZzXi8hCY8yO0+sYY/5QZf37gMFVvsUpY8wge3O4k9yCEu75YCPtQ/2Z\nNWUwPh46f/JdF3Tjl/3H+ceinQyIDiWhS7jVkZTyCI74jZMIpBpj0o0xJcDHwMQzrD8F+MgBn+uW\nKioMf/x0E0fzS3j1d0Pd6rTUxhIR/nPtQKLbtOLuDzaSlVdkdSSlPIIjiiEKyKjyOtO27DdEpDMQ\nB6yosjhARJJEZK2IXOGAPC7tjdXprEzO5rFLe9MvKtTqOJZrHeDLazcO5WRRKQ9+spmKCj3eoFRz\nc0Qx1HZzm7r+650MfGaMKa+yLNY2B+n1wEwR6Vbrh4hMtxVIUna2e070smFfLv9anMzF/Ttwo179\n+6teHVrz+GV9+SE1hzdWp1sdRym354hiyARiqryOBg7Wse5kauxGMsYctH1NB1ZR/fhD1fXmGGMS\njDEJkZGR9mZ2OieLSrn/o01EhbXimasG6M3kaph8VgwX9evAc4uT2ZyhF78p1ZwcUQzrgXgRiRMR\nPyp/+f/m7CIR6Qm0AX6qsqyNiPjbnkcAI4AdNbf1BE8s3M7hk0XMnDzIKabkdDYiwj8n9ScyxJ/7\nP/6FfL2+QalmY3cxGGPKgHuBxcBO4FNjzHYReVJELq+y6hTgY1P9pPTeQJKIbAZWAs9UPZvJUyza\neogvNh7gnlHdGRLbxuo4Tiss0I+Z1w0iI7eQJ/+33eo4SrktccWLhxISEkxSUpLVMRziyMkixs/8\nns7hgXx213B8PfTU1MZ49ttdvLoqjbduTmB0r/ZWx1HKZYjIBtsx3TPS30IWMsbw0GdbKC6t4IXr\nBmkpNNADY+Pp2T6Ehz/fyvHCEqvjKOV29DeRhf67IZPvd2fzyMW96OoC8zU7C38fb/5z7UCOFZTw\n+ELdpaSUo2kxWOTwi
SL+/tUOhsWFc8MwPTW1sfpFhXLf6HgWbDrIt9sOWR1HKbeixWABYwyPfrmV\n0vIKnr1qAF5eempqU9w9qhv9olrz2PztnCgstTqOUm5Di8ECCzcfZPmuLP50YU+6RARZHcdl+Xp7\n8cykARwrLOEfi3ZaHUcpt6HF0MJyC0p4YuF2BseGMW1EnNVxXF6/qFBuP68rnyRlsCY1x+o4SrkF\nLYYW9o9FO8krKuPZqwbgrbuQHOKBsfF0bhvII19upai0vP4NlFJnpMXQgn5KO8pnGzKZfn5Xerjx\nFJ0tLcDXm39O6s++o4XMXJZidRylXJ4WQwspLivn0S+3EhseyH2j462O43aGd4vgmqHRzF2dTsqR\nPKvjKOXStBhayKur0kjPKeDvV/SjlZ+31XHc0oyLehHk78Nj87fpdKBK2UGLoQXsO1rAK6vSuGxg\nJy7o4X53hnUWbYP9eXhCL37ek8v8TQesjqOUy9JiaAFP/m8Hvl7CY5f0tjqK25t8VgwDY8J4+utd\nnDil1zYo1RRaDM1s+c4jLN+VxQNje9C+dYDVcdyel5fw9BX9yC0o5vklyVbHUcolaTE0o6LScv72\nvx10bxfMzSO6WB3HY/SLCuWGszvz3tp97Dp80uo4SrkcLYZm9Mb36ezPLeRvl/fVO6e2sAfH9aB1\nK1+eWLhdD0Qr1Uj626qZHDx+itmrUrmkf0dGdI+wOo7HCQv0448X9mRtei7fbDtsdRylXIpDikFE\nJohIsoikisiMWt6/WUSyRWST7XFblfemikiK7THVEXmcwbPf7sIYeOTiXlZH8VjXJ8bSq0MIT3+9\nU6+IVqoR7C4GEfEGZgMXAX2AKSLSp5ZVPzHGDLI95tq2DQceB4YBicDjIuLyc1tu2HeMBZsOMv38\nrkS3CbQ6jsfy9hKeuLwvB46f4vXv0q2Oo5TLcMSIIRFINcakG2NKgI+BiQ3cdjyw1BiTa4w5BiwF\nJjggk2UqKgxPfrWD9q39ufOCblbH8Xhnd23LJf078up3qRw+UWR1HKVcgiOKIQrIqPI607aspqtE\nZIuIfCYiMY3c1mUs2HyAzRnH+fP4yqtwlfVmXNSLigr4t56+qlSDOKIYartFaM3TQP4HdDHGDACW\nAfMasW3liiLTRSRJRJKys7ObHLY5FZaU8ew3yQyIDuXKwS7db24lJjyQaSO68PnGTLYdOGF1HKWc\nniOKIROIqfI6GjhYdQVjzFFjTLHt5RvA0IZuW+V7zDHGJBhjEiIjnfO2EnNX7+HwySL+cmkfnZXN\nydw9qjttAv146usdevqqUvVwRDGsB+JFJE5E/IDJwMKqK4hIxyovLwdOT7e1GLhQRNrYDjpfaFvm\ncrLzinn9uzTG923PWV3CrY6jaght5csfxsazNj2XZTuzrI6jlFOzuxiMMWXAvVT+Qt8JfGqM2S4i\nT4rI5bbV7heR7SKyGbgfuNm2bS7wdyrLZT3wpG2Zy5m5bDfFZRU8PEFPT3VWUxJj6RYZxD8X7aS0\nvMLqOEo5LXHFYXVCQoJJSkqyOsavUrPyGT/ze24YFsvfJvazOo46g2U7jnDbu0n8/Yp+3Hh2Z6vj\nKNWiRGSDMSahvvX0ymcHeOabXQT6enP/GJ2Ax9mN6d2OxC7hvLgshYLiMqvjKOWUtBjstG5PLst2\nHuHOkd1oG+xvdRxVDxFhxsW9yMkv5o3VetGbUrXRYrCDMYZnvtlJ+9b+3DIizuo4qoGGxLbhon4d\nmPN9Otl5xfVvoJSH0WKww9IdR9i4/zgPjO2h03W6mIfG96S4rIJZK1KsjqKU09FiaKLyCsNzi5Pp\nGhnENUOjrY6jGqlrZDBTEmP48Of97M0psDqOUk5Fi6GJPt+YSUpWPg9d2BMfnWvBJd0/Oh5fby9e\nWLbb6ihKORX9jdYERaXlzFy6m4ExYUzo18HqOKqJ2rUOYNqILizcfJCdh3SmN6VO02JogvfX7uPg\niSIeHt8TEb31hSu74/xuhPj78O/FeoM9pU7TYmik/OIyXl2VxrndIxiuM7O5vNBAX+4c2Y3lu7JI\n2uuSF90r5XBaDI309g97OFpQwp/G97Q6inKQacPjiAzx51+Lk/UGe0qhxdAoxwtLmLM6nXF92jMo\nJszqOMpBWvl5c//o7qzbk8vqlByr4yhlOS2GRnj9+3Tyi8v444U9rI6iHOy6s2KJCmvFv5foqEEp\nLYYGysor4u0f93D5wE706tDa6jjKwfx8vPj92Hi2ZJ5g6Y4jVsdRylJaDA30yso0SssND4zV0YK7\nmjQ4iriIIJ5fupuKCh01KM+lxdAAh06c4sN1+7l6SDRxEUFWx1HNxMfbiwfGxrPrcB5fbz1kdRyl\nLKPF0ACzV6ZijOHe0d2tjqKa2WUDOtGzfQgvLNtNmU7mozyUQ4pBRCaISLKIpIrIjFref1BEdojI\nFhFZLiKdq7xXLiKbbI+FNbe1WuaxQj5Zn8G1CTHEhAdaHUc1My8v4Q/j4knPLmDBplqnH1fK7dld\nDCLiDcwGLgL6AFNEpE+N1X4BEowxA4DPgH9Vee+UMWaQ7XE5TmbW8lREREcLHmR83w706dial1ak\n6KhBeSRHjBgSgVRjTLoxpgT4GJhYdQVjzEpjTKHt5VrAJW5Huu9oAZ9tzOT6xFg6hrayOo5qISLC\nH8b1YN/RQr745YDVcZRqcY4ohiggo8rrTNuyutwKfFPldYCIJInIWhG5oq6NRGS6bb2k7Oxs+xI3\n0EvLU/H1Fu4e2a1FPk85j7G929E/KpRZK1Io1VGD8jCOKIba7iJX67l+InIDkAA8V2VxrG1y6uuB\nmSJS629hY8wcY0yCMSYhMjLS3sz12pNTwJe/ZHLDsM60ax3Q7J+nnEvlqCGejNxTfL4h0+o4SrUo\nRxRDJhBT5XU08JujdiIyFngUuNwY8+t8isaYg7av6cAqYLADMtlt1vIU/Hy8uOMCHS14qlE92zEw\nJoxZK1IpKdNRg/IcjiiG9UC8iMSJiB8wGah2dpGIDAZep7IUsqosbyMi/rbnEcAIYIcDMtklPTuf\n+ZsOcOPZnYkM8bc6jrKIiPCHsfEcOH6Kz3TUoDyI3cVgjCkD7gUWAzuBT40x20XkSRE5fZbRc0Aw\n8N8ap6X2BpJEZDOwEnjGGGN5McxakYqfjxfTz9fRgqe7oEckg2LCmL1SRw3Kc/g44psYYxYBi2os\n+2uV52Pr2G4N0N8RGRwlLTufBZsOcNt5XXW0oBARHhgbz81vr+ezDZlcPyzW6khKNTu98rmGl1ek\n4u/jzfTzu1odRTkJHTUoT6PFUEW6bbRw4zmdiQjW0YKqdHrUcOD4KT7fqMcalPvTYqjiZduxhdvP\n09GCqu70qOFlPUNJeQAtBps9OQXM33SAG4bpmUjqt0SE3+uoQXkILQabl1ek4uvtxfQLdLSgajey\nRyQDo0OZvTJVr4ZWbk2Lgcp7Is3fdIDfDetMuxC9ylnV7vSoIfPYKb7cqPdQUu
5Li4HK+RZ8vIQ7\ndbSg6jGqZ+U9lF5emap3XlVuy+OLISO3kC82HmBKYqzeE0nVS0S4f0w8+3MLma/zNSg35fHF8Mqq\nNLxEuFPviaQaaGzvdvTp2JrZOmpQbsqji6HyHjgZXHdWDB1CdbSgGub0qGFPTgH/26KjBuV+PLoY\nXluVBsBdOt+CaqQL+7SnV4cQXl6RSnlFrXeZV8pleWwxHD5RxCfrM7gmIYZOYTo7m2ocLy/hvtHx\npGUXsGjrIavjKOVQHlsMr32XRoUx3KXHFlQTXdSvA/Htgnl5RSoVOmpQbsQjiyErr4iP1u1n0pAo\nYsIDrY6jXJSXl3Dv6O4kH8ljyY7DVsdRymE8shje+D6dsgrDPaO6Wx1FubhLB3Sia0QQLy1PxRgd\nNSj34JBiEJEJIpIsIqkiMqOW9/1F5BPb+z+LSJcq7z1iW54sIuMdkedMcvKLeX/tfiYO7ETntkHN\n/XHKzXl7CfeM6s6OQydZtjOr/g2UcgF2F4OIeAOzgYuAPsAUEelTY7VbgWPGmO7AC8Cztm37UDkV\naF9gAvCK7fs1m7mr91BUVs49o3W0oBxj4qBOdG4byKwVKTpqUG7BESOGRCDVGJNujCkBPgYm1lhn\nIjDP9vwzYIyIiG35x8aYYmPMHiDV9v2axbGCEt77aS+XDuhEt8jg5voY5WF8vL24e2Q3tmSe4Lvd\n2VbHUcpujiiGKCCjyutM27Ja17HNEX0CaNvAbR3mrR/3UFBSzn06WlAOduXgaKLCWvHSch01qOaR\nmpXPtLfXsf9oYbN/liOKQWpZVvO/jLrWaci2ld9AZLqIJIlIUnZ20/4qyy0o4ZIBHenRPqRJ2ytV\nFz8fL+4a2Y2N+4+zJu2o1XGUG5q9MpW16bkE+Tfr3nbAMcWQCcRUeR0N1LxPwK/riIgPEArkNnBb\nAIwxc4wxCcaYhMjIyCYFffrK/rw0eXCTtlWqPtckRNOhdQAvLk+xOopyM3tyCn6ddrhtC0w77Ihi\nWA/Ei0iciPhReTB5YY11FgJTbc+vBlaYyvH2QmCy7aylOCAeWOeATHXy9qptkKKU/fx9vLnzgq6s\n25PL2nQdNSjHeWVl5URit50X1yKfZ3cx2I4Z3AssBnYCnxpjtovIkyJyuW21N4G2IpIKPAjMsG27\nHfgU2AF8C9xjjCm3N5NSVpmcGEtkiD+zVuioQTlGRm4hX/xygOuHxbbYRGI+jvgmxphFwKIay/5a\n5XkRcE0d2z4NPO2IHEpZLcDXmzvO78pTX+9kw75chnYOtzqScnGvrErDW4Q7zm+52/d45JXPSjWn\n64fF0jbIj5eWp1odRbk4q6YG0GJQysEC/Xy4/fyufLc7m00Zx62Oo1zY699VTg1wZwtPDaDFoFQz\nuOHszoQF+jJLz1BSTXT4RBEfr8vg6qExRLXw1ABaDEo1g2B/H247N47lu7LYduCE1XGUC3r9+8qp\nAe62YCIxLQalmslNw7vQOsCHl3TUoBopK6+ID3+2bmoALQalmknrAF9uOTeOJTuOsOPgSavjKBdi\n9dQAWgxKNaNpw+MI8ffR6xpUg+XkF/Pe2n22u/ZaMzWAFoNSzSg00JdpI7rwzbbDJB/OszqOcgFv\nrE6npKzC0onEtBiUama3nBtHsL8PL+moQdUjt6CE937ax2UDrZ0aQItBqWYWFujH1OGdWbT1EClH\ndNSg6vbG6nROlVo/NYAWg1It4NZzu9LK15uXVujV0Kp2uQUlzFtTOZFY93bWTg2gxaBUCwgP8uOm\nc7rw1ZaDpGbpqEH91lzbaOF+J5hITItBqRZy+3lxlaMGvYeSquGYbbRwcf+OxDvBRGJaDEq1kLbB\n/tx0Thf+p6MGVcObP1ROO3z/6HirowBaDEq1qNOjhll6rEHZHCso4Z01e7mkf0d6drB+tABaDEq1\nqNOjhoWbD5KalW91HOUE5v6QTkFJGfePcY7RAthZDCISLiJLRSTF9rVNLesMEpGfRGS7iGwRkeuq\nvPeOiOwRkU22xyB78ijlCv7/sQa9rsHT5RaU8M6PlccWnGW0APaPGGYAy40x8cBy2+uaCoGbjDF9\ngQnATBEJq/L+Q8aYQbbHJjvzKOX0qh5r0OsaPNvc1ekUlpbzgBONFsD+YpgIzLM9nwdcUXMFY8xu\nY0yK7flBIAuItPNzlXJp08/vSqCvNy/qqMFjVb1uwRnORKrK3mJob4w5BGD72u5MK4tIIuAHpFVZ\n/LRtF9MLIuJvZx6lXEJ4kB83j+jC11sP6T2UPNQbttGCM1y3UFO9xSAiy0RkWy2PiY35IBHpCLwH\nTDPGVNgWPwL0As4CwoGHz7D9dBFJEpGk7Ozsxny0Uk7p9vO6EuTnw4vLd1sdRbWwnPxi3vlxL5c5\n4WgBGlAMxpixxph+tTwWAEdsv/BP/+LPqu17iEhr4GvgMWPM2irf+5CpVAy8DSSeIcccY0yCMSYh\nMlL3RCnXFxboxy0jurBo62Gdr8HDvLYqjeKycn4/1rmOLZxm766khcBU2/OpwIKaK4iIH/Al8K4x\n5r813jtdKkLl8YltduZRyqXcem5XQgJ8mLlMRw2e4sjJIt5bu48rB0dbegfVM7G3GJ4BxolICjDO\n9hoRSRCRubZ1rgXOB26u5bTUD0RkK7AViACesjOPUi4lNNCX287typIdR9iaqXNDe4JXVqZSVmG4\nf4zzHVs4TYwxVmdotISEBJOUlGR1DKUcIq+olPP+tZJBMWG8M63OvanKDRw8foqRz61i0pAonrlq\nQIt/vohsMMYk1LeeXvmslMVCAny584JurErOJmlvrtVxVDN6eWUqBsO9TngmUlVaDEo5gZvO6UxE\nsD/PLU7GFUfxqn77jhbw6foMJp8VS3SbQKvjnJEWg1JOINDPh3tGdePnPbmsSTtqdRzVDGYuS8HH\nWyyfna0htBiUchJTEmPpGBqgowY3tPtIHvM3HWDqOV1o1zrA6jj10mJQykkE+Hpz/5h4NmUcZ9nO\nWi8JUi7qP0uSCfLz4c4LulkdpUG0GJRyItcMjSYuIoh/L06mvEJHDe5gc8ZxFm8/wm3nxdEmyM/q\nOA2ixaCUE/Hx9uLBcT1IPpLHgk0HrI6jHODfS5JpE+jLrefGWR2lwbQYlHIyl/TvSJ+OrXlh2W5K\nyirq30A5rTWpOaxOyeHukd0JCfC1Ok6DaTEo5WS8vISHJvQkI/cUH6/fb3Uc1UTGGJ79dhedQgO4\n8ZzOVsdpFC0GpZzQyB6RJMaF89LyVAqKy6yOo5rgm22H2Zx5ggfG9SDA19vqOI2ixaCUExIRHp7Q\ni5z8Yuau3mN1HNVIZeUV/HtxMvHtgrlqSLTVcRpNi0EpJzW0cxvG923PnO/TyMkvtjqOaoRPkzJJ\nzyngofE98fYSq+M0mhaDUk7szxN6UVRWwUs6BajLKCwpY+ay3QyJDWNcn/ZWx2kSLQalnFi3yGCu\nOyuGD3/ez56cAqvjqAaYu3oPWXnFP
HpJbyqnmnE9WgxKObkHxsTj6+3FvxcnWx1F1SMrr4jXvktj\nQt8ODO0cbnWcJtNiUMrJtWsdwO3nxfH11kNs3H/M6jjqDGYuS6GkrIKHL+pldRS72FUMIhIuIktF\nJMX2tU0d65VXmb1tYZXlcSLys237T2zTgCqlarjjgm5Ehvjz1Fc79AZ7Tio1K49P1mdww9mdiYsI\nsjqOXewdMcwAlhtj4oHltte1OWWMGWR7XF5l+bPAC7btjwG32plHKbcU5O/DH8f1YOP+43y99ZDV\ncVQtnvlmF4G2GyG6OnuLYSIwz/Z8HnBFQzeUyqMyo4HPmrK9Up7mmoQYenUI4dlvd1FUWm51HFXF\nDyk5LNuZxV2juhHuIjfKOxN7i6G9MeYQgO1ruzrWCxCRJBFZKyKnf/m3BY4bY05f1pkJRNmZRym3\n5e0lPHZJHzJyTzFvzV6r4yibsvIK/v7VDmLCW3HLCNe5Ud6Z+NS3gogsAzrU8tajjficWGPMQRHp\nCqwQka3AyVrWq3PnqYhMB6YDxMbGNuKjlXIf58ZHMKpnJC+vSOXqodG0Dfa3OpLH+yQpg+Qjebz6\nuyEud+uLutQ7YjDGjDXG9KvlsQA4IiIdAWxfa51dxBhz0PY1HVgFDAZygDAROV1O0cDBM+SYY4xJ\nMMYkREaojnEAAAAPnklEQVRGNuJHVMq9PHpJb06VlvPvJbutjuLxThaV8p8lu0mMC2dCv9r+fnZN\n9u5KWghMtT2fCiyouYKItBERf9vzCGAEsMNUnlqxErj6TNsrparr3i6EqcO78PH6/Ww7cMLqOB7t\n5RWpHCss4a+X9nHZi9lqY28xPAOME5EUYJztNSKSICJzbev0BpJEZDOVRfCMMWaH7b2HgQdFJJXK\nYw5v2plHKY9w/5h4wgP9eGLhdj191SJp2fm8/eMerh4STb+oUKvjOFS9xxjOxBhzFBhTy/Ik4Dbb\n8zVA/zq2TwcS7cmglCcKbeXLQ+N7MuOLrSzcfJCJg/S8jZZkjOGJhdsJ8PHmzxNc+2K22uiVz0q5\nqGsSYugfFco/F+2isETnbGhJi7cfYXVKDn8Y14PIEPc7AUCLQSkX5e0lPHF5Hw6fLGLWilSr43iM\nUyXl/P2rHfRsH8JNLjYzW0NpMSjlwoZ2DufahGje+D6dlCN5VsfxCK9+l8aB46f428S++Hi7569Q\n9/yplPIgD0/oRZC/D39ZsE0PRDezvTkFvPZdGpcP7MTZXdtaHafZaDEo5eLaBvvz8IRerE3PZcGm\nOi8FUnYyxvDY/G34e3vx6CW9rY7TrLQYlHIDk8+KYWBMGE99vZMTp0qtjuOWFm4+yA+pOfx5Qk/a\ntw6wOk6z0mJQyg14eQlPX9GP3IJinv12l9Vx3M7xwhL+/tUOBsaEcf0w9zzgXJUWg1Juol9UKLed\n15UPf97Pz+lHrY7jVp79dhfHCkv5x5X98PZynyuc66LFoJQb+cPYHsSEt+KRL7bqrbkd5Of0o3y0\nLoNbz42jbyf3usK5LloMSrmRVn7e/OPK/qTnFPCyXttgt1Ml5Tz8+RZiwwN5YKzrT8DTUFoMSrmZ\n8+IjuWpINK99l8aOg7Xd3V411PNLk9l7tJBnrupPoJ9ddxByKVoMSrmhxy7pTVigH3/872ZKyiqs\njuOSNu4/xps/7OF3w2IZ3i3C6jgtSotBKTfUJsiPf07qz85DJ3l5RYrVcVxOUWk5f/5sCx1aBzDj\nIve7SV59tBiUclPj+rRn0pAoZq9KY3PGcavjuJT/LEkmNSuff0zqT0iAr9VxWpwWg1Ju7PHL+hIZ\n7M8f/7tZz1JqoDVpOcz9YQ83nB3LyJ51TWPv3rQYlHJjoa18efbqAaRm5euFbw1w4lQpf/p0M3Ft\ng3j04j5Wx7GMXcUgIuEislREUmxf29SyzigR2VTlUSQiV9jee0dE9lR5b5A9eZRSv3VBj0imntOZ\nt3/cy8rkWqdlVzZ/XbCNrLxiXrhuEK38vK2OYxl7RwwzgOXGmHhgue11NcaYlcaYQcaYQcBooBBY\nUmWVh06/b4zZZGcepVQtHrm4N706hPCnTzeTlVdkdRynNP+XAyzYdJD7x8QzMCbM6jiWsrcYJgLz\nbM/nAVfUs/7VwDfGmEI7P1cp1QgBvt7MmjKY/OIy/vjpZioq9PbcVaVl5/N/X24lsUs4d4/sZnUc\ny9lbDO2NMYcAbF/rO1IzGfioxrKnRWSLiLwgInXOkSci00UkSUSSsrOz7UutlAeKbx/CXy7tw+qU\nHOasTrc6jtMoKi3nng82EuDrzUtTBrvt5DuNUe+/gIgsE5FttTwmNuaDRKQj0B9YXGXxI0Av4Cwg\nHHi4ru2NMXOMMQnGmITIyMjGfLRSyuZ3w2K5pH9H/vXtLn5K0xvtAfztf9vZdTiP568dSIdQ976d\ndkPVWwzGmLHGmH61PBYAR2y/8E//4j/Tka1rgS+NMb/eLN4Yc8hUKgbeBhLt+3GUUmciIjx79QDi\nIoK476ONHD7h2ccbvtiYyUfrMrhrZDePPTW1NvaOmRYCU23PpwILzrDuFGrsRqpSKkLl8YltduZR\nStUj2N+H128cSmFJOfd8uNFjb5mxJfM4M77YyrC4cP44rofVcZyKvcXwDDBORFKAcbbXiEiCiMw9\nvZKIdAFigO9qbP+BiGwFtgIRwFN25lFKNUD3diH86+oBbNh3jCe/2m51nBaXlVfE9Hc3EBnszyu/\nG6LHFWqw63aBxpijwJhalicBt1V5vReIqmW90fZ8vlKq6S4d0ImtB07w+nfpdI8M5uYRcVZHahEl\nZRXc/f5Gjp8q4fO7htM2uM5zXjyW59xHVin1Gw+P78We7AKe/GoHnSOCGOXm+9mNMTzyxVaS9h1j\n1pTBHjPxTmPp+EkpD+blJcycPIjeHVtz34e/kHw4z+pIzer5pbv5fGMmD4yN57KBnayO47S0GJTy\ncIF+PsydmkCQvzdT31pHRq57Xn/64c/7mbUilesSYvj9GM+Zja0ptBiUUnQMbcW8WxIpLCnjxjd/\nJjuv2OpIDrV0xxEem7+VkT0jeerKflSeCKnqosWglAKgV4fWvD0tkSMni7nprXWcOFVa/0YuYGVy\nFvd8sJH+UaHMvn4IvnoGUr30X0gp9auhndvw+o1DSc3KY6oblMP3u7O5470NxLcP5t1bhhHkr+fb\nNIQWg1KqmvN7RDL7+iFsP3iC699YS25BidWRmuTH1BxufzeJrhFBvH/rMEIDPW8mtqbSYlBK/caF\nfTvwxk0JpGblM3nOTy53q+6vtxxi2tvr6dI2iA9uG0abID+rI7kULQalVK1G9mzH2zefReaxU1z9\n6k+kZuVbHalB3vtpL/d+tJEB0aF8csfZegFbE2gxKKXqNLx7BB/cNozCkjImvfIjP6bmWB2pTuUV\nhme/3cVfFmxnTK92vHfrMMICdaTQFFoMSqkzGhzbhi/vHkGH0ACmvrWO99fuwxjnmujnWEEJ09
5Z\nz6ur0piSGMtrNwz16Kk57aXFoJSqV0x4IJ/fNZwR3SN4bP427vvoF04WOccZS9sOnOCyl39gbdpR\n/jmpP/+c1F9vimcn/ddTSjVISIAvb918Fg+N78k32w5z8Yur2bDvmGV5SssreGl5Cle+8iNl5YZP\n7jibKYmxluVxJ1oMSqkG8/YS7hnVnU/vOAdj4OrX1vCX+ds4Udiyo4cdB09yxewfeX7pbi7q15Fv\nfn8eg2PbtGgGdybOtq+wIRISEkxSUpLVMZTyaCeLSnl+yW7e/WkvbQL9eHhCLyYNiWrW3TiHTpzi\n+SWVN8ILD/LjqSv6M6Ffh2b7PHcjIhuMMQn1rmdPMYjINcATQG8g0TYPQ23rTQBeBLyBucaY0xP6\nxAEfUznf80bgRmNMvVfTaDEo5Ty2HzzBX+ZvY+P+48SGB3LnBd24amgU/j6OO/ibkVvIuz/t5d2f\n9mEM3HROZ+4d3V3POmqkliqG3kAF8Drwp9qKQUS8gd1UzvCWCawHphhjdojIp8AXxpiPReQ1YLMx\n5tX6PleLQSnnUlFhWLbzCLNXprI58wSRIf5cMagTEwdF0bdT6ybdtK6otJw1aTl8+PN+lu/KQoCJ\ng6J4cFwPYsIDHf9DeICGFoO9M7jttH3YmVZLBFKNMem2dT8GJorITmA0cL1tvXlUjj7qLQallHPx\n8hIu7NuBcX3a80NqDvPW7OOdNXt5Y/Ue4iKCOLtrOENi2zA4NoyosMDfnEpqjOFoQQkpR/JJPnyS\n1Sk5/JiWQ1FpBRHBftwzsjvXD4ulU1gri35Cz9ISd5SKAjKqvM4EhgFtgePGmLIqy38z/adSynWI\nCOfFR3JefCTHC0tYtPUwS3Yc5usth/ho3f//NRDi70N4sB8VxlBcWkFhSTn5xWW/vh8T3orrEmIY\n2bMdw7u3dehuKVW/eotBRJYBtR3dedQYs6ABn1HbcMKcYXldOaYD0wFiY/WUNKWcXVigH9cPi+X6\nYbFUVBjSsvPZeuAEh08WkXWymKMFJfh6Cf6+Xvj7eBMbHkh8+2C6twumQ+sAnTPBQvUWgzFmrJ2f\nkQnEVHkdDRwEcoAwEfGxjRpOL68rxxxgDlQeY7Azk1KqBXl5CfHtQ4hvH2J1FNUALXEdw3ogXkTi\nRMQPmAwsNJVHvVcCV9vWmwo0ZASilFKqGdlVDCJypYhkAucAX4vIYtvyTiKyCMA2GrgXWAzsBD41\nxmy3fYuHgQdFJJXKYw5v2pNHKaWU/fQCN6WU8hANPV1Vb4mhlFKqGi0GpZRS1WgxKKWUqkaLQSml\nVDVaDEoppapxybOSRCQb2NfEzSOovLjOVbl6fnD9n8HV84Pr/wyunh+s+Rk6G2Mi61vJJYvBHiKS\n1JDTtZyVq+cH1/8ZXD0/uP7P4Or5wbl/Bt2VpJRSqhotBqWUUtV4YjHMsTqAnVw9P7j+z+Dq+cH1\nfwZXzw9O/DN43DEGpZRSZ+aJIwallFJn4FHFICITRCRZRFJFZIbVeRpDRN4SkSwR2WZ1lqYQkRgR\nWSkiO0Vku4j83upMjSUiASKyTkQ2236Gv1mdqSlExFtEfhGRr6zO0hQisldEtorIJhFxubtpikiY\niHwmIrts/z2cY3WmmjxmV5KIeAO7gXFUTh60HphijNlhabAGEpHzgXzgXWNMP6vzNJaIdAQ6GmM2\nikgIsAG4wlX+/QGkckqxIGNMvoj4Aj8AvzfGrLU4WqOIyINAAtDaGHOp1XkaS0T2AgnGGJe8jkFE\n5gGrjTFzbXPUBBpjjludqypPGjEkAqnGmHRjTAnwMTDR4kwNZoz5Hsi1OkdTGWMOGWM22p7nUTk3\nh0vN8W0q5dte+toeLvWXlYhEA5cAc63O4olEpDVwPra5Z4wxJc5WCuBZxRAFZFR5nYmL/WJyFyLS\nBRgM/Gxtksaz7YbZBGQBS40xrvYzzAT+DFRYHcQOBlgiIhtsc8G7kq5ANvC2bXfeXBEJsjpUTZ5U\nDLXNLO5Sf+25AxEJBj4HHjDGnLQ6T2MZY8qNMYOonKM8UURcZreeiFwKZBljNlidxU4jjDFDgIuA\ne2y7WV2FDzAEeNUYMxgoAJzueKcnFUMmEFPldTRw0KIsHsm2X/5z4ANjzBdW57GHbfi/CphgcZTG\nGAFcbttH/zEwWkTetzZS4xljDtq+ZgFfUrmb2FVkAplVRpqfUVkUTsWTimE9EC8icbYDPpOBhRZn\n8hi2A7dvAjuNMc9bnacpRCRSRMJsz1sBY4Fd1qZqOGPMI8aYaGNMFyr//7/CGHODxbEaRUSCbCcv\nYNsFcyHgMmfqGWMOAxki0tO2aAzgdCdg+FgdoKUYY8pE5F5gMeANvGWM2W5xrAYTkY+AkUCEiGQC\njxtj3rQ2VaOMAG4Ettr20QP8nzFmkYWZGqsjMM92hpsX8KkxxiVP+XRh7YEvK//OwAf40BjzrbWR\nGu0+4APbH6jpwDSL8/yGx5yuqpRSqmE8aVeSUkqpBtBiUEopVY0Wg1JKqWq0GJRSSlWjxaCUUqoa\nLQallFLVaDEopZSqRotBKaVUNf8PSkPz2rqC2OEAAAAASUVORK5CYII=\n\"\n>\n</div>\n\n</div>\n\n</div>\n</div>\n\n</div>\n </div>\n\n\n\n \n<div class=\"cell border-box-sizing code_cell rendered\">\n<div class=\"input\">\n<div class=\"prompt input_prompt\">In&nbsp;[&nbsp;]:</div>\n<div class=\"inner_cell\">\n <div class=\"input_area\">\n<div class=\" highlight hl-ipython3\"><pre><span></span> \n</pre></div>\n\n</div>\n</div>\n</div>\n\n</div>\n\n\n </div>\n </div>\n</body>\n\n \n\n\n</html>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
ec7821cb5de5cf08c1db53c16c8a550e5806e18a
27,658
ipynb
Jupyter Notebook
notebooks/placement_experiments_50pct_phi25.ipynb
kLabUM/hydraulic-controller-placement
b2cfbee19bb41d69702f4c218c9dba80bd6e4fae
[ "MIT" ]
null
null
null
notebooks/placement_experiments_50pct_phi25.ipynb
kLabUM/hydraulic-controller-placement
b2cfbee19bb41d69702f4c218c9dba80bd6e4fae
[ "MIT" ]
null
null
null
notebooks/placement_experiments_50pct_phi25.ipynb
kLabUM/hydraulic-controller-placement
b2cfbee19bb41d69702f4c218c9dba80bd6e4fae
[ "MIT" ]
3
2019-01-18T21:04:28.000Z
2020-09-04T14:45:02.000Z
26.800388
158
0.548702
[ [ [ "import json\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom matplotlib import cm\nfrom pysheds.grid import Grid\nfrom pysheds.view import Raster\nfrom matplotlib import colors\nimport seaborn as sns\nimport warnings\nfrom partition import differentiated_linear_weights, differentiated_power_weights, threshold_weights, controller_placement_algorithm, naive_partition\n\nwarnings.filterwarnings('ignore')\nsns.set_palette('husl', 8)\nsns.set()\n\n%matplotlib inline", "_____no_output_____" ], [ "output = {}", "_____no_output_____" ] ], [ [ "# Generate graph", "_____no_output_____" ] ], [ [ "grid = Grid.from_raster('../data/n30w100_dir', data_name='dir')\n\ndirmap = (64, 128, 1, 2, 4, 8, 16, 32)\n\n# Specify pour point\nx, y = -97.294167, 32.73750\n\n# Delineate the catchment\ngrid.catchment(data='dir', x=x, y=y, dirmap=dirmap, out_name='catch',\n recursionlimit=15000, xytype='label')\n\n# Clip the bounding box to the catchment\ngrid.clip_to('catch', pad=(1,1,1,1))\n\n#Compute flow accumulation\ngrid.accumulation(data='catch', out_name='acc', dirmap=dirmap)\n\n# Compute flow distance\ngrid.flow_distance(data='catch', x=x, y=y, dirmap=dirmap, out_name='dist', xytype='label')\ndist = grid.view('dist', nodata=0, dtype=np.float64)", "_____no_output_____" ], [ "dist_weights = (np.where(grid.view('acc') >= 100, 0.04, 0) \n + np.where((0 < grid.view('acc')) & (grid.view('acc') <= 100), 1, 0)).ravel()\n\ndists = grid.flow_distance(data='catch', x=x, y=y, weights=dist_weights,\n dirmap=dirmap, out_name='dist', xytype='label', inplace=False)", "_____no_output_____" ] ], [ [ "# Linear weighting", "_____no_output_____" ] ], [ [ "weights = differentiated_linear_weights(dists)", "_____no_output_____" ], [ "acc = grid.accumulation(data='catch', dirmap=dirmap, inplace=False)\nwacc = grid.accumulation(data='catch', weights=weights, dirmap=dirmap, inplace=False)", "_____no_output_____" ], [ "k = 1\nc = 6000\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, acc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 2\nc = 3300\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in 
ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 3\nc = 6300 // 3\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 4\nc = 6500 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 5\nc = 6300 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 6\nc = 6580 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = 
pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 8\nc = 6800 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 10\nc = 6500 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 12\nc = 6300 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 14\nc = 6800 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = 
numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 16\nc = 6500 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 18\nc = 6720 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 20\nc = 6720 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "k = 30\nc = 6720 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] 
= c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "(np.asarray(cells_per_catch) > 101).all()", "_____no_output_____" ], [ "k = 35\nc = 6720 // k\nfdir = grid.view('catch')\nsubs, ixes = controller_placement_algorithm(fdir, c, k, weights=weights, dist_weights=dist_weights,\n grid=grid, compute_weights=differentiated_linear_weights)\nixy, ixx = np.unravel_index(ixes, wacc.shape)", "_____no_output_____" ], [ "cells_per_catch = [np.count_nonzero(sub) for sub in subs]\nnumcells = sum(cells_per_catch)\npct_cells = float(numcells / acc.max())\n\nexperiment = {}\nexperiment['weighting'] = 'linear'\nexperiment['num_controllers'] = k\nexperiment['max_accumulation'] = c\nexperiment['cells_controlled'] = numcells\nexperiment['pct_controlled'] = pct_cells\nexperiment['controller_locs'] = [int(ix) for ix in ixes]\nexperiment['cells_per_catch'] = cells_per_catch\nexperiment['phi'] = 10\noutput.update({'linear_k{0}_50pct_phi25'.format(k) : experiment})", "_____no_output_____" ], [ "pct_cells", "_____no_output_____" ], [ "(np.asarray(cells_per_catch) > 101).all()", "_____no_output_____" ], [ "with open('../data/experiments_differentiated_50pct_phi25.json', 'w') as outfile:\n json.dump(output, outfile)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec782a333b54f929ba33cdd8a62e3f4d6de698e0
110,483
ipynb
Jupyter Notebook
section_particle_filter/mcl11.ipynb
kentaroy47/LNPR_BOOK_CODES
f0d1bef336423ebdf04539ce833f0ce4cffc51f5
[ "MIT" ]
148
2019-03-27T00:20:16.000Z
2022-03-30T22:34:11.000Z
section_particle_filter/mcl11.ipynb
kentaroy47/LNPR_BOOK_CODES
f0d1bef336423ebdf04539ce833f0ce4cffc51f5
[ "MIT" ]
3
2018-11-07T04:33:13.000Z
2018-12-31T01:35:16.000Z
section_particle_filter/mcl11.ipynb
kentaroy47/LNPR_BOOK_CODES
f0d1bef336423ebdf04539ce833f0ce4cffc51f5
[ "MIT" ]
116
2019-04-18T08:35:53.000Z
2022-03-24T05:17:46.000Z
115.689005
69,811
0.787823
[ [ [ "import sys \nsys.path.append('../scripts/')\nfrom robot import *\nfrom scipy.stats import multivariate_normal", "_____no_output_____" ], [ "class Particle: ###impllikelihood\n def __init__(self, init_pose, weight):\n self.pose = init_pose\n self.weight = weight\n \n def motion_update(self, nu, omega, time, noise_rate_pdf): \n ns = noise_rate_pdf.rvs()\n pnu = nu + ns[0]*math.sqrt(nu/time) + ns[1]*math.sqrt(omega/time)\n pomega = omega + ns[2]*math.sqrt(nu/time) + ns[3]*math.sqrt(omega/time)\n self.pose = IdealRobot.state_transition(pnu, pomega, time, self.pose)\n \n def observation_update(self, observation, envmap, distance_dev_rate, direction_dev):\n for d in observation:\n obs_pos = d[0]\n obs_id = d[1]\n \n ###パーティクルの位置と地図からランドマークの距離と方角を算出###\n pos_on_map = envmap.landmarks[obs_id].pos\n particle_suggest_pos = IdealCamera.observation_function(self.pose, pos_on_map)\n \n ###尤度の計算###\n distance_dev = distance_dev_rate*particle_suggest_pos[0]\n cov = np.diag(np.array([distance_dev**2, direction_dev**2]))\n self.weight *= multivariate_normal(mean=particle_suggest_pos, cov=cov).pdf(obs_pos)", "_____no_output_____" ], [ "class Mcl: \n def __init__(self, envmap, init_pose, num, motion_noise_stds={\"nn\":0.19, \"no\":0.001, \"on\":0.13, \"oo\":0.2}, \\\n distance_dev_rate=0.14, direction_dev=0.05): \n self.particles = [Particle(init_pose, 1.0/num) for i in range(num)]\n self.map = envmap #以下4行追加\n self.distance_dev_rate = distance_dev_rate\n self.direction_dev = direction_dev\n\n v = motion_noise_stds\n c = np.diag([v[\"nn\"]**2, v[\"no\"]**2, v[\"on\"]**2, v[\"oo\"]**2])\n self.motion_noise_rate_pdf = multivariate_normal(cov=c)\n \n def motion_update(self, nu, omega, time):\n for p in self.particles: p.motion_update(nu, omega, time, self.motion_noise_rate_pdf)\n \n def observation_update(self, observation): #17行目で引数を追加\n for p in self.particles: p.observation_update(observation, self.map, self.distance_dev_rate, self.direction_dev) \n \n def draw(self, ax, elems): #次のように変更\n xs = [p.pose[0] for p in self.particles]\n ys = [p.pose[1] for p in self.particles]\n vxs = [math.cos(p.pose[2])*p.weight*len(self.particles) for p in self.particles] #重みを要素に反映\n vys = [math.sin(p.pose[2])*p.weight*len(self.particles) for p in self.particles] #重みを要素に反映\n elems.append(ax.quiver(xs, ys, vxs, vys, \\\n angles='xy', scale_units='xy', scale=1.5, color=\"blue\", alpha=0.5)) #変更", "_____no_output_____" ], [ "class EstimationAgent(Agent): \n def __init__(self, time_interval, nu, omega, estimator): \n super().__init__(nu, omega)\n self.estimator = estimator\n self.time_interval = time_interval\n \n self.prev_nu = 0.0\n self.prev_omega = 0.0\n \n def decision(self, observation=None): \n self.estimator.motion_update(self.prev_nu, self.prev_omega, self.time_interval)\n self.prev_nu, self.prev_omega = self.nu, self.omega\n self.estimator.observation_update(observation)\n return self.nu, self.omega\n \n def draw(self, ax, elems):\n self.estimator.draw(ax, elems)", "_____no_output_____" ], [ "def trial():\n time_interval = 0.1\n world = World(30, time_interval, debug=False) \n\n ### 地図を生成して3つランドマークを追加 ###\n m = Map()\n for ln in [(-4,2), (2,-3), (3,3)]: m.append_landmark(Landmark(*ln))\n world.append(m) \n\n ### ロボットを作る ###\n initial_pose = np.array([0, 0, 0]).T\n estimator = Mcl(m, initial_pose, 100) #地図mを渡す\n a = EstimationAgent(time_interval, 0.2, 10.0/180*math.pi, estimator)\n r = Robot(initial_pose, sensor=Camera(m), agent=a, color=\"red\")\n world.append(r)\n\n world.draw()\n \ntrial()", 
"_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ec7831b0c0c3d4155bb4714db759c568ea1e8320
23,921
ipynb
Jupyter Notebook
05_Merge/Fictitous Names/Exercises.ipynb
aamanlamba/pandas_exercises
d0921d75fddf6cfd7786f1c4e460f747c8526221
[ "BSD-3-Clause" ]
null
null
null
05_Merge/Fictitous Names/Exercises.ipynb
aamanlamba/pandas_exercises
d0921d75fddf6cfd7786f1c4e460f747c8526221
[ "BSD-3-Clause" ]
null
null
null
05_Merge/Fictitous Names/Exercises.ipynb
aamanlamba/pandas_exercises
d0921d75fddf6cfd7786f1c4e460f747c8526221
[ "BSD-3-Clause" ]
null
null
null
27.654335
117
0.336441
[ [ [ "# Fictitious Names", "_____no_output_____" ], [ "### Introduction:\n\nThis time you will create a data again \n\nSpecial thanks to [Chris Albon](http://chrisalbon.com/) for sharing the dataset and materials.\nAll the credits to this exercise belongs to him. \n\nIn order to understand about it go [here](https://blog.codinghorror.com/a-visual-explanation-of-sql-joins/).\n\n### Step 1. Import the necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "### Step 2. Create the 3 DataFrames based on the following raw data", "_____no_output_____" ] ], [ [ "raw_data_1 = {\n 'subject_id': ['1', '2', '3', '4', '5'],\n 'first_name': ['Alex', 'Amy', 'Allen', 'Alice', 'Ayoung'], \n 'last_name': ['Anderson', 'Ackerman', 'Ali', 'Aoni', 'Atiches']}\n\nraw_data_2 = {\n 'subject_id': ['4', '5', '6', '7', '8'],\n 'first_name': ['Billy', 'Brian', 'Bran', 'Bryce', 'Betty'], \n 'last_name': ['Bonder', 'Black', 'Balwner', 'Brice', 'Btisan']}\n\nraw_data_3 = {\n 'subject_id': ['1', '2', '3', '4', '5', '7', '8', '9', '10', '11'],\n 'test_id': [51, 15, 15, 61, 16, 14, 15, 1, 61, 16]}", "_____no_output_____" ] ], [ [ "### Step 3. Assign each to a variable called data1, data2, data3", "_____no_output_____" ] ], [ [ "data1 = pd.DataFrame(raw_data_1)\ndata2 = pd.DataFrame(raw_data_2)\ndata3 = pd.DataFrame(raw_data_3)\ndata1.head(3)\n\n", "_____no_output_____" ] ], [ [ "### Step 4. Join the two dataframes along rows and assign all_data", "_____no_output_____" ] ], [ [ "all_data = pd.concat([data1,data2])\nall_data", "_____no_output_____" ] ], [ [ "### Step 5. Join the two dataframes along columns and assing to all_data_col", "_____no_output_____" ] ], [ [ "all_data_col = pd.concat([data1,data2],axis=1)\nall_data_col", "_____no_output_____" ] ], [ [ "### Step 6. Print data3", "_____no_output_____" ] ], [ [ "data3", "_____no_output_____" ] ], [ [ "### Step 7. Merge all_data and data3 along the subject_id value", "_____no_output_____" ] ], [ [ "pd.merge(all_data, data3, on='subject_id')", "_____no_output_____" ] ], [ [ "### Step 8. Merge only the data that has the same 'subject_id' on both data1 and data2", "_____no_output_____" ] ], [ [ "pd.merge(data1, data2, on='subject_id', how='inner')", "_____no_output_____" ] ], [ [ "### Step 9. Merge all values in data1 and data2, with matching records from both sides where available.", "_____no_output_____" ] ], [ [ "pd.merge(data1, data2, on='subject_id', how='outer')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec783a9a0b3652aec331ce2af4d71805d84c77b2
131,754
ipynb
Jupyter Notebook
pythonhomework.ipynb
BijoyMaji/py_home_work_4
2b0426be53096191fdeb9880eee75e46810aebb7
[ "MIT" ]
null
null
null
pythonhomework.ipynb
BijoyMaji/py_home_work_4
2b0426be53096191fdeb9880eee75e46810aebb7
[ "MIT" ]
null
null
null
pythonhomework.ipynb
BijoyMaji/py_home_work_4
2b0426be53096191fdeb9880eee75e46810aebb7
[ "MIT" ]
null
null
null
327.746269
40,780
0.936108
[ [ [ "Q1. First part theoory question,f(x)=2-∑ {4}{n^2\\pi^2", "_____no_output_____" ], [ "import numpy as np\nn=int(input('enter the number \\n'))\nxsum = 0.0\nfor i in range(0,n+1):\n xsum +=1./(2*i+1)**2\nprint (\"calculated value,actual value\")\nac=(np.pi)**2/8.\nprint (xsum,ac)", "enter the number \n10\ncalculated value,actual value\n1.2109888848175334 1.2337005501361697\n" ], [ "Q2\n", "_____no_output_____" ], [ "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\ndef model(u,t):\n x=u[0]\n y=u[1]\n dxdt=y\n dydt=-2*y-x\n return np.array([dxdt,dydt])\nu0=[1,0] #initial condition\nt=np.linspace(0,20,100)\nsol=odeint(model,u0,t)\nx=sol[:,0]\ny=sol[:,1]\nplt.plot(t,x)\nplt.show()", "_____no_output_____" ], [ "#2(b)\nimport math\nx=int(input('angle in degree: \\n'))\ntol=float (input(\"Tolerance:\"))\nx=x*math.pi/180\nxsum,term=0,x\nn=1\nwhile abs(term)>tol:\n xsum+=term\n term=term*(-x**2/(2*n*(2*n+1)))\n n+=1\nprint ('calculated value,actual value')\nprint (xsum,math.sin(x))", "angle in degree: \n30\nTolerance:0.001\ncalculated value,actual value\n0.49967417939436376 0.49999999999999994\n" ], [ "import numpy as np\n\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nt=np.linspace(-4,4,500)\nv=(4*signal.square(2*np.pi*0.25*t)+4)/2\nL=2 # half period\ndef f(t):return (4*4./np.pi*sum([1.0/n*np.sin(n*np.pi*t/L) for n in\nrange(1,10,2)])+4)/2\nplt.plot(t,v,t,f(t))\nplt.show()", "_____no_output_____" ], [ "from scipy.integrate import quad\nfrom scipy.special import legendre\np2=legendre(2)\np3=legendre(3)\nI1=quad(p2*p3,-1,1)\nI2=quad(p2*p2,-1,1)\nprint('for pn different, value of integral=',I1)\nprint('for pn same, value of integral=',I2)", "for pn different, value of integral= (0.0, 2.9638938867155025e-15)\nfor pn same, value of integral= (0.4000000000000001, 4.440892098500627e-15)\n" ], [ "import numpy as np\n\nimport matplotlib.pyplot as plt\nfrom scipy.special import legendre\nN=1000\nxvalue=np.linspace(-1,1,N)\nfor i in range(1,5):\n pn=legendre(i)\n plt.plot(xvalue,pn(xvalue),label=\"n=\"+str(i))\n plt.legend(loc='upper center')\nplt.title('Legendre Polynomials')\nplt.show()", "_____no_output_____" ], [ "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\ndef model(u,t):\n x=u[0]\n y=u[1]\n dxdt=y\n dydt=4.5-x/0.9 -0.5*y\n return np.array([dxdt,dydt])\nu0=[1,0] #initial condition\nt=np.linspace(0,10,100)\nsol=odeint(model,u0,t)\nx=sol[:,0]\ny=sol[:,1]\nplt.plot(t,y)\nplt.show()", "_____no_output_____" ], [ "#5(b)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal as s\ny=s.triang(51)\nL=50.0\ndef f(x):\n return 8.0/(np.pi**2)*sum([((-1.0)**((n-1)/2.))/n**2*np.sin(n*np.pi*x/L) for n in range(1,20,2)])\nx=np.linspace(0,50,100)\nplt.plot(x,f(x)),plt.plot(y)\nplt.show()", "_____no_output_____" ], [ "#6(b)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nx=np.array([1,2,3,4,5])\ny=np.array([.5,3.8,7.9,16.5,27.3])\nf=np.polyfit(x,y,2)\np=np.poly1d(f)\nplt.plot(x,p(x),label='fit')\nplt.scatter(x,y,label='data')\nplt.show()", "_____no_output_____" ], [ "import numpy as np\n\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft\nn=100\nx=np.linspace(0.0,2,n)\nfreq=np.linspace(0.0,2,n//2)\ny=np.exp(-x**2)\nyf=fft(y)\nya=np.abs(yf)\nplt.plot(freq,ya[:n//2])\nplt.show()", "_____no_output_____" ], [ "#9(b)\nimport cmath\nz=-5+12j\nprint (cmath.sqrt(z))", "(2+3j)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7846551c87653fb39fbff034d5a2d0af848437
21,076
ipynb
Jupyter Notebook
CrossEntropy/.ipynb_checkpoints/CEM-checkpoint.ipynb
jiruifu-jerry0219/DRLND_Jerry
6a342f99119d466f8ae96202452b034f1a2e70e1
[ "MIT" ]
null
null
null
CrossEntropy/.ipynb_checkpoints/CEM-checkpoint.ipynb
jiruifu-jerry0219/DRLND_Jerry
6a342f99119d466f8ae96202452b034f1a2e70e1
[ "MIT" ]
null
null
null
CrossEntropy/.ipynb_checkpoints/CEM-checkpoint.ipynb
jiruifu-jerry0219/DRLND_Jerry
6a342f99119d466f8ae96202452b034f1a2e70e1
[ "MIT" ]
null
null
null
64.452599
2,410
0.634039
[ [ [ "# Cross-Entropy Method\n\n---\n\nIn this notebook, we will train the Cross-Entropy Method with OpenAI Gym's MountainCarContinuous environment.", "_____no_output_____" ], [ "### 1. Import the Necessary Packages", "_____no_output_____" ] ], [ [ "import gym\nimport math\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable", "_____no_output_____" ], [ "# env = gym.make('CartPole-v1')\n# # env_s = env.action_space\n# print(env_s.n)", "2\n" ] ], [ [ "### 2. Instantiate the Environment and Agent", "_____no_output_____" ] ], [ [ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nenv = gym.make('MountainCarContinuous-v0')\nenv = gym.make('CartPole-v1')\nenv.seed(101)\nnp.random.seed(101)\n\nprint('observation space:', env.observation_space)\nprint('action space:', env.action_space)\nprint(' - low:', env.action_space.low)\nprint(' - high:', env.action_space.high)\n\nclass Agent(nn.Module):\n def __init__(self, env, h_size=16):\n super(Agent, self).__init__()\n self.env = env\n # state, hidden layer, action sizes\n self.s_size = env.observation_space.shape[0]\n self.h_size = h_size\n self.a_size = env.action_space.shape[0]\n # define layers\n self.fc1 = nn.Linear(self.s_size, self.h_size)\n self.fc2 = nn.Linear(self.h_size, self.a_size)\n \n def set_weights(self, weights):\n s_size = self.s_size\n h_size = self.h_size\n a_size = self.a_size\n # separate the weights for each layer\n fc1_end = (s_size*h_size)+h_size\n fc1_W = torch.from_numpy(weights[:s_size*h_size].reshape(s_size, h_size))\n fc1_b = torch.from_numpy(weights[s_size*h_size:fc1_end])\n fc2_W = torch.from_numpy(weights[fc1_end:fc1_end+(h_size*a_size)].reshape(h_size, a_size))\n fc2_b = torch.from_numpy(weights[fc1_end+(h_size*a_size):])\n # set the weights for each layer\n self.fc1.weight.data.copy_(fc1_W.view_as(self.fc1.weight.data))\n self.fc1.bias.data.copy_(fc1_b.view_as(self.fc1.bias.data))\n self.fc2.weight.data.copy_(fc2_W.view_as(self.fc2.weight.data))\n self.fc2.bias.data.copy_(fc2_b.view_as(self.fc2.bias.data))\n \n def get_weights_dim(self):\n return (self.s_size+1)*self.h_size + (self.h_size+1)*self.a_size\n \n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = torch.tanh(self.fc2(x))\n action = x.cpu().data\n return action.numpy()\n \n def evaluate(self, weights, gamma=1.0, max_t=5000):\n self.set_weights(weights)\n episode_return = 0.0\n state = self.env.reset()\n for t in range(max_t):\n state = torch.from_numpy(state).float().to(device)\n action = self.forward(state)\n state, reward, done, _ = self.env.step(action)\n episode_return += reward * math.pow(gamma, t)\n if done:\n break\n return episode_return\n \nagent = Agent(env).to(device)", "observation space: Box(-3.4028234663852886e+38, 3.4028234663852886e+38, (4,), float32)\naction space: Discrete(2)\n" ] ], [ [ "### 3. Train the Agent with the Cross-Entropy Method\n\nRun the code cell below to train the agent from scratch. 
Alternatively, you can skip to the next code cell to load the pre-trained weights from file.", "_____no_output_____" ] ], [ [ "def cem(n_iterations=500, max_t=1000, gamma=1.0, print_every=10, pop_size=50, elite_frac=0.2, sigma=0.5):\n \"\"\"PyTorch implementation of the cross-entropy method.\n \n Params\n ======\n n_iterations (int): maximum number of training iterations\n max_t (int): maximum number of timesteps per episode\n gamma (float): discount rate\n print_every (int): how often to print average score (over last 100 episodes)\n pop_size (int): size of population at each iteration\n elite_frac (float): percentage of top performers to use in update\n sigma (float): standard deviation of additive noise\n \"\"\"\n n_elite=int(pop_size*elite_frac)\n\n scores_deque = deque(maxlen=100)\n scores = []\n best_weight = sigma*np.random.randn(agent.get_weights_dim())\n\n for i_iteration in range(1, n_iterations+1):\n weights_pop = [best_weight + (sigma*np.random.randn(agent.get_weights_dim())) for i in range(pop_size)]\n rewards = np.array([agent.evaluate(weights, gamma, max_t) for weights in weights_pop])\n\n elite_idxs = rewards.argsort()[-n_elite:]\n elite_weights = [weights_pop[i] for i in elite_idxs]\n best_weight = np.array(elite_weights).mean(axis=0)\n\n reward = agent.evaluate(best_weight, gamma=1.0)\n scores_deque.append(reward)\n scores.append(reward)\n \n torch.save(agent.state_dict(), 'checkpoint.pth')\n \n if i_iteration % print_every == 0:\n print('Episode {}\\tAverage Score: {:.2f}'.format(i_iteration, np.mean(scores_deque)))\n\n if np.mean(scores_deque)>=90.0:\n print('\\nEnvironment solved in {:d} iterations!\\tAverage Score: {:.2f}'.format(i_iteration-100, np.mean(scores_deque)))\n break\n return scores\n\nscores = cem()\n\n# plot the scores\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(1, len(scores)+1), scores)\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.show()\nplt.savefig('result.png')\n\n#shut down computer after training\n# import os\n# os.system(\"shutdown /s /t 1\")", "_____no_output_____" ] ], [ [ "### 4. Watch a Smart Agent!\n\nIn the next code cell, you will load the trained weights from file to watch a smart agent!", "_____no_output_____" ] ], [ [ "# load the weights from file\nagent.load_state_dict(torch.load('checkpoint.pth'))\n\nstate = env.reset()\nn = 0\ni = 0\nwhile True:\n i += 1\n state = torch.from_numpy(state).float().to(device)\n with torch.no_grad():\n action = agent(state)\n env.render()\n n += 1\n next_state, reward, done, _ = env.step(action)\n state = next_state\n if i >= 1000:\n break\n if done:\n print('Finished the task in: {} steps'.format(n))\n n = 0\n state = env.reset()\n\nenv.close()", "Finished the task in: 194 steps\nFinished the task in: 103 steps\nFinished the task in: 133 steps\nFinished the task in: 137 steps\nFinished the task in: 135 steps\nFinished the task in: 131 steps\nFinished the task in: 132 steps\n" ] ] ]
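The `cem()` routine above ties the cross-entropy update to gym-specific evaluation, so the core idea can be hard to see in isolation. Below is a stripped-down sketch of the same elite-selection loop applied to a toy quadratic objective; it is illustrative only. `toy_score` merely stands in for `agent.evaluate(weights)`, and the population size, elite fraction and noise scale copy the defaults above but are otherwise arbitrary.

```python
# Minimal cross-entropy-method sketch on a toy problem (no gym, no torch).
import numpy as np

def toy_score(w):
    # Maximized at w = (1, -2, 3); plays the role of agent.evaluate(weights).
    return -np.sum((w - np.array([1.0, -2.0, 3.0])) ** 2)

rng = np.random.default_rng(0)
dim, pop_size, elite_frac, sigma = 3, 50, 0.2, 0.5
n_elite = int(pop_size * elite_frac)
best_w = sigma * rng.standard_normal(dim)

for _ in range(50):
    # Sample a population around the current best estimate
    population = [best_w + sigma * rng.standard_normal(dim) for _ in range(pop_size)]
    rewards = np.array([toy_score(w) for w in population])
    # Keep the elite fraction and refit the mean on it
    elite_idxs = rewards.argsort()[-n_elite:]
    best_w = np.mean([population[i] for i in elite_idxs], axis=0)

print(best_w)  # should end up close to [1, -2, 3]
```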
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec787b2873d3e90646d6cbc314ebf19b5595b409
218,340
ipynb
Jupyter Notebook
monte-carlo/Monte_Carlo.ipynb
manucalop/deep-reinforcement-learning
0479cd8c811f02c944b2c3afe99fa66a60885d71
[ "MIT" ]
null
null
null
monte-carlo/Monte_Carlo.ipynb
manucalop/deep-reinforcement-learning
0479cd8c811f02c944b2c3afe99fa66a60885d71
[ "MIT" ]
null
null
null
monte-carlo/Monte_Carlo.ipynb
manucalop/deep-reinforcement-learning
0479cd8c811f02c944b2c3afe99fa66a60885d71
[ "MIT" ]
null
null
null
469.548387
202,904
0.934936
[ [ [ "# Monte Carlo Methods\n\nIn this notebook, you will write your own implementations of many Monte Carlo (MC) algorithms. \n\nWhile we have provided some starter code, you are welcome to erase these hints and write your code from scratch.\n\n### Part 0: Explore BlackjackEnv\n\nWe begin by importing the necessary packages.", "_____no_output_____" ] ], [ [ "import sys\nimport gym\nimport numpy as np\nfrom collections import defaultdict\n\nfrom plot_utils import plot_blackjack_values, plot_policy", "_____no_output_____" ] ], [ [ "Use the code cell below to create an instance of the [Blackjack](https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py) environment.", "_____no_output_____" ] ], [ [ "env = gym.make('Blackjack-v1')", "_____no_output_____" ] ], [ [ "Each state is a 3-tuple of:\n- the player's current sum $\\in \\{0, 1, \\ldots, 31\\}$,\n- the dealer's face up card $\\in \\{1, \\ldots, 10\\}$, and\n- whether or not the player has a usable ace (`no` $=0$, `yes` $=1$).\n\nThe agent has two potential actions:\n\n```\n STICK = 0\n HIT = 1\n```\nVerify this by running the code cell below.", "_____no_output_____" ] ], [ [ "print(env.observation_space)\nprint(env.action_space)", "Tuple(Discrete(32), Discrete(11), Discrete(2))\nDiscrete(2)\n" ] ], [ [ "Execute the code cell below to play Blackjack with a random policy. \n\n(_The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to get some experience with the output that is returned as the agent interacts with the environment._)", "_____no_output_____" ] ], [ [ "for i_episode in range(3):\n state = env.reset()\n while True:\n print(state)\n action = env.action_space.sample()\n state, reward, done, info = env.step(action)\n if done:\n print('End game! Reward: ', reward)\n print('You won :)\\n') if reward > 0 else print('You lost :(\\n')\n break", "(12, 9, False)\nEnd game! Reward: -1.0\nYou lost :(\n\n(17, 9, False)\nEnd game! Reward: -1.0\nYou lost :(\n\n(15, 7, False)\nEnd game! Reward: -1.0\nYou lost :(\n\n" ] ], [ [ "### Part 1: MC Prediction\n\nIn this section, you will write your own implementation of MC prediction (for estimating the action-value function). \n\nWe will begin by investigating a policy where the player _almost_ always sticks if the sum of her cards exceeds 18. In particular, she selects action `STICK` with 80% probability if the sum is greater than 18; and, if the sum is 18 or below, she selects action `HIT` with 80% probability. The function `generate_episode_from_limit_stochastic` samples an episode using this policy. \n\nThe function accepts as **input**:\n- `bj_env`: This is an instance of OpenAI Gym's Blackjack environment.\n\nIt returns as **output**:\n- `episode`: This is a list of (state, action, reward) tuples (of tuples) and corresponds to $(S_0, A_0, R_1, \\ldots, S_{T-1}, A_{T-1}, R_{T})$, where $T$ is the final time step. 
In particular, `episode[i]` returns $(S_i, A_i, R_{i+1})$, and `episode[i][0]`, `episode[i][1]`, and `episode[i][2]` return $S_i$, $A_i$, and $R_{i+1}$, respectively.", "_____no_output_____" ] ], [ [ "def generate_episode_from_limit_stochastic(bj_env):\n episode = []\n state = bj_env.reset()\n while True:\n probs = [0.8, 0.2] if state[0] > 18 else [0.2, 0.8]\n action = np.random.choice(np.arange(2), p=probs)\n next_state, reward, done, info = bj_env.step(action)\n episode.append((state, action, reward))\n state = next_state\n if done:\n break\n return episode", "_____no_output_____" ] ], [ [ "Execute the code cell below to play Blackjack with the policy. \n\n(*The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to gain some familiarity with the output of the `generate_episode_from_limit_stochastic` function.*)", "_____no_output_____" ] ], [ [ "for i in range(3):\n print(generate_episode_from_limit_stochastic(env))", "[((16, 8, False), 0, -1.0)]\n[((8, 2, False), 0, -1.0)]\n[((17, 3, False), 1, -1.0)]\n" ], [ "gamma=0.9\nreturns_sum = defaultdict(lambda: np.zeros(env.action_space.n))\nN = defaultdict(lambda: np.zeros(env.action_space.n))\nQ = defaultdict(lambda: np.zeros(env.action_space.n))\n\nepisode = generate_episode_from_limit_stochastic(env)\nprint(episode)\n\nstates, actions, rewards = zip(*episode)\ndiscounts = np.array([gamma**i for i in range(len(rewards)+1)])\nprint(discounts)\ntype(states)\nN = defaultdict(lambda: np.zeros(env.action_space.n))\nfor i, state in enumerate(states):\n returns_sum[state][actions[i]] += sum(rewards[i:]*discounts[:-(1+i)])\n N[state][actions[i]] += 1.0\n Q[state][actions[i]] = returns_sum[state][actions[i]] / N[state][actions[i]]\n print(rewards[i:], discounts[:-(1+i)])\nprint(Q)", "[((18, 2, True), 1, 0.0), ((17, 2, False), 1, -1.0)]\n[1. 0.9 0.81]\n(0.0, -1.0) [1. 0.9]\n(-1.0,) [1.]\ndefaultdict(<function <lambda> at 0x7fcfa85db9d0>, {(18, 2, True): array([ 0. , -0.9]), (17, 2, False): array([ 0., -1.])})\n" ] ], [ [ "Now, you are ready to write your own implementation of MC prediction. Feel free to implement either first-visit or every-visit MC prediction; in the case of the Blackjack environment, the techniques are equivalent.\n\nYour algorithm has three arguments:\n- `env`: This is an instance of an OpenAI Gym environment.\n- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.\n- `generate_episode`: This is a function that returns an episode of interaction.\n- `gamma`: This is the discount rate. 
It must be a value between 0 and 1, inclusive (default value: `1`).\n\nThe algorithm returns as output:\n- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.", "_____no_output_____" ] ], [ [ "def mc_prediction_q(env, num_episodes, generate_episode, gamma=1.0):\n # initialize empty dictionaries of arrays\n returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))\n N = defaultdict(lambda: np.zeros(env.action_space.n))\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n # loop over episodes\n for i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 1000 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n \n ## TODO: complete the function\n # generate an episode\n episode = generate_episode(env)\n # obtain the states, actions, and rewards\n states, actions, rewards = zip(*episode)\n # prepare for discounting\n discounts = np.array([gamma**i for i in range(len(rewards)+1)])\n # update the sum of the returns, number of visits, and action-value \n # function estimates for each state-action pair in the episode\n for i, state in enumerate(states):\n returns_sum[state][actions[i]] += sum(rewards[i:]*discounts[:-(1+i)])\n N[state][actions[i]] += 1.0\n Q[state][actions[i]] = returns_sum[state][actions[i]] / N[state][actions[i]]\n \n return Q", "_____no_output_____" ] ], [ [ "Use the cell below to obtain the action-value function estimate $Q$. We have also plotted the corresponding state-value function.\n\nTo check the accuracy of your implementation, compare the plot below to the corresponding plot in the solutions notebook **Monte_Carlo_Solution.ipynb**.", "_____no_output_____" ] ], [ [ "# obtain the action-value function\nQ = mc_prediction_q(env, 500000, generate_episode_from_limit_stochastic)\n\n# obtain the corresponding state-value function\nV_to_plot = dict((k,(k[0]>18)*(np.dot([0.8, 0.2],v)) + (k[0]<=18)*(np.dot([0.2, 0.8],v))) \\\n for k, v in Q.items())\n\n# plot the state-value function\nplot_blackjack_values(V_to_plot)", "Episode 500000/500000." ] ], [ [ "### Part 2: MC Control\n\nIn this section, you will write your own implementation of constant-$\\alpha$ MC control. \n\nYour algorithm has four arguments:\n- `env`: This is an instance of an OpenAI Gym environment.\n- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.\n- `alpha`: This is the step-size parameter for the update step.\n- `gamma`: This is the discount rate. 
It must be a value between 0 and 1, inclusive (default value: `1`).\n\nThe algorithm returns as output:\n- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.\n- `policy`: This is a dictionary where `policy[s]` returns the action that the agent chooses after observing state `s`.\n\n(_Feel free to define additional functions to help you to organize your code._)", "_____no_output_____" ] ], [ [ "def mc_control(env, num_episodes, alpha, gamma=1.0):\n nA = env.action_space.n\n # initialize empty dictionary of arrays\n Q = defaultdict(lambda: np.zeros(nA))\n # loop over episodes\n for i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 1000 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n \n ## TODO: complete the function\n \n return policy, Q", "_____no_output_____" ] ], [ [ "Use the cell below to obtain the estimated optimal policy and action-value function. Note that you should fill in your own values for the `num_episodes` and `alpha` parameters.", "_____no_output_____" ] ], [ [ "# obtain the estimated optimal policy and action-value function\npolicy, Q = mc_control(env, ?, ?)", "_____no_output_____" ] ], [ [ "Next, we plot the corresponding state-value function.", "_____no_output_____" ] ], [ [ "# obtain the corresponding state-value function\nV = dict((k,np.max(v)) for k, v in Q.items())\n\n# plot the state-value function\nplot_blackjack_values(V)", "_____no_output_____" ] ], [ [ "Finally, we visualize the policy that is estimated to be optimal.", "_____no_output_____" ] ], [ [ "# plot the policy\nplot_policy(policy)", "_____no_output_____" ] ], [ [ "The **true** optimal policy $\\pi_*$ can be found in Figure 5.2 of the [textbook](http://go.udacity.com/rl-textbook) (and appears below). Compare your final estimate to the optimal policy - how close are you able to get? If you are not happy with the performance of your algorithm, take the time to tweak the decay rate of $\\epsilon$, change the value of $\\alpha$, and/or run the algorithm for more episodes to attain better results.\n\n![True Optimal Policy](images/optimal.png)", "_____no_output_____" ] ] ]
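The `mc_control` cell above is deliberately left as a TODO for the reader. For orientation only, one plausible way to complete it is sketched here; this is not the official solution notebook. The helpers `get_probs` and `generate_episode_from_Q`, as well as the epsilon schedule, are our own choices; the constant-alpha update itself follows the approach described in the notebook.

```python
# A possible completion of mc_control (sketch, not the reference solution).
import numpy as np
from collections import defaultdict

def get_probs(Q_s, epsilon, nA):
    """Epsilon-greedy action probabilities for one state."""
    probs = np.ones(nA) * epsilon / nA
    probs[np.argmax(Q_s)] += 1 - epsilon
    return probs

def generate_episode_from_Q(env, Q, epsilon, nA):
    episode, state = [], env.reset()
    while True:
        probs = get_probs(Q[state], epsilon, nA) if state in Q else np.ones(nA) / nA
        action = np.random.choice(np.arange(nA), p=probs)
        next_state, reward, done, _ = env.step(action)
        episode.append((state, action, reward))
        state = next_state
        if done:
            return episode

def mc_control(env, num_episodes, alpha, gamma=1.0, eps_decay=0.99999, eps_min=0.05):
    nA = env.action_space.n
    Q = defaultdict(lambda: np.zeros(nA))
    epsilon = 1.0
    for _ in range(1, num_episodes + 1):
        epsilon = max(epsilon * eps_decay, eps_min)
        episode = generate_episode_from_Q(env, Q, epsilon, nA)
        states, actions, rewards = zip(*episode)
        discounts = np.array([gamma ** i for i in range(len(rewards) + 1)])
        for i, state in enumerate(states):
            G = sum(rewards[i:] * discounts[:-(1 + i)])
            Q[state][actions[i]] += alpha * (G - Q[state][actions[i]])
    policy = dict((s, np.argmax(q)) for s, q in Q.items())
    return policy, Q
```

With something along these lines, a call such as `mc_control(env, 500000, 0.02)` would be typical, although the episode count and step size still need tuning.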
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec788df280e7268415154b76dd6a8c9183a97f1c
1,541
ipynb
Jupyter Notebook
d04_homework1.ipynb
zera888/-zera
cbc191f46941145a984dcf984bdc20f289e68321
[ "BSL-1.0" ]
null
null
null
d04_homework1.ipynb
zera888/-zera
cbc191f46941145a984dcf984bdc20f289e68321
[ "BSL-1.0" ]
null
null
null
d04_homework1.ipynb
zera888/-zera
cbc191f46941145a984dcf984bdc20f289e68321
[ "BSL-1.0" ]
null
null
null
21.109589
54
0.491239
[ [ [ "import numpy as np\nenglish_score = np.array([55,89,76,65,48,70])\nmath_score = np.array([60,85,60,68,55,60])\nchinese_score = ([65,90,82,72,66,77])\n#\n#有多少學生英文成績比設數學高\n#\nn = 0\nprint('有多少學生英文成績比數學高?\\n')\nx = english_score > math_score\n# print(x)\nfor i in range(0,6):\n if english_score[i] > math_score[i]:\n n += 1\nprint(\"有 %d 個學生的英文成績比數學高。\" % (n))\nprint()\n#\n#是否全班同學最高分數都是國文?\n#\nprint('是否全班同學最高分數都是國文?\\n')\ny = chinese_score > english_score\nz = chinese_score > math_score\nk = np.logical_and(y,z)\n\nprint(k)", "有多少學生英文成績比數學高?\n\n有 3 個學生的英文成績比數學高。\n\n是否全班同學最高分數都是國文?\n\n[ True True True True True True]\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
ec7894462b4b3f033de081d442baf0ee8e9a65b3
5,721
ipynb
Jupyter Notebook
00_core.ipynb
castorfou/hello_nbdev
2e6897d23da1c5bedde63899b057c6abeea219de
[ "Apache-2.0" ]
null
null
null
00_core.ipynb
castorfou/hello_nbdev
2e6897d23da1c5bedde63899b057c6abeea219de
[ "Apache-2.0" ]
3
2021-05-20T23:22:16.000Z
2022-02-26T10:29:24.000Z
00_core.ipynb
castorfou/hello_nbdev
2e6897d23da1c5bedde63899b057c6abeea219de
[ "Apache-2.0" ]
null
null
null
19.659794
193
0.477364
[ [ [ "# default_exp core", "_____no_output_____" ] ], [ [ "# module core\n\n> Everything about core.", "_____no_output_____" ] ], [ [ "#hide\nfrom nbdev.showdoc import *", "_____no_output_____" ], [ "#export\ndef say_hello(to):\n \"Say hello to somebody\"\n return f'Hello {to}!'\n", "_____no_output_____" ] ], [ [ "We can use this to say hello to anyone", "_____no_output_____" ], [ "## example", "_____no_output_____" ] ], [ [ "say_hello(\"Sylvain\")", "_____no_output_____" ] ], [ [ "## test", "_____no_output_____" ] ], [ [ "assert say_hello(\"Jeremy\")==\"Hello Jeremy!\"", "_____no_output_____" ] ], [ [ "# HelloSayer class", "_____no_output_____" ], [ "By using `#exports` with an s, this will be exported both in lib and docs.", "_____no_output_____" ] ], [ [ "#exports\nclass HelloSayer:\n \"Say hello to `to` using `say_hello`\"\n def __init__(self, to): self.to = to\n\n def say(self):\n \"\"\"\n Do the saying, and it can be a very long proper docstring\n \"\"\"\n return say_hello(self.to)\n\n def say_emphasis(self, emphasis):\n \"\"\"\n Do the saying, and add other things too\n Input:\n emphasis: String\n Output:String\n \n \"\"\"\n return say_hello(self.to+emphasis)\n", "_____no_output_____" ] ], [ [ "By calling show_doc, this will be in the documentation", "_____no_output_____" ] ], [ [ "show_doc(HelloSayer.say)", "_____no_output_____" ], [ "show_doc(HelloSayer.say_emphasis)", "_____no_output_____" ] ], [ [ "## example", "_____no_output_____" ] ], [ [ "o = HelloSayer(\"Alexis\")\no.say()", "_____no_output_____" ], [ "o.say_emphasis('!!!')", "_____no_output_____" ] ], [ [ "# export nbdev\n\n(not for the documentation, to see if it can be hidden somehow)", "_____no_output_____" ] ], [ [ "#hide\nfrom nbdev.export import notebook2script; notebook2script()", "Converted 00_core.ipynb.\nConverted index.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
ec78d0dc9b6d9870be147970cdde03a2fe22f3be
3,078
ipynb
Jupyter Notebook
Hubspot/Hubspot_get_association.ipynb.ipynb
haresh121/awesome-notebooks
f85065ec71bfe86e7e256f0f172c7bb722184152
[ "BSD-3-Clause" ]
null
null
null
Hubspot/Hubspot_get_association.ipynb.ipynb
haresh121/awesome-notebooks
f85065ec71bfe86e7e256f0f172c7bb722184152
[ "BSD-3-Clause" ]
null
null
null
Hubspot/Hubspot_get_association.ipynb.ipynb
haresh121/awesome-notebooks
f85065ec71bfe86e7e256f0f172c7bb722184152
[ "BSD-3-Clause" ]
null
null
null
30.475248
1,002
0.655945
[ [ [ "<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>", "_____no_output_____" ], [ "<img src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/3f/HubSpot_Logo.svg/220px-HubSpot_Logo.svg.png\" alt=\"drawing\" width=\"200\" align='left'/>", "_____no_output_____" ], [ "# Hubspot - Get association\n<a href=\"https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Hubspot/Hubspot_get_association.ipynb.ipynb\" target=\"_parent\"><img src=\"https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHRleHQgaWQ9InN0cmluZyIgdHJhbnNmb3JtPSJtYXRyaXgoMS4wIDAuMCAwLjAgMS4wIDIyOC4wIDU0LjUpIiBmb250LWZhbWlseT0iQ29tZm9ydGFhLVJlZ3VsYXIsIENvbWZvcnRhYSIgZm9udC1zaXplPSI4MDAiIHRleHQtZGVjb3JhdGlvbj0ibm9uZSIgZmlsbD0iI2ZmZmZmZiIgeD0iMS4xOTk5OTk5OTk5OTk5ODg2IiB5PSI3MDUuMCI+bjwvdGV4dD4KIDwvZGVmcz4KIDx1c2UgaWQ9Im4iIHhsaW5rOmhyZWY9IiNzdHJpbmciLz4KPC9zdmc+Cg==\"/></a>", "_____no_output_____" ] ], [ [ "from naas_drivers import hubspot\n\n# Enter your hubspot api key\nauth_token = 'YOUR_HUBSPOT_API_KEY'\n\n# Connect to hubspot\nhs = hubspot.connect(auth_token)", "_____no_output_____" ], [ "# Get association\nobjet = 'deal'\nobjet_id = '3517567364'\nassociation = 'contact'\nhs.associations.get(objet, objet_id, association)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
ec78d674224630bf95eabdf92987f6fec0a081a5
15,160
ipynb
Jupyter Notebook
nbs/index.ipynb
ElliottP-13/adaptnlp
7c6d8296ed19f6dfd23032bbc1caba0917dd0d0a
[ "Apache-2.0" ]
null
null
null
nbs/index.ipynb
ElliottP-13/adaptnlp
7c6d8296ed19f6dfd23032bbc1caba0917dd0d0a
[ "Apache-2.0" ]
null
null
null
nbs/index.ipynb
ElliottP-13/adaptnlp
7c6d8296ed19f6dfd23032bbc1caba0917dd0d0a
[ "Apache-2.0" ]
null
null
null
45.525526
289
0.646042
[ [ [ "# Welcome to AdaptNLP\n> A high level framework and library for running, training, and deploying state-of-the-art Natural Language Processing (NLP) models for end to end tasks.", "_____no_output_____" ], [ "<p align=\"center\">\n <a href=\"https://github.com/Novetta/adaptnlp\"> <img src=\"https://raw.githubusercontent.com/novetta/adaptnlp/master/docs/assets/images/company_logo.png\" width=\"400\"/></a>\n</p>\n\n![CI](https://github.com/Novetta/adaptnlp/workflows/CI/badge.svg) \n[![PyPI](https://img.shields.io/pypi/v/adaptnlp?color=blue&label=pypi%20version)](https://pypi.org/project/adaptnlp/#description)\n\n\nAdaptNLP allows users ranging from beginner python coders to experienced machine learning engineers to leverage\nstate-of-the-art NLP models and training techniques in one easy-to-use python package.\n\nBuilt atop Zalando Research's Flair and Hugging Face's Transformers library, AdaptNLP provides Machine\nLearning Researchers and Scientists a modular and **adaptive** approach to a variety of NLP tasks with an\n**Easy** API for training, inference, and deploying NLP-based microservices.\n\n## Key Features\n\n - **[Full Guides and API Documentation](https://novetta.github.io/adaptnlp)**\n - Jupyter Notebook [Tutorials](https://novetta.github.io/adaptnlp/tutorial-intro)\n - Unified API for NLP Tasks with SOTA Pretrained Models (Adaptable with Flair and Transformer's Models)\n - Token Tagging \n - Sequence Classification\n - Embeddings\n - Question Answering\n - Summarization\n - Translation\n - Text Generation\n - <em> More in development </em>\n - Training and Fine-tuning Interface\n - Integration with Transformer's Trainer Module for fast and easy transfer learning with custom datasets\n - Jeremy's **[ULM-FIT](https://arxiv.org/abs/1801.06146)** approach for transfer learning in NLP\n - Fine-tuning Transformer's language models and task-specific predictive heads like Flair's `SequenceClassifier`\n - [Rapid NLP Model Deployment](https://github.com/Novetta/adaptnlp/tree/master/rest) with Sebastián's [FastAPI](https://github.com/tiangolo/fastapi) Framework\n - Containerized FastAPI app\n - Immediately deploy any custom trained Flair or AdaptNLP model\n - [Dockerizing AdaptNLP with GPUs](https://hub.docker.com/r/achangnovetta/adaptnlp)\n - Easily build and run AdaptNLP containers leveraging NVIDIA GPUs with Docker", "_____no_output_____" ], [ "## Quick Start\n\n### Requirements and Installation for Linux/Mac\n\nNote: AdaptNLP will be using the latest stable torch version (v1.7 as of 11/2/20) which requires Python 3.7+. Please downgrade torch<=1.6 if using Python 3.6\n\n#### Virtual Environment\nTo avoid dependency clustering and issues, it would be wise to install AdaptNLP in a virtual environment.\nTo create a new python 3.7+ virtual environment, run this command and then activate it however your operating\nsystem specifies:\n\n```\npython -m venv venv-adaptnlp\n```", "_____no_output_____" ], [ "## Requirements and Installation for Windows\n\n#### PyTorch Install\nPyTorch needs to be manually installed on Windows environments. If it's not already installed, proceed to http://pytorch.org/get-started/locally to select your preferences and then run the given install command. 
Note that the current version of PyTorch we use relies on cuda 10.1.\n\n#### AdaptNLP Install\nInstall using pip:\n```\npip install adaptnlp\n```\n\nIf you want to work on AdaptNLP, `pip install adaptnlp[dev]` will install its development tools.", "_____no_output_____" ], [ "## Examples and General Use\n\nOnce you have installed AdaptNLP, here are a few examples of what you can run with AdaptNLP modules:\n\n### Named Entity Recognition with `EasyTokenTagger`\n\n```python\nfrom adaptnlp import EasyTokenTagger\n\n## Example Text\nexample_text = \"Novetta's headquarters is located in Mclean, Virginia.\"\n\n## Load the token tagger module and tag text with the NER model \ntagger = EasyTokenTagger()\nsentences = tagger.tag_text(text=example_text, model_name_or_path=\"ner\")\n\n## Output tagged token span results in Flair's Sentence object model\nfor sentence in sentences:\n for entity in sentence.get_spans(\"ner\"):\n print(entity)\n\n```\n\n### English Sentiment Classifier `EasySequenceClassifier`\n\n```python\nfrom adaptnlp import EasySequenceClassifier \nfrom pprint import pprint\n\n## Example Text\nexample_text = \"This didn't work at all\"\n\n## Load the sequence classifier module and classify sequence of text with the multi-lingual sentiment model \nclassifier = EasySequenceClassifier()\nsentences = classifier.tag_text(\n text=example_text,\n model_name_or_path=\"nlptown/bert-base-multilingual-uncased-sentiment\",\n mini_batch_size=1,\n)\n\n## Output labeled text results in Flair's Sentence object model\nprint(\"Tag Score Outputs:\\n\")\nfor sentence in sentences:\n pprint({sentence.to_original_text(): sentence.labels})\n\n```\n\n### Span-based Question Answering `EasyQuestionAnswering`\n\n```python\nfrom adaptnlp import EasyQuestionAnswering \nfrom pprint import pprint\n\n## Example Query and Context \nquery = \"What is the meaning of life?\"\ncontext = \"Machine Learning is the meaning of life.\"\ntop_n = 5\n\n## Load the QA module and run inference on results \nqa = EasyQuestionAnswering()\nbest_answer, best_n_answers = qa.predict_qa(query=query, context=context, n_best_size=top_n, mini_batch_size=1, model_name_or_path=\"distilbert-base-uncased-distilled-squad\")\n\n## Output top answer as well as top 5 answers\nprint(best_answer)\npprint(best_n_answers)\n```\n\n### Summarization `EasySummarizer`\n\n```python\nfrom adaptnlp import EasySummarizer\n\n# Text from encyclopedia Britannica on Einstein\ntext = \"\"\"Einstein would write that two “wonders” deeply affected his early years. The first was his encounter with a compass at age five. \n He was mystified that invisible forces could deflect the needle. This would lead to a lifelong fascination with invisible forces. \n The second wonder came at age 12 when he discovered a book of geometry, which he devoured, calling it his 'sacred little geometry \n book'. Einstein became deeply religious at age 12, even composing several songs in praise of God and chanting religious songs on \n the way to school. This began to change, however, after he read science books that contradicted his religious beliefs. This challenge \n to established authority left a deep and lasting impression. At the Luitpold Gymnasium, Einstein often felt out of place and victimized \n by a Prussian-style educational system that seemed to stifle originality and creativity. 
One teacher even told him that he would \n never amount to anything.\"\"\"\n\nsummarizer = EasySummarizer()\n\n# Summarize\nsummaries = summarizer.summarize(text = text, model_name_or_path=\"t5-small\", mini_batch_size=1, num_beams = 4, min_length=0, max_length=100, early_stopping=True)\n\nprint(\"Summaries:\\n\")\nfor s in summaries:\n print(s, \"\\n\")\n```\n\n### Translation `EasyTranslator`\n```python\nfrom adaptnlp import EasyTranslator\n\ntext = [\"Machine learning will take over the world very soon.\",\n \"Machines can speak in many languages.\",]\n\ntranslator = EasyTranslator()\n\n# Translate\ntranslations = translator.translate(text = text, t5_prefix=\"translate English to German\", model_name_or_path=\"t5-small\", mini_batch_size=1, min_length=0, max_length=100, early_stopping=True)\n\nprint(\"Translations:\\n\")\nfor t in translations:\n print(t, \"\\n\")\n```", "_____no_output_____" ], [ "## Tutorials\n\n### NLP Tasks\n\n 1. [Token Classification: NER, POS, Chunk, and Frame Tagging](https://novetta.github.io/adaptnlp/tutorial.token_tagging.html)\n - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Novetta/adaptnlp/blob/master/nbs/05a_tutorial.token_tagging.ipynb)\n 2. [Sequence Classification: Sentiment](https://novetta.github.io/adaptnlp/tutorial.sequence_classification)\n - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Novetta/adaptnlp/blob/master/nbs/06a_tutorial.sequence_classification.ipynb)\n 3. [Embeddings: Transformer Embeddings e.g. BERT, XLM, GPT2, XLNet, roBERTa, ALBERT](https://novetta.github.io/adaptnlp/tutorial.embeddings)\n - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Novetta/adaptnlp/blob/master/nbs/04a_tutorial.embeddings.ipynb)\n 4. [Question Answering: Span-based Question Answering Model](https://novetta.github.io/adaptnlp/tutorial.question_answering)\n - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Novetta/adaptnlp/blob/master/nbs/10a_tutorial.question_answering.ipynb)\n 5. [Summarization: Abstractive and Extractive](https://novetta.github.io/adaptnlp/tutorial.summarization)\n - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Novetta/adaptnlp/blob/master/nbs/07a_tutorial.summarization.ipynb)\n 6. 
[Translation: Seq2Seq](https://novetta.github.io/adaptnlp/tutorial.translation)\n - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Novetta/adaptnlp/blob/master/nbs/08a_tutorial.translation.ipynb)\n\n### Custom Fine-Tuning and Training with Transformer Models\n\n - Fine-tuning a Transformers Language Model\n - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Novetta/adaptnlp/blob/master/nbs/20a_tutorial.fine_tuning_lm.ipynb)\n \nCheckout the [documentation](https://novetta.github.io/adaptnlp) for more information.\n ", "_____no_output_____" ], [ "## REST Service \n\nWe use FastAPI for standing up endpoints for serving state-of-the-art NLP models with AdaptNLP.\n\n![Swagger Example](https://raw.githubusercontent.com/novetta/adaptnlp/master/docs/assets/images/fastapi-docs.png)\n\nThe [REST](https://github.com/Novetta/adaptnlp/tree/master/rest) directory contains more detail on deploying a REST API locally or with docker in a very easy and\nfast way.", "_____no_output_____" ], [ "## Docker\n\nAdaptNLP official docker images are up on [Docker Hub](https://hub.docker.com/r/achangnovetta/adaptnlp).\n\nImages have AdaptNLP installed from source in developer mode with tutorial notebooks available, and will default to launching a jupyter server from where you can start \nrunning the tutorial and workshop notebooks.\n\nImages can build with GPU support if NVIDA-Docker is correctly installed.\n\n### Pull and Run AdaptNLP Immediately\nSimply run an image with AdaptNLP installed from source in developer mode by running:\n```\ndocker run -itp 8888:8888 achangnovetta/adaptnlp:latest\n```\nRun an image with AdaptNLP running on GPUs if you have nvidia drivers and nvidia-docker 19.03+ installed:\n```\ndocker run -itp 8888:8888 --gpus all achangnovetta/adaptnlp:latest\n```\n\nCheck `localhost:8888` or `localhost:8888/lab` to access the container notebook servers.\n\n\n### Build\n\nRefer to the `docker/` directory and run the following to build and run adaptnlp from the available images.\n\nNote: A container with GPUs enabled requires Docker version 19.03+ and nvida-docker installed\n```\n# From the repo directory\ndocker build -t achangnovetta/adaptnlp:latest -f docker/runtime/Dockerfile.cuda11.0-runtime-ubuntu18.04-py3.8 .\ndocker run -itp 8888:8888 achangnovetta/adaptnlp:latest\n```\nIf you want to use CUDA compatible GPUs \n```\ndocker run -itp 8888:8888 --gpus all achangnovetta/adaptnlp:latest\n```\n\nCheck `localhost:8888` or `localhost:8888/lab` to access the container notebook servers.", "_____no_output_____" ], [ "## Contact\n\nPlease contact the author Zachary Mueller at [email protected] with questions or comments regarding AdaptNLP.\n\nFollow us on Twitter at [@TheZachMueller](https://twitter.com/TheZachMueller) and [@AdaptNLP](https://twitter.com/AdaptNLP) for\nupdates and NLP dialogue.", "_____no_output_____" ], [ "## License\n\nThis project is licensed under the terms of the Apache 2.0 license.", "_____no_output_____" ] ] ]
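The REST section points to the repository's `rest/` directory for the actual FastAPI service. Purely as an illustration of what such a microservice can look like, the sketch below wraps the `EasySequenceClassifier` call from the Examples section in a single endpoint. The route name, request model, and response shape are assumptions made for this sketch, not the layout of the official `rest/` app.

```python
# Sketch of a minimal FastAPI wrapper around the sequence classifier shown above.
from fastapi import FastAPI
from pydantic import BaseModel
from adaptnlp import EasySequenceClassifier

app = FastAPI()
classifier = EasySequenceClassifier()

class SequenceRequest(BaseModel):
    text: str
    model_name_or_path: str = "nlptown/bert-base-multilingual-uncased-sentiment"

@app.post("/classify")
def classify(req: SequenceRequest):
    sentences = classifier.tag_text(
        text=req.text,
        model_name_or_path=req.model_name_or_path,
        mini_batch_size=1,
    )
    # Convert Flair Sentence objects into plain JSON-friendly dicts
    return [
        {"text": s.to_original_text(), "labels": [str(label) for label in s.labels]}
        for s in sentences
    ]

# Run with, e.g.: uvicorn my_service:app --reload  (module name is hypothetical)
```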
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec78e2f1298d7c84229a92e0abeacfb2ee4e8fe6
1,019,947
ipynb
Jupyter Notebook
Exploring Datasets.ipynb
MartinSeeler/python-data-exploration
dc7a076eeb7c569cec7d867d9ae173b5d531560f
[ "MIT" ]
16
2017-11-30T19:14:49.000Z
2022-01-15T03:44:10.000Z
Exploring Datasets.ipynb
MartinSeeler/python-data-exploration
dc7a076eeb7c569cec7d867d9ae173b5d531560f
[ "MIT" ]
1
2020-10-01T08:07:45.000Z
2020-10-01T08:31:29.000Z
Exploring Datasets.ipynb
MartinSeeler/python-data-exploration
dc7a076eeb7c569cec7d867d9ae173b5d531560f
[ "MIT" ]
11
2018-02-07T12:10:35.000Z
2021-07-02T10:22:14.000Z
545.426203
454,258
0.936474
[ [ [ "# Exploring Datasets with Python", "_____no_output_____" ], [ "In this short demo we will analyse a given dataset from 1978, which contains information about politicians having affairs. \n\nTo analyse it, we will use a [Jupyter Notebook](http://jupyter.org/), which is basically a *REPL++* for Python. Entering a command with shift executes the line and prints the result.", "_____no_output_____" ] ], [ [ "4 + 4", "_____no_output_____" ], [ "def sum(a, b):\n return a + b\n\nsum(40, 2)", "_____no_output_____" ] ], [ [ "To work with common files like CSV, JSON, Excel files etc., we will use [Pandas](http://pandas.pydata.org/), _an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language_™. Let's import it!", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport warnings\nwarnings.filterwarnings(action='once')", "_____no_output_____" ] ], [ [ "Our dataset is given as a CSV file. Pandas provides an easy way to read our file with `read_csv`. The path of the file to read is relative to our notebook file. The path can also be an URL, supporting HTTP, FTP and also S3 if your data is stored inside an AWS S3 Bucket!", "_____no_output_____" ] ], [ [ "affairs = pd.read_csv('affairs.csv')", "_____no_output_____" ] ], [ [ "The first thing we will check is the size of our dataset. We can use `info()` to get the number of entries of each column.", "_____no_output_____" ] ], [ [ "affairs.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 601 entries, 0 to 600\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 sex 601 non-null object \n 1 age 601 non-null float64\n 2 ym 601 non-null float64\n 3 child 601 non-null object \n 4 religious 601 non-null int64 \n 5 education 601 non-null int64 \n 6 occupation 601 non-null int64 \n 7 rate 601 non-null int64 \n 8 nbaffairs 601 non-null int64 \ndtypes: float64(2), int64(5), object(2)\nmemory usage: 42.4+ KB\n" ] ], [ [ "Now we know how many data is inside our file. Pandas is smart enough to parse the column titles by itself and estimate the data types of each column.\n\nYou may be curious how the data looks like. Let's see by using `head()`, which will print the first 5 rows.", "_____no_output_____" ] ], [ [ "affairs.head()", "_____no_output_____" ] ], [ [ "We can access a column of our dataset by using bracket notation and the name of the column.", "_____no_output_____" ] ], [ [ "affairs['sex'].head()", "_____no_output_____" ] ], [ [ "For categorical features like `sex`, you can also get the distributions of each value by using `value_counts()`.", "_____no_output_____" ] ], [ [ "affairs['sex'].value_counts()", "_____no_output_____" ] ], [ [ "But what about numerical values? It definitly makes no sense to count each distinct value. Therefore, we can use `describe()`.", "_____no_output_____" ] ], [ [ "affairs['age'].describe()", "_____no_output_____" ] ], [ [ "You can also access values like `mean` or `max` directly with the corrsponding methods. Let's see who is the oldest cheater!", "_____no_output_____" ] ], [ [ "affairs['age'].max()", "_____no_output_____" ] ], [ [ "This works for the whole dataframe as well. Pandas knows which values are numerical based on the datatype and hides the categorical features for you.", "_____no_output_____" ] ], [ [ "affairs.describe()", "_____no_output_____" ] ], [ [ "There is also an easy way to filter your dataset. 
Let's say we want to have a subset of our data containing only women. This is also possible with the bracket notation!", "_____no_output_____" ] ], [ [ "affairs[affairs['sex'] == 'female'].head()", "_____no_output_____" ] ], [ [ "The above statement returns a new dataframe (not a copy, modifying this data will modify the original as well), which can be accessed like before. Let's see what the numerical distribution looks like for the women.", "_____no_output_____" ] ], [ [ "affairs[affairs['sex'] == 'female'].describe()", "_____no_output_____" ] ], [ [ "We can also create new columns. Specify the new column name in brackets and provide a function to set the data. We will create a new column containing True or False, whether or not the person is below 30.", "_____no_output_____" ] ], [ [ "affairs['below_30'] = affairs['age'] < 30", "_____no_output_____" ], [ "affairs['below_30'].value_counts()", "_____no_output_____" ], [ "affairs.head()", "_____no_output_____" ] ], [ [ "We can use this to normalize our columns with better values. Take for example `religious`. The numbers have the following meaning: 1 = not, 2 = mildly, 3 = fairly, 4 = strongly. We can easily replace them inline with the following code.", "_____no_output_____" ] ], [ [ "rel_meanings = ['not', 'mildly', 'fairly', 'strongly']", "_____no_output_____" ], [ "affairs['religious'] = affairs['religious'].apply(lambda x: rel_meanings[min(x, 4)-1])", "_____no_output_____" ], [ "affairs.head()", "_____no_output_____" ] ], [ [ "This should be enough about Pandas. Let's get some visualisations!", "_____no_output_____" ], [ "## Visualize Data", "_____no_output_____" ], [ "To visualize our data, we will use [Seaborn](https://seaborn.pydata.org), a Python visualization library based on matplotlib. It provides a high-level interface for drawing attractive statistical graphics. Let's import it.", "_____no_output_____" ] ], [ [ "import seaborn as sns", "_____no_output_____" ] ], [ [ "To see our charts directly in our notebook, we have to execute the following:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nsns.set()\nsns.set_context('talk')", "_____no_output_____" ] ], [ [ "Seaborn together with Pandas makes it pretty easy to create charts to analyze our data. We can pass our Dataframes and Series directly into Seaborn methods. We will see how in the following sections.", "_____no_output_____" ], [ "### Univariate Plotting", "_____no_output_____" ], [ "Let's start by visualizing the distribution of the age of our people. We can achieve this with a simple method called `displot` by passing our series of ages as an argument.", "_____no_output_____" ] ], [ [ "sns.displot(affairs['age'], kde=True)", "_____no_output_____" ] ], [ [ "The chart above calculates a kernel density as well. To get a real histogram, we have to disable the `kde` feature. We can also increase the number of buckets for our histogram by setting `bins` to a higher number.", "_____no_output_____" ] ], [ [ "sns.displot(x=affairs['age'], bins=50, rug=True, kde=False)", "_____no_output_____" ] ], [ [ "Interesting! The ages of the people in this dataset seem to end with two or seven.\n\nWe can do the same for every numerical column, e.g. 
the years of marriage.", "_____no_output_____" ] ], [ [ "sns.displot(affairs['ym'], bins=10, kde=False)", "_____no_output_____" ] ], [ [ "The average age of our people is around 32, but most people are married for more than 14 years!", "_____no_output_____" ], [ "### Bivariate Plotting", "_____no_output_____" ], [ "Numbers get even more interesting when we can compare them to other numbers! Let's start comparing the number of years married vs the number of affairs. Seaborn provides us with a method called `jointplot` for this use case.", "_____no_output_____" ] ], [ [ "sns.jointplot(x=affairs['ym'],y=affairs['nbaffairs'])", "_____no_output_____" ] ], [ [ "To get a better feeling of how the number of affairs is affected by the number of years married, we can use a regression model by specifying `kind` as `reg`.", "_____no_output_____" ] ], [ [ "sns.jointplot(x=affairs['ym'], y=affairs['nbaffairs'], kind='reg')", "_____no_output_____" ] ], [ [ "We can also use a kernel to compare the density of two columns against each other, e.g. `age` and `ym`.", "_____no_output_____" ] ], [ [ "sns.jointplot(x=affairs['ym'], y=affairs['age'], kind='kde', shade=True)", "_____no_output_____" ] ], [ [ "We can get an even better comparison by plotting everything vs everything! Seaborn provides this with the `pairplot` method.", "_____no_output_____" ] ], [ [ "sns.pairplot(affairs.drop(\"below_30\", axis=1))", "_____no_output_____" ] ], [ [ "You won't see anything special in this data. We need to separate them by some kind of criteria. We can use our categorical values to do this! Seaborn supports this with a parameter called `hue`. Let's separate our data by `sex` first. To make things even more interesting, let's create a regression for every plot, too!", "_____no_output_____" ] ], [ [ "sns.pairplot(affairs.drop('below_30', axis=1), hue='sex', kind='reg')", "_____no_output_____" ] ], [ [ "To get even better separation, we can use `lmplot` to compare just the fields we need.\n\nLet's say we're interested in the number of affairs vs years married. We also want to separate them by `sex`, `child` and `religious`. We will use `sns.lmplot(x=\"ym\", y=\"nbaffairs\", hue=\"sex\", col=\"child\", row=\"religious\", data=affairs)` to achieve this.", "_____no_output_____" ] ], [ [ "sns.lmplot(x=\"ym\", y=\"nbaffairs\", hue=\"sex\", col=\"child\", row=\"religious\", data=affairs)", "_____no_output_____" ] ], [ [ "Here are some categorical plots to explore the dataset even further.", "_____no_output_____" ] ], [ [ "sns.boxplot(x=\"sex\", y=\"ym\", hue=\"child\", data=affairs);", "_____no_output_____" ], [ "sns.violinplot(x=\"religious\", y=\"nbaffairs\", hue=\"sex\", data=affairs, split=True);", "_____no_output_____" ] ], [ [ "We can also get the correlations between the values by using Pandas' built-in method `corr()`.", "_____no_output_____" ] ], [ [ "affairs.corr()", "_____no_output_____" ] ], [ [ "Feeding these stats into Seaborn's `heatmap` method provides us with a visual representation.", "_____no_output_____" ] ], [ [ "sns.heatmap(affairs.corr(), cmap='coolwarm')", "_____no_output_____" ] ] ]
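All of the analysis above is visual. As an optional, complementary step, the same questions can be summarized numerically with a pandas groupby; the sketch below computes how often at least one affair is reported, split by the `religious` and `sex` columns used earlier. The derived column name `has_affair` is our own choice.

```python
# Numerical companion to the plots above (illustrative only).
summary = (
    affairs
    .assign(has_affair=affairs['nbaffairs'] > 0)   # True if at least one affair
    .groupby(['religious', 'sex'])
    .agg(n=('has_affair', 'size'),                 # group sizes
         affair_rate=('has_affair', 'mean'))       # share with at least one affair
)
print(summary)
```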
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec78e9d6a4c39eaeb4aab105586ce0955924aa97
53,974
ipynb
Jupyter Notebook
jupyter/01 - csv2json.ipynb
alueschow/cerl-thesaurus-networks
480b694188b028a27a0474f890018a424e83ed20
[ "MIT" ]
null
null
null
jupyter/01 - csv2json.ipynb
alueschow/cerl-thesaurus-networks
480b694188b028a27a0474f890018a424e83ed20
[ "MIT" ]
null
null
null
jupyter/01 - csv2json.ipynb
alueschow/cerl-thesaurus-networks
480b694188b028a27a0474f890018a424e83ed20
[ "MIT" ]
null
null
null
30.736902
861
0.499704
[ [ [ "# Tutorial: CSV2JSON", "_____no_output_____" ], [ "The _CSV2JSON_ class is used to transform metadata from a CSV to JSON format that can be used in further analysis.\n\nAuthor: Andreas Lüschow\n\nLast updated: 2021/07/28", "_____no_output_____" ], [ "-----", "_____no_output_____" ], [ "## Import", "_____no_output_____" ], [ "Import the appropriate class from __Bibliometa__:", "_____no_output_____" ] ], [ [ "from bibliometa.conversion import CSV2JSON", "_____no_output_____" ] ], [ [ "As you can see from the following output, the _CSV2JSON_ class has a lot of built-in functions:", "_____no_output_____" ] ], [ [ "dir(CSV2JSON)", "_____no_output_____" ] ], [ [ "We are only interested in the public methods, so let's have a look at them:", "_____no_output_____" ] ], [ [ "[m for m in dir(CSV2JSON) if not m.startswith('_')]", "_____no_output_____" ] ], [ [ "The usage of _CSV2JSON_ class is quite simple: There are two methods to work with the class configuration, and only one method to actually start the conversion process.", "_____no_output_____" ], [ "-----", "_____no_output_____" ], [ "## Configuration", "_____no_output_____" ], [ "Most __Bibliometa__ classes come with already predefined configuration for their class attributes. In this case, you can see the default configuration using the _get_config()_ function on a new _CSV2JSON_ object. So let's create an object first:", "_____no_output_____" ] ], [ [ "c2j = CSV2JSON()", "_____no_output_____" ] ], [ [ "### Input, output, year range", "_____no_output_____" ], [ "And now let's have a look at the default configuration values:", "_____no_output_____" ] ], [ [ "c2j.get_config()", "_____no_output_____" ] ], [ [ "As a shortcut, you can also simply print out the object itself which will return a representation of the configuration values:", "_____no_output_____" ] ], [ [ "c2j", "_____no_output_____" ] ], [ [ "There are a lot of configuration options, let's go through them step by step.\n\n__i__ (str): Input CSV file\n* Path to the CSV file that will be converted.\n\n__o__ (str): Output JSON file\n* Path to the JSON file that will be created. If the path contains folders that are not existent yet, they will be created during the conversion process.\n\n**from_** (int): Year where conversion starts\n* Data from the input file are processed year by year. Only those data sets that are within a certain interval are considered in the conversion process. The _from__ parameter (mind the trailing underscore!) is used to define the \"starting year\", i.e., the \"oldest\" year that is respected in the conversion process.\n\n__to__ (int): Year where conversion ends\n* This is the last year that is considered in the conversion process.\n\n__step__ (int): Interval between two years\n* Using this parameter you can define how many \"year slices\" the conversion will produce. For example, let's assume that *from_* == 1750 and _to_ == 1850. Setting the parameter _step_ to 10 would create a JSON file for 1750, 1760, 1770, ... up to 1850 each. Setting _step_ to 25 would create JSON files for 1750, 1775, 1800, ... up to 1850. If you need only one single year, you have to set both *from_* and _to_ to the same year, _step_ will have no effect then. However, the parameter _step_ has to be always greater than zero; otherwise an error will be thrown.", "_____no_output_____" ], [ "At this point, let's try to change a configuration parameter using the _set_config()_ function. 
After each function call the current configuration is printed out automatically to check if your changes worked as expected:", "_____no_output_____" ] ], [ [ "c2j.set_config(from_=500)", "_____no_output_____" ] ], [ [ "Calling the _set_config()_ function with keyword arguments allows you to change the configuration parameters according to your needs. Since it is a bit cumbersome to find the parameters in the output above, you can also use keyword arguments with the _get_config()_ function to check for specific configuration parameters:", "_____no_output_____" ] ], [ [ "c2j.get_config(\"from_\")", "_____no_output_____" ], [ "c2j.get_config(\"from_\", \"to\", \"step\") ", "_____no_output_____" ] ], [ [ "As you can see, working with configuration parameters is quite simple.", "_____no_output_____" ] ], [ [ "c2j.get_config(\"i\")", "_____no_output_____" ], [ "c2j.set_config(i=\"../data/my_own_data.csv\")\nc2j.get_config(\"i\")", "_____no_output_____" ] ], [ [ "Actually, if you know the parameter you want to change, you can also set and get configuration parameters using a dot notation. This is the preferred way if you need to change or access only a single parameter value, since the output does not include the parameter key itself:", "_____no_output_____" ] ], [ [ "c2j.config.i = \"../data/my_very_own_data.csv\"\nc2j.config.i", "_____no_output_____" ] ], [ [ "However, if you need to change or access more than one configuration parameter, using the _set_config()_ and _get_config()_ functions is the way to go.", "_____no_output_____" ], [ "### Field definitions", "_____no_output_____" ], [ "Bur for now let's go back to explaining the remaining configuration parameters.", "_____no_output_____" ] ], [ [ "c2j.get_config()", "_____no_output_____" ] ], [ [ "__fields__ (list of dict): Fields and subfields to consider\n* This parameter defines which columns from the input data will be converted to JSON. Each field/subfield combination needs to be represented in a single dictionary with the keys _content_, _type_, and _categories_:\n\n `{'content': ('515', 'a'),\n 'type': ('515', '0'),\n 'categories': ['actv']}`\n \n These three keys need to have a unique representation in the input CSV. In the example above, this means that we need a column containing values from field 515, subfield a, another column with values from field 515, subfield 0, and that content in field 515, subfield 0, has to be identical to a value from the \"categories\" list to be considered for conversion.\n \n__subfield_sep__ (str): Separator between fields and subfields\n* This separator is used to combine field and subfield values to a single string. For example, if your input CSV contains columns such as \"515\\\\$a\" and \"515\\\\$0\", the dollar sign \\\\$ is your subfield separator.\n\n__split_char__ (str): Character between values in cells\n* This/These character(s) is/are used to distinguish different values in the same CSV cell. 
\n\n__csv_sep__ (str): CSV separator\n* The seperator that is used between single CSV fields (usually something like \"\\t\" or \",\" or \";\").", "_____no_output_____" ], [ "To understand the explanations above let's have a look into the example input CSV file that comes with the tutorial (using standard python code and the pandas library):", "_____no_output_____" ] ], [ [ "import pandas as pd\n\npath = \"../data/examples/demo.csv\" # This will later be our input file (parameter \"i\")\ndf = pd.read_csv(path, sep=\"\\t\") # There's the CSV separator\ndf.head()", "_____no_output_____" ] ], [ [ "You can see nine columns:\n* index column of the Pandas DataFrame (which has no name)\n* id \n* name\n* 515\\\\$a (mind the dollar sign as subfield separator!)\n* 515\\\\$0\n* 515\\\\$z\n* 350\\\\$0\n* 350\\\\$a\n* 340\\\\$x\n\nYou can also see that mutiple values in a cell are divided by the string \" ### \", which is our _split_char_ parameter.", "_____no_output_____" ] ], [ [ "c2j.get_config(\"split_char\")", "_____no_output_____" ] ], [ [ "The _fields_ parameter in our configuration looks as follows:", "_____no_output_____" ] ], [ [ "c2j.get_config(\"fields\")", "_____no_output_____" ] ], [ [ "Looking at the example CSV, this means that values in column \"515\\\\$a\" (\"content\") are considered during the conversion process only if the corresponding value in column \"515\\\\$0\" (\"type\") has the value \"actv\" (\"categories).\n\nThe following row would thus be ignored (it contains only \"brth\" and \"deat\" values in column \"515\\\\$0\"):", "_____no_output_____" ] ], [ [ "df[df[\"id\"] == \"cnp01300387\"]", "_____no_output_____" ] ], [ [ "However, this row would be converted:", "_____no_output_____" ] ], [ [ "df[df[\"id\"] == \"cnp02161976\"]", "_____no_output_____" ] ], [ [ "To be more pecise, only the two values \"Barcelona\" and \"Cervera\" from column \"515\\\\$a\" would be considered in the conversion, since they are the only ones with a corresponding \"actv\" value in column \"515\\\\$0\".\n\nTo be even more precise, this row from the CSV file would only be considered during conversion if a year between 1515 and 1597 is used in the conversion. How can we know? 
For this, we have to look at two other configuration parameters, _datefield_ and _date_indicator_.", "_____no_output_____" ], [ "### Dates", "_____no_output_____" ] ], [ [ "c2j.get_config()", "_____no_output_____" ] ], [ [ "__datefield__ (tuple): Field and subfield of date information\n* The example configuration defines that information about dates for a single row can be found in column \"340\\\\$x\"\n\n__date_indicator__ (list of str): Indicators in datefield that are accepted\n* Two values are possible: \"0\" means that biographical dates are considered; \"1\" means that activity dates are considered.\n\n__interval_lower__ (int): Lower interval for single dates\n* This parameter defines up to which lower bound data sets from the input CSV are considered for conversion if there is only a single year value available in the date column (see below for an example).\n\n__interval_upper__ (int): Upper interval for single dates\n* See the explanation for the previous parameter; here, the upper bound is defined.\n\nYou can see the date indicator at position 5 in the datefield column, after the field number 340 and a space:", "_____no_output_____" ] ], [ [ "df[df[\"340$x\"].notna()][\"340$x\"]", "_____no_output_____" ] ], [ [ "You can also see the values of subfield \"\\\\$x\". For example, in row with ID 11 this subfield has the value \"a1803a1883\", which means that the person represented in this data set lived from 1803 to 1883. Hence, this row would only be considered during conversion if a year between 1803 and 1883 is used in the configuration. (This would be the case if we set *from_* == 1800 and _to_ == 1825, or *from_* == 1700 and _to_ == 1850, or even *from_* == 1883 and _to_ == 1884, etc.)\n\nIn row with ID 73638, there is only one year in subfield \"\\\\$x\" available: 1734, which is the beginning of an activity (because the date indicator is set to 1 in this row). In these cases, the data set is only considered for conversion if \"1\" is given in parameter _date_indicator_ and if the value in subfield \"\\\\$x\" is within the interval defined by the current conversion year and the _interval_lower_ and _interval_upper_ parameters.\n\nRow with ID 73638 would hence be considered in the following example cases:\n* *from_* == 1700, _to_ == 1800, _step_ == 1, _interval_lower_ == 0, _interval_upper_ == 0 (because _step_ == 1, which means that every single year between 1700 and 1800 is considered)\n* *from_* == 1700, _to_ == 1800, _step_ == 10, _interval_lower_ == 5, _interval_upper_ == 5 (because 1734 is within the interval 1730 (+/-5 years))\n* *from_* == 1730, _to_ == 1740, _step_ == 2, _interval_lower_ == 0, _interval_upper_ == 0 (because 1734 is already considered by using a _step_ == 2 from 1730 to 1740)\n* *from_* == 1700, _to_ == 1750, _step_ == 25, _interval_lower_ == 0, _interval_upper_ == 10 (because 1734 is within the upper 10-year-range of year 1725)", "_____no_output_____" ], [ "### Logging, encoding, verbose", "_____no_output_____" ], [ "There are only a couple of configuration parameters left, so let's have a look at them.", "_____no_output_____" ] ], [ [ "c2j.get_config()", "_____no_output_____" ] ], [ [ "__log__ (str): Path to log file\n* The conversion process and its errors are documented in a log file. 
If _verbose_ == True (see below), the logging information is also shown on standard output if its level is _log_level_std_ or above.\n\n__log_level_std__ (str): Logging level considered for standard output\n* Only log messages with this level (or above) are shown on the standard output. This parameter has no effect if _verbose_ == False. Possible severity levels can be found in the documentation of the logging package `loguru`: https://loguru.readthedocs.io/en/stable/api/logger.html\n\n__log_level_file__ (str): Logging level considered for log file\n* Only log messages with this level (or above) are shown in the log file.\n\n__verbose__ (bool): Show detailed information on standard output\n* Whether logging information is not only written to the log file but also shown on the standard output.\n\n__encoding__ (str): File encoding\n* File encoding of input and output files. The default value is \"utf-8\" and there is usually no need to change this.", "_____no_output_____" ], [ "-----", "_____no_output_____" ], [ "## The Conversion Process", "_____no_output_____" ], [ "Configuration parameters can already be passed when a _CSV2JSON_ object is constructed. For some classes in __Bibliometa__ this may be useful when a verbose output of the class initialization is desired. (However, in the case of the class _CSV2JSON_ there is nothing that can be shown, so passing the configuration parameters during initialization or afterwards makes no difference.)\n\nWe start with creating a new _CSV2JSON_ object that has the standard configuration values:", "_____no_output_____" ] ], [ [ "c2j = CSV2JSON()\nc2j", "_____no_output_____" ] ], [ [ "In the next step we define our custom configuration parameters as Python constants for better code readability and maintenance:", "_____no_output_____" ] ], [ [ "INPUT_FILE = \"../data/examples/demo.csv\"\nOUTPUT_FILE = \"../data/output/json/demo.json\"\nLOGFILE=\"../data/logs/csv2json_demo.out\"\nYEARS = (1700, 1750)\nSTEP = 10\nFIELDS = [\n {\"content\": (\"515\", \"a\"),\n \"type\": (\"515\", \"0\"),\n \"categories\": [\"actv\"]}\n]\nDATE_INDICATOR = [\"1\"] # i.e., only activity dates are considered\nDATEFIELD = (\"340\", \"x\")\nINTERVALS = (5,5)", "_____no_output_____" ] ], [ [ "Now we can update our _CSV2JSON_ object accordingly.", "_____no_output_____" ] ], [ [ "c2j.set_config(i=INPUT_FILE,\n o=OUTPUT_FILE,\n log=LOGFILE,\n from_=YEARS[0],\n to=YEARS[1],\n step=STEP,\n fields=FIELDS,\n date_indicator=DATE_INDICATOR,\n datefield=DATEFIELD,\n interval_lower=INTERVALS[0],\n interval_upper=INTERVALS[1],\n )", "_____no_output_____" ] ], [ [ "Finally, we start the conversion process by calling the _start()_ function. This function does not take any parameters.\n\nA progress bar for each year will indicate the conversion progress. 
Since we have defined *from_* == 1700, _to_ == 1750 and _step_ == 10, we will get six progress bars (one for 1700, 1710, 1720, 1730, 1740, and 1750 each).", "_____no_output_____" ] ], [ [ "c2j.start()", "_____no_output_____" ] ], [ [ "Let's have a look at the produced files, starting with the log file.", "_____no_output_____" ] ], [ [ "with open(c2j.config.log, \"r\", encoding=c2j.config.encoding) as f:\n log_text = f.read().splitlines()\n\nlog_text", "_____no_output_____" ] ], [ [ "Note that new content is always appended to the log file and the file is not deleted nor cleared when you start a new conversion process.", "_____no_output_____" ], [ "Now let's see which JSON files were produced:", "_____no_output_____" ] ], [ [ "import os\n\nos.listdir(os.path.dirname(OUTPUT_FILE))", "_____no_output_____" ] ], [ [ "As you can see, the appropriate year was appended to the file name defined in OUTPUT_FILE for each conversion\nstep.\n\nHow does the content of the JSON files look like?", "_____no_output_____" ] ], [ [ "import json\n\nwith open(os.path.join(os.path.dirname(OUTPUT_FILE), 'demo_1710.json'), \"r\", encoding=c2j.config.encoding) as f:\n d = json.load(f)\n \nfor i in list(d.items())[:5]:\n print(i)", "('cnp01287518', {'515': {'a': ['Breslau']}})\n('cnp01417964', {'515': {'a': ['Altorf']}})\n('cnp01418304', {'515': {'a': ['Kiel']}})\n('cnp01289912', {'515': {'a': ['Venedig']}})\n('cnp01415306', {'515': {'a': ['Halle']}})\n" ] ], [ [ "Looking at the first 5 entries in the JSON file, you can see that for each data set in the input file (represented by its ID) that fulfills the requirements (i.e., an appropriate date is given in the date column and field types are as defined), a JSON element is created that represents field, subfield and content values.\n\nWe can check this by looking at the original input data:", "_____no_output_____" ] ], [ [ "df[df[\"id\"] == \"cnp01287518\"][\"340$x\"].values", "_____no_output_____" ] ], [ [ "There are two dates available for this data set. The first one is ignored in the conversion process because the date indicator is \"0\" (and we set our configuration to only allow the date indicator \"1\"). However, the second date for this data set has the correct date indicator and -- as can be seen in the subfield \\\\$x -- the person represented in this data set was active between 1678 and 1750. Since the JSON file we looked at was created for the year 1710 and this year is within the 1678--1750 range, the data sets was converted correctly. \n\nMoreover, this person should be in ALL our six JSON files created, because all years between 1700 and 1750 are within the activity years of this person. Let's check this:", "_____no_output_____" ] ], [ [ "for file in os.listdir(os.path.dirname(OUTPUT_FILE)):\n with open(os.path.join(os.path.dirname(OUTPUT_FILE), file), \"r\", encoding=c2j.config.encoding) as f:\n d = json.load(f)\n print(file, d[\"cnp01287518\"])", "demo_1710.json {'515': {'a': ['Breslau']}}\ndemo_1720.json {'515': {'a': ['Breslau']}}\ndemo_1740.json {'515': {'a': ['Breslau']}}\ndemo_1750.json {'515': {'a': ['Breslau']}}\ndemo_1730.json {'515': {'a': ['Breslau']}}\ndemo_1700.json {'515': {'a': ['Breslau']}}\n" ] ], [ [ "Indeed, the data for person \"cnp01287518\" is available in all JSON files.\n\nSo let's check if the person is correctly ignored when our conversion years are not within the 1678--1750 range. To test this, we have to run a new conversion with other configuration options. 
We can however re-use our previous configuration and just replace the new year parameters. Everything else stays the same.", "_____no_output_____" ] ], [ [ "c2j.config.from_ = 1751\nc2j.config.to = 1751", "_____no_output_____" ] ], [ [ "Running the conversion is as simple as before:", "_____no_output_____" ] ], [ [ "c2j.start()", "_____no_output_____" ] ], [ [ "Again we look at the produced JSON file:", "_____no_output_____" ] ], [ [ "with open(os.path.join(os.path.dirname(OUTPUT_FILE), 'demo_1751.json'), \"r\", encoding=c2j.config.encoding) as f:\n d = json.load(f)\n \nfor i in list(d.items())[:5]:\n print(i) ", "('cnp01414461', {'515': {'a': ['Dannenbüttel']}})\n('cnp01414528', {'515': {'a': ['Salzwedel']}})\n('cnp01414634', {'515': {'a': ['Halle', 'Pulsnitz']}})\n('cnp01415266', {'515': {'a': ['Rodleben', 'Roßlau']}})\n('cnp02047759', {'515': {'a': ['Coswig']}})\n" ] ], [ [ "It seems the conversion was successful. But is person \"cnp01287518\" available?", "_____no_output_____" ] ], [ [ "person_id = \"cnp01287518\"\nif person_id not in d.keys():\n raise KeyError(f\"Data set with ID {person_id} is not in the data!\")", "_____no_output_____" ] ], [ [ "This shows that the data set with ID \"cnp01287518\" was ignored in the conversion process because the dates given in column \"340\\\\$x\" are not within the years defined in the conversion configuration.", "_____no_output_____" ], [ "-----", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec78ef8aa2fdd5c848c23e39509142e6810f58d1
62,017
ipynb
Jupyter Notebook
codes/compare_optimizer/Haarchecker_3qubit.ipynb
vutuanhai237/QuantumTomography
52916096482d7e7cd29782c049478bbba901d9bd
[ "MIT" ]
2
2021-12-11T07:49:46.000Z
2022-03-04T07:11:30.000Z
codes/compare_optimizer/Haarchecker_3qubit.ipynb
vutuanhai237/QuantumTomography
52916096482d7e7cd29782c049478bbba901d9bd
[ "MIT" ]
null
null
null
codes/compare_optimizer/Haarchecker_3qubit.ipynb
vutuanhai237/QuantumTomography
52916096482d7e7cd29782c049478bbba901d9bd
[ "MIT" ]
null
null
null
305.502463
56,846
0.929745
[ [ [ "import qiskit\nimport numpy as np, matplotlib.pyplot as plt\nimport qtm.base, qtm.constant, qtm.nqubit, qtm.onequbit, qtm.encoding", "_____no_output_____" ], [ "num_qubits = 3\nnum_layers = 3\npsi = 2*np.random.rand(2**num_qubits)-1\npsi = psi / np.linalg.norm(psi)\nthetas_origin = np.random.uniform(low = 0, high = 2*np.pi, size = num_qubits*num_layers*5)", "_____no_output_____" ], [ "thetas = thetas_origin.copy()\nencoder = qtm.encoding.Encoding(psi, 'amplitude_encoding')\nqc = encoder.qcircuit\n\nthetas, loss_values_sgd = qtm.base.fit(\n qc, num_steps = 200, thetas = thetas, \n create_circuit_func = qtm.nqubit.create_haarchecker_linear, \n grad_func = qtm.base.grad_loss,\n loss_func = qtm.base.loss_basis,\n optimizer = qtm.base.sgd,\n verbose = 1,\n num_layers = num_layers,\n encoder = encoder\n)\n", "Step: 100%|██████████| 200/200 [26:26<00:00, 7.93s/it]\n" ], [ "thetas = thetas_origin.copy()\nencoder = qtm.encoding.Encoding(psi, 'amplitude_encoding')\nqc = encoder.qcircuit\n\nthetas, loss_values_adam = qtm.base.fit(\n qc, num_steps = 200, thetas = thetas, \n create_circuit_func = qtm.nqubit.create_haarchecker_linear, \n grad_func = qtm.base.grad_loss,\n loss_func = qtm.base.loss_basis,\n optimizer = qtm.base.adam,\n verbose = 1,\n num_layers = num_layers,\n encoder = encoder\n)\n", "Step: 100%|██████████| 200/200 [28:21<00:00, 8.51s/it]\n" ], [ "thetas = thetas_origin.copy()\nencoder = qtm.encoding.Encoding(psi, 'amplitude_encoding')\nqc = encoder.qcircuit\n\nthetas, loss_values_qng = qtm.base.fit(\n qc, num_steps = 200, thetas = thetas, \n create_circuit_func = qtm.nqubit.create_haarchecker_linear, \n grad_func = qtm.base.grad_loss,\n loss_func = qtm.base.loss_basis,\n optimizer = qtm.base.qng,\n verbose = 1,\n num_layers = num_layers,\n encoder = encoder\n)\n", "Step: 100%|██████████| 200/200 [25:36<00:00, 7.68s/it]\n" ], [ "thetas = thetas_origin.copy()\nencoder = qtm.encoding.Encoding(psi, 'amplitude_encoding')\nqc = encoder.qcircuit\n\nthetas, loss_values_qng_adam = qtm.base.fit(\n qc, num_steps = 200, thetas = thetas, \n create_circuit_func = qtm.nqubit.create_haarchecker_linear, \n grad_func = qtm.base.grad_loss,\n loss_func = qtm.base.loss_basis,\n optimizer = qtm.base.qng_adam,\n verbose = 1,\n num_layers = num_layers,\n encoder = encoder\n)\n", "Step: 100%|██████████| 200/200 [27:09<00:00, 8.15s/it]\n" ], [ "plt.plot(loss_values_sgd, label=\"SGD\")\nplt.plot(loss_values_adam, label=\"Adam\")\nplt.plot(loss_values_qng, label=\"QNG\")\nplt.plot(loss_values_qng_adam, label=\"qng_adam\")\nplt.xlabel(\"Step\")\nplt.ylabel(\"Loss value\")\nplt.legend()\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec78fe312c4d9084d31c324b6277bea8867f5ced
8,320
ipynb
Jupyter Notebook
0.setup/setup_and_data_prep.ipynb
tom5610/amazon-sagemaker-mlu-workshop-2022
13a3deef62dbbb1941060f937972c90414d2e77f
[ "MIT" ]
4
2022-03-08T02:46:59.000Z
2022-03-20T22:01:09.000Z
0.setup/setup_and_data_prep.ipynb
tom5610/amazon-sagemaker-mlu-workshop-2022
13a3deef62dbbb1941060f937972c90414d2e77f
[ "MIT" ]
null
null
null
0.setup/setup_and_data_prep.ipynb
tom5610/amazon-sagemaker-mlu-workshop-2022
13a3deef62dbbb1941060f937972c90414d2e77f
[ "MIT" ]
1
2022-03-18T10:06:09.000Z
2022-03-18T10:06:09.000Z
33.821138
619
0.599159
[ [ [ "# Workshop Setup and Data Preparation\n\nThis notebook works well with the `Python 3 (Data Science)` kernel on SageMaker Studio.\n\n---\n\n---", "_____no_output_____" ], [ "## Contents\n\n1. [Introduction](#Introduction)\n1. [Prerequisites](#Prerequisites)\n1. [Downloading the dataset](#Downloading)\n1. [Upload the dataset to Amazon S3](#Uploading)", "_____no_output_____" ], [ "## Introduction\n\n> ***This notebook must be completed before all other labs in the workshop.***\n\nIn the notebook, you will setup the workshop environment and prepare the data for other labs. To finish the notebook execution, you may:\n * Run all `code` cells with Menu `Run` -> `Run All Cells`\n * Run each `code` cell with `Shift + Enter`\n \nIn the labs, we'll use the **[Direct Marketing Dataset](https://archive.ics.uci.edu/ml/datasets/bank+marketing)** as per:\n\n> *\\[Moro et al., 2014\\] S. Moro, P. Cortez and P. Rita. A Data-Driven Approach to Predict the Success of Bank Telemarketing. Decision Support Systems, Elsevier, 62:22-31, June 2014*\n\nPlease walk through the cell execution results and try to understand the dataset before jumping on other labs.", "_____no_output_____" ], [ "## Setup\n\nSetup the environment variables for data set download.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nimport zipfile\n\nimport sagemaker\n\nsession = sagemaker.Session()\nrole = sagemaker.get_execution_role()\n\nbucket = session.default_bucket()\nprefix = \"mlu-workshop/direct-marketing\"\n\ndata_folder = \"../data\"", "_____no_output_____" ] ], [ [ "## Downloading the dataset<a name=\"Downloading\"></a>\n\nHere, we'll download [the dataset](https://sagemaker-sample-data-us-west-2.s3-us-west-2.amazonaws.com/autopilot/direct_marketing/bank-additional.zip) from the SageMaker sample data S3 bucket.", "_____no_output_____" ] ], [ [ "!wget -P $data_folder -N https://sagemaker-sample-data-us-west-2.s3-us-west-2.amazonaws.com/autopilot/direct_marketing/bank-additional.zip\n\n\nwith zipfile.ZipFile(f\"{data_folder}/bank-additional.zip\", \"r\") as zip_ref:\n print(\"Unzipping...\")\n zip_ref.extractall(data_folder)\nprint(\"Done\")\n\ndata_file_path = f\"{data_folder}/bank-additional/bank-additional-full.csv\"", "_____no_output_____" ] ], [ [ "## View the downloaded dataset<a name=\"View\"></a>\n\nIt's recommended to perform a check of the dataset to make sure that it has no obvious errors. \n\n> The [the dataset](https://sagemaker-sample-data-us-west-2.s3-us-west-2.amazonaws.com/autopilot/direct_marketing/bank-additional.zip) is small and it's easy to inspect it in the notebook environment. If you have a larger dataset that will not fit in a notebook instance memory, inspect the dataset offline using a big data analytics tool like Apache Spark. [Deequ](https://github.com/awslabs/deequ) is a library built on top of Apache Spark that can be helpful for performing checks on large datasets. If you are keen to use SageMaker Autopilot, please note that it is capable of handling datasets up to 5 GB.\n\nRead the data into a Pandas data frame and take a look.", "_____no_output_____" ] ], [ [ "data = pd.read_csv(data_file_path)\nwith pd.option_context(\"display.max_columns\", 500):\n # Make sure we can see all of the columns\n display(data)", "_____no_output_____" ] ], [ [ "Let's talk about the data. 
At a high level, we can see:\n\n* We have a little over 40K customer records, and 20 features for each customer\n* The features are mixed; some numeric, some categorical\n* The data appears to be sorted, at least by `time` and `contact`, maybe more\n\n_**Specifics on each of the features:**_\n\n*Demographics:*\n* `age`: Customer's age (numeric)\n* `job`: Type of job (categorical: 'admin.', 'services', ...)\n* `marital`: Marital status (categorical: 'married', 'single', ...)\n* `education`: Level of education (categorical: 'basic.4y', 'high.school', ...)\n\n*Past customer events:*\n* `default`: Has credit in default? (categorical: 'no', 'unknown', ...)\n* `housing`: Has housing loan? (categorical: 'no', 'yes', ...)\n* `loan`: Has personal loan? (categorical: 'no', 'yes', ...)\n\n*Past direct marketing contacts:*\n* `contact`: Contact communication type (categorical: 'cellular', 'telephone', ...)\n* `month`: Last contact month of year (categorical: 'may', 'nov', ...)\n* `day_of_week`: Last contact day of the week (categorical: 'mon', 'fri', ...)\n* `duration`: Last contact duration, in seconds (numeric). Important note: If duration = 0 then `y` = 'no'.\n \n*Campaign information:*\n* `campaign`: Number of contacts performed during this campaign and for this client (numeric, includes last contact)\n* `pdays`: Number of days that passed by after the client was last contacted from a previous campaign (numeric)\n* `previous`: Number of contacts performed before this campaign and for this client (numeric)\n* `poutcome`: Outcome of the previous marketing campaign (categorical: 'nonexistent','success', ...)\n\n*External environment factors:*\n* `emp.var.rate`: Employment variation rate - quarterly indicator (numeric)\n* `cons.price.idx`: Consumer price index - monthly indicator (numeric)\n* `cons.conf.idx`: Consumer confidence index - monthly indicator (numeric)\n* `euribor3m`: Euribor 3 month rate - daily indicator (numeric)\n* `nr.employed`: Number of employees - quarterly indicator (numeric)\n\n*Target variable:*\n* `y`: Has the client subscribed a term deposit? (binary: 'yes','no')", "_____no_output_____" ], [ "### Store the shared variables", "_____no_output_____" ] ], [ [ "%store bucket\n%store prefix\n%store data_folder\n%store data_file_path", "_____no_output_____" ] ], [ [ "### Next\n\nSince the data has been download, we are ready to kick off our first lab - Autopilot Experiment. \n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ec7914d26f074a720d46d4c35910dd428c17aed2
64,654
ipynb
Jupyter Notebook
lezgi/MorphSegmenter_Example-Lezgi.ipynb
umoqnier/FLExMorphSegmenter
970c28fc770511927d6a225db987c877a13049c1
[ "MIT" ]
2
2020-05-08T10:59:06.000Z
2021-08-30T18:40:10.000Z
lezgi/MorphSegmenter_Example-Lezgi.ipynb
umoqnier/FLExMorphSegmenter
970c28fc770511927d6a225db987c877a13049c1
[ "MIT" ]
null
null
null
lezgi/MorphSegmenter_Example-Lezgi.ipynb
umoqnier/FLExMorphSegmenter
970c28fc770511927d6a225db987c877a13049c1
[ "MIT" ]
null
null
null
39.543731
1,015
0.489931
[ [ [ "# Automatic Prediction of Lezgi Morpheme Breaks\n\nThis program does supervised morphological analysis and glossing of affixes. It is intended to quickly increase the amount of accessible data from low resource, and often endangered, languages. This classifier can be used on any language but it expects 2000-3000 words of cleanly annotated data. \n\nThis example is designed for Lezgi [lez], a Nakh-Daghestanian language spoken in Russia and Azerbaijan. Lezgi is an agglutinating language that is overwhelmingly suffixing. The training and test data came from a collection of 21 transcribed oral narratives spoken in the Qusar dialect of northwest Azerbaijan. Nine texts with about 2,500 words were used for training data after having been cleanly annotated with morpheme breaks and part of speech. All but three of affixes were glossed. Many of the stems are not glossed. The FlexText XML export labels each morpheme as stem, suffix, or prefix. \n\nThis program is considered successful if it reaches 80% accuracy. This goal comes from the Pareto Principle - the idea that 20% of one's effort produces 80% of one's results, and vice versa. This program should accurately complete 80% of the annotations, leaving the most interesting and informative 20% for the human linguist to complete.This project was inspired by an ongoing fieldwork project. A native Lezgi speaker who has no background in linguistics has been annotating the collection of texts. She has quickly learned basic morphology and gained FLEx skills. However, simultaneously learning and doing basic linguistic analysis produces inaccurate and inconsistent annotations. It is also time-consuming. Many of the mistakes are due to the repetitive nature of the work. Not every part of speech has inflectional morphology. The annotator is most likely to skip over essential words with simple morphology, such as ergative case-marked arguments, and concentrate on morphologicaly complex words. \n\nOnce the training is complete, the program should predict morpheme breaks and affix glosses for any text that has been labeled with parts of speech. Identifying parts of speech is required because this seems a reasonable task for a non-linguist native speaker. The data used in this example does include two distinctions in Lezgi that might be difficult without linguistic training. Participles are distinguished from verbs, but Lezgi participles end in a unique letter. Demonstrative pronouns are distinguished from pronouns. This distinction was used primarily because it was already consistently annotated in the data. \n", "_____no_output_____" ], [ " ## Preprocessing Data\n \nThis process assumes that 1) the data has been analyzed in FLEx and exported as a FlexText, then saved with an .xml file extension, 2) words have been annotated in FLEx for part of speech, (for this example - verb, participle, adjective, adverb, noun/proper noun, particle, (personal) pronoun, demonstrative, and postposition), 3) morpheme breaks are consistent, and 4) all affixes, but not stems, are glossed.", "_____no_output_____" ] ], [ [ "#API for parsing XML docs\nimport xml.etree.ElementTree as ET\nfrom itertools import chain\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\nimport sklearn\nimport pycrfsuite\nfrom collections import Counter", "_____no_output_____" ], [ "def XMLtoWords(filename):\n '''Takes FLExText text as .xml. 
Returns data as list: [[[[[[morpheme, gloss], pos],...],words],sents]].\n Ignores punctuation. Morph_types can be: stem, suffix, prefix, or phrase when lexical item is made up of two words.'''\n \n datalists = []\n\n #open XML doc using xml parser\n root = ET.parse(filename).getroot()\n\n for text in root:\n for paragraphs in text:\n #Only get paragraphs, ignore metadata.\n if paragraphs.tag == 'paragraphs':\n for paragraph in paragraphs:\n #jump straight into items under phrases\n for phrase in paragraph[0]:\n sent = []\n #ignore first item which is the sentence number\n for word in phrase[1]:\n #ignore punctuation tags which have no attributes\n if word.attrib:\n lexeme = []\n for node in word:\n if node.tag == 'morphemes':\n for morph in node:\n morpheme = []\n #note morph type \n morph_type = morph.get('type')\n #Treat MWEs or unlabled morphemes as stems.\n if morph_type == None or morph_type == 'phrase':\n morph_type = 'stem' \n for item in morph:\n #get morpheme token\n if item.get('type') == 'txt':\n form = item.text\n #get rid of hyphens demarcating affixes\n if morph_type == 'suffix':\n form = form[1:]\n if morph_type == 'prefix':\n form = form[:-1]\n morpheme.append(form)\n #get affix glosses\n if item.get('type') == 'gls' and morph_type != 'stem':\n morpheme.append(item.text)\n #get stem \"gloss\" = 'stem'\n if morph_type == 'stem':\n morpheme.append(morph_type)\n lexeme.append(morpheme)\n #get word's POS\n if node.get('type') == 'pos':\n lexeme.append(node.text)\n sent.append(lexeme)\n datalists.append(sent)\n return datalists", "_____no_output_____" ], [ "def WordsToLetter(wordlists):\n '''Takes data from XMLtoWords: [[[[[[morpheme, gloss], pos],...],words],sents]]. \n Returns [[[[[letter, POS, BIO-label],...],words],sents]]'''\n\n letterlists = []\n \n for phrase in wordlists:\n sent = []\n for lexeme in phrase:\n word = []\n #Skip POS label\n for morpheme in lexeme[:-1]:\n #use gloss as BIO label\n label = morpheme[1]\n #Break morphemes into letters\n for i in range(len(morpheme[0])):\n letter = [morpheme[0][i]]\n #add POS label to each letter\n letter.append(lexeme[-1])\n #add BIO label\n if i == 0:\n letter.append('B-' + label)\n else:\n letter.append('I-' + label)\n #letter.append('I')\n word.append(letter)\n sent.append(word)\n letterlists.append(sent)\n \n return letterlists", "_____no_output_____" ] ], [ [ "The call below takes the data from the FLExText XML export. The data is read from the XML file and broken down by morphemes. Then it is broken down by letter. Each letter is associated with the word's part of speech tag and a BIO label. The BIO label for stems is \"stem\". The label for affixes is their gloss. \"B\" denotes the initial letter of a morpheme. I marks non-initial letters.\n\nWith a corpus of a little less than 2,500 words, I originally tried a 90/10 split. The accuracy results ranged from 92% to 97% but the test data was seeing a dozen or less labels. An 80/20 random split ranges less than 2% in accuracy, but still averages about 94%. However, the number of labels the test data encounters is nearly doubled.", "_____no_output_____" ] ], [ [ "#Randomize and split the data\ntraindata,testdata = train_test_split(WordsToLetter(XMLtoWords(\"FLExTxtExport2.xml\")),test_size=0.2)\n\nprint(\"Train: \", len(traindata), \"Tests: \", len(testdata))", "Train: 64 Tests: 16\n" ], [ "testdata[0]", "_____no_output_____" ] ], [ [ "## CRFSuite \n### Define Features\n\nIt is assumed that a \"phrase\" in FLEx is equivalent to a complete sentence. 
In reality, some \"phrases\" contain more than one sentence, some contain only a sentence fragment. This means that the word position in the sentence is often inaccurate, but it was retained to take into account Lezgi's strong tendency for verb-final word order. Affixes are rarely more than 3 letters long, so features include the previous and next 1-4 letters. This ensures that the program is viewing at least one letter in the previous/next morpheme. More often it is viewing the whole previous/next 1-2 morphemes. \n\nSince Lezgi is primarily suffixing, the position of a letter in a word is counted from the end of the word. ", "_____no_output_____" ] ], [ [ "def extractFeatures(sent):\n '''Takes data as [[[[[letter, POS, BIO-label],...],words],sents]].\n Returns list of words with characters as features list: [[[[[letterfeatures],POS,BIO-label],letters],words]]'''\n \n featurelist = []\n senlen = len(sent)\n \n #each word in a sentence\n for i in range(senlen):\n word = sent[i]\n wordlen = len(word)\n lettersequence = ''\n #each letter in a word\n for j in range(wordlen):\n letter = word[j][0]\n #gathering previous letters\n lettersequence += letter\n #ignore digits \n if not letter.isdigit():\n features = [\n 'bias',\n 'letterLowercase=' + letter.lower(),\n 'postag=' + word[j][1],\n ] \n #position of word in sentence and pos tags sequence\n if i > 0:\n features.append('prevpostag=' + sent[i-1][0][1])\n if i != senlen-1:\n features.append('nxtpostag=' + sent[i+1][0][1])\n else:\n features.append('EOS')\n else:\n features.append('BOS')\n #Don't get pos tag if sentence is 1 word long\n if i != senlen-1:\n features.append('nxtpostag=' + sent[i+1][0][1])\n #position of letter in word\n if j == 0:\n features.append('BOW')\n elif j == wordlen-1:\n features.append('EOW')\n else:\n features.append('letterposition=-%s' % str(wordlen-1-j))\n #letter sequences before letter\n if j >= 4:\n features.append('prev4letters=' + lettersequence[j-4:j].lower() + '>')\n if j >= 3:\n features.append('prev3letters=' + lettersequence[j-3:j].lower() + '>')\n if j >= 2:\n features.append('prev2letters=' + lettersequence[j-2:j].lower() + '>')\n if j >= 1:\n features.append('prevletter=' + lettersequence[j-1:j].lower() + '>')\n #letter sequences after letter\n if j <= wordlen-2:\n nxtlets = word[j+1][0]\n features.append('nxtletter=<' + nxtlets.lower())\n #print('\\nnextletter:', nxtlet)\n if j <= wordlen-3:\n nxtlets += word[j+2][0]\n features.append('nxt2letters=<' + nxtlets.lower())\n #print('next2let:', nxt2let)\n if j <= wordlen-4:\n nxtlets += word[j+3][0]\n features.append('nxt3letters=<' + nxtlets.lower())\n if j <= wordlen-5:\n nxtlets += word[j+4][0]\n features.append('nxt4letters=<' + nxtlets.lower())\n \n featurelist.append(features)\n \n return featurelist\n\ndef extractLabels(sent):\n labels = []\n for word in sent:\n for letter in word:\n labels.append(letter[2])\n return labels\n\ndef extractTokens(sent):\n tokens = []\n for word in sent:\n for letter in word:\n tokens.append(letter[0])\n return tokens\n\ndef sent2features(data):\n return [extractFeatures(sent) for sent in data]\n\ndef sent2labels(data):\n return [extractLabels(sent) for sent in data]\n\ndef sent2tokens(data):\n return [extractTokens(sent) for sent in data]", "_____no_output_____" ], [ "X_train = sent2features(traindata)\nY_train = sent2labels(traindata)\n\nX_test = sent2features(testdata)\nY_test = sent2labels(testdata)", "_____no_output_____" ], [ "X_test[0]", "_____no_output_____" ] ], [ [ "### Train the model", 
"_____no_output_____" ] ], [ [ "trainer = pycrfsuite.Trainer(verbose=False)\n\nfor xseq, yseq in zip(X_train, Y_train):\n trainer.append(xseq, yseq)", "_____no_output_____" ] ], [ [ "Set training parameters. L-BFGS (what is this) is default. Using Elastic Net (L1 + L2) regularization [ditto?].", "_____no_output_____" ] ], [ [ "trainer.set_params({\n 'c1': 1.0, #coefficient for L1 penalty\n 'c2': 1e-3, #coefficient for L2 penalty\n 'max_iterations': 50 #early stopping\n })", "_____no_output_____" ] ], [ [ "The program saves the trained model to a file:", "_____no_output_____" ] ], [ [ "model_filename = 'LING5800_lezgi.crfsuite'\ntrainer.train(model_filename)", "_____no_output_____" ] ], [ [ "### Make Predictions", "_____no_output_____" ] ], [ [ "tagger = pycrfsuite.Tagger()\ntagger.open(model_filename)", "_____no_output_____" ] ], [ [ "First, let's use the trained model to make predications for just one example sentence from the test data. The predicted labels are printed out for comparison above the correct labels. Most examples have 100% accuracy.", "_____no_output_____" ] ], [ [ "example_sent = testdata[0]\nprint('Letters:', ' '.join(extractTokens(example_sent)), end='\\n')\n\nprint('Predicted:', ' '.join(tagger.tag(extractFeatures(example_sent))))\nprint('Correct:', ' '.join(extractLabels(example_sent)))", "Letters: в о б щ е м р а з и х ь а н а ч и б у р и м и д и н и ч и б у б а д и л а г ь а н а к и в а ъ\nPredicted: B-stem I-stem I-stem I-stem I-stem I-stem B-stem I-stem I-stem I-stem B-stem I-stem I-stem B-AOR I-AOR B-NEG B-stem B-PL I-PL I-PL B-stem I-stem I-stem B-OBL I-OBL B-FOC I-FOC B-stem I-stem B-stem I-stem I-stem I-stem B-ERG I-ERG B-stem I-stem I-stem I-stem I-stem B-AOR I-AOR B-stem I-stem B-stem I-stem I-stem\nCorrect: B-stem I-stem I-stem I-stem I-stem I-stem B-stem I-stem I-stem I-stem B-stem I-stem I-stem B-AOR I-AOR B-NEG B-stem B-PL I-PL I-PL B-stem I-stem I-stem B-OBL I-OBL B-FOC I-FOC B-stem I-stem B-stem I-stem I-stem I-stem B-ERG I-ERG B-stem I-stem I-stem I-stem I-stem B-AOR I-AOR B-stem I-stem B-stem I-stem I-stem\n" ] ], [ [ "## Evaluate the Model\n\nThe following function will evaluate how well the model performs. Unlike CRF example found at https://github.com/scrapinghub/python-crfsuite/blob/master/examples/CoNLL%202002.ipynb, this model is not designed to disregard \"O\" labels, since all characters that are not part of a word (e.g. digits and punctuation) are already eliminated during pre-processing.", "_____no_output_____" ] ], [ [ "def bio_classification_report(y_correct, y_pred):\n '''Takes list of correct and predicted labels from tagger.tag. \n Prints a classification report for a list of BIO-encoded sequences.\n It computes letter-level metrics.'''\n\n labeler = LabelBinarizer()\n y_correct_combined = labeler.fit_transform(list(chain.from_iterable(y_correct)))\n y_pred_combined = labeler.transform(list(chain.from_iterable(y_pred)))\n \n tagset = set(labeler.classes_)\n tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])\n class_indices = {cls: idx for idx, cls in enumerate(labeler.classes_)}\n \n return classification_report(\n y_correct_combined,\n y_pred_combined,\n labels = [class_indices[cls] for cls in tagset],\n target_names = tagset)", "_____no_output_____" ] ], [ [ "First, we will predict BIO labels in the test data:", "_____no_output_____" ] ], [ [ "Y_pred = [tagger.tag(xseq) for xseq in X_test]", "_____no_output_____" ] ], [ [ "Get results for labeled position evaluation. 
This evaluates how well the classifier performed on each morpheme as a whole and their tags, rather than evaluating character-level.", "_____no_output_____" ] ], [ [ "def concatenateLabels(y_list):\n '''Return list of morpheme labels [[B-label, I-label,...]morph,[B-label,...]]'''\n \n morphs_list = []\n labels_list = []\n morph = []\n for sent in y_list:\n for label in sent:\n labels_list.append(label)\n if label[0] == 'I':\n #build morpheme shape, adding to first letter\n morph.append(label)\n else:\n # Once processed first morph, add new morphemes & gloss labels to output\n if morph:\n morphs_list.append(morph)\n #Extract morpheme features\n morph = [label]\n \n return morphs_list, labels_list\n\ndef countMorphemes(morphlist):\n counts = {}\n for morpheme in morphlist:\n counts[morpheme[0][2:]] = counts.get(morpheme[0][2:], 0) + 1\n return counts\n\ndef eval_labeled_positions(y_correct, y_pred):\n \n #group the labels by morpheme and get list of morphemes\n correctmorphs,_ = concatenateLabels(y_correct)\n predmorphs,predLabels = concatenateLabels(y_pred)\n #Count instances of each morpheme\n test_morphcts = countMorphemes(correctmorphs)\n pred_morphcts = countMorphemes(predmorphs)\n \n correctMorphemects = {}\n idx = 0\n num_correct = 0\n for morpheme in correctmorphs:\n correct = True\n for label in morpheme:\n if label != predLabels[idx]:\n correct = False\n idx += 1\n if correct == True:\n num_correct += 1\n correctMorphemects[morpheme[0][2:]] = correctMorphemects.get(morpheme[0][2:], 0) + 1\n #calculate P, R F1 for each morpheme\n results = ''\n for firstlabel in correctMorphemects.keys():\n lprec = correctMorphemects[firstlabel]/pred_morphcts[firstlabel]\n lrecall = correctMorphemects[firstlabel]/test_morphcts[firstlabel]\n results += firstlabel + '\\t\\t{0:.2f}'.format(lprec) + '\\t\\t' + '{0:.2f}'.format(lrecall) + '\\t' + '{0:.2f}'.format((2*lprec*lrecall)/(lprec+lrecall)) +'\\t\\t' + str(test_morphcts[firstlabel]) + '\\n'\n #overall results\n precision = num_correct/len(predmorphs)\n recall = num_correct/len(correctmorphs)\n \n print('\\t\\tPrecision\\tRecall\\tf1-score\\tInstances\\n\\n' + results + '\\ntotal/avg\\t{0:.2f}'.format(precision) + '\\t\\t' + '{0:.2f}'.format(recall) + '\\t' + '{0:.2f}'.format((2*precision*recall)/(precision+recall)))", "_____no_output_____" ] ], [ [ "Then, we check the results and print a report of the results. 
These results are for character level.", "_____no_output_____" ] ], [ [ "eval_labeled_positions(Y_test, Y_pred)", "\t\tPrecision\tRecall\tf1-score\tInstances\n\nstem\t\t0.97\t\t0.97\t0.97\t\t129\nFOC\t\t0.75\t\t1.00\t0.86\t\t9\nAOR\t\t1.00\t\t1.00\t1.00\t\t15\nOBL\t\t0.93\t\t0.59\t0.72\t\t22\nDAT\t\t1.00\t\t0.88\t0.93\t\t16\nGEN\t\t0.80\t\t0.57\t0.67\t\t7\nSUPER\t\t1.00\t\t1.00\t1.00\t\t2\nELAT\t\t1.00\t\t1.00\t1.00\t\t3\nFUT\t\t0.50\t\t1.00\t0.67\t\t1\nPL\t\t0.50\t\t0.67\t0.57\t\t3\nPTP\t\t1.00\t\t1.00\t1.00\t\t4\nTEMP\t\t1.00\t\t1.00\t1.00\t\t1\nPERF\t\t1.00\t\t1.00\t1.00\t\t1\nSBST\t\t1.00\t\t1.00\t1.00\t\t1\nNEG\t\t0.50\t\t1.00\t0.67\t\t1\nIMPF\t\t1.00\t\t1.00\t1.00\t\t1\nSUB\t\t1.00\t\t1.00\t1.00\t\t1\n\ntotal/avg\t0.94\t\t0.86\t0.90\n" ], [ "print(bio_classification_report(Y_test, Y_pred))", " precision recall f1-score support\n\n B-AOR 1.00 1.00 1.00 15\n I-AOR 1.00 1.00 1.00 15\n B-COND 0.00 0.00 0.00 1\n I-COND 0.00 0.00 0.00 2\n B-DAT 1.00 1.00 1.00 6\n B-ELAT 1.00 1.00 1.00 2\n I-ELAT 1.00 1.00 1.00 2\n B-ERG 1.00 0.50 0.67 2\n I-ERG 1.00 0.50 0.67 2\n B-FOC 0.83 1.00 0.91 5\n I-FOC 0.83 1.00 0.91 5\n B-FUT 1.00 0.50 0.67 2\n I-FUT 1.00 0.50 0.67 2\n B-GEN 0.75 1.00 0.86 3\n B-HORT 0.00 0.00 0.00 1\n B-IMPF 1.00 1.00 1.00 3\n I-IMPF 1.00 1.00 1.00 7\n B-INESS 0.00 0.00 0.00 1\n B-NEG 1.00 1.00 1.00 1\n B-OBL 0.80 1.00 0.89 8\n I-OBL 0.75 1.00 0.86 6\n B-PL 1.00 0.60 0.75 5\n I-PL 1.00 0.67 0.80 6\n B-PTP 1.00 1.00 1.00 2\n B-Q 0.00 0.00 0.00 1\n I-Q 0.00 0.00 0.00 1\n B-SUB 1.00 1.00 1.00 1\n B-SUPER 1.00 1.00 1.00 1\n B-stem 1.00 1.00 1.00 107\n I-stem 0.97 0.98 0.98 322\n\n micro avg 0.97 0.96 0.96 537\n macro avg 0.76 0.71 0.72 537\nweighted avg 0.96 0.96 0.96 537\n samples avg 0.96 0.96 0.96 537\n\n" ] ], [ [ "The model, with a 80/20 split, produces an average accuracy of 94% with a less than 2% range over randomized test data. This is significantly above the targeted accuracy of 80%. Table 1 shows the results of one run. \n\n|__label__|__precision__|__recall__|__f1-score__|__instances__|\n|---------|-------------|----------|------------|-------------|\n|B-AOR|1.00|0.88|0.94|17|\n|B-DAT|0.92|1.00|0.96|11|\n|B-ELAT|0.67|1.00|0.80|2|\n|B-ENT|0.33|0.50|0.40|2|\n|B-ERG|0.00|0.00|0.00|3|\n|B-FOC|0.86|1.00|0.92|6|\n|B-FUT|0.00|0.00|0.00|2|\n|B-GEN|0.50|0.33|0.40|6|\n|B-HORT|0.00|0.00|0.00|1|\n|I|0.95|0.99|0.97|480|\n|B-INESS|1.00|0.33|0.50|3|\n|B-MSDR|0.00|0.00|0.00|2|\n|B-NEG|1.00|1.00|1.00|1|\n|B-OBL|0.80|0.60|0.69|20|\n|B-PL|0.50|0.50|0.50|2|\n|B-POESS|1.00|1.00|1.00|3|\n|B-PTP|1.00|0.67|0.80|3|\n|B-SBST|1.00|0.50|0.67|2|\n|B-SUPER|0.67|1.00|0.80|2|\n|B-TEMP|0.00|0.00|0.00|1|\n|B-UNK|0.00|0.00|0.00|1|\n|B-stem|1.00|0.99|0.99|138|\n|__avg / total__|__0.94__|__0.94__|__0.94__|__708__|\n\n<center>Table 1: Results of morpheme predictions</center>\n\nAs might be expected, the classifier has less success predicting less frequent labels. This makes the results of the I labels (non-initial letters in a morpheme) surprising, until one considers that transitions between morphemes may not always be clear. Other results become more interesting with some knowledge of Lezgi morphology. The inessive (INESS) and ergative (ERG) case and the oblique stem morpheme (OBL) are identical. The only difference between the first two is the tendency of sentence position, even with Lezgi's free word order. The difference between the latter two is that the ergative morpheme is word final and the the oblique stem is follow by another case morpheme. 
\n\n|__precision__|__recall__|__f1-score__|\n|-----|------|------|\n|0.54|0.49|0.49|\n\n<center>Table 2: Average score of affix labels only.</center>\n\nThe classifier has most success identifying stem morphemes (STEM) and non-initial letters (I), the majority of which belong to stem morphemes. It has less success with identifying affixes. The classifier is clearly adept at splitting affixes from stems and this is already helpful to human annotators but it would be less helpful splitting strings of affixes and correcly glossing them. Table 2 shows average precision, recall, and f1-score of affix labels is much less accurate than the overall accuracy. This is most likely due in part to homonymic affixes and in part to the fewer instances of affixes compared to stems. As the more texts are correctly annotated with the help of the model, more data can be fed into the training, hopefully increasing the accuracy and incrementally speeding the annotation process.\n\nThe data was also run on a bidirectional sequence-to-sequence deep neural network with attention. The hidden layer size was set at 128, the batch size as 32, the teacher forcing ratio at 0.5. The results in Table 3 indicate that with a small amount of data a supervised classifier can produce equal or better results than a neural network.\n\n|epochs|accuracy|\n|------|--------|\n|50|0.57|\n|100|0.75|\n|200|0.90|\n|300|0.92|\n|__500__|__0.93__|\n|600|0.89|\n|1000|0.91|\n\n<center>Table 3: Results of deep neural network</center>", "_____no_output_____" ], [ "## What the Classifier Learned\n\nBy using methods of the crfsuite, we can look insider classifier and see what it learned. From the example printout in Table 3, we can see, for example, that the stem, elative (ELAT), imperfective (IMPF), aorist (AOR), perfective (PF), and plural (PL) morphemes most often consist of more than one letter but superessive (SUPER), oblique (OBL), and subessive (SUB) morphemes usually consist of just one letter. We can also see that temporal converb (TEMP) morpheme often follows the participle (PTP) morpheme, and another case morpheme tends to follow the oblique, superessive, and subessive case morphemes. These patterns correspond to the facts of Lezgi morphology. On the other hand, both Table 4 and Table 5 indciate that is highly likelythat a genitive case (GEN) morpheme will be a prefix, which is impossible. This indicates that the affix type (prefix or suffix) might be a useful feature to include.\n\n|-|-|-|weights|\n|---|---|----|-----|\n|B-SUPER| ->| B-ELAT| 4.820010|\n|B-OBL| ->| B-SPSS| 3.806645|\n|B-SUB| ->| B-ELAT| 3.444584|\n|B-OBL| ->| B-DAT| 2.946830|\n|B-stem| ->| I| 2.258064|\n|B-OBL| ->| B-GEN| 2.247354|\n|I| ->| B-OBL| 1.913825|\n|B-stem| ->| B-OBL| 1.862016|\n|B-ELAT| ->| I| 1.711584|\n|B-PTP| ->| B-TEMP| 1.620690|\n|B-IMPF| ->| I| 1.300227|\n|B-AOR| ->| I| 1.252594|\n|B-PERF| ->| I| 1.135483|\n|B-PL| ->| I| 1.043438|\n|B-GEN| ->| B-stem| 0.956780|\n\n<center>Table 4: Top most likely transitions</center>\n\nOn the other hand, Table 5, for example, indicates that the negative affix rarely follows a non-initial letter of another morpheme. This is accurate because the negative affix is the only prefix in the language. 
It is not surprising that the transition still has a greater than zero probability since it is often only one letter long and this letter may be found at the beginning of any word.\n\n|-|-|-|weights|\n|---|---|---|----|\n|B-ERG| ->| B-stem| 0.295926|\n|B-TEMP| ->| I| 0.254567|\n|B-SBST| ->| I| 0.249661|\n|I| ->| B-NEG| 0.221662|\n|B-INF| ->| B-stem| 0.196340|\n|I| ->| B-DAT| 0.057729|\n|B-NEG| ->| B-stem |0.013683|\n|I| ->| B-stem| 0.009557|\n|I| ->| B-ERG| 0.000074|\n|I| ->| B-SUPER| -0.000692|\n|I| ->| B-FOC| -0.003919|\n|I| ->| B-SBST| -0.023268|\n|B-OBL| ->| I| -0.034257|\n|B-INESS| ->| I| -0.157967|\n|I| ->| B-GEN| -1.180139|\n\n<center>Table 5: Top most unlikely transitions</center>", "_____no_output_____" ] ], [ [ "info = tagger.info()\n\ndef print_transitions(trans_features):\n '''Print info from the crfsuite.'''\n \n for (label_from, label_to), weight in trans_features:\n print(\"%-6s -> %-7s %0.6f\" % (label_from, label_to, weight))\n\nprint(\"Top likely transitions:\")\nprint_transitions(Counter(info.transitions).most_common(15))\n\nprint(\"\\nTop unlikely transitions:\")\nprint_transitions(Counter(info.transitions).most_common()[-15:])", "Top likely transitions:\nB-stem -> I-stem 7.338007\nB-ELAT -> I-ELAT 6.792164\nI-stem -> I-stem 6.099357\nB-TEMP -> I-TEMP 5.672273\nB-IMPF -> I-IMPF 5.637289\nB-INELAT -> I-INELAT 5.626798\nB-ENT -> I-ENT 5.488343\nB-AOR -> I-AOR 5.305114\nB-AOC -> I-AOC 5.273412\nB-FUT -> I-FUT 5.196156\nB-PERF -> I-PERF 5.178553\nB-OBL -> I-OBL 5.074970\nB-PL -> I-PL 5.025918\nB-POESS -> I-POESS 5.010790\nI-PERF -> I-PERF 4.984551\n\nTop unlikely transitions:\nI-ELAT -> B-stem 0.439823\nI-PL -> B-ERG 0.379337\nB-PTP -> B-TEMP 0.335421\nI-ERG -> B-stem 0.322590\nI-stem -> B-stem 0.293954\nI-ERG -> B-FOC 0.290467\nB-stem -> B-PL 0.199258\nI-ENT -> B-NEG 0.194905\nI-stem -> B-HORT 0.148437\nI-ENT -> B-PTP 0.104741\nB-SPSS -> B-stem 0.074526\nI-stem -> B-AOC 0.039212\nI-AOR -> B-stem 0.002330\nI-PERF -> B-PTP 0.001912\nI-stem -> B-GEN -0.713777\n" ] ], [ [ "We can make some observations about the state features. For example, Table 6 indicates that the model rightly recognized that the stem is nearly always at the beginning of the word and there are no consistent feature to identify the non-initial letters of various morphemes. \n\n|weight|label|feature|\n|---|---|----|\n|13.385742| B-stem| BOW|\n|6.80475| I| bias|\n|5.169367| B-PL| nxt2letters=<ур|\n|5.142534| B-DAT| letterLowercase=з|\n|4.858094| B-NEG| letterLowercase=ш|\n|4.568794| B-PTP| letterLowercase=й|\n|4.513613| B-PST| letterLowercase=й|\n|4.361416| B-ADSS| letterLowercase=в|\n|4.269127| B-PL| nxtletter=<р|\n|4.216564| B-FOC| nxtletter=<и|\n|4.203677| B-GEN| letterLowercase=н|\n|4.023482| B-INF| letterLowercase=з|\n|3.977504| B-IMPF| letterLowercase=з|\n|3.868088| B-NEG| letterLowercase=ч|\n|3.636859| B-FOC| letterLowercase=н|\n\n<center>Table 6: Top positive features</center>\n\nTable 7 indicates that certain letter sequences might be less likely to begin a morpheme. One interesting observation that could be easily confirmed by a corpus study is that the focus particle is least likely to occur on a verb than on any other lexical category. 
\n\n|weight|label|feature|\n|---|----|---|\n|-0.606766| I| prev2letters=ча>|\n|-0.679616| I| letterLowercase=ч|\n|-0.704380| I| prevletter=ш>|\n|-0.741532| I| prev2letters=ич>|\n|-0.833423| B-FOC| postag=v|\n|-0.937032| B-FOC| bias|\n|-1.029693| I| prev3letters=вал>|\n|-1.071785| I| nxtletter=<й|\n|-1.073034| I| prev3letters=гьу>|\n|-1.126576| I| prev2letters=ди>|\n|-1.150632| B-AOR| bias|\n|-1.201650| I| letterLowercase=н|\n|-1.240373| I| letterLowercase=з|\n|-1.250568| I| prevletter=р>|\n\n<center>Table 7: Top negative</center>", "_____no_output_____" ] ], [ [ "def print_state_features(state_features):\n for (attr, label), weight in state_features:\n print(\"%0.6f %-6s %s\" % (weight, label, attr)) \n\nprint(\"Top positive:\")\nprint_state_features(Counter(info.state_features).most_common(15))\n\nprint(\"\\nTop negative:\")\nprint_state_features(Counter(info.state_features).most_common()[-15:])", "Top positive:\n12.498452 B-stem BOW\n5.503005 B-DAT letterLowercase=з\n4.684468 B-GEN letterLowercase=н\n4.270364 B-ADSS letterLowercase=в\n4.201987 B-NEG letterLowercase=ш\n4.123131 I-PL letterLowercase=р\n3.740163 B-INF letterLowercase=з\n3.468910 B-MSDR letterLowercase=н\n3.321002 B-PL nxt2letters=<ур\n3.148994 B-PTP postag=ptcp\n3.123837 I-NEG.PST prevpostag=adj\n3.006395 B-HORT letterLowercase=н\n2.971788 B-PTP letterLowercase=й\n2.937903 B-PST letterLowercase=й\n2.905472 I-TEMP prevletter=л>\n\nTop negative:\n-0.808025 B-DAT postag=v\n-0.815216 I-stem letterLowercase=в\n-0.822264 I-stem prev3letters=ава>\n-0.839039 I-stem prevletter=д>\n-0.904920 I-stem nxt2letters=<ур\n-0.996087 I-stem prev2letters=ед>\n-0.996862 I-stem prev2letters=ча>\n-1.050426 I-PL postag=v\n-1.158361 I-stem prev2letters=уш>\n-1.293499 I-stem letterLowercase=з\n-1.349291 I-stem prevletter=н>\n-1.570701 I-stem letterLowercase=д\n-1.877090 B-OBL postag=v\n-1.973153 I-stem prev2letters=да>\n-2.218771 I-stem letterLowercase=й\n" ] ], [ [ "## Future steps\n\nThe goal of this project was to find a way to speed the work on annotator and improve their accuracy. Since the model reach over the 80% accuracy goal, there seems little reason to try to improve the features, although an examination of the transitions and state features point to a few adjustments that might increase accuracy. The bigggest problem seems to be the almost 50% reduction in predicting the affix glosses. However, the small number of instances found in the test data indicate that this will be improved as the amount of supervised examples increases. The model as it is can speed this increase.\n\nIt should be assumed that few annotators will have programming skills. This is especially true for speakers of minority languages which are often are in areas with limited educational opportunities. The results of this classifier should be checked and corrected by trained annotators. Ideally, this program would be exapnded to write the predicted breaks and glosses to an XML file compatible with FLEx or ELAN or another interface familiar to the annotator or easy to learn. In meantime, the data could be output to an CSV file and presented to the annotator as an spreadsheet.\n\nEven with carefully annotated training data by a linguist familiar with FLEX and Lezgi morphology, mistakes were made. A few POS tags and affix glosses were missing. This prevents the program from working, but does not tell the user where or what the missing data are. 
Pre-processing functions should be adjusted to present the troublesome morphemes and their glosses as a list to the user, so that they can be found and corrected using FLEx's concordance feature. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec791e697e5359214e6eb4e1e732309b62a4750d
5,116
ipynb
Jupyter Notebook
nlp/precenciales/HuggingfacePipeline.ipynb
carseven/master-ai
ae40dba9c8fb24de0ed6b8cbb381d640efb1a76e
[ "MIT" ]
null
null
null
nlp/precenciales/HuggingfacePipeline.ipynb
carseven/master-ai
ae40dba9c8fb24de0ed6b8cbb381d640efb1a76e
[ "MIT" ]
null
null
null
nlp/precenciales/HuggingfacePipeline.ipynb
carseven/master-ai
ae40dba9c8fb24de0ed6b8cbb381d640efb1a76e
[ "MIT" ]
1
2021-10-05T21:15:09.000Z
2021-10-05T21:15:09.000Z
37.072464
627
0.574277
[ [ [ "!pip install transformers\n!pip install sentencepiece\nimport sentencepiece\nfrom transformers import pipeline", "_____no_output_____" ], [ "nlp = pipeline('sentiment-analysis', model='finiteautomata/beto-sentiment-analysis')\nresult = nlp(\"Odio esta película\")\nprint(result)", "_____no_output_____" ], [ "nlp = pipeline(\"ner\", model=\"mrm8488/bert-spanish-cased-finetuned-ner\")\nprint(nlp(\"Me llamo Antonio García García. Vivo en Madrid y trabajo en Minsait, S.A.\"))", "_____no_output_____" ], [ "nlp = pipeline(\"summarization\", model=\"mrm8488/t5-base-finetuned-spa-squadv1\")\ndocument = \"Franz Peter Schubert, a pesar de su corta vida, dejó un gran legado, que incluye más de seiscientas obras vocales seculares (principalmente lieder), siete sinfonías completas, música sacra, óperas, música incidental y gran cantidad de obras para piano y música de cámara. Sus obras principales incluyen el Quinteto La trucha, la Sinfonía inacabada, la Sinfonía Grande, las tres últimas sonatas para piano (D. 958, 959 y 960), la ópera Fierrabras (D. 796), la música incidental de la obra de teatro Rosamunda (D. 797) y los ciclos de canciones La bella molinera (D. 795) y Viaje de invierno (D. 911).\"\nprint(nlp(document))\n", "_____no_output_____" ], [ "\nnlp = pipeline('question-answering', model='mrm8488/distill-bert-base-spanish-wwm-cased-finetuned-spa-squad2-es')\nresult = nlp({\n 'question': \"¿Quién ganó el Tour de Francia de 1991?\",\n 'context': \"Perico Delgado ganó el Tour de Francia en 1988. Poco después, en 1991, Miguel Induráin ganó su primer Tour de Francia. Después ganaría consecutivamente los de 1992, 1993, 1994 y 1995. \"\n})\nprint(result)\nresult = nlp({\n 'question': \"¿Quién ganó el Tour de Francia de 1988?\",\n 'context': \"Perico Delgado ganó el Tour de Francia en 1988. Poco después, en 1991, Miguel Induráin ganó su primer Tour de Francia. Después ganaría consecutivamente los de 1992, 1993, 1994 y 1995. \"\n})\nprint(result)", "_____no_output_____" ], [ "nlp = pipeline('translation', model='Helsinki-NLP/opus-mt-es-en')\nprint(nlp(\"Perico Delgado ganó el Tour de Francia en 1988. Poco después, en 1991, Miguel Induráin ganó su primer Tour de Francia. Después ganaría consecutivamente los de 1992, 1993, 1994 y 1995.\"))", "_____no_output_____" ], [ "nlp = pipeline('text-generation', model='datificate/gpt2-small-spanish')\nprint(nlp(\"En 1991 Miguel Induráin ganó su primer Tour de Francia\"))", "_____no_output_____" ], [ "from transformers import AutoModelForSequenceClassification, AutoTokenizer, TextClassificationPipeline\n\nmodel = AutoModelForSequenceClassification.from_pretrained(\"joeddav/distilbert-base-uncased-go-emotions-student\")\ntokenizer = AutoTokenizer.from_pretrained(\"joeddav/distilbert-base-uncased-go-emotions-student\")\nclassifier = TextClassificationPipeline(model=model, tokenizer=tokenizer, return_all_scores=True)\n\nresults = classifier([\"Everything will be fine!!\", \"We're going to have a really bad time...\"])\n\nprint(sorted(results[0], key=lambda x: x['score'], reverse=True))\nprint(sorted(results[1], key=lambda x: x['score'], reverse=True))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7958c52af8ccbc8f0e5d385741d658ee6ecd74
13,109
ipynb
Jupyter Notebook
.ipynb_checkpoints/(D1) simple statistics from a subset of the change-my-view corpus-checkpoint.ipynb
szmurlo/Cornell-Conversational-Analysis-Toolkit
c6ec53b60ee3c85f7edd40d134783b4cb8c3f649
[ "MIT" ]
null
null
null
.ipynb_checkpoints/(D1) simple statistics from a subset of the change-my-view corpus-checkpoint.ipynb
szmurlo/Cornell-Conversational-Analysis-Toolkit
c6ec53b60ee3c85f7edd40d134783b4cb8c3f649
[ "MIT" ]
null
null
null
.ipynb_checkpoints/(D1) simple statistics from a subset of the change-my-view corpus-checkpoint.ipynb
szmurlo/Cornell-Conversational-Analysis-Toolkit
c6ec53b60ee3c85f7edd40d134783b4cb8c3f649
[ "MIT" ]
null
null
null
38.330409
366
0.602105
[ [ [ "import os\nimport convokit\nfrom convokit import Corpus, Parser, PolitenessStrategies\nimport timeit\nimport re\nfrom numpy import mean\nfrom scipy import stats\n\n\nconvokitPath='C:\\\\Users\\\\Andrew\\\\Desktop\\\\Cornell-Conversational-Analysis-Toolkit\\\\'\ncorpusPath='C:\\\\Users\\\\Andrew\\\\Desktop\\\\CMV data\\\\change-my-view-corpus'\nos.chdir(convokitPath)", "_____no_output_____" ], [ "corpus = convokit.Corpus(corpusPath)", "_____no_output_____" ], [ "utterance_ids = corpus.get_utterance_ids()", "_____no_output_____" ], [ "len(utterance_ids)", "_____no_output_____" ] ], [ [ "Since this corpus is too large to parse with my laptop, we select the utterance ids for the groups of utterances that we are interested in. The following code finds the utterance ids for OP's comments (including the original post) and the challenger's comments (for both successful and unsuccessful arguments). Every other comment in the thread is excluded.\n\nNote: this subset of data is still larger than the 'pair_data.json' in the data provided by the changemyview paper (see readme for citation) because I have also matched the OP replies to the challenger's comments.", "_____no_output_____" ] ], [ [ "#we want the original post made by op, the challenger's comments and all of OP's responses to the challengers\n#these three lists are utterance ids for the original post, challenger comments and op replies respectively\nopPost=[]\nchallengerComments=[]\nopReplies=[]\nfor iD in utterance_ids:\n \n if corpus.get_utterance(iD).id==corpus.get_utterance(iD).root:\n opPost.append(iD)\n if corpus.get_utterance(iD).user.name != corpus.get_utterance(corpus.get_utterance(iD).root).user.name and corpus.get_utterance(iD).meta['success']==0:\n challengerComments.append(iD)\n if corpus.get_utterance(iD).user.name != corpus.get_utterance(corpus.get_utterance(iD).root).user.name and corpus.get_utterance(iD).meta['success']==1:\n challengerComments.append(iD)\n\n if corpus.get_utterance(iD).id!=corpus.get_utterance(iD).root and corpus.get_utterance(iD).user.name == corpus.get_utterance(corpus.get_utterance(iD).root).user.name and corpus.get_utterance(iD).meta['success']==0:\n opReplies.append(iD)\n if corpus.get_utterance(iD).id!=corpus.get_utterance(iD).root and corpus.get_utterance(iD).user.name == corpus.get_utterance(corpus.get_utterance(iD).root).user.name and corpus.get_utterance(iD).meta['success']==1:\n opReplies.append(iD)\n \n#subset challenger and op replies for later use (into successful and unsuccessful arguments)\nchallengerPos=[]\nchallengerNeg=[]\nfor iD in challengerComments:\n if corpus.get_utterance(iD).meta['success']==1:\n challengerPos.append(iD)\n if corpus.get_utterance(iD).meta['success']==0:\n challengerNeg.append(iD)\n#these are OP's replies to successful and unsuccessful challengers \nopReplyPos=[]\nopReplyNeg=[]\nfor iD in opReplies:\n if corpus.get_utterance(iD).meta['success']==1:\n opReplyPos.append(iD)\n if corpus.get_utterance(iD).meta['success']==0:\n opReplyNeg.append(iD)", "_____no_output_____" ], [ "subset=opPost+challengerComments+opReplies", "_____no_output_____" ], [ "#collect utterance dict given the subset of ids\nutterance_list=[]\nfor iD in subset:\n utterance_list.append(corpus.get_utterance(iD))", "_____no_output_____" ] ], [ [ "Create the subset corpus that we are interested (note: the original data from the paper only contained the challenger replies and the original post, nothing else was included -- I collected OP's reples from the 'all' data)", "_____no_output_____" ] ], 
[ [ "#this subset separates OP comments and challenger utterances from all other comments in every conversation (thread)\ncorpus = convokit.Corpus(utterances=utterance_list,version=1)", "_____no_output_____" ], [ "corpus.print_summary_stats()", "Number of Users: 4648\nNumber of Utterances: 14754\nNumber of Conversations: 2509\n" ], [ "len(challengerComments)", "_____no_output_____" ], [ "len(opReplies)", "_____no_output_____" ] ], [ [ "Simple statistics:", "_____no_output_____" ] ], [ [ "print(corpus.meta)\n\ncorpus.print_summary_stats()\n\nutts = list(corpus.iter_utterances()) #list of all uterrance objects in the corpus\nsucc_length = [] #length of all comments in succesful threads\nroot_succ_length = [] #length of successful root replies\nsucc_deltas = [] #num deltas given to users commenting in all succesful threads\nroot_succ_deltas = [] #num deltas given to root commenters in succesful threads\nunsucc_length = [] #length of all comments in unsuccesful threads\nroot_unsucc_length = [] #length of unsuccessful root replies \nunsucc_deltas = [] #num deltas given to users commenting in all unsuccesful threads\nroot_unsucc_deltas = [] #num deltas given to root commenters in unsuccessful threads\n\nfor i in utts:\n if i.root != i.id: #exclude the original post\n if i.meta['success'] == 1: #if succesful\n succ_length.append(len((i.text).split())) #num words\n if i.reply_to == i.root: #it's a root comment\n root_succ_length.append(len((i.text).split()))\n \n if i.meta['author_flair_text']:\n r = re.search(r'\\d+',i.meta['author_flair_text'])#number of delta given to author. Note: have not checked this regex\n if r: #ignore weird cases that don't fit pattern, e.g. inf\n succ_deltas.append(int(r.group())) \n if i.reply_to == i.root: #it's a root comment\n root_succ_deltas.append(int(r.group()))\n else:\n succ_deltas.append(0)\n if i.reply_to == i.root: #it's a root comment\n root_succ_deltas.append(0) \n else:\n unsucc_length.append(len((i.text).split())) #num words\n if i.reply_to == i.root: #it's a root comment\n root_unsucc_length.append(len((i.text).split()))\n \n if i.meta['author_flair_text']:\n r = re.search(r'\\d+',i.meta['author_flair_text'])\n if r: #ignore weird cases that don't fit pattern, e.g. 
inf\n unsucc_deltas.append(int(r.group()))\n if i.reply_to == i.root: #it's a root comment\n root_unsucc_deltas.append(int(r.group()))\n else:\n unsucc_deltas.append(0)\n if i.reply_to == i.root: #it's a root comment\n root_unsucc_deltas.append(0) \n \n\n#length of comments\nprint('Average number of words in a succesful comment is ' + str(mean(succ_length)))\nprint('Average number of words in an unsuccesful comment is ' + str(mean(unsucc_length)))\np_val = stats.ttest_ind(succ_length,unsucc_length,equal_var=False)[1] #using Welch's t-test, because I have no reason to assume variances are the same.\nprint('p-value for number of words is ' + str(p_val))\n\n#length of root comments\nprint('Average number of words in a succesful root comment is ' + str(mean(root_succ_length)))\nprint('Average number of words in an unsuccesful rootcomment is ' + str(mean(root_unsucc_length)))\np_val = stats.ttest_ind(root_succ_length,root_unsucc_length,equal_var=False)[1] #using Welch's t-test, because I have no reason to assume variances are the same.\nprint('p-value for number of words in root comments is ' + str(p_val))\n\n#deltas to commenters\nprint('Average number of deltas assigned to an author of a succesful comment is ' + str(mean(succ_deltas)))\nprint('Average number of deltas assigned to an author of a unsuccesful comment is ' + str(mean(unsucc_deltas)))\np_val = stats.ttest_ind(succ_deltas,unsucc_deltas,equal_var=False)[1] #using Welch's t-test, because I have no reason to assume variances are the same.\nprint('p-value for number of deltas assigned to commenters is ' + str(p_val))\n\n#deltas to root commenters\nprint('Average number of deltas assigned to an author of a succesful root comment is ' + str(mean(root_succ_deltas)))\nprint('Average number of deltas assigned to an author of a unsuccesful root comment is ' + str(mean(root_unsucc_deltas)))\np_val = stats.ttest_ind(root_succ_deltas,root_unsucc_deltas,equal_var=False)[1] #using Welch's t-test, because I have no reason to assume variances are the same.\nprint('p-value for number of deltas assigned to root commenters is ' + str(p_val))", "{}\nNumber of Users: 4648\nNumber of Utterances: 14754\nNumber of Conversations: 2509\nAverage number of words in a succesful comment is 178.56688918558078\nAverage number of words in an unsuccesful comment is 170.46014721345952\np-value for number of words is 0.013164020755471142\nAverage number of words in a succesful root comment is 279.62375448385814\nAverage number of words in an unsuccesful rootcomment is 214.5878836189717\np-value for number of words in root comments is 5.352967211621296e-29\nAverage number of deltas assigned to an author of a succesful comment is 7.111748998664886\nAverage number of deltas assigned to an author of a unsuccesful comment is 6.252576235541535\np-value for number of deltas assigned to commenters is 0.004002939827993687\nAverage number of deltas assigned to an author of a succesful root comment is 12.029095257074532\nAverage number of deltas assigned to an author of a unsuccesful root comment is 7.894380231167796\np-value for number of deltas assigned to root commenters is 2.8630260787064124e-14\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec795faf1fe93440e3ac24842e4f12b42451616d
2,904
ipynb
Jupyter Notebook
tutorial-contents-notebooks/303_build_nn_quickly.ipynb
paipeng/PyTorch-Tutorial
9626a06ecb4e04fe4425f9350a658014caeaa274
[ "MIT" ]
20
2018-07-27T15:14:44.000Z
2022-03-10T06:44:46.000Z
tutorial-contents-notebooks/303_build_nn_quickly.ipynb
paipeng/PyTorch-Tutorial
9626a06ecb4e04fe4425f9350a658014caeaa274
[ "MIT" ]
4
2020-08-16T03:27:03.000Z
2020-11-16T09:21:48.000Z
tutorial-contents-notebooks/303_build_nn_quickly.ipynb
paipeng/PyTorch-Tutorial
9626a06ecb4e04fe4425f9350a658014caeaa274
[ "MIT" ]
19
2018-07-27T07:42:22.000Z
2021-05-12T01:36:10.000Z
21.671642
87
0.496212
[ [ [ "# 303 Build NN Quickly\n\nView more, visit my tutorial page: https://morvanzhou.github.io/tutorials/\nMy Youtube Channel: https://www.youtube.com/user/MorvanZhou\n\nDependencies:\n* torch: 0.1.11", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn.functional as F", "_____no_output_____" ], [ "# replace following class code with an easy sequential network\nclass Net(torch.nn.Module):\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer\n self.predict = torch.nn.Linear(n_hidden, n_output) # output layer\n\n def forward(self, x):\n x = F.relu(self.hidden(x)) # activation function for hidden layer\n x = self.predict(x) # linear output\n return x", "_____no_output_____" ], [ "net1 = Net(1, 10, 1)", "_____no_output_____" ], [ "# easy and fast way to build your network\nnet2 = torch.nn.Sequential(\n torch.nn.Linear(1, 10),\n torch.nn.ReLU(),\n torch.nn.Linear(10, 1)\n)\n", "_____no_output_____" ], [ "print(net1) # net1 architecture\nprint(net2) # net2 architecture", "Net (\n (hidden): Linear (1 -> 10)\n (predict): Linear (10 -> 1)\n)\nSequential (\n (0): Linear (1 -> 10)\n (1): ReLU ()\n (2): Linear (10 -> 1)\n)\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec796ff69e9f7bd860fd5e8dc25af93e7f8c28f6
69,314
ipynb
Jupyter Notebook
biosignalsnotebooks_notebooks/Categories/MainFiles/contacts.ipynb
csavur/biosignalsnotebooks
c99596741a854c58bdefb429906023ac48ddc3b7
[ "MIT" ]
1
2020-06-26T05:05:11.000Z
2020-06-26T05:05:11.000Z
notebookToHtml/biosignalsnotebooks_html_publish/Categories/MainFiles/contacts.ipynb
csavur/biosignalsnotebooks
c99596741a854c58bdefb429906023ac48ddc3b7
[ "MIT" ]
null
null
null
notebookToHtml/biosignalsnotebooks_html_publish/Categories/MainFiles/contacts.ipynb
csavur/biosignalsnotebooks
c99596741a854c58bdefb429906023ac48ddc3b7
[ "MIT" ]
null
null
null
42.628536
5,243
0.504992
[ [ [ "<div style=\"background-color:black\">\n <table width=\"100%\">\n <tr>\n <td style=\"border-right:solid 3px #009EE3\" width=\"50%\">\n <img src=\"../../images/plux_logo.png\" width=\"50%\">\n </td>\n <td style=\"text-align:left\">\n <strong>Lisbon Office</strong>\n <br>\n Phone <i>(+351) 211 956 542</i>\n <br>\n Fax <i>(+351) 211 956 546</i>\n <br>\n Av. 5 de Outubro, 70 - 2&#186;\n <br>\n 1050-059 Lisboa\n <br><br>\n <strong>Support or Suggestions</strong>\n <br>\n E-mail <i><a src=\"mailto:[email protected]\">[email protected]</a></i>\n </td>\n </tr>\n </table>\n</div>", "_____no_output_____" ], [ "<span class=\"color6\">**Auxiliary Code Segment (should not be replicated by\nthe user)**</span>", "_____no_output_____" ] ], [ [ "from biosignalsnotebooks.__notebook_support__ import css_style_apply\ncss_style_apply()", "_____no_output_____" ], [ "%%html\n<script>\n // AUTORUN ALL CELLS ON NOTEBOOK-LOAD!\n require(\n ['base/js/namespace', 'jquery'], \n function(jupyter, $) {\n $(jupyter.events).on(\"kernel_ready.Kernel\", function () {\n console.log(\"Auto-running all cells-below...\");\n jupyter.actions.call('jupyter-notebook:run-all-cells-below');\n jupyter.actions.call('jupyter-notebook:save-notebook');\n });\n }\n );\n</script>", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ] ]
ec797dd50da240f72ca1ae3858bd1029e51ab160
222,484
ipynb
Jupyter Notebook
Covid19/COVID19XRAY.ipynb
naveeen684/Covid19--Xray-Interpretable-Machine-Learning-
6559ae55aee7232d0df069305ce125a0a4c3a7fc
[ "MIT" ]
4
2020-05-20T04:01:38.000Z
2020-09-02T05:40:28.000Z
Covid19/COVID19XRAY.ipynb
naveeen684/Covid19--Xray-Interpretable-Machine-Learning-
6559ae55aee7232d0df069305ce125a0a4c3a7fc
[ "MIT" ]
null
null
null
Covid19/COVID19XRAY.ipynb
naveeen684/Covid19--Xray-Interpretable-Machine-Learning-
6559ae55aee7232d0df069305ce125a0a4c3a7fc
[ "MIT" ]
null
null
null
146.951123
74,580
0.860844
[ [ [ "from keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications import VGG16\nfrom keras.layers import AveragePooling2D\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport cv2\nimport os", "_____no_output_____" ], [ "init_lr = 1e-3\nepochs = 25\nbatch_size = 8", "_____no_output_____" ], [ "import pandas as pd \ndf = pd.read_csv('metadata.csv') \ndf.head()\ndf.columns", "_____no_output_____" ], [ "fol=df[\"folder\"].astype(str)\nfilename=df[\"filename\"].astype(str)\ny=df[\"finding\"].astype(str)\ny", "_____no_output_____" ], [ "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport os\n\ndata=[]\nlabels=[]\n#dim=(500,500)\n\nfor i in range(len(fol)):\n if fol[i]=='images':\n path=os.path.join(fol[i],filename[i])\n img=cv2.imread(path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n image = cv2.resize(img, (224, 224))\n \n label = y[i]\n \n data.append(image)\n labels.append(label)\n", "_____no_output_____" ], [ "labels=[1 if i == 'COVID-19' else 0 for i in labels]\ndata = np.array(data) / 255.0\nlabels = np.array(labels)\nprint(labels)\n\nlb = LabelBinarizer()\nlabels = lb.fit_transform(labels)\nlabels = to_categorical(labels)\nprint(labels)", "[1 1 1 1 1 1 0 1 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1\n 1 1 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 0 1 1 1 1]\n[[0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [1. 0.]\n [0. 1.]\n [0. 1.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [1. 0.]\n [1. 0.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 
1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [1. 0.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [1. 0.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]]\n" ], [ "(trainX, testX, trainY, testY) = train_test_split(data, labels,\n test_size=0.20, stratify=labels, random_state=42)\n\ntrainAug = ImageDataGenerator(\n rotation_range=15,\n fill_mode=\"nearest\")", "_____no_output_____" ], [ "baseModel = VGG16(weights=\"vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5\", include_top=False,\n input_tensor=Input(shape=(224, 224, 3)))\n\n\nheadModel = baseModel.output\nheadModel = AveragePooling2D(pool_size=(4, 4))(headModel)\nheadModel = Flatten(name=\"flatten\")(headModel)\nheadModel = Dense(64, activation=\"relu\")(headModel)\nheadModel = Dropout(0.5)(headModel)\nheadModel = Dense(2, activation=\"softmax\")(headModel)\n\n\nmodel = Model(inputs=baseModel.input, outputs=headModel)\nmodel.load_weights('Hmodel.h5')\n\nfor layer in baseModel.layers:\n layer.trainable = False", "_____no_output_____" ], [ "print(\"compiling model...\")\nopt = Adam(lr=init_lr, decay=init_lr / epochs)\nmodel.compile(loss=\"binary_crossentropy\", optimizer=opt,\n metrics=[\"accuracy\"])\n\n\nprint(\"training head...\")\nH = model.fit_generator(\n trainAug.flow(data, labels, batch_size=batch_size),\n steps_per_epoch=len(trainX) // batch_size,\n validation_data=(testX, testY),\n validation_steps=len(testX) // batch_size,\n epochs=5)", "compiling model...\ntraining head...\nWARNING:tensorflow:Variable *= will be deprecated. 
Use variable.assign_mul if you want assignment to the variable value or 'x = x * y' if you want a new python Tensor object.\nEpoch 1/5\n22/22 [==============================] - 40s 2s/step - loss: 0.5362 - acc: 0.7122 - val_loss: 0.5743 - val_acc: 0.7391\nEpoch 2/5\n22/22 [==============================] - 45s 2s/step - loss: 0.4316 - acc: 0.8239 - val_loss: 0.5890 - val_acc: 0.7391\nEpoch 3/5\n22/22 [==============================] - 44s 2s/step - loss: 0.5214 - acc: 0.7326 - val_loss: 0.5602 - val_acc: 0.7826\nEpoch 4/5\n22/22 [==============================] - 44s 2s/step - loss: 0.4766 - acc: 0.7611 - val_loss: 0.5752 - val_acc: 0.7391\nEpoch 5/5\n22/22 [==============================] - 45s 2s/step - loss: 0.4940 - acc: 0.8011 - val_loss: 0.5456 - val_acc: 0.7609\n" ], [ "filename = 'Hmodel.h5'\nmodel.save_weights(filename)", "_____no_output_____" ], [ "filename='MModel.h5'\nmodel.save(filename)", "_____no_output_____" ], [ "from keras.models import load_model\n\nloaded_model=load_model('MModel.h5')\nloaded_model.predict(testX)", "_____no_output_____" ], [ "from lime import lime_image", "_____no_output_____" ], [ "explainer = lime_image.LimeImageExplainer()\ni=45", "_____no_output_____" ], [ "explanation = explainer.explain_instance(testX[i], loaded_model.predict, \n top_labels=2, hide_color=0, \n num_samples=200)", "100% |########################################################################|\n" ], [ "from skimage.segmentation import mark_boundaries", "_____no_output_____" ], [ "pred=loaded_model.predict(testX)\nprint(pred[i])\nprint(testY[i])", "[0.04639148 0.9536085 ]\n[0. 1.]\n" ], [ "temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], \n positive_only=True, num_features=5, hide_rest=True)\nplt.imshow(mark_boundaries(temp / 2 + 0.5, mask))", "_____no_output_____" ], [ "temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], \n positive_only=True, num_features=5, hide_rest=False)\nplt.imshow(mark_boundaries(temp / 2 + 0.5, mask))", "_____no_output_____" ], [ "\ntemp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=10, hide_rest=False)\nplt.imshow(mark_boundaries(temp / 2 + 0.5, mask))\n", "_____no_output_____" ], [ "temp, mask = explanation.get_image_and_mask(explanation.top_labels[1], positive_only=True, num_features=5, hide_rest=True)\nplt.imshow(mark_boundaries(temp / 2 + 0.5, mask))", "_____no_output_____" ], [ "Xray_out=loaded_model.predict(data)", "_____no_output_____" ], [ "Xray_out", "_____no_output_____" ], [ "from sklearn.preprocessing import LabelEncoder \ndf=df.fillna(df.mean())\nle = LabelEncoder() \n\nsex=df[\"sex\"].astype(str) \nsex= le.fit_transform(sex) \n\n\ncli=df[\"clinical notes\"].astype(str)\ncli= le.fit_transform(cli) \n\nother=df[\"other notes\"].astype(str)\nother= le.fit_transform(other) \n\ntemp=df[\"temperature\"].astype(float)\n\n\ndf[\"pO2 saturation\"].fillna(df[\"pO2 saturation\"].mean())\npo2=df[\"pO2 saturation\"].astype(float)\n\ndf[\"leukocyte count\"].fillna(df[\"leukocyte count\"].mean())\nleu=df[\"leukocyte count\"].astype(float)\n\ndf[\"neutrophil count\"].fillna(df[\"neutrophil count\"].mean())\nneu=df[\"neutrophil count\"].astype(float)\n\n\ndf[\"lymphocyte count\"].fillna(df[\"lymphocyte count\"].mean())\nlym=df[\"lymphocyte count\"].astype(float)", "_____no_output_____" ], [ "a=[]\nb=[]\nc=[]\nd=[]\ne=[]\nf=[]\ng=[]\nh=[]\n\nfor i in range(len(fol)):\n if fol[i]=='images':\n a.append(sex[i])\n b.append(cli[i])\n c.append(other[i])\n 
d.append(temp[i])\n e.append(po2)\n f.append(leu)\n g.append(neu)\n h.append(lym)\n \na=np.asarray(a)\nb=np.asarray(b)\nc=np.asarray(c)\nd=np.asarray(d)\ne=np.asarray(e)\nf=np.asarray(f)\ng=np.asarray(g)\nh=np.asarray(h)\nx=Xray_out.transpose()\nx1=x[0]\nx2=x[1]\nlist(x1)\nlist(x2)", "_____no_output_____" ], [ "x=np.column_stack((a,d,e,f,g,h,x1,x2))", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "(x_train, x_test, y_train, y_test) = train_test_split(x, labels,\n test_size=0.20, stratify=labels, random_state=42)\nx_test.shape", "_____no_output_____" ], [ "import sklearn.ensemble\nimport pickle\n\nloaded_model = sklearn.ensemble.RandomForestClassifier(n_estimators=500)\nloaded_model.fit(x_train, y_train)", "_____no_output_____" ], [ "result = loaded_model.score(x_test, y_test)\nprint(result)\nfilename = 'RF.sav'\npickle.dump(loaded_model, open(filename, 'wb'))\nfilename = 'RF.sav'\nloaded_model = pickle.load(open(filename, 'rb'))\nresult = loaded_model.score(x_test, y_test)\nprint(result)", "0.782608695652174\n0.782608695652174\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec797dfee375ea1cb701f667fb0fc2688d83dd3b
6,441
ipynb
Jupyter Notebook
scratch_nbs/00_data.transforms.ipynb
ArtificialSoftwareEngineering/InterpretingCodeGeneration
c861736a57f2df4a991fc0d951ad1aab705b3774
[ "Apache-2.0" ]
null
null
null
scratch_nbs/00_data.transforms.ipynb
ArtificialSoftwareEngineering/InterpretingCodeGeneration
c861736a57f2df4a991fc0d951ad1aab705b3774
[ "Apache-2.0" ]
null
null
null
scratch_nbs/00_data.transforms.ipynb
ArtificialSoftwareEngineering/InterpretingCodeGeneration
c861736a57f2df4a991fc0d951ad1aab705b3774
[ "Apache-2.0" ]
null
null
null
25.458498
112
0.482379
[ [ [ "# default_exp data.transforms", "_____no_output_____" ], [ "# hide\n%load_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ] ], [ [ "# Data Transforms\n\n> API details. @Nathan", "_____no_output_____" ] ], [ [ "# export\nimport re\n\nimport pandas as pd\n\nfrom random import shuffle\nfrom typing import Callable, Optional", "_____no_output_____" ], [ "# hide\nfrom nbdev.showdoc import *", "_____no_output_____" ] ], [ [ "## Semantic Preserving", "_____no_output_____" ] ], [ [ "# export\ndef java_comment_remover(mthd: str) -> str:\n \"\"\"\n Remove all comments from a given java method. Code from https://stackoverflow.com/a/241506/5768407.\n\n :param mthd: the method to have its comments removed\n :returns: returns the method with its comments removed\n \"\"\"\n def replacer(match):\n s = match.group(0)\n if s.startswith('/'):\n return \" \" # note: a space and not an empty string\n else:\n return s\n \n pattern = re.compile(\n r'//.*?$|</>\\*.*?\\*/|\\'(?:\\\\.|[^\\\\\\'])*\\'|\"(?:\\\\.|[^\\\\\"])*\"',\n re.DOTALL | re.MULTILINE\n )\n return re.sub(pattern, replacer, mthd)", "_____no_output_____" ] ], [ [ "## Non-Semantic Preserving", "_____no_output_____" ] ], [ [ "# export\ndef line_randomizer(mthd: str) -> str:\n \"\"\"\n Randomize the lines in a given method.\n\n :param mthd: the method to have its lines randomized\n :returns: returns the method with its lines randomized\n \"\"\"\n mthd = mthd.split('\\n')\n shuffle(mthd)\n \n return '\\n'.join(mthd)\n\ndef code_token_randomizer(mthd: str) -> str:\n \"\"\"\n Randomize the tokens in a given method.\n\n :param mthd: the method to have its code tokens randomized\n :returns: returns the method with its code tokens randomized\n \"\"\"\n mthd = mthd.split(' ')\n shuffle(mthd)\n \n return ' '.join(mthd)", "_____no_output_____" ], [ "# hide\nfrom icodegen.data import *\n\n# From: https://www.geeksforgeeks.org/methods-in-java/\ndf_fake = pd.DataFrame([\n '''public int addTwoInt(int a, int b){ \n \n // adding two integer value. \n sum = a + b; \n \n //returning summation of two values. \n return sum; \n }'''\n], columns = ['code']); df_fake", "_____no_output_____" ], [ "# export\ndef transform_df(df: pd.DataFrame, transform: Callable, n: Optional[int] = None) -> pd.DataFrame:\n \"\"\"\n Transform the given pandas dataframe using the given transformation.\n\n :param df: the dataframe containing each method to be transformed\n :param transform: the transformation that will be applied to each method in the dataframe\n :param n: the number of methods to evaluate. If none, the entire dataframe will be used\n :returns: returns a modified dataframe with the methods transformed\n \"\"\"\n if n is None: n = len(df)\n\n df = df.iloc[:n].copy()\n df.code = df.code.apply(transform)\n\n return df", "_____no_output_____" ], [ "NO_CMT_MTHD = '''public int addTwoInt(int a, int b){ \n \n \n sum = a + b; \n \n \n return sum; \n }'''\n\ndf_no_cmt = transform_df(df_fake, java_comment_remover)\n\nassert NO_CMT_MTHD == df_no_cmt.code.values[0]", "_____no_output_____" ], [ "import random\n\nNO_RND_LINES_MTHD = ''' \n return sum; \n //returning summation of two values. \n \n }\npublic int addTwoInt(int a, int b){ \n // adding two integer value. \n sum = a + b; '''\n\nrandom.seed(4)\ndf_rnd_lines = transform_df(df_fake, line_randomizer)\n\nassert NO_RND_LINES_MTHD == df_rnd_lines.code.values[0]", "_____no_output_____" ], [ "NO_RND_TOKS_MTHD = ''' values. 
a, two return adding \n int \n integer \n two public of } a sum; // b){ \n + //returning b; int \n \n \n value. sum addTwoInt(int summation = '''\n\nrandom.seed(4)\ndf_rnd_toks = transform_df(df_fake, code_token_randomizer)\n\nassert NO_RND_TOKS_MTHD == df_rnd_toks.code.values[0]", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
ec79880ed118f4c5d827ec61944e79548ba99c11
15,201
ipynb
Jupyter Notebook
examples/homomorphic-encryption/Tutorial_0_TenSEAL_Syft_Data_Owner.ipynb
znreza/PySyft
d396783b5ffbb1491e6b397199638b097319d012
[ "Apache-2.0" ]
null
null
null
examples/homomorphic-encryption/Tutorial_0_TenSEAL_Syft_Data_Owner.ipynb
znreza/PySyft
d396783b5ffbb1491e6b397199638b097319d012
[ "Apache-2.0" ]
null
null
null
examples/homomorphic-encryption/Tutorial_0_TenSEAL_Syft_Data_Owner.ipynb
znreza/PySyft
d396783b5ffbb1491e6b397199638b097319d012
[ "Apache-2.0" ]
null
null
null
34.391403
280
0.626472
[ [ [ "<img src=\"https://github.com/OpenMined/design-assets/raw/master/logos/OM/horizontal-primary-light.png\" alt=\"he-black-box\" width=\"600\"/>\n\n\n# Homomorphic Encryption using Duet: Data Owner\n## Tutorial 0: Basic operations\n\n\nWelcome!\nThis tutorial will show you how to use Duet with homomorphic encryption and some use cases. This notebook illustrates the Data Owner view on the operations.\n\nWe will focus on Duet's integration with [TenSEAL](https://github.com/OpenMined/TenSEAL). \nTenSEAL is a Python library for doing homomorphic encryption operations on tensors. It's built on top of [Microsoft SEAL](https://github.com/Microsoft/SEAL), a C++ library implementing the BFV and CKKS homomorphic encryption schemes.\n\n\nIf you want to learn more about TenSEAL, we recommend the following tutorials:\n- ['Tutorial 0 - Getting Started'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%200%20-%20Getting%20Started.ipynb).\n- ['Tutorial 1: Training and Evaluation of Logistic Regression on Encrypted Data'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%201%20-%20Training%20and%20Evaluation%20of%20Logistic%20Regression%20on%20Encrypted%20Data.ipynb).\n- ['Tutorial 2: Working with Approximate Numbers'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%202%20-%20Working%20with%20Approximate%20Numbers.ipynb).\n\n\nLet's now start the tutorial with a brief review of what homomorphic encryption is, but keep in mind that you don't need to be a crypto expert to use these features.", "_____no_output_____" ], [ "## Homomorphic Encryption\n\n__Definition__ : Homomorphic encryption (HE) is a technique that allows computations to be made on ciphertexts and generates results that when decrypted, corresponds to the result of the same computations made on plaintexts.\n\n<img src=\"https://github.com/OpenMined/TenSEAL/raw/master/tutorials/assets/he-black-box.png\" alt=\"he-black-box\" width=\"600\"/>\n\nThis means that an HE scheme lets you encrypt two numbers *X* and *Y*, add their encrypted versions so that it gets decrypted to *X + Y*, the addition could have been a multiplication as well. ", "_____no_output_____" ], [ "### Setup\n\nAll modules are imported here, make sure everything is installed by running the cell below.", "_____no_output_____" ] ], [ [ "import syft as sy\nimport tenseal as ts\nimport pytest\n\nsy.load_lib(\"tenseal\")", "_____no_output_____" ] ], [ [ "### Start Duet Data Owner instance", "_____no_output_____" ] ], [ [ "# Start Duet local instance\nduet = sy.launch_duet(loopback=True)", "_____no_output_____" ] ], [ [ "### Theory: Homomorphic encryption schemes\n\n__TenSEAL__ supports two encryption schemes:\n - __BFV__, a scheme for operations on integers.\n - __CKKS__, a scheme for operations on approximate numbers. This scheme is much better suited for ML applications and we will focus more on it.\n \nThere are a few major steps for each scheme:\n 1. __Keys Generation__: in this step, we generate public and private keys that will be used for encryption/decryption.\n 2. __Encryption__: this is the process of converting a plaintext into an encrypted ciphertext. This step requires an encryption key(or a public key).\n 3. __Decryption__: this is the process of converting a ciphertext back into a plaintext. This step requires a decryption key(or a secret key). 
This step cannot be done on the Data Scientist endpoint.", "_____no_output_____" ], [ "### Theory: Homomorphic encryption parameters\n\n__TenSEAL__ requires a few parameters to set the keys up:\n - __The polynomial modulus degree(poly_modulus_degree).__ This parameter directly affects the size of the ciphertext, the security of the scheme(bigger is better), but also the computational performance of the scheme(bigger is worse)\n - __The coefficient modulus sizes(coeff_mod_bit_sizes).__ This parameter is an array of bit sizes and directly affects the size of the ciphertext, the security of the scheme(bigger is worse), and the depth of computation allowed in the encrypted space(longer is better).\n - __The scaling factor(global_scale).__ This parameter is only used for the approximate schemes(CKKS) and directly affects the precision of computation and decryption.", "_____no_output_____" ], [ "### Theory: Homomorphic encryption keys\n\n__TenSEAL__ generates a few keys internally, each with another use case:\n - __The Private Key(or the secret/decryption key)__. This key is used for decrypting ciphertexts, and it is used to derive the other keys. __DO NOT SHARE IT OUTSIDE THE DATA OWNER PROCESS__.\n - __The Public key(or the encryption key)__. This key is used for encrypting the plain data to a ciphertext. You can safely share it with the Data Scientist.\n - __The Relinearization Keys(optional)__. This key is used for controlling the quality of the ciphertexts after encrypted multiplications. Generate it only if you are doing encrypted multiplications. You can safely share it with the Data Scientist.\n - __The Galois Keys(optional)__. This key is needed to perform encrypted vector rotation operations on ciphertexts. Generate it only if you are evaluating convolutions on encrypted data. 
You can safely share it with the Data Scientist.", "_____no_output_____" ], [ "### TenSEAL Context\n\nNow that we had a short introduction, let's get to work.\n\nThe first step to do for a Data Owner is to generate a security context containing security parameters and encryption keys.", "_____no_output_____" ] ], [ [ "context = ts.Context(\n ts.SCHEME_TYPE.CKKS,\n poly_modulus_degree=8192,\n coeff_mod_bit_sizes=[60, 40, 40, 60]\n)\ncontext.global_scale = 2**40\ncontext", "_____no_output_____" ] ], [ [ "### Encrypt the data\n", "_____no_output_____" ] ], [ [ "v1 = [0, 1, 2, 3, 4]\nv2 = [4, 3, 2, 1, 0]\n\nenc_v1 = ts.ckks_vector(context, v1)\nenc_v2 = ts.ckks_vector(context, v2)\n(enc_v1, enc_v2)", "_____no_output_____" ] ], [ [ "### Make Context and Encrypted Vectors Referenceable over Duet", "_____no_output_____" ] ], [ [ "# tag them so our partner can easily reference it\nctx_ptr = context.send(duet, pointable=True, tags=[\"context\"])\nenc_v1_ptr = enc_v1.send(duet, pointable=True, tags=[\"enc_v1\"])\nenc_v2_ptr = enc_v2.send(duet, pointable=True, tags=[\"enc_v2\"])", "_____no_output_____" ], [ "# we can see that our three objects are now inside the store we control\nduet.store.pandas", "_____no_output_____" ] ], [ [ "### <img src=\"https://github.com/OpenMined/design-assets/raw/master/logos/OM/mark-primary-light.png\" alt=\"he-black-box\" width=\"100\"/> Checkpoint 1 : Now STOP and run the Data Scientist notebook until the same checkpoint.", "_____no_output_____" ] ], [ [ "# We can see our duet partner has requested the two encrypted vectors and the public context\nduet.requests.pandas", "_____no_output_____" ] ], [ [ "### Approve the requests", "_____no_output_____" ] ], [ [ "duet.requests[0].accept()\nduet.requests[0].accept()\nduet.requests[0].accept()", "_____no_output_____" ], [ "# The requests should have been handled\nduet.requests.pandas", "_____no_output_____" ] ], [ [ "### <img src=\"https://github.com/OpenMined/design-assets/raw/master/logos/OM/mark-primary-light.png\" alt=\"he-black-box\" width=\"100\"/> Checkpoint 2 : Now STOP and run the Data Scientist notebook until the same checkpoint.", "_____no_output_____" ], [ "### Get the computation results from store and decrypt them locally", "_____no_output_____" ] ], [ [ "# Validate the encrypted add\n\nresult_add = duet.store[\"result_add\"].get(delete_obj=False)\nresult_add.link_context(context)\n\nresult_add", "_____no_output_____" ], [ "decrypted_result = result_add.decrypt()\nassert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, v2)]\n\ndecrypted_result", "_____no_output_____" ], [ "# Validate the encrypted - plain add\n\nresult_iadd = duet.store[\"result_iadd\"].get(delete_obj=False)\nresult_iadd.link_context(context)\n\ndecrypted_result = result_iadd.decrypt()\nassert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, [10, 10, 10, 10, 10])]\n\ndecrypted_result", "_____no_output_____" ], [ "# Validate the encrypted subtraction\n\nresult_sub = duet.store[\"result_sub\"].get(delete_obj=False)\nresult_sub.link_context(context)\n\ndecrypted_result = result_sub.decrypt()\nassert pytest.approx(decrypted_result, abs=10**-3) == [v1 - v2 for v1, v2 in zip(v1, v2)]\n\ndecrypted_result", "_____no_output_____" ], [ "# Validate the encrypted multiplication\n\nresult_mul = duet.store[\"result_mul\"].get(delete_obj=False)\nresult_mul.link_context(context)\n\ndecrypted_result = result_mul.decrypt()\nassert pytest.approx(decrypted_result, abs=10**-3) == [v1 * v2 for v1, v2 in 
zip(v1, v2)]\n\ndecrypted_result", "_____no_output_____" ], [ "# Validate the encrypted power\n\nresult_pow = duet.store[\"result_pow\"].get(delete_obj=False)\nresult_pow.link_context(context)\n\ndecrypted_result = result_pow.decrypt()\nassert pytest.approx(decrypted_result, abs=10**-3) == [v ** 3 for v in v1]\n\ndecrypted_result", "_____no_output_____" ], [ "# Validate the encrypted negation\n\nresult_neg = duet.store[\"result_neg\"].get(delete_obj=False)\nresult_neg.link_context(context)\n\ndecrypted_result = result_neg.decrypt()\nassert pytest.approx(decrypted_result, abs=10**-3) == [-v for v in v1]\n\ndecrypted_result", "_____no_output_____" ], [ "# Validate the encrypted polynomial evaluation for 1 + X^2 + X^3\n\nresult_poly = duet.store[\"result_poly\"].get(delete_obj=False)\nresult_poly.link_context(context)\n\ndecrypted_result = result_poly.decrypt()\nassert pytest.approx(decrypted_result, abs=10**-3) == [1 + v**2 + v**3 for v in v1]\n\ndecrypted_result", "_____no_output_____" ] ], [ [ "### <img src=\"https://github.com/OpenMined/design-assets/raw/master/logos/OM/mark-primary-light.png\" alt=\"he-black-box\" width=\"100\"/> Checkpoint 3 : Well done!", "_____no_output_____" ], [ "# Congratulations!!! - Time to Join the Community!\n\nCongratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways!\n\n### Star PySyft and TenSEAL on GitHub\n\nThe easiest way to help our community is just by starring the Repos! This helps raise awareness of the cool tools we're building.\n\n- [Star PySyft](https://github.com/OpenMined/PySyft)\n- [Star TenSEAL](https://github.com/OpenMined/TenSEAL)\n\n### Join our Slack!\n\nThe best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at [http://slack.openmined.org](http://slack.openmined.org). #lib_tenseal and #code_tenseal are the main channels for the TenSEAL project.\n\n### Donate\n\nIf you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups!\n\n[OpenMined's Open Collective Page](https://opencollective.com/openmined)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
ec79a53efb1db70e45e19eceee2287492ea7ddeb
27,880
ipynb
Jupyter Notebook
7.23.ipynb
poiujng/python
c1c164d514f97de575e41868e015f1776c03c911
[ "Apache-2.0" ]
null
null
null
7.23.ipynb
poiujng/python
c1c164d514f97de575e41868e015f1776c03c911
[ "Apache-2.0" ]
null
null
null
7.23.ipynb
poiujng/python
c1c164d514f97de575e41868e015f1776c03c911
[ "Apache-2.0" ]
null
null
null
20.484938
343
0.440531
[ [ [ "# 对象和类\n- 一个学生,一张桌子,一个圆都是对象\n- 对象是类的一个实例,你可以创建多个对象,创建类的一个实例过程被称为实例化,\n- 在Python中对象就是实例,而实例就是对象", "_____no_output_____" ], [ "## 定义类\nclass ClassName:\n\n do something\n \n- class 类的表示与def 一样\n- 类名最好使用驼峰式\n- 在Python2中类是需要继承基类object的,在Python中默认继承,可写可不写\n- 可以将普通代码理解为皮肤,而函数可以理解为内衣,那么类可以理解为外套", "_____no_output_____" ] ], [ [ "class bobo:\n pass", "_____no_output_____" ], [ "bobo", "_____no_output_____" ] ], [ [ "## 定义一个不含初始化__init__的简单类\nclass ClassName:\n\n joker = “Home”\n \n def func():\n print('Worker')\n \n- 尽量少使用", "_____no_output_____" ] ], [ [ "#一切类必须要初始化#在类中,所有函数的第一个参数,都是标识类,不是参数\n#一切函数必须初始化\nclass bobo:\n def __init__ (self):\n print('我是初始化')", "_____no_output_____" ], [ "bobo()", "我是初始化\n" ], [ "class bobo:\n def __init__ (self):\n print('我是初始化')\n def print_(self):\n print('hello world')", "_____no_output_____" ], [ "ff=bobo()", "我是初始化\n" ], [ "class bobo:\n def __init__ (self):\n print('我是初始化')\n def print_(self,name):\n print('hello world',name)", "_____no_output_____" ], [ "ff=bobo()", "我是初始化\n" ], [ "ff.print_( 'name100')", "hello world name100\n" ], [ "class bobo:\n def __init__ (self):\n print('我是初始化')\n self.num1=num1\n self.num2=num2\n def print_(self,name):\n print('hello world',name)\n def SUM(self):\n return self.num1+self.num2\n def cheng(self):\n returnself.num1*self.num2", "_____no_output_____" ], [ "ff()", "_____no_output_____" ] ], [ [ "\n\n## 定义一个标准类\n- __init__ 代表初始化,可以初始化任何动作\n- 此时类调用要使用(),其中()可以理解为开始初始化\n- 初始化内的元素,类中其他的函数可以共享\n![](../Photo/85.png)", "_____no_output_____" ] ], [ [ "class Joker:\n def __init__(self):\n print('我开始初始化')\n def print_(self,name):\n print(\"Hello world\",name)\n def sum(self,num1,num2):\n return num1 + num2\n def cheng(self,num1,num2):\n return num1 * num2", "_____no_output_____" ], [ "huwang = Joker()", "我开始初始化\n" ], [ "huwang.print_(\"name100\")", "Hello world name100\n" ], [ "huwang.sum(1,2)", "_____no_output_____" ], [ "huwang.cheng(2,3)", "_____no_output_____" ], [ "#一切类必须要初始化#在类中,所有函数的第一个参数,都是标识类,不是参数\n#一切函数必须初始化\n#若某一个参数需要使用多次,那么可以将其统一放在初始化函数中\nclass Joker:\n def __init__(self,num1,num2):\n print('我开始初始化')\n self.num1 = num1\n self.num2 = num2\n def print_(self,name):\n print(\"Hello world\",name)\n def sum(self):\n return self.num1 + self.num2\n def cheng(self):\n return self.num1 * self.num2", "_____no_output_____" ], [ "huwang=Joker(num1=1,num2=2)", "我开始初始化\n" ], [ "huwang.print_(\"100\")", "Hello world 100\n" ], [ "huwang.sum()", "_____no_output_____" ], [ "huwang.cheng()", "_____no_output_____" ] ], [ [ "- Circle 和 className_ 的第一个区别有 __init__ 这个函数\n- 。。。。 第二个区别,类中的每一个函数都有self的这个“参数”", "_____no_output_____" ], [ "## 何为self?\n- self 是指向对象本身的参数\n- self 只是一个命名规则,其实可以改变的,但是我们约定俗成的是self,也便于理解\n- 使用了self就可以访问类中定义的成员\n<img src=\"../Photo/86.png\"></img>", "_____no_output_____" ], [ "## 使用类 Cirlcle", "_____no_output_____" ], [ "## 类的传参\n- class ClassName:\n \n def __init__(self, para1,para2...):\n \n self.para1 = para1\n \n self.para2 = para2", "_____no_output_____" ], [ "## EP:\n- A:定义一个类,类中含有两个功能:\n - 1、计算随机数的最大值\n - 2、计算随机数的最小值\n- B:定义一个类,(类中函数的嵌套使用)\n - 1、第一个函数的功能为:输入一个数字\n - 2、第二个函数的功能为:使用第一个函数中得到的数字进行平方处理\n - 3、第三个函数的功能为:得到平方处理后的数字 - 原来输入的数字,并打印结果", "_____no_output_____" ] ], [ [ "import random", "_____no_output_____" ], [ "\na=random.randint(0,10)\nb=random.randint(0,10)\nc=random.randint(0,10)", "_____no_output_____" ], [ "class joker2:\n def __init__(self,a,b,c):\n self.a=a\n self.b=b\n self.c=c\n def max_(self):\n return max(self.a,self.b,self.c)\n def min_(self):\n return min(self.a,self.b,self.c)", 
"_____no_output_____" ], [ "SB=joker2(a,b,c)", "_____no_output_____" ], [ "class joker3:\n def __init__(self):\n pass\n def input_(self):\n num = eval(input('>>'))\n return num\n def square(self):\n num=self.input_()\n return num ** 2\n def chazhi(self):\n pass", "_____no_output_____" ], [ "class joker3:\n def __init__(self):\n pass\n def input_(self):\n self.num = eval(input('>>'))\n def square(self):\n self.num_2=self.num ** 2\n def chazhi(self):\n res = self.num_2 - self.num\n return res", "_____no_output_____" ], [ "huwang2 = joker3()", "_____no_output_____" ], [ "huwang2.input_()", ">>10\n" ], [ "huwang2.square()", "_____no_output_____" ], [ "class joker3:\n def __init__(self):\n pass\n def input_(self):\n num = eval(input('>>'))\n return num\n def square(self):\n num_2=self.input_()\n num_2 = num**2\n return num_2\n def chazhi(self):\n num = self.input_()\n num2 = self.square()\n return num2-num\n", "_____no_output_____" ], [ "huwang2 = joker3()", "_____no_output_____" ], [ "huwang2.input_()", ">>10\n" ], [ "class QQ:\n def __init__(self):\n self.account = '123'\n self.password = '123'\n def account_(self):\n acc = input('account:>>')\n password = input('password:>>')\n if acc == self.account and self.password == password:\n print('Success')\n else:\n print('Failed')\n \n def yanzhengma(self):", "_____no_output_____" ], [ "qq=QQ()", "_____no_output_____" ], [ "qq.account_()", "account:>>123\npassword:>>234\nFailed\n" ], [ "class QQ:\n def __init__(self):\n self.account = '123'\n self.password = '123'\n def account_(self):\n acc = input('account:>>')\n password = input('password:>>')\n if acc == self.account and self.password == password:\n print('Success')\n else:\n self.yanzhengma()\n def yanzhengma(self):\n yanzhen='ppp'\n print('验证码是:',yanzhen)\n while 1:\n N = input('请输入验证码:>>')\n if N == yanzhen:\n print('验证码正确')\n print('账号或密码错误')\n break", "_____no_output_____" ], [ "qq=QQ()", "_____no_output_____" ], [ "qq.account_()", "account:>>123\npassword:>>234\n验证码是: ppp\n请输入验证码:>>ppp\n验证码正确\n账号或密码错误\n" ] ], [ [ "## 类的继承\n- 类的单继承\n- 类的多继承\n- 继承标识\n> class SonClass(FatherClass):\n \n def __init__(self):\n \n FatherClass.__init__(self)", "_____no_output_____" ] ], [ [ "class fu(object):\n def __init__(self):\n self.a = 'a'\n self.b = 'b'\n def print_(self):\n print('fu')", "_____no_output_____" ], [ "class mu:\n def __init__(self):\n self.a = 'c'", "_____no_output_____" ], [ "class zi(fu):\n def __init__(self):\n #告诉父类子类即将继承父类\n fu.__init__(self)\n def haha(self):\n print(self.a)\n self.print_()", "_____no_output_____" ], [ "joker4 = zi()", "_____no_output_____" ], [ "joker4.haha()", "a\nfu\n" ], [ "#俩个下划线为私有变量", "_____no_output_____" ] ], [ [ "## 私有数据域(私有变量,或者私有函数)\n- 在Python中 变量名或者函数名使用双下划线代表私有 \\__Joker, def \\__Joker():\n- 私有数据域不可继承\n- 私有数据域强制继承 \\__dir__()", "_____no_output_____" ], [ "![](../Photo/87.png)", "_____no_output_____" ] ], [ [ "joker.__dir__()", "_____no_output_____" ], [ "@staticmethod", "_____no_output_____" ], [ "class fu(object):\n def __init__(self):\n self.a = 'a'\n self.b = 'b'\n @staticmethod#静态参数\n def print_(self):\n print(self)\n print('fu')", "_____no_output_____" ], [ "class fu(object):\n def __init__(self):\n self.a = 'a'\n self.b = 'b'\n @staticmethod#静态参数\n def print_():\n print(hahaha)", "_____no_output_____" ], [ "joker = fu()", "_____no_output_____" ], [ "joker.print_('jhaha')", "_____no_output_____" ] ], [ [ "## EP:\n![](../Photo/88.png)\n![](../Photo/89.png)\n![](../Photo/90.png)\n", "_____no_output_____" ], [ "## 类的其他\n- 类的封装\n - 
实际上就是将一类功能放在一起,方便未来进行管理\n- 类的继承(上面已经讲过)\n- 类的多态\n - 包括装饰器:将放在以后处理高级类中教\n - 装饰器的好处:当许多类中的函数需要使用同一个功能的时候,那么使用装饰器就会方便许多\n - 装饰器是有固定的写法\n - 其包括普通装饰器与带参装饰器", "_____no_output_____" ], [ "# Homewor\n## UML类图可以不用画\n## UML 实际上就是一个思维图\n- 1\n![](../Photo/91.png)", "_____no_output_____" ] ], [ [ "class Rectangle:\n def __init__(self,width=1,heigthd=2):\n self.width = width\n self.heigthd = heigthd\n def getArea(self):\n area = self.width * self.heigthd\n print(self.width)\n print(self.heigthd)\n print(\"面积为>>:\",area)\n def getzhouchang(self):\n zhouchang = 2*self.width + 2*self.heigthd\n print(self.width)\n print(self.heigthd)\n print(\"周长为>>:\",zhouchang)", "_____no_output_____" ], [ "gao = Rectangle()", "_____no_output_____" ], [ "gao = Rectangle(3.5,35.7)", "_____no_output_____" ], [ "gao.getArea()\ngao.getzhouchang()", "3.5\n35.7\n面积为>>: 124.95000000000002\n3.5\n35.7\n周长为>>: 78.4\n" ] ], [ [ "- 2\n![](../Photo/92.png)", "_____no_output_____" ], [ "- 3\n![](../Photo/93.png)", "_____no_output_____" ], [ "- 4\n![](../Photo/94.png)\n![](../Photo/95.png)", "_____no_output_____" ] ], [ [ "import math\nclass Point():\n def __init__(self):\n pass\n def input_(self):\n num = eval(input('>>'))\n return num\n def chang_(self):\n num = self.input_()\n num_2 = num\n return num_2\n def ji(self):\n num = self.input_()\n d = num /(4 * math.tan(math.pi/5))\n return d", "_____no_output_____" ], [ "yy=Point()", "_____no_output_____" ], [ "yy.input_()", ">>6\n" ], [ "yy.chang_()", ">>0\n" ] ], [ [ "- 5\n![](../Photo/96.png)", "_____no_output_____" ] ], [ [ "from scipy import linalg\nimport numpy as np\n\n# x1 + x2 + 7*x3 = 2\n# 2*x1 + 3*x2 + 5*x3 = 3\n# 4*x1 + 2*x2 + 6*x3 = 4\n\nA = np.array([[1, 1, 7], [2, 3, 5], [4, 2, 6]]) # A代表系数矩阵\nb = np.array([2, 3, 4]) # b代表常数列\nx = linalg.solve(A, b)\nprint(x)", "_____no_output_____" ] ], [ [ "- 6\n![](../Photo/97.png)", "_____no_output_____" ] ], [ [ "def cross_point(line1,line2):#计算交点函数\n x1=line1[0]#取四点坐标\n y1=line1[1]\n x2=line1[2]\n y2=line1[3]\n \n x3=line2[0]\n y3=line2[1]\n x4=line2[2]\n y4=line2[3]\n \n k1=(y2-y1)*1.0/(x2-x1)#计算k1,由于点均为整数,需要进行浮点数转化\n b1=y1*1.0-x1*k1*1.0#整型转浮点型是关键\n if (x4-x3)==0:#L2直线斜率不存在操作\n k2=None\n b2=0\n else:\n k2=(y4-y3)*1.0/(x4-x3)#斜率存在操作\n b2=y3*1.0-x3*k2*1.0\n if k2==None:\n x=x3\n else:\n x=(b2-b1)*1.0/(k1-k2)\n y=k1*x*1.0+b1*1.0\n return [x,y]", "_____no_output_____" ], [ "line1=[1,1,-1,-1]\nline2=[-1,1,1,-1]\nprint(cross_point(line1, line2))", "[0.0, 0.0]\n" ] ], [ [ "- 7\n![](../Photo/98.png)", "_____no_output_____" ] ], [ [ "class lei:\n def __init__(self,a,b,c,d,e,f):\n self.a = a\n self.b = b\n self.c = c\n self.d = d\n self.e = e\n self.f = f\n def jie_(self):\n x = (self.e * self.d - self.b * self.f)/(self.a * self.d - self.b * self.c)\n y = (self.a * self.f - self.e * self.c)/(self.a * self.d - self.b * self.c)\n if (self.a * self.d - self.b * self.c) == 0:\n print('无解')\n else:\n print(x,y)", "_____no_output_____" ], [ "kk=lei(1,2,4,5,8,9)", "_____no_output_____" ], [ "kk.jie_()", "-7.333333333333333 7.666666666666667\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec79b5b68f995ac5fd95ce3f2f68678ae1d372ff
54,149
ipynb
Jupyter Notebook
Chapter3-1.ipynb
huiselilun/LBM_Applications
2af107683d63995e5cc960e029c1dc508ee3f6ea
[ "MIT" ]
3
2020-06-30T03:11:47.000Z
2021-01-16T07:01:17.000Z
Chapter3-1.ipynb
huiselilun/LBM_Applications
2af107683d63995e5cc960e029c1dc508ee3f6ea
[ "MIT" ]
null
null
null
Chapter3-1.ipynb
huiselilun/LBM_Applications
2af107683d63995e5cc960e029c1dc508ee3f6ea
[ "MIT" ]
1
2021-01-16T07:01:23.000Z
2021-01-16T07:01:23.000Z
140.646753
14,108
0.867329
[ [ [ "# 恒温无限大板中的热扩散\n\n## 问题 1\n假设板的初始温度$T=0$,当时间 $t>= 0$ 时,板的左侧面受到高温$T=1.0$,板长为100。设$\\alpha =0.25$,计算$t=200$时,板中的温度分布,并比较LBM和FDM两种方法的结果。", "_____no_output_____" ], [ "## A.1.1 The LBM Code (D1Q2)", "_____no_output_____" ] ], [ [ "# LBM Code for 1-D, diffusion problem, D1Q2\n\nimport numpy as np\nimport matplotlib.pyplot as pl\n% matplotlib inline\n\nm = 100 # m is the number of lattice nodes\n\nf1 = np.zeros(m+1,dtype=float)\nf2 = np.zeros(m+1,dtype=float)\nrho = np.zeros(m+1,dtype=float)\nfeq = np.zeros(m+1,dtype=float)\nx = np.zeros(m+1,dtype=float)\n\ndt = 1.0\ndx = 1.0\nx[0] = 0.0\n\nfor i in range(1,m+1):\n x[i] = x[i-1] + dx\n\ncsq = dx*dx/(dt*dt)\nalpha = 0.25\nomega = 1.0/(alpha/(dt*csq)+0.5)\nmstep = 200 # The total number of time steps\ntwall = 1.0 # Left hand wall temperature\n\n# Initial condition\nfor i in range(0,m+1):\n rho[i] = 0.0 # Initial value of the domain temperature\n f1[i] = 0.5 * rho[i]\n f2[i] = 0.5 * rho[i]\n\n# main loop\nfor kk in range(1,mstep+1): # time loop\n # collision process:\n for i in range(0,m+1): # nodes loop\n rho[i] = f1[i] + f2[i]\n feq[i] = 0.5 * rho[i]\n \n # since k1 = k2 = 0.5, then feq1 = feq2 = feq\n f1[i] = (1.0 - omega)*f1[i] + omega*feq[i]\n f2[i] = (1.0 - omega)*f2[i] + omega*feq[i]\n \n # streaming process:\n for i in range(1,m): # nodes loop\n f1[m-i] = f1[m-i-1] # f1 streaming\n f2[i-1] = f2[i] # f2 streaming\n \n # Boundary condition\n f1[0] = twall - f2[0] # constant temperature boundary condition, x = 0\n f1[m] = f1[m-1] # adiabatic(绝热的) boundary condition, x = L\n f2[m] = f2[m-1] # adiabatic boundary condition, x = L\n \n# end of the main loop\n\n#for i in range(0,m+1):\n# print(x[i],\" \",rho[i])", "_____no_output_____" ], [ "pl.plot(x,rho)", "_____no_output_____" ] ], [ [ "## A.1.2 The FDM Code (1-D)\n\n显示格式的差分方程为Eqs. 3.8,如下所示\n$$T_i^{n+1} = T_i^n + \\frac{\\alpha \\Delta t}{\\Delta x^2}(T_{i+1}^n - 2 T_i^n + T_{i-1}^n)$$", "_____no_output_____" ] ], [ [ "# Finite Difference Code for 1-D diffusion problems\n\nimport numpy as np\nimport matplotlib.pyplot as pl\n% matplotlib inline\n\nm = 100 # m is the number of lattice nodes\nfo = np.zeros(m+1,dtype=float)\nf = np.zeros(m+1,dtype=float)\n\ndx = 1.0\ndt = 0.5\nalpha = 0.25\nmstep = 400\n \nfo[0] = 1.0 # initial condition for old value of f at x=0\nf[0] = 1.0 # initial condition for updated value of f at x=0\nfo[m] = fo[m-1] # initial condition for old value of f at x=L\nf[m] = f[m-1] # initial condition for uodated value of f at x=L\n\nfor kk in range(1,mstep+1):\n # main loop\n for i in range(1,m):\n f[i] = fo[i] + dt*alpha*(fo[i+1]-2.0*fo[i]+fo[i-1])/(dx*dx) # Eqs. 
3.8\n \n for i in range(1,m):\n fo[i] = f[i] # updating\n \n fo[m] = f[m-1] # updating the boundary condition at x=L\n # end of the main loop\n \nx_fdm = np.zeros(m+1,dtype=float)\n\nfor i in range(0,m+1):\n # print(x_fdm[i],\" \",f[i])\n if i < m:\n x_fdm[i+1] = x_fdm[i] + dx\n \npl.plot(x_fdm,f)", "_____no_output_____" ] ], [ [ "## 结果对比", "_____no_output_____" ] ], [ [ "pl.plot(x,rho,'bo-',label='LBM')\npl.plot(x_fdm,f,'rD-',label='FDM')\npl.xlabel('x') \npl.ylabel('T') \npl.legend()\npl.xlim(0,30)\npl.ylim(0,1)\npl.show ", "_____no_output_____" ] ], [ [ "## 问题 2\n假设板的初始温度$T=0$,当时间 $t>= 0$ 时,板的左侧边界有$100 W/m^2$的恒热流密度存在,板长为100,板的热导系数为$20W/mK$。设$\\alpha =0.25$,计算$t=200$时,板中的温度分布。", "_____no_output_____" ], [ "## The LBM Code (D1Q2)", "_____no_output_____" ] ], [ [ "# LBM Code for 1-D, diffusion problem, D1Q2\n\nimport numpy as np\nimport matplotlib.pyplot as pl\n% matplotlib inline\n\nm = 100 # m is the number of lattice nodes\n\nf1 = np.zeros(m+1,dtype=float)\nf2 = np.zeros(m+1,dtype=float)\nrho = np.zeros(m+1,dtype=float)\nfeq = np.zeros(m+1,dtype=float)\nx = np.zeros(m+1,dtype=float)\n\ndt = 1.0\ndx = 1.0\nx[0] = 0.0\n\nfor i in range(1,m+1):\n x[i] = x[i-1] + dx\n\ncsq = dx*dx/(dt*dt)\nalpha = 0.25\nomega = 1.0/(alpha/(dt*csq)+0.5)\nmstep = 200 # The total number of time steps\ntwall = 1.0 # Left hand wall temperature\nflux = 200 # Heat flux W/m^2\ntk = 20 # Thermal conductivity W/mKW/mK\n\n# Initial condition\nfor i in range(0,m+1):\n rho[i] = 0.0 # Initial value of the domain temperature\n f1[i] = 0.5 * rho[i]\n f2[i] = 0.5 * rho[i]\n\n# main loop\nfor kk in range(1,mstep+1): # time loop\n # collision process:\n for i in range(0,m+1): # nodes loop\n rho[i] = f1[i] + f2[i]\n feq[i] = 0.5 * rho[i]\n \n # since k1 = k2 = 0.5, then feq1 = feq2 = feq\n f1[i] = (1.0 - omega)*f1[i] + omega*feq[i]\n f2[i] = (1.0 - omega)*f2[i] + omega*feq[i]\n \n # streaming process:\n for i in range(1,m): # nodes loop\n f1[m-i] = f1[m-i-1] # f1 streaming\n f2[i-1] = f2[i] # f2 streaming\n \n # Boundary condition\n f1[0] = f1[1] + f2[1] - f2[0] + flux*dx/tk # constant heat flux boundary condition, x = 0\n f1[m] = f1[m-1] # adiabatic(绝热的) boundary condition, x = L\n f2[m] = f2[m-1] # adiabatic boundary condition, x = L\n \n# end of the main loop\n\n#for i in range(0,m+1):\n# print(x[i],\" \",rho[i])", "_____no_output_____" ], [ "pl.plot(x,rho)\npl.xlabel('x') \npl.ylabel('T') \npl.xlim(0,30)\npl.show ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
ec79b6e754e1fc4246b37893976c98ab42c2e78b
41,397
ipynb
Jupyter Notebook
Police_Data.ipynb
amber5634/Analyzing-the-Police-data
d4a873fd790ed8a8b84f04c460e40a891f00168f
[ "Apache-2.0" ]
null
null
null
Police_Data.ipynb
amber5634/Analyzing-the-Police-data
d4a873fd790ed8a8b84f04c460e40a891f00168f
[ "Apache-2.0" ]
null
null
null
Police_Data.ipynb
amber5634/Analyzing-the-Police-data
d4a873fd790ed8a8b84f04c460e40a891f00168f
[ "Apache-2.0" ]
null
null
null
54.043081
10,236
0.67186
[ [ [ "### Importing Pandas for data reading and data manipulation", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "### Reading data", "_____no_output_____" ] ], [ [ "data = pd.read_csv(\"C:\\\\Users\\\\ambar\\\\Downloads\\\\3. Police Data.csv\")", "_____no_output_____" ] ], [ [ "### Reading first few rows and columns", "_____no_output_____" ] ], [ [ "data.head()", "_____no_output_____" ] ], [ [ "### Total no of rows and features", "_____no_output_____" ] ], [ [ "data.shape", "_____no_output_____" ] ], [ [ "### Data type of each variable", "_____no_output_____" ] ], [ [ "data.dtypes", "_____no_output_____" ] ], [ [ "### Some basic information of data", "_____no_output_____" ] ], [ [ "data.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 65535 entries, 0 to 65534\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 stop_date 65535 non-null object \n 1 stop_time 65535 non-null object \n 2 country_name 0 non-null float64\n 3 driver_gender 61474 non-null object \n 4 driver_age_raw 61481 non-null float64\n 5 driver_age 61228 non-null float64\n 6 driver_race 61475 non-null object \n 7 violation_raw 61475 non-null object \n 8 violation 61475 non-null object \n 9 search_conducted 65535 non-null bool \n 10 search_type 2479 non-null object \n 11 stop_outcome 61475 non-null object \n 12 is_arrested 61475 non-null object \n 13 stop_duration 61475 non-null object \n 14 drugs_related_stop 65535 non-null bool \ndtypes: bool(2), float64(3), object(10)\nmemory usage: 6.6+ MB\n" ] ], [ [ "### Q. Is there any null values present or not ?", "_____no_output_____" ] ], [ [ "data.isnull().any()", "_____no_output_____" ] ], [ [ "### Total count of null values of each features", "_____no_output_____" ] ], [ [ "data.isnull().sum()", "_____no_output_____" ] ], [ [ "### Columns names", "_____no_output_____" ] ], [ [ "data.columns", "_____no_output_____" ] ], [ [ "### Dropping the column country_name as all values are null\n### Use of inplace to make sure it is permanently dropped", "_____no_output_____" ] ], [ [ "data.drop(columns = 'country_name' , inplace = True)", "_____no_output_____" ] ], [ [ "### Checking if that column is dropped or not ?", "_____no_output_____" ] ], [ [ "data.head()", "_____no_output_____" ] ], [ [ "### Checking Gender wise Speeding Violation", "_____no_output_____" ] ], [ [ "data[data.violation == 'Speeding'].driver_gender.value_counts()", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "data[data.violation == 'Speeding'].driver_gender.value_counts().\\\nplot(kind = 'bar' , xlabel = 'Gender' , ylabel = 'Count' , title = 'Gender wise Traffic Rules violation' )", "_____no_output_____" ] ], [ [ "### Gender wise Search Conducted by Police", "_____no_output_____" ] ], [ [ "data.groupby('driver_gender').search_conducted.sum()\\\n.plot(title = 'Gender wise search conducted by police', kind = 'bar' , xlabel = 'Gender' , ylabel = 'Count')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec79d3b32de68220097b63715d256f3423d52704
357,423
ipynb
Jupyter Notebook
CIS8005_Project_Final.ipynb
SakethValiveti/CIS8005_Project
43a6a47a9552edf9ed7167bc178bd5e6ff9388f9
[ "MIT" ]
null
null
null
CIS8005_Project_Final.ipynb
SakethValiveti/CIS8005_Project
43a6a47a9552edf9ed7167bc178bd5e6ff9388f9
[ "MIT" ]
null
null
null
CIS8005_Project_Final.ipynb
SakethValiveti/CIS8005_Project
43a6a47a9552edf9ed7167bc178bd5e6ff9388f9
[ "MIT" ]
null
null
null
131.164404
105,816
0.821587
[ [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns", "_____no_output_____" ], [ "# Read in the data set\nolympics = pd.read_csv('athlete_events.csv')\nolympics.head()", "_____no_output_____" ], [ "print(olympics.isnull().sum())", "ID 0\nName 0\nSex 0\nAge 9474\nHeight 60171\nWeight 62875\nTeam 0\nNOC 0\nGames 0\nYear 0\nSeason 0\nCity 0\nSport 0\nEvent 0\nMedal 231333\ndtype: int64\n" ], [ "olympics['Medal'].fillna('P', inplace = True)", "_____no_output_____" ], [ "print(olympics.isnull().sum())", "ID 0\nName 0\nSex 0\nAge 9474\nHeight 60171\nWeight 62875\nTeam 0\nNOC 0\nGames 0\nYear 0\nSeason 0\nCity 0\nSport 0\nEvent 0\nMedal 0\ndtype: int64\n" ], [ "print(olympics.loc[:, ['NOC', 'Team']].drop_duplicates()['NOC'].value_counts().head())", "FRA 160\nUSA 97\nGBR 96\nSWE 52\nNOR 46\nName: NOC, dtype: int64\n" ], [ "# Lets read in the noc_country mapping first\nnoc_country = pd.read_csv('noc_regions.csv')\nnoc_country.drop('notes', axis = 1 , inplace = True)\nnoc_country.rename(columns = {'region':'Country'}, inplace = True)\n\nnoc_country.head()", "_____no_output_____" ], [ "# merging\nolympics_merge = olympics.merge(noc_country,\n left_on = 'NOC',\n right_on = 'NOC',\n how = 'left')", "_____no_output_____" ], [ "olympics_merge.loc[olympics_merge['Country'].isnull(),['NOC', 'Team']].drop_duplicates()", "_____no_output_____" ], [ "# Replace missing Teams by the values above.\n#olympics_merge.loc[olympics_merge['Country'].isnull(), ['Country']] = olympics_merge['Team']\n\nolympics_merge['Country'] = np.where(olympics_merge['NOC']=='SGP', 'Singapore', olympics_merge['Country'])\nolympics_merge['Country'] = np.where(olympics_merge['NOC']=='ROT', 'Refugee Olympic Athletes', olympics_merge['Country'])\nolympics_merge['Country'] = np.where(olympics_merge['NOC']=='UNK', 'Unknown', olympics_merge['Country'])\nolympics_merge['Country'] = np.where(olympics_merge['NOC']=='TUV', 'Tuvalu', olympics_merge['Country'])\n\n\n# Put these values from Country into Team\nolympics_merge.drop('Team', axis = 1, inplace = True)\nolympics_merge.rename(columns = {'Country': 'Team'}, inplace = True)", "_____no_output_____" ], [ "# Glance at the data.\nw_gdp = pd.read_csv('world_gdp.csv', skiprows = 3)\n\n# Remove unnecessary columns\nw_gdp.drop(['Indicator Name', 'Indicator Code'], axis = 1, inplace = True)\n\n# The columns are the years for which the GDP has been recorded. 
This needs to brought into a single column for efficient\n# merging.\nw_gdp = pd.melt(w_gdp, id_vars = ['Country Name', 'Country Code'], var_name = 'Year', value_name = 'GDP')\n\n# convert the year column to numeric\nw_gdp['Year'] = pd.to_numeric(w_gdp['Year'])\n\nw_gdp.head()", "_____no_output_____" ], [ "len(list(set(olympics_merge['NOC'].unique()) - set(w_gdp['Country Code'].unique())))", "_____no_output_____" ], [ "len(list(set(olympics_merge['Team'].unique()) - set(w_gdp['Country Name'].unique())))", "_____no_output_____" ], [ "# Merge to get country code\nolympics_merge_ccode = olympics_merge.merge(w_gdp[['Country Name', 'Country Code']].drop_duplicates(),\n left_on = 'Team',\n right_on = 'Country Name',\n how = 'left')\n\nolympics_merge_ccode.drop('Country Name', axis = 1, inplace = True)\n\n# Merge to get gdp too\nolympics_merge_gdp = olympics_merge_ccode.merge(w_gdp,\n left_on = ['Country Code', 'Year'],\n right_on = ['Country Code', 'Year'],\n how = 'left')\n\nolympics_merge_gdp.drop('Country Name', axis = 1, inplace = True)", "_____no_output_____" ], [ "# Read in the population data\nw_pop = pd.read_csv('world_pop.csv')\n\nw_pop.drop(['Indicator Name', 'Indicator Code'], axis = 1, inplace = True)\n\nw_pop = pd.melt(w_pop, id_vars = ['Country', 'Country Code'], var_name = 'Year', value_name = 'Population')\n\n# Change the Year to integer type\nw_pop['Year'] = pd.to_numeric(w_pop['Year'])\n\nw_pop.head()", "_____no_output_____" ], [ "olympics_complete = olympics_merge_gdp.merge(w_pop,\n left_on = ['Country Code', 'Year'],\n right_on= ['Country Code', 'Year'],\n how = 'left')\n\nolympics_complete.drop('Country', axis = 1, inplace = True)\n\nolympics_complete.head()", "_____no_output_____" ], [ "olympics_complete.isnull().sum()", "_____no_output_____" ], [ "# Lets take data from 1961 onwards only and for summer olympics only\nolympics_complete_subset = olympics_complete.loc[(olympics_complete['Year'] > 1960) & (olympics_complete['Season'] == \"Summer\"), :]\n\n# Reset row indices\nolympics_complete_subset = olympics_complete_subset.reset_index()", "_____no_output_____" ], [ "olympics_complete_subset['Medal_Won'] = np.where(olympics_complete_subset.loc[:,'Medal'] == 'P', 0, 1)", "_____no_output_____" ], [ "# Check whether number of medals won in a year for an event by a team exceeds 1. This indicates a team event.\nidentify_team_events = pd.pivot_table(olympics_complete_subset,\n index = ['Team', 'Year', 'Event'],\n columns = 'Medal',\n values = 'Medal_Won',\n aggfunc = 'sum',\n fill_value = 0).drop('P', axis = 1).reset_index()\n\nidentify_team_events = identify_team_events.loc[identify_team_events['Gold'] > 1, :]\n\nteam_sports = identify_team_events['Event'].unique()", "_____no_output_____" ], [ "# if an event name matches with one in team sports, then it is a team event. 
Others are singles events.\nteam_event_mask = olympics_complete_subset['Event'].map(lambda x: x in team_sports)\nsingle_event_mask = [not i for i in team_event_mask]\n\n# rows where medal_won is 1\nmedal_mask = olympics_complete_subset['Medal_Won'] == 1\n\n# Put 1 under team event if medal is won and event in team event list\nolympics_complete_subset['Team_Event'] = np.where(team_event_mask & medal_mask, 1, 0)\n\n# Put 1 under singles event if medal is won and event not in team event list\nolympics_complete_subset['Single_Event'] = np.where(single_event_mask & medal_mask, 1, 0)\n\n# Add an identifier for team/single event\nolympics_complete_subset['Event_Category'] = olympics_complete_subset['Single_Event'] + \\\nolympics_complete_subset['Team_Event']", "_____no_output_____" ], [ "medal_tally_agnostic = olympics_complete_subset.\\\ngroupby(['Year', 'Team', 'Event', 'Medal'])[['Medal_Won', 'Event_Category']].\\\nagg('sum').reset_index()\n\nmedal_tally_agnostic['Medal_Won_Corrected'] = medal_tally_agnostic['Medal_Won']/medal_tally_agnostic['Event_Category']", "_____no_output_____" ], [ "# Medal Tally.\nmedal_tally = medal_tally_agnostic.groupby(['Year','Team'])['Medal_Won_Corrected'].agg('sum').reset_index()\n\nmedal_tally_pivot = pd.pivot_table(medal_tally,\n index = 'Team',\n columns = 'Year',\n values = 'Medal_Won_Corrected',\n aggfunc = 'sum',\n margins = True).sort_values('All', ascending = False)[1:5]\n\n# print total medals won in the given period\nmedal_tally_pivot.loc[:,'All']", "_____no_output_____" ], [ "# List of top countries\ntop_countries = ['USA', 'Russia', 'Germany', 'China']\n\nyear_team_medals = pd.pivot_table(medal_tally,\n index = 'Year',\n columns = 'Team',\n values = 'Medal_Won_Corrected',\n aggfunc = 'sum')[top_countries]\n\n# plotting the medal tallies\nyear_team_medals.plot(linestyle = '-', marker = 'o', alpha = 0.9, figsize = (10,8), linewidth = 2)\nxlabel('Olympic Year')\nylabel('Number of Medals')\ntitle('Olympic Performance Comparison')", "_____no_output_____" ], [ "# List of top countries\ntop_countries = ['USA', 'Russia', 'Germany', 'China']\n\n# row mask where countries match\nrow_mask_2 = medal_tally_agnostic['Team'].map(lambda x: x in top_countries)\n\n# Pivot table to calculate sum of gold, silver and bronze medals for each country\nmedal_tally_specific = pd.pivot_table(medal_tally_agnostic[row_mask_2],\n index = ['Team'],\n columns = 'Medal',\n values = 'Medal_Won_Corrected',\n aggfunc = 'sum',\n fill_value = 0).drop('P', axis = 1)\n\n# Re-order the columns so that they appear in order on the chart.\nmedal_tally_specific = medal_tally_specific.loc[:, ['Gold', 'Silver', 'Bronze']]\n\nmedal_tally_specific.plot(kind = 'bar', stacked = True, figsize = (8,6), rot = 0)\nxlabel('Number of Medals')\nylabel('Country')", "_____no_output_____" ], [ "# To get the sports, teams are best at, we now aggregate the medal_tally_agnostic dataframe as we did earlier.\nbest_team_sports = pd.pivot_table(medal_tally_agnostic[row_mask_2],\n index = ['Team', 'Event'],\n columns = 'Medal',\n values = 'Medal_Won_Corrected',\n aggfunc = 'sum',\n fill_value = 0).sort_values(['Team', 'Gold'], ascending = [True, False]).reset_index()\n\nbest_team_sports.drop(['Bronze', 'Silver', 'P'], axis = 1, inplace = True)\nbest_team_sports.columns = ['Team', 'Event', 'Gold_Medal_Count']\n\nbest_team_sports.groupby('Team').head(5)", "_____no_output_____" ], [ "row_mask_3 = olympics_complete_subset['Team'].map(lambda x: x in top_countries)\n\n# Get year wise team wise athletes.\nyear_team_athelete = 
olympics_complete_subset.loc[row_mask_3, ['Year','Team', 'Name']].drop_duplicates()\n\n# sum these up to get total contingent size.\ncontingent_size = pd.pivot_table(year_team_athelete,\n index = 'Year',\n columns = 'Team',\n values = 'Name',\n aggfunc = 'count')\n\nfig, ((ax1, ax2), (ax3, ax4)) = subplots(nrows = 2,\n ncols = 2,\n figsize = (20,12))\n\nfig.subplots_adjust(hspace = 0.3)\n\n# Plot australia's medal tally and contingent size\ncontingent_size['China'].plot(ax = ax1, linestyle = '-', marker = 'o', linewidth = 2, color = 'red', \n label = 'Contingent Size')\nyear_team_medals['China'].plot(ax = ax1, linestyle = '-', marker = 'o', linewidth = 2, color = 'black',\n label = 'Medal Tally')\nax1.plot(2008, contingent_size.loc[2008, 'China'], marker = '^', color = 'red', ms = 14)\nax1.plot(2008, year_team_medals.loc[2008, 'China'], marker = '^', color = 'black', ms = 14)\nax1.set_xlabel('Olympic Year')\nax1.set_ylabel('Number of Athletes/Medal Tally')\nax1.set_title('Team China\\nContingent Size vs Medal Tally')\nax1.legend(loc = 'best')\n\n# Plot USA's medal tally and contingent size\ncontingent_size['USA'].plot(ax = ax2, linestyle = '-', marker = 'o', linewidth = 2, color = 'blue',\n label = 'Contingent Size')\nyear_team_medals['USA'].plot(ax = ax2, linestyle = '-', marker = 'o', linewidth = 2, color = 'black',\n label = 'Medal Tally')\nax2.plot(1984, contingent_size.loc[1984, 'USA'], marker = '^', color = 'blue', ms = 14)\nax2.plot(1984, year_team_medals.loc[1984, 'USA'], marker = '^', color = 'black', ms = 14)\nax2.set_xlabel('Olympic Year')\nax2.set_ylabel('Number of Athletes/Medal Tally')\nax2.set_title('Team USA\\nContingent Size vs Medal Tally')\nax2.legend(loc = 'best')\n\n# Plot Germany's medal tally and contingent size\ncontingent_size['Germany'].plot(ax = ax3, linestyle = '-', marker = 'o', linewidth = 2, color = 'green',\n label = 'Contingent Size')\nyear_team_medals['Germany'].plot(ax = ax3, linestyle = '-', marker = 'o', linewidth = 2, color = 'black',\n label = 'Medal Tally')\nax3.plot(1972, year_team_medals.loc[1972, 'Germany'], marker = '^', color = 'black', ms = 14)\nax3.plot(1972, contingent_size.loc[1972, 'Germany'], marker = '^', color = 'green', ms = 14)\nax3.set_xlabel('Olympic Year')\nax3.set_ylabel('Number of Athletes/Medal Tally')\nax3.set_title('Team Germany\\nContingent Size vs Medal Tally')\nax3.legend(loc = 'best')\n\n# Plot Russia's medal tally and contingent size\ncontingent_size['Russia'].plot(ax = ax4, linestyle = '-', marker = 'o', linewidth = 2, color = 'orange',\n label = 'Contingent Size')\nyear_team_medals['Russia'].plot(ax = ax4, linestyle = '-', marker = 'o', linewidth = 2, color = 'black',\n label = 'Medal Tally')\nax4.plot(1980, contingent_size.loc[1980, 'Russia'], marker = '^', color = 'orange', ms = 14)\nax4.plot(1980, year_team_medals.loc[1980, 'Russia'], marker = '^', color = 'black', ms = 14)\nax4.set_xlabel('Olympic Year')\nax4.set_ylabel('Number of Athletes/Medal Tally')\nax4.set_title('Team Russia\\nContingent Size vs Medal Tally')\nax4.legend(loc = 'best')\n\nshow()", "_____no_output_____" ], [ "# Lets merge contingent size and medals won!\nyear_team_medals_unstack = year_team_medals.unstack().reset_index()\nyear_team_medals_unstack.columns = ['Team','Year', 'Medal_Count']\n\ncontingent_size_unstack = contingent_size.unstack().reset_index()\n\ncontingent_size_unstack.columns = ['Team','Year', 'Contingent']\n\ncontingent_medals = contingent_size_unstack.merge(year_team_medals_unstack,\n left_on = ['Team', 'Year'],\n right_on = 
['Team', 'Year'])\n\ncontingent_medals[['Contingent', 'Medal_Count']].corr()\n\n", "_____no_output_____" ], [ "# merge best team sports with olympics data to get sport for each event.\nteam_commonalities = best_team_sports.merge(olympics_complete_subset.loc[:,['Sport', 'Event']].drop_duplicates(),\n left_on = 'Event',\n right_on = 'Event')\n\nteam_commonalities = team_commonalities.sort_values(['Team', 'Gold_Medal_Count'], ascending = [True, False])\nteam_commonalities = team_commonalities.groupby('Team').head(5).reset_index()\n\n# make a pivot table of the commonalities.\npd.pivot_table(team_commonalities,\n index = 'Sport',\n columns = 'Team',\n values = 'Event',\n aggfunc = 'count',\n fill_value = 0,\n margins = True).sort_values('All', ascending = False)[1:]", "_____no_output_____" ], [ "olympics_complete_subset[['Year', 'City']].drop_duplicates().sort_values('Year')", "_____no_output_____" ], [ "# Correct city names in the dataset\nolympics_complete_subset['City'].replace(['Athina', 'Moskva'], ['Athens', 'Moscow'], inplace = True)\n\n# city to country mapping dictionary\ncity_to_country = {'Tokyo': 'Japan',\n 'Mexico City': 'Mexico',\n 'Munich': 'Germany',\n 'Montreal': 'Canada',\n 'Moscow': 'Russia',\n 'Los Angeles': 'USA',\n 'Seoul': 'South Korea',\n 'Barcelona': 'Spain',\n 'Atlanta': 'USA',\n 'Sydney': 'Australia',\n 'Athens': 'Greece',\n 'Beijing': 'China',\n 'London': 'UK',\n 'Rio de Janeiro': 'Brazil'}\n\n# Map cities to countries\nolympics_complete_subset['Country_Host'] = olympics_complete_subset['City'].map(city_to_country)\n\n#print the \nolympics_complete_subset.loc[:, ['Year', 'Country_Host']].drop_duplicates().sort_values('Year')\n\n# Extract year, host nation and team name from the data\nyear_host_team = olympics_complete_subset[['Year', 'Country_Host', 'Team']].drop_duplicates()\n\n# check rows where host country is the same as team\nrow_mask_4 = (year_host_team['Country_Host'] == year_host_team['Team'])\n\n# add years in the year_host_team to capture one previous and one later year\nyear_host_team['Prev_Year'] = year_host_team['Year'] - 4\nyear_host_team['Next_Year'] = year_host_team['Year'] + 4\n\n# Subset only where host nation and team were the same\nyear_host_team = year_host_team[row_mask_4]\n\n# Calculate the medals won in each year where a team played at home. 
merge year_host_team with medal_tally on year and team\nyear_host_team_medal = year_host_team.merge(medal_tally,\n left_on = ['Year', 'Team'],\n right_on = ['Year', 'Team'],\n how = 'left')\n\nyear_host_team_medal.rename(columns = {'Medal_Won_Corrected' : 'Medal_Won_Host_Year'}, inplace = True)\n\n# Calculate medals won by team in previous year\nyear_host_team_medal = year_host_team_medal.merge(medal_tally,\n left_on = ['Prev_Year', 'Team'],\n right_on = ['Year', 'Team'],\n how = 'left')\n\nyear_host_team_medal.drop('Year_y', axis = 1, inplace = True)\nyear_host_team_medal.rename(columns = {'Medal_Won_Corrected': 'Medal_Won_Prev_Year',\n 'Year_x':'Year'}, inplace = True)\n\n# Calculate the medals won by the team the year after they hosted.\nyear_host_team_medal = year_host_team_medal.merge(medal_tally,\n left_on = ['Next_Year', 'Team'],\n right_on = ['Year', 'Team'],\n how = 'left')\n\nyear_host_team_medal.drop('Year_y', axis = 1, inplace = True)\nyear_host_team_medal.rename(columns = {'Year_x': 'Year',\n 'Medal_Won_Corrected' : 'Medal_Won_Next_Year'}, inplace = True)\n\n# General formatting changes\nyear_host_team_medal.drop(['Prev_Year', 'Next_Year'], axis = 1, inplace = True)\nyear_host_team_medal.sort_values('Year', ascending = True, inplace = True)\nyear_host_team_medal.reset_index(inplace = True, drop = True)\n\n# column re-ordering\nyear_host_team_medal = year_host_team_medal.loc[:, ['Year', 'Country_Host', 'Team', 'Medal_Won_Prev_Year', 'Medal_Won_Host_Year', 'Medal_Won_Next_Year']]\n\nyear_host_team_medal", "_____no_output_____" ], [ "year_team_gdp = olympics_complete_subset.loc[:, ['Year', 'Team', 'GDP']].drop_duplicates()\n\nmedal_tally_gdp = medal_tally.merge(year_team_gdp,\n left_on = ['Year', 'Team'],\n right_on = ['Year', 'Team'],\n how = 'left')\n\nrow_mask_5 = medal_tally_gdp['Medal_Won_Corrected'] > 0\nrow_mask_6 = medal_tally_gdp['Team'].map(lambda x: x in top_countries)\n\ncorrelation = medal_tally_gdp.loc[row_mask_5, ['GDP', 'Medal_Won_Corrected']].corr()['Medal_Won_Corrected'][0]\n\nplot(medal_tally_gdp.loc[row_mask_5, 'GDP'], \n medal_tally_gdp.loc[row_mask_5, 'Medal_Won_Corrected'] , \n linestyle = 'none', \n marker = 'o',\n alpha = 0.4)\nxlabel('Country GDP')\n\nylabel('Number of Medals')\ntitle('GDP versus medal tally')\ntext(np.nanpercentile(medal_tally_gdp['GDP'], 99.6), \n max(medal_tally_gdp['Medal_Won_Corrected']) - 50,\n \"Correlation = \" + str(correlation))", "_____no_output_____" ], [ "year_team_pop = olympics_complete_subset.loc[:, ['Year', 'Team', 'Population']].drop_duplicates()\n\nmedal_tally_pop = medal_tally.merge(year_team_pop,\n left_on = ['Year', 'Team'],\n right_on = ['Year', 'Team'],\n how = 'left')\n\nrow_mask_5 = medal_tally_pop['Medal_Won_Corrected'] > 0\nrow_mask_6 = medal_tally_pop['Team'].map(lambda x: x in top_countries)\n\ncorrelation = medal_tally_pop.loc[row_mask_5, ['Population', 'Medal_Won_Corrected']].corr()['Medal_Won_Corrected'][0]\n\nplot(medal_tally_pop.loc[row_mask_5, 'Population'], \n medal_tally_pop.loc[row_mask_5, 'Medal_Won_Corrected'] , \n linestyle = 'none', \n marker = 'o',\n alpha = 0.4)\nxlabel('Country Population')\n\nylabel('Number of Medals')\ntitle('Population versus medal tally')\ntext(np.nanpercentile(medal_tally_pop['Population'], 99.6), \n max(medal_tally_pop['Medal_Won_Corrected']) - 50,\n \"Correlation = \" + str(correlation))", "_____no_output_____" ], [ "year_team_gdp_pop = olympics_complete_subset.loc[:, ['Year','Team','GDP','Population']].drop_duplicates()\n\nmedal_gdp_pop = 
medal_tally.merge(year_team_gdp_pop,\n left_on = ['Year', 'Team'],\n right_on = ['Year', 'Team'],\n how = 'left')\nmedal_gdp_pop = medal_gdp_pop.dropna()\nmedal_gdp_pop.head(5)", "_____no_output_____" ], [ "from sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\nyear_team_gender = olympics_complete_subset[['Year','Team', 'Name', 'Sex']].drop_duplicates()\ncontingent_size = pd.pivot_table(year_team_gender,\n index = ['Year','Team'],\n columns = 'Sex',\n aggfunc = 'count').reset_index()\n# rename columns as per column names in the 0th level\ncontingent_size.columns = contingent_size.columns.get_level_values(0)\n\n# rename the columns appropriately\ncontingent_size.columns = ['Year', 'Team', 'Female_Athletes', 'Male_Athletes']\n\ncontingent_size['Total_Athletes'] = contingent_size['Female_Athletes'] + \\\ncontingent_size['Male_Athletes']\n#medal_all = medal_all.drop(medal_all[medal_all.Medal_Count == 0].index)\ncontingent_size = contingent_size.dropna()\ncontingent_size = contingent_size.drop(['Female_Athletes','Male_Athletes'],axis=1)\ncontingent_size.head(100)\n\nmedal_all = contingent_size.merge(medal_gdp_pop,\n left_on = ['Year', 'Team'],\n right_on = ['Year', 'Team'],\n )\nX = medal_all[['Total_Athletes','GDP','Population']]\n\ny = medal_all['Medal_Won_Corrected']\n\nX = X.values\n\ny=y.values\n\nX_Train,X_Test,Y_Train,Y_Test = train_test_split(X,y,test_size=0.2,random_state=1)\n\n# X_Train = X_Train.reshape(-1,1)\n# print(type(X_Train))\n# X_Test = X_Test.reshape(-1,1)\nlreg = linear_model.LinearRegression().fit(X_Train,Y_Train)\nprint(\"Linear model for GDP and Medal Tally:\")\nprint(\"linear model intercept b = \",lreg.intercept_)\nprint(\"linear model coefficient w = \",lreg.coef_)\nprint(\"Training:\",lreg.score(X_Train,Y_Train))\nprint(\"Test:\",lreg.score(X_Test,Y_Test))\n\n\n", "Linear model for GDP and Medal Tally:\nlinear model intercept b = -4.68658915746221\nlinear model coefficient w = [1.24148491e-01 2.43661434e-12 7.38194907e-09]\nTraining: 0.7202897525698042\nTest: 0.8181053075060682\n" ], [ "import seaborn as sns\n#medal_all = medal_all.drop(['Year'],axis=1)\nmatrix = medal_all.corr()\nf, ax = plt.subplots(figsize=(4, 3)) \nsns.heatmap(matrix, vmax=.8, square=True, cmap=\"Greens\");", "_____no_output_____" ], [ "from sklearn.cluster import KMeans\nmodel = KMeans(n_clusters=5)\nk_data = medal_gdp_pop.drop(columns=['Year','Team','Population'])\n\nmodel.fit(k_data)\ny_predict = model.predict(k_data)\nplt.scatter(k_data['GDP'], k_data['Medal_Won_Corrected'], c=y_predict, s=50, cmap='viridis')\n\n", "_____no_output_____" ], [ "from sklearn.neighbors import KNeighborsClassifier \n\nmedal_gdp_pop = medal_gdp_pop.drop(medal_gdp_pop[medal_gdp_pop.Medal_Won_Corrected == 0].index)\n\nX = medal_gdp_pop['GDP']\n\ny = medal_gdp_pop['Medal_Won_Corrected']\n\nX = X.values\n\ny=y.values\n\nX_Train,X_Test,Y_Train,Y_Test = train_test_split(X,y,random_state=0)\n\nX_Train = X_Train.reshape(-1,1)\n# print(type(X_Train))\nX_Test = X_Test.reshape(-1,1)\n\nfrom sklearn.preprocessing import StandardScaler \nscaler = StandardScaler() \nscaler.fit(X_Train)\n\nX_Train = scaler.transform(X_Train) \nX_Test = scaler.transform(X_Test)\n\nclassifier = KNeighborsClassifier(n_neighbors=3) \nclassifier.fit(X_Train, Y_Train) \n\ny_pred = classifier.predict(X_Test)\n\nfrom sklearn import metrics\n\nprint(metrics.accuracy_score(Y_Test, y_pred))", "0.19270833333333334\n" ], [ "\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec79ea09c797fa8c546d40cda77e8c9bee56aee1
834,048
ipynb
Jupyter Notebook
Copia_de_proyecto_eco.ipynb
ibm-nico/proyecto_ecg
dd24fd45c06eac14d6906dc946e84abf64cebe83
[ "MIT" ]
null
null
null
Copia_de_proyecto_eco.ipynb
ibm-nico/proyecto_ecg
dd24fd45c06eac14d6906dc946e84abf64cebe83
[ "MIT" ]
2
2020-06-08T13:39:11.000Z
2020-06-08T14:04:30.000Z
Copia_de_proyecto_eco.ipynb
ibm-nico/proyecto_ecg
dd24fd45c06eac14d6906dc946e84abf64cebe83
[ "MIT" ]
null
null
null
2,926.484211
826,742
0.960547
[ [ [ "<a href=\"https://colab.research.google.com/github/ibm-nico/proyecto_ecg/blob/master/Copia_de_proyecto_eco.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "#Ejemplo para empezar con Github y colab\n", "_____no_output_____" ], [ "Proceso\n1. cargar sensores\n2. mostrar señales y registrar\n\n", "_____no_output_____" ], [ "##Librerias", "_____no_output_____" ] ], [ [ "import time \nfrom IPython.display import Image\nfrom termcolor import colored", "_____no_output_____" ] ], [ [ "## Referencia\n![texto alternativo](https://user-images.githubusercontent.com/66627182/84036930-478dd780-a974-11ea-8910-9d772d2e5887.jpg)\n\n", "_____no_output_____" ], [ "## Funciones", "_____no_output_____" ] ], [ [ "def programa_principal():\n '''carga los sensores y devuelve True si esta todo Ok'''\n registrar_valores = False # para avisar si puede procesar datos\n\n print(\"chequeando los sensores\")\n\n for i in range(0,11):\n #print(i)\n time.sleep(1)\n print(colored((str(i/10 *100)+\" % \" ),'red'))\n\n if i == 10:\n print(\"listo para registrar\")\n registrar_valores = True\n\n return registrar_valores", "_____no_output_____" ], [ "programa_principal()", "chequeando los sensores\n\u001b[31m0.0 % \u001b[0m\n\u001b[31m10.0 % \u001b[0m\n\u001b[31m20.0 % \u001b[0m\n\u001b[31m30.0 % \u001b[0m\n\u001b[31m40.0 % \u001b[0m\n\u001b[31m50.0 % \u001b[0m\n\u001b[31m60.0 % \u001b[0m\n\u001b[31m70.0 % \u001b[0m\n\u001b[31m80.0 % \u001b[0m\n\u001b[31m90.0 % \u001b[0m\n\u001b[31m100.0 % \u001b[0m\nlisto para registrar\n" ] ], [ [ "## Programa Principal", "_____no_output_____" ] ], [ [ "respuesta = programa_principal()\n\nif respuesta == True: \n print(\"Visualizando y Registrando\")\n\nelse:\n print(\"visualizando pero NO! está registrando\")\n \nImage('https://thumbs.gfycat.com/VerifiableWealthyAmericanratsnake.webp', format='png')", "chequeando los sensores\n\u001b[31m0.0 % \u001b[0m\n\u001b[31m10.0 % \u001b[0m\n\u001b[31m20.0 % \u001b[0m\n\u001b[31m30.0 % \u001b[0m\n\u001b[31m40.0 % \u001b[0m\n\u001b[31m50.0 % \u001b[0m\n\u001b[31m60.0 % \u001b[0m\n\u001b[31m70.0 % \u001b[0m\n\u001b[31m80.0 % \u001b[0m\n\u001b[31m90.0 % \u001b[0m\n\u001b[31m100.0 % \u001b[0m\nlisto para registrar\nVisualizando y Registrando\n" ], [ "from termcolor import colored\n\n# el % rojo\nprint(colored('hola mundo', 'red'))\n\n'visualizando y registrando' # en verde", "\u001b[31mhola mundo\u001b[0m\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec79f5127a1063eb9fb9ada5933651c28da8bb6e
66,250
ipynb
Jupyter Notebook
book/Chapter 9 - ETF/Oil ETF.ipynb
allaccountstaken/testing_clenow
8e0b2e53dfd71556655392b8e0646c3909d345dc
[ "MIT" ]
null
null
null
book/Chapter 9 - ETF/Oil ETF.ipynb
allaccountstaken/testing_clenow
8e0b2e53dfd71556655392b8e0646c3909d345dc
[ "MIT" ]
null
null
null
book/Chapter 9 - ETF/Oil ETF.ipynb
allaccountstaken/testing_clenow
8e0b2e53dfd71556655392b8e0646c3909d345dc
[ "MIT" ]
1
2022-03-26T07:11:18.000Z
2022-03-26T07:11:18.000Z
920.138889
64,560
0.955396
[ [ [ "%matplotlib inline\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Read data from csv\ndf = pd.read_csv('oil_etf_vs_spot.csv', index_col='Date', parse_dates=['Date'])\n\n# Make new figure and set the size.\nfig = plt.figure(figsize=(12, 8))\n\n# The first subplot, planning for 3 plots high, 1 plot wide, this being the first.\nax = fig.add_subplot(111)\nax.set_title('Oil ETF vs. Spot')\nax.plot(df['WTI-West-Texas-Intermediate'], linestyle='-', label='Spot', linewidth=3.0, color='black')\nax.plot(df['USO'], linestyle='--', label='ETF', linewidth=3.0, color = 'grey')\nax.legend()\n\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]