| Column | Dtype | Values |
| --- | --- | --- |
| hexsha | stringlengths | 40 – 40 |
| size | int64 | 6 – 14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6 – 260 |
| max_stars_repo_name | stringlengths | 6 – 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 – 41 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 – 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 – 24 |
| max_issues_repo_path | stringlengths | 6 – 260 |
| max_issues_repo_name | stringlengths | 6 – 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 – 41 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 – 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 – 24 |
| max_forks_repo_path | stringlengths | 6 – 260 |
| max_forks_repo_name | stringlengths | 6 – 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 – 41 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 – 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 – 24 |
| avg_line_length | float64 | 2 – 1.04M |
| max_line_length | int64 | 2 – 11.2M |
| alphanum_fraction | float64 | 0 – 1 |
| cells | list | |
| cell_types | list | |
| cell_type_groups | list | |
ec7e617288db1922c4b29af20d1a13f0a87dcd6f
5,259
ipynb
Jupyter Notebook
2/Activities/06-Stu_ReflectingOnSQL/Unsolved/Stu_Reflection.ipynb
Marciooliver/Advanced-Data-Storage-and-Retrieval
5137bb3eeb74a55a581e39cc0edb6f21594182d0
[ "ADSL" ]
null
null
null
2/Activities/06-Stu_ReflectingOnSQL/Unsolved/Stu_Reflection.ipynb
Marciooliver/Advanced-Data-Storage-and-Retrieval
5137bb3eeb74a55a581e39cc0edb6f21594182d0
[ "ADSL" ]
null
null
null
2/Activities/06-Stu_ReflectingOnSQL/Unsolved/Stu_Reflection.ipynb
Marciooliver/Advanced-Data-Storage-and-Retrieval
5137bb3eeb74a55a581e39cc0edb6f21594182d0
[ "ADSL" ]
null
null
null
22.004184
88
0.561894
[ [ [ "# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine", "_____no_output_____" ], [ "# Create engine using the `demographics.sqlite` database file\nengine = create_engine(\"sqlite:///../Resources/demographics.sqlite\")", "_____no_output_____" ], [ "# Declare a Base using `automap_base()`\n# YOUR CODE HERE", "_____no_output_____" ], [ "# Use the Base class to reflect the database tables\n# YOUR CODE HERE", "_____no_output_____" ], [ "# Print all of the classes mapped to the Base\n# YOUR CODE HERE", "_____no_output_____" ], [ "# Assign the demographics class to a variable called `Demographics`\n# YOUR CODE HERE", "_____no_output_____" ], [ "# Create a session\n# YOUR CODE HERE", "_____no_output_____" ], [ "# Use the session to query Demographics table and display the first 5 locations\n# YOUR CODE HERE", "_____no_output_____" ], [ "# BONUS: Query and print the number of unique Locations\n# Hints: Look into counting and grouping operations in SQLAlchemy\n# YOUR CODE HERE", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7e642b34e6facad7e2cecbb2523f83d9d40297
75,901
ipynb
Jupyter Notebook
documentation/examples/preview-simulators-from-python.ipynb
Bradben/qsharp-runtime
72f7e963077c4cebea641ecc50b1ce4417048453
[ "MIT" ]
260
2019-07-11T16:40:17.000Z
2022-03-23T17:46:59.000Z
documentation/examples/preview-simulators-from-python.ipynb
Bradben/qsharp-runtime
72f7e963077c4cebea641ecc50b1ce4417048453
[ "MIT" ]
507
2019-07-11T17:14:48.000Z
2022-03-29T01:17:07.000Z
documentation/examples/preview-simulators-from-python.ipynb
Bradben/qsharp-runtime
72f7e963077c4cebea641ecc50b1ce4417048453
[ "MIT" ]
89
2019-07-11T18:02:27.000Z
2022-03-25T09:06:58.000Z
30.470092
7,636
0.414408
[ [ [ "# Using Preview Simulators with Q# and Python", "_____no_output_____" ], [ "The preview open systems and stabilizer simulators for the Quantum Development Kit use the [QuTiP](https://qutip.org) library for Python to help represent noise models, so we import it here.", "_____no_output_____" ] ], [ [ "import qutip as qt", "_____no_output_____" ] ], [ [ "To use the preview simulators, we start by importing Q# interoperability as normal.", "_____no_output_____" ] ], [ [ "import qsharp", "_____no_output_____" ] ], [ [ "We can then use `qsharp.experimental.enable_noisy_simulation()` to add support for preview simulators.", "_____no_output_____" ] ], [ [ "import qsharp.experimental\nqsharp.experimental.enable_noisy_simulation()", "_____no_output_____" ] ], [ [ "Doing so adds the `.simulate_noise` method to Python representations of Q# callables:", "_____no_output_____" ] ], [ [ "%%qsharp\n\noperation DumpPlus() : Unit {\n use q = Qubit();\n H(q);\n Microsoft.Quantum.Diagnostics.DumpMachine();\n X(q);\n Reset(q);\n}", "_____no_output_____" ], [ "DumpPlus.simulate_noise()", "_____no_output_____" ] ], [ [ "Looking at the output from the above, we notice two distinct differences with the output from `.simulate()`:\n\n- The preview simulators use quantum registers of a fixed size (by default, three qubits), and allocate qubits from that register.\n- By default, the preview simulators represent quantum states as density operators ($\\rho = \\left|\\psi\\right\\rangle\\left\\langle\\psi\\right|$) instead of as state vectors ($\\left|\\psi\\right\\rangle$).\n\nFor example, in the output above, the preview simulator has output the density operator $\\rho = \\left|+00\\right\\rangle\\left\\langle+00\\right|$, as we can verify by using QuTiP.", "_____no_output_____" ] ], [ [ "ket_zero = qt.basis(2, 0)\nket_zero", "_____no_output_____" ], [ "ket_one = qt.basis(2, 1)\nket_plus = (ket_zero + ket_one).unit()\nket_plus", "_____no_output_____" ], [ "ket_psi = qt.tensor(ket_plus, ket_zero, ket_zero)\nrho = ket_psi * ket_psi.dag()\nrho", "_____no_output_____" ] ], [ [ "## Configuring Open Systems Noise Models", "_____no_output_____" ], [ "The preview simulators can be configured by the use of the `qsharp.config` object. For example, to change the size of the register used, we can modify the `experimental.simulators.nQubits` configuration setting:", "_____no_output_____" ] ], [ [ "qsharp.config['experimental.simulators.nQubits'] = 1", "_____no_output_____" ], [ "DumpPlus.simulate_noise()", "_____no_output_____" ] ], [ [ "We can modify the noise model used in simulating Q# programs by using several functions in the `qsharp.experimental` module. For instance, to initialize the noise model to an ideal model (that is, with no noise), we can use `set_noise_model_by_name` or the `%noise_model --set-by-name` magic command:", "_____no_output_____" ] ], [ [ "qsharp.experimental.set_noise_model_by_name('ideal')\n%noise_model --set-by-name ideal", "_____no_output_____" ] ], [ [ "We can then access the noise model by using `get_noise_model`:", "_____no_output_____" ] ], [ [ "noise_model = qsharp.experimental.get_noise_model()", "_____no_output_____" ] ], [ [ "This noise model is represented as a Python dictionary from preparations, measurements, and gates to Python objects representing the noise in each. 
For example, in the ideal noise model, the `Microsoft.Quantum.Intrinsic.H` operation is simulated by a unitary matrix:", "_____no_output_____" ] ], [ [ "noise_model['h']", "_____no_output_____" ] ], [ [ "We can modify this to add depolarizing noise using QuTiP functions to build a depolarizing noise channel:", "_____no_output_____" ] ], [ [ "I, X, Y, Z = [P.as_qobj() for P in qsharp.Pauli]", "_____no_output_____" ], [ "def depolarizing_noise(p=1.0):\n return p * qt.to_super(I) + ((1 - p) / 4) * sum(map(qt.to_super, [I, X, Y, Z]))", "_____no_output_____" ], [ "noise_model['h'] = depolarizing_noise(0.99) * qt.to_super(qt.qip.operations.hadamard_transform())\nnoise_model['h']", "_____no_output_____" ], [ "ket_zero = qt.basis(2, 0)\nket_zero", "_____no_output_____" ], [ "rho_zero = ket_zero * ket_zero.dag()\nrho_zero", "_____no_output_____" ], [ "noise_model['h'](rho_zero)", "_____no_output_____" ] ], [ [ "Once we have modified our noise model in this way, we can set it as the active noise model used in simulating Q# programs:", "_____no_output_____" ] ], [ [ "qsharp.experimental.set_noise_model(noise_model)", "_____no_output_____" ] ], [ [ "Using this model, we no longer get the exact $|+\\rangle\\langle+|$ state, but see that our Q# program has incurred some small error due to noise in the application of `Microsoft.Quantum.Intrinsic.H`:", "_____no_output_____" ] ], [ [ "DumpPlus.simulate_noise()", "_____no_output_____" ], [ "qt.to_kraus(noise_model['h'])", "_____no_output_____" ] ], [ [ "## Configuring Stabilizer Noise Models", "_____no_output_____" ], [ "We can also configure the preview simulator to use stabilizer (_a.k.a._ CHP) simulation. This time, let's get a new noise model by using `get_noise_model_by_name`:", "_____no_output_____" ] ], [ [ "noise_model = qsharp.experimental.get_noise_model_by_name('ideal_stabilizer')\nnoise_model", "_____no_output_____" ], [ "qsharp.experimental.set_noise_model(noise_model)", "_____no_output_____" ] ], [ [ "To make the best use of stabilizer noise models, we also need to configure the simulator to start off in the stabilizer representation:", "_____no_output_____" ] ], [ [ "qsharp.config['experimental.simulators.representation'] = 'stabilizer'", "_____no_output_____" ], [ "DumpPlus.simulate_noise()", "_____no_output_____" ] ], [ [ "Notably, the stabilizer representation does not support operations outside of the stabilizer formalism, such as `T` and `CCNOT`. 
This allows the stabilizer representation to support significantly more qubits than other representations:", "_____no_output_____" ] ], [ [ "qsharp.config['experimental.simulators.nQubits'] = 10", "_____no_output_____" ], [ "DumpPlus.simulate_noise()", "_____no_output_____" ] ], [ [ "If we turn off visualization, we can get significantly more qubits still!", "_____no_output_____" ] ], [ [ "%%qsharp\nopen Microsoft.Quantum.Arrays;\n\noperation SampleRandomBitstring(nQubits : Int) : Result[] {\n use register = Qubit[nQubits];\n ApplyToEachCA(H, register);\n return ForEach(M, register);\n}", "_____no_output_____" ], [ "qsharp.config['experimental.simulators.nQubits'] = 1000", "_____no_output_____" ], [ "%time SampleRandomBitstring.simulate_noise(nQubits=1000)", "Wall time: 2.79 s\n" ] ], [ [ "For now, though, we'll turn back down the number of qubits just to make dumps easier to read!", "_____no_output_____" ] ], [ [ "qsharp.config['experimental.simulators.nQubits'] = 4", "_____no_output_____" ] ], [ [ "The visualization style for stabilizer states can be selected by using the `experimental.simulators.stabilizerStateStyle` configuration setting:", "_____no_output_____" ] ], [ [ "%%qsharp\n\noperation DumpBellPair() : Unit {\n use left = Qubit();\n use right = Qubit();\n within {\n H(left);\n CNOT(left, right);\n } apply {\n Microsoft.Quantum.Diagnostics.DumpMachine();\n }\n}", "_____no_output_____" ], [ "qsharp.config['experimental.simulators.stabilizerStateStyle'] = 'matrixWithoutDestabilizers'", "_____no_output_____" ], [ "DumpBellPair.simulate_noise()", "_____no_output_____" ], [ "DumpBellPair.simulate()", "_____no_output_____" ], [ "qsharp.config['experimental.simulators.stabilizerStateStyle'] = 'denseGroupPresentation'", "_____no_output_____" ], [ "DumpBellPair.simulate_noise()", "_____no_output_____" ], [ "qsharp.config['experimental.simulators.stabilizerStateStyle'] = 'sparseGroupPresentation'", "_____no_output_____" ], [ "DumpBellPair.simulate_noise()", "_____no_output_____" ] ], [ [ "So far, we've only used ideal stabilizer simulation, but what happens if one of our operations is followed by a mixed Pauli channel?", "_____no_output_____" ] ], [ [ "noise_model['h'] = qsharp.experimental.SequenceProcess(1, \n [\n qsharp.experimental.ChpDecompositionProcess(1, [\n qsharp.experimental.Hadamard(0)\n ]),\n qsharp.experimental.MixedPauliProcess(1, [\n (0.9, 'I'),\n (0.1, 'Z')\n ])\n ]\n)\nnoise_model['h']", "_____no_output_____" ], [ "qsharp.experimental.set_noise_model(noise_model)", "_____no_output_____" ], [ "DumpBellPair.simulate_noise()", "_____no_output_____" ] ], [ [ "## Epilogue", "_____no_output_____" ] ], [ [ "qsharp.component_versions()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec7e653144a01e4f15f9f1a292b98e8c15bb82c4
203,242
ipynb
Jupyter Notebook
scikit-learn/Chapter 1. Introduction.ipynb
zzsza/TIL
8e623ebcbeca0f3fe1acbc2900f992a3c462a50b
[ "MIT" ]
22
2017-10-30T06:47:12.000Z
2020-04-15T11:50:31.000Z
scikit-learn/Chapter 1. Introduction.ipynb
zzsza/TIL
8e623ebcbeca0f3fe1acbc2900f992a3c462a50b
[ "MIT" ]
null
null
null
scikit-learn/Chapter 1. Introduction.ipynb
zzsza/TIL
8e623ebcbeca0f3fe1acbc2900f992a3c462a50b
[ "MIT" ]
17
2017-10-30T01:30:51.000Z
2021-08-31T18:41:15.000Z
360.998224
188,236
0.922004
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport mglearn\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "# Iris Data Classification", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_iris\niris_dataset = load_iris()", "_____no_output_____" ], [ "print(\"iris_dataset의 key : \\n{}\".format(iris_dataset.keys()))", "iris_dataset의 key : \ndict_keys(['data', 'target', 'target_names', 'DESCR', 'feature_names'])\n" ], [ "print(iris_dataset['DESCR'])", "Iris Plants Database\n====================\n\nNotes\n-----\nData Set Characteristics:\n :Number of Instances: 150 (50 in each of three classes)\n :Number of Attributes: 4 numeric, predictive attributes and the class\n :Attribute Information:\n - sepal length in cm\n - sepal width in cm\n - petal length in cm\n - petal width in cm\n - class:\n - Iris-Setosa\n - Iris-Versicolour\n - Iris-Virginica\n :Summary Statistics:\n\n ============== ==== ==== ======= ===== ====================\n Min Max Mean SD Class Correlation\n ============== ==== ==== ======= ===== ====================\n sepal length: 4.3 7.9 5.84 0.83 0.7826\n sepal width: 2.0 4.4 3.05 0.43 -0.4194\n petal length: 1.0 6.9 3.76 1.76 0.9490 (high!)\n petal width: 0.1 2.5 1.20 0.76 0.9565 (high!)\n ============== ==== ==== ======= ===== ====================\n\n :Missing Attribute Values: None\n :Class Distribution: 33.3% for each of 3 classes.\n :Creator: R.A. Fisher\n :Donor: Michael Marshall (MARSHALL%[email protected])\n :Date: July, 1988\n\nThis is a copy of UCI ML iris datasets.\nhttp://archive.ics.uci.edu/ml/datasets/Iris\n\nThe famous Iris database, first used by Sir R.A Fisher\n\nThis is perhaps the best known database to be found in the\npattern recognition literature. Fisher's paper is a classic in the field and\nis referenced frequently to this day. (See Duda & Hart, for example.) The\ndata set contains 3 classes of 50 instances each, where each class refers to a\ntype of iris plant. One class is linearly separable from the other 2; the\nlatter are NOT linearly separable from each other.\n\nReferences\n----------\n - Fisher,R.A. \"The use of multiple measurements in taxonomic problems\"\n Annual Eugenics, 7, Part II, 179-188 (1936); also in \"Contributions to\n Mathematical Statistics\" (John Wiley, NY, 1950).\n - Duda,R.O., & Hart,P.E. (1973) Pattern Classification and Scene Analysis.\n (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218.\n - Dasarathy, B.V. (1980) \"Nosing Around the Neighborhood: A New System\n Structure and Classification Rule for Recognition in Partially Exposed\n Environments\". IEEE Transactions on Pattern Analysis and Machine\n Intelligence, Vol. PAMI-2, No. 1, 67-71.\n - Gates, G.W. (1972) \"The Reduced Nearest Neighbor Rule\". IEEE Transactions\n on Information Theory, May 1972, 431-433.\n - See also: 1988 MLC Proceedings, 54-64. 
Cheeseman et al\"s AUTOCLASS II\n conceptual clustering system finds 3 classes in the data.\n - Many, many more ...\n\n" ], [ "print(\"타겟의 이름 : {}\".format(iris_dataset['target_names']))\nprint(\"특성의 이름 : {}\".format(iris_dataset['feature_names']))\nprint(\"data 타입 : {}\".format(type(iris_dataset['data'])))\nprint(\"data 크기 : {}\".format(iris_dataset['data'].shape))", "타겟의 이름 : ['setosa' 'versicolor' 'virginica']\n특성의 이름 : ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']\ndata 타입 : <class 'numpy.ndarray'>\ndata 크기 : (150, 4)\n" ] ], [ [ "150개의 Sample Data, 4개의 Feature", "_____no_output_____" ] ], [ [ "print(\"data의 처음 다섯 행 :\\n{}\".format(iris_dataset['data'][:5]))", "data의 처음 다섯 행 :\n[[ 5.1 3.5 1.4 0.2]\n [ 4.9 3. 1.4 0.2]\n [ 4.7 3.2 1.3 0.2]\n [ 4.6 3.1 1.5 0.2]\n [ 5. 3.6 1.4 0.2]]\n" ], [ "# target part\nprint(\"target의 타입 : {}\".format(type(iris_dataset['target'])))\nprint(\"target의 크기: {}\".format(iris_dataset['target'].shape))\nprint(\"Target : \\n{}\".format(iris_dataset['target']))", "target의 타입 : <class 'numpy.ndarray'>\ntarget의 크기: (150,)\nTarget : \n[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2\n 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2\n 2 2]\n" ] ], [ [ "### 훈련 데이터와 테스트 데이터는 절대적으로 나누어야 함. 전체의 75%를 훈련 데이터로 사용하고 25%를 테스트 데이터로 사용하곤 합니다", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\n iris_dataset['data'], iris_dataset['target'], random_state=0, test_size=0.33)", "_____no_output_____" ], [ "print(\"X_train 크기 : {}\".format(X_train.shape))\nprint(\"y_train 크기 : {}\".format(y_train.shape))", "X_train 크기 : (100, 4)\ny_train 크기 : (100,)\n" ], [ "print(\"X_test 크기 : {}\".format(X_test.shape))\nprint(\"y_test 크기 : {}\".format(y_test.shape))", "X_test 크기 : (50, 4)\ny_test 크기 : (50,)\n" ] ], [ [ "# 1. 
데이터 탐색 ( EDA )\n- 비정상적인 값, 특이한 값 처리\n- 데이터 전처리\n- 산점도 행렬!", "_____no_output_____" ] ], [ [ "iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)", "_____no_output_____" ], [ "pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o', hist_kwds={'bins':20}, s=60,\n alpha=.8, cmap=mglearn.cm3)", "_____no_output_____" ], [ "# 3개의 클래스가 측정값에 따라 잘 구분되는 것을 확인할 수 있습니다", "_____no_output_____" ], [ "# knn 알고리즘 : 가장 가까운 k개의 이웃을 찾는다!", "_____no_output_____" ], [ "from sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier(n_neighbors=1)", "_____no_output_____" ], [ "knn", "_____no_output_____" ], [ "knn.fit(X_train, y_train)", "_____no_output_____" ], [ "X_new = np.array([[5, 2.9, 1, 0.2]])\nprint(\"X_new.shape: {}\".format(X_new.shape))", "X_new.shape: (1, 4)\n" ], [ "prediction = knn.predict(X_new)\nprint(\"예측 : {}\".format(prediction))\nprint(\"예측한 타깃의 이름 : {}\".format(iris_dataset['target_names'][prediction]))", "예측 : [0]\n예측한 타깃의 이름 : ['setosa']\n" ] ], [ [ "# 모델 평가하기\n- 정확도를 계산해 모델 성능 평가\n", "_____no_output_____" ] ], [ [ "y_pred = knn.predict(X_test)\nprint(\"테스트 세트에 대한 예측값:\\n {}\".format(y_pred))", "테스트 세트에 대한 예측값:\n [2 1 0 2 0 2 0 1 1 1 2 1 1 1 1 0 1 1 0 0 2 1 0 0 2 0 0 1 1 0 2 1 0 2 2 1 0\n 2 1 1 2 0 2 0 0 1 2 2 1 2]\n" ], [ "print(\"테스트 세트의 정확도 : {:.2f}\".format(np.mean(y_pred == y_test)))", "테스트 세트의 정확도 : 0.96\n" ], [ "# knn.score 메서드로도 정확도 계산 가능!\nprint(\"테스트 세트의 정확도 : {:.2f}\".format(knn.score(X_test, y_test)))", "테스트 세트의 정확도 : 0.96\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec7e70225b6ec53aae64c4a41450b67a2368657e
66,000
ipynb
Jupyter Notebook
10_Deleting/Wine/Exercises.ipynb
society765/pandas_exercises
5f1ec84f2ba0322fb0a1ac3f7e85536adcb53b4c
[ "BSD-3-Clause" ]
null
null
null
10_Deleting/Wine/Exercises.ipynb
society765/pandas_exercises
5f1ec84f2ba0322fb0a1ac3f7e85536adcb53b4c
[ "BSD-3-Clause" ]
null
null
null
10_Deleting/Wine/Exercises.ipynb
society765/pandas_exercises
5f1ec84f2ba0322fb0a1ac3f7e85536adcb53b4c
[ "BSD-3-Clause" ]
null
null
null
30.330882
132
0.290136
[ [ [ "# Wine", "_____no_output_____" ], [ "### Introduction:\n\nThis exercise is a adaptation from the UCI Wine dataset.\nThe only pupose is to practice deleting data with pandas.\n\n### Step 1. Import the necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd \nimport numpy as np \npd.__version__", "_____no_output_____" ] ], [ [ "### Step 2. Import the dataset from this [address](https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data). ", "_____no_output_____" ], [ "### Step 3. Assign it to a variable called wine", "_____no_output_____" ] ], [ [ "url='https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'\nwine = pd.read_csv(url, header=None)\nwine.head()", "_____no_output_____" ] ], [ [ "### Step 4. Delete the first, fourth, seventh, nineth, eleventh, thirteenth and fourteenth columns", "_____no_output_____" ] ], [ [ "if 0 in wine.columns:\n wine = wine.drop([0, 3, 6, 8, 10, 12, 13], axis = 'columns')\nwine.head()", "_____no_output_____" ] ], [ [ "### Step 5. Assign the columns as below:\n\nThe attributes are (dontated by Riccardo Leardi, riclea '@' anchem.unige.it): \n1) alcohol \n2) malic_acid \n3) alcalinity_of_ash \n4) magnesium \n5) flavanoids \n6) proanthocyanins \n7) hue ", "_____no_output_____" ] ], [ [ "wine = wine.rename({\n 1: 'alcohol', 2:'malic_acid', 4:'alcalinity_of_ash', 5:'magnesium', 7:'flavanoids', 9:'proanthocyanins', \n 11:'hue'\n}, axis='columns')\nwine.head()", "_____no_output_____" ] ], [ [ "### Step 6. Set the values of the first 3 rows from alcohol as NaN", "_____no_output_____" ] ], [ [ "wine.loc[:2, 'alcohol'] = np.nan\nwine.head()", "_____no_output_____" ] ], [ [ "### Step 7. Now set the value of the rows 3 and 4 of magnesium as NaN", "_____no_output_____" ] ], [ [ "wine.loc[3:4, 'magnesium'] = np.nan\nwine.head()", "_____no_output_____" ] ], [ [ "### Step 8. Fill the value of NaN with the number 10 in alcohol and 100 in magnesium", "_____no_output_____" ] ], [ [ "wine = wine.fillna(\n {'alcohol': 10, 'magnesium':100}\n)\nwine.head()", "_____no_output_____" ] ], [ [ "### Step 9. Count the number of missing values", "_____no_output_____" ] ], [ [ "wine.isnull().sum()", "_____no_output_____" ] ], [ [ "### Step 10. Create an array of 10 random numbers up until 10", "_____no_output_____" ] ], [ [ "ar = np.random.randint(10, size=10)\nar", "_____no_output_____" ] ], [ [ "### Step 11. Use random numbers you generated as an index and assign NaN value to each of cell.", "_____no_output_____" ] ], [ [ "wine.alcohol[ar] = np.nan\n\nwine.head(10)", "_____no_output_____" ] ], [ [ "### Step 12. How many missing values do we have?", "_____no_output_____" ] ], [ [ "wine.isnull().sum()", "_____no_output_____" ] ], [ [ "### Step 13. Delete the rows that contain missing values", "_____no_output_____" ] ], [ [ "wine = wine.dropna(axis='index', how='any')\nwine.head()", "_____no_output_____" ] ], [ [ "### Step 14. Print only the non-null values in alcohol", "_____no_output_____" ] ], [ [ "wine.alcohol[wine.alcohol.notna()]", "_____no_output_____" ] ], [ [ "### Step 15. Reset the index, so it starts with 0 again", "_____no_output_____" ] ], [ [ "wine.reset_index(drop=True)", "_____no_output_____" ] ], [ [ "### BONUS: Create your own question and answer it.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec7e8afd98fa1b6abd60c1f122a1918b5b0d45e6
16,755
ipynb
Jupyter Notebook
5052_05_Code/Logistic_regression.ipynb
PacktPublishing/-Data-Mining-with-Python-Implementing-Classification-and-Regression
ddf601bc63c2c3c4ac72eae451ae48afc5e850b7
[ "MIT" ]
5
2020-09-29T02:18:04.000Z
2021-11-29T20:04:34.000Z
5052_05_Code/Logistic_regression.ipynb
PacktPublishing/-Data-Mining-with-Python-Implementing-Classification-and-Regression
ddf601bc63c2c3c4ac72eae451ae48afc5e850b7
[ "MIT" ]
null
null
null
5052_05_Code/Logistic_regression.ipynb
PacktPublishing/-Data-Mining-with-Python-Implementing-Classification-and-Regression
ddf601bc63c2c3c4ac72eae451ae48afc5e850b7
[ "MIT" ]
1
2020-12-31T16:14:38.000Z
2020-12-31T16:14:38.000Z
27.971619
118
0.474903
[ [ [ "# About ANES 1996 Dataset", "_____no_output_____" ], [ "ANES 1996 dataset is a subset of the American National Election Studies of 1996. ", "_____no_output_____" ], [ "# Import Libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import metrics", "_____no_output_____" ] ], [ [ "# Load ANES 1996 Dataset", "_____no_output_____" ] ], [ [ "anes_dataset = pd.read_csv('anes_dataset.csv')", "_____no_output_____" ] ], [ [ "# Attributes Description", "_____no_output_____" ] ], [ [ "print \"Number of Obseravations: \",anes_dataset.shape[0]\nprint \"Number of Features: \", anes_dataset.shape[1]", "Number of Obseravations: 944\nNumber of Features: 10\n" ], [ "print \"Features: \", anes_dataset.columns.values", "Features: ['popul' 'TVnews' 'selfLR' 'ClinLR' 'DoleLR' 'PID' 'age' 'educ' 'income'\n 'vote']\n" ] ], [ [ "### popul:\nCensus place population in 1000s\n\n### TVnews:\nNumber of times per week that respondent watches TV news.\n \n### PID:\nParty identification of respondent.\n\n0 - Strong Democrat </n>\n\n1 - Weak Democrat\n\n2 - Independent-Democrat\n\n3 - Independent-Indpendent\n4 - Independent-Republican\n5 - Weak Republican\n6 - Strong Republican\n\n###age : \nAge of respondent.\n\n###educ - Education level of respondent\n1 - 1-8 grades\n2 - Some high school\n3 - High school graduate\n4 - Some college\n5 - College degree\n6 - Master's degree\n7 - PhD\n\n### income - Income of household\n 1 - None or less than $2,999\n 2 - $3,000-$4,999\n 3 - $5,000-$6,999\n 4 - $7,000-$8,999\n 5 - $9,000-$9,999\n 6 - $10,000-$10,999\n 7 - $11,000-$11,999\n 8 - $12,000-$12,999\n 9 - $13,000-$13,999\n 10 - $14,000-$14.999\n 11 - $15,000-$16,999\n 12 - $17,000-$19,999\n 13 - $20,000-$21,999\n 14 - $22,000-$24,999\n 15 - $25,000-$29,999\n 16 - $30,000-$34,999\n 17 - $35,000-$39,999\n 18 - $40,000-$44,999\n 19 - $45,000-$49,999\n 20 - $50,000-$59,999\n 21 - $60,000-$74,999\n 22 - $75,000-89,999\n 23 - $90,000-$104,999\n 24 - $105,000 and over\n\n \nThe following 3 variables all take the values:\n 1 - Extremely liberal\n 2 - Liberal\n 3 - Slightly liberal\n 4 - Moderate\n 5 - Slightly conservative\n 6 - Conservative\n 7 - Extremely Conservative\n \nselfLR - Respondent's self-reported political leanings from \"Left\"\n to \"Right\".\n \nClinLR - Respondents impression of Bill Clinton's political\n leanings from \"Left\" to \"Right\".\n \nDoleLR - Respondents impression of Bob Dole's political leanings\n from \"Left\" to \"Right\".\n \n vote - Expected vote\n 0 - Clinton\n 1 - Dole", "_____no_output_____" ] ], [ [ "# Basic Statistics", "_____no_output_____" ] ], [ [ "anes_dataset.describe()", "_____no_output_____" ] ], [ [ "# Split Datasets", "_____no_output_____" ] ], [ [ "headers = list(anes_dataset.columns.values)\nprint headers\nfeatures = headers[:-1]\ntarget = headers[-1]\nprint \"Features: \", features\nprint \"Target: \", target\n\n# Split datast to train and test dataset\nx_train,x_test,y_train,y_test = train_test_split(anes_dataset[features],anes_dataset[target],test_size = 0.4)\n\nprint \"x_train Shape: \", x_train.shape\nprint \"y_train Shape: \", y_train.shape\nprint \"x_test Shape: \", x_test.shape\nprint \"y_test Shape: \", y_test.shape", "['popul', 'TVnews', 'selfLR', 'ClinLR', 'DoleLR', 'PID', 'age', 'educ', 'income', 'vote']\nFeatures: ['popul', 'TVnews', 'selfLR', 'ClinLR', 'DoleLR', 'PID', 'age', 'educ', 'income']\nTarget: vote\nx_train Shape: (566, 
9)\ny_train Shape: (566,)\nx_test Shape: (378, 9)\ny_test Shape: (378,)\n" ] ], [ [ "# Logistic Regression model with 4 features", "_____no_output_____" ] ], [ [ "# Logistic Regressio model with 4 features\nfeatures_4 = ['TVnews','age','educ','income']\n\n# Logistic regression model instance to fit with Selected 4 features and target (Vote) In this case\nmodel_with_4_features = LogisticRegression()\nmodel_with_4_features.fit(x_train[features_4], y_train)\n\n# check the accuracy on the train dataset\ntrain_accuracy = model_with_4_features.score(x_train[features_4], y_train)\nprint \"Train Accuracy: \" , train_accuracy", "Train Accuracy: 0.598939929329\n" ] ], [ [ "# Logistic Regression model with all features", "_____no_output_____" ] ], [ [ "# Logistic Regression model instance to fit with all features.\nmodel_with_all_features = LogisticRegression()\nmodel_with_all_features.fit(x_train,y_train)\n\n# Check the accuracy on the train dataset\ntrain_accuracy = model_with_all_features.score(x_train,y_train)\nprint \"Train Accuracy: \", train_accuracy", "Train Accuracy: 0.913427561837\n" ] ], [ [ "# Prediction with both the Models", "_____no_output_____" ] ], [ [ "test_observation1_for_4_features_model = x_test[features_4][:1]\nprint test_observation1_for_4_features_model\ntest_observation1_for_all_features_model = x_test[:1]\nprint test_observation1_for_all_features_model\n\nprint model_with_4_features.predict(test_observation1_for_4_features_model)\nprint model_with_all_features.predict(test_observation1_for_all_features_model)", " TVnews age educ income\n786 7 45 6 22\n popul TVnews selfLR ClinLR DoleLR PID age educ income\n786 16 7 6 2 6 6 45 6 22\n[0]\n[1]\n" ] ], [ [ "# Accuracies of both the models on Test Dataset", "_____no_output_____" ] ], [ [ "\n# Prediction on test dataset with fitted model with 4 features\nmodel_with_4_features_predition = model_with_4_features.predict(x_test[features_4])\nmodel_with_4_features_test_accuracy = metrics.accuracy_score(y_test,model_with_4_features_predition)\n\n# Prediction on test dataset with fitted model with all features\nmodel_with_all_features_predition = model_with_all_features.predict(x_test)\nmodel_with_all_features_test_accuracy = metrics.accuracy_score(y_test,model_with_all_features_predition\n )\n\nprint \"Model with 4 features test accuracy: \", model_with_4_features_test_accuracy\nprint \"model_with_all_features_test_accuracy: \", model_with_all_features_test_accuracy\n", "Model with 4 features test accuracy: 0.597883597884\nmodel_with_all_features_test_accuracy: 0.910052910053\n" ] ], [ [ "# Thank you..", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec7e8d0bae2c2e73ac809ee799da444e068bafc4
45,227
ipynb
Jupyter Notebook
visualize_stock.ipynb
gary159/quantai
b8c13e7a631dfdf137dad2c5f17ae0aa2bd0ffb6
[ "MIT" ]
null
null
null
visualize_stock.ipynb
gary159/quantai
b8c13e7a631dfdf137dad2c5f17ae0aa2bd0ffb6
[ "MIT" ]
null
null
null
visualize_stock.ipynb
gary159/quantai
b8c13e7a631dfdf137dad2c5f17ae0aa2bd0ffb6
[ "MIT" ]
null
null
null
51.74714
5,312
0.51938
[ [ [ "from ipywidgets import interact, widgets\nimport pandas as pd\nimport requests\nimport numpy as np\nfrom IPython.core.display import display, HTML\nfrom pandas_cache import pd_cache, timeit\n\nfrom bokeh.io import push_notebook, show, output_notebook\nfrom bokeh.plotting import figure\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.layouts import gridplot\n\nfrom bokeh.models.tools import HoverTool\nimport pandas_bokeh\n\npandas_bokeh.output_notebook()\n\nFORMAT = '%Y-%m-%d'\noutput_notebook()", "_____no_output_____" ] ], [ [ "## API SETUP", "_____no_output_____" ], [ "This Jupyter notebook pulls data from AlphaVantage.<br /> \nIf you don't have one, claim your free API Key at https://www.alphavantage.co/support/#api-key", "_____no_output_____" ] ], [ [ "@interact.options(manual=True)\ndef set_api(text=\"Enter your alphavantage API KEY\"):\n global API \n API = text\n print(f\"Your API key is: '{text}'\")", "_____no_output_____" ], [ "#@pd_cache\ndef stock_daily(stock):\n \n #tsleep(10)\n \n url = 'https://www.alphavantage.co/query'\n payload = {\n 'function': 'TIME_SERIES_DAILY',\n 'symbol': stock,\n #'interval': '5min',\n 'outputsize': 'full',\n 'apikey': API\n }\n\n try: \n r = requests.get(url, params=payload)\n r.raise_for_status()\n except requests.exceptions.HTTPError as err: \n print('Failed request alphavantage',err)\n return e\n df = pd.DataFrame(r.json()[list(r.json().keys())[1]]).T\n df.columns = ['Open', 'High', 'Low', 'Close', 'Volume']\n df=df.astype(float)\n df['Mid'] = df[['Open', 'Close']].mean(axis=1)\n df.index = pd.to_datetime(df.index)\n df = df.sort_index()\n return df", "_____no_output_____" ], [ "def sma(df, column, sma1, sma2):\n df['sma1'] = df[column].rolling(window=sma1).mean()\n df['sma2'] = df[column].rolling(window=sma2).mean()\n return df", "_____no_output_____" ], [ "def macd(df, column='Close', fast_MA=12, slow_MA=26, period=9):\n exp1 = df[column].ewm(span=fast_MA, adjust=False).mean()\n exp2 = df[column].ewm(span=slow_MA, adjust=False).mean()\n df['macd1'] = exp1-exp2\n df['macd2'] = df.macd1.ewm(span=period, adjust=False).mean()\n return df", "_____no_output_____" ], [ "def rsi(df, column, n=14):\n delta = df[column].diff()\n dUp, dDown = delta.copy(), delta.copy()\n dUp[dUp < 0] = 0\n dDown[dDown > 0] = 0\n RolUp = dUp.rolling(n).mean()\n RolDown = dDown.abs().rolling(n).mean()\n\n RS = RolUp / RolDown\n df['RSI'] = 100.0 - (100.0 / (1.0 + RS))\n return df", "_____no_output_____" ], [ "stocks = ['AMZN',\n 'MSFT',\n 'SHOP',\n 'NDAQ',\n 'SQ',\n 'V',\n 'CMCSA',\n 'FB',\n 'DIS',\n 'GOOG',\n 'AMD',\n 'WORK',\n 'LEVI',\n 'VGT',\n 'ULTA',\n 'ACB'\n]", "_____no_output_____" ], [ "macd_slider = widgets.IntRangeSlider(\n value=[12, 26],\n min=0,\n max=200,\n step=2.\n)\nprice_columns = ['Open', 'High', 'Low', 'Close', 'Mid']", "_____no_output_____" ], [ "@interact(stock=stocks, column=price_columns, macd_range=macd_slider)\ndef viz_stock(stock, column,macd_range):\n df = stock_daily(stock)\n fast_MA, slow_MA = macd_range\n df = macd(df, column, fast_MA, slow_MA)\n df = rsi(df, column)\n source = ColumnDataSource(df)\n \n min_date = str(df.index.min().strftime(FORMAT))\n max_date = str(df.index.max().strftime(FORMAT))\n\n p1 = figure(title=f\"{stock} from {min_date} to {max_date}\", y_axis_type=\"linear\",\n plot_height=400, x_axis_type=\"datetime\", active_scroll= \"wheel_zoom\")\n p1.yaxis.axis_label = column\n p1.line('index', column, line_width = 1.5, source=source, line_color='blue', alpha=0.6,legend=stock)\n\n\n hover = HoverTool(\n 
tooltips=[('date', '@index{%F}'), (column, f'$@{column}{{0,0.00}}'), ('volume', '@Volume{0.00 a}'),\n ('sma1', '@sma1{0,0}'), ('sma2', '@sma2{0,0}'),\n ('macd1', '@macd1{0.00}')\n ],\n formatters={'index': 'datetime'},\n mode='vline'\n )\n\n p1.add_tools(hover)\n p1.legend.location = \"top_left\"\n\n \n p2 = figure(title=f\"MACD: 12,26,9\", y_axis_type=\"linear\",\n plot_height=200, x_axis_type=\"datetime\",\n x_range=p1.x_range, active_scroll= \"wheel_zoom\")\n p2.line('index', 'macd1', line_width = 1, source=source, line_color='red',legend='MACD1')\n p2.line('index', 'macd2', line_width = 1, source=source, line_color='green',legend='MACD2')\n p2.add_tools(hover)\n p2.legend.location = \"top_left\"\n \n p3 = figure(title=f\"RSI\", y_axis_type=\"linear\",\n plot_height=150, x_axis_type=\"datetime\",\n x_range=p1.x_range, active_scroll= \"wheel_zoom\")\n p3.line('index', 'RSI', line_width = 1, source=source, line_color='purple',legend='rsi')\n p3.add_tools(hover)\n p3.legend.location = \"top_left\"\n \n f = gridplot([[p1],[p2], [p3]], plot_width=1000)\n show(f);", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7e8ed7ed5a06b0094a52aa718f8125183a86e9
13,340
ipynb
Jupyter Notebook
labs/19_TF-IDF and Classification/19_TF_IDF_and_Classification.ipynb
jdmarshl/Legal-123-Sp20
11de39c916ae1d385b1cc675dee2a984ecb3931d
[ "BSD-3-Clause" ]
3
2021-01-20T19:08:40.000Z
2022-01-19T18:27:00.000Z
labs/19_TF-IDF and Classification/19_TF_IDF_and_Classification.ipynb
jdmarshl/Legal-123-Sp20
11de39c916ae1d385b1cc675dee2a984ecb3931d
[ "BSD-3-Clause" ]
null
null
null
labs/19_TF-IDF and Classification/19_TF_IDF_and_Classification.ipynb
jdmarshl/Legal-123-Sp20
11de39c916ae1d385b1cc675dee2a984ecb3931d
[ "BSD-3-Clause" ]
4
2021-03-08T09:54:36.000Z
2022-02-01T03:44:51.000Z
24.795539
428
0.576312
[ [ [ "# [LEGALST-123] Lab 19: TF-IDF and Classification", "_____no_output_____" ], [ "<img src = \"https://cdn.sstatic.net/Sites/stackoverflow/company/img/logos/se/se-logo.png?v=dd7153fcc7fa\" style = \"width:500px; height: 275px;\" />\n\nThis lab will cover the term frequency-inverse document frequency method, and classification algorithms in machine learning.\n\nEstimated Lab time: 30 minutes", "_____no_output_____" ] ], [ [ "# Dependencies\nfrom datascience import *\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import linear_model\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import svm\nimport matplotlib.pyplot as plt\nfrom sklearn.naive_bayes import MultinomialNB\nimport itertools\nimport seaborn as sn\n%matplotlib inline", "_____no_output_____" ] ], [ [ "# The Data", "_____no_output_____" ], [ "For this lab, we'll use a dataset that was drawn from a Kaggle collection of questions posed on stackexchange (a website/forum where people ask and answer questions about statistics, programming etc.)\n\nThe data has the following features:\n\n- \"Id\": The Id number for the question\n- \"Body\": The text of the answer\n- \"Tag\": Whether the question was tagged as dealing with python, xml, java, json, or android\n\nYour task will be to extract features from the \"Body\" column, and use those features to predict class membership, denoted by \"Tag.\"", "_____no_output_____" ] ], [ [ "stack_data = pd.read_csv('data/stackexchange.csv', encoding='latin-1')\nstack_data.head(5)", "_____no_output_____" ] ], [ [ "# Section 1: TF-IDF Vectorizer", "_____no_output_____" ], [ "The term frequency-inverse document frequency (tf-idf) vectorizer is a statistic that measures similarity within and across documents. Term frequency refers to the number of times a term shows up within a document. Inverse document frequency is the logarithmically scaled inverse fraction of the documents that contains the word, and penalizes words that occur frequently. Tf-idf multiplies these two measures together.\n\nCheck out the documentation [here](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html).", "_____no_output_____" ], [ "#### Question 1: Why is tf-idf a potentially more attractive vectorizer than the standard count vectorizer?", "_____no_output_____" ], [ "Let's get started! First, extract the \"Body\" column into its own numpy array called \"text_list\"", "_____no_output_____" ] ], [ [ "# Extract Text Data\n", "_____no_output_____" ] ], [ [ "Next, initialize a term frequency-inverse document frequency (tf-idf) vectorizer. Check out the documentation to fill in the arguments: http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html", "_____no_output_____" ] ], [ [ "tf = TfidfVectorizer(analyzer=, \n ngram_range=(), \n min_df = , \n stop_words = )", "_____no_output_____" ] ], [ [ "Next, use the \"fit_transform\" method to take in the list of documents, and convert them into a document term matrix. Use \"get_feature_names()\" and \"len\" to calculate how many features this generates.", "_____no_output_____" ], [ "#### Question 2: The dimensionality explodes quickly. 
Why might this be a problem as you use more data?", "_____no_output_____" ], [ "Calculate the tf-idf scores for the first document in the corpus. Do the following:\n\n1. Use \".todense()\" to turn the tfidf matrix into a dense matrix (get rid of the sparsity)\n2. Create an object for th document by calling the 0th index of the dense matrix, converting it to a list. Try something like: document = dense[0].tolist()[0]\n3. Calculate the phrase scores by using the \"zip\" command to iterate from 0 to the length of the document, retraining scores greater than 0.\n4. Sort the scores using the \"sorted\" command\n5. Print the top 20 scores with their feature names", "_____no_output_____" ], [ "# Section 2: Classification Algorithms", "_____no_output_____" ], [ "One of the main tasks in supervised machine learning is classification. In this case, we will develop algorithms that will predict a question's tag based on the text of its answer.\n\nThe first step is to split our data into training, validation, and test sets. ", "_____no_output_____" ], [ "## Naive Bayes", "_____no_output_____" ], [ "[Naive Bayes classifers](http://scikit-learn.org/stable/modules/naive_bayes.html) classify observations by making the assumption that features are all independent of one another. Do the following:\n\n1. Initialize a Naive Bayes classifier method with \"MultinomialNB()\"\n2. Fit the model on your training data\n3. Predict on the validation data and store the predictions\n4. Use \"np.mean\" to calculate how correct the classier was on average\n5. Calcualte the confusion matrix using \"confusion_matrix,\" providing the true values first and the predicted values second.", "_____no_output_____" ], [ "Let's plot the confusion matrix! Use the following code from the [\"seaborn\"](https://seaborn.pydata.org/generated/seaborn.heatmap.html) package to make a heatmap out of the matrix.", "_____no_output_____" ] ], [ [ "# Transform confusion matrix into a dataframe\nnb_df_cm = pd.DataFrame(nb_cf_matrix, range(5),\n range(5))\n\n# Rename the column and row indices\nnb_df_cm = nb_df_cm.rename(index=str, columns={0: \"python\", 1: \"xml\", 2: \"java\", 3: \"json\", 4: \"android\"})\nnb_df_cm.index = ['python', 'xml', 'java', 'json', 'android']\n\n# Plot the confusion matrix\nplt.figure(figsize = (10,7))\nsn.set(font_scale=1.4)#for label size\nsn.heatmap(nb_df_cm, \n annot=True,\n annot_kws={\"size\": 16})\n\nplt.title(\"Naive Bayes Confusion Matrix\")\nplt.xlabel(\"Predicted Label\")\nplt.ylabel(\"True Label\")\nplt.show()", "_____no_output_____" ] ], [ [ "#### Question 3: Do you notice any patterns? Are there any patterns in misclassification that are worrisome?", "_____no_output_____" ], [ "## Multinomial Logistic Regression", "_____no_output_____" ], [ "Next, let's try [multinomial logistic regression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html)! Follow the same steps as with Naive Bayes, and plot the confusion matrix.", "_____no_output_____" ], [ "## SVM", "_____no_output_____" ], [ "Now do the same for a [Support Vector Machine](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html).", "_____no_output_____" ], [ "#### Question 4: How did each of the classifiers do? Which one would you prefer the most?", "_____no_output_____" ], [ "## Test Final Classifier", "_____no_output_____" ], [ "Choose your best classifier and use it to predict on the test set. Report the mean accuracy and confusion matrix. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec7e96a92065fbf2f64edf9b7fef36bc4fe56791
2,528
ipynb
Jupyter Notebook
tests/nb_builds/nb_preheader/03.02..Part-Orbits_of_Planets_and_Satellites.ipynb
rmsrosa/nbjoint
7019ff336e4a7bb1f6ed20da5fd12b9f702c424a
[ "MIT" ]
null
null
null
tests/nb_builds/nb_preheader/03.02..Part-Orbits_of_Planets_and_Satellites.ipynb
rmsrosa/nbjoint
7019ff336e4a7bb1f6ed20da5fd12b9f702c424a
[ "MIT" ]
null
null
null
tests/nb_builds/nb_preheader/03.02..Part-Orbits_of_Planets_and_Satellites.ipynb
rmsrosa/nbjoint
7019ff336e4a7bb1f6ed20da5fd12b9f702c424a
[ "MIT" ]
null
null
null
24.784314
325
0.581883
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec7eacd3bafcf794a81b8e74cf93f7164078e899
156,344
ipynb
Jupyter Notebook
ch06-Data Loading, Storage, and File Formats.ipynb
blockchain99/data_analysis
6648e63f4dca6899a643014148194015bd1707bd
[ "MIT" ]
1
2021-09-04T04:38:29.000Z
2021-09-04T04:38:29.000Z
ch06-Data Loading, Storage, and File Formats.ipynb
blockchain99/data_analysis
6648e63f4dca6899a643014148194015bd1707bd
[ "MIT" ]
null
null
null
ch06-Data Loading, Storage, and File Formats.ipynb
blockchain99/data_analysis
6648e63f4dca6899a643014148194015bd1707bd
[ "MIT" ]
null
null
null
26.698087
327
0.395692
[ [ [ "#$conda activate py37\n#(py37)$ jupyter notebook ", "_____no_output_____" ], [ "# %config IPCompleter.greedy=True", "_____no_output_____" ], [ "#press [SHIFT] and [TAB] from within the method parentheses\n\n### intellisense - works perfect!! -> excute in command line windows. : (py37) $ -> works perfect !!\n# (py37) $pip3 install jupyter-tabnine\n# (py37) $sudo jupyter nbextension install --py jupyter_tabnine\n\n# (py37) $jupyter nbextension enable jupyter_tabnine --py\n##### jupyter nbextension enable --py jupyter_tabnine ##instead above line excuted. \n\n# (py37) $jupyter serverextension enable --py jupyter_tabnine\n\n#--> I installed in (py37) conda env.", "_____no_output_____" ] ], [ [ "# Data Loading, Storage, ", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nnp.random.seed(12345)\nimport matplotlib.pyplot as plt\nplt.rc('figure', figsize=(10, 6))\nnp.set_printoptions(precision=4, suppress=True)", "_____no_output_____" ] ], [ [ "## Reading and Writing Data in Text Format\n* pd.read_csv('examples/ex2.csv', names=names_list, index_col='message')\n* convert text data into a DataFrame", "_____no_output_____" ], [ "|Category|Description|\n|:-------|:----------|\n|Indexing|Can treat one or more columns as the returned DataFrame, and whether to get column names from the file, the user, or not at all.|\n|Type inference and data conversion|user-defined value conversions and custom list of missing value markers.|\n|Datetime parsing|combining capability, including combining date and time information spread over multiple columns into a single column in the result|\n|Iterating|iterating over chunks of very large files.|\n|Unclean data issues|Skipping rows or a footer, comments, or other minor things like numeric data with thousands separated by commas.|", "_____no_output_____" ] ], [ [ "!cat examples/ex1.csv", "a,b,c,d,message\r\n1,2,3,4,hello\r\n5,6,7,8,world\r\n9,10,11,12,foo" ], [ "# use comma as default delimiter\ndf = pd.read_csv('examples/ex1.csv')\ndf", "_____no_output_____" ], [ "# use tab ( '\\t' ) as default delimiter\npd.read_table('examples/ex1.csv', sep=',')", "_____no_output_____" ], [ "!cat examples/ex2.csv", "1,2,3,4,hello\r\n5,6,7,8,world\r\n9,10,11,12,foo" ], [ "pd.read_csv('examples/ex2.csv') #not as intended!", "_____no_output_____" ], [ "pd.read_csv('examples/ex2.csv', header=None)", "_____no_output_____" ], [ "pd.read_csv('examples/ex2.csv', names=['a', 'b', 'c', 'd', 'message'])", "_____no_output_____" ], [ "names = ['a', 'b', 'c', 'd', 'message']\npd.read_csv('examples/ex2.csv', names=names, index_col='message')", "_____no_output_____" ], [ "!cat examples/csv_mindex.csv #with header", "key1,key2,value1,value2\r\none,a,1,2\r\none,b,3,4\r\none,c,5,6\r\none,d,7,8\r\ntwo,a,9,10\r\ntwo,b,11,12\r\ntwo,c,13,14\r\ntwo,d,15,16\r\n" ], [ "parsed = pd.read_csv('examples/csv_mindex.csv',\n index_col=['key1', 'key2'])\nparsed", "_____no_output_____" ], [ "#table might not have a fixed delimiter, using whitespace or some\n# other pattern to separate fields, Consider a text file that looks like this:\nlist(open('examples/ex3.txt'))", "_____no_output_____" ] ], [ [ "While you could do some munging by hand, the fields here are separated by **a variable amount of whitespace**. In these cases, you can pass a regular expression as a\ndelimiter for read_table . 
This can be expressed by the regular expression **\\s+** , so we\nhave then:", "_____no_output_____" ] ], [ [ "result = pd.read_table('examples/ex3.txt', sep='\\s+')\nresult", "_____no_output_____" ], [ "!cat examples/ex4.csv", "# hey!\r\na,b,c,d,message\r\n# just wanted to make things more difficult for you\r\n# who reads CSV files with computers, anyway?\r\n1,2,3,4,hello\r\n5,6,7,8,world\r\n9,10,11,12,foo" ], [ "pd.read_csv('examples/ex4.csv', skiprows=[0, 2, 3])", "_____no_output_____" ], [ "!cat examples/ex5.csv", "something,a,b,c,d,message\r\none,1,2,3,4,NA\r\ntwo,5,6,,8,world\r\nthree,9,10,11,12,foo" ], [ "result = pd.read_csv('examples/ex5.csv')\nresult", "_____no_output_____" ], [ "pd.isnull(result)", "_____no_output_____" ] ], [ [ "[Null value : None, NaN](https://jakevdp.github.io/PythonDataScienceHandbook/03.04-missing-values.html)", "_____no_output_____" ], [ "|Category|Description|\n|:-------|:----------|\n|isnull()| Generate a boolean mask indicating missing values|\n|notnull()| Opposite of isnull()|\n|dropna()| Return a filtered version of the data|\n|fillna()| Return a copy of the data with missing values filled or imputed|", "_____no_output_____" ] ], [ [ "# The na_values option can take either a list or set of strings to consider missing\n# values:\nresult = pd.read_csv('examples/ex5.csv', na_values=['NULL']) #NULL consists of NaN and None\nresult", "_____no_output_____" ], [ "!cat examples/ex51.csv", "something,a,b,c,d,message\r\none,1,2,3,4,NULL\r\ntwo,5,6,,8,world\r\nthree,9,10,11,12,foo\r\n" ], [ "result2 = pd.read_csv('examples/ex51.csv', na_values=['NULL'])\nresult2", "_____no_output_____" ], [ "sentinels = {'message': ['foo', 'NA'], 'something': ['two']}\npd.read_csv('examples/ex5.csv', na_values=sentinels)", "_____no_output_____" ] ], [ [ "#### read_csv/read_table function arguments\n|Argument|Description|\n|:-------|:----------|\n|path|String indicating filesystem location, URL, or file-like object|\n|sep or delimiter|Character sequence or regular expression to use to split fields in each row|\n|header|Row number to use as column names; defaults to 0 (first row), but should be None if there is no\nheader row|\n|index_col|Column numbers or names to use as the row index in the result; can be a single name/number or a list of them for a hierarchical index|\n|names|List of column names for result, combine with header=None| \n|skiprows|Number of rows at beginning of file to ignore or list of row numbers (starting from 0) to skip.|\n|na_values|Sequence of values to replace with NA.|\n|comment|Character(s) to split comments off the end of lines.|\n|parse_dates|Attempt to parse data to datetime ; False by default. If True , will attempt to parse all columns. Otherwise can specify a list of column numbers or name to parse. 
If element of list is tuple or list, will combine multiple columns together and parse to date (e.g., if date/time split across two columns).|\n|keep_date_col|If joining columns to parse date, keep the joined columns; False by default.|\n|converters|Dict containing column number of name mapping to functions (e.g., {'foo': f} would apply the function f to all values in the 'foo' column).|\n|dayfirst|When parsing potentially ambiguous dates, treat as international format (e.g., 7/6/2012 -> June 7, 2012); False by default.|\n|date_parser|Function to use to parse dates.|\n|nrows|Number of rows to read from beginning of file.|\n|iterator|Return a TextParser object for reading file piecemeal.|\n|chunksize|For iteration, size of file chunks.|\n|skip_footer|Number of lines to ignore at end of file.|\n|verbose|Print various parser output information, like the number of missing values placed in non-numeric columns.|\n|encoding|Text encoding for Unicode (e.g., 'utf-8' for UTF-8 encoded text).|\n|squeeze|If the parsed data only contains one column, return a Series.|\n|thousands|Separator for thousands (e.g., ',' or '.' ).|", "_____no_output_____" ], [ "### Reading Text Files in Pieces", "_____no_output_____" ] ], [ [ "pd.options.display.max_rows = 10 #pandas display settings more compact:(10 rows)", "_____no_output_____" ], [ "result = pd.read_csv('examples/ex6.csv')\nresult", "_____no_output_____" ], [ "print(result['key'].unique())\nresult['key']", "['L' 'B' 'G' 'R' 'Q' 'U' 'K' 'S' '8' '1' 'P' 'J' 'E' 'A' 'F' 'H' 'W' 'C'\n 'V' 'I' '6' 'Y' 'T' 'M' 'X' 'N' 'O' 'Z' '2' 'D' '7' '0' '4' '5' '3' '9']\n" ], [ "#test 1 : count frequency\nimport numpy as np\na = np.array( [10,10,20,10,20,20,20,30, 30,50,40,40] )\nprint(\"Original array:\")\nprint(a)\nunique_elements, counts_elements = np.unique(a, return_counts=True)\nprint(\"Frequency of unique values of the said array:\")\nprint(np.asarray((unique_elements, counts_elements)))\n\nprint(\"----------------------------------\")\nprint(np.array(np.unique(a, return_counts=True)))\nprint(\"-----------.T---------------------\")\nprint(np.array(np.unique(a, return_counts=True)).T)\nprint(\"==================================\")\nresult_frequency = np.array(np.unique(a, return_counts=True)).T\nprint(type(result_frequency))\n[f\"value {x[0]}'s frequency is {x[1]}\" for x in result_frequency]", "Original array:\n[10 10 20 10 20 20 20 30 30 50 40 40]\nFrequency of unique values of the said array:\n[[10 20 30 40 50]\n [ 3 4 2 2 1]]\n----------------------------------\n[[10 20 30 40 50]\n [ 3 4 2 2 1]]\n-----------.T---------------------\n[[10 3]\n [20 4]\n [30 2]\n [40 2]\n [50 1]]\n==================================\n<class 'numpy.ndarray'>\n" ] ], [ [ "#### sort 2-d array based on 2nd column : .argsort() : returns an numpy.array of indices that sort the given numpy.array\n* arr[arr[:, 1].argsort()]", "_____no_output_____" ] ], [ [ "#test 2\ntest_arr2 = np.array(np.unique(result['key'], return_counts=True)).T\ntest_arr2", "_____no_output_____" ], [ "#test 2\ntest_arr2[test_arr2[:, 1].argsort()] ", "_____no_output_____" ], [ "#test2-2\nn = len(test_arr2)\nprint(n)\ntest_arr2[test_arr2[:, 1].argsort()][::-1] #reverse order", "36\n" ], [ "#test 3 \nresult['key'].value_counts()", "_____no_output_____" ], [ "pd.read_csv('examples/ex6.csv', nrows=5)", "_____no_output_____" ], [ "chunker = pd.read_csv('examples/ex6.csv', chunksize=1000)\nchunker", "_____no_output_____" ], [ "chunker = pd.read_csv('examples/ex6.csv', chunksize=1000)\n\ntot = pd.Series([])\nfor piece in 
chunker:\n tot = tot.add(piece['key'].value_counts(), fill_value=0)\n\ntot = tot.sort_values(ascending=False)", "_____no_output_____" ], [ "tot", "_____no_output_____" ], [ "tot[:10]", "_____no_output_____" ] ], [ [ "### Writing Data to Text Format\n* data.to_csv('out.csv', index=False)", "_____no_output_____" ] ], [ [ "data = pd.read_csv('examples/ex5.csv')\ndata", "_____no_output_____" ], [ "data.to_csv('examples/out.csv') #seperator ,\n!cat examples/out.csv", ",something,a,b,c,d,message\r\n0,one,1,2,3.0,4,\r\n1,two,5,6,,8,world\r\n2,three,9,10,11.0,12,foo\r\n" ] ], [ [ "Other delimiters can be used, of course (writing to sys.stdout so it prints the text\nresult to the console):", "_____no_output_____" ] ], [ [ "import sys\ndata.to_csv(sys.stdout, sep='|') #seperator |", "|something|a|b|c|d|message\n0|one|1|2|3.0|4|\n1|two|5|6||8|world\n2|three|9|10|11.0|12|foo\n" ], [ "# Missing values appear as empty strings in the output. You might want to denote them\n# by some other sentinel value:\ndata.to_csv(sys.stdout, na_rep='NULL')", ",something,a,b,c,d,message\n0,one,1,2,3.0,4,NULL\n1,two,5,6,NULL,8,world\n2,three,9,10,11.0,12,foo\n" ], [ "data.to_csv(sys.stdout, index=False, header=False)", "one,1,2,3.0,4,\ntwo,5,6,,8,world\nthree,9,10,11.0,12,foo\n" ], [ "data.to_csv(sys.stdout, index=False, columns=['a', 'b', 'c'])", "a,b,c\n1,2,3.0\n5,6,\n9,10,11.0\n" ], [ "dates = pd.date_range('1/1/2000', periods=7)\ndates", "_____no_output_____" ], [ "ts = pd.Series(np.arange(7), index=dates)\nts", "_____no_output_____" ], [ "ts.to_csv('examples/tseries.csv')", "/home/ys/anaconda3/envs/py37/lib/python3.7/site-packages/ipykernel_launcher.py:1: FutureWarning: The signature of `Series.to_csv` was aligned to that of `DataFrame.to_csv`, and argument 'header' will change its default value from False to True: please pass an explicit value to suppress this warning.\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "!cat examples/tseries.csv", "2000-01-01,0\r\n2000-01-02,1\r\n2000-01-03,2\r\n2000-01-04,3\r\n2000-01-05,4\r\n2000-01-06,5\r\n2000-01-07,6\r\n" ] ], [ [ "### Working with Delimited Formats", "_____no_output_____" ] ], [ [ "!cat examples/ex7.csv", "\"a\",\"b\",\"c\"\r\n\"1\",\"2\",\"3\"\r\n\"1\",\"2\",\"3\"\r\n" ], [ "import csv\nf = open('examples/ex7.csv')\n\nreader = csv.reader(f)\nreader", "_____no_output_____" ], [ "for line in reader:\n print(line)", "['a', 'b', 'c']\n['1', '2', '3']\n['1', '2', '3']\n" ], [ "with open('examples/ex7.csv') as f:\n lines = list(csv.reader(f))\n \n# from csv import reader\n# with open('examples/ex7.csv') as file:\n# csv_reader = reader(file)\n# lines = list(csv_reader)", "_____no_output_____" ], [ "header, values = lines[0], lines[1:]", "_____no_output_____" ], [ "#test\nheader", "_____no_output_____" ], [ "#test\nprint(\"values : \",values)\nprint(\"*values : \",*values)\nprint(\"-----------------------------------\")\nprint(zip(*values))\nprint(\"=====================================\")\nfor element in zip(*values):\n print(element)", "values : [['1', '2', '3'], ['1', '2', '3']]\n*values : ['1', '2', '3'] ['1', '2', '3']\n-----------------------------------\n<zip object at 0x7f28710e6af0>\n=====================================\n('1', '1')\n('2', '2')\n('3', '3')\n" ], [ "data_dict = {h: v for h, v in zip(header, zip(*values))}\ndata_dict", "_____no_output_____" ], [ "#test csv read # convert to Uppercase , then save.\nfrom csv import reader, writer\nwith open('examples/ex7.csv') as file:\n csv_reader = reader(file)\n lines2 = list(csv_reader)\n 
header2, values2 = lines2[0], lines2[1:] ", "_____no_output_____" ], [ "header2", "_____no_output_____" ], [ "values2", "_____no_output_____" ], [ "data_dict2 = { h: v for h,v in zip(header2, zip(*values2))}\n# data_dict2 = [{h: v} for h,v in zip(header2, zip(*values2))] #[{'a': ('1', '1')}, {'b': ('2', '2')}, {'c': ('3', '3')}]\ndata_dict2", "_____no_output_____" ], [ "#test : read csv, convert to upper case then save as csv file\nfrom csv import reader, writer\nwith open('examples/ex7.csv') as file:\n csv_reader = reader(file)\n\n with open('examples/ex7_out.csv','w') as file:\n csv_writer = writer(file)\n for line in csv_reader:\n csv_writer.writerow([x.upper() for x in line])", "_____no_output_____" ], [ "#######test csv reader writer DictReader DictWriter##########\n\n# from csv import writer, DictWriter\n# print(\"---------- Version using writer--------\")\n# with open(\"cats3.csv\", \"w\") as file:\n# csv_writer = writer(file)\n# csv_writer.writerow([\"Name\", \"Age\"])\n# csv_writer.writerow([\"Blue\", 3])\n# csv_writer.writerow([\"Kitty\", 1])\n\n# print(\"## ----------Version using DictWriter-------\")\n# with open(\"cats33.csv\", \"w\") as file:\n# \theaders = [\"Name\", \"Breed\", \"Age\"]\n# \tcsv_writer = DictWriter(file, fieldnames=headers)\n# \tcsv_writer.writeheader()\n# \tcsv_writer.writerow({\n# \t\t\"Name\": \"Garfield\",\n# \t\t\"Breed\": \"Orange Tabby\",\n# \t\t\"Age\": 10\n# \t})\n\n##########################\n# print(\"####### DictReader, convert cm to in and DictWriter as new dict #####\")\n# from csv import DictReader, DictWriter\n\n# def cm_to_in(cm):\n# return int(float(cm) * 0.393701)\n\n# with open(\"test305.csv\") as file:\n# csv_reader = DictReader(file) \n# fighters = list(csv_reader)\n\n# with open(\"test305_inches.csv\", \"w\") as file:\n# headers = (\"Name\", \"Country\", \"Height\")\n# csv_writer = DictWriter(file, fieldnames=headers)\n# csv_writer.writeheader()\n\n# for f in fighters:\n# csv_writer.writerow({\n# \"Name\" : f[\"Name\"],\n# \"Country\" : f[\"Country\"],\n# \"Height\" : cm_to_in(f[\"Height (in cm)\"])\n# }) ", "_____no_output_____" ] ], [ [ "To define a new format with a different\ndelimiter, string quoting convention, or line terminator, we define a simple subclass\nof csv.Dialect :", "_____no_output_____" ] ], [ [ "class my_dialect(csv.Dialect):\n lineterminator = '\\n'\n delimiter = ';'\n quotechar = '\"'\n quoting = csv.QUOTE_MINIMAL", "_____no_output_____" ], [ "reader = csv.reader(f, dialect=my_dialect)", "_____no_output_____" ] ], [ [ "We can also give individual CSV dialect parameters as keywords to **csv.reader**\nwithout having to define a subclass:", "_____no_output_____" ] ], [ [ "reader = csv.reader(f, delimiter='|')", "_____no_output_____" ], [ "with open('mydata.csv', 'w') as f:\n writer = csv.writer(f, dialect=my_dialect)\n writer.writerow(('one', 'two', 'three'))\n writer.writerow(('1', '2', '3'))\n writer.writerow(('4', '5', '6'))\n writer.writerow(('7', '8', '9'))", "_____no_output_____" ] ], [ [ "### JSON Data\n* result = json.loads(obj)\n* asjson = json.dumps(result)\n* pd.read_json('examples/example.json')", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "obj = \"\"\"\n{\"name\": \"Wes\",\n \"places_lived\": [\"United States\", \"Spain\", \"Germany\"],\n \"pet\": null,\n \"siblings\": [{\"name\": \"Scott\", \"age\": 30, \"pets\": [\"Zeus\", \"Zuko\"]},\n {\"name\": \"Katie\", \"age\": 38,\n \"pets\": [\"Sixes\", \"Stache\", \"Cisco\"]}]\n}\n\"\"\"", 
"_____no_output_____" ], [ "import json\nresult = json.loads(obj)\nresult", "_____no_output_____" ], [ "asjson = json.dumps(result)\nasjson", "_____no_output_____" ], [ "siblings = pd.DataFrame(result['siblings'], columns=['name', 'age'])\nsiblings", "_____no_output_____" ], [ "!cat examples/example.json", "[{\"a\": 1, \"b\": 2, \"c\": 3},\r\n {\"a\": 4, \"b\": 5, \"c\": 6},\r\n {\"a\": 7, \"b\": 8, \"c\": 9}]\r\n" ], [ "data = pd.read_json('examples/example.json')\ndata", "_____no_output_____" ], [ "print(data.to_json())", "{\"a\":{\"0\":1,\"1\":4,\"2\":7},\"b\":{\"0\":2,\"1\":5,\"2\":8},\"c\":{\"0\":3,\"1\":6,\"2\":9}}\n" ], [ "print(data.to_json(orient='records'))", "[{\"a\":1,\"b\":2,\"c\":3},{\"a\":4,\"b\":5,\"c\":6},{\"a\":7,\"b\":8,\"c\":9}]\n" ] ], [ [ "**json.dumps** returns a JSON STRING:", "_____no_output_____" ] ], [ [ "import json\n\nclass Cat:\n\tdef __init__(self, name, breed):\n\t\tself.name = name\n\t\tself.breed = breed\n\t\t\nc = Cat(\"Charles\", \"Tabby\")\n\n# json.dumps returns a JSON STRING:\n\nj1 = json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])\nprint(j1)\n# results in '[\"foo\", {\"bar\": [\"baz\", null, 1.0, 2]}]'\n\nj2 = json.dumps(c.__dict__)\nprint(j2)\n# results in '{\"name\": \"Charles\", \"breed\": \"Tabby\"}'", "[\"foo\", {\"bar\": [\"baz\", null, 1.0, 2]}]\n{\"name\": \"Charles\", \"breed\": \"Tabby\"}\n" ] ], [ [ "#### To JSONPICKLE 'c' the cat:\n\n``\nwith open(\"cat.json\", \"w\") as file:\n\tfrozen = jsonpickle.encode(c)\n\tfile.write(frozen)\n``", "_____no_output_____" ] ], [ [ "# !python3 -m pip install jsonpickle # installed!", "_____no_output_____" ], [ "## $ python3 -m pip install jsonpickle\nimport jsonpickle\n\nclass Cat:\n\tdef __init__(self, name, breed):\n\t\tself.name = name\n\t\tself.breed = breed\n\nc = Cat(\"Charles\", \"Tabby\")\n\n# To JSONPICKLE 'c' the cat:\nwith open(\"cat.json\", \"w\") as file:\n\tfrozen = jsonpickle.encode(c)\n\tfile.write(frozen)\n\n# To bring back 'c' the cat using JSONPICKLE\nwith open(\"cat.json\", \"r\") as file:\n\tcontents = file.read()\n\tunfrozen = jsonpickle.decode(contents)\n\tprint(unfrozen)\n\tprint(type(unfrozen)) #<class '__main__.Cat'>\n\tprint(f\"name is {unfrozen.name} and breed is {unfrozen.breed}\")", "<__main__.Cat object at 0x7fb71adb8550>\n<class '__main__.Cat'>\nname is Charles and breed is Tabby\n" ], [ "!cat cat.json", "{\"breed\": \"Tabby\", \"name\": \"Charles\", \"py/object\": \"__main__.Cat\"}" ] ], [ [ "### XML and HTML: Web Scraping", "_____no_output_____" ], [ "```\n(py37)$ conda install lxml\n(py37)$ conda install -c anaconda beautifulsoup4\n(py37)$ conda install -c anaconda html5lib\n\n```\n<pip install beautifulsoup4 html5lib>", "_____no_output_____" ], [ "**pandas.read_html()** function has a number of options, but by default it searches\nfor and attempts to parse all tabular data contained within ``<table> tags``. 
The result is\na **list of DataFrame objects**:", "_____no_output_____" ] ], [ [ "tables = pd.read_html('examples/fdic_failed_bank_list.html')", "_____no_output_____" ], [ "print(len(tables))\ntables #list", "1\n" ], [ "failures = tables[0]\nprint(type(failures))\nfailures.head()", "<class 'pandas.core.frame.DataFrame'>\n" ], [ "close_timestamps = pd.to_datetime(failures['Closing Date'])\nclose_timestamps", "_____no_output_____" ], [ "close_timestamps.dt", "_____no_output_____" ], [ "close_timestamps.dt.year", "_____no_output_____" ], [ "close_timestamps.dt.year.value_counts()", "_____no_output_____" ] ], [ [ "#### Parsing XML with lxml.objectify\n* Using lxml.objectify , we parse the file and get a reference to the root node of the\nXML file with **getroot** :\n\n```\nfrom lxml import objectify\npath = 'datasets/mta_perf/Performance_MNR.xml'\nparsed = objectify.parse(open(path))\nroot = parsed.getroot()\n```\n\n```\ndata = []\nskip_fields = ['PARENT_SEQ', 'INDICATOR_SEQ',\n 'DESIRED_CHANGE', 'DECIMAL_PLACES']\nfor elt in root.INDICATOR:\n el_data = {}\n for child in elt.getchildren():\n if child.tag in skip_fields:\n continue\n el_data[child.tag] = child.pyval #make dictionary\n data.append(el_data)\n```\n\n```\nperf = pd.DataFrame(data)\n```\n--------------------------------------------\n\n```\nfrom io import StringIO\ntag = '<a href=\"http://www.google.com\">Google</a>'\nroot = objectify.parse(StringIO(tag)).getroot()\n```", "_____no_output_____" ], [ "```\n<INDICATOR>\n <INDICATOR_SEQ>373889</INDICATOR_SEQ>\n <PARENT_SEQ></PARENT_SEQ>\n <AGENCY_NAME>Metro-North Railroad</AGENCY_NAME>\n <INDICATOR_NAME>Escalator Availability</INDICATOR_NAME>\n <DESCRIPTION>Percent of the time that escalators are operational\n systemwide. The availability rate is based on physical observations performed\n the morning of regular business days only. 
This is a new indicator the agency\n began reporting in 2009.</DESCRIPTION>\n <PERIOD_YEAR>2011</PERIOD_YEAR>\n <PERIOD_MONTH>12</PERIOD_MONTH>\n <CATEGORY>Service Indicators</CATEGORY>\n <FREQUENCY>M</FREQUENCY>\n <DESIRED_CHANGE>U</DESIRED_CHANGE>\n <INDICATOR_UNIT>%</INDICATOR_UNIT>\n <DECIMAL_PLACES>1</DECIMAL_PLACES>\n <YTD_TARGET>97.00</YTD_TARGET>\n <YTD_ACTUAL></YTD_ACTUAL>\n <MONTHLY_TARGET>97.00</MONTHLY_TARGET>\n <MONTHLY_ACTUAL></MONTHLY_ACTUAL>\n</INDICATOR>\n```", "_____no_output_____" ] ], [ [ "# !cat datasets/mta_perf/Performance_MNR.xml", "_____no_output_____" ], [ "from lxml import objectify\n\npath = 'datasets/mta_perf/Performance_MNR.xml'\nparsed = objectify.parse(open(path))\nroot = parsed.getroot()", "_____no_output_____" ], [ "\ndata = []\n\nskip_fields = ['PARENT_SEQ', 'INDICATOR_SEQ',\n 'DESIRED_CHANGE', 'DECIMAL_PLACES']\n\n# root.INDICATOR returns a generator yielding each <INDICATOR> XML element.\n# For each record, we can populate a dict of tag names (like YTD_ACTUAL ) to data values\n# (excluding a few tags):\nfor elt in root.INDICATOR:\n el_data = {}\n for child in elt.getchildren():\n if child.tag in skip_fields:\n continue\n el_data[child.tag] = child.pyval #make dictionary, named el_data {child.tag:child.pyval}\n data.append(el_data)import json\n\nclass Cat:\n\tdef __init__(self, name, breed):\n\t\tself.name = name\n\t\tself.breed = breed\n\t\t\nc = Cat(\"Charles\", \"Tabby\")\n\n# json.dumps returns a JSON STRING:\n\nj = json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])\n# results in '[\"foo\", {\"bar\": [\"baz\", null, 1.0, 2]}]'\n\nj = json.dumps(c.__dict__)\n# results in '{\"name\": \"Charles\", \"breed\": \"Tabby\"}'", "_____no_output_____" ], [ "perf = pd.DataFrame(data)\nperf.head()", "_____no_output_____" ], [ "# Consider an HTML link tag, which is also valid XML:\nfrom io import StringIO\ntag = '<a href=\"http://www.google.com\">Google</a>'\nroot = objectify.parse(StringIO(tag)).getroot()", "_____no_output_____" ] ], [ [ "You can now access any of the fields (like href ) in the tag or the link text:", "_____no_output_____" ] ], [ [ "root", "_____no_output_____" ], [ "# root.get('a')", "_____no_output_____" ], [ "root.get('href')", "_____no_output_____" ], [ "root.text", "_____no_output_____" ] ], [ [ "## Binary Data Formats\n* frame.to_pickle('frame_pickle') : store as pickle format\n* pd.read_pickle('frame_pickle')", "_____no_output_____" ] ], [ [ "frame = pd.read_csv('examples/ex1.csv')\nframe", "_____no_output_____" ], [ "frame.to_pickle('examples/frame_pickle')", "_____no_output_____" ], [ "pd.read_pickle('examples/frame_pickle')", "_____no_output_____" ], [ "!rm examples/frame_pickle", "_____no_output_____" ] ], [ [ "### pickle an object: pickle.dump(blue, file)\n``\nblue = Cat(\"Blue\", \"Scottish Fold\", \"String\")\nwith open(\"pets.pickle\", \"wb\") as file:\n\tpickle.dump(blue, file) ``", "_____no_output_____" ] ], [ [ "#test \nimport pickle\nclass Animal:\n\tdef __init__(self, name, species):\n\t\tself.name = name\n\t\tself.species = species\n\n\tdef __repr__(self):\n\t\treturn f\"{self.name} is a {self.species}\"\n\n\tdef make_sound(self, sound):\n\t\tprint(f\"this animal says {sound}\")\n\n\nclass Cat(Animal):\n\tdef __init__(self, name, breed, toy):\n\t\tsuper().__init__(name, species=\"Cat\") # Call init on parent class\n\t\tself.breed = breed\n\t\tself.toy = toy\n\n\tdef play(self):\n\t\tprint(f\"{self.name} plays with {self.toy}\")\n\n\nblue = Cat(\"Blue\", \"Scottish Fold\", \"String\")\n\n\n# To pickle an 
object:\nprint(\"---write pickle -> pickle.dump(class_instance, opened_file_with_wb)--\")\n\nwith open(\"pets.pickle\", \"wb\") as file:\n\tpickle.dump(blue, file)\n\n# #To unpickle something:\nprint(\"== unpickle -> pickle.load(opened_file_with_rb)\")\nwith open(\"pets.pickle\", \"rb\") as file:\n\tzombie_blue = pickle.load(file)\n\tprint(zombie_blue)\n\tprint(zombie_blue.play())", "---write pickle -> pickle.dump(class_instance, opened_file_with_wb)--\n== unpickle -> pickle.load(opened_file_with_rb)\nBlue is a Cat\nBlue plays with String\nNone\n" ] ], [ [ "**json.dumps(...)** returns a JSON STRING:", "_____no_output_____" ], [ "### Using HDF5 Format(Hierarchical Data Format)\n* good choice for working with very large data‐\nsets that don’t fit into memory, as you can efficiently read and write small sections of\nmuch larger arrays.\n* **HDFStore** class works like a **dict** and handles the low-level\ndetails:\n```\nframe = pd.DataFrame({'a': np.random.randn(100)})\nstore = pd.HDFStore('mydata.h5')\nstore['obj1'] = frame\n```\n* store.put\n```\nstore.put('obj2', frame, format='table')\nstore.select('obj2', where=['index >= 10 and index <= 15'])\nstore.close()\n```\n```\nframe.to_hdf('mydata3.h5', 'obj3', format='table')\npd.read_hdf('mydata3.h5', 'obj3', where=['index < 5'])\n```", "_____no_output_____" ] ], [ [ "frame = pd.DataFrame({'a': np.random.randn(100)})\nframe", "_____no_output_____" ], [ "store = pd.HDFStore('mydata.h5')\nstore", "_____no_output_____" ], [ "store['obj1'] = frame #work like dict\nstore['obj1_col'] = frame['a']\nstore", "_____no_output_____" ], [ "store['obj1']", "_____no_output_____" ], [ "#test\nstore['obj1_col'] ", "_____no_output_____" ] ], [ [ "HDFStore supports two storage schemas, **'fixed' and 'table'** . The latter is generally\nslower, but it supports query operations using a special syntax:", "_____no_output_____" ] ], [ [ "store.put('obj2', frame, format='table') #'table' - query support", "_____no_output_____" ], [ "store.select('obj2', where=['index >= 10 and index <= 15'])", "_____no_output_____" ], [ "store.close()", "_____no_output_____" ], [ "frame.to_hdf('mydata3.h5', 'obj3', format='table') #save to hdf\npd.read_hdf('mydata3.h5', 'obj3', where=['index < 5'])", "_____no_output_____" ], [ "import os\nos.remove('mydata3.h5')", "_____no_output_____" ] ], [ [ "### Reading Microsoft Excel Files(using ExcelFile class or pandas.read_excel function.)\n* xlsx = pd.ExcelFile('...')\n* frame = pd.read_excel(xlsx, 'Sheet1') #faster\n---------\n* frame = pd.read_excel('examples/ex1.xlsx', 'Sheet1') #slower\n-------------\n```\n* option1 \nwriter = pd.ExcelWriter('examples/ex2.xlsx')\nframe.to_excel(writer, 'Sheet1')\nwriter.save()\n\n* option2\nframe.to_excel('examples/ex2.xlsx')\n```\n", "_____no_output_____" ] ], [ [ "#To use ExcelFile , create an instance by passing a path to an xls or xlsx file\nxlsx = pd.ExcelFile('examples/ex1.xlsx') #faster\nxlsx", "_____no_output_____" ], [ "#Data stored in a sheet can then be read into DataFrame with parse :\npd.read_excel(xlsx, 'Sheet1')", "_____no_output_____" ] ], [ [ "If you are reading multiple sheets in a file, then it is faster to create the **ExcelFile** ,\nbut you can also simply pass the filename to pandas.read_excel :", "_____no_output_____" ] ], [ [ "frame = pd.read_excel('examples/ex1.xlsx', 'Sheet1') #slower\nframe", "_____no_output_____" ], [ "writer = pd.ExcelWriter('examples/ex2.xlsx')\nframe.to_excel(writer, 'Sheet1')\nwriter.save()", "_____no_output_____" ], [ 
"frame.to_excel('examples/ex2.xlsx')", "_____no_output_____" ], [ "!rm examples/ex2.xlsx", "_____no_output_____" ] ], [ [ "## Interacting with Web APIs", "_____no_output_____" ] ], [ [ "import requests\nurl = 'https://api.github.com/repos/pandas-dev/pandas/issues'\nresp = requests.get(url)\nresp", "_____no_output_____" ], [ "data = resp.json()\nprint(len(data))\n# data", "30\n" ], [ "data[0]['title']", "_____no_output_____" ], [ "issues = pd.DataFrame(data, columns=['number', 'title',\n 'labels', 'state'])\nissues.head()", "_____no_output_____" ] ], [ [ "## Interacting with Databases\n[SQLite keyword] https://www.sqlite.org/lang.html\n```\n$ sqlite3 ex1\nSQLite version 3.28.0 2019-03-02 15:25:24\nEnter \".help\" for usage hints.\nsqlite> create table tbl1(one varchar(10), two smallint);\nsqlite> insert into tbl1 values('hello!',10);\nsqlite> insert into tbl1 values('goodbye', 20);\nsqlite> select * from tbl1;\nhello!|10\ngoodbye|20\nsqlite>\n```\n* a semicolon at the end of each SQL command!\n```\nsqlite> CREATE TABLE tbl2 (\n ...> f1 varchar(30) primary key,\n ...> f2 text,\n ...> f3 real\n ...> );\nsqlite>\n```\n54. sqlite command line shell ; https://sqlite.org/cli.html\n* SQLite will use a temporary database that is deleted when the session exits. To use a persistent disk file as the database, enter the \".open\" command immediately after the terminal window starts up:\n * database file named \"ex1.db\" to be opened and used. The \"ex1.db\" file is created if it does not previously exist. \n```\nSQLite version 3.28.0 2019-03-02 15:25:24\nEnter \".help\" for usage hints.\nConnected to a transient in-memory database.\nUse \".open FILENAME\" to reopen on a persistent database.\nsqlite> .open ex1.db\nsqlite>\n```\n* Alternatively, you can create a new database using the default temporary storage, then save that database into a disk file using the \".save\" command:\n * Caution:\".save\" command as it will overwrite any preexisting database files having the same name without prompting for confirmation. As with the \".open\" command, you might want to use a full pathname with forward-slash directory separators to avoid ambiguity.\n```\nSQLite version 3.28.0 2019-03-02 15:25:24\nEnter \".help\" for usage hints.\nConnected to a transient in-memory database.\nUse \".open FILENAME\" to reopen on a persistent database.\nsqlite> ... many SQL commands omitted ...\nsqlite> .save ex1.db\nsqlite\n```", "_____no_output_____" ], [ "### create table and save\n```\nCREATE TABLE dogs (\n name TEXT,\n breed TEXT,\n age INTEGER\n);\n############ option1 #######command is capital !!###\n$ sqlite3 test.sl3 \nsqlite> create table dogs (name TEXT, breed TEXT, age INTEGER);\nsqlite> insert into dogs values('Lews', 'Border Coli', 4);\nsqlite> insert into dogs (name, breed, age) values('Lews1', 'Border Coli1', 10);\nsqlite> insert into dogs values('Delma', 'Shepard', 9);\nsqlite> select * from dogs;\n.quit #crtl + d\n\n## no .save -> Now load the databse again.\n$ sqlite3 test.sl3\nsqlite> select * from dogs; # shows result.\n\n############## option2 #########\n##sqlite> .save cats_db.db after creeate table.. 
for saving.\n##sqlite> .backup main cats_db.db \n###################################\n#$ sqlite3\nsqllite> create table cats (name TEXT, breed TEXT, age INTEGER);\nsqlite> insert into cats values('King', 'Persian', 2);\nsqlite> insert into cats values('Kong', 'Boland', 5);\nsqlite> .backup main cats_db.db\nsqlite> .quit #ctrl _ db\n###\n$ sqlite3 cats_db.db\nsqlite> .schema\nsqlite> select * from cats_db.db;\n\n############# option3 : .open 'Tweet_Data.db'#######\n# Default for sqlite is to use transient in-memory db, But\n# We will create db using sqlite> .open db_name.db\n```\n--------------------------------------------------------------------", "_____no_output_____" ], [ "``%%writefile ch06profile_csv``", "_____no_output_____" ] ], [ [ "%%writefile ch06profile.csv\nName,Country,Height (in cm)\nRyu,Japan,175\nKen,USA,175\nChun-Li,China,165\nGuile,USA,182\nE. Honda,Japan,185\nDhalsim,India,176\nBlanka,Brazil,192\nZangief,Russia,214", "Overwriting ch06profile.csv\n" ], [ "# !cat ch06profile.csv", "_____no_output_____" ], [ "print(\"####### DictReader, convert cm to in and DictWriter as new dict #####\")\nfrom csv import DictReader, DictWriter\n\ndef cm_to_in(cm):\n return int(float(cm) * 0.393701)\n\nwith open(\"ch06profile.csv\") as file:\n csv_reader = DictReader(file) \n fighters = list(csv_reader)\n\nwith open(\"ch06profile_inch.csv\", \"w\") as file:\n headers = (\"Name\", \"Country\", \"Height\")\n csv_writer = DictWriter(file, fieldnames=headers)\n csv_writer.writeheader()\n \n for f in fighters:\n csv_writer.writerow({\n \"Name\" : f[\"Name\"],\n \"Country\" : f[\"Country\"],\n \"Height\" : cm_to_in(f[\"Height (in cm)\"])\n }) ", "####### DictReader, convert cm to in and DictWriter as new dict #####\n" ], [ "!cat ch06profile_inch.csv", "Name,Country,Height\r\r\nRyu,Japan,68\r\r\nKen,USA,68\r\r\nChun-Li,China,64\r\r\nGuile,USA,71\r\r\nE. 
Honda,Japan,72\r\r\nDhalsim,India,69\r\r\nBlanka,Brazil,75\r\r\nZangief,Russia,84\r\r\n" ], [ "# test1 -> once excuted : Don't excute again this cell !!(since duplicated creation of TABLE profile!!)\nimport csv, sqlite3\n\n# con = sqlite3.connect(\":memory:\")\ncon = sqlite3.connect(\"ch06profile_inch.db\")\ncur = con.cursor()\ncur.execute(\"CREATE TABLE profile (Name TEXT,Country TEXT,Height INTEGER);\") # use your column names here\n\nwith open('ch06profile_inch.csv','r') as file: # `with` statement available in 2.5+\n # csv.DictReader uses first line in file for column headings by default\n csv_reader = csv.DictReader(file) # comma is default delimiter\n to_db = [(row['Name'], row['Country'], row['Height']) for row in csv_reader]\n\ncur.executemany(\"INSERT INTO profile (Name, Country, Height) VALUES (?, ?, ?);\", to_db)\ncon.commit()\ncon.close()", "_____no_output_____" ], [ "import csv, sqlite3\n\ncon = sqlite3.connect(\"ch06profile_inch.db\")\ncur = con.cursor()\nquery_list = [\n \"SELECT * FROM profile WHERE Name IS 'Ken';\",\n \"SELECT * FROM profile WHERE Country IS 'China';\",\n \"SELECT Name fromFROM profile WHERE Country IS 'Japan';\",\n \"SELECT * FROM profile WHERE Country IS NOT 'USA';\",\n \"SELECT * FROM profile WHERE Country IS NOT 'USA' AND Country IS NOT 'Brazil';\",\n \"SELECT * FROM profile WHERE Height > 68;\",\n]\nfor query_element in query_list:\n print(cur.execute(query_element).fetchall())\n\nprint(\"--------------------------------------\")\ncur.execute(\"SELECT * FROM profile;\")\nfor result in cur:\n print(result)\n\nprint(\"--------fetchall()-> in single list--------\") \ncur.execute(\"SELECT * FROM profile;\") \nprint(cur.fetchall()) \n# print(cur.fetchone()) \nprint(\"--------fetchone()-> show one --------\") \n# cur.execute(\"SELECT * FROM profile WHERE Height > 68 ORDER BY Height\") \ncur.execute(\"SELECT * FROM profile WHERE Height > 68\") \n# cur.execute(\"SELECT * FROM profile WHERE Height > 68;\") \n# print(cur.fetchall()) \nprint(cur.fetchone()) \ncon.commit()\ncon.close()", "[('Ken', 'USA', 68)]\n[('Chun-Li', 'China', 64)]\n[('Ryu',), ('E. Honda',)]\n[('Ryu', 'Japan', 68), ('Chun-Li', 'China', 64), ('E. Honda', 'Japan', 72), ('Dhalsim', 'India', 69), ('Blanka', 'Brazil', 75), ('Zangief', 'Russia', 84)]\n[('Ryu', 'Japan', 68), ('Chun-Li', 'China', 64), ('E. Honda', 'Japan', 72), ('Dhalsim', 'India', 69), ('Zangief', 'Russia', 84)]\n[('Guile', 'USA', 71), ('E. Honda', 'Japan', 72), ('Dhalsim', 'India', 69), ('Blanka', 'Brazil', 75), ('Zangief', 'Russia', 84)]\n--------------------------------------\n('Ryu', 'Japan', 68)\n('Ken', 'USA', 68)\n('Chun-Li', 'China', 64)\n('Guile', 'USA', 71)\n('E. Honda', 'Japan', 72)\n('Dhalsim', 'India', 69)\n('Blanka', 'Brazil', 75)\n('Zangief', 'Russia', 84)\n--------fetchall()-> in single list--------\n[('Ryu', 'Japan', 68), ('Ken', 'USA', 68), ('Chun-Li', 'China', 64), ('Guile', 'USA', 71), ('E. Honda', 'Japan', 72), ('Dhalsim', 'India', 69), ('Blanka', 'Brazil', 75), ('Zangief', 'Russia', 84)]\n--------fetchone()-> show one --------\n('Guile', 'USA', 71)\n" ], [ "import sqlite3 #once excuted , don't excute again!! or remove mydata.sqlite in dir. 
\nquery = \"\"\"\nCREATE TABLE test\n(a VARCHAR(20), b VARCHAR(20),\n c REAL, d INTEGER\n);\"\"\"\ncon = sqlite3.connect('mydata.sqlite')\ncon.execute(query)\ncon.commit()", "_____no_output_____" ], [ "data = [('Atlanta', 'Georgia', 1.25, 6),\n ('Tallahassee', 'Florida', 2.6, 3),\n ('Sacramento', 'California', 1.7, 5)]\nstmt = \"INSERT INTO test VALUES(?, ?, ?, ?)\"\ncon.executemany(stmt, data)\ncon.commit()", "_____no_output_____" ], [ "cursor = con.execute('select * from test')\nrows = cursor.fetchall()\nrows", "_____no_output_____" ], [ "cursor.description", "_____no_output_____" ], [ "pd.DataFrame(rows, columns=[x[0] for x in cursor.description])", "_____no_output_____" ], [ "import sqlalchemy as sqla\ndb = sqla.create_engine('sqlite:///mydata.sqlite')\npd.read_sql('select * from test', db)", "_____no_output_____" ], [ "!rm mydata.sqlite", "_____no_output_____" ], [ "#test con, cur version -> Once execute, Don't execute'\nimport sqlite3, csv \ncon = sqlite3.connect('mydata2.sqlite')\ncur = con.cursor()\nquery = \"\"\" CREATE TABLE test (\n a VARCHAR(20), b VARCHAR(20),\n c REAL, d INTEGER\n)\n\"\"\"\ncur.execute(query)\ncon.commit()", "_____no_output_____" ], [ "stmt2 = \"INSERT INTO test VALUES(?, ?, ?, ?)\"\ndata2 = [('Atlanta', 'Georgia', 1.25, 6),\n ('Tallahassee', 'Florida', 2.6, 3),\n ('Sacramento', 'California', 1.7, 5)]\ncon.executemany(stmt2, data2)\ncon.commit()\n# con.close()", "_____no_output_____" ], [ "##option 1\ncur.execute('select * from test')\nfor result in cur:\n print(result)\n \n# cursor = cur.execute('select * from test')\n# for result in cursor:\n# print(result)", "('Atlanta', 'Georgia', 1.25, 6)\n('Tallahassee', 'Florida', 2.6, 3)\n('Sacramento', 'California', 1.7, 5)\n" ], [ "##option 2\ncur.execute('select * from test')\nrows_all = cur.fetchall()\nrows_all \n\n# cursor = cur.execute('select * from test')\n# rows_all = cursor.fetchall()\n# rows_all", "_____no_output_____" ], [ "pd.DataFrame(rows_all, columns=[x[0] for x in cur.description])\n# pd.DataFrame(rows_all, columns=[x[0] for x in cursor.description])", "_____no_output_____" ], [ "cur.execute('select * from test')\nrows_one = cur.fetchone()\nrows_one\n\n# cursor = cur.execute('select * from test')\n# rows_one = cursor.fetchone()\n# rows_one", "_____no_output_____" ] ], [ [ "## Conclusion", "_____no_output_____" ] ] ]
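The record above closes with a sqlite3/pandas round trip spread across several cells. As a compact, self-contained sketch of that same flow (hedged: an in-memory database stands in for the notebook's `mydata.sqlite` / `mydata2.sqlite` files, so nothing is written to disk):

```python
import sqlite3
import pandas as pd

# Illustrative sketch of the SQLite round trip shown in the record above,
# using an in-memory database so it leaves no file behind.
con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE test (a TEXT, b TEXT, c REAL, d INTEGER);')
con.executemany(
    'INSERT INTO test VALUES (?, ?, ?, ?)',
    [('Atlanta', 'Georgia', 1.25, 6),
     ('Tallahassee', 'Florida', 2.6, 3),
     ('Sacramento', 'California', 1.7, 5)],
)
con.commit()

# pandas can read directly from a DBAPI connection for SQLite
df = pd.read_sql('SELECT * FROM test', con)
print(df)
con.close()
```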
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ec7eb1a3148120e7f0b3b9b451b7460fda6bd060
157,461
ipynb
Jupyter Notebook
code/01_data_collection.ipynb
welldunolive/covid19_analysis
f68b8c60aba6aeae9e097c9f719e8e8ba9dfd4ec
[ "CC0-1.0" ]
null
null
null
code/01_data_collection.ipynb
welldunolive/covid19_analysis
f68b8c60aba6aeae9e097c9f719e8e8ba9dfd4ec
[ "CC0-1.0" ]
null
null
null
code/01_data_collection.ipynb
welldunolive/covid19_analysis
f68b8c60aba6aeae9e097c9f719e8e8ba9dfd4ec
[ "CC0-1.0" ]
2
2020-10-16T01:36:11.000Z
2020-10-31T01:04:12.000Z
33.797167
135
0.374931
[ [ [ "#### Notebooks \n- [Data Collection](./01_data_collection.ipynb)\n- [Data Cleaning](./02_data_cleaning.ipynb)\n- [Data Preprocessing](./03_data_preprocessing.ipynb)\n- [EDA Five States](./04_eda_five_states.ipynb)\n- [EDA California](./05_eda_ca.ipynb)\n- [EDA Florida](./05_eda_fl.ipynb)\n- [EDA Illinois](./05_eda_il.ipynb)\n- [EDA New York](./05_eda_ny.ipynb)\n- [EDA Texas](./05_eda_tx.ipynb)\n- [Modeling Five States](./06_modeling_five_states.ipynb)\n- [Modeling California](./07_modeling_ca.ipynb)\n- [Modeling Florida](./07_modeling_fl.ipynb)\n- [Modeling Illinois](./07_modeling_il.ipynb)\n- [Modeling New York](./07_modeling_ny.ipynb)\n- [Modeling Texas](./07_modeling_tx.ipynb)\n- [Conclusions](./08_conclusions.ipynb)\n\n#### This Notebook's Contents \n- [Pulling DP05 From Census API](#Pulling-DP05-From-Census-API)\n- [Pulling DP03 From Census API](#Pulling-DP03-From-Census-API)", "_____no_output_____" ] ], [ [ "# Import the required libraries.\nimport pandas as pd\nimport requests", "_____no_output_____" ] ], [ [ "# Pulling DP05 From Census API\nStates: CA (06), FL(12), IL(17), NY (36), TX (48) \nACS 5-YEAR DEMOGRAPHIC AND HOUSING ESTIMATES \nSurvey/Program: American Community Survey \n2018: ACS 5-Year Estimates Data Profiles \nTableID: DP05 ", "_____no_output_____" ], [ "## California: 06", "_____no_output_____" ] ], [ [ "# Set base url\nurl = 'https://api.census.gov/data/2018/acs/acs5/profile?'\n\n# Set params\nparams = {\n 'get': 'group(DP05),NAME',\n 'for': 'county:*',\n 'in': 'state:06',\n 'key': 'YOURKEYHERE'\n}\n\n# Make a request and display the response code.\nres = requests.get(url,params)\nres", "_____no_output_____" ], [ "# Create a dataframe from the request JSON object.\ndf_ca = pd.DataFrame(res.json())\n# Display the first few rows of the dataframe.\ndf_ca.head(2)", "_____no_output_____" ], [ "# Set the values in the first row to the columns.\ndf_ca.columns = df_ca.iloc[0]", "_____no_output_____" ], [ "# Drop the first row\ndf_ca = df_ca.iloc[1:, :]\n# Display the first few rows of the dataframe.\ndf_ca.head(2)", "_____no_output_____" ] ], [ [ "## Florida: 12", "_____no_output_____" ] ], [ [ "# Set base url\nurl = 'https://api.census.gov/data/2018/acs/acs5/profile?'\n\n# Set params\nparams = {\n 'get': 'group(DP05),NAME',\n 'for': 'county:*',\n 'in': 'state:12',\n 'key': 'YOURKEYHERE'\n}\n\n# Make a request and display the response code.\nres = requests.get(url,params)\nres", "_____no_output_____" ], [ "# Create a dataframe from the request JSON object.\ndf_fl = pd.DataFrame(res.json())\n# Display the first few rows of the dataframe.\ndf_fl.head(2)", "_____no_output_____" ], [ "# Set the values in the first row to the columns\ndf_fl.columns = df_fl.iloc[0]", "_____no_output_____" ], [ "# Drop the first row\ndf_fl = df_fl.iloc[1:, :]\n# Display the first few rows of the dataframe.\ndf_fl.head(2)", "_____no_output_____" ] ], [ [ "## Illinois: 17", "_____no_output_____" ] ], [ [ "# Set base url\nurl = 'https://api.census.gov/data/2018/acs/acs5/profile?'\n\n# Set params\nparams = {\n 'get': 'group(DP05),NAME',\n 'for': 'county:*',\n 'in': 'state:17',\n 'key': 'YOURKEYHERE'\n}\n\n# Make a request and display the response code.\nres = requests.get(url,params)\nres", "_____no_output_____" ], [ "# Create a dataframe from the request JSON object.\ndf_il = pd.DataFrame(res.json())\n# Display the first few rows of the dataframe.\ndf_il.head(2)", "_____no_output_____" ], [ "# Set the values in the first row to the columns\ndf_il.columns = df_il.iloc[0]", "_____no_output_____" 
], [ "# Drop the first row\ndf_il = df_il.iloc[1:, :]\n# Display the first few rows of the dataframe.\ndf_il.head(2)", "_____no_output_____" ] ], [ [ "## New York: 36", "_____no_output_____" ] ], [ [ "# Set base url\nurl = 'https://api.census.gov/data/2018/acs/acs5/profile?'\n\n# Set params\nparams = {\n 'get': 'group(DP05),NAME',\n 'for': 'county:*',\n 'in': 'state:36',\n 'key': 'YOURKEYHERE'\n}\n\n# Make a request and display the response code.\nres = requests.get(url,params)\nres", "_____no_output_____" ], [ "# Create a dataframe from the request JSON object.\ndf_ny = pd.DataFrame(res.json())\n# Display the first few rows of the dataframe.\ndf_ny.head(2)", "_____no_output_____" ], [ "# Set the values in the first row to the columns\ndf_ny.columns = df_ny.iloc[0]", "_____no_output_____" ], [ "# Drop the first row\ndf_ny = df_ny.iloc[1:, :]\n# Display the first few rows of the dataframe.\ndf_ny.head(2)", "_____no_output_____" ] ], [ [ "## Texas: 48", "_____no_output_____" ] ], [ [ "# Set base url.\nurl = 'https://api.census.gov/data/2018/acs/acs5/profile?'\n\n# Set params.\nparams = {\n 'get': 'group(DP05),NAME',\n 'for': 'county:*',\n 'in': 'state:48',\n 'key': 'YOURKEYHERE'\n}\n\n# Make a request and display the response code.\nres = requests.get(url,params)\nres", "_____no_output_____" ], [ "# Convert the request JSON to a dataframe.\ndf_tx = pd.DataFrame(res.json())\n# Display the first few rows of the dataframe.\ndf_tx.head(2)", "_____no_output_____" ], [ "# Set the values in the first row to the columns\ndf_tx.columns = df_tx.iloc[0]", "_____no_output_____" ], [ "# Drop the first row.\ndf_tx = df_tx.iloc[1:, :]\n# Display the first few rows of the dataframe.\ndf_tx.head(2)", "_____no_output_____" ] ], [ [ "## Combining States", "_____no_output_____" ] ], [ [ "# Concatenate the state dataframes.\ndf = pd.concat([df_tx, df_ny, df_ca, df_fl, df_il])", "_____no_output_____" ], [ "# Export the data.\ndf.to_csv('../data/preprocessing/raw_dp05_five_states.csv', index=False)", "_____no_output_____" ] ], [ [ "## Pulling DP05 Headers", "_____no_output_____" ] ], [ [ "# Import the DP05 headers.\nheader_df = pd.read_csv('../data/preprocessing/acs5y2018_dp05_data_with_overlays.csv')\n# Display the dataframe.\nheader_df", "_____no_output_____" ], [ "# Drop the geo ID column\nheader_df = header_df.iloc[:, 1:]", "_____no_output_____" ], [ "# Export the data.\nheader_df.to_csv('../data/preprocessing/dp05_headers.csv')", "_____no_output_____" ] ], [ [ "## Create a dictionary of the columns and their identifiers", "_____no_output_____" ] ], [ [ "# Extract the header columns as a list.\nheader_cols = list(header_df.columns)", "_____no_output_____" ], [ "# Drop the second row of the dataframe.\nrow_one_df = header_df.iloc[:1, :]\n# Display the dataframe.\nrow_one_df", "_____no_output_____" ], [ "# Convert the row of the dataframe into a list\ndescriptions = row_one_df.values.tolist()\n\n# The output is a nested list. 
Extract the list.\ndescriptions = descriptions[0]\n\n# View the first five entries\ndescriptions[:5]", "_____no_output_____" ], [ "# Create a dictionary from a zipped list of the header columns and descriptions.\nheader_dict = dict(zip(header_cols, descriptions))\n\n# View the first five entries in the dictionary\nlist(header_dict.items())[:5]", "_____no_output_____" ], [ "# Rename the colums in the original dataframe according to the dictionary\ndf.rename(columns = header_dict, inplace=True)", "_____no_output_____" ], [ "# Drop columns with NaN values\ndf.dropna(axis=1, inplace=True)", "_____no_output_____" ], [ "# Display the first few rows of the dataframe.\ndf.head(2)", "_____no_output_____" ], [ "# Export the data.\ndf.to_csv('../data/preprocessing/raw_dp05_with_headers_five_states.csv', index=False)", "_____no_output_____" ] ], [ [ "# Pulling DP03 From Census API\nStates: CA (06), FL(12), IL(17), NY (36), TX (48) \nSELECTED ECONOMIC CHARACTERISTICS \nSurvey/Program: American Community Survey \n2018: ACS 5-Year Estimates Data Profiles \nTableID: DP03 ", "_____no_output_____" ], [ "## California: 06", "_____no_output_____" ] ], [ [ "# Set base url\nurl03 = 'https://api.census.gov/data/2018/acs/acs5/profile?'\n\n# Set params\nparams03 = {\n 'get': 'group(DP03),NAME',\n 'for': 'county:*',\n 'in': 'state:06',\n 'key': 'YOURKEYHERE'\n}\n\n# Make a request and display the response code.\nres03 = requests.get(url03,params03)\nres03", "_____no_output_____" ], [ "# Convert the request JSON to a dataframe.\ndf03_ca = pd.DataFrame(res03.json())\n# Display the first few rows of the dataframe.\ndf03_ca.head(2)", "_____no_output_____" ], [ "# Set the values in the first row to the columns\ndf03_ca.columns = df03_ca.iloc[0]", "_____no_output_____" ], [ "# Drop the first row.\ndf03_ca = df03_ca.iloc[1:, :]\n# Display the first few rows of the dataframe.\ndf03_ca.head(2)", "_____no_output_____" ] ], [ [ "## Florida: 12", "_____no_output_____" ] ], [ [ "# Set base url.\nurl03 = 'https://api.census.gov/data/2018/acs/acs5/profile?'\n\n# Set params.\nparams03 = {\n 'get': 'group(DP03),NAME',\n 'for': 'county:*',\n 'in': 'state:12',\n 'key': 'YOURKEYHERE'\n}\n\n# Make a request and display the response code.\nres03 = requests.get(url03,params03)\nres03", "_____no_output_____" ], [ "# Convert the request JSON to a dataframe.\ndf03_fl = pd.DataFrame(res03.json())\n# Display the first few rows of the dataframe.\ndf03_fl.head(2)", "_____no_output_____" ], [ "# Set the values in the first row to the columns\ndf03_fl.columns = df03_fl.iloc[0]", "_____no_output_____" ], [ "# Drop the first row\ndf03_fl = df03_fl.iloc[1:, :]\n# Display the first few rows of the dataframe.\ndf03_fl.head(2)", "_____no_output_____" ] ], [ [ "## Illinois: 17", "_____no_output_____" ] ], [ [ "# Set base url.\nurl03 = 'https://api.census.gov/data/2018/acs/acs5/profile?'\n\n# Set params.\nparams03 = {\n 'get': 'group(DP03),NAME',\n 'for': 'county:*',\n 'in': 'state:17',\n 'key': 'YOURKEYHERE'\n}\n\n# Make a request and display the response code.\nres03 = requests.get(url03,params03)\nres03", "_____no_output_____" ], [ "# Convert the request JSON to a dataframe.\ndf03_il = pd.DataFrame(res03.json())\n# Display the first few rows of the dataframe.\ndf03_il.head(2)", "_____no_output_____" ], [ "# Set the values in the first row to the columns.\ndf03_il.columns = df03_il.iloc[0]", "_____no_output_____" ], [ "# Drop the first row\ndf03_il = df03_il.iloc[1:, :]\n# Display the first few rows of the dataframe.\ndf03_il.head(2)", 
"_____no_output_____" ] ], [ [ "## New York: 36", "_____no_output_____" ] ], [ [ "# Set base url.\nurl03 = 'https://api.census.gov/data/2018/acs/acs5/profile?'\n\n# Set params.\nparams03 = {\n 'get': 'group(DP03),NAME',\n 'for': 'county:*',\n 'in': 'state:36',\n 'key': 'YOURKEYHERE'\n}\n\n# Make a request and display the response code.\nres03 = requests.get(url03,params03)\nres03", "_____no_output_____" ], [ "# Convert the request JSON to a dataframe.\ndf03_ny = pd.DataFrame(res03.json())\n# Display the first few rows of the dataframe.\ndf03_ny.head(2)", "_____no_output_____" ], [ "# Set the values in the first row to the columns\ndf03_ny.columns = df03_ny.iloc[0]", "_____no_output_____" ], [ "# Drop the first row\ndf03_ny = df03_ny.iloc[1:, :]\n# Display the first few rows of the dataframe.\ndf03_ny.head(2)", "_____no_output_____" ] ], [ [ "## Texas: 48", "_____no_output_____" ] ], [ [ "# Set base url.\nurl03 = 'https://api.census.gov/data/2018/acs/acs5/profile?'\n\n# Set params.\nparams03 = {\n 'get': 'group(DP03),NAME',\n 'for': 'county:*',\n 'in': 'state:48',\n 'key': 'YOURKEYHERE'\n}\n\n# Make a request and display the response code.\nres03 = requests.get(url03,params03)\nres03", "_____no_output_____" ], [ "# Convert the request JSON to a dataframe.\ndf03_tx = pd.DataFrame(res03.json())\n# Display the first few rows of the dataframe.\ndf03_tx.head(2)", "_____no_output_____" ], [ "# Set the values in the first row to the columns.\ndf03_tx.columns = df03_tx.iloc[0]", "_____no_output_____" ], [ "# Drop the first row\ndf03_tx = df03_tx.iloc[1:, :]\n# Display the first few rows of the dataframe.\ndf03_tx.head(2)", "_____no_output_____" ] ], [ [ "## Combining States", "_____no_output_____" ] ], [ [ "# Concatenate the dataframes.\ndf03 = pd.concat([df03_tx, df03_ny, df03_ca, df03_fl, df03_il])", "_____no_output_____" ], [ "# Export the data.\ndf03.to_csv('../data/preprocessing/raw_dp03_five_states.csv', index=False)", "_____no_output_____" ] ], [ [ "## Pulling DP03 Headers", "_____no_output_____" ] ], [ [ "# Import the DP03 headers.\nheader_df03 = pd.read_csv('../data/preprocessing/acs5y2018_dp03_data_with_overlays.csv')\n# Display the dataframe.\nheader_df03", "_____no_output_____" ], [ "# Drop the geo ID column.\nheader_df03 = header_df03.iloc[:, 1:]", "_____no_output_____" ], [ "# Export the data.\nheader_df03.to_csv('../data/preprocessing/dp03_headers.csv')", "_____no_output_____" ] ], [ [ "## Create a dictionary of the columns and their identifiers", "_____no_output_____" ] ], [ [ "# Extract the header columns as a list.\nheader_cols = list(header_df03.columns)", "_____no_output_____" ], [ "# Drop the second row of the dataframe.\nrow_one_df = header_df03.iloc[:1, :]\n# Display the dataframe.\nrow_one_df", "_____no_output_____" ], [ "# Convert the row of the dataframe into a list.\ndescriptions = row_one_df.values.tolist()\n\n# The output is a nested list. 
Extract the list.\ndescriptions = descriptions[0]\n\n# View the first five entries.\ndescriptions[:5]", "_____no_output_____" ], [ "# Create a dictionary from a zipped list of the header columns and descriptions.\nheader_dict = dict(zip(header_cols, descriptions))\n\n# View the first five entries in the dictionary\nlist(header_dict.items())[:5]", "_____no_output_____" ], [ "# Rename the colums in the original dataframe according to the dictionary.\ndf03 = df03.rename(columns = header_dict)", "_____no_output_____" ], [ "# Drop columns with NaN values.\ndf03 = df03.dropna(axis=1)", "_____no_output_____" ], [ "# Display the first few rows of the dataframe.\ndf03.head(3)", "_____no_output_____" ], [ "# Export the data.\ndf03.to_csv('../data/preprocessing/raw_dp03_with_headers_five_states.csv', index=False)", "_____no_output_____" ] ] ]
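The record above repeats a small header-renaming trick twice (for DP05 and DP03): the overlay CSV's first row carries human-readable descriptions that get zipped against the coded column names and applied with `rename()`. A self-contained sketch of that step follows; the codes and descriptions here are made up purely for illustration.

```python
import pandas as pd

# Raw pull with coded column names, plus an "overlay" whose first row
# holds the human-readable descriptions (values are invented for this sketch).
raw = pd.DataFrame({'DP05_0001E': [100, 200], 'DP05_0002E': [48, 95]})
overlay = pd.DataFrame(
    [['Estimate!!Total population', 'Estimate!!Male population']],
    columns=['DP05_0001E', 'DP05_0002E'],
)

# Zip codes against descriptions and rename the raw columns.
code_to_description = dict(zip(overlay.columns, overlay.iloc[0].tolist()))
renamed = raw.rename(columns=code_to_description)
print(renamed.columns.tolist())
```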
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7ecf286bd3d8d2650d9d4c55ce3feb8001a0e5
12,643
ipynb
Jupyter Notebook
automl/13a.auto-ml-dataprep.ipynb
nishankgu/MachineLearningNotebooks
7bb906b53c4903edf2252e8dc0ac14a322c7a253
[ "MIT" ]
null
null
null
automl/13a.auto-ml-dataprep.ipynb
nishankgu/MachineLearningNotebooks
7bb906b53c4903edf2252e8dc0ac14a322c7a253
[ "MIT" ]
null
null
null
automl/13a.auto-ml-dataprep.ipynb
nishankgu/MachineLearningNotebooks
7bb906b53c4903edf2252e8dc0ac14a322c7a253
[ "MIT" ]
null
null
null
28.284116
279
0.584434
[ [ [ "Copyright (c) Microsoft Corporation. All rights reserved.\n\nLicensed under the MIT License.", "_____no_output_____" ], [ "# AutoML 13: Prepare Data using `azureml.dataprep` for Local Execution\nIn this example we showcase how you can use the `azureml.dataprep` SDK to load and prepare data for AutoML. `azureml.dataprep` can also be used standalone; full documentation can be found [here](https://github.com/Microsoft/PendletonDocs).\n\nMake sure you have executed the [setup](00.configuration.ipynb) before running this notebook.\n\nIn this notebook you will learn how to:\n1. Define data loading and preparation steps in a `Dataflow` using `azureml.dataprep`.\n2. Pass the `Dataflow` to AutoML for a local run.\n3. Pass the `Dataflow` to AutoML for a remote run.", "_____no_output_____" ], [ "## Diagnostics\n\nOpt-in diagnostics for better experience, quality, and security of future releases.", "_____no_output_____" ] ], [ [ "from azureml.telemetry import set_diagnostics_collection\nset_diagnostics_collection(send_diagnostics = True)", "_____no_output_____" ] ], [ [ "## Create an Experiment\n\nAs part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.", "_____no_output_____" ] ], [ [ "import logging\nimport os\n\nimport pandas as pd\n\nimport azureml.core\nfrom azureml.core.experiment import Experiment\nfrom azureml.core.workspace import Workspace\nimport azureml.dataprep as dprep\nfrom azureml.train.automl import AutoMLConfig", "_____no_output_____" ], [ "ws = Workspace.from_config()\n \n# choose a name for experiment\nexperiment_name = 'automl-dataprep-local'\n# project folder\nproject_folder = './sample_projects/automl-dataprep-local'\n \nexperiment = Experiment(ws, experiment_name)\n \noutput = {}\noutput['SDK version'] = azureml.core.VERSION\noutput['Subscription ID'] = ws.subscription_id\noutput['Workspace Name'] = ws.name\noutput['Resource Group'] = ws.resource_group\noutput['Location'] = ws.location\noutput['Project Directory'] = project_folder\noutput['Experiment Name'] = experiment.name\npd.set_option('display.max_colwidth', -1)\npd.DataFrame(data = output, index = ['']).T", "_____no_output_____" ] ], [ [ "## Loading Data using DataPrep", "_____no_output_____" ] ], [ [ "# You can use `smart_read_file` which intelligently figures out delimiters and datatypes of a file.\n# The data referenced here was pulled from `sklearn.datasets.load_digits()`.\nsimple_example_data_root = 'https://dprepdata.blob.core.windows.net/automl-notebook-data/'\nX = dprep.smart_read_file(simple_example_data_root + 'X.csv').skip(1) # Remove the header row.\n\n# You can also use `read_csv` and `to_*` transformations to read (with overridable delimiter)\n# and convert column types manually.\n# Here we read a comma delimited file and convert all columns to integers.\ny = dprep.read_csv(simple_example_data_root + 'y.csv').to_long(dprep.ColumnSelector(term='.*', use_regex = True))", "_____no_output_____" ] ], [ [ "## Review the Data Preparation Result\n\nYou can peek the result of a Dataflow at any range using `skip(i)` and `head(j)`. 
Doing so evaluates only `j` records for all the steps in the Dataflow, which makes it fast even against large datasets.", "_____no_output_____" ] ], [ [ "X.skip(1).head(5)", "_____no_output_____" ] ], [ [ "## Configure AutoML\n\nThis creates a general AutoML settings object applicable for both local and remote runs.", "_____no_output_____" ] ], [ [ "automl_settings = {\n \"iteration_timeout_minutes\" : 10,\n \"iterations\" : 2,\n \"primary_metric\" : 'AUC_weighted',\n \"preprocess\" : False,\n \"verbosity\" : logging.INFO,\n \"n_cross_validations\": 3\n}", "_____no_output_____" ] ], [ [ "## Local Run", "_____no_output_____" ], [ "### Pass Data with `Dataflow` Objects\n\nThe `Dataflow` objects captured above can be passed to the `submit` method for a local run. AutoML will retrieve the results from the `Dataflow` for model training.", "_____no_output_____" ] ], [ [ "automl_config = AutoMLConfig(task = 'classification',\n debug_log = 'automl_errors.log',\n X = X,\n y = y,\n **automl_settings)", "_____no_output_____" ], [ "local_run = experiment.submit(automl_config, show_output = True)", "_____no_output_____" ] ], [ [ "## Explore the Results", "_____no_output_____" ], [ "#### Widget for Monitoring Runs\n\nThe widget will first report a \"loading\" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.\n\n**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details.", "_____no_output_____" ] ], [ [ "from azureml.widgets import RunDetails\nRunDetails(local_run).show()", "_____no_output_____" ] ], [ [ "#### Retrieve All Child Runs\nYou can also use SDK methods to fetch all the child runs and see individual metrics that we log.", "_____no_output_____" ] ], [ [ "children = list(local_run.get_children())\nmetricslist = {}\nfor run in children:\n properties = run.get_properties()\n metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}\n metricslist[int(properties['iteration'])] = metrics\n \nimport pandas as pd\nrundata = pd.DataFrame(metricslist).sort_index(1)\nrundata", "_____no_output_____" ] ], [ [ "### Retrieve the Best Model\n\nBelow we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. 
Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.", "_____no_output_____" ] ], [ [ "best_run, fitted_model = local_run.get_output()\nprint(best_run)\nprint(fitted_model)", "_____no_output_____" ] ], [ [ "#### Best Model Based on Any Other Metric\nShow the run and the model that has the smallest `log_loss` value:", "_____no_output_____" ] ], [ [ "lookup_metric = \"log_loss\"\nbest_run, fitted_model = local_run.get_output(metric = lookup_metric)\nprint(best_run)\nprint(fitted_model)", "_____no_output_____" ] ], [ [ "#### Model from a Specific Iteration\nShow the run and the model from the first iteration:", "_____no_output_____" ] ], [ [ "iteration = 0\nbest_run, fitted_model = local_run.get_output(iteration = iteration)\nprint(best_run)\nprint(fitted_model)", "_____no_output_____" ] ], [ [ "### Test the Best Fitted Model\n\n#### Load Test Data", "_____no_output_____" ] ], [ [ "from sklearn import datasets\n\ndigits = datasets.load_digits()\nX_test = digits.data[:10, :]\ny_test = digits.target[:10]\nimages = digits.images[:10]", "_____no_output_____" ] ], [ [ "#### Testing Our Best Fitted Model\nWe will try to predict 2 digits and see how our model works.", "_____no_output_____" ] ], [ [ "#Randomly select digits and test\nfrom matplotlib import pyplot as plt\nfrom matplotlib.pyplot import imshow\nimport random\nimport numpy as np\n\nfor index in np.random.choice(len(y_test), 2, replace = False):\n print(index)\n predicted = fitted_model.predict(X_test[index:index + 1])[0]\n label = y_test[index]\n title = \"Label value = %d Predicted value = %d \" % (label, predicted)\n fig = plt.figure(1, figsize=(3,3))\n ax1 = fig.add_axes((0,0,.8,.8))\n ax1.set_title(title)\n plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')\n plt.show()", "_____no_output_____" ] ], [ [ "## Appendix", "_____no_output_____" ], [ "### Capture the `Dataflow` Objects for Later Use in AutoML\n\n`Dataflow` objects are immutable and are composed of a list of data preparation steps. A `Dataflow` object can be branched at any point for further usage.", "_____no_output_____" ] ], [ [ "# sklearn.digits.data + target\ndigits_complete = dprep.smart_read_file('https://dprepdata.blob.core.windows.net/automl-notebook-data/digits-complete.csv')", "_____no_output_____" ] ], [ [ "`digits_complete` (sourced from `sklearn.datasets.load_digits()`) is forked into `dflow_X` to capture all the feature columns and `dflow_y` to capture the label column.", "_____no_output_____" ] ], [ [ "digits_complete.to_pandas_dataframe().shape\nlabels_column = 'Column64'\ndflow_X = digits_complete.drop_columns(columns = [labels_column])\ndflow_y = digits_complete.keep_columns(columns = [labels_column])", "_____no_output_____" ] ] ]
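The record above scores the AutoML `fitted_model` on ten held-out digits. A hedged, standalone sketch of that evaluation step follows; since the AutoML run is not reproduced here, a plain scikit-learn classifier stands in for the returned model (it only needs to expose the same `predict()` interface).

```python
import numpy as np
from sklearn import datasets
from sklearn.linear_model import LogisticRegression

digits = datasets.load_digits()
X_train, y_train = digits.data[10:], digits.target[10:]
X_test, y_test = digits.data[:10], digits.target[:10]

# Stand-in for the AutoML fitted_model; any estimator with predict() works here.
stand_in_model = LogisticRegression(max_iter=2000).fit(X_train, y_train)

preds = stand_in_model.predict(X_test)
accuracy = np.mean(preds == y_test)
print(f'accuracy on the 10 held-out digits: {accuracy:.2f}')
```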
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec7eec989a4de820277f3b2023e1db06270d954c
75,156
ipynb
Jupyter Notebook
02-data-x-signals/m250-theory-signal-spectral-lti-ffts/fft-example.ipynb
UCBerkeley-SCET/DataX-Berkeley
f912d22c838b511d3ada4ecfa3548afd80437b74
[ "Apache-2.0" ]
28
2020-06-15T23:53:36.000Z
2022-03-19T09:27:02.000Z
02-data-x-signals/m250-theory-signal-spectral-lti-ffts/fft-example.ipynb
UCBerkeley-SCET/DataX-Berkeley
f912d22c838b511d3ada4ecfa3548afd80437b74
[ "Apache-2.0" ]
4
2020-06-24T22:20:31.000Z
2022-02-28T01:37:36.000Z
02-data-x-signals/m250-theory-signal-spectral-lti-ffts/fft-example.ipynb
UCBerkeley-SCET/DataX-Berkeley
f912d22c838b511d3ada4ecfa3548afd80437b74
[ "Apache-2.0" ]
78
2020-06-19T09:41:01.000Z
2022-02-05T00:13:29.000Z
235.598746
29,302
0.907433
[ [ [ "# Working with Numpy FFT Results: Scaling and Folding", "_____no_output_____" ], [ "Imports\n-------", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "Create a Test Signal\n--------------------\n$f_s$ is the sampling frequency, while $f$ is a base frequency for the signal content. We create a signal that contains components at a couple of multiples of this base frequency. Note the amplitudes here since we will be trying to extract those correctly from the FFT later.", "_____no_output_____" ] ], [ [ "f_s = 50.0 # Hz\nf = 1.0 # Hz\ntime = np.arange(0.0, 3.0, 1/f_s)\nx = 5 * np.sin(2 * np.pi * f * time) + 2 * np.sin(10 * 2 * np.pi * f * time)", "_____no_output_____" ], [ "plt.plot(time, x)\nplt.xlabel(\"Time (sec)\")\nplt.ylabel(\"x\")", "_____no_output_____" ] ], [ [ "Compute the FFT\n---------------\nThe FFT and a matching vector of frequencies", "_____no_output_____" ] ], [ [ "fft_x = np.fft.fft(x)\nn = len(fft_x)\nfreq = np.fft.fftfreq(n, 1/f_s)\nprint (n)\nprint (freq)", "150\n[ 0. 0.33333333 0.66666667 1. 1.33333333\n 1.66666667 2. 2.33333333 2.66666667 3. 3.33333333\n 3.66666667 4. 4.33333333 4.66666667 5. 5.33333333\n 5.66666667 6. 6.33333333 6.66666667 7. 7.33333333\n 7.66666667 8. 8.33333333 8.66666667 9. 9.33333333\n 9.66666667 10. 10.33333333 10.66666667 11. 11.33333333\n 11.66666667 12. 12.33333333 12.66666667 13. 13.33333333\n 13.66666667 14. 14.33333333 14.66666667 15. 15.33333333\n 15.66666667 16. 16.33333333 16.66666667 17. 17.33333333\n 17.66666667 18. 18.33333333 18.66666667 19. 19.33333333\n 19.66666667 20. 20.33333333 20.66666667 21. 21.33333333\n 21.66666667 22. 22.33333333 22.66666667 23. 23.33333333\n 23.66666667 24. 24.33333333 24.66666667 -25. -24.66666667\n -24.33333333 -24. -23.66666667 -23.33333333 -23. -22.66666667\n -22.33333333 -22. -21.66666667 -21.33333333 -21. -20.66666667\n -20.33333333 -20. -19.66666667 -19.33333333 -19. -18.66666667\n -18.33333333 -18. -17.66666667 -17.33333333 -17. -16.66666667\n -16.33333333 -16. -15.66666667 -15.33333333 -15. -14.66666667\n -14.33333333 -14. -13.66666667 -13.33333333 -13. -12.66666667\n -12.33333333 -12. -11.66666667 -11.33333333 -11. -10.66666667\n -10.33333333 -10. -9.66666667 -9.33333333 -9. -8.66666667\n -8.33333333 -8. -7.66666667 -7.33333333 -7. -6.66666667\n -6.33333333 -6. -5.66666667 -5.33333333 -5. -4.66666667\n -4.33333333 -4. -3.66666667 -3.33333333 -3. -2.66666667\n -2.33333333 -2. -1.66666667 -1.33333333 -1. -0.66666667\n -0.33333333]\n" ], [ "plt.plot(np.abs(fft_x))", "_____no_output_____" ] ], [ [ "Swap Half Spaces\n----------------\nNote that frequencies in the FFT and the `freq` vector go from zero to some larger positive number then from a large negative number back toward zero. We can swap that so that the DC component is in the center of the vector while maintaining a two-sided spectrum.", "_____no_output_____" ] ], [ [ "fft_x_shifted = np.fft.fftshift(fft_x)\nfreq_shifted = np.fft.fftshift(freq)", "_____no_output_____" ], [ "plt.plot(freq_shifted, np.abs(fft_x_shifted))\nplt.xlabel(\"Frequency (Hz)\")", "_____no_output_____" ] ], [ [ "Fold Negative Frequencies and Scale\n------------------------------\nIt's actually more common to look at just the first half of the unshifted FFT and frequency vectors and fold all the amplitude information into the positive frequencies. Furthermore, to get ampltude right, we must normalize by the length of the original FFT. 
Note the factor of $2/n$ in the following, which accomplishes both the folding and scaling.", "_____no_output_____" ] ], [ [ "# edited for python 3\nhalf_n = int(np.ceil(n/2.0))\nfft_x_half = (2.0 / n) * fft_x[:half_n]\nfreq_half = freq[:half_n]", "_____no_output_____" ], [ "plt.plot(freq_half, np.abs(fft_x_half))\nplt.xlabel(\"Frequency (Hz)\")\nplt.ylabel(\"Amplitude\")", "_____no_output_____" ] ], [ [ "Now the spectrum contains spikes at the correct amplitudes, and only at positive frequencies, which are the only physically meaningful ones.", "_____no_output_____" ] ] ]
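As a hedged addition (not part of the notebook above): the fold-and-scale step can be wrapped into a small helper built on `np.fft.rfft`, which returns only the non-negative-frequency bins directly. The function name and the DC/Nyquist handling below are illustrative choices, not taken from the source notebook.

```python
# Sketch: single-sided amplitude spectrum helper, equivalent to the manual folding above.
import numpy as np

def single_sided_spectrum(x, f_s):
    n = len(x)
    spec = np.abs(np.fft.rfft(x)) * 2.0 / n   # fold + scale by 2/n, as in the cells above
    spec[0] /= 2.0                            # the DC bin should not be doubled
    if n % 2 == 0:
        spec[-1] /= 2.0                       # nor the Nyquist bin when n is even
    freqs = np.fft.rfftfreq(n, d=1.0 / f_s)
    return freqs, spec

freqs, amps = single_sided_spectrum(x, f_s)   # expect peaks of ~5 at 1 Hz and ~2 at 10 Hz
```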
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec7f08f0a423dc8bf32a7f72d878d8a0f7840694
212,125
ipynb
Jupyter Notebook
examples/03_novel/NOVEL_visualization.ipynb
cyang019/dnpsoup
7a05ef571d45316b8c84db074364f3193e3e797a
[ "BSD-3-Clause" ]
6
2021-08-11T02:17:37.000Z
2022-01-06T03:40:41.000Z
examples/03_novel/NOVEL_visualization.ipynb
cyang019/dnpsoup
7a05ef571d45316b8c84db074364f3193e3e797a
[ "BSD-3-Clause" ]
null
null
null
examples/03_novel/NOVEL_visualization.ipynb
cyang019/dnpsoup
7a05ef571d45316b8c84db074364f3193e3e797a
[ "BSD-3-Clause" ]
1
2021-12-14T16:14:26.000Z
2021-12-14T16:14:26.000Z
441.927083
92,992
0.932719
[ [ [ "import os\nimport re\nimport json\nfrom copy import deepcopy\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom functools import wraps", "_____no_output_____" ], [ "%matplotlib inline", "_____no_output_____" ], [ "def collect_results(result_dir, param_dir, result_type='gB1', param_name=None):\n xs = []\n ys = []\n for filename in sorted(list(os.listdir(result_dir))):\n val = 0.0\n filepath = os.path.join(result_dir, filename)\n with open(filepath, 'r', encoding='utf-8') as f:\n for line in f:\n if line.startswith('#'):\n continue\n val = float(line.strip())\n break\n ys.append(val)\n\n name_part, ext = os.path.splitext(filename)\n param_filename = f'{name_part}.json'\n filepath = os.path.join(param_dir, param_filename)\n with open(filepath, 'r', encoding='utf-8') as f:\n data = json.load(f)\n if result_type == 'gB1':\n x = data['pulseseq']['components']['emr2']['e']['frequency']/1e6\n xs.append(float(x))\n elif result_type == 'length':\n x = data['pulseseq']['sections'][param_name]['size']\n xs.append(float(x))\n elif result_type == 'fp':\n x = data['settings']['Magnet']['b0']\n xs.append(float(x))\n else:\n raise NotImplementedError\n return xs, ys", "_____no_output_____" ], [ "eofe_results_root = 'outputs/eofe_outputs'\neofe_params_root = 'inputs/eofe_inputs'\ndef get_xy_from_collection(collection_name, eofe_res_dir, result_type):\n res_dir = os.path.join(eofe_res_dir, collection_name, f'{collection_name}_results')\n param_dir = os.path.join(eofe_res_dir, collection_name, collection_name)\n xs, ys = collect_results(res_dir, param_dir, result_type)\n return xs, ys", "_____no_output_____" ], [ "idx=74\nloop=200\n\ncollection_name = f'qband_p{idx}_flip1_mix20k_d200k_loop{loop}_fp_ref_inc0p1'\nxs_ref, ys_ref = get_xy_from_collection(collection_name, eofe_results_root, 'fp')\n\ncollection_name = f'qband_p{idx}_flip1_mix20k_d200k_loop{loop}_fp_inc0p1'\nxs_novel, ys_novel = get_xy_from_collection(collection_name, eofe_results_root, 'fp')\n\ncollection_name = f'qband_p{idx}_flip1_mix20k_d200k_loop{loop}_fp_se_inc0p1'\nxs_se, ys_se = get_xy_from_collection(collection_name, eofe_results_root, 'fp')\n\n\nys1 = np.array(ys_novel)/np.array(ys_ref)\nys2 = np.array(ys_se)/np.array(ys_ref)\n\nfactor = np.max(ys1)\n\nfig = plt.figure(figsize=(10, 8), dpi=100)\nax1 = plt.subplot2grid((1,1), (0,0), rowspan=1)\n\nax1.plot(xs_ref, ys1/factor, marker='o', linestyle='solid', color='r', markerfacecolor='none', label='NOVEL')\nax1.plot(xs_ref, ys2/factor, marker='s', linestyle='solid', color='k', markerfacecolor='none', label='SE')\n\nys_diff = np.array(ys1) - np.array(ys2)\nax1.plot(xs_se, ys_diff/factor, marker='D', linestyle='solid', color='blue', markerfacecolor='none', label='Difference')\n\nax1.vlines(1.1957, -0.7, 1.2, linestyle='dashed')\nax1.hlines(0, np.min(xs_ref), np.max(xs_ref), linestyle='dotted')\n\nax1.legend(loc='lower right')\nplt.xlabel('Field (T)')\nax1.set_ylabel('Normalized Enhancement')\nplt.ylim(-0.7, 1.2)\nplt.xlim(1.1919, 1.20)\n# plt.savefig('cody_figure3_fp_simulation_v3.ps')\nplt.show()", "_____no_output_____" ], [ "idx=74\nloop2=200\n\n# Cody 1.2T Simulation data\n\n# DNPSOUP figure data & Cody 1.2T Simulation data\ncollection_name = f'xband_p83_flip16_mix500_d100k_loop{loop2}_scan_gB1_inc1'\nxs_x2, ys_x2 = get_xy_from_collection(collection_name, eofe_results_root, 'gB1')\n\n# Cody 1.2 T simulation data\ncollection_name = f'qband_p74_flip1_mix20k_d200k_loop200_scan_gB1_inc0p1'\n# collection_name = 
f'qband_p83_flip5_mix500_d100k_loop{loop2}_scan_gB1_inc1'\nxs_q, ys_q = get_xy_from_collection(collection_name, eofe_results_root, 'gB1')\n\nfig = plt.figure(figsize=(8, 6), dpi=100)\nax1 = plt.subplot2grid((1,1), (0,0), rowspan=1)\n\nax1.plot(np.array(xs_x2)/xs_x2[np.argmax(ys_x2)], ys_x2/np.max(ys_x2), \n marker='s', linestyle='solid', color='blue', markerfacecolor='none', label='xband')\nax1.plot(np.array(xs_q)/xs_q[np.argmax(ys_q)], ys_q/np.max(ys_q), marker='^', linestyle='solid', color='r', markerfacecolor='none', label='qband')\n\nax1.legend(loc='upper right')\nax1.set_xlabel('$\\omega_{I1}/\\omega_{S0}$')\nax1.set_ylabel('Normalized Enhancement')\nax1.tick_params(\n direction='in',\n bottom=True,\n top=True,\n left=True,\n right=True\n)\nplt.show()", "_____no_output_____" ] ], [ [ "## MAS", "_____no_output_____" ], [ "Jobs ran per crystal point on the EOFE cluster. Needs to combine them back to powder averaged values.", "_____no_output_____" ] ], [ [ "# Euler angles\ndef fibonacci(n):\n if n == 0:\n return 8\n if n == 1:\n return 13\n return fibonacci(n-1) + fibonacci(n-2)\n\ndef zcw_from_cnst(m, c1, c2, c3):\n n_m = fibonacci(m+2)\n f_m = fibonacci(m)\n results = []\n for i in range(n_m):\n alpha = 2.0 * np.pi / c3 * np.fmod(i * f_m / n_m, 1.0)\n beta = np.arccos(c1 * (c2 * np.fmod(i/n_m, 1.0) - 1.0))\n results.append([alpha, beta, 0.0])\n return results\n\ndef zcw2(n, option):\n if option == 'full':\n return zcw_from_cnst(n, 1, 2, 1)\n elif option == 'hemi':\n return zcw_from_cnst(n, -1, 1, 1)\n elif option == 'octant':\n return zcw_from_cnst(n, -1, 1, 4)\n \ndef zcw3(n, option, gamma_cnt):\n eulers = zcw2(n, option)\n if gamma_cnt < 2:\n return eulers\n gamma_step = np.pi * 2 / gamma_cnt\n results = []\n for euler in eulers:\n for i in range(gamma_cnt):\n temp = euler[:]\n temp[2] = i * gamma_step\n results.append(temp)\n return results", "_____no_output_____" ], [ "angles = zcw2(2, 'full')", "_____no_output_____" ], [ "os.listdir('outputs/eofe_outputs')", "_____no_output_____" ], [ "# MAS\nresults_dir = 'outputs/eofe_outputs/qband_p83_scan_loop_mas10k_inc50ns/'\nfolder_names = []\nfor filename in sorted(list(os.listdir(results_dir))):\n filepath = os.path.join(results_dir, filename)\n if os.path.isdir(filepath):\n folder_names.append(filename)", "_____no_output_____" ], [ "pattern = r'loop(\\d{3})_results'\nxs = []\nfor name in folder_names:\n found = re.search(pattern, name)\n val = int(found.group(1))\n xs.append(val)", "_____no_output_____" ], [ "# static was already powder averaged\nstatic_ys = []\nstatic_xs = []\npattern = r'task_(\\d{3})'\nfolder_path = 'outputs/eofe_outputs/qband_p83_scan_loop_static_inc50ns/qband_p83_scan_loop_static_inc50ns_results/'\nfor filename in sorted(list(os.listdir(folder_path))):\n found = re.search(pattern, filename)\n x = int(found.group(1))\n static_xs.append(x)\n filepath = os.path.join(folder_path, filename)\n try:\n with open(filepath, 'r', encoding='utf-8') as f:\n f.readline()\n val = float(f.readline().strip())\n static_ys.append(val)\n except Exception as e:\n print(f'exception at {filename}:')\n print(e)\n raise e", "_____no_output_____" ], [ "# powder averaging\ndef get_powder_averaged_vals(root_dir, folder_name):\n folder_path = os.path.join(root_dir, folder_name)\n pattern = r'task_(\\d{3})'\n factors = []\n for filename in sorted(list(os.listdir(folder_path))):\n found = re.search(pattern, filename)\n if not found:\n factors.append(0.0)\n else:\n val = int(found.group(1))\n beta = angles[val][1]\n factor = np.sin(beta)\n 
factors.append(factor)\n if len(factors) == 0:\n print(f'[{folder_name}] empty directory')\n return 0.0\n elif len(factors) < 55:\n print(f'[{folder_name}] has {len(factors)} results, half finished...')\n\n vals = []\n for filename in sorted(list(os.listdir(folder_path))):\n filepath = os.path.join(root_dir, folder_name, filename)\n with open(filepath, 'r', encoding='utf-8') as f:\n f.readline()\n val = float(f.readline().strip())\n vals.append(val)\n factors = np.array(factors)/len(factors)\n return np.multiply(factors, vals).sum()", "_____no_output_____" ], [ "ys = []\nfor name in folder_names:\n val = get_powder_averaged_vals(results_dir, name)\n ys.append(val)", "_____no_output_____" ], [ "cnt = 100\nfig = plt.figure(figsize=(8, 6), dpi=100)\nloop_length = (5 + 500 + 1e5) * 1e-9 * 1e3\nxs_time = np.array(xs) * loop_length\nplt.plot(xs_time[:cnt], ys[:cnt], 'bo-', markerfacecolor='none', label='10kHz MAS')\nstatic_xs_time = np.array(static_xs) * loop_length\nplt.plot(static_xs_time[:cnt], static_ys[:cnt], 'r^-', markerfacecolor='none', label='Static')\nplt.tick_params(\n direction='in',\n bottom=True,\n top=True,\n left=True,\n right=True\n)\nplt.ylabel('Intensities (a.u.)')\nplt.xlabel('BuildUp Time (ms)')\nplt.legend()\n# plt.savefig('dnpsoup_novel_mas_buildup.ps')\nplt.show()", "_____no_output_____" ] ] ]
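One hedged addition (not taken from the dnpsoup repository): the sin(β)-weighted powder average computed in `get_powder_averaged_vals` can be made a little more explicit by normalizing the weights so they sum to one, which keeps the averaged intensity on the same scale regardless of how many ZCW orientations were simulated. The helper name and signature below are illustrative only.

```python
# Sketch: sin(beta)-weighted powder average with explicitly normalized weights.
import numpy as np

def powder_average(values, euler_angles):
    # values[i] is the simulated intensity for euler_angles[i] = (alpha, beta, gamma),
    # e.g. the angles returned by zcw2()/zcw3() in the cells above.
    betas = np.array([angle[1] for angle in euler_angles])
    weights = np.sin(betas)
    weights /= weights.sum()          # normalize instead of dividing by the orientation count
    return float(np.dot(weights, np.asarray(values, dtype=float)))

# Example (hypothetical): powder_average(per_crystallite_intensities, zcw2(2, 'full'))
```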
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7f15efcdd52e6f6ca8857ac871af687f9ded7c
53,647
ipynb
Jupyter Notebook
WS07-files-iterators.ipynb
PersDep/python-intro-2021
82fb404a9b833ddf0f8a43252ba9d5b8389a1a5e
[ "MIT" ]
null
null
null
WS07-files-iterators.ipynb
PersDep/python-intro-2021
82fb404a9b833ddf0f8a43252ba9d5b8389a1a5e
[ "MIT" ]
null
null
null
WS07-files-iterators.ipynb
PersDep/python-intro-2021
82fb404a9b833ddf0f8a43252ba9d5b8389a1a5e
[ "MIT" ]
null
null
null
25.045285
944
0.408541
[ [ [ "# Работа с файлами", "_____no_output_____" ], [ "Для открытия файлов в python используется встроенная команда open:", "_____no_output_____" ] ], [ [ "# f = open('testmodule.py', 'r', encoding='cp1252')\nf = open('testmodule.py', 'r', encoding='utf-8')\ns8 = 'rpgkkorgmo'\ns32 = s.encode(encoding='UTF-32')", "_____no_output_____" ], [ "s8", "_____no_output_____" ], [ "s32", "_____no_output_____" ], [ "type(s32)", "_____no_output_____" ], [ "import sys\nprint(sys.getsizeof(s8))\nprint(sys.getsizeof(s32))", "59\n77\n" ] ], [ [ "Функция open() принимает 2 основных аргумента:\n- первый аргумент file - это имя файла, т.е. путь к файлу, который мы хотим открыть, путь может быть как абсолютный, так и относительный, как и в ОС Windows.\n- второй аргумент mode - это режим работы с файлом: для чтения, записи, перезаписи и пр.", "_____no_output_____" ], [ "Обозначение режимов работы с фалом:\n- 'r'\tоткрытие на чтение (является значением по умолчанию).\n- 'w'\tоткрытие на запись, содержимое файла удаляется, создается новый пустой файл.\n- 'x'\tоткрытие на запись, если файла не существует, иначе исключение.\n- 'a'\tоткрытие на дозапись, информация добавляется в конец файла.\n- 'b'\tоткрытие в двоичном режиме.\n- 't'\tоткрытие в текстовом режиме.\n- '+'\tоткрытие на чтение и запись.", "_____no_output_____" ], [ "В python файл представляется идентификатором и вся дальнейшая работа с этим файлом связана с его идентификатором:", "_____no_output_____" ] ], [ [ "f = open('testmodule.py', 'r')\nprint(f)", "<_io.TextIOWrapper name='testmodule.py' mode='r' encoding='UTF-8'>\n" ] ], [ [ "# Чтение из файла", "_____no_output_____" ], [ "Существует 3 основных способа чтения информации из текстового файла:", "_____no_output_____" ], [ "1. Чтение всей информации целиком с помощью функции read()", "_____no_output_____" ] ], [ [ "msg = f.read()\nprint(msg)", "Hi\nПривет\nSome data\n0 1 2 3 4 5 6 7 8\n\n" ] ], [ [ "2. Чтение информации построчно с помощью цикла for", "_____no_output_____" ] ], [ [ "dir(f)", "_____no_output_____" ], [ "for line in f:\n print(line)", "Hi\n\nПривет\n\nSome data\n\n0 1 2 3 4 5 6 7 8\n\n" ] ], [ [ "В выводе присутствуют все переводы строк, как и в файле. Дополнительные переводы строк обусловлены стандартным поведением функции print().", "_____no_output_____" ], [ "3. Чтение всех строк одним списком с помощью функции readlines()", "_____no_output_____" ] ], [ [ "lines = f.readlines()\nfor i in range(len(lines)):\n print(lines[i], end='')", "Hi\nПривет\nSome data\n0 1 2 3 4 5 6 7 8\n" ] ], [ [ "Существует также функция readline(), позволяющая считать лишь одну строчку а не все сразу.", "_____no_output_____" ], [ "# Запись в файл", "_____no_output_____" ], [ "Первым делом для записи в файл его нужно открыть в режиме записи:", "_____no_output_____" ] ], [ [ "f = open('output.txt', 'w')\nprint(f)", "<_io.TextIOWrapper name='output.txt' mode='w' encoding='UTF-8'>\n" ] ], [ [ "1. Запись в файл с помощью метода write:", "_____no_output_____" ] ], [ [ "msg = 'Write to file\\n'\nf.write(msg)", "_____no_output_____" ] ], [ [ "Функции write принимает строковый аргумент - данные для записи, и возвращает число записанных символов.", "_____no_output_____" ], [ "2. Запись в файл с помощью метода print:", "_____no_output_____" ] ], [ [ "msg = \"Write with print\\n\"\nprint(msg, file=f)", "_____no_output_____" ] ], [ [ "После чтения/записи не забывайте закрывать файлы:", "_____no_output_____" ] ], [ [ "f.close()", "_____no_output_____" ] ], [ [ "# With ... 
as - менеджер контекста", "_____no_output_____" ], [ "Конструкция with ... as используется для оборачивания блока инструкций. \nСинтаксис конструкции with ... as:", "_____no_output_____" ] ], [ [ "\"with\" expression [\"as\" target] (\",\" expression [\"as\" target])* \":\"\n suite", "_____no_output_____" ] ], [ [ "Что происходит при выполнении данного блока:\n1) Выполняется выражение в конструкции with ... as.\n2) Загружается специальный метод __exit__ для дальнейшего использования.\n3) Выполняется метод __enter__. Если конструкция with включает в себя слово as, то возвращаемое методом __enter__ значение записывается в переменную.\n4) Выполняется suite.\n5) Вызывается метод __exit__, причём неважно, выполнилось ли suite или произошло исключение. В этот метод передаются параметры исключения, если оно произошло, или во всех аргументах значение None, если исключения не было.", "_____no_output_____" ], [ "Если в конструкции with - as было несколько выражений, то это эквивалентно нескольким вложенным конструкциям:", "_____no_output_____" ] ], [ [ "with A() as a, B() as b:\n suite", "_____no_output_____" ] ], [ [ "эквивалентно", "_____no_output_____" ] ], [ [ "with A() as a:\n with B() as b:\n suite", "_____no_output_____" ] ], [ [ "Для чего применяется конструкция with ... as? Для гарантии того, что критические функции выполнятся в любом случае. Самый распространённый пример использования этой конструкции - открытие файлов, такой способ гарантирует закрытие файла в любом случае.", "_____no_output_____" ] ], [ [ "with open('output.txt','w') as myfile:\n myfile.write('Write without close')", "_____no_output_____" ], [ "print(bool(myfile))\n# myfile.write('data')\nmyfile.closed", "True\n" ], [ "try:\n # fobj = open('path/to/file.txt', 'r')\n fobj = open('testmodule.py', 'r')\n data = fobj.read()\n # print(int(data))\nexcept FileNotFoundError as e:\n print(e)\n print('Could not find the necessary file!')\nexcept ValueError as e:\n print(e)\nelse:\n print('else')\nfinally:\n print('finish')\n if fobj:\n fobj.close()\n", "_____no_output_____" ] ], [ [ "### Как устроен цикл for", "_____no_output_____" ] ], [ [ "for i in range(5):\n print(i)\n\nfor line in open('testmodule.py'):\n print(line)\n\nfor key in {'A' : 1, 'B' : 2, 'C' : 3}:\n print(key)\n\nfor letter in 'Hello, World':\n print(letter, type(letter))\n\nfor i in 1:\n pass", "0\n1\n2\n3\n4\nHi\n\nПривет\n\nSome data\n\n0 1 2 3 4 5 6 7 8\n\nA\nB\nC\nH <class 'str'>\ne <class 'str'>\nl <class 'str'>\nl <class 'str'>\no <class 'str'>\n, <class 'str'>\n <class 'str'>\nW <class 'str'>\no <class 'str'>\nr <class 'str'>\nl <class 'str'>\nd <class 'str'>\n" ], [ "dict_dir = set(dir({}))\nfile_dir = set(dir(open('testmodule.py')))\nint_dir = set(dir(1))\n\nset(dir(list)) & file_dir - int_dir", "_____no_output_____" ], [ "a = [1, 2, 3, 4]\nit = a.__iter__()\n\nit", "_____no_output_____" ], [ "dir(it)", "_____no_output_____" ], [ "set(dir(it)) - int_dir", "_____no_output_____" ], [ "import itertools\nit1 = itertools.cycle('ABC').__iter__()\nset(dir(it1)) - int_dir", "_____no_output_____" ], [ "for i in a:\n print(i)", "1\n2\n3\n4\n" ], [ "a = [1, 2, 3, 4]\nit = a.__iter__()\nprint(it.__length_hint__())\nfor i in range(it.__length_hint__()):\n print(it.__next__())\nit.__next__()", "4\n1\n2\n3\n4\n" ] ], [ [ "### Итерируемая последовательность (aka Iterable)\n\nЭто обьект у которого определён метод \\_\\_iter\\_\\_, возвращающий обьект реализующий протокол *итератора*\n(Примеры: list, dict, file, range)\n\n### Итератор\nЭто обьект у которого определён 
метод \\_\\_next\\_\\_ (это может быть как отдельный обьект, так и, например, self самой последовательности, то есть она может быть итератором по самой себе)\n\n\nМетод __next__ при каждом вызове должен возвращать следующий элемент последовательности, или выкидывать исключение StopIteration, если последовательность кончилась\n\n", "_____no_output_____" ] ], [ [ "range(0)", "_____no_output_____" ] ], [ [ "### iter и next\n\nОпределены как свободные функции, вызывающие соответствующие методы у обьектов.", "_____no_output_____" ] ], [ [ "a = [1, 2, 3]\nit = iter(a)\nit", "_____no_output_____" ], [ "next(it)", "_____no_output_____" ], [ "lst = iter([1])\nprint(next(lst, 5))\nnext(lst, 5)", "1\n" ], [ "def make_timer(ticks):\n def timer():\n nonlocal ticks\n ticks -= 1\n return ticks\n return timer\n\nfor t in iter(make_timer(10), 0): # iter(function , terminal_value)\n print(t, end=' - ')", "_____no_output_____" ] ], [ [ "### реализация цикла for", "_____no_output_____" ] ], [ [ "def handle(x):\n print(x)", "_____no_output_____" ], [ "seq = [1, 2, 3]\nfor x in seq:\n handle(x)", "1\n2\n3\n" ], [ "dir(it)", "_____no_output_____" ], [ "it = iter(seq)\nwhile True:\n try:\n value = next(it)\n handle(value)\n except StopIteration:\n break", "1\n2\n3\n" ] ], [ [ "### Класс и его итератор", "_____no_output_____" ] ], [ [ "class RangeIter(object):\n def __init__(self, frm, to):\n self.to = to\n self.idx = frm\n def __next__(self):\n if self.idx == self.to: raise StopIteration('range ended')\n self.idx += 1\n return (self.idx - 1)\n def __iter__(self):\n return self\n\nclass Range1(object):\n def __init__(self, frm, to):\n self.to = to\n self.frm = frm\n def __iter__(self):\n return RangeIter(self.frm, self.to)\n\nrange1 = Range1(2, 5)\nprint(range1.frm, range2.to)\nfor i in range1:\n print(i, end=' - ')\nprint()\nprint(range1.frm, range2.to)\nfor i in range1:\n print(i, end=' - ')\nprint()\nprint(range1.frm, range2.to)\n\nmy_range_iter = iter(range1)\nwhile True:\n try:\n print(next(my_range_iter))\n except StopIteration as e:\n print(e)\n break\n\nmy_range_iter = iter(range1)\nfor i in range(3):\n print(next(my_range_iter))", "2 5\n2 - 3 - 4 - \n2 5\n2 - 3 - 4 - \n2 5\n2\n3\n4\nrange ended\n2\n3\n4\n" ], [ "%%time\nmy_range_iter = iter(range1)\nwhile True:\n try:\n print(next(my_range_iter))\n except StopIteration as e:\n break", "2\n3\n4\nCPU times: user 2.48 ms, sys: 0 ns, total: 2.48 ms\nWall time: 2.58 ms\n" ], [ "%%time\nmy_range_iter = iter(range1)\nfor i in range(3):\n print(next(my_range_iter))", "2\n3\n4\nCPU times: user 246 µs, sys: 0 ns, total: 246 µs\nWall time: 177 µs\n" ] ], [ [ "### Класс -- итератор", "_____no_output_____" ] ], [ [ "class Range2(object):\n def __init__(self, frm, to):\n self.to = to\n self.idx = frm\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.idx == self.to: raise StopIteration\n self.idx += 1\n return (self.idx - 1)\n\nrange2 = Range2(2, 5)\nprint(range2.idx, range2.to)\nfor i in range2:\n print(i, end=' - ')\nprint()\nprint(range2.idx, range2.to)\nfor i in range2:\n print(i, end=' - ')", "2 5\n2 - 3 - 4 - \n5 5\n" ] ], [ [ "### Исчерпаемость", "_____no_output_____" ] ], [ [ "r1 = Range1(1, 5)\nr2 = Range2(1, 5)\n\nprint(list(r1), list(r2))\nprint(list(r1), list(r2))\n", "[1, 2, 3, 4] [1, 2, 3, 4]\n[1, 2, 3, 4] []\n" ] ], [ [ "### Несколько итераторов\nТак как итератор является итерируемым обьектом - можно определять несколько итераторов для одного обьекта", "_____no_output_____" ] ], [ [ "class BinaryTree(object):\n def inorder(self):\n 
return InOrderIterator(self)", "_____no_output_____" ] ], [ [ "### \\_\\_contains__\n\nможет быть определен для итераторов", "_____no_output_____" ] ], [ [ "class object:\n def __contains__(self, value):\n for item in self:\n if item == value:\n return True\n return False\n'abc'.__contains__('a')", "_____no_output_____" ], [ "class Range:\n def __contains__(self, value):\n return self.frm < value < self.to", "_____no_output_____" ] ], [ [ "### Упрощенный протокол итерируемого - последовательность", "_____no_output_____" ] ], [ [ "class Seq(object):\n def __init__(self, lst):\n self.lst = lst\n def __len__(self):\n return len(self.lst)\n def __getitem__(self, idx):\n if idx < 0 or idx >= len(self):\n raise IndexError(idx)\n return self.lst[idx]\n\nfor i in Seq([1, 2, 3]):\n print(i)", "1\n2\n3\n" ] ], [ [ "Itertools::chain", "_____no_output_____" ] ], [ [ "from itertools import chain\nfor i in chain(range(2, 7), range(10, 32, 4), 'ABCD'):\n print(i)", "2\n3\n4\n5\n6\n10\n14\n18\n22\n26\n30\nA\nB\nC\nD\n" ] ], [ [ "### itertools :: islice", "_____no_output_____" ] ], [ [ "from itertools import islice\nseq = range(10)\nlist(islice(seq, 2, 5, 2)) # seq[2:5]", "_____no_output_____" ], [ "seq = range(10)\nlist(islice(seq, 1, 7, 2)) # seq[1:7:2]", "_____no_output_____" ] ], [ [ "### itertools :: count, cycle, repeat", "_____no_output_____" ] ], [ [ "from itertools import count, cycle, repeat\n\nlist(islice(cycle('test'), 2, 15))", "_____no_output_____" ], [ "list(islice(count(42), 10))", "_____no_output_____" ] ], [ [ "### itertools :: dropwhile and takewhile", "_____no_output_____" ] ], [ [ "from itertools import dropwhile, takewhile\n\nlist(takewhile(lambda x : x < 5, range(10)))", "_____no_output_____" ] ], [ [ "### itertools :: chain", "_____no_output_____" ] ], [ [ "from itertools import chain\nlist(chain([1, 2], 'test', range(3)))", "_____no_output_____" ], [ "def geniter(count):\n for i in range(count):\n yield range(3)\n\nlist(chain.from_iterable(geniter(3)))", "_____no_output_____" ] ], [ [ "### itertools :: tee", "_____no_output_____" ] ], [ [ "from itertools import tee\n\na, b, c = tee(range(3), 3)\nprint(list(a), list(b), list(c))", "_____no_output_____" ] ], [ [ "### itertools :: комбинаторика", "_____no_output_____" ] ], [ [ "from itertools import product\n\nlist(product('AB', 'XY'))", "_____no_output_____" ], [ "from itertools import permutations\n\nlist(permutations('ABC'))", "_____no_output_____" ], [ "from itertools import combinations\n\nlist(combinations('ABC', 2))", "_____no_output_____" ] ] ]
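A short addition (a sketch, not part of the original notebook): the `Range1`/`Range2` classes above can also be written with a generator function, which produces a fresh iterator on every `__iter__` call, so the iterable is not exhausted after the first pass.

```python
# Sketch: the same range-like iterable as Range1/Range2, built with a generator.
class Range3:
    def __init__(self, frm, to):
        self.frm = frm
        self.to = to

    def __iter__(self):
        # A new generator object is created on every call, so Range3 is reusable.
        current = self.frm
        while current < self.to:
            yield current
            current += 1

r3 = Range3(2, 5)
print(list(r3))   # [2, 3, 4]
print(list(r3))   # [2, 3, 4] again -- not exhausted, unlike Range2
```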
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec7f178cfa53a046796c69f5943b04940982cc2f
307,369
ipynb
Jupyter Notebook
morea/clustering/resources/26_clustering.ipynb
mahdi-b/ICS434-Sp2022
b2a6daf9833f4f4ec67b74c81794b1d7259b9909
[ "Apache-2.0" ]
null
null
null
morea/clustering/resources/26_clustering.ipynb
mahdi-b/ICS434-Sp2022
b2a6daf9833f4f4ec67b74c81794b1d7259b9909
[ "Apache-2.0" ]
null
null
null
morea/clustering/resources/26_clustering.ipynb
mahdi-b/ICS434-Sp2022
b2a6daf9833f4f4ec67b74c81794b1d7259b9909
[ "Apache-2.0" ]
1
2022-03-26T04:28:08.000Z
2022-03-26T04:28:08.000Z
116.163643
76,376
0.840501
[ [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### What is Clustering\n\n\n* Clustering analysis, or clustering, is a computational approach to uncover structure in a dataset.\n\n * Finding groups of points within a dataset that are grouped, or lumped, together\n\n \n\n* It is a machine learning task.\n * It is said to be unsupervised since the analyst does not describe or provide examples of things that should belong together in a cluster.\n * Supervised machine learning provides labeled data (e.g., instances of spam email vs. ham, or legit email) and trains model to recognize if an email is either spam or ham.", "_____no_output_____" ], [ "### What is a Cluster?\n\n* Descriptions such as “groups of points that are similar” or “close to each other”\n\n* The notion of a “cluster” is not well defined \n * Clusters may be at times subjective\n * What the concept of boundary around a set of similar points is subjective\n \n* Once we see cluster we \"kind of\" know what it is\n * Ex. Do any two points that very similar form a cluster\n \n<img src=\"https://www.dropbox.com/s/8v2uvd1mzzwmou7/cluster_def.png?dl=1\" width=\"400\">\n<center>\nData clustering: 50 years beyond K-means, Jain 2009\n</center> ", "_____no_output_____" ], [ "### Question\n\n* Is a cluster simply a group of points that are close?\n* How can you generate points that are distributed like in the graph below?\n\n\n<img src=\"https://www.dropbox.com/s/kwgzpkyn0btikgl/random_points.png?dl=1\" alt=\"drawing\" width=\"300\"/>\n\n", "_____no_output_____" ] ], [ [ "x_axis = np.random.choice(np.arange(100), size=100)\ny_axis = np.random.choice(np.arange(100), size=100)\n\nprint(x_axis[0:5])\nprint(y_axis[0:5])", "[41 53 84 69 52]\n[11 4 14 74 39]\n" ], [ "plt.figure(figsize=(4, 4))\nplt.scatter(x_axis, y_axis)\n\n# I used the code below to remove the x and y axis ticks ()\n_ = plt.xticks([], [])\n_ = plt.yticks([], [])", "_____no_output_____" ] ], [ [ "### What is a Cluster? - Cont'd\n\n- We can define a cluster as a contiguous region of high data point density\n - Points in the region are close to each other.\n - Areas contain more than just a few points that seem to occur randomly together.\n- Regions are separated by areas of lower point density\n - Points across regions are dissimilar\n \n <img src=\"https://www.dropbox.com/s/zyi30gcfhbzfulp/two_well_defined_clusters.png?dl=1\" alt=\"drawing\" width=\"200\"/>\n\n", "_____no_output_____" ], [ "### What is a Cluster? 
- Cont'd\n \n<img src=\"https://www.dropbox.com/s/zyi30gcfhbzfulp/two_well_defined_clusters.png?dl=1\" alt=\"drawing\" width=\"200px;\"/>\n\n\n* How can we generate a dataset like the one above\n", "_____no_output_____" ] ], [ [ "mean_c1 = [8, 12]\ncov_c1 = [[1,0], [0, 1]]\nc1 = np.random.multivariate_normal(mean_c1, cov_c1, 40)\n\nmean_c2 = [4, 4]\ncov_c2 = [[1,0], [0, 1]]\nc2 = np.random.multivariate_normal(mean_c2, cov_c2, 29)", "_____no_output_____" ], [ "plt.figure(figsize=(5, 5))\n\nplt.scatter(c1[:,0], c1[:, 1])\nplt.scatter(c2[:,0], c2[:, 1])\n\nplt.xticks([], [])\n_ = plt.yticks([], [])", "_____no_output_____" ] ], [ [ "### What is a Cluster - Cont'd\n\n- Contiguous regions of high data point density need not be highly dissimilar\n - On average, points across regions are dissimilar\n - Some points may be closer than others\n \n <img src=\"https://www.dropbox.com/s/udeg22vvrzwkqae/two_cluster_less_well_defined.png?dl=1\" alt=\"drawing\" width=\"300\"/>", "_____no_output_____" ], [ "### Data Shapes\n\n* Clusters may have complicated shapes\n * While the clusters above are orderly but real clusters can be *very* messy\n * Some clusters do not subscribe to the definitions above \n * Such clusters may challenge simple approaches\n\n <img src=\"https://www.dropbox.com/s/aj99oktcxc3dpxp/data_moons.png?dl=1\" alt=\"drawing\" width=\"500\"/>", "_____no_output_____" ], [ "### Applications of Clustering\n\n* Clustering can be applied in a variety of contexts, from genetics and genomics to user input consolidation and investment\n* Definition of the clusters across domains is the same\n * Regions of high-density points where points characterized by high similarity within regions and low similarity across regions \n * Naturally, the concept of similarity and distance between points is domain-specific\n * Similarity between two Netflix users can be defined as the degree of agreement on ratings of movies they watched\n * Or just the movies they watched.\n * The similarity between two stocks, say apple, qualcom\n * Useful to the diversification of portfolios", "_____no_output_____" ], [ "### Ex. Data Reconciliation\n\n* Very useful for finding groups of different values that might be alternative representations of the same thing\n\n* This is called reconciliation and can be a massive problem for some industries\n\n\n<img src=\"https://www.dropbox.com/s/djz78gqdr1izotd/Clustering_strings.png?dl=1\" alt=\"drawing\" width=\"500\"/>\n\n\n* First Avenue, First Ave, 1st Ave, Furst Ave, etc.", "_____no_output_____" ], [ "### Ex. Stocks Clustering\n\n* Useful to derive a strategy for assets diversification\n\n<img src=\"https://www.dropbox.com/s/dg3uh30ty6vrk3u/stocks_clusters.png?dl=1\" alt=\"drawing\" width=\"500\"/>\n", "_____no_output_____" ], [ "### Ex. 
Market Segmentation\n\n* Who are my potential customer groups?\n\n * Who is buying my app and what features should be most appropriate?\n\n<img src=\"https://www.dropbox.com/s/efa5d3vemfz01xt/market-segmentation.png?dl=1\" alt=\"drawing\" width=\"500\"/>\n", "_____no_output_____" ], [ "### Distance and Similarity Measures \n \n* Clustering requires being able to compute a distance (or similarity) between the data points\n * How similar are two stocks?\n\n* A distance is any function, `d`, that takes two points (`x` and `y`) and returns a scalar value that is a measure for how different these points are\n * The more dissimilar the points, the larger the distance \n * It may be easier to make more sense to compute a similarity using a function `s`\n * s(x, y) can be easily converted into d(x,y)\n \n* While data points do not have to be embedded into a geometric space, it helps to think about their distance (or similarity) as if they were\n * If we can transform data points to have properties of a vector space then we can develop more efficient algorithms that exploit these properties\n", "_____no_output_____" ], [ "### Question \n\n * How can you converte a normalized similarity into a distance?\n * similarity is in [0,1] \n \n \n<img src=\"https://www.dropbox.com/s/5deolbibqg0k5ga/dist_sim.png?dl=1\" alt=\"drawing\" width=\"400\"/>\n\n ", "_____no_output_____" ], [ "### Distance to Similarity\n\n<img src=\"dist_similarity.png\" alt=\"drawing\" width=\"600\"/>\n\n", "_____no_output_____" ] ], [ [ "\nplt.figure(figsize=(22,6))\n\nsimilairity = np.arange(0,1.01, 0.01)\nmethod_1 = 1-similairity\n\nplt.subplot(2,2, 1)\n\nplt.plot(similairity, method_1)\n# plt.xlabel(\"Similarity\", fontsize=18)\nplt.ylabel(\"distance\", fontsize=18)\n_ = plt.xticks(fontsize=14)\n_ = plt.yticks(fontsize=14)\n\nplt.subplot(2,2, 2)\n\n\nmethod_2 = -np.log(similairity + 0.0001)\nplt.plot(similairity, method_2)\nplt.xlabel(\"Similarity\", fontsize=18)\nplt.ylabel(\"distance\", fontsize=18)\n_ = plt.xticks(fontsize=14)\n_ = plt.yticks(fontsize=14)\n\n\n\nplt.subplot(2,2, 3)\n\nmethod_3 = np.sqrt(1 - similairity)\nplt.plot(similairity, method_3)\nplt.xlabel(\"Similarity\", fontsize=18)\nplt.ylabel(\"distance\", fontsize=18)\n_ = plt.xticks(fontsize=14)\n_ = plt.yticks(fontsize=14)\n\n", "_____no_output_____" ] ], [ [ "### Distance and Similarity Measures - Cont'd\n\n* Sometimes, the concept of distance in geometric space is clear\n\n * Example, Use any measure that considers the locations of the points in such a space\n * Most points are not just in 2-d but exist in higher-dimensional space\n \n<img src=\"https://www.dropbox.com/s/xgi9i6hjskklvds/distnace_a_b.png?dl=1\" alt=\"drawing\" style=\"width:500px;\"/>\n \n \n* Oher times, there is no obvious choice as to how to compute the distance between two points\n * You are clustering music genres. How do you compute the distance between two songs?\n * You need to convert the song into features \n * Identify relevant dimensions\n * e.g., Beats per second, the proportion of various instruments soloing, properties of the lyrics or presence/absence of certain words, etc...\n ", "_____no_output_____" ], [ "### Ex. Distance Between Music Genres\n<img src=\"https://www.dropbox.com/s/tybdrop2kpb1fc5/music_genres.png?dl=1\" width=350/>\n<center>Genre Complexes in Popular Music. 
Silver et al., Plos One, 2016</center>", "_____no_output_____" ], [ "### Common Distance Metrics \n\n* For simplicity, we'll classify the distances into two high-level categories\n * Euclidean and spherical geometry distances\n * Use the location of the points in an Euclidean or spherical coordinate systems to estimate their distance\n * Other \n * Non-Euclidean and non-spherical geometry distances\n * Use properties of points to estimate their distance\n* This is a coarse and somewhat subjective categorization, and other classifications are possible.", "_____no_output_____" ], [ "### Common Distance Metrics -- Cont'd\n\n\n* The Euclidean distance, \n * Most commonly used distance measure\n * Distance between two points P and Q with coordinate $(p_1,p_2)$ and $(q_1,q_2)$ respectively is:\n\n$$\nd_{pq} = \\sqrt{(p_1-q_1)^2 + (p_2-q_2)^2}\n$$\n", "_____no_output_____" ], [ "### Euclidean and Spherical Distances - Cont'd \n\n* The Manhattan (or taxicab) distance\n* Geodesic distance\n \n\n<img src=\"https://www.dropbox.com/s/o4ylcqi43cwn33b/distances.png?dl=1\" alt=\"drawing\" width=\"400\"/>\n", "_____no_output_____" ], [ "### Non-Euclidean Distance\n\nExamples of non-euclidean distances are: \n\n* Edit distance or number of inserts and deletes to change one string into another\n * Example, what is the distance between `x = AGACGTAG` and `y = GTTCAGA`\n\n<img src=\"https://www.dropbox.com/s/xl1gj2nvzhrjtas/edit_distance.png?dl=1\" alt=\"drawing\" width=\"600\"/>\n\n * Remove A, convert A to T, ... \n * The distance between `x` and `y` is 5\n\n* Jaccard distance\n * Used with sets. It computes the number of items shared over all the items in the set.\n\n\n\n", "_____no_output_____" ], [ "### ### Non-Euclidean Distance: Jaccard Distance\n\n<img src=\"https://www.dropbox.com/s/m01tcw8gdfr4qry/jaccard.png?dl=1\" alt=\"drawing\" width=\"600\"/>\n", "_____no_output_____" ] ], [ [ "symptoms = [\"Muscle Cramps\", \"Weight Gain\", \"Easy Bruising\",\"Metallic Taste\",\"Paranoia\",\"Leg Pain\",\"Gas and Bloating\",\"Mouth Sores\",\"Nausea, Upset Stomach\",\"Rectal Bleeding\",\"Shortness of Breath\",\"Muscle Cramps\",\"Urine Odor\",\"Swollen Ankles and Feet\",\"Joint Cracking\",\"Eye Twitch\",\"Dry Skin\"]\n\npatient_ids = pd.util.testing.rands_array(6, 20)\npatient_ids\n\n", "_____no_output_____" ], [ "patient_outcomes = np.random.choice([0, 1], size=(len(symptoms))) \npatient_outcomes", "_____no_output_____" ], [ "data = [np.random.choice([0, 1], size=(len(symptoms))) for _ in range(len(patient_ids))]\ndata", "_____no_output_____" ], [ "patients_data = pd.DataFrame(columns=symptoms, data=data, index=patient_ids)\npatients_data.head()\n", "_____no_output_____" ], [ "vec_1 = np.array([1, 0, 1, 1, 1])\nvec_2 = np.array([1, 0, 0, 1, 1])\n\n\n(vec_1 == vec_2) ", "_____no_output_____" ], [ "x = np.array([ True, True, False])\ny = np.array([ True, False, True])\n\nx & y", "_____no_output_____" ], [ "vec_1 = np.array([1, 0, 1, 1, 1])\nvec_2 = np.array([1, 0, 0, 1, 1])\n\n\nsum((vec_1 == vec_2) & (vec_1 == 1))\n", "_____no_output_____" ], [ "vec_1 = np.array([1, 0, 1, 1, 1])\nvec_2 = np.array([1, 0, 0, 1, 1])\n\n\nsum((vec_1 == 1) | (vec_2 == 1))\n", "_____no_output_____" ], [ "def compute_Jaccard_distance(vec_1, vec_2 ):\n \n # All the symptoms shared by both\n intersection = sum((vec_1 == vec_2) & (vec_1 == 1))\n # All the symptoms that either one has\n union = sum((vec_1 == 1) | (vec_2 == 1))\n return 1 - intersection / union\n ", "_____no_output_____" ], [ "vec_1 = np.array([1, 0, 1, 1, 1])\nvec_2 = 
np.array([1, 0, 0, 1, 1])\n\n\nprint(compute_Jaccard_distance(vec_1, vec_2 ))", "0.25\n" ] ], [ [ "### Distance Matrix\n\n* Common to store the pair-wise distances between data points in a distance matrix $M$\n\n* The matrix has a dimention $n \\times n$, where n is the number of data points\n\n* For any two points $p_i$ and $p_j$, \n\n\n$$ \nM[i, j] = d(p_i, p_j) \\\\\nM[j, i] = d(p_i, p_j)\n$$\n \n", "_____no_output_____" ] ], [ [ "np.random.seed(42)\ndist_patients = np.zeros([3, 3])\ndist_patients\n", "_____no_output_____" ], [ "# Compute All Pair-Wise Jaccard Coefficients\ndist_patients = np.zeros([len(patient_ids), len(patient_ids)])\n\nfor i in range(patients_data.shape[0]):\n for j in range(i, patients_data.shape[0]):\n vec_1 = patients_data.iloc[i]\n vec_2 = patients_data.iloc[j]\n\n # compute distance and round to two decimal points\n dist_patients[i, j] = round(compute_Jaccard_distnace(vec_1, vec_2 ), 2)\n dist_patients[j, i] = dist_patients[i, j]\n \ndist_patients", "_____no_output_____" ], [ "from numpy import unravel_index\n\nprint(np.max(dist_patients), np.argmax(dist_patients))\n\nprint(unravel_index(np.argmax(dist_patients), dist_patients.shape))\n\n# dist_patients[dist_patients==0] = 10000\n\n# Use nanmin, nanmax, nanargmin and nanargmax \nprint(np.nanmin(dist_patients), np.nanargmin(dist_patients))\nprint(unravel_index(np.argmin(dist_patients), dist_patients.shape))\n", "0.93 8\n(0, 8)\n0.0 0\n(0, 0)\n" ], [ "patients_data.iloc[[0,8]]\n", "_____no_output_____" ], [ "dist_patients_temp = dist_patients.copy() \nfor i in range(patients_data.shape[0]):\n dist_patients_temp[i,i] = 1000\n \nprint(np.nanmin(dist_patients_temp), np.nanargmin(dist_patients_temp))\nprint(unravel_index(np.argmin(dist_patients_temp), dist_patients_temp.shape))\n", "0.31 295\n(14, 15)\n" ], [ "patients_data.iloc[[14,15]]\n", "_____no_output_____" ] ], [ [ "### Devising you Own Distance\n\n* There are certain properties that a distance (or similarity) function should have\n * A distance in a mathematical sense must have at least the following properties \n\n$$\nd(x, y) \\ge 0 \\\\\nd(x, y) = d(y, x) \\\\\nd(x, y) + d(y, z) \\ge d(x, z)\n$$ \n\n* In practice, it may be hard to identify a distance that guarantees triangle inequality\n * Some metrix are not symmetric\n * E.g., length of the shortest clockwise path between $p$ and $q$ \n * Some relatively good metrics are not transitive\n\n * For technical reasons, the symmetry property is usually highly desirable. 
\n * You can always construct a symmetric distance function from an asymmetric one\n\n$$\n dS(x,y)= \\frac{d(x,y)+d(y,x)}{2}\n$$", "_____no_output_____" ], [ "### Smooth Functions are Better than Non-Smooth Ones\n\n* Naive distance\n\n$$\nd(x,y) = \\sum{d_i(x_i, y_i)}~~\\mbox{where,}\\\\\nd_i(x_i, y_i)= \\left\\{\n \\begin{array}{ll}\n 0 & \\mbox{if } x_i=y_i \\\\\n 1 & \\mbox{otherwise}\n \\end{array}\n\\right.\n$$\n\n* This function is not smooth\n * Difference between `[1,2]` and `[2,3]` is the same as the difference between `[1,2]` and `[100, 200]`\n \n", "_____no_output_____" ], [ "### Clustering Approaches\n\nIn this section we will cover two approaches to clustering a set of points into clusters\n\n* Hierarchical \n * Each point starts as its own cluster\n * Repeatedly combine the two “closest” clusters into one\n * Stop when only one cluster remains\n * This is also known as \"agglomerative hierarchical clustering\"\n \n* Point-to-cluster Assignment\n * Maintain a set of k clusters (e.g., `k = 3`)\n * Place points into the “closest” cluster\n * Iteratively repeat the process until the solution is stable\n \n \n* There are more advanced/complex approaches\n * Difficult to group into categories because of the diversity of approaches and overlaps\n * Probabilistic methods are increasingly popular\n * We will briefly cover other methods to visualize clusters\n * PCA, t-SNE and UMAP\n", "_____no_output_____" ], [ "### Hierarchical Clustering\n\n<img src=\"https://www.dropbox.com/s/pvsw16v49j1a9m4/hierarchical_clustering.png?dl=1\" alt=\"drawing\" width=\"600\"/>\n\n", "_____no_output_____" ], [ "### Hierarchical Clustering Dendrogram Representation\n\n<img src=\"https://www.dropbox.com/s/xyohvxzsgwkeo1d/hierarchical_clustering_dendo.png?dl=1\" alt=\"drawing\" width=\"700\"/>\n\n", "_____no_output_____" ], [ "### Hierarchical Clustering - Cont'd\n\n* Repeatedly combine the two “closest” clusters into one\n\n* How do you know which pair of clusters is the closest?\n * We can only compute distances between two points\n\n* One possible solution: \n * Represent the location of each cluster using a single measure:\n * E.g., the centroid, or the average of all points in a cluster\n\n* Take all centroids and find the closest pair ", "_____no_output_____" ], [ "### Question\n\n* Does the centroid approach work for all types of data?\n\n * How do you compute the centroid for the DNA sequences from before?\n", "_____no_output_____" ], [ "### Hierarchical Clustering with Non-Euclidean Distances\n\n* There is no “average” of two points\n\n* We can compute the distance between two clusters using other approaches. 
For example:\n \n * Minimum of the distances between any two points, one from each cluster\n * Minimum Linkage\n * Maximum of the distances between any two points, one from each cluster\n * Maximum Linkage\n * Average of the distances between any two points, one from each cluster\n * Average Linkage\n\n* Pick a notion of “cohesion” of clusters, e.g., the maximum distance from the clustroid \n\n * A clustroid is simply the center of the cluster based on, for example, the average of intra-cluster distances\n\n", "_____no_output_____" ], [ "### Question:\n\n* Given $n$ data points, what is the complexity of clustering in the worst case scenario?\n* Recall we need to:\n\n 1- Build the distance matrix\n 2- Merge n points iteratively until we end up with a single cluster\n\n\n", "_____no_output_____" ], [ "### Hierarchical Clustering Results\n\n* Easy to implement and understand (plus)\n\n\n* Method is computationally intensive (minus)\n\n\n* Extracting clusters can be tricky! (minus)\n * Data points are leaves \n * Distance in the tree between the leaves is representative of the distance between the data points\n \n\n\n", "_____no_output_____" ], [ "### Second Category: Assigning Points to Clusters\n\n* A popular clustering algorithm\n\n* Two steps\n * Define the number of clusters, $k$ \n * Initializing cluster representatives\n * Assignment of points to each cluster\n * Recalculating the centroids\n\n", "_____no_output_____" ], [ "### k-Means Clustering Steps\n\n* Assumes Euclidean space\n * Cannot handle categorical data\n\n<img src=\"https://www.dropbox.com/s/sbfedy63m3krllm/kmeans_a.png?dl=1\" width=\"300\"/>\n", "_____no_output_____" ], [ "\n### k-Means Clustering: Initialization\n\n* Initialize clusters by $k$ points; one per cluster\n * Those are the cluster representatives; the cluster centroids\n * For instance, take $k$ points in space at random\n \n<img src=\"https://www.dropbox.com/s/ovkur4oeb5r3p9m/kmeans_b.png?dl=1\" width=\"300\"/>", "_____no_output_____" ], [ "### k-Means Clustering: Assign Points to Clusters\n\n* Place each point in the cluster identified by the closest centroid\n\n * Here, we \"paint\" the examples the same color as the cluster centroid to which they were assigned\n \n<img src=\"https://www.dropbox.com/s/i8ueidywma580a0/kmeans_c.png?dl=1\" width=\"300\"/>\n\n\n", "_____no_output_____" ], [ "### k-Means Clustering: Recalculating the Centroids\n\n* After all points are assigned, recompute the centroids of the $k$ clusters\n\n<img src=\"https://www.dropbox.com/s/msdd7jlf9vq4cje/kmeans_d.png?dl=1\" width=\"300\"/>\n", "_____no_output_____" ], [ "### k-Means Clustering: Assign and Recalculate Centroids \n\n* Repeat these two steps until convergence\n\n * The clustering is stable, and nothing changes.\n * i.e., assigning the points to the closest centroid and recomputing the centroids does not change the centroids or the assignments\n\n", "_____no_output_____" ], [ "### K-means Clustering Results\n\n* Algorithm is non-deterministic (minus)\n * A different choice of starting values may result in a different assignment of points to clusters\n * Customary to run the k-means algorithm several times and compare results or compute a consensus\n* Cannot be used with categorical data (non-Euclidean space) (minus)\n * Difficult, or sometimes impossible, to construct centroids from categorical data\n \n* Can use the k-medoid variant, where the representative is the point with the smallest average distance to all other points in the cluster (plus)\n * A medoid is the point in the cluster with the smallest distance to all 
the other points in the cluster.\n * Less affected by outliers\n * More stable\n", "_____no_output_____" ], [ "### K-means Clustering Results - Cont'd\n\n\n* Can take a long time to converge with some poor initializations (minus) \n * Very rare, but a possibility, nevertheless \n\n* Computationally efficient $O(k \\cdot n)$ (plus)\n * linear in the number of points!\n * Small number of iterations to converge (10–50 iterations are typical)\n\n* Works relatively well in real life.", "_____no_output_____" ], [ "### How Do We compute the Results of Clustering\n\n* What distinguishes a good clustering solution from a bad one?\n\n* Recall that what we want is:\n 1. Points within the same cluster to be very similar; \n * The average distance between points in the same cluster should be small\n * This is called *cohesion* and it should be relatively small\n 2. Points across clusters to be highly dissimilar; \n * Distnace between between points from distinct clusters should be large\n * This is called *separation* and it should be relatively large\n \n* A clustering solution is \n\n$$\n \\frac{separation}{cohesion}\n$$ \n\n* Large separation compared to low cohesion\n* Large values are indicative of a good separation \n", "_____no_output_____" ], [ "### The Silhouette Coefficient\n\n* A good \"score\" to estimate the ratio of separation to cohesion\n\n* For each cluster we denote:\n * $a_i$ is the average distance that point $i$ has from all points in same cluster\n * Represents the cohesion; we want it o be as small as possible \n * $b_i$ be the smallest distance between point $i$ and a point not in the same cluster as $i$\n * Represents separation we want it to be as large as possible \n \n", "_____no_output_____" ], [ "### The Silhouette Coefficient - Cont'd\n\n* We measure the Silhouette Coefficient of a point $i$ as \n\n$$\nS_i = \\frac{b_i - a_i}{max(a_i, b_i)}\n$$\n\n* Silhouette coefficient ranges from −1 to 1\n * Negative values indicate $a_i$ is larger than b_i\n * Low cohesion or small separation or both\n * This suggests poor clustering\n * Positive values indicate $b_i$ is larger than $a_i$\n * High cohesion or high separation or both\n \n", "_____no_output_____" ], [ "### Computing Cluster-Wide Silhouette Coefficient\n\n* Average silhouette coefficients within cluster \n * A good indicator of the quality of the entire cluster\n * Same observations as before; large values denote good separation\n", "_____no_output_____" ] ], [ [ "mean_c1 = [8, 12]\ncov_c1 = [[1,0], [0, 1]]\nc1 = np.random.multivariate_normal(mean_c1, cov_c1, 40)\n\nmean_c2 = [4, 4]\ncov_c2 = [[1,0], [0, 1]]\nc2 = np.random.multivariate_normal(mean_c2, cov_c2, 29)\n\n\n\nplt.figure(figsize=(4,4))\nplt.scatter(c1[:,0], c1[:, 1])\nplt.scatter(c2[:,0], c2[:, 1])\n\nplt.xticks([], [])\nplt.yticks([], [])\n", "_____no_output_____" ], [ "from scipy.spatial import distance\n\ndef compute_b(pt_cl1, c2):\n # Closest points across cluster distant points\n min_dist = np.inf\n hit_pt_c1, hit_pt_c2 = (None, None)\n \n for j, pt2 in enumerate(c2):\n d = distance.euclidean(pt_cl1, pt2)\n if d < min_dist:\n min_dist = d\n hit_pt_c2 = pt2\n return hit_pt_c2, min_dist\n", "_____no_output_____" ], [ "pt_cl1 = c2[10]\nplt.figure(figsize=(8,8))\nplt.scatter(c1[:,0], c1[:, 1], c=\"green\", alpha=0.3)\nplt.scatter(c2[:,0], c2[:, 1], c='black', alpha=0.2)\n\n\nfor other_pt in c1:\n plt.plot([pt_cl1[0], other_pt[0]], [pt_cl1[1], other_pt[1]], 'r-', alpha=0.3)\n ", "_____no_output_____" ], [ "compute_b(pt_cl1, c2)", "_____no_output_____" ], [ 
"c1_pt2, _ = compute_b(pt_cl1, c2)\n\nplt.figure(figsize=(8,8))\nplt.scatter(c1[:,0], c1[:, 1], c=\"green\")\nplt.scatter(c2[:,0], c2[:, 1], c='black')\nplt.scatter(c1_pt1[0], c1_pt1[1], color='red')\nplt.scatter(c1_pt2[0], c1_pt2[1], color='red')\nplt.xticks([], [])\nplt.yticks([], [])\n\nplt.plot([c1_pt1[0], c1_pt2[0]], [c1_pt1[1], c1_pt2[1]], 'ro-')\n", "_____no_output_____" ], [ "def compute_a(cluster, target_pt_id):\n distances = []\n for other_pt_id in range(len(cluster)):\n if target_pt_id != other_pt_id:\n distances.append(distance.euclidean(cluster[target_pt_id], cluster[other_pt_id]))\n return np.mean(distances)\n ", "_____no_output_____" ], [ "plt.figure(figsize=(8,8))\nplt.scatter(c1[:,0], c1[:, 1], c=\"green\", alpha=0.3)\nplt.scatter(c2[:,0], c2[:, 1], c='black', alpha=0.2)\n\n\nfor pt1 in c2:\n if pt2 in c2:\n plt.plot([pt1[0], pt2[0]], [pt1[1], pt2[1]], 'r-', alpha=0.3)\n ", "_____no_output_____" ], [ "pt_id = 10\ncompute_a(c2, pt_id)", "_____no_output_____" ], [ "pt_id = 10\npt = c2[10]\nplt.figure(figsize=(8,8))\nplt.scatter(c1[:,0], c1[:, 1], c=\"green\", alpha=0.2)\nplt.scatter(c2[:,0], c2[:, 1], c='black', alpha=0.3)\n\nplt.xticks([], [])\nplt.yticks([], [])\n\nfor other_pt in c2:\n if other_pt_id != point_id:\n plt.plot([pt[0], other_pt[0]], [pt[1], other_pt[1]], 'r-', alpha=0.3)\n \nplt.scatter(pt[0], pt[1], c='k', marker=\"s\", alpha=1) \n", "_____no_output_____" ], [ "compute_b(pt, c2)", "_____no_output_____" ], [ "def pt_silhouette(cluster, target_pt_id, other_cluster):\n a_i = compute_a(cluster, target_pt_id)\n _, b_i = compute_b(cluster[target_pt_id], other_cluster)\n return (b_i - a_i) / max(a_i, b_i)\n\npt_silhouette(c2, pt_id, c1) ", "_____no_output_____" ], [ "pt_id_c1 = 21\npt_c1 = c1[21]\nprint(compute_a(c1, pt_id_c1))\nprint(compute_b(pt_id_c1, c2)[1])\npt_silhouette(c1, pt_id, c2)", "2.6981120218302315\n21.759467543982822\n" ], [ "pt_id = 21\npt = c1[10]\nplt.figure(figsize=(8,8))\nplt.scatter(c1[:,0], c1[:, 1], c=\"green\", alpha=0.2)\nplt.scatter(c2[:,0], c2[:, 1], c='black', alpha=0.3)\n\nplt.xticks([], [])\nplt.yticks([], [])\n\nfor other_pt in c1:\n if other_pt_id != point_id:\n plt.plot([pt[0], other_pt[0]], [pt[1], other_pt[1]], 'r-', alpha=0.3)\n \nplt.scatter(pt[0], pt[1], c='k', marker=\"s\", alpha=1) \n", "_____no_output_____" ] ], [ [ "### Computig Cluster-Wide Silhouette Coefficient\n\n* Average over the silhouette coefficients for all data points\n\n * Provides a good estimator of the quality of the overall clustering\n ", "_____no_output_____" ] ], [ [ "# For simplicity, the solution assumes two clusters \n# Need to considers additional clusters in solution\ndef cluster_silhouette(cluster, other_cluster):\n silhouette_coeffs = []\n for pt_id in range(len(cluster)):\n silhouette_coeffs.append(pt_silhouette(cluster, pt_id, other_cluster))\n return(np.mean(silhouette_coeffs))\n \n ", "_____no_output_____" ], [ "print(cluster_silhouette(c1, c2), cluster_silhouette(c2, c1))\nnp.mean([cluster_silhouette(c1, c2), cluster_silhouette(c2, c1)])", "0.6534134308853174 0.6904926432489982\n" ], [ "mean_c1 = [8, 12]\ncov_c1 = [[6,0], [0, 9]]\nc1 = np.random.multivariate_normal(mean_c1, cov_c1, 40)\n\nmean_c2 = [4, 4]\ncov_c2 = [[2,0], [0, 2]]\nc2 = np.random.multivariate_normal(mean_c2, cov_c2, 29)\n\n\n\nplt.figure(figsize=(4,4))\nplt.scatter(c1[:,0], c1[:, 1])\nplt.scatter(c2[:,0], c2[:, 1])\n\nplt.xticks([], [])\nplt.yticks([], [])\n", "_____no_output_____" ], [ "print(cluster_silhouette(c1, c2), cluster_silhouette(c2, 
c1))\nnp.mean([cluster_silhouette(c1, c2), cluster_silhouette(c2, c1)])", "0.08026534373225395 0.08920863624598391\n" ], [ "mean_c1 = [8, 12]\ncov_c1 = [[8,0], [0, 13]]\nc1 = np.random.multivariate_normal(mean_c1, cov_c1, 40)\n\nmean_c2 = [4, 4]\ncov_c2 = [[4,0], [0, 4]]\nc2 = np.random.multivariate_normal(mean_c2, cov_c2, 29)\n\n\n\nplt.figure(figsize=(8,8))\nplt.scatter(c1[:,0], c1[:, 1])\nplt.scatter(c2[:,0], c2[:, 1])\n\nplt.xticks([], [])\nplt.yticks([], [])\n", "_____no_output_____" ], [ "print(cluster_silhouette(c1, c2), cluster_silhouette(c2, c1))\nnp.mean([cluster_silhouette(c1, c2), cluster_silhouette(c2, c1)])", "-0.19031767325037774 -0.06837193910062084\n" ] ], [ [ "### Using Silhouettes to Decide on the Best Number of Clusters \n\n* The silhouette coefficient can be useful to determine the number of clusters present in the dataset\n\n* Run the algorithm several times for each possible value of $k$ and compute the overall silhouette coefficient each time\n\n * We should observe a peak at the best value of $k$\n\n### Question\n\n* Why do we need to run the $k$-Means algorithm *several times* for each value of $k$ we would like to test?", "_____no_output_____" ] ] ]
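The serialized notebook above describes choosing the number of clusters by scanning values of $k$ and looking for a peak in the overall silhouette, but stops short of showing that scan. Below is a minimal illustrative sketch of the procedure, assuming scikit-learn is available; the helper name `best_k_by_silhouette` and the use of `KMeans`/`silhouette_score` are additions for illustration, not part of the notebook itself.

```python
# Illustrative sketch: scan candidate k values, average the silhouette over several
# random initializations per k, and keep the k with the highest mean score.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

def best_k_by_silhouette(X, k_values=range(2, 9), n_runs=5, seed=0):
    mean_scores = {}
    for k in k_values:
        run_scores = []
        for run in range(n_runs):
            # k-means depends on its random initialization, so a single run per k
            # may converge to a poor local optimum; averaging several runs is safer.
            labels = KMeans(n_clusters=k, n_init=1, random_state=seed + run).fit_predict(X)
            run_scores.append(silhouette_score(X, labels))
        mean_scores[k] = float(np.mean(run_scores))
    best_k = max(mean_scores, key=mean_scores.get)
    return best_k, mean_scores

# Example usage with the two generated clusters: best_k_by_silhouette(np.vstack([c1, c2]))
```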
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ec7f470d190a5f824e4d394d2c350dc0d2b5bf13
37,799
ipynb
Jupyter Notebook
docs/examples/use_cases/webdataset-externalsource.ipynb
szkarpinski/DALI
999379b7ed2145f5da2de9f2dca566b3912fb366
[ "ECL-2.0", "Apache-2.0" ]
2
2022-02-17T19:54:05.000Z
2022-02-17T19:54:08.000Z
docs/examples/use_cases/webdataset-externalsource.ipynb
hugo213/DALI
999379b7ed2145f5da2de9f2dca566b3912fb366
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
docs/examples/use_cases/webdataset-externalsource.ipynb
hugo213/DALI
999379b7ed2145f5da2de9f2dca566b3912fb366
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
67.138544
18,136
0.761713
[ [ [ "# WebDataset integration using External Source\nIn this notebook is an example of how one may combine the [webdataset](https://github.com/webdataset/webdataset) with a DALI pipeline, using an external source operator", "_____no_output_____" ], [ "## Introduction\n### Data Representation\nWeb Dataset is a dataset representation that heavily optimizes networked accessed storage performance. At its simplest, it stores the whole dataset in one tarball file, where each sample is represented by one or more entries with the same name but different extensions. This approach improves drive access caching in RAM, since the data is represented sequentially.", "_____no_output_____" ], [ "### Sharding\nIn order to improve distributed storage access and network data transfer, the webdataset employs a strategy called sharding. In this approach, the tarball holding the data is split into several smaller ones, called shards, which allows for fetching from several storage drives at once, and reduces the packet size that has to be transferred via the network.", "_____no_output_____" ], [ "## Sample Implementation\nFirst, let's import the necessary modules and define the locations of the datasets that will be needed later.\n\n`DALI_EXTRA_PATH` environment variable should point to the place where the data from [DALI extra repository](https://github.com/NVIDIA/DALI_extra) is downloaded. Please make sure that the proper release tag is checked out.\n\nThe `tar_dataset_paths` holds the paths to the shards that will be loaded while showing and testing the webdataset loader.\n\n`batch_size` is the common batch size for both loaders", "_____no_output_____" ] ], [ [ "import nvidia.dali.fn as fn\nimport nvidia.dali as dali\nimport nvidia.dali.types as types\nimport webdataset as wds\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport os\nimport random\nimport tempfile\nimport tarfile\n\nroot_path = os.path.join(os.environ[\"DALI_EXTRA_PATH\"], \"db\", \"webdataset\", \"MNIST\")\ntar_dataset_paths = [os.path.join(root_path, data_file) \n for data_file in [\"devel-0.tar\", \"devel-1.tar\", \"devel-2.tar\"]]\nbatch_size = 1024", "_____no_output_____" ] ], [ [ "Next, let's extract the files that will later be used for comparing the file reader to our custom one.\n\nThe `folder_dataset_files` holds the paths to the files ", "_____no_output_____" ] ], [ [ "folder_dataset_root_dir = tempfile.TemporaryDirectory()\nfolder_dataset_dirs = [tempfile.TemporaryDirectory(dir=folder_dataset_root_dir.name) \n for dataset in tar_dataset_paths]\nfolder_dataset_tars = [tarfile.open(dataset) for dataset in tar_dataset_paths]\n\nfor folder_dataset_tar, folder_dataset_subdir in zip(folder_dataset_tars, folder_dataset_dirs):\n folder_dataset_tar.extractall(path=folder_dataset_subdir.name)\n\nfolder_dataset_files = [\n filepath\n for folder_dataset_subdir in folder_dataset_dirs\n for filepath in sorted(\n glob.glob(os.path.join(folder_dataset_subdir.name, \"*.jpg\")), \n key=lambda s: int(s[s.rfind('/') + 1:s.rfind(\".jpg\")])\n )\n]", "_____no_output_____" ] ], [ [ "The function below is used to later randomize the output from the dataset. 
The samples are first stored in a prefetch buffer, and then they're randomly yielded in a generator and replaced by a new sample.", "_____no_output_____" ] ], [ [ "def buffered_shuffle(generator_factory, initial_fill, seed):\n    def buffered_shuffle_generator():\n        nonlocal generator_factory, initial_fill, seed\n        generator = generator_factory()\n        # The buffer size must be positive\n        assert(initial_fill > 0)\n\n        # The buffer that will hold the randomized samples\n        buffer = []\n\n        # The random context for preventing side effects\n        random_context = random.Random(seed)\n\n        try:\n            while len(buffer) < initial_fill: # Fills in the random buffer\n                buffer.append(next(generator))\n\n            while True: # Selects a random sample from the buffer and then fills it back in with a new one\n                idx = random_context.randint(0, initial_fill-1)\n\n                yield buffer[idx]\n                buffer[idx] = None\n                buffer[idx] = next(generator)\n\n        except StopIteration: # When the generator runs out of the samples, flushes out the buffer\n            random_context.shuffle(buffer)\n\n            while buffer:\n                if buffer[-1] is not None: # Prevents the one sample that was not filled from being duplicated\n                    yield buffer[-1]\n                buffer.pop()\n    return buffered_shuffle_generator\n    ", "_____no_output_____" ] ], [ [ "The next function is used for padding the last batch with the last sample, in order to make it the same size as all the other ones.", "_____no_output_____" ] ], [ [ "def last_batch_padding(generator_factory, batch_size):\n    def last_batch_padding_generator():\n        nonlocal generator_factory, batch_size\n        generator = generator_factory()\n        in_batch_idx = 0\n        last_item = None\n        try:\n            while True: # Keeps track of the last sample and the sample number mod batch_size\n                if in_batch_idx >= batch_size:\n                    in_batch_idx -= batch_size\n                last_item = next(generator)\n                in_batch_idx += 1\n                yield last_item\n        except StopIteration: # Repeats the last sample the necessary number of times\n            while in_batch_idx < batch_size:\n                yield last_item\n                in_batch_idx += 1\n    return last_batch_padding_generator", "_____no_output_____" ] ], [ [ "The final function collects all the data into batches, so that the last batch can have a variable length.", "_____no_output_____" ] ], [ [ "def collect_batches(generator_factory, batch_size):\n    def collect_batches_generator():\n        nonlocal generator_factory, batch_size\n        generator = generator_factory()\n        batch = []\n        try:\n            while True:\n                batch.append(next(generator))\n                if len(batch) == batch_size:\n                    # Converts tuples of samples into tuples of batches of samples\n                    yield tuple(map(list, zip(*batch)))\n                    batch = []\n        except StopIteration:\n            if batch:\n                # Converts tuples of samples into tuples of batches of samples\n                yield tuple(map(list, zip(*batch)))\n    return collect_batches_generator", "_____no_output_____" ] ], [ [ "And finally the data loader, which configures and returns an [ExternalSource](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/examples/general/data_loading/external_input.html) node.\n\n### Keyword Arguments:\n\n`paths`: describes the paths to the file/files containing the webdataset, and can be formatted as any data accepted by the `WebDataset`\n\n`extensions`: describes the extensions containing the data to be output through the dataset. By default, all image format extensions supported by `WebDataset` are used\n\n`random_shuffle`: describes whether to shuffle the data read by the `WebDataset`\n\n`initial_fill`: if `random_shuffle` is True, describes the buffer size of the data shuffler. 
Set to 256 by default.\n\n`seed`: describes the seed for shuffling the data. Useful for getting consistent results. Set to 0 by default.\n\n`pad_last_batch`: describes whether to pad the last batch with the final sample to match the regular batch size\n\n`read_ahead`: describes whether to prefetch the data into the memory\n\n`cycle`: can be either `\"raise\"`, in which case the data loader will throw StopIteration once it reaches the end of the data, so the user has to invoke `pipeline.reset()` before the next epoch, or `\"quiet\"` (default), in which case it will keep looping over the data over and over", "_____no_output_____" ] ], [ [ "def read_webdataset(\n    paths, \n    extensions=None,\n    random_shuffle=False, \n    initial_fill=256, \n    seed=0,\n    pad_last_batch=False,\n    read_ahead=False,\n    cycle=\"quiet\"\n):\n    # Parsing the input data\n    assert(cycle in {\"quiet\", \"raise\", \"no\"})\n    if extensions is None:\n        extensions = ';'.join([\"jpg\", \"jpeg\", \"img\", \"image\", \"pbm\", \"pgm\", \"png\"]) # All supported image formats\n    if type(extensions) == str:\n        extensions = (extensions,)\n    \n    # For later information for batch collection and padding\n    max_batch_size = dali.pipeline.Pipeline.current().max_batch_size\n    \n    def webdataset_generator():\n        bytes_np_mapper = (lambda data: np.frombuffer(data, dtype=np.uint8),)*len(extensions)\n        dataset_instance = (wds.WebDataset(paths)\n                            .to_tuple(*extensions)\n                            .map_tuple(*bytes_np_mapper))\n        \n        for sample in dataset_instance:\n            yield sample\n    \n    dataset = webdataset_generator\n    \n    # Adding the buffered shuffling\n    if random_shuffle:\n        dataset = buffered_shuffle(dataset, initial_fill, seed)\n    \n    # Adding the batch padding\n    if pad_last_batch:\n        dataset = last_batch_padding(dataset, max_batch_size)\n    \n    # Collecting the data into batches (possibly underfull)\n    # Handled by a custom function only when `cycle` is not \"quiet\"\n    if cycle != \"quiet\":\n        dataset = collect_batches(dataset, max_batch_size)\n    \n    # Prefetching the data\n    if read_ahead:\n        dataset = list(dataset())\n    \n    return fn.external_source(\n        source=dataset,\n        num_outputs=len(extensions),\n        batch=(cycle != \"quiet\"), # If `cycle` is \"quiet\" then batching is handled by the external source\n        cycle=cycle,\n        dtype=types.UINT8\n    )", "_____no_output_____" ] ], [ [ "We also define a sample data augmentation function which decodes an image, applies a jitter to it and resizes it to 224x224.", "_____no_output_____" ] ], [ [ "def decode_augment(img, seed=0):\n    img = fn.decoders.image(img)\n    img = fn.jitter(img.gpu(), seed=seed)\n    img = fn.resize(img, size=(224, 224))\n    return img", "_____no_output_____" ] ], [ [ "## Usage presentation\nBelow we define the sample webdataset pipeline with our `external_source`-based loader, which just chains the previously defined reader and augmentation function together.", "_____no_output_____" ] ], [ [ "@dali.pipeline_def(batch_size=batch_size, num_threads=4, device_id=0)\ndef webdataset_pipeline(\n    paths,\n    random_shuffle=False, \n    initial_fill=256,\n    seed=0,\n    pad_last_batch=False,\n    read_ahead=False,\n    cycle=\"quiet\"\n):\n    img, label = read_webdataset(paths=paths, \n                                 extensions=(\"jpg\", \"cls\"),\n                                 random_shuffle=random_shuffle,\n                                 initial_fill=initial_fill,\n                                 seed=seed,\n                                 pad_last_batch=pad_last_batch,\n                                 read_ahead=read_ahead,\n                                 cycle=cycle)\n    return decode_augment(img, seed=seed), label", "_____no_output_____" ] ], [ [ "The pipeline can then be built with the desired arguments passed through to the data loader", "_____no_output_____" ] ], [ [ "pipeline 
= webdataset_pipeline(\n    tar_dataset_paths, # Paths for the sharded dataset\n    random_shuffle=True, # Random buffered shuffling on\n    pad_last_batch=False, # Do not pad the last batch to the full size\n    read_ahead=False, # Do not preload all the data into the memory\n    cycle=\"raise\") # Raise StopIteration at the end of the data\npipeline.build()", "_____no_output_____" ] ], [ [ "And executed, printing the example image using matplotlib", "_____no_output_____" ] ], [ [ "img, c = pipeline.run() # If StopIteration is raised, use pipeline.reset() to start a new epoch\nimg = img.as_cpu()\nprint(int(bytes(c.as_array()[0]))) # Conversion from an array of bytes back to bytes and then to int\nplt.imshow(img.as_array()[0])\nplt.show()", "1\n" ] ], [ [ "## Checking consistency\nHere we will check if the custom pipeline for the webdataset matches an equivalent pipeline reading the files from an untarred directory, with `fn.readers.file` reader.\n\nFirst let's define the pipeline to compare against. This is the same pipeline as the one for the webdataset, but instead uses the `fn.readers.file` reader.", "_____no_output_____" ] ], [ [ "@dali.pipeline_def(batch_size=batch_size, num_threads=4, device_id=0)\ndef file_pipeline(files):\n    img, _ = fn.readers.file(files=files)\n    return decode_augment(img)", "_____no_output_____" ] ], [ [ "Then let's instantiate and build both pipelines", "_____no_output_____" ] ], [ [ "webdataset_pipeline_instance = webdataset_pipeline(tar_dataset_paths)\nwebdataset_pipeline_instance.build()\nfile_pipeline_instance = file_pipeline(folder_dataset_files)\nfile_pipeline_instance.build()", "_____no_output_____" ] ], [ [ "\nAnd run the comparison loop.", "_____no_output_____" ] ], [ [ "# The number of batches to sample between the two pipelines\nnum_batches = 10\n\nfor _ in range(num_batches):\n    webdataset_pipeline_threw_exception = False\n    file_pipeline_threw_exception = False\n    \n    # Try running the webdataset pipeline and check if it has run out of the samples\n    try:\n        web_img, _ = webdataset_pipeline_instance.run()\n    except StopIteration:\n        webdataset_pipeline_threw_exception = True\n    \n    # Try running the file pipeline and check if it has run out of the samples\n    try:\n        (file_img,) = file_pipeline_instance.run()\n    except StopIteration:\n        file_pipeline_threw_exception = True\n    \n    # In case of different number of batches\n    assert(webdataset_pipeline_threw_exception==file_pipeline_threw_exception)\n\n    web_img = web_img.as_cpu().as_array()\n    file_img = file_img.as_cpu().as_array()\n\n    # In case the pipelines give different outputs\n    np.testing.assert_equal(web_img, file_img)\nelse:\n    print(\"No difference found!\")", "No difference found!\n" ] ] ]
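The run cell in the notebook above notes in a comment that `pipeline.reset()` must be called after `StopIteration` when `cycle="raise"`, but it does not show a full epoch loop. A hedged sketch of how such a loop might look is given below; the epoch count and the training-step placeholder are illustrative and not part of the original example.

```python
# Illustrative sketch: driving a cycle="raise" DALI pipeline for several epochs.
num_epochs = 2  # arbitrary example value

for epoch in range(num_epochs):
    while True:
        try:
            img, label = pipeline.run()
            # ... consume the batch here (e.g. feed it to a training step) ...
        except StopIteration:
            # The external source is exhausted: reset before starting the next epoch
            pipeline.reset()
            break
```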
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec7f56a79efb69989f6a32d79f09abec4222088f
379,335
ipynb
Jupyter Notebook
notebook/2018-07-13_slides_for_friday_meeting.ipynb
jfear/larval_gonad
624a71741864b74e0372f89bdcca578e5cca3722
[ "MIT" ]
1
2019-09-13T13:24:18.000Z
2019-09-13T13:24:18.000Z
notebook/2018-07-13_slides_for_friday_meeting.ipynb
jfear/larval_gonad
624a71741864b74e0372f89bdcca578e5cca3722
[ "MIT" ]
65
2019-07-24T16:23:08.000Z
2020-03-06T22:18:47.000Z
notebook/2018-07-13_slides_for_friday_meeting.ipynb
jfear/larval_gonad
624a71741864b74e0372f89bdcca578e5cca3722
[ "MIT" ]
1
2021-06-02T19:09:35.000Z
2021-06-02T19:09:35.000Z
279.745575
115,348
0.895725
[ [ [ "import os\nimport sys\nfrom pathlib import Path\n\nfrom IPython.display import display, HTML, Markdown, Audio\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Project level imports\nfrom larval_gonad.notebook import Nb\nfrom larval_gonad.sounds import beep", "_____no_output_____" ], [ "# Setup notebook\nnbconfig = Nb.setup_notebook(seurat_dir='../output/scrnaseq-wf/scrnaseq_combine_force')", "last updated: 2018-07-13 \nGit hash: ac53eab8570ee235fc7e9d15c84d3e028437e0a6\n" ], [ "from larval_gonad.cell_selection import build_umi_gene_count_table\n\ndef get_umi(rep):\n umi = build_umi_gene_count_table(f'../output/scrnaseq-wf/scrnaseq_samples/testis{rep}_force/outs/raw_gene_bc_matrices_h5.h5', \n f'../output/scrnaseq-wf/scrnaseq_samples/testis{rep}_force/outs/molecule_info.h5')\n umi.sort_values('umi_cnt', inplace=True, ascending=False)\n umi['id'] = [x + 1 for x in range(umi.shape[0])]\n umi['rep'] = rep\n umi.index = [f'rep{rep}_' + x for x in umi.index]\n return umi\n\numi = pd.concat([\n get_umi(1),\n get_umi(2),\n get_umi(3),\n])\n\nclusters = nbconfig.seurat.get_clusters('res.0.6')\numi = umi.join(clusters)", "_____no_output_____" ], [ "def bcrank(rep, cluster, ax=None, **kwargs):\n if ax is None:\n fig, ax = plt.subplots()\n \n dat = umi.query(f'rep == {rep}')\n clus = dat.query(f'cluster == {cluster}')\n \n dat.plot('id', 'umi_cnt', loglog=True, kind='scatter', s=10, color='lightgrey', ax=ax, **kwargs)\n try:\n clus.plot('id', 'umi_cnt', loglog=True, kind='scatter', s=10, color='g', ax=ax)\n ax.set_xlabel('')\n except ValueError:\n return", "_____no_output_____" ], [ "def umivio(cluster, ax=None, **kwargs):\n if ax is None:\n fig, ax = plt.subplots()\n \n dat = umi.query(f'cluster == {cluster}').dropna()\n sns.violinplot('rep', 'umi_cnt', data=dat, ax=ax, **kwargs)\n ax.set_ylabel('')\n ax.text(0.5, .99, 'nUMI', ha='center', va='top', transform=ax.transAxes, fontsize=12)", "_____no_output_____" ], [ "def genevio(cluster, ax=None, **kwargs):\n if ax is None:\n fig, ax = plt.subplots()\n \n dat = umi.query(f'cluster == {cluster}').dropna()\n sns.violinplot('rep', 'gene_cnt', data=dat, ax=ax, **kwargs)\n ax.set_ylabel('')\n ax.text(0.5, .99, 'nGene', ha='center', va='top', transform=ax.transAxes, fontsize=12)", "_____no_output_____" ], [ "cluster = 9\n\nfig = plt.figure(figsize=plt.figaspect(1/2))\ngs1 = plt.GridSpec(1, 3, bottom=0.55, wspace=0.02)\ngs2 = plt.GridSpec(1, 2, top=0.48)\nax1 = plt.subplot(gs1[0, 0])\nax2 = plt.subplot(gs1[0, 1], sharey=ax1)\nax3 = plt.subplot(gs1[0, 2], sharey=ax1)\nax4 = plt.subplot(gs2[0, 0])\nax5 = plt.subplot(gs2[0, 1])\n\nbcrank(1, cluster, ax1, title='rep1')\nbcrank(2, cluster, ax2, title='rep2')\nbcrank(3, cluster, ax3, title='rep3')\numivio(cluster, ax4)\ngenevio(cluster, ax5)", "_____no_output_____" ], [ "cluster = 10\n\nfig = plt.figure(figsize=plt.figaspect(1/2))\ngs1 = plt.GridSpec(1, 3, bottom=0.55, wspace=0.02)\ngs2 = plt.GridSpec(1, 2, top=0.48)\nax1 = plt.subplot(gs1[0, 0])\nax2 = plt.subplot(gs1[0, 1], sharey=ax1)\nax3 = plt.subplot(gs1[0, 2], sharey=ax1)\nax4 = plt.subplot(gs2[0, 0])\nax5 = plt.subplot(gs2[0, 1])\n\nbcrank(1, cluster, ax1, title='rep1')\nbcrank(2, cluster, ax2, title='rep2')\nbcrank(3, cluster, ax3, title='rep3')\numivio(cluster, ax4)\ngenevio(cluster, ax5)", "_____no_output_____" ], [ "cluster = 11\n\nfig = plt.figure(figsize=plt.figaspect(1/2))\ngs1 = plt.GridSpec(1, 3, bottom=0.55, wspace=0.02)\ngs2 = plt.GridSpec(1, 2, top=0.48)\nax1 
= plt.subplot(gs1[0, 0])\nax2 = plt.subplot(gs1[0, 1], sharey=ax1)\nax3 = plt.subplot(gs1[0, 2], sharey=ax1)\nax4 = plt.subplot(gs2[0, 0])\nax5 = plt.subplot(gs2[0, 1])\n\nbcrank(1, cluster, ax1, title='rep1')\nbcrank(2, cluster, ax2, title='rep2')\nbcrank(3, cluster, ax3, title='rep3')\numivio(cluster, ax4)\ngenevio(cluster, ax5)", "_____no_output_____" ] ], [ [ "## DEG", "_____no_output_____" ] ], [ [ "biomarkers = nbconfig.seurat.get_biomarkers('res.0.6')", "_____no_output_____" ], [ "biomarkers.head()", "_____no_output_____" ], [ "nbconfig.CLUSTER_ANNOT", "_____no_output_____" ], [ "c11 = pd.read_csv('../output/2018-07-13_scrnaseq_11_vs_germcells_biomarkers.tsv', sep='\\t', index_col=0)", "_____no_output_____" ], [ "c11.index.name = 'FBgn'", "_____no_output_____" ], [ "c11_genes = c11.query('avg_logFC >= 0').index.unique().tolist()", "_____no_output_____" ], [ "len(c11_genes)", "_____no_output_____" ], [ "biomarkers[biomarkers.index.isin(c11_genes)].cluster.value_counts()", "_____no_output_____" ], [ "biomarkers[biomarkers.index.isin(c11_genes)].sort_index()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7f5a003faf167c7d4a8f0ec43140aa40da5806
54,761
ipynb
Jupyter Notebook
Auto Chloro ML.ipynb
farhad324/Auto-Chloro-A-Crop-Disease-Classifier-and-Remedies-Provider-In-Bangla
33a501ca670510497329d129aa1fa1287eddbed3
[ "MIT" ]
2
2021-12-11T15:44:17.000Z
2022-02-05T23:18:19.000Z
Auto Chloro ML.ipynb
SarahZabeen/Auto-Chloro-A-Crop-Disease-Classifier-and-Remedies-Provider-In-Bangla
33a501ca670510497329d129aa1fa1287eddbed3
[ "MIT" ]
null
null
null
Auto Chloro ML.ipynb
SarahZabeen/Auto-Chloro-A-Crop-Disease-Classifier-and-Remedies-Provider-In-Bangla
33a501ca670510497329d129aa1fa1287eddbed3
[ "MIT" ]
3
2021-03-29T11:16:25.000Z
2022-01-15T15:13:47.000Z
142.979112
21,756
0.85875
[ [ [ "![550c205e-04d0-4fdc-9e44-1b91844592d4_200x200.png](attachment:550c205e-04d0-4fdc-9e44-1b91844592d4_200x200.png)", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D\nfrom tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D\nfrom tensorflow.keras.models import Model, Sequential\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\nfrom tensorflow.keras.utils import plot_model\nfrom IPython.display import SVG, Image", "_____no_output_____" ], [ "\n# For checking out that how many images are available in the train set we can use import OS\nfor types in os.listdir(\"PlantVillage/My Drive/train_set/\"):\n print(str(len(os.listdir(\"PlantVillage/My Drive/train_set/\"+ types)))+\" \"+ types+' images')", "917 Pepper__bell___Bacterial_spot images\n1402 Pepper__bell___healthy images\n927 Potato___Early_blight images\n127 Potato___healthy images\n919 Potato___Late_blight images\n2017 Tomato_Bacterial_spot images\n919 Tomato_Early_blight images\n1494 Tomato_healthy images\n1816 Tomato_Late_blight images\n879 Tomato_Leaf_Mold images\n1682 Tomato_Septoria_leaf_spot images\n1603 Tomato_Spider_mites_Two_spotted_spider_mite images\n1307 Tomato__Target_Spot images\n308 Tomato__Tomato_mosaic_virus images\n3068 Tomato__Tomato_YellowLeaf__Curl_Virus images\n" ], [ "\n# Complete Dataset images can be loaded using ImageDataGenerator function\nimg_size=48\nbatch_size=64\ndatagen_train=ImageDataGenerator(horizontal_flip=True)\ntrain_generator=datagen_train.flow_from_directory(\"PlantVillage/My Drive/train_set\",\ntarget_size=(img_size,img_size),\nbatch_size=batch_size,\nclass_mode='categorical',\nshuffle=True)\n\ndatagen_test=ImageDataGenerator(horizontal_flip=True)\nvalidation_generator=datagen_test.flow_from_directory(\"PlantVillage/My Drive/test_data\",\ntarget_size=(img_size,img_size),\nbatch_size=batch_size,\nclass_mode='categorical',\nshuffle=True)", "Found 19384 images belonging to 15 classes.\nFound 1254 images belonging to 15 classes.\n" ], [ "detection=Sequential()\n\n#1 -convolutional layer-1\ndetection.add(Conv2D(64,(3,3),padding='same',input_shape=(48,48,3)))\ndetection.add(BatchNormalization())\ndetection.add(Activation('relu'))\ndetection.add(MaxPooling2D(pool_size=(2,2)))\ndetection.add(Dropout(0.25))\n\n#2 -convolutional layer-2\ndetection.add(Conv2D(128,(5,5),padding='same'))\ndetection.add(BatchNormalization())\ndetection.add(Activation('relu'))\ndetection.add(MaxPooling2D(pool_size=(2,2)))\ndetection.add(Dropout(0.25))\n\n#3 -convolutional layer-3\ndetection.add(Conv2D(512,(3,3),padding='same'))\ndetection.add(BatchNormalization())\ndetection.add(Activation('relu'))\ndetection.add(MaxPooling2D(pool_size=(2,2)))\ndetection.add(Dropout(0.25))\n\n#4 -convolutional 
layer-4\ndetection.add(Conv2D(1024,(3,3),padding='same'))\ndetection.add(BatchNormalization())\ndetection.add(Activation('relu'))\ndetection.add(MaxPooling2D(pool_size=(2,2)))\ndetection.add(Dropout(0.25))\n\ndetection.add(Flatten())\ndetection.add(Dense(256))\ndetection.add(BatchNormalization())\ndetection.add(Activation('relu'))\ndetection.add(Dropout(0.25))\n\ndetection.add(Dense(512))\ndetection.add(BatchNormalization())\ndetection.add(Activation('relu'))\ndetection.add(Dropout(0.25))\n\ndetection.add(Dense(15,activation='softmax'))\noptimum=Adam(lr=0.005)\ndetection.compile(optimizer=optimum,loss='categorical_crossentropy',metrics=['accuracy'])", "WARNING:tensorflow:From C:\\Users\\ASUS\\anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\nInstructions for updating:\nCall initializer instance with the dtype argument instead of passing it to the constructor\n" ], [ "detection.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 48, 48, 64) 1792 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 48, 48, 64) 256 \n_________________________________________________________________\nactivation (Activation) (None, 48, 48, 64) 0 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 24, 24, 64) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 24, 24, 64) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 24, 24, 128) 204928 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 24, 24, 128) 512 \n_________________________________________________________________\nactivation_1 (Activation) (None, 24, 24, 128) 0 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 12, 12, 128) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 12, 12, 128) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 12, 12, 512) 590336 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 12, 12, 512) 2048 \n_________________________________________________________________\nactivation_2 (Activation) (None, 12, 12, 512) 0 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 6, 6, 512) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 6, 6, 512) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 6, 6, 1024) 4719616 \n_________________________________________________________________\nbatch_normalization_3 (Batch (None, 6, 6, 1024) 4096 \n_________________________________________________________________\nactivation_3 (Activation) (None, 6, 6, 1024) 0 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 3, 3, 1024) 0 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 3, 3, 1024) 0 
\n_________________________________________________________________\nflatten (Flatten) (None, 9216) 0 \n_________________________________________________________________\ndense (Dense) (None, 256) 2359552 \n_________________________________________________________________\nbatch_normalization_4 (Batch (None, 256) 1024 \n_________________________________________________________________\nactivation_4 (Activation) (None, 256) 0 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 512) 131584 \n_________________________________________________________________\nbatch_normalization_5 (Batch (None, 512) 2048 \n_________________________________________________________________\nactivation_5 (Activation) (None, 512) 0 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 15) 7695 \n=================================================================\nTotal params: 8,025,487\nTrainable params: 8,020,495\nNon-trainable params: 4,992\n_________________________________________________________________\n" ], [ "ephocs=15\nsteps_per_epoch=train_generator.n//train_generator.batch_size\nsteps_per_epoch\nvalidation_steps=validation_generator.n//validation_generator.batch_size\nvalidation_steps\ndetection.fit(x=train_generator,\n steps_per_epoch=steps_per_epoch,\n epochs=ephocs,\n validation_data=validation_generator,\n validation_steps=validation_steps)\ndetection.save('auto_chloro_model.h5')", "Epoch 1/15\n302/302 [==============================] - 1382s 5s/step - loss: 0.9326 - acc: 0.6925 - val_loss: 7.4230 - val_acc: 0.2804\nEpoch 2/15\n302/302 [==============================] - 1150s 4s/step - loss: 0.5053 - acc: 0.8310 - val_loss: 7.5591 - val_acc: 0.1933\nEpoch 3/15\n302/302 [==============================] - 1058s 4s/step - loss: 0.3928 - acc: 0.8667 - val_loss: 3.3730 - val_acc: 0.4317\nEpoch 4/15\n302/302 [==============================] - 1250s 4s/step - loss: 0.3095 - acc: 0.8952 - val_loss: 0.4983 - val_acc: 0.8306\nEpoch 5/15\n302/302 [==============================] - 1235s 4s/step - loss: 0.2486 - acc: 0.9170 - val_loss: 0.8202 - val_acc: 0.7574\nEpoch 6/15\n302/302 [==============================] - 1018s 3s/step - loss: 0.2299 - acc: 0.9227 - val_loss: 1.6029 - val_acc: 0.6028\nEpoch 7/15\n302/302 [==============================] - 1227s 4s/step - loss: 0.1999 - acc: 0.9313 - val_loss: 0.7951 - val_acc: 0.7788\nEpoch 8/15\n302/302 [==============================] - 1271s 4s/step - loss: 0.1680 - acc: 0.9427 - val_loss: 0.4315 - val_acc: 0.8651\nEpoch 9/15\n302/302 [==============================] - 1290s 4s/step - loss: 0.1666 - acc: 0.9422 - val_loss: 0.6000 - val_acc: 0.8487\nEpoch 10/15\n302/302 [==============================] - 1085s 4s/step - loss: 0.1503 - acc: 0.9488 - val_loss: 1.2814 - val_acc: 0.7237\nEpoch 11/15\n302/302 [==============================] - 1135s 4s/step - loss: 0.1377 - acc: 0.9542 - val_loss: 2.5503 - val_acc: 0.6012\nEpoch 12/15\n302/302 [==============================] - 1167s 4s/step - loss: 0.1435 - acc: 0.9540 - val_loss: 0.5165 - val_acc: 0.8528\nEpoch 13/15\n302/302 [==============================] - 1071s 4s/step - loss: 0.1225 - acc: 0.9580 - val_loss: 3.1499 - val_acc: 0.4268\nEpoch 14/15\n302/302 [==============================] - 1215s 4s/step - loss: 0.1293 - acc: 0.9570 - 
val_loss: 1.0263 - val_acc: 0.7344\nEpoch 15/15\n302/302 [==============================] - 1145s 4s/step - loss: 0.1093 - acc: 0.9637 - val_loss: 0.2386 - val_acc: 0.9350\n" ], [ "from tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.models import load_model\n\ntest_img=image.load_img(\"tbs.JPG\",target_size=(48,48))\nplt.imshow(test_img)\ntest_img=image.img_to_array(test_img)\ntest_img=np.expand_dims(test_img,axis=0)\nresult=detection.predict(test_img)\na=result.argmax()\nclasses=train_generator.class_indices\ncategory=[]\nfor i in classes:\n category.append(i)\nfor i in range(len(classes)):\n if(i==a):\n output=category[i]\noutput ", "_____no_output_____" ] ] ]
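The final cell of this notebook predicts with the in-memory `detection` model even though `load_model` is imported and `auto_chloro_model.h5` was saved. A small sketch of reusing the saved file for inference is shown below; the inverted `class_indices` mapping is an assumption for convenience and presumes the same `train_generator` class ordering as during training.

```python
# Illustrative sketch: reload the saved model and map the predicted index back to a class name.
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

model = load_model('auto_chloro_model.h5')
idx_to_class = {v: k for k, v in train_generator.class_indices.items()}  # index -> class name

test_img = image.load_img('tbs.JPG', target_size=(48, 48))
batch = np.expand_dims(image.img_to_array(test_img), axis=0)
pred = model.predict(batch)
print(idx_to_class[int(pred.argmax())])
```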
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec7f5fef35ce2cb43b244f8771b15890e5e10f60
30,566
ipynb
Jupyter Notebook
notebooks/02_TorsionNet_training.ipynb
hnlab/TorsionNet
e81ab624f1340765345b34240a049a8cc5f4d581
[ "MIT" ]
15
2021-01-15T01:54:26.000Z
2022-03-31T16:00:52.000Z
notebooks/02_TorsionNet_training.ipynb
hnlab/TorsionNet
e81ab624f1340765345b34240a049a8cc5f4d581
[ "MIT" ]
2
2021-07-21T22:42:09.000Z
2021-11-22T06:39:20.000Z
notebooks/02_TorsionNet_training.ipynb
hnlab/TorsionNet
e81ab624f1340765345b34240a049a8cc5f4d581
[ "MIT" ]
6
2021-01-16T04:07:17.000Z
2022-02-23T02:11:49.000Z
36.431466
92
0.546817
[ [ [ "import sys\nimport time\nimport pickle\nfrom pathlib import Path\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.callbacks import Callback", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler", "_____no_output_____" ], [ "sys.path.append(str(Path().resolve().parent))", "_____no_output_____" ], [ "from torsion.model import get_model", "_____no_output_____" ], [ "y_train = np.load('y_train.npy')\ny_test = np.load('y_test.npy')\nX_train = np.load('X_train.npy')\nX_test = np.load('X_test.npy')", "_____no_output_____" ], [ "# Cap relative energies \nREL_ENERGY_CAP = 30.0\ntmp_idx = y_train > REL_ENERGY_CAP\ny_train[tmp_idx] = REL_ENERGY_CAP + np.exp(REL_ENERGY_CAP - y_train[tmp_idx])", "_____no_output_____" ], [ "scaler = StandardScaler().fit(X_train)\nX_train = scaler.transform(X_train)\n\nprint('X_train.shape ', X_train.shape)", "X_train.shape (9600, 293)\n" ], [ "scaler_file = 'scaler.pkl'\nmodel_file = 'model.h5'", "_____no_output_____" ], [ "# save feature transformation\nwith open(scaler_file, 'wb') as fptr:\n pickle.dump(scaler, fptr)", "_____no_output_____" ], [ "_, num_feat = X_train.shape\n\n# early stopping criteria\nearlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=100, \\\n verbose=1, mode='auto')\n\n# create DNN model\nmodel = get_model(num_feat)", "_____no_output_____" ], [ "print(model.summary())", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 2930) 861420 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 2930) 11720 \n_________________________________________________________________\ndropout (Dropout) (None, 2930) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 1465) 4293915 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 1465) 5860 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 1465) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 293) 429538 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 293) 1172 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 293) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 58) 17052 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 58) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 1) 59 \n=================================================================\nTotal params: 5,620,736\nTrainable params: 5,611,360\nNon-trainable params: 9,376\n_________________________________________________________________\nNone\n" ], [ "checkpointer = ModelCheckpoint(filepath=model_file, verbose=1, save_best_only=True)\ncallbacks_list = [checkpointer, earlystop]\n\n# train DNN model\nmodel.fit(\n X_train,\n y_train,\n epochs=5000,\n batch_size=256,\n validation_split=0.1,\n callbacks=callbacks_list,\n verbose=2)\n\nprint('Training 
complete')\nprint(f'Standard scalar is saved in {scaler_file}')\nprint(f'Model is saved in {model_file}')", "Epoch 1/5000\n\nEpoch 00001: val_loss improved from inf to 1.88996, saving model to model.h5\n34/34 - 4s - loss: 1.6116 - val_loss: 1.8900\nEpoch 2/5000\n\nEpoch 00002: val_loss improved from 1.88996 to 1.68420, saving model to model.h5\n34/34 - 1s - loss: 1.2783 - val_loss: 1.6842\nEpoch 3/5000\n\nEpoch 00003: val_loss improved from 1.68420 to 1.62709, saving model to model.h5\n34/34 - 1s - loss: 1.1683 - val_loss: 1.6271\nEpoch 4/5000\n\nEpoch 00004: val_loss improved from 1.62709 to 1.53948, saving model to model.h5\n34/34 - 1s - loss: 0.9984 - val_loss: 1.5395\nEpoch 5/5000\n\nEpoch 00005: val_loss improved from 1.53948 to 1.46478, saving model to model.h5\n34/34 - 1s - loss: 0.9141 - val_loss: 1.4648\nEpoch 6/5000\n\nEpoch 00006: val_loss improved from 1.46478 to 1.42968, saving model to model.h5\n34/34 - 1s - loss: 0.8053 - val_loss: 1.4297\nEpoch 7/5000\n\nEpoch 00007: val_loss improved from 1.42968 to 1.29315, saving model to model.h5\n34/34 - 1s - loss: 0.7853 - val_loss: 1.2932\nEpoch 8/5000\n\nEpoch 00008: val_loss did not improve from 1.29315\n34/34 - 0s - loss: 0.7226 - val_loss: 1.3451\nEpoch 9/5000\n\nEpoch 00009: val_loss improved from 1.29315 to 1.27699, saving model to model.h5\n34/34 - 1s - loss: 0.6619 - val_loss: 1.2770\nEpoch 10/5000\n\nEpoch 00010: val_loss improved from 1.27699 to 1.26097, saving model to model.h5\n34/34 - 1s - loss: 0.6158 - val_loss: 1.2610\nEpoch 11/5000\n\nEpoch 00011: val_loss improved from 1.26097 to 1.22337, saving model to model.h5\n34/34 - 1s - loss: 0.5962 - val_loss: 1.2234\nEpoch 12/5000\n\nEpoch 00012: val_loss improved from 1.22337 to 1.16135, saving model to model.h5\n34/34 - 1s - loss: 0.5469 - val_loss: 1.1613\nEpoch 13/5000\n\nEpoch 00013: val_loss did not improve from 1.16135\n34/34 - 0s - loss: 0.5248 - val_loss: 1.1635\nEpoch 14/5000\n\nEpoch 00014: val_loss improved from 1.16135 to 1.14296, saving model to model.h5\n34/34 - 1s - loss: 0.5224 - val_loss: 1.1430\nEpoch 15/5000\n\nEpoch 00015: val_loss did not improve from 1.14296\n34/34 - 0s - loss: 0.4781 - val_loss: 1.1733\nEpoch 16/5000\n\nEpoch 00016: val_loss did not improve from 1.14296\n34/34 - 0s - loss: 0.4678 - val_loss: 1.1724\nEpoch 17/5000\n\nEpoch 00017: val_loss did not improve from 1.14296\n34/34 - 0s - loss: 0.4478 - val_loss: 1.2110\nEpoch 18/5000\n\nEpoch 00018: val_loss did not improve from 1.14296\n34/34 - 0s - loss: 0.4415 - val_loss: 1.1780\nEpoch 19/5000\n\nEpoch 00019: val_loss did not improve from 1.14296\n34/34 - 0s - loss: 0.4321 - val_loss: 1.1574\nEpoch 20/5000\n\nEpoch 00020: val_loss improved from 1.14296 to 1.10215, saving model to model.h5\n34/34 - 1s - loss: 0.3972 - val_loss: 1.1021\nEpoch 21/5000\n\nEpoch 00021: val_loss did not improve from 1.10215\n34/34 - 0s - loss: 0.3950 - val_loss: 1.1463\nEpoch 22/5000\n\nEpoch 00022: val_loss did not improve from 1.10215\n34/34 - 0s - loss: 0.3805 - val_loss: 1.1387\nEpoch 23/5000\n\nEpoch 00023: val_loss improved from 1.10215 to 1.06352, saving model to model.h5\n34/34 - 1s - loss: 0.3619 - val_loss: 1.0635\nEpoch 24/5000\n\nEpoch 00024: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.3603 - val_loss: 1.1360\nEpoch 25/5000\n\nEpoch 00025: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.3580 - val_loss: 1.1441\nEpoch 26/5000\n\nEpoch 00026: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.3629 - val_loss: 1.1235\nEpoch 27/5000\n\nEpoch 00027: val_loss did not 
improve from 1.06352\n34/34 - 0s - loss: 0.3432 - val_loss: 1.0846\nEpoch 28/5000\n\nEpoch 00028: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.3261 - val_loss: 1.1622\nEpoch 29/5000\n\nEpoch 00029: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.3247 - val_loss: 1.1206\nEpoch 30/5000\n\nEpoch 00030: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.3226 - val_loss: 1.1181\nEpoch 31/5000\n\nEpoch 00031: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.3128 - val_loss: 1.2037\nEpoch 32/5000\n\nEpoch 00032: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.3229 - val_loss: 1.1070\nEpoch 33/5000\n\nEpoch 00033: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2977 - val_loss: 1.1561\nEpoch 34/5000\n\nEpoch 00034: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2957 - val_loss: 1.1678\nEpoch 35/5000\n\nEpoch 00035: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2863 - val_loss: 1.1434\nEpoch 36/5000\n\nEpoch 00036: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2919 - val_loss: 1.1339\nEpoch 37/5000\n\nEpoch 00037: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2826 - val_loss: 1.1516\nEpoch 38/5000\n\nEpoch 00038: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2785 - val_loss: 1.1680\nEpoch 39/5000\n\nEpoch 00039: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2789 - val_loss: 1.2000\nEpoch 40/5000\n\nEpoch 00040: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2703 - val_loss: 1.1595\nEpoch 41/5000\n\nEpoch 00041: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2764 - val_loss: 1.1270\nEpoch 42/5000\n\nEpoch 00042: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2820 - val_loss: 1.1023\nEpoch 43/5000\n\nEpoch 00043: val_loss did not improve from 1.06352\n34/34 - 0s - loss: 0.2708 - val_loss: 1.0920\nEpoch 44/5000\n\nEpoch 00044: val_loss improved from 1.06352 to 1.01950, saving model to model.h5\n34/34 - 1s - loss: 0.2589 - val_loss: 1.0195\nEpoch 45/5000\n\nEpoch 00045: val_loss did not improve from 1.01950\n34/34 - 0s - loss: 0.2726 - val_loss: 1.0968\nEpoch 46/5000\n\nEpoch 00046: val_loss did not improve from 1.01950\n34/34 - 0s - loss: 0.2571 - val_loss: 1.1133\nEpoch 47/5000\n\nEpoch 00047: val_loss did not improve from 1.01950\n34/34 - 0s - loss: 0.2677 - val_loss: 1.1064\nEpoch 48/5000\n\nEpoch 00048: val_loss did not improve from 1.01950\n34/34 - 0s - loss: 0.2469 - val_loss: 1.1117\nEpoch 49/5000\n\nEpoch 00049: val_loss did not improve from 1.01950\n34/34 - 0s - loss: 0.2562 - val_loss: 1.0873\nEpoch 50/5000\n\nEpoch 00050: val_loss improved from 1.01950 to 1.00919, saving model to model.h5\n34/34 - 1s - loss: 0.2498 - val_loss: 1.0092\nEpoch 51/5000\n\nEpoch 00051: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2360 - val_loss: 1.0971\nEpoch 52/5000\n\nEpoch 00052: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2380 - val_loss: 1.0374\nEpoch 53/5000\n\nEpoch 00053: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2406 - val_loss: 1.0459\nEpoch 54/5000\n\nEpoch 00054: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2282 - val_loss: 1.0756\nEpoch 55/5000\n\nEpoch 00055: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2416 - val_loss: 1.1322\nEpoch 56/5000\n\nEpoch 00056: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2378 - val_loss: 1.0812\nEpoch 57/5000\n\nEpoch 00057: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 
0.2328 - val_loss: 1.0826\nEpoch 58/5000\n\nEpoch 00058: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2172 - val_loss: 1.1250\nEpoch 59/5000\n\nEpoch 00059: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2248 - val_loss: 1.1031\nEpoch 60/5000\n\nEpoch 00060: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2185 - val_loss: 1.1209\nEpoch 61/5000\n\nEpoch 00061: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2279 - val_loss: 1.1028\nEpoch 62/5000\n\nEpoch 00062: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2159 - val_loss: 1.1212\nEpoch 63/5000\n\nEpoch 00063: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2148 - val_loss: 1.1546\nEpoch 64/5000\n\nEpoch 00064: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2108 - val_loss: 1.1594\nEpoch 65/5000\n\nEpoch 00065: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2284 - val_loss: 1.1007\nEpoch 66/5000\n\nEpoch 00066: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2298 - val_loss: 1.0718\nEpoch 67/5000\n\nEpoch 00067: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2142 - val_loss: 1.1015\nEpoch 68/5000\n\nEpoch 00068: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2130 - val_loss: 1.1002\nEpoch 69/5000\n\nEpoch 00069: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2072 - val_loss: 1.0925\nEpoch 70/5000\n\nEpoch 00070: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2176 - val_loss: 1.0869\nEpoch 71/5000\n\nEpoch 00071: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2154 - val_loss: 1.1122\nEpoch 72/5000\n\nEpoch 00072: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2011 - val_loss: 1.0678\nEpoch 73/5000\n\nEpoch 00073: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2036 - val_loss: 1.0825\nEpoch 74/5000\n\nEpoch 00074: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1939 - val_loss: 1.0751\nEpoch 75/5000\n\nEpoch 00075: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1972 - val_loss: 1.0428\nEpoch 76/5000\n\nEpoch 00076: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1883 - val_loss: 1.0682\nEpoch 77/5000\n\nEpoch 00077: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2040 - val_loss: 1.0885\nEpoch 78/5000\n\nEpoch 00078: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2136 - val_loss: 1.1248\nEpoch 79/5000\n\nEpoch 00079: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2086 - val_loss: 1.0712\nEpoch 80/5000\n\nEpoch 00080: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2014 - val_loss: 1.0919\nEpoch 81/5000\n\nEpoch 00081: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1892 - val_loss: 1.1544\nEpoch 82/5000\n\nEpoch 00082: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1966 - val_loss: 1.0770\nEpoch 83/5000\n\nEpoch 00083: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1925 - val_loss: 1.0961\nEpoch 84/5000\n\nEpoch 00084: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1919 - val_loss: 1.0755\nEpoch 85/5000\n\nEpoch 00085: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1827 - val_loss: 1.0875\nEpoch 86/5000\n\nEpoch 00086: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1911 - val_loss: 1.0761\nEpoch 87/5000\n\nEpoch 00087: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.2058 - val_loss: 1.0491\nEpoch 88/5000\n\nEpoch 00088: val_loss did not improve from 1.00919\n34/34 - 
0s - loss: 0.1995 - val_loss: 1.0637\nEpoch 89/5000\n\nEpoch 00089: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1784 - val_loss: 1.0897\nEpoch 90/5000\n\nEpoch 00090: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1878 - val_loss: 1.1139\nEpoch 91/5000\n\nEpoch 00091: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1860 - val_loss: 1.0968\nEpoch 92/5000\n\nEpoch 00092: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1880 - val_loss: 1.0403\nEpoch 93/5000\n\nEpoch 00093: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1897 - val_loss: 1.1207\nEpoch 94/5000\n\nEpoch 00094: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1820 - val_loss: 1.1212\nEpoch 95/5000\n\nEpoch 00095: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1884 - val_loss: 1.0451\nEpoch 96/5000\n\nEpoch 00096: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1943 - val_loss: 1.0291\nEpoch 97/5000\n\nEpoch 00097: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1926 - val_loss: 1.0627\nEpoch 98/5000\n\nEpoch 00098: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1851 - val_loss: 1.0772\nEpoch 99/5000\n\nEpoch 00099: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1876 - val_loss: 1.0643\nEpoch 100/5000\n\nEpoch 00100: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1763 - val_loss: 1.1306\nEpoch 101/5000\n\nEpoch 00101: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1711 - val_loss: 1.0896\nEpoch 102/5000\n\nEpoch 00102: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1690 - val_loss: 1.0702\nEpoch 103/5000\n\nEpoch 00103: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1790 - val_loss: 1.1245\nEpoch 104/5000\n\nEpoch 00104: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1623 - val_loss: 1.1124\nEpoch 105/5000\n\nEpoch 00105: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1702 - val_loss: 1.0887\nEpoch 106/5000\n\nEpoch 00106: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1696 - val_loss: 1.1133\nEpoch 107/5000\n\nEpoch 00107: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1732 - val_loss: 1.1094\nEpoch 108/5000\n\nEpoch 00108: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1640 - val_loss: 1.1823\nEpoch 109/5000\n\nEpoch 00109: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1710 - val_loss: 1.1572\nEpoch 110/5000\n\nEpoch 00110: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1689 - val_loss: 1.1748\nEpoch 111/5000\n\nEpoch 00111: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1710 - val_loss: 1.1823\nEpoch 112/5000\n\nEpoch 00112: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1721 - val_loss: 1.1829\nEpoch 113/5000\n\nEpoch 00113: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1726 - val_loss: 1.1441\nEpoch 114/5000\n\nEpoch 00114: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1759 - val_loss: 1.1083\nEpoch 115/5000\n\nEpoch 00115: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1697 - val_loss: 1.1020\nEpoch 116/5000\n\nEpoch 00116: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1672 - val_loss: 1.1309\nEpoch 117/5000\n\nEpoch 00117: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1566 - val_loss: 1.1407\nEpoch 118/5000\n\nEpoch 00118: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1595 - val_loss: 1.1354\nEpoch 119/5000\n\nEpoch 00119: val_loss did not 
improve from 1.00919\n34/34 - 0s - loss: 0.1620 - val_loss: 1.1034\nEpoch 120/5000\n\nEpoch 00120: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1744 - val_loss: 1.1826\nEpoch 121/5000\n\nEpoch 00121: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1716 - val_loss: 1.1692\nEpoch 122/5000\n\nEpoch 00122: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1764 - val_loss: 1.1539\nEpoch 123/5000\n\nEpoch 00123: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1644 - val_loss: 1.1159\nEpoch 124/5000\n\nEpoch 00124: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1651 - val_loss: 1.0922\nEpoch 125/5000\n\nEpoch 00125: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1572 - val_loss: 1.1333\nEpoch 126/5000\n\nEpoch 00126: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1641 - val_loss: 1.2097\nEpoch 127/5000\n\nEpoch 00127: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1541 - val_loss: 1.1451\nEpoch 128/5000\n\nEpoch 00128: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1608 - val_loss: 1.1427\nEpoch 129/5000\n\nEpoch 00129: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1586 - val_loss: 1.1689\nEpoch 130/5000\n\nEpoch 00130: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1637 - val_loss: 1.1858\nEpoch 131/5000\n\nEpoch 00131: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1611 - val_loss: 1.1659\nEpoch 132/5000\n\nEpoch 00132: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1508 - val_loss: 1.1512\nEpoch 133/5000\n\nEpoch 00133: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1484 - val_loss: 1.1668\nEpoch 134/5000\n\nEpoch 00134: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1525 - val_loss: 1.1559\nEpoch 135/5000\n\nEpoch 00135: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1423 - val_loss: 1.1322\nEpoch 136/5000\n\nEpoch 00136: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1556 - val_loss: 1.1214\nEpoch 137/5000\n\nEpoch 00137: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1633 - val_loss: 1.1294\nEpoch 138/5000\n\nEpoch 00138: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1533 - val_loss: 1.1415\nEpoch 139/5000\n\nEpoch 00139: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1527 - val_loss: 1.1235\nEpoch 140/5000\n\nEpoch 00140: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1433 - val_loss: 1.1359\nEpoch 141/5000\n\nEpoch 00141: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1557 - val_loss: 1.1149\nEpoch 142/5000\n\nEpoch 00142: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1512 - val_loss: 1.1505\nEpoch 143/5000\n\nEpoch 00143: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1481 - val_loss: 1.1918\nEpoch 144/5000\n\nEpoch 00144: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1507 - val_loss: 1.1149\nEpoch 145/5000\n\nEpoch 00145: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1525 - val_loss: 1.2044\nEpoch 146/5000\n\nEpoch 00146: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1462 - val_loss: 1.1989\nEpoch 147/5000\n\nEpoch 00147: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1526 - val_loss: 1.2478\nEpoch 148/5000\n\nEpoch 00148: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1515 - val_loss: 1.2358\nEpoch 149/5000\n\nEpoch 00149: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1573 - val_loss: 1.2503\nEpoch 
150/5000\n\nEpoch 00150: val_loss did not improve from 1.00919\n34/34 - 0s - loss: 0.1579 - val_loss: 1.1837\nEpoch 00150: early stopping\nTraining complete\nStandard scalar is saved in scaler.pkl\nModel is saved in model.h5\n" ] ] ]
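The TorsionNet training notebook above ends after saving `scaler.pkl` and `model.h5`. A short sketch of how they might be reloaded to score the held-out set follows; it is an illustrative addition under the assumption that `X_test.npy` needs the same standardization that was fitted on the training features.

```python
# Illustrative sketch: reload the fitted scaler and trained model for inference.
import pickle
import numpy as np
from tensorflow.keras.models import load_model

with open('scaler.pkl', 'rb') as fptr:
    scaler = pickle.load(fptr)
model = load_model('model.h5')

X_test = np.load('X_test.npy')
y_pred = model.predict(scaler.transform(X_test)).ravel()  # same preprocessing as training
```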
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7f6c2ce22a5a209bd1182c07ac899d96f8390f
769,700
ipynb
Jupyter Notebook
Housing_Prices.ipynb
majesticio/Housing_Prices
aea5850211a2b1c18c616e0dd559a59cad02cb4b
[ "Apache-2.0" ]
1
2020-09-23T17:08:49.000Z
2020-09-23T17:08:49.000Z
Housing_Prices.ipynb
majesticio/Housing_Prices
aea5850211a2b1c18c616e0dd559a59cad02cb4b
[ "Apache-2.0" ]
null
null
null
Housing_Prices.ipynb
majesticio/Housing_Prices
aea5850211a2b1c18c616e0dd559a59cad02cb4b
[ "Apache-2.0" ]
null
null
null
67.487944
125,566
0.547225
[ [ [ "<a href=\"https://colab.research.google.com/github/majesticio/Housing_Prices/blob/master/Housing_Prices.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "#Predicting Housing Prices\n*Using Linear/Nonlinear Regression*", "_____no_output_____" ], [ "---\nby **Gabriel Fosse**", "_____no_output_____" ], [ "##Import Libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd \nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\nimport sklearn.model_selection as model_selection\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import Ridge\nimport statsmodels.api as sm\n", "_____no_output_____" ] ], [ [ "##Data Collection\nThis data was provided by CNM Ingenuity. **train & test_data**", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "_____no_output_____" ], [ "paste_train_path_here = '/content/drive/My Drive/Colab Notebooks/csv files/Housing-Data.csv'\npaste_blind_path_here = '/content/drive/My Drive/Colab Notebooks/csv files/Housing_Data_Blind_Test.csv'\n\ntrain = pd.read_csv(paste_train_path_here)\n\ntest_data = pd.read_csv(paste_blind_path_here)\n\nprint(test_data.shape, train.shape)", "(246, 79) (2637, 81)\n" ] ], [ [ "Perameters", "_____no_output_____" ] ], [ [ "pd.set_option('display.max_rows', 150)\npd.set_option('display.max_columns', 150)", "_____no_output_____" ] ], [ [ "##Data Cleaning", "_____no_output_____" ], [ "Let's have a look at our dataframe...", "_____no_output_____" ] ], [ [ "train.head()", "_____no_output_____" ], [ "train.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2637 entries, 0 to 2636\nData columns (total 81 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 PID 2637 non-null int64 \n 1 MS SubClass 2637 non-null int64 \n 2 MS Zoning 2637 non-null object \n 3 Lot Frontage 2188 non-null float64\n 4 Lot Area 2637 non-null int64 \n 5 Street 2637 non-null object \n 6 Alley 180 non-null object \n 7 Lot Shape 2637 non-null object \n 8 Land Contour 2637 non-null object \n 9 Utilities 2637 non-null object \n 10 Lot Config 2637 non-null object \n 11 Land Slope 2637 non-null object \n 12 Neighborhood 2637 non-null object \n 13 Condition 1 2637 non-null object \n 14 Condition 2 2637 non-null object \n 15 Bldg Type 2637 non-null object \n 16 House Style 2637 non-null object \n 17 Overall Qual 2637 non-null int64 \n 18 Overall Cond 2637 non-null int64 \n 19 Year Built 2637 non-null int64 \n 20 Year Remod/Add 2637 non-null int64 \n 21 Roof Style 2637 non-null object \n 22 Roof Matl 2637 non-null object \n 23 Exterior 1st 2637 non-null object \n 24 Exterior 2nd 2637 non-null object \n 25 Mas Vnr Type 2614 non-null object \n 26 Mas Vnr Area 2614 non-null float64\n 27 Exter Qual 2637 non-null object \n 28 Exter Cond 2637 non-null object \n 29 Foundation 2637 non-null object \n 30 Bsmt Qual 2564 non-null object \n 31 Bsmt Cond 2564 non-null object \n 32 Bsmt Exposure 2561 non-null object \n 33 BsmtFin Type 1 2564 non-null object \n 34 BsmtFin SF 1 2636 non-null float64\n 35 BsmtFin Type 2 2563 non-null object \n 36 BsmtFin SF 2 2636 non-null float64\n 37 Bsmt Unf SF 2636 non-null float64\n 38 Total Bsmt SF 2636 non-null float64\n 39 Heating 2637 non-null object \n 40 Heating QC 2637 
non-null object \n 41 Central Air 2637 non-null object \n 42 Electrical 2637 non-null object \n 43 1st Flr SF 2637 non-null int64 \n 44 2nd Flr SF 2637 non-null int64 \n 45 Low Qual Fin SF 2637 non-null int64 \n 46 Gr Liv Area 2637 non-null int64 \n 47 Bsmt Full Bath 2635 non-null float64\n 48 Bsmt Half Bath 2635 non-null float64\n 49 Full Bath 2637 non-null int64 \n 50 Half Bath 2637 non-null int64 \n 51 Bedroom AbvGr 2637 non-null int64 \n 52 Kitchen AbvGr 2637 non-null int64 \n 53 Kitchen Qual 2637 non-null object \n 54 TotRms AbvGrd 2637 non-null int64 \n 55 Functional 2637 non-null object \n 56 Fireplaces 2637 non-null int64 \n 57 Fireplace Qu 1353 non-null object \n 58 Garage Type 2492 non-null object \n 59 Garage Yr Blt 2490 non-null float64\n 60 Garage Finish 2490 non-null object \n 61 Garage Cars 2636 non-null float64\n 62 Garage Area 2636 non-null float64\n 63 Garage Qual 2490 non-null object \n 64 Garage Cond 2490 non-null object \n 65 Paved Drive 2637 non-null object \n 66 Wood Deck SF 2637 non-null int64 \n 67 Open Porch SF 2637 non-null int64 \n 68 Enclosed Porch 2637 non-null int64 \n 69 3Ssn Porch 2637 non-null int64 \n 70 Screen Porch 2637 non-null int64 \n 71 Pool Area 2637 non-null int64 \n 72 Pool QC 11 non-null object \n 73 Fence 528 non-null object \n 74 Misc Feature 96 non-null object \n 75 Misc Val 2637 non-null int64 \n 76 Mo Sold 2637 non-null int64 \n 77 Yr Sold 2637 non-null int64 \n 78 Sale Type 2637 non-null object \n 79 Sale Condition 2637 non-null object \n 80 SalePrice 2637 non-null int64 \ndtypes: float64(11), int64(27), object(43)\nmemory usage: 1.6+ MB\n" ], [ "train_copy = train.copy()", "_____no_output_____" ] ], [ [ "###Drop appropriate rows and columns", "_____no_output_____" ], [ "**Drop row 2001 because basement information is incomplete.**", "_____no_output_____" ] ], [ [ "train_copy = train_copy.drop(2001,axis=0)", "_____no_output_____" ] ], [ [ "**Drop rows 162 and 329 because Year Garage Built information is inconsistent.**", "_____no_output_____" ] ], [ [ "train_copy = train_copy.drop(162,axis=0)\ntrain_copy = train_copy.drop(329,axis=0)", "_____no_output_____" ] ], [ [ "**Drop row 1555 because it is the only instance with a tennis court ('TenC')** ", "_____no_output_____" ] ], [ [ "train_copy = train_copy.drop(1555,axis=0)", "_____no_output_____" ] ], [ [ "**Remove 'PID' feature.**", "_____no_output_____" ] ], [ [ "train_copy.drop('PID',axis=1,inplace=True)", "_____no_output_____" ] ], [ [ "**Drop 'Garage Area'**", "_____no_output_____" ], [ "We will drop 'Garage Area' and see if it improves the prediction, supposing that it is over correlated to 'Garage Cars' and the amount of cars the garage holds is more important than how big it is.", "_____no_output_____" ] ], [ [ "train_copy.drop('Garage Area',axis=1,inplace=True)", "_____no_output_____" ] ], [ [ "###Dealing with NaNs", "_____no_output_____" ], [ "**First let's fill null values with appropriate substitutions...**", "_____no_output_____" ] ], [ [ "train_copy['Lot Frontage'] = train_copy['Lot Frontage'].fillna(0.0)", "_____no_output_____" ], [ "train_copy['Alley'] = train_copy['Alley'].fillna('No Alley')", "_____no_output_____" ], [ "train_copy['Mas Vnr Type'] = train_copy['Mas Vnr Type'].fillna('None')", "_____no_output_____" ], [ "train_copy['Mas Vnr Area'] = train_copy['Mas Vnr Area'].fillna(0.0)", "_____no_output_____" ], [ "train_bsmt_null = train_copy[train_copy['BsmtFin Type 2'].isnull()].copy()", "_____no_output_____" ], [ "train_copy['Bsmt Qual'] = train_copy['Bsmt 
Qual'].fillna('No Basement')", "_____no_output_____" ], [ "train_copy['Bsmt Cond'] = train_copy['Bsmt Cond'].fillna('No Basement')", "_____no_output_____" ], [ "train_copy['Bsmt Exposure'] = train_copy['Bsmt Exposure'].fillna('No Basement')", "_____no_output_____" ], [ "train_copy['BsmtFin Type 1'] = train_copy['BsmtFin Type 1'].fillna('No Basement')", "_____no_output_____" ], [ "train_copy['BsmtFin Type 2'] = train_copy['BsmtFin Type 2'].fillna('No Basement')", "_____no_output_____" ], [ "train_copy['BsmtFin SF 1'] = train_copy['BsmtFin SF 1'].fillna(0.0)", "_____no_output_____" ], [ "train_copy['BsmtFin SF 2'] = train_copy['BsmtFin SF 2'].fillna(0.0)", "_____no_output_____" ], [ "train_bsmt_null = train_copy[train_copy['Bsmt Unf SF'].isnull()].copy()", "_____no_output_____" ], [ "train_copy['Bsmt Unf SF'] = train_copy['Bsmt Unf SF'].fillna(0.0)", "_____no_output_____" ], [ "train_copy['Total Bsmt SF'] = train_copy['Total Bsmt SF'].fillna(0.0)", "_____no_output_____" ], [ "train_copy['Bsmt Full Bath'] = train_copy['Bsmt Full Bath'].fillna(0.0)", "_____no_output_____" ], [ "train_copy['Bsmt Half Bath'] = train_copy['Bsmt Half Bath'].fillna(0.0)", "_____no_output_____" ], [ "train_copy['Fireplace Qu'] = train_copy['Fireplace Qu'].fillna('No Fireplace')", "_____no_output_____" ], [ "train_copy['Garage Type'] = train_copy['Garage Type'].fillna('No Garage')", "_____no_output_____" ], [ "train_copy['Garage Yr Blt'].fillna(train_copy['Year Built'], inplace=True)", "_____no_output_____" ], [ "train_copy['Garage Finish'] = train_copy['Garage Finish'].fillna('No Garage')", "_____no_output_____" ], [ "train_copy['Garage Qual'] = train_copy['Garage Qual'].fillna('No Garage')", "_____no_output_____" ], [ "train_copy['Garage Cond'] = train_copy['Garage Cond'].fillna('No Garage')", "_____no_output_____" ], [ "train_copy['Pool QC'] = train_copy['Pool QC'].fillna('No Pool')", "_____no_output_____" ], [ "train_copy['Fence'] = train_copy['Fence'].fillna('No Fence')", "_____no_output_____" ], [ "train_copy['Misc Feature'] = train_copy['Misc Feature'].fillna('No Misc Features')", "_____no_output_____" ], [ "train_copy.isna().sum().sum()", "_____no_output_____" ] ], [ [ "###The devil is in the details...", "_____no_output_____" ], [ "**Note that feature 'MS Subclass' is actually a categorical feature. Change numerical values to 'String' values.**", "_____no_output_____" ] ], [ [ "train_copy['MS SubClass'] = train_copy['MS SubClass'].apply(str)", "_____no_output_____" ], [ "train_copy['Full Bath'] = train_copy['Full Bath'].apply(str)", "_____no_output_____" ], [ "train_copy['Half Bath'] = train_copy['Half Bath'].apply(str)", "_____no_output_____" ] ], [ [ "**Found outliers due to NON \"Normal\" data instances. 
Removing all NON \"Normal\" data instances.**\n\n", "_____no_output_____" ] ], [ [ "train_copy = train_copy[train_copy['Sale Condition']=='Normal']", "_____no_output_____" ] ], [ [ "###Cleaned Data", "_____no_output_____" ], [ "**Let's make a new variable for the cleaned dataset.**", "_____no_output_____" ] ], [ [ "train_clean = train_copy.copy()", "_____no_output_____" ] ], [ [ "**All NaNs have been removed.**", "_____no_output_____" ] ], [ [ "train_clean.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2163 entries, 0 to 2636\nData columns (total 79 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MS SubClass 2163 non-null object \n 1 MS Zoning 2163 non-null object \n 2 Lot Frontage 2163 non-null float64\n 3 Lot Area 2163 non-null int64 \n 4 Street 2163 non-null object \n 5 Alley 2163 non-null object \n 6 Lot Shape 2163 non-null object \n 7 Land Contour 2163 non-null object \n 8 Utilities 2163 non-null object \n 9 Lot Config 2163 non-null object \n 10 Land Slope 2163 non-null object \n 11 Neighborhood 2163 non-null object \n 12 Condition 1 2163 non-null object \n 13 Condition 2 2163 non-null object \n 14 Bldg Type 2163 non-null object \n 15 House Style 2163 non-null object \n 16 Overall Qual 2163 non-null int64 \n 17 Overall Cond 2163 non-null int64 \n 18 Year Built 2163 non-null int64 \n 19 Year Remod/Add 2163 non-null int64 \n 20 Roof Style 2163 non-null object \n 21 Roof Matl 2163 non-null object \n 22 Exterior 1st 2163 non-null object \n 23 Exterior 2nd 2163 non-null object \n 24 Mas Vnr Type 2163 non-null object \n 25 Mas Vnr Area 2163 non-null float64\n 26 Exter Qual 2163 non-null object \n 27 Exter Cond 2163 non-null object \n 28 Foundation 2163 non-null object \n 29 Bsmt Qual 2163 non-null object \n 30 Bsmt Cond 2163 non-null object \n 31 Bsmt Exposure 2163 non-null object \n 32 BsmtFin Type 1 2163 non-null object \n 33 BsmtFin SF 1 2163 non-null float64\n 34 BsmtFin Type 2 2163 non-null object \n 35 BsmtFin SF 2 2163 non-null float64\n 36 Bsmt Unf SF 2163 non-null float64\n 37 Total Bsmt SF 2163 non-null float64\n 38 Heating 2163 non-null object \n 39 Heating QC 2163 non-null object \n 40 Central Air 2163 non-null object \n 41 Electrical 2163 non-null object \n 42 1st Flr SF 2163 non-null int64 \n 43 2nd Flr SF 2163 non-null int64 \n 44 Low Qual Fin SF 2163 non-null int64 \n 45 Gr Liv Area 2163 non-null int64 \n 46 Bsmt Full Bath 2163 non-null float64\n 47 Bsmt Half Bath 2163 non-null float64\n 48 Full Bath 2163 non-null object \n 49 Half Bath 2163 non-null object \n 50 Bedroom AbvGr 2163 non-null int64 \n 51 Kitchen AbvGr 2163 non-null int64 \n 52 Kitchen Qual 2163 non-null object \n 53 TotRms AbvGrd 2163 non-null int64 \n 54 Functional 2163 non-null object \n 55 Fireplaces 2163 non-null int64 \n 56 Fireplace Qu 2163 non-null object \n 57 Garage Type 2163 non-null object \n 58 Garage Yr Blt 2163 non-null float64\n 59 Garage Finish 2163 non-null object \n 60 Garage Cars 2163 non-null float64\n 61 Garage Qual 2163 non-null object \n 62 Garage Cond 2163 non-null object \n 63 Paved Drive 2163 non-null object \n 64 Wood Deck SF 2163 non-null int64 \n 65 Open Porch SF 2163 non-null int64 \n 66 Enclosed Porch 2163 non-null int64 \n 67 3Ssn Porch 2163 non-null int64 \n 68 Screen Porch 2163 non-null int64 \n 69 Pool Area 2163 non-null int64 \n 70 Pool QC 2163 non-null object \n 71 Fence 2163 non-null object \n 72 Misc Feature 2163 non-null object \n 73 Misc Val 2163 non-null int64 \n 74 Mo Sold 2163 non-null int64 \n 75 Yr Sold 2163 non-null 
int64 \n 76 Sale Type 2163 non-null object \n 77 Sale Condition 2163 non-null object \n 78 SalePrice 2163 non-null int64 \ndtypes: float64(10), int64(23), object(46)\nmemory usage: 1.3+ MB\n" ] ], [ [ "**Re-index the dataframe.**", "_____no_output_____" ] ], [ [ "train_clean = train_clean.reset_index(drop=True)", "_____no_output_____" ] ], [ [ "##Exploratory Data Analysis", "_____no_output_____" ], [ "**Make a new dataframe with just the numeric features. We will do some analysis before adding features with categorical values (using onehot encoding).**", "_____no_output_____" ] ], [ [ "train_numeric = train_clean.select_dtypes(include=['int64','float64'])", "_____no_output_____" ], [ "train_numeric.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2163 entries, 0 to 2162\nData columns (total 33 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Lot Frontage 2163 non-null float64\n 1 Lot Area 2163 non-null int64 \n 2 Overall Qual 2163 non-null int64 \n 3 Overall Cond 2163 non-null int64 \n 4 Year Built 2163 non-null int64 \n 5 Year Remod/Add 2163 non-null int64 \n 6 Mas Vnr Area 2163 non-null float64\n 7 BsmtFin SF 1 2163 non-null float64\n 8 BsmtFin SF 2 2163 non-null float64\n 9 Bsmt Unf SF 2163 non-null float64\n 10 Total Bsmt SF 2163 non-null float64\n 11 1st Flr SF 2163 non-null int64 \n 12 2nd Flr SF 2163 non-null int64 \n 13 Low Qual Fin SF 2163 non-null int64 \n 14 Gr Liv Area 2163 non-null int64 \n 15 Bsmt Full Bath 2163 non-null float64\n 16 Bsmt Half Bath 2163 non-null float64\n 17 Bedroom AbvGr 2163 non-null int64 \n 18 Kitchen AbvGr 2163 non-null int64 \n 19 TotRms AbvGrd 2163 non-null int64 \n 20 Fireplaces 2163 non-null int64 \n 21 Garage Yr Blt 2163 non-null float64\n 22 Garage Cars 2163 non-null float64\n 23 Wood Deck SF 2163 non-null int64 \n 24 Open Porch SF 2163 non-null int64 \n 25 Enclosed Porch 2163 non-null int64 \n 26 3Ssn Porch 2163 non-null int64 \n 27 Screen Porch 2163 non-null int64 \n 28 Pool Area 2163 non-null int64 \n 29 Misc Val 2163 non-null int64 \n 30 Mo Sold 2163 non-null int64 \n 31 Yr Sold 2163 non-null int64 \n 32 SalePrice 2163 non-null int64 \ndtypes: float64(10), int64(23)\nmemory usage: 557.8 KB\n" ] ], [ [ "**Create a variable that shows correlations to the sales price.**", "_____no_output_____" ] ], [ [ "train_numeric_corr = train_numeric.corr()['SalePrice']", "_____no_output_____" ], [ "type(train_numeric_corr)", "_____no_output_____" ] ], [ [ "Check out some of the correlations with the heat map...", "_____no_output_____" ] ], [ [ "sns.set()\nplt.figure(figsize=(20,12))\n\nsns.heatmap(train.corr(), cmap='BuPu', annot=False);", "_____no_output_____" ] ], [ [ "**Make a 'golden features list' that has the absolute values of the correlations from highest to lowest, the drop the sales price column because we don't care that it's correlated to itself.**", "_____no_output_____" ], [ "###Golden Features List \n**numeric values with high correlation**", "_____no_output_____" ] ], [ [ "golden_features_list = train_numeric_corr.abs().sort_values(ascending=False)\ngolden_features_list = golden_features_list.drop('SalePrice',axis=0)\ngolden_features_list", "_____no_output_____" ], [ "golden_features_list.shape", "_____no_output_____" ], [ "golden_features_list.index[:4]", "_____no_output_____" ], [ "#assign sales price as variable 'outcomes'\noutcomes = train_numeric['SalePrice']", "_____no_output_____" ] ], [ [ "###Trying out different models", "_____no_output_____" ], [ "**Regression with one numerical feature 
(top correlation).**", "_____no_output_____" ] ], [ [ "#created a dataframe with one feature from goldenf features list\ntrain_numeric_one_feature = pd.DataFrame(train_numeric[golden_features_list.index[:1]].copy())\n#add a '1' for the y intercept\ntrain_numeric_one_feature['Ones'] = 1\ntrain_numeric_one_feature", "_____no_output_____" ] ], [ [ "Create a OLS (ordinary least squares) model with sm (statsmodels.api)", "_____no_output_____" ] ], [ [ "#fit the model\nmodel_one_feature = sm.OLS(outcomes,train_numeric_one_feature).fit()\n#Store prediction in this variable\npredictions_one_feature = model_one_feature.predict(train_numeric_one_feature)\n\nmodel_one_feature.summary()\n#Check the R-squared value for a good fit...", "_____no_output_____" ] ], [ [ "(Above) has an R-squared value of 0.628... not great", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(12,10))\nplt.scatter(outcomes,predictions_one_feature)\nplt.plot([0, max(predictions_one_feature)],[0, max(predictions_one_feature)], c='red');", "_____no_output_____" ] ], [ [ "We can see that the result(above) is... okay - kind of choppy. ", "_____no_output_____" ], [ "\n**Let's add another feature. Create a new dataframe with two features...**", "_____no_output_____" ] ], [ [ "train_numeric_two_features = pd.DataFrame(train_numeric[golden_features_list.index[:2]].copy())\n\n#adding our '1's again...\ntrain_numeric_two_features['Ones'] = 1\n\ntrain_numeric_two_features", "_____no_output_____" ] ], [ [ "**Create the model with two features and check the fit...**", "_____no_output_____" ] ], [ [ "model_two_features = sm.OLS(outcomes,train_numeric_two_features).fit()\npredictions_two_features = model_two_features.predict(train_numeric_two_features)\nmodel_two_features.summary()", "_____no_output_____" ] ], [ [ "(above) R-squared value has increased a bit, but still not impressive", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(12,10))\nplt.scatter(outcomes,predictions_two_features)\nplt.plot([0, max(predictions_two_features)],[0, max(predictions_two_features)], c='red');\n\nplt.xlabel('Actual Sales Price',fontsize=18);\nplt.ylabel('Predictions',fontsize=18);\n\nplt.axis('equal');\nplt.axis('square');", "_____no_output_____" ] ], [ [ "(Above) We can see it's starting to line up, but there are some outliers and nonlinear patterns.", "_____no_output_____" ], [ "###Observing change in R-squared", "_____no_output_____" ], [ "**Let's investigate the change in R-squared values when adding new features...**", "_____no_output_____" ] ], [ [ "numLoops = 37\nr_squared_array = np.zeros(numLoops)\n\nfor idx in range(0,numLoops):\n train_numeric_temp_df = pd.DataFrame(train_numeric[golden_features_list.index[:(idx+1)]].copy())\n #add '1's for y intercept\n train_numeric_temp_df['Ones'] = 1\n model_temp = sm.OLS(outcomes,train_numeric_temp_df).fit()\n r_squared_array[idx] = model_temp.rsquared\n\n#Declare plot size\nplt.figure(figsize=(12,10));\n#Plot 'r_squared_array'\nplt.plot(r_squared_array);\n#label the x & y axes\nplt.xlabel('Number of Features',fontsize=18);\nplt.ylabel('R-Squared',fontsize=18);\n#Change font for the ticks \nplt.xticks(fontsize=12);\nplt.yticks(fontsize=12);", "_____no_output_____" ] ], [ [ "(Above) Visualize the increase in R-Squared as you increase the number of features in the regression. 
We can see that the R-squared value caps at about ~25 features.\n\n", "_____no_output_____" ], [ "**Visualize the increase in prediction (on the same training data) as you increase the number of features in the regression.**\n", "_____no_output_____" ] ], [ [ "#create a function for the percentage of error\n\ndef root_mean_squared_percentage_error(y_true, y_predicted):\n\n#root mean squared percentage errror is rmspe\n rmspe = np.sqrt(np.mean(np.square((y_true - y_predicted)/y_true)))*100\n \n return rmspe", "_____no_output_____" ] ], [ [ "Look at the percentage of error for prediction with 1 feature... it's 26.1%", "_____no_output_____" ] ], [ [ "y_predict_one_feature = model_one_feature.predict(train_numeric_one_feature)\ntotal_percent_error = root_mean_squared_percentage_error(outcomes, y_predict_one_feature)\nprint(total_percent_error,\"%\")", "26.088533668732804 %\n" ] ], [ [ "Do the same with two features... it's 22.5%... a lil better", "_____no_output_____" ] ], [ [ "y_predict_two_features = model_two_features.predict(train_numeric_two_features)\ntotal_percent_error = root_mean_squared_percentage_error(outcomes, y_predict_two_features)\nprint(total_percent_error,\"%\")", "22.45263019837068 %\n" ] ], [ [ "**Make a 'for loop' showing the percentage error with each feature added...**", "_____no_output_____" ] ], [ [ "numLoops = 37\npercentage_error_array = np.zeros(numLoops)\n\nfor idx in range(0,numLoops):\n train_numeric_temp_df = pd.DataFrame(train_numeric[golden_features_list.index[:(idx+1)]].copy())\n train_numeric_temp_df['Ones'] = 1\n model_temp = sm.OLS(outcomes, train_numeric_temp_df).fit()\n predict_temp = model_temp.predict(train_numeric_temp_df)\n percentage_error_array[idx] = root_mean_squared_percentage_error(outcomes, predict_temp)\n\nplt.figure(figsize=(12,10));\n#Plot array, add argument for dots instead of a line\nplt.plot(percentage_error_array,'.');\nplt.xlabel('Number of Features', fontsize=18);\nplt.ylabel('Percentage Error', fontsize=18);\nplt.xticks(fontsize=12);\nplt.yticks(fontsize=12);", "_____no_output_____" ], [ "y_predict_two_features[y_predict_two_features>400000]", "_____no_output_____" ], [ "train_clean.iloc[670,:]", "_____no_output_____" ] ], [ [ "##Data Processing", "_____no_output_____" ], [ "### Cross Validation (Numeric Values)", "_____no_output_____" ], [ "**Let's see cross validation for the numerical columns from the golden features list.**", "_____no_output_____" ] ], [ [ "res = 100\npercentage_error_mean = np.zeros(36)\nfor idx in range(0,36):\n print(idx)\n train_numeric_temp_df = pd.DataFrame(train_numeric[golden_features_list.index[:(idx+1)]]).copy()\n train_numeric_temp_df['Ones'] = 1\n percentage_error_array = np.zeros(res)\n for idx2 in range(0, res):\n x_train, x_test, y_train, y_test = model_selection.train_test_split(train_numeric_temp_df,outcomes,test_size=0.1)\n model_temp = sm.OLS(y_train, x_train).fit()\n predict_temp = model_temp.predict(x_test)\n percentage_error_array[idx2] = root_mean_squared_percentage_error(y_test, predict_temp)\n percentage_error_mean[idx] = percentage_error_array.mean()\nprint(percentage_error_mean) ", "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n[25.87258879 22.18965895 21.2544914 20.20910217 20.00879133 18.48048898\n 18.0782799 17.96037652 17.90075872 17.02186616 16.93401383 16.45915903\n 16.28251952 16.48967409 16.54760431 16.63086745 16.25788896 15.93463186\n 15.49744457 15.75697525 15.51116981 15.45125421 
15.46924035 15.77770553\n 15.25222426 15.44447562 15.67562736 15.26507694 15.42660622 15.53709158\n 15.35646662 15.11225157 15.22072388 15.37823515 15.48429722 15.01601389]\n" ], [ "print(percentage_error_mean.mean())", "16.892934498983273\n" ], [ "\nplt.figure(figsize=(12,10));\nplt.plot(percentage_error_mean);\n", "_____no_output_____" ] ], [ [ "###Scaling (Robust)", "_____no_output_____" ] ], [ [ "train_numeric_all_features = pd.DataFrame(train_numeric[golden_features_list.index[:36]].copy())\n", "_____no_output_____" ], [ "scaler = preprocessing.RobustScaler()\ntrain_numeric_robust = scaler.fit_transform(train_numeric_all_features)\ntrain_numeric_robust = pd.DataFrame(train_numeric_robust, columns=train_numeric_all_features.columns)\n\ntrain_numeric_robust['Ones'] = 1", "_____no_output_____" ], [ "train_numeric_robust.head()", "_____no_output_____" ], [ "model_robust_features = sm.OLS(outcomes,train_numeric_robust).fit()\npredictions_robust_features = model_robust_features.predict(train_numeric_robust)\nmodel_robust_features.summary()", "_____no_output_____" ], [ "type(train_numeric_robust)", "_____no_output_____" ], [ "train_numeric_robust", "_____no_output_____" ], [ "type(model_robust_features)", "_____no_output_____" ] ], [ [ "###One-Hot Encoding", "_____no_output_____" ], [ "An example with one feature one-hot encoded", "_____no_output_____" ] ], [ [ "train_neighborhood = pd.DataFrame(train_clean['Neighborhood'].copy())\ntrain_one_hot = pd.get_dummies(train_neighborhood['Neighborhood'])\ntrain_one_hot", "_____no_output_____" ], [ "train_numeric_one_hot = pd.concat([train_numeric, train_one_hot], axis=1)\ntrain_numeric_one_hot.drop(['SalePrice'], axis=1, inplace=True)", "_____no_output_____" ], [ "train_numeric_one_hot['Ones'] = 1\nmodel_one_hot = sm.OLS(outcomes, train_numeric_one_hot).fit()\npredict_one_hot = model_one_hot.predict(train_numeric_one_hot)\npercentage_error = root_mean_squared_percentage_error(outcomes, predict_one_hot)\npercentage_error", "_____no_output_____" ] ], [ [ "###Making a list of all the non-numeric features", "_____no_output_____" ] ], [ [ "train_non_numeric = train_clean.select_dtypes(include=['object'])", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "train_non_numeric.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2163 entries, 0 to 2162\nData columns (total 46 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MS SubClass 2163 non-null object\n 1 MS Zoning 2163 non-null object\n 2 Street 2163 non-null object\n 3 Alley 2163 non-null object\n 4 Lot Shape 2163 non-null object\n 5 Land Contour 2163 non-null object\n 6 Utilities 2163 non-null object\n 7 Lot Config 2163 non-null object\n 8 Land Slope 2163 non-null object\n 9 Neighborhood 2163 non-null object\n 10 Condition 1 2163 non-null object\n 11 Condition 2 2163 non-null object\n 12 Bldg Type 2163 non-null object\n 13 House Style 2163 non-null object\n 14 Roof Style 2163 non-null object\n 15 Roof Matl 2163 non-null object\n 16 Exterior 1st 2163 non-null object\n 17 Exterior 2nd 2163 non-null object\n 18 Mas Vnr Type 2163 non-null object\n 19 Exter Qual 2163 non-null object\n 20 Exter Cond 2163 non-null object\n 21 Foundation 2163 non-null object\n 22 Bsmt Qual 2163 non-null object\n 23 Bsmt Cond 2163 non-null object\n 24 Bsmt Exposure 2163 non-null object\n 25 BsmtFin Type 1 2163 non-null object\n 26 BsmtFin Type 2 2163 non-null object\n 27 Heating 2163 non-null object\n 28 Heating QC 2163 non-null object\n 29 Central Air 2163 non-null object\n 
30 Electrical 2163 non-null object\n 31 Full Bath 2163 non-null object\n 32 Half Bath 2163 non-null object\n 33 Kitchen Qual 2163 non-null object\n 34 Functional 2163 non-null object\n 35 Fireplace Qu 2163 non-null object\n 36 Garage Type 2163 non-null object\n 37 Garage Finish 2163 non-null object\n 38 Garage Qual 2163 non-null object\n 39 Garage Cond 2163 non-null object\n 40 Paved Drive 2163 non-null object\n 41 Pool QC 2163 non-null object\n 42 Fence 2163 non-null object\n 43 Misc Feature 2163 non-null object\n 44 Sale Type 2163 non-null object\n 45 Sale Condition 2163 non-null object\ndtypes: object(46)\nmemory usage: 777.5+ KB\n" ], [ "train_non_numeric", "_____no_output_____" ] ], [ [ "One hot encoding all non-numeric features", "_____no_output_____" ] ], [ [ "train_objects = pd.DataFrame(train_non_numeric.copy())\ntrain_one_hot_all = pd.get_dummies(train_objects)\ntrain_one_hot_all['SalePrice'] = train_clean['SalePrice']\ntrain_one_hot_all.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2163 entries, 0 to 2162\nColumns: 290 entries, MS SubClass_120 to SalePrice\ndtypes: int64(1), uint8(289)\nmemory usage: 627.5 KB\n" ] ], [ [ "Adding those features to our select numeric features", "_____no_output_____" ] ], [ [ "train_numeric_one_hot_all = pd.concat([train_numeric_robust, train_one_hot_all], axis=1)\ntrain_numeric_one_hot_all.drop(['SalePrice'], axis=1, inplace=True)\n", "_____no_output_____" ], [ "train_numeric_one_hot_all['Ones'] = 1\nmodel_one_hot_all = sm.OLS(outcomes, train_numeric_one_hot_all).fit()\npredict_one_hot_all = model_one_hot_all.predict(train_numeric_one_hot_all)\npercentage_error_total_one_hot = root_mean_squared_percentage_error(outcomes, predict_one_hot_all)\npercentage_error_total_one_hot", "_____no_output_____" ] ], [ [ "###Nonlinear Fitting", "_____no_output_____" ], [ "Determine the best power to use for nonlinear fitting (outcomes) with cross-validation", "_____no_output_____" ] ], [ [ "# Look for 'powNum' that gives us the optimal (lowest) percentage error.\n\nintArray = 1/np.arange(1,11,1)\nprint(intArray)\nnumInnerLoops = 100\n\n# Pre-allocate outer loop result array\npercentage_error_mean = np.zeros(len(intArray))\n\nfor outerIdx, powNum in enumerate(intArray): \n print('idx = ',outerIdx)\n \n # Create dataframe for prediction\n #train_nonlinear = train_numeric_one_hot_all.drop(['SalePrice'], axis=1).copy()\n #train_nonlinear['Ones'] = 1\n\n # Pre-allocate inner result array\n percentage_error = np.zeros(numInnerLoops)\n \n # Cross validation loop\n for innerIdx in range(0,numInnerLoops):\n # Cross validation process: Split data, train model, predict from model, check accuracy\n X_train, X_test, y_train, y_test = model_selection.train_test_split(train_numeric_one_hot_all, outcomes, test_size=0.2)\n # For training use X_train, y_train\n y_train_with_power = y_train.pow(powNum) # Set training outcome to a power (powNum) of the 'SalePrice'\n model_loop_temp = sm.OLS(y_train_with_power, X_train).fit()\n # For prediction use X_test (didn't train on it)\n predict_loop_temp = model_loop_temp.predict(X_test)\n final_predict_loop_temp = predict_loop_temp.pow(1/powNum) # Set final prediction outcome to the power (1/powNum)\n # Check accuracy of predict_loop_temp versus y_test (real outcomes)\n percentage_error[innerIdx] = root_mean_squared_percentage_error(y_test, final_predict_loop_temp)\n\n percentage_error_mean[outerIdx] = percentage_error.mean()", "[1. 
0.5 0.33333333 0.25 0.2 0.16666667\n 0.14285714 0.125 0.11111111 0.1 ]\nidx = 0\nidx = 1\nidx = 2\nidx = 3\nidx = 4\nidx = 5\nidx = 6\nidx = 7\nidx = 8\nidx = 9\n" ], [ "plt.plot(percentage_error_mean);", "_____no_output_____" ] ], [ [ "Above shows us that best power number is 1/3", "_____no_output_____" ] ], [ [ "percentage_error_mean.mean()", "_____no_output_____" ] ], [ [ "###Ridge Regression", "_____no_output_____" ] ], [ [ "model_one_hot = sm.OLS(outcomes, train_numeric_one_hot_all).fit()\npredict_one_hot = model_one_hot.predict(train_numeric_one_hot_all)\npercentage_error_one_hot = root_mean_squared_percentage_error(outcomes,predict_one_hot)\npercentage_error_one_hot", "_____no_output_____" ], [ "alpha_value = [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000]\n\npercentage_error_ridge = np.zeros(len(alpha_value))\nnumLoops = 250\n\nfor idx, alpha_val in enumerate(alpha_value):\n print('idx = ',idx)\n percentage_error_temp = np.zeros(numLoops)\n ridgereg = Ridge(alpha=alpha_val, normalize=False)\n \n for innerIdx in range(0,numLoops):\n X_train, X_test, y_train, y_test = model_selection.train_test_split(train_numeric_one_hot_all, outcomes, test_size=0.1)\n ridgereg.fit(X_train, y_train)\n y_pred = ridgereg.predict(X_test)\n percentage_error_temp[innerIdx] = root_mean_squared_percentage_error(y_test, y_pred)\n\n percentage_error_ridge[idx] = percentage_error_temp.mean()\n\nplt.plot(percentage_error_ridge);\nprint(percentage_error_ridge)\nprint(percentage_error_ridge.mean())", "idx = 0\nidx = 1\nidx = 2\nidx = 3\nidx = 4\nidx = 5\nidx = 6\n[11.41438828 11.34231913 11.14577984 10.89167412 10.62045807 10.92868437\n 12.63506895]\n11.282624679372622\n" ], [ "percentage_error_ridge.mean()", "_____no_output_____" ] ], [ [ "Above shows best alpha value is 10 ", "_____no_output_____" ] ], [ [ "alpha_value = [10]\nbest_powNum = 1/3\npercentage_error_ridge = np.zeros(len(alpha_value))\nnumLoops = 1000\n\nfor idx, alpha_val in enumerate(alpha_value):\n print('idx = ',idx)\n percentage_error_temp = np.zeros(numLoops)\n ridgereg = Ridge(alpha=alpha_val, normalize=False)\n \n for innerIdx in range(0,numLoops):\n\n X_train, X_test, y_train, y_test = model_selection.train_test_split(train_numeric_one_hot_all, outcomes, test_size=0.1)\n ridgereg.fit(X_train, y_train.pow(best_powNum))\n y_pred = ridgereg.predict(X_test)\n full_y_pred = np.power(y_pred, 1/best_powNum)\n percentage_error_temp[innerIdx] = root_mean_squared_percentage_error(y_test, full_y_pred)\n\n percentage_error_ridge[idx] = percentage_error_temp.mean()\n\n# plt.plot(percentage_error_ridge);\nprint(percentage_error_ridge)\nprint(percentage_error_ridge.mean())", "idx = 0\n[9.24729477]\n9.247294765011254\n" ], [ "train_numeric_one_hot_all", "_____no_output_____" ] ], [ [ "###Final Model", "_____no_output_____" ] ], [ [ "# Set our optimized variables.\nbest_powNum = 1/3\nbest_alpha = 10\n\n# Save final outcomes\noutcomes_final = train_numeric['SalePrice'].copy()\n\n# Save final numeric features\ntrain_numeric_final = train_numeric.drop(['SalePrice'], axis=1).copy()\n", "_____no_output_____" ] ], [ [ "Produce final model using ridge regression.", "_____no_output_____" ] ], [ [ "model_final_ridge = Ridge(alpha=best_alpha, normalize=False)\nmodel_final_ridge.fit(train_numeric_one_hot_all, outcomes_final.pow(best_powNum))", "_____no_output_____" ], [ "train_numeric_one_hot_all.shape\n", "_____no_output_____" ] ], [ [ "###Process the blind test data.", "_____no_output_____" ], [ "Load blind test data.", "_____no_output_____" ], [ "Examine blind test data.", 
"_____no_output_____" ] ], [ [ "test_data.shape", "_____no_output_____" ] ], [ [ "Insure there are no NaNs.", "_____no_output_____" ] ], [ [ "test_data.isna().sum().sum()", "_____no_output_____" ] ], [ [ "Change 'MS Subclass' from int64 to str.", "_____no_output_____" ] ], [ [ "test_data['MS SubClass'] = test_data['MS SubClass'].apply(str)", "_____no_output_____" ], [ "test_data['Full Bath'] = test_data['Full Bath'].apply(str)", "_____no_output_____" ], [ "test_data['Half Bath'] = test_data['Half Bath'].apply(str)", "_____no_output_____" ] ], [ [ "Make a dataframe of the numerical features used to train the final model.", "_____no_output_____" ] ], [ [ "test_data_numeric = test_data[train_numeric_final.columns]", "_____no_output_____" ] ], [ [ "###Scaling the test data (Robust)", "_____no_output_____" ] ], [ [ "test_numeric_all_features = pd.DataFrame(test_data_numeric[golden_features_list.index[:36]].copy())\n", "_____no_output_____" ], [ "scaler = preprocessing.RobustScaler()\ntest_numeric_robust = scaler.fit_transform(test_numeric_all_features)\ntest_numeric_robust = pd.DataFrame(test_numeric_robust, columns=test_numeric_all_features.columns)\n\ntest_numeric_robust['Ones'] = 1", "_____no_output_____" ], [ "test_numeric_robust.head()", "_____no_output_____" ], [ "type(test_numeric_robust)", "_____no_output_____" ], [ "test_numeric_robust.head()", "_____no_output_____" ] ], [ [ "###One-Hot the Test Data", "_____no_output_____" ], [ "Make a dataframe of the categorical features (before one hot encoding) used to train the final model.", "_____no_output_____" ] ], [ [ "test_data_category = test_data[train_non_numeric.columns]", "_____no_output_____" ] ], [ [ "One hot encode the test_data categories used to train the final model.", "_____no_output_____" ] ], [ [ "test_category_one_hot = pd.get_dummies(test_data_category)\ntest_data_one_hot = pd.concat([test_numeric_robust, test_category_one_hot], axis=1)\ntest_data_one_hot['Ones'] = 1\ntest_data_one_hot.shape", "_____no_output_____" ] ], [ [ "###Create DataFrame with Correct Size", "_____no_output_____" ], [ "Get the number of rows (data instances) in the test_data.", "_____no_output_____" ] ], [ [ "numRows = test_data_one_hot.shape[0]\nnumRows", "_____no_output_____" ] ], [ [ "Creat a dataframe of all zeros with the size of: Number of rows equal to the number of data instances in the test_data; and Columns equal to the features used to train the final model. You will get an error if the sizes are not the same.", "_____no_output_____" ] ], [ [ "test_data_full = pd.DataFrame(0, index=np.arange(numRows), columns=train_numeric_one_hot_all.columns)", "_____no_output_____" ] ], [ [ "Need to find the intersection of the features used to train the model (train_numeric_one_hot_all) and the features in the test_data set (test_data_one_hot). 
", "_____no_output_____" ] ], [ [ "column_intersection = test_data_one_hot.columns.intersection(train_numeric_one_hot_all.columns)", "_____no_output_____" ] ], [ [ "###Populate DataFrame with Intersection Values", "_____no_output_____" ], [ "Populate the dataframe of zeros with intersection features from above code block.", "_____no_output_____" ] ], [ [ "test_data_full[column_intersection] = test_data_one_hot[column_intersection]", "_____no_output_____" ], [ "test_data_full.head()", "_____no_output_____" ] ], [ [ "###Final Prediction", "_____no_output_____" ], [ "Predict using ridge regression.", "_____no_output_____" ] ], [ [ "test_predict_final_ridge = model_final_ridge.predict(test_data_full)\nfull_test_predict_final_ridge = np.power(test_predict_final_ridge, 1/best_powNum)", "_____no_output_____" ], [ "full_test_predict_final_ridge_result = pd.DataFrame(full_test_predict_final_ridge,columns=['Result'])", "_____no_output_____" ], [ "full_test_predict_final_ridge_result.to_csv('/content/drive/My Drive/Colab Notebooks/csv files/Blind_Test_Predictions.csv', index=False)", "_____no_output_____" ], [ "temp = pd.read_csv('/content/drive/My Drive/Colab Notebooks/csv files/Blind_Test_Predictions.csv')", "_____no_output_____" ], [ "temp.head()", "_____no_output_____" ] ], [ [ "##Conclusion", "_____no_output_____" ], [ "- We can increase the prediction accuracy by using numerical values highly correlated to sales price.\n- Scaling the numerical values can help by giving us more useful coefficients.\n-Nonlinear fitting can decrease error percentage by dealing with outliers that don't fit the linear model.\n-We can improve the model even more by breaking out string type values into additional features with one-hot encoding.\n-Too many features can lead to over-fitting the model, which decreases it's predictive power but...\n-We can address over-fitting issues with another nonlinear regression technique, for example ridge regression.\n", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
ec7f82df2e74ae4d10cb75ebeb78149a1b9edfd7
1,446
ipynb
Jupyter Notebook
Multiplication Tables.ipynb
Bestofdev/Multiplication-Tables-
5598e322c03f39f0c09b778f5c925a19c0b2c826
[ "Apache-2.0" ]
1
2021-11-28T08:58:26.000Z
2021-11-28T08:58:26.000Z
Multiplication Tables.ipynb
Bestofdev/Multiplication-Tables-
5598e322c03f39f0c09b778f5c925a19c0b2c826
[ "Apache-2.0" ]
null
null
null
Multiplication Tables.ipynb
Bestofdev/Multiplication-Tables-
5598e322c03f39f0c09b778f5c925a19c0b2c826
[ "Apache-2.0" ]
null
null
null
22.952381
95
0.560166
[ [ [ "#number= Number of multiplication table(integer)\nnumber=int(input('Enter the Number of multiplication table: '))\n\n#multiplier (integer)\nmultipliers=int(input('Enter the Maximum Number of multiplier: ') )\n\nlen_of_muliplier=len(str(multipliers))\nmaxresult_length=len(str(multipliers*number))\n\nfor multiplier in range(1,multipliers+1):\n multiplier_str=str(multiplier).rjust(len_of_muliplier,' ')\n result=str(multiplier*number)\n print( number , \"*\" , multiplier_str , \"=\" , result.rjust(maxresult_length,' ') )", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
ec7f90ed2ff670cb4fff467c315959b12d4fb323
11,441
ipynb
Jupyter Notebook
homeworks/hw3/vgg11bn/Evaluator.ipynb
baixianger/DLAV-2022
4c43caa0da1b48c5c7320c0923bac0b33a9e9b04
[ "MIT" ]
null
null
null
homeworks/hw3/vgg11bn/Evaluator.ipynb
baixianger/DLAV-2022
4c43caa0da1b48c5c7320c0923bac0b33a9e9b04
[ "MIT" ]
null
null
null
homeworks/hw3/vgg11bn/Evaluator.ipynb
baixianger/DLAV-2022
4c43caa0da1b48c5c7320c0923bac0b33a9e9b04
[ "MIT" ]
null
null
null
5,720.5
11,440
0.479591
[ [ [ "import numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nimport torch.utils.data as utils\nfrom torchvision import datasets, transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport os\nimport platform\nimport pickle", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "_____no_output_____" ], [ "!rm -r DLAV-2022\n!git clone https://github.com/vita-epfl/DLAV-2022.git\npath = os.getcwd() + '/DLAV-2022/homeworks/hw2/test_batch'", "rm: DLAV-2022: No such file or directory\nCloning into 'DLAV-2022'...\nremote: Enumerating objects: 83, done.\u001b[K\nremote: Counting objects: 100% (83/83), done.\u001b[K\nremote: Compressing objects: 100% (63/63), done.\u001b[K\nremote: Total 83 (delta 31), reused 60 (delta 16), pack-reused 0\u001b[K\nReceiving objects: 100% (83/83), 27.73 MiB | 3.60 MiB/s, done.\nResolving deltas: 100% (31/31), done.\n" ], [ "# Set the variable to the location of the trained model\nmodel_path = './cifar10_vgg11bn_CUDA.ckpt'", "_____no_output_____" ], [ "class ConvNet(nn.Module):\n def __init__(self, n_input_channels=3, n_output=10):\n super().__init__()\n ################################################################################\n # TODO: #\n # Define 2 or more different layers of the neural network #\n ################################################################################\n # use the VGG11bn model which 8 convolution layers and 3 full connect layers\n # '?' denotes the batch size, which is 64 in our default setting from func above\n # network:[64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n self.features = nn.Sequential(\n # 1. 64\n nn.Conv2d(3, 64, kernel_size=3, padding=1), # ( 3,32,32) => ( 64,32,32)\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2), # ( 64,32,32) => ( 64,16,16)\n # 2. 128 conv\n nn.Conv2d(64, 128, kernel_size=3, padding=1), # ( 64,16,16) => (128,16,16)\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2), # (128,16,16) => (128, 8, 8)\n # 3. 256 conv\n nn.Conv2d(128, 256, kernel_size=3, padding=1), # (128, 8, 8) => (256, 8, 8)\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n # 4. 256\n nn.Conv2d(256, 256, kernel_size=3, padding=1), # (256, 8, 8) => (256, 8, 8)\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2), # (256, 8, 8) => (256, 4, 4)\n # 5. 512\n nn.Conv2d(256, 512, kernel_size=3, padding=1), # (512, 4, 4) => (512, 4, 4)\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True), \n # 6. 512\n nn.Conv2d(512, 512, kernel_size=3, padding=1), # (512, 4, 4) => (512, 4, 4)\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True), \n nn.MaxPool2d(kernel_size=2, stride=2), # (512, 4, 4) => (512, 2, 2)\n # 7. 512\n nn.Conv2d(512, 512, kernel_size=3, padding=1), # (512, 2, 2) => (512, 2, 2)\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True), \n # 8. 512\n nn.Conv2d(512, 512, kernel_size=3, padding=1), # (512, 2, 2) => (512, 2, 2)\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True), \n nn.MaxPool2d(kernel_size=2, stride=2), # (512, 2, 2) => (512, 1, 1)\n )\n self.classifier = nn.Sequential(\n # 9. Linear\n nn.Dropout(),\n nn.Linear(512, 512), # (512, 1, 1) => flatten => (512,)\n nn.ReLU(True),\n # 10. linear\n nn.Dropout(),\n nn.Linear(512, 512), # (512,) => (512,)\n nn.ReLU(True),\n # 11. 
linear\n nn.Linear(512, 10), # (512,) => (10,)\n )\n ################################################################################\n # END OF YOUR CODE #\n ################################################################################\n \n def forward(self, x):\n ################################################################################\n # TODO: #\n # Set up the forward pass that the input data will go through. #\n # A good activation function betweent the layers is a ReLu function. #\n # #\n # Note that the output of the last convolution layer should be flattened #\n # before being inputted to the fully connected layer. We can flatten #\n # Tensor `x` with `x.view`. #\n ################################################################################\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n ################################################################################\n # END OF YOUR CODE #\n ################################################################################\n \n return x\n \n def predict(self, x):\n logits = self.forward(x)\n return F.softmax(logits)", "_____no_output_____" ], [ "def predict_usingCNN(X):\n #########################################################################\n # TODO: #\n # - Load your saved model #\n # - Do the operation required to get the predictions #\n # - Return predictions in a numpy array #\n # Note: For the predictions, you have to return the index of the max #\n # value #\n #########################################################################\n # CUDA setting\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n net = ConvNet().to(device)\n # Load my optimized model\n checkpoint = torch.load(model_path, map_location=torch.device(device))\n\n net.load_state_dict(checkpoint)\n # prediction/inference\n with torch.no_grad():\n X = X.to(device)\n outputs = net(X)\n _, predicted = torch.max(outputs.data, 1)\n # _, predicted = torch.max(F.softmax(outputs,dim=1).data, 1)\n # cannot turn a CUDA tensor to numpy, so move to CPU first\n y_pred = predicted.to('cpu').numpy()\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n return y_pred\n ", "_____no_output_____" ], [ "## Read DATA\ndef load_pickle(f):\n version = platform.python_version_tuple()\n if version[0] == '2':\n return pickle.load(f)\n elif version[0] == '3':\n return pickle.load(f, encoding='latin1')\n raise ValueError(\"invalid python version: {}\".format(version))\n\ndef load_CIFAR_batch(filename):\n \"\"\" load single batch of cifar \"\"\"\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).astype(\"float\")\n Y = np.array(Y)\n return X, Y\ntest_filename = path\nX,Y = load_CIFAR_batch(test_filename)", "_____no_output_____" ], [ "# Data Manipulation\nmean_pytorch = np.array([0.4914, 0.4822, 0.4465])\nstd_pytorch = np.array([0.2023, 0.1994, 0.2010])\nX_pytorch = np.divide(np.subtract( X/255 , mean_pytorch[np.newaxis, :,np.newaxis,np.newaxis]), std_pytorch[np.newaxis, :,np.newaxis,np.newaxis])\n\n# Run Prediction and Evaluation\nprediction_cnn = predict_usingCNN(torch.from_numpy(X_pytorch).float())\nacc_cnn = sum(prediction_cnn == Y)/len(X_pytorch)\nprint(\"CNN Accuracy= %f\"%(acc_cnn))", "CNN Accuracy= 0.966100\n" ] ], [ [ "I found that the accuracy on the final test data is a little higher than the 
accuracy when I was training on the validation data. \n\nI checked the code in `CNN_Exercise.ipynb`, and I suppose it is the way we calculate the validation accuracy: the last batch of every epoch usually does not have the normal size (the number of samples in the data set is not divisible by the batch size), but we calculate the accuracy by accumulating each batch's accuracy (averaged over its own batch size), where the last batch of an epoch is smaller than the normal batch size, and then dividing the accumulated batch accuracies by the number of steps. \n\nSo the last batch in every epoch will heavily affect (`lead to underestimation`) the calculation of the final accuracy in our validation.", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ec7f9bbddd0a7887357fe09a4efb036cf90b4f2b
13,552
ipynb
Jupyter Notebook
ICCT_si/examples/04/.ipynb_checkpoints/SS-08-Modalna_analiza_sistema_masa_vzmet_dusilka-checkpoint.ipynb
ICCTerasmus/ICCT
fcd56ab6b5fddc00f72521cc87accfdbec6068f6
[ "BSD-3-Clause" ]
6
2021-05-22T18:42:14.000Z
2021-10-03T14:10:22.000Z
ICCT_si/examples/04/SS-08-Modalna_analiza_sistema_masa_vzmet_dusilka.ipynb
ICCTerasmus/ICCT
fcd56ab6b5fddc00f72521cc87accfdbec6068f6
[ "BSD-3-Clause" ]
null
null
null
ICCT_si/examples/04/SS-08-Modalna_analiza_sistema_masa_vzmet_dusilka.ipynb
ICCTerasmus/ICCT
fcd56ab6b5fddc00f72521cc87accfdbec6068f6
[ "BSD-3-Clause" ]
2
2021-05-24T11:40:09.000Z
2021-08-29T16:36:18.000Z
36.33244
233
0.492473
[ [ [ "\nfrom IPython.display import HTML\ntag = HTML('''<script>\ncode_show=true; \nfunction code_toggle() {\n if (code_show){\n $('div.input').hide()\n } else {\n $('div.input').show()\n }\n code_show = !code_show\n} \n$( document ).ready(code_toggle);\n</script>\nToggle cell visibility <a href=\"javascript:code_toggle()\">here</a>.''')\ndisplay(tag)\n\n# Hide the code completely\n\n# from IPython.display import HTML\n# tag = HTML('''<style>\n# div.input {\n# display:none;\n# }\n# </style>''')\n# display(tag)", "_____no_output_____" ] ], [ [ "## Modalna analiza sistema masa-vzmet-dušilka\n\nTa interaktivni primer se navezuje na sistem masa-vzmet-dušilka, predstavljenem v primeru [Modalna analiza](SS-02-Modalna_analiza.ipynb); grafično so prikazane modalne oblike sistema, ki predstavljajo odziv sistema.\n\nDinamično matriko sistema lahko zapišemo kot:\n\n$$ A=\n\\begin{bmatrix}\n0 && 1 \\\\\n-\\frac{k}{m} && -\\frac{c}{m}\n\\end{bmatrix}$$\n\nin njen karakteristični polinom (matrika $A$ je zapisana v kanonični vodljivostni obliki) kot:\n\n$$\\lambda^2+\\frac{c}{m}\\lambda+\\frac{k}{m}.$$\n\nSledi, da so lastne vrednosti in z njimi povezane modalne oblike enake\n\n$$\\lambda_{1,2}=-\\frac{c}{m}\\pm\\frac{1}{m}\\sqrt{c^2-4km}.$$\n\nČlen znotraj kvadratnega korena je kritičen, saj odloča o tem, kakšen bo odziv sistema; npr. v primeru vrednosti tega člena $c\\ge2\\sqrt{km}$ bo imel sistem zgolj realne lastne vrednosti in v odzivu sistema ne bo oscilacij.\n\nPrimer omogoča vpogled v to, kaj se dogaja z modalnimi oblikami sistema, ko spreminjamo vrednosti parametrov $k$, $m$, in $c$.", "_____no_output_____" ] ], [ [ "#Preparatory Cell \nimport control\nimport numpy\nfrom IPython.display import display, Markdown\nimport ipywidgets as widgets\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\n%matplotlib inline\n\n#print a matrix latex-like\ndef bmatrix(a):\n \"\"\"Returns a LaTeX bmatrix - by Damir Arbula (ICCT project)\n\n :a: numpy array\n :returns: LaTeX bmatrix as a string\n \"\"\"\n if len(a.shape) > 2:\n raise ValueError('bmatrix can at most display two dimensions')\n lines = str(a).replace('[', '').replace(']', '').splitlines()\n rv = [r'\\begin{bmatrix}']\n rv += [' ' + ' & '.join(l.split()) + r'\\\\' for l in lines]\n rv += [r'\\end{bmatrix}']\n return '\\n'.join(rv)\n\n\n# Display formatted matrix: \ndef vmatrix(a):\n if len(a.shape) > 2:\n raise ValueError('bmatrix can at most display two dimensions')\n lines = str(a).replace('[', '').replace(']', '').splitlines()\n rv = [r'\\begin{vmatrix}']\n rv += [' ' + ' & '.join(l.split()) + r'\\\\' for l in lines]\n rv += [r'\\end{vmatrix}']\n return '\\n'.join(rv)\n\n\n#matrixWidget is a matrix looking widget built with a VBox of HBox(es) that returns a numPy array as value !\nclass matrixWidget(widgets.VBox):\n def updateM(self,change):\n for irow in range(0,self.n):\n for icol in range(0,self.m):\n self.M_[irow,icol] = self.children[irow].children[icol].value\n #print(self.M_[irow,icol])\n self.value = self.M_\n\n def dummychangecallback(self,change):\n pass\n \n \n def __init__(self,n,m):\n self.n = n\n self.m = m\n self.M_ = numpy.matrix(numpy.zeros((self.n,self.m)))\n self.value = self.M_\n widgets.VBox.__init__(self,\n children = [\n widgets.HBox(children = \n [widgets.FloatText(value=0.0, layout=widgets.Layout(width='90px')) for i in range(m)]\n ) \n for j in range(n)\n ])\n \n #fill in widgets and tell interact to call updateM each time a children changes value\n for irow in range(0,self.n):\n for icol in 
range(0,self.m):\n self.children[irow].children[icol].value = self.M_[irow,icol]\n self.children[irow].children[icol].observe(self.updateM, names='value')\n #value = Unicode('[email protected]', help=\"The email value.\").tag(sync=True)\n self.observe(self.updateM, names='value', type= 'All')\n \n def setM(self, newM):\n #disable callbacks, change values, and reenable\n self.unobserve(self.updateM, names='value', type= 'All')\n for irow in range(0,self.n):\n for icol in range(0,self.m):\n self.children[irow].children[icol].unobserve(self.updateM, names='value')\n self.M_ = newM\n self.value = self.M_\n for irow in range(0,self.n):\n for icol in range(0,self.m):\n self.children[irow].children[icol].value = self.M_[irow,icol]\n for irow in range(0,self.n):\n for icol in range(0,self.m):\n self.children[irow].children[icol].observe(self.updateM, names='value')\n self.observe(self.updateM, names='value', type= 'All') \n\n #self.children[irow].children[icol].observe(self.updateM, names='value')\n\n \n#overlaod class for state space systems that DO NOT remove \"useless\" states (what \"professor\" of automatic control would do this?)\nclass sss(control.StateSpace):\n def __init__(self,*args):\n #call base class init constructor\n control.StateSpace.__init__(self,*args)\n #disable function below in base class\n def _remove_useless_states(self):\n pass", "_____no_output_____" ], [ "#define the sliders for m, k and c\nm = widgets.FloatSlider(\n value=4,\n min=0.1,\n max=10.0,\n step=0.1,\n description='$m$ [kg]:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f',\n)\nk = widgets.FloatSlider(\n value=1,\n min=0,\n max=10.0,\n step=0.1,\n description='$k$ [N/m]:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f',\n)\nc = widgets.FloatSlider(\n value=4,\n min=0,\n max=10.0,\n step=0.1,\n description='$c$ [Ns/m]:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f',\n)", "_____no_output_____" ], [ "#function that make all the computations\ndef main_callback(m, k, c):\n if c**2-4*k*m >= 0:\n eig1 = -c/m+1/m*numpy.sqrt(c**2-4*k*m)\n eig2 = -c/m-1/m*numpy.sqrt(c**2-4*k*m)\n else:\n eig1 = -c/m+1j*(1/m*numpy.sqrt(-c**2+4*k*m))\n eig2 = -c/m-1j*(1/m*numpy.sqrt(-c**2+4*k*m))\n \n if numpy.real([eig1,eig2])[0] == 0 and numpy.real([eig1,eig2])[1] == 0:\n T = numpy.linspace(0,20,1000)\n else:\n if min(numpy.abs(numpy.real([eig1,eig2]))) != 0:\n T = numpy.linspace(0,7*1/min(numpy.abs(numpy.real([eig1,eig2]))),1000)\n else:\n T = numpy.linspace(0,7*1/max(numpy.abs(numpy.real([eig1,eig2]))),1000)\n if numpy.isreal(eig1):\n if eig1 == eig2:\n mode1 = numpy.exp(eig1*T)\n mode2 = T*numpy.exp(eig2*T)\n else:\n mode1 = numpy.exp(eig1*T)\n mode2 = numpy.exp(eig2*T)\n else:\n mode1 = numpy.exp(eig1.real*T)*numpy.cos(abs(eig1.imag)*T)\n mode2 = numpy.exp(eig2.real*T)*numpy.sin(abs(eig2.imag)*T)\n \n fig = plt.figure(figsize=[16, 5])\n fig.set_label('Modalne oblike')\n g1 = fig.add_subplot(121)\n g2 = fig.add_subplot(122)\n \n g1.plot(T,mode1)\n g1.grid()\n g1.set_xlabel('čas [s]')\n g1.set_ylabel('prva modalna oblika')\n \n g2.plot(T,mode2)\n g2.grid()\n g2.set_xlabel('čas [s]')\n g2.set_ylabel('druga modalna oblika')\n \n # print('The eigenvalues are: -%.3f+%.3fj -%.3f-%.3fj' %(abs(eig1.real),abs(eig1.imag),abs(eig2.real),abs(eig2.imag)))\n modesString = 'Lastni vrednosti sta $' + str(numpy.around(eig1,decimals=3)) + '$ in $' + 
str(numpy.around(eig2,decimals=3)) + '$ '\n if numpy.isreal(eig1):\n if eig1 == eig2:\n modesString = modesString + 's pripadajočimi modalnimi oblikami $e^{' + str(numpy.around(eig1,decimals=3))\\\n + ' t}$ in $te^{' + str(numpy.around(eig2,decimals=3)) + ' t}$.'\n else:\n modesString = modesString + 's pripadajočimi modalnimi oblikami $e^{' + str(numpy.around(eig1,decimals=3))\\\n + ' t}$ in $e^{' + str(numpy.around(eig2,decimals=3)) + ' t}$.'\n else:\n modesString = modesString + 's pripadajočimi modalnimi oblikami $e^{' + str(numpy.around(numpy.real(eig1),decimals=3))\\\n + ' t} \\cos{(' + str(numpy.around(abs(numpy.imag(eig1)),decimals=3)) + 't)}$ in $e^{'\\\n + str(numpy.around(numpy.real(eig2),decimals=3)) + ' t} \\sin{(' + str(numpy.around(abs(numpy.imag(eig2)),decimals=3)) + 't)}$.'\n \n display(Markdown(modesString))\n\n \nout = widgets.interactive_output(main_callback,{'m':m,'k':k,'c':c})\nsliders = widgets.HBox([k,m,c])\ndisplay(out,sliders)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec7fa984e5c323a82f4468be72bc8ddc526a12e7
2,199
ipynb
Jupyter Notebook
Day_4_assignment.ipynb
priiiiiiii/let-s-upgrade
b73f873bd7723096bb1cd51034b1855bd555c07d
[ "Apache-2.0" ]
null
null
null
Day_4_assignment.ipynb
priiiiiiii/let-s-upgrade
b73f873bd7723096bb1cd51034b1855bd555c07d
[ "Apache-2.0" ]
null
null
null
Day_4_assignment.ipynb
priiiiiiii/let-s-upgrade
b73f873bd7723096bb1cd51034b1855bd555c07d
[ "Apache-2.0" ]
null
null
null
24.988636
239
0.445202
[ [ [ "<a href=\"https://colab.research.google.com/github/priiiiiiii/let-s-upgrade/blob/master/Day_4_assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Assignment 1", "_____no_output_____" ], [ "Q1. Print the first Armstrong number in the range of 1042000 to 702648265 and exist the loop as soon as you incounter", "_____no_output_____" ] ], [ [ " \nupper = 702648265\nlowers=1042000\nfor num in range (lowers , upper + 1):\n o= len (str(num))\n sum = 0\n temp = num\n while temp > 0:\n x = temp % 10\n sum += x ** o\n temp //= 10\n \n if num == sum:\n print(num)\n break", "1741725\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ] ]
ec7fb06a3691802163938b8762c5729b64dacd6b
2,971
ipynb
Jupyter Notebook
01.NLP Assignment/Procedural_Calculator.ipynb
mebsahle/NLP-Lab
9ad6d5ecb1988823a40f0ab0c5844ad3db4cd97f
[ "MIT" ]
null
null
null
01.NLP Assignment/Procedural_Calculator.ipynb
mebsahle/NLP-Lab
9ad6d5ecb1988823a40f0ab0c5844ad3db4cd97f
[ "MIT" ]
null
null
null
01.NLP Assignment/Procedural_Calculator.ipynb
mebsahle/NLP-Lab
9ad6d5ecb1988823a40f0ab0c5844ad3db4cd97f
[ "MIT" ]
null
null
null
27.009091
89
0.465163
[ [ [ "## Welcome to Pythonista Procedural Calculator", "_____no_output_____" ] ], [ [ "\"\"\"\n Python Calculator using Procedural Programming\n by Mebatsion Sahle\n April 09, 2021\n\"\"\"\n\n\ndef add(x, y):\n return x+y\n\ndef subtract(x, y):\n return x - y\n\ndef multiply(x, y):\n return x * y\n\ndef division(x, y):\n return x / y\n\n\nprint(\"Welcome to Procedural Python Calculator\")\nprint(\"Operations\")\nprint(\"Hit 1. For aaddition\")\nprint(\"Hit 2. for subtraction\")\nprint(\"Hit 3. for multiplication\")\nprint(\"Hit 4. for division\")\nprint(\"Hit 0. to terminate the program\")\n\nwhile True:\n try: \n print(\"select your choice number\")\n choice = int(input())\n if choice == 1:\n print(\"Nice now enter two numbers\")\n number_1, number_2 = map(int, input().split())\n print(f\"{number_1} + {number_2} = {add(number_1, number_2)}\")\n \n elif choice == 2:\n print(\"Nice now enter two numbers\")\n number_1, number_2 = map(int, input().split())\n print(f\"{number_1} - {number_2} = {subtract(number_1, number_2)}\")\n \n elif choice == 3:\n print(\"Nice now enter two numbers\")\n number_1, number_2 = map(int, input().split())\n print(f\"{number_1} * {number_2} = {multiply(number_1, number_2)}\")\n \n elif choice == 4:\n print(\"Nice now enter two numbers\")\n number_1, number_2 = map(int, input().split())\n print(f\"{number_1} / {number_2} = {division(number_1, number_2)}\")\n elif choice == 0:\n break\n except EOFError:\n break", "_____no_output_____" ] ], [ [ "### Mebatsion Sahle", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec7fb980253a662f06af8c28fa3c375e83c96af3
394,898
ipynb
Jupyter Notebook
Naive_Bayes.ipynb
BenjaminChanChunHo/Data-Science-Proof-of-Concept
f5bbacbb4b1106781c67b4dce54510fc1fac2837
[ "MIT" ]
1
2020-10-17T08:21:35.000Z
2020-10-17T08:21:35.000Z
Naive_Bayes.ipynb
BenjaminChanChunHo/Data-Science-Proof-of-Concept
f5bbacbb4b1106781c67b4dce54510fc1fac2837
[ "MIT" ]
null
null
null
Naive_Bayes.ipynb
BenjaminChanChunHo/Data-Science-Proof-of-Concept
f5bbacbb4b1106781c67b4dce54510fc1fac2837
[ "MIT" ]
null
null
null
314.159109
119,816
0.899642
[ [ [ "[Benjamin Chan](https://www.linkedin.com/in/benjamin-chan-chun-ho) - [Data Science Proof of Concept](https://github.com/BenjaminChanChunHo/Data-Science-Proof-of-Concept)\n\nThe theories are summarized from [Reference](#Reference) at the end. The data analysis and Python coding are done by me. The core program is in [Class Definition](#Class-Definition). Important terminologies are highlighted in the first mention.", "_____no_output_____" ], [ "# Introduction\n\nA `Naive Bayes classifier` is a probabilistic model based on the `Bayes' theorem` with the assumption of `conditional independence` between the features. It is highly scalable since the conditional independence implies that each distribution can be independently estimated as one-dimensional distribution. It can handle mixed variable types separately. The computation grows linearly with the number of features. Despite strong assumption, they often outperform sophisticated classifiers.\n\nSuppose the goal is to classify an observation into one of $K$ classes, where $K\\geq 2$, i.e. there is a set of classes $C\\in \\{C_1,\\dots,C_K\\}$. Assume that there are $J$ features, i.e. $X=(X_1,\\dots,X_J)'$. In other words, $X_j$ is a random variable and $X$ is a random vector. Note that $X=x$ means $(X_1=x_1,\\dots,X_J=x_J)'$.\n\nFor illustration, the 10th most popular data called [abalone](https://archive.ics.uci.edu/ml/datasets/Abalone) in [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php) is used. The original goal is to predict the age of abalone from physical measurements. To turn it into a classification problem, let's treat Sex as the target class with unique labels being F (female), M (male) and I (infant). In this data, there are $K=3$ classes and $J=8$ variables. \n\nThe variables are highly correlated, as indicated by the correlation matrix [here](#EDA). Making conditional independence assumption in Naive Bayes classifier clearly simplifies the matter. In fact, I have once applied Bayesian Structural Equation Model (SEM) to this data to group highly correlated variables into latent variables and assess interrelationship among them. Please visit [this repository](https://github.com/BenjaminChanChunHo/Bayesian-Latent-Variable-Model-On-Abalone-Data) if interested.\n\nOn the other hand, the bell shape in histograms [here](#EDA) suggests that Gaussian (or normal) distribution can be a candidate for estimating conditional marginal densities mentioned [later](#Prob-Estimation). To be more rigorous, you should look at conditional distributions [here](#KDE-Plot). Kernel density estimation of continuous distribution is an alternative that is outside the scope of this study. For the time being, let's work for the implementation of Gaussian Naive Bayes classifier in [Class Definition](#Class-Definition).", "_____no_output_____" ], [ "# Data: Preprocessing", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\n\nprint(f'pandas=={pd.__version__}')\nprint(f'numpy=={np.__version__}')\nprint(f'seaborn=={sns.__version__}')", "pandas==1.0.5\nnumpy==1.18.5\nseaborn==0.11.0\n" ] ], [ [ "For reproducibility, there is no need to download data yourself. The following code chunk would automatically download online and put it in a repository. Note that Rings (the number of rings) plus 1.5 gives the age in years. Sex is the target class and the rest are considered to be predictors. 
Data is split into training set (70%) and testing set (30%) randomly.", "_____no_output_____" ] ], [ [ "import os\nfrom urllib.request import urlretrieve\n\nData_URL = 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'\nData_Path = './Data/abalone.csv'\n\nif not os.path.exists('./Data'): # See if the folder Data exists\n os.mkdir('./Data') # If not, create it \n \nif not os.path.exists(Data_Path): # See if the data has been downloaded\n urlretrieve(Data_URL, Data_Path) # If not, download it\n \nCol_Name = ['Sex', 'Length', 'Diameter', 'Height', 'Whole_Weight', 'Shucked_Weight', 'Viscera_Weight', 'Shell_Weight', 'Rings']\nData = pd.read_csv(Data_Path, names = Col_Name)\n\nLabel = 'Sex'\nPredictor_List = [Col for Col in Data.columns if Col != Label]\nprint(f'The label is {Label} while the predictors are \\n\\t{\", \".join(Predictor_List)}.')\n\nX = Data.loc[:, Predictor_List] # Predictors\ny = Data[Label] # Labels\n\nfrom sklearn.model_selection import train_test_split # Train-test split of data\nX_Train, X_Test, y_Train, y_Test = train_test_split(X, y, test_size = 0.3, random_state = 0)\n\nFull_Train = pd.concat([y_Train, X_Train], axis = 1) # Full training data\nFull_Test = pd.concat([y_Test, X_Test], axis = 1) # Full testing data\n\nData.head()", "The label is Sex while the predictors are \n\tLength, Diameter, Height, Whole_Weight, Shucked_Weight, Viscera_Weight, Shell_Weight, Rings.\n" ] ], [ [ "# Concept of Bayes' Theorem\n\nThe Bayes' theorem states that\n$$\\mathbb{P}(A\\mid B) = \\frac{\\mathbb{P}(B\\mid A)\\ \\mathbb{P}(A)}{\\mathbb{P}(B)}$$\n\nwhere $A$ and $B$ are events. For notational convenience, $\\mathbb{P}$ denotes the term probability for a discrete variable, but the term density for a continuous variable.\n\n# Concept of Prior and Posterior Probability\n\nLet $\\pi_k = \\mathbb{P}(\\mathcal{C} = C_k)$ be the `prior probability` that a randomly chosen example comes from the class $C_k$. Let $f_k(x) = \\mathbb{P}(X = x\\mid \\mathcal{C} = C_k)$ be the `conditional joint density (or probability)`. Note that $f_k(x)$ is sometimes called the `likelihood`. Let $f_{kj}(x_j) = \\mathbb{P}(X_j=x_j\\mid \\mathcal{C} = C_k)$ be the `conditional marginal density (or probability)`. \n\nLet $p_k(x)=\\mathbb{P}(\\mathcal{C} = C_k\\mid X=x)$ be the `posterior probability` that an observation $X = x$ belongs to the class $C_k$. Using the Bayes' theorem gives\n\n$$\\begin{align*}\np_k(x) & = \\mathbb{P}(\\mathcal{C} = C_k\\mid X=x) \\\\[3pt]\n& = \\frac{\\mathbb{P}(\\mathcal{C} = C_k)\\ \\mathbb{P}(X=x\\mid \\mathcal{C} = C_k)}{\\mathbb{P}(X=x)} \\\\[3pt]\n& = \\frac{\\mathbb{P}(\\mathcal{C} = C_k)\\ \\mathbb{P}(X=x\\mid \\mathcal{C} = C_k)}{\\sum_{l=1}^K\\mathbb{P}(\\mathcal{C} = C_l)\\ \\mathbb{P}(X=x\\mid \\mathcal{C} = C_l)} \\\\[3pt]\n& = \\frac{\\pi_kf_k(x)}{\\sum_{l=1}^K\\pi_lf_l(x)} \\\\[3pt]\n& \\propto \\pi_kf_k(x)\n\\end{align*} $$\n\nwhere the proportionality comes from the fact that $\\mathbb{P}(X=x)$ is a constant that does not depend on $\\mathcal{C}$ when the data $X=x$ is observed. Note that the `law of total probability` is used in $\\mathbb{P}(X=x) = \\sum_{l=1}^K\\mathbb{P}(\\mathcal{C} = C_l)\\ \\mathbb{P}(X=x\\mid \\mathcal{C} = C_l)$. 
Informally speaking, \n> Posterior probability is proportional to prior probability times likelihood.\n\n# Concept of Conditional Independence\n\nThe chain rule of probability states that\n$$\\mathbb{P}(A \\cap B) = \\mathbb{P}(A)\\ \\mathbb{P}(B \\mid A)$$\n\nwhere $\\cap$ stands for intersection and $B \\mid A$ stands for $B$ given $A$. Using the chain rule repeatedly gives \n\n$$\\begin{align*} \nf_k(x) & = \\mathbb{P}(X_1=x_1,X_2=x_2,\\dots,X_J=x_J\\mid \\mathcal{C} = C_k) \\\\\n& = \\mathbb{P}(x_1\\mid C_k)\\ \\mathbb{P}(x_2 \\mid x_1,C_k)\\cdots \\mathbb{P}(x_J\\mid x_1,\\dots, x_{J-1}, C_k).\n\\end{align*}$$\n\nThe Naive Bayes classifier assumes that the features are conditionally independent, i.e.\n\n$$\\mathbb{P}(X_j = x_j\\mid X_1=x_1,\\dots,X_{j-1}=x_{j-1}, \\mathcal{C} = C_k) = \\mathbb{P}(X_j = x_j\\mid \\mathcal{C} = C_k) = f_{kj}(x_j).$$\n\nIn other words, it simplifies the likelihood to be\n$$f_k(x)=\\prod_{j=1}^J f_{kj}(x_j).$$\n> Variables are conditionally independent if and only if the conditional joint density (or probability) is the product of conditional marginal ones.\n\n# Concept of Maximum a Posteriori Estimation\n\nUnder the `maximum a posteriori` (MAP) estimation, the Naive Bayes classifier generates the predicted class label $\\hat{y}$ as \n$$\\begin{align*} \n\\hat{y} & = \\mathop{\\arg\\max}_{k\\in \\{1,\\dots,K\\}} \\pi_k \\prod_{j=1}^J f_{kj}(x_j) \\\\\n& = \\mathop{\\arg\\max}_{k\\in \\{1,\\dots,K\\}} \\mathbb{P}(\\mathcal{C} = C_k)\\prod_{j=1}^J \\mathbb{P}(X_j=x_j\\mid \\mathcal{C} = C_k) \n\\end{align*}$$", "_____no_output_____" ], [ "<a id = 'EDA'></a>\n# Data: Exploratory Data Analysis", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nprint('Correlation matrix is created from the training data:')\ndisplay(X_Train.corr().round(3))\n\nfig, axes = plt.subplots(2, 4, figsize = (15, 6))\nfig.suptitle('Unconditional Histogram of Each Predictor')\nfig.subplots_adjust(wspace = 0.3, hspace = 0.3)\n\nfor Col, ax in zip(Predictor_List, axes.flat):\n sns.histplot(data = Full_Train, x = Col, ax = ax) # Plot histograms", "Correlation matrix is created from the training data:\n" ] ], [ [ "<a id = 'Prob-Estimation'></a>\n# Concept of Probability and Density Estimation\nThe Naive Bayes classifier handles mixed distributions seamlessly. By conditional independence, $f_k(x)=\\prod_{j=1}^J f_{kj}(x_j)$. Without loss of generality, let's consider one distribution at a time as if any $f_{kj}$ comes from that form. The results can always be combined by the product operation.\n\n## Prior Probability\n\nThe prior probability is usually estimated by the proportion of class occurrence in the training data. Let $\\mathcal{C}^{(i)}$ be the observed class of example $i\\in\\{1,\\dots,N\\}$, where $N$ is the number of examples. A prior probability is estimated by\n\n$$\\hat{\\pi}_k = \\frac{1}{N}\\sum_{i=1}^N \\mathbb{1}(\\mathcal{C}^{(i)} = C_k) \\triangleq \\frac{N_k}{N}$$\n\nwhere $N_k=\\sum_{i=1}^N \\mathbb{1}(\\mathcal{C}^{(i)} = C_k)$ is the number of examples in class $C_k$. The empirical prior probabilities of the abalone data can be found [here](#Fit).\n\n## Continuous Distribution\n\nIf $X_j$ is continuous, the conditional marginal density $f_{kj}(x_j)$ can be estimated using any kernel or parametric method, depending on the probability distribution of a variable. There are no restrictions on the distribution form of $f_{kj}$ in the Naive Bayes classifier. One needs to check the empirical distribution before making any assumption about a variable. 
In fact, there are goodness-of-fit tests such as the Kolmogorov–Smirnov (KS) test.\n\nFor the time being, consider a common continuous distribution. Let $Y$ follow a Gaussian distribution with mean $\\mu$ and variance $\\sigma^2$, denoted by $Y\\sim N(\\mu,\\sigma^2)$, where $\\Omega =\\{(\\mu,\\sigma^2): \\mu\\in \\mathbb{R}, \\sigma > 0\\}$. Its probability density function (pdf) is \n$$f(y) = \\frac{1}{\\sqrt{2\\pi\\sigma^2}} \\exp\\bigg\\{-\\frac{(y-\\mu)^2}{2\\sigma^2}\\bigg\\}$$\nwhere $y\\in \\mathbb{R}$ and $(\\mu,\\sigma^2) \\in \\Omega$. \n\nIn the context of Naive Bayes classifier, let $\\mu_{kj}$ and $\\sigma^2_{kj}$ be the mean and variance of $X_j$ under $\\mathcal{C} = C_k$. Assume that $X_j\\mid C_k \\sim N(\\mu_{kj}, \\sigma^2_{kj})$. Then\n$$f_{kj}(x_j) = \\frac{1}{\\sqrt{2\\pi\\sigma^2_{kj}}} \\exp\\bigg\\{-\\frac{(x_j-\\mu_{kj})^2}{2\\sigma^2_{kj}}\\bigg\\}.$$\n\nIf the distribution of a continuous random variable is far from being Gaussian, the `kernel density estimate` can be utilized. Common smoothing algorithms use a Gaussian kernel. Its technical details are outside the scope of this study. However, you can find my empirical results [here](#KDE-Plot).\n\nFor the sake of completeness, it is important to mention the possibility of some other distributions, even though they are not applicable to the abalone data. Please refer to some details in [Appendix](#Appendix).", "_____no_output_____" ], [ "# Class Definition", "_____no_output_____" ] ], [ [ "class Gaussian_Naive_Bayes:\n '''\n Data and output are stored to trade memory for efficiency. \n Here Data should be interpreted as the training data.\n Such implementation can be changed.\n \n For simplicity, only Gaussian distribution is considered.\n '''\n \n def __init__(self, Predictor_List, Label, Class_List):\n '''\n Predictor_List:\n the list of predictor(s) assumed to have Gaussian distribution(s) \n Label: \n the column name in Data that gives the class labels\n Class_List:\n the list of unique classes\n '''\n \n self.Predictor_List = Predictor_List\n self.Label = Label\n \n Class_Dict = dict() # Initialize Class_Dict \n Predictor_Dict = dict() # Initialize Predictor_Dict\n \n for k, C_k in enumerate(Class_List, 1):\n Class_Dict[k] = C_k # Key: Class index; Value: Class\n \n for j, X_j in enumerate(Predictor_List, 1):\n Predictor_Dict[j] = X_j # Key: Predictor index; Value: Predictor\n \n self.Class_Dict = Class_Dict\n self.Predictor_Dict = Predictor_Dict\n \n def Calculate_Gauss_Density(self, x, Mean, SD):\n '''\n If X follows a Gaussian distribution with parameters Mean and SD^2, i.e. X ~ N(Mean, SD^2), \n the output is its density of X = x.\n '''\n \n V = SD ** 2 # Variance = SD^2\n Density = np.exp(-(x - Mean) ** 2 / (2 * V)) / (np.sqrt(2 * np.pi * V))\n\n return np.array(Density)\n\n def Calculate_Unconditional_Gauss(self, Var, x):\n '''\n Similar to the function Calculate_Gauss_Density. Mean and SD are estimated from Data.\n Var is the variable under investigation and x is value(s) of it.\n The output is its unconditional Gaussian density of X = x.\n '''\n \n Mean = np.mean(self.Data[Var]) # Compute sample mean \n SD = np.std(self.Data[Var], ddof = 1) # Compute sample standard deviation\n Density = self.Calculate_Gauss_Density(x, Mean, SD)\n\n return Density\n \n def Calculate_Conditional_Marginal_Mean_SD(self):\n '''\n mu_kj and sd_kj are the mean and standard deviation of X_j under class C_k.\n Each output is a dictionary with key being tuple (k, j). The value is either Mean or SD. 
\n '''\n \n Cond_Marginal_Mean_Dict = dict()\n Cond_Marginal_SD_Dict = dict()\n \n for k, C_k in self.Class_Dict.items(): # Loop over classes\n for j, X_j in self.Predictor_Dict.items(): # Loop over predictors\n Row_Ind = self.Data[self.Label] == C_k # Row indicators of class k\n Data_kj = self.Data.loc[Row_Ind, X_j].copy() # Select relevant data for mean & sd\n \n # Store mean & sd of class k & predictor j in two dictionaries\n Cond_Marginal_Mean_Dict[(k, j)] = np.mean(Data_kj)\n Cond_Marginal_SD_Dict[(k, j)] = np.std(Data_kj, ddof = 1)\n \n self.Cond_Marginal_Mean_Dict = Cond_Marginal_Mean_Dict\n self.Cond_Marginal_SD_Dict = Cond_Marginal_SD_Dict\n \n def Calculate_Conditional_Marginal_Gauss(self, Data):\n '''\n Use the function Calculate_Conditional_Marginal_Mean_SD to obtain mu_kj and sd_kj,\n mean and standard deviation of X_j under class C_k.\n The output is a dictionary with key being tuple (k, j) and value being Gaussian densities of \n X_j = x_j and C_k.\n '''\n \n Density = dict()\n \n for k, C_k in self.Class_Dict.items(): \n for j, X_j in self.Predictor_Dict.items():\n Mean_kj = self.Cond_Marginal_Mean_Dict[(k, j)] \n SD_kj = self.Cond_Marginal_SD_Dict[(k, j)]\n \n # Store Gaussian densities of X_j & C_k in a dictionary\n Density_kj = self.Calculate_Gauss_Density(Data.loc[:, X_j], Mean_kj, SD_kj)\n Density[(k, j)] = Density_kj\n \n return Density\n \n def Calculate_Prior_Probability(self, Data):\n '''\n Estimate the prior probabilities by class proportions.\n The output is a dictionary with key being class label and value being its prior probability.\n '''\n \n Prior_Probability = dict()\n \n for k, C_k in self.Class_Dict.items():\n Prior_Probability[C_k] = (Data.loc[:, self.Label] == C_k).mean()\n \n assert sum(Prior_Probability.values()) == 1, 'The sum of prior probabilities should be 1!'\n \n return Prior_Probability\n \n def Fit(self, Data):\n '''\n Data is the training data used to estimate Gaussian densities.\n \n Wrapper function to \n 1) call Calculate_Conditional_Marginal_Mean_SD;\n 2) fit Data into Calculate_Prior_Probability;\n '''\n \n self.Data = Data\n self.Calculate_Conditional_Marginal_Mean_SD() # Objective 1\n \n Prior_Probability = self.Calculate_Prior_Probability(Data) # Objective 2\n self.Prior_Probability = Prior_Probability\n \n def Plot_Fit(self, axes):\n '''\n Plot Gaussian densities of each predictor by class into axes.\n '''\n \n for j, ax in enumerate(axes.flat, 1):\n Col = self.Predictor_Dict[j] # Current column name\n Min, Max = self.Data[Col].min(), self.Data[Col].max() # Minimum & maximum in x-axis\n x = np.linspace(Min, Max, 1000) # x values in x-axis\n ax.set_xlabel(Col); ax.set_ylabel('Density') # Set labels of x-axis & y-axis\n \n for k, C_k in self.Class_Dict.items(): \n Mean_kj = self.Cond_Marginal_Mean_Dict[(k, j)] # Extract mean of class k, variable j\n SD_kj = self.Cond_Marginal_SD_Dict[(k, j)] # Extract sd of class k, variable j\n \n Density_kj = self.Calculate_Gauss_Density(x, Mean_kj, SD_kj)\n ax.plot(x, Density_kj, label = C_k) # Plot conditional marginal Gaussian densities\n \n ax.legend(title = self.Label) # Add legend\n \n def Predict(self, Data):\n '''\n Compute posterior probabilities and make prediction.\n '''\n \n Density = self.Calculate_Conditional_Marginal_Gauss(Data)\n \n Class_List = list(self.Class_Dict.values())\n Prob = pd.DataFrame(columns = Class_List)\n \n for k, C_k in self.Class_Dict.items():\n Prior_k = self.Prior_Probability[C_k]\n Density_k = [val for key, val in Density.items() if key[0] == k]\n Likelihood_k = 
np.prod(Density_k, axis = 0) # Array of likelihood of class k per record\n Prob[C_k] = Likelihood_k * Prior_k\n \n Prob = Prob.div(Prob.sum(axis = 1), axis = 0) # Normalized by row-sums to give probabilities\n Predict = np.array(Prob.idxmax(axis = 1))\n \n return Prob, Predict\n \n def Fit_Predict(self, Data):\n '''\n Fit Data and then compute posterior probabilities. Finally make prediction.\n '''\n \n self.Fit(Data)\n Prob, Predict = self.Predict(Data)\n \n return Prob, Predict", "_____no_output_____" ] ], [ [ "<a id = 'Fit'></a>\n# Data: Fitting Gaussian Naive Bayes Model\n\nInside the object, Class_Dict is a dictionary whose key is $k$ and value is $C_k$ for $k=1,2,3$. On the other hand, Predictor_Dict is a dictionary whose key is $j$ and value is $X_j$ for $j=1,\\dots,8$. Based on the training data, the prior probability of male is estimated to be 0.3705. It is the highest among others, meaning that male abalones are most prevailing.", "_____no_output_____" ] ], [ [ "Class_List = ['F', 'M', 'I']\nAbalone_Gauss = Gaussian_Naive_Bayes(Predictor_List, Label, Class_List)\nAbalone_Gauss.Fit(Full_Train) # Fit using training data \nAbalone_Prob, Abalone_Predict = Abalone_Gauss.Predict(Full_Test) # Predict using testing data\n\nAbalone_Prior = {k: round(v, 4) for k, v in Abalone_Gauss.Prior_Probability.items()}\n\nprint(f'Abalone class dictionary: {Abalone_Gauss.Class_Dict}')\nprint(f'\\nAbalone predictor dictionary: {Abalone_Gauss.Predictor_Dict}')\nprint(f'\\nAbalone prior probabilities: {Abalone_Prior}')", "Abalone class dictionary: {1: 'F', 2: 'M', 3: 'I'}\n\nAbalone predictor dictionary: {1: 'Length', 2: 'Diameter', 3: 'Height', 4: 'Whole_Weight', 5: 'Shucked_Weight', 6: 'Viscera_Weight', 7: 'Shell_Weight', 8: 'Rings'}\n\nAbalone prior probabilities: {'F': 0.3151, 'M': 0.3705, 'I': 0.3144}\n" ] ], [ [ "# Data: Sanity Check", "_____no_output_____" ] ], [ [ "from scipy.stats import norm # Using scipy package for Gaussian random variable\n\nx = np.linspace(-3.5, 3.5, 1000)\nMean = 0; SD = 1 # Set to be standard Gaussian: N(0,1)\nDef_Density = Abalone_Gauss.Calculate_Gauss_Density(x, Mean, SD) # Gaussian densities from self-defined function\nSP_Density = norm.pdf(x, Mean, SD) # Gaussian densities from scipy method\n\nif np.allclose(Def_Density, SP_Density): # True if arrays are element-wise equal within a tolerance\n print('Using self-defined function gives the same Gaussian densities as using scipy.stats.norm.')", "Using self-defined function gives the same Gaussian densities as using scipy.stats.norm.\n" ] ], [ [ "<a id = 'KDE-Plot'></a>\n# Data: Density Estimation\n\nThe Gaussian density plots resemble the corresponding kernel density estimate plots, which suggests that they are comparable for density estimation in this application. 
Since kernel methods are outside the scope of my study, the Naive Bayes classifier using kernel density estimates is not demonstrated.", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(2, 4, figsize = (15, 6))\nfig.suptitle('Gaussian Density Estimate Plots of Each Predictor by Class')\nfig.subplots_adjust(wspace = 0.3, hspace = 0.3)\n\nAbalone_Gauss.Plot_Fit(axes)\n\nfig, axes = plt.subplots(2, 4, figsize = (15, 6))\nfig.suptitle('Kernel Density Estimate Plots of Each Predictor by Class (Default: Gaussian Kernel)')\nfig.subplots_adjust(wspace = 0.3, hspace = 0.3)\n\nfor Col, ax in zip(Predictor_List, axes.flat):\n ax.set_xlim(Full_Train[Col].min(), Full_Train[Col].max()) # Minimum & maximum in x-axis are variable's ones\n sns.kdeplot(data = Full_Train, x = Col, hue = Label, hue_order = ['F', 'M', 'I'], ax = ax)", "_____no_output_____" ] ], [ [ "# Data: Evaluation on Testing Data\n\nNote that there are very small discrepancies between sklearn and self-defined probability estimates, as shown by the side-by-side comparison of the first few records and the mean absolute deviation by class. However, all the predicted classes are the same in this case. \n\nThe classification accuracy is 52.9%, which is rather low, mainly because male and female abalones are very similar in every variable, as shown above. If they are combined to form one class called non-infant, the task can be simpler, as shown [here](#Experiment). ", "_____no_output_____" ] ], [ [ "from sklearn.naive_bayes import GaussianNB # Using sklearn to fit Gaussian Naive Bayes\nfrom utils import display_side_by_side # Self-defined: display tables side by side\n\nSK_GaussNB = GaussianNB() # Initialize\nSK_GaussNB.fit(X_Train, y_Train) # Fit data\nSK_GaussNB_Prob = SK_GaussNB.predict_proba(X_Test) # Return probability estimates (testing data)\nSK_GaussNB_Prob = pd.DataFrame(SK_GaussNB_Prob, columns = SK_GaussNB.classes_)\nSK_GaussNB_Prob = SK_GaussNB_Prob.loc[:, Class_List] # Re-order columns\nSK_GaussNB_Predict = SK_GaussNB.predict(X_Test) # Return prediction\n\ndisplay_side_by_side([SK_GaussNB_Prob.head(), Abalone_Prob.head()], captions = ['sklearn', 'self-defined'])\n\nprint('Mean absolute deviation of probability estimates by class:', \n np.absolute(SK_GaussNB_Prob - Abalone_Prob).mean().round(5).to_dict())\nprint('Proportion of the same predicted classes:', (Abalone_Predict == SK_GaussNB_Predict).mean())\n\nAbalone_Crosstab = pd.crosstab(Abalone_Predict, y_Test, rownames = ['Predicted'], colnames = ['Actual'])\nAbalone_Crosstab = Abalone_Crosstab.loc[Class_List, Class_List]\ndisplay(Abalone_Crosstab)\nprint(f'The prediction accuracy in testing data is {(Abalone_Predict == y_Test).mean():.1%}.')", "_____no_output_____" ] ], [ [ "<a id = 'Experiment'></a>\n# Data: Experiment on Combining Classes\n\nBy combining M and F into Non-I (non-infant) and keeping I (infant), it reduces to binary classification. 
Since the difference between infant and non-infant abalones is obvious, the accuracy increases from 52.9% to 77.8%.", "_____no_output_____" ] ], [ [ "Train_New = Full_Train.copy() # Create a copy of training data \nTest_New = Full_Test.copy() # Create a copy of testing data \n\nTrain_New['Sex'] = Train_New['Sex'].map({'M': 'Non-I', 'F': 'Non-I', 'I': 'I'}) # Change M & F to Non-I\nTest_New['Sex'] = Test_New['Sex'].map({'M': 'Non-I', 'F': 'Non-I', 'I': 'I'})\n\nAbalone_New = Gaussian_Naive_Bayes(Predictor_List, Label, ['I', 'Non-I'])\nAbalone_New.Fit(Train_New) # Fit new training data \nProb_New, Predict_New = Abalone_New.Predict(Test_New) # Predict using new testing data\n\nprint('New prior probabilities:', {k: round(v, 4) for k, v in Abalone_New.Prior_Probability.items()})\n\nCrosstab_New = pd.crosstab(Predict_New, Test_New['Sex'], rownames = ['Predicted'], colnames = ['Actual'])\ndisplay(Crosstab_New)\n\nTarget_New = Test_New['Sex']\nprint(f'The prediction accuracy in new testing data is {(Predict_New == Target_New).mean():.1%}.')", "New prior probabilities: {'I': 0.3144, 'Non-I': 0.6856}\n" ] ], [ [ "# Appendix\n\n## Bernoulli Distribution\nIn the case of bag of words in document classification, suppose that there is a Bernoulli feature $X_j$ associated with each of $J$ unique words in the vocabulary. The term Bernoulli means that $x_j=1$ if that word appears in the document, and $x_j=0$ otherwise. In other words, $x=(x_1,\\dots,x_J)'$ where $x_j\\in\\{0,1\\}$. In short, $X_j$ represents the presence or absence of a word.\n\nStatistically, a conditional marginal probability is given by\n\n$$\\begin{align*}\nf_{kj}(x_j) & = \\mathbb{P}(X_j = x_j\\mid \\mathcal{C} = C_k) \\\\\n& = p_{kj}^{x_j} (1-p_{kj})^{1-x_j}\n\\end{align*}$$\n\nwhere $p_{kj}=\\mathbb{P}(X_j=1\\mid \\mathcal{C} = C_k)$. In general, $p_{kj}$ is estimated by the sample proportion of $x_j=1$ in the training data.\n\n## Multinomial Distribution\nIn the case of bag of words, suppose that there are $J$ unique words in the vocabulary. Each document is thought to be drawn from a multinomial distribution of words. Let $x_j$ be the number of occurrences of word $j$, hence $\\sum_{j=1}^J x_j$ is the length of document.\n\nA multinomial distribution has the probability mass function (pmf)\n$$\\mathbb{P}(X_1=x_1,\\dots,X_J=x_J)=\\frac{(\\sum_{j=1}^J x_j)!}{\\prod_{j=1}^J x_j!}\\prod_{j=1}^J p_j^{x_j}.$$\n\nIt corresponds to obtaining $x_1,\\dots,x_J$ of the word each with probability $p_1,\\dots,p_J$ of occurrence respectively, where $0<p_j<1$ for $j=1,\\dots,J$ and $\\sum_{j=1}^J p_j=1$. \n\nIn the context of Naive Bayes classifier, assume that the length of document is independent of class. Let $p_{kj}$ be the probability of occurrence of word $j$ in class $C_k$. Then\n$$f_k(x) = \\frac{(\\sum_{j=1}^J x_j)!}{\\prod_{j=1}^J x_j!}\\prod_{j=1}^J p_{kj}^{x_j}.$$\n\n## Categorical Distribution\nAssume that a categorical variable $X^c$ has $J$ categories that are encoded into $1,\\dots,J$. By dummy encoding, it leads to a random vector $X=(X_1,\\dots,X_J)'$ where $X_j=\\mathbb{1}(X^c=j)$ for $j=1,\\dots,J$. It is also known as one-hot encoding, since only one of the $J$ variables has the value one. \n\nIn fact, a categorical distribution is a special case of multinomial distribution when $\\sum_{j=1}^J x_j=1$. 
Thus\n$$f_k(x) = \\prod_{j=1}^J p_{kj}^{x_j}$$\nwhere $p_{kj} = \\mathbb{P}(X^c=j\\mid \\mathcal{C} = C_k)=\\mathbb{P}(X_j=1\\mid \\mathcal{C} = C_k)$.", "_____no_output_____" ], [ "<a id = 'Experiment'></a>\n## Data: Unconditional Plot", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(2, 4, figsize = (15, 6))\nfig.suptitle('Unconditional Gaussian Density Plot of Each Predictor')\nfig.subplots_adjust(wspace = 0.3, hspace = 0.3)\n\nfor Col, ax in zip(Predictor_List, axes.flat):\n Min, Max = Full_Train[Col].min(), Full_Train[Col].max()\n x = np.linspace(Min, Max, 1000) \n Uncond_Gauss_Density = Abalone_Gauss.Calculate_Unconditional_Gauss(Col, x)\n ax.set_xlabel(Col); ax.set_ylabel('Density')\n ax.plot(x, Uncond_Gauss_Density) # Plot densities", "_____no_output_____" ] ], [ [ "# Reference\n1. [Wikipedia: Naive Bayes classifier](https://en.wikipedia.org/wiki/Naive_Bayes_classifier)\n2. [scipy: normal random variable](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html)\n3. [sklearn: Gaussian Naive Bayes](https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html)\n4. [sklearn: Kernel Density Estimation](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KernelDensity.html)\n5. [seaborn: kernel density estimate](https://seaborn.pydata.org/generated/seaborn.kdeplot.html#seaborn.kdeplot)\n6. [The Elements of Statistical Learning](https://web.stanford.edu/~hastie/ElemStatLearn/) by Hastie, Tibshirani and Friedman in 2001\n7. [Naive (Bayes) at Forty: The Independence Assumption in Information Retrieval](https://link.springer.com/chapter/10.1007/BFb0026666) by Lewis in 1998\n8. [A Comparison of Event Models for Naive Bayes Text Classification](https://www.semanticscholar.org/paper/A-comparison-of-event-models-for-naive-bayes-text-McCallum-Nigam/04ce064505b1635583fa0d9cc07cac7e9ea993cc) by McCallum and Nigam in 1998\n9. [Machine Learning: A Probabilistic Perspective](https://dl.acm.org/doi/book/10.5555/2380985) by Murphy in 2012", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ec7fd1c75a7a3bda0efdc946d0c39c2dae8b1429
7,676
ipynb
Jupyter Notebook
2016_05_Halifax/Day1/Exercises_Python_MNE.ipynb
mne-tools/mne-workshops
5852ab2c1b351dd13efcc794032f2967cb0819d8
[ "BSD-3-Clause" ]
21
2019-05-31T03:47:12.000Z
2022-03-06T23:08:58.000Z
2016_05_Halifax/Day1/Exercises_Python_MNE.ipynb
berdakh/mne-workshops
5852ab2c1b351dd13efcc794032f2967cb0819d8
[ "BSD-3-Clause" ]
1
2019-08-16T13:59:53.000Z
2019-08-19T16:37:35.000Z
mne-workshops-master/2016_05_Halifax/Day1/Exercises_Python_MNE.ipynb
GanshengT/INSERM_EEG_Enrico_Proc
343edc32e5c9705213189a088855c635b31ca22b
[ "CNRI-Python" ]
16
2019-08-30T11:07:10.000Z
2022-03-27T20:13:16.000Z
51.516779
4,756
0.692418
[ [ [ "# Exercise\n\nAuthor: Alexandre Gramfort\n\nWe'll use Python to explore the content of the MNE-sample-data folder", "_____no_output_____" ] ], [ [ "from __future__ import print_function", "_____no_output_____" ], [ "from mne.datasets import sample\n\ndata_path = sample.data_path()\nprint(data_path)", "/Users/alex/work/src/mne-python/examples/MNE-sample-data\n" ], [ "import glob\n\nfif_files = glob.glob(data_path + '/MEG/sample/' + '*.fif')\nprint(fif_files)", "[u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/all-trans.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/ecg_proj.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/ernoise_raw.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample-epo.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample-trans.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-ave.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-cov.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-eeg-oct-6-eeg-inv.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-eeg-oct-6-fwd.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-eeg-ico-4-fwd.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-eeg-oct-6-meg-eeg-diagnoise-inv.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-eeg-oct-6-meg-eeg-inv.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-oct-6-fwd.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-oct-6-meg-diagnoise-inv.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-oct-6-meg-fixed-inv.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-oct-6-meg-nodepth-fixed-inv.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-vol-7-fwd.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-meg-vol-7-meg-inv.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-no-filter-ave.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-oct-6p-src.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis-shrunk-cov.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_ecg-eve.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_ecg-proj.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_ecg_avg-proj.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_ecg_avg_proj.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_eog-eve.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_eog-proj.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_eog_avg_proj.fif', 
u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_filt-0-40_raw.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_filt-1-80_raw.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_filt-1HP_raw.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_filt-hp-05-lp-40_raw.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_filt-hp-05_raw.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_left-cov.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_noBad_raw_sss.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_noBad_raw_sss_nobad.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_raw-eve.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_raw-trans.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_raw.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_right-cov.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_stand_raw_sss.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_stdOrigin_raw_sss.fif', u'/Users/alex/work/src/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_stdOrigin_raw_sss_nobad.fif']\n" ] ], [ [ "# Questions\n\n- What is the type of the variable `fif_files` ?\n- Sort inplace the content of `fif_files`\n- What is the length of `fif_files` ?\n- How many files are raw files (end with `raw.fif`) ?\n- Create a new list restricted to the files of type raw?", "_____no_output_____" ] ], [ [ "subjects_dir = data_path + '/subjects'\nsubject = 'sample'\nsurf_dir = subjects_dir + \"/\" + subject + '/surf'\nprint(surf_dir)", "/Users/alex/work/src/mne-python/examples/MNE-sample-data/subjects/sample/surf\n" ] ], [ [ "# Questions\n\n- How many files is there in the folder `surf_dir` ?\n- How many files have a name that starts with `lh`?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec7fd97ae56afd25e0495475745699d8cfcf7a46
3,641
ipynb
Jupyter Notebook
notebooks/audio_adaptation.ipynb
pamamu/S2T_GetAudiosTrans
b8f6122434b1bb4edca64c8904af575db0cd7cae
[ "MIT" ]
null
null
null
notebooks/audio_adaptation.ipynb
pamamu/S2T_GetAudiosTrans
b8f6122434b1bb4edca64c8904af575db0cd7cae
[ "MIT" ]
null
null
null
notebooks/audio_adaptation.ipynb
pamamu/S2T_GetAudiosTrans
b8f6122434b1bb4edca64c8904af575db0cd7cae
[ "MIT" ]
null
null
null
24.436242
126
0.532271
[ [ [ "import json\nimport os\nimport sox", "_____no_output_____" ], [ "info_path = '/Users/pablomaciasmunoz/Dev/WS_TFG/S2T/input_audios/cadenaser_elfaro_20190313_013000_040000.json'\naudio_path = '/Users/pablomaciasmunoz/Dev/WS_TFG/S2T/input_audios/cadenaser_elfaro_20190313_013000_040000.mp3'", "_____no_output_____" ], [ "info = json.load(open(info_path, 'r'))\ntotal_info = json.load(open(info_path, 'r'))\nspeech = [i for i in info if i['value'] == 'speech']\n\n", "_____no_output_____" ], [ "output_folder = \"/Users/pablomaciasmunoz/Dev/WS_TFG/S2T/input_audios/out\"", "_____no_output_____" ], [ "def split_audio(audio_path, start, end, output_path):\n tfm = sox.Transformer()\n tfm.trim(start, end)\n tfm.build(audio_path, output_path)\n return output_path\n", "_____no_output_____" ], [ "silence_path = \"/Users/pablomaciasmunoz/Dev/WS_TFG/S2T/input_audios/silence.wav\"\ndef split_silence(duration, output_path):\n tfm = sox.Transformer()\n tfm.trim(0, duration)\n tfm.build(silence_path, output_path)\n return output_path\n", "_____no_output_____" ], [ "info = []\nfor i, j in enumerate(total_info):\n if j['value'] == 'speech':\n path = split_audio(audio_path, int(j['from']), int(j['to']), os.path.join(output_folder, '{}.wav'.format(i)))\n else:\n path = split_silence(int(j['to']) - int(j['from']), os.path.join(output_folder, '{}.wav'.format(i)))\n info.append(\n {\n \"path\": path,\n \"start_time\": int(j['from']) - 25,\n \"end_time\": int(j['to']) - 25\n }\n )\nwith open(os.path.join(output_folder, 'info.json'), 'w') as out:\n json.dump(info, out, indent=4, ensure_ascii=False)\n", "_____no_output_____" ], [ "split_silence(5, \"/Users/pablomaciasmunoz/Dev/WS_TFG/S2T/input_audios/silence2.wav\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7fe04987915e36a89d37a11b07e3c4766a6d54
9,962
ipynb
Jupyter Notebook
it6.ipynb
kojiyam/information-theory
601cf119b8c7b5d24bd61594e2a4d7b9a8c6f223
[ "MIT" ]
null
null
null
it6.ipynb
kojiyam/information-theory
601cf119b8c7b5d24bd61594e2a4d7b9a8c6f223
[ "MIT" ]
null
null
null
it6.ipynb
kojiyam/information-theory
601cf119b8c7b5d24bd61594e2a4d7b9a8c6f223
[ "MIT" ]
null
null
null
22.039823
226
0.382152
[ [ [ "<a href=\"https://colab.research.google.com/github/kojiyam/information-theory/blob/main/it6.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "$ p I(p) = p \\log_2 (1/p) $\nを関数 `pI` として定義.pI(0)=0 としたいため.\n\n単純にやると `0*np.log2(0)=nan` となる", "_____no_output_____" ] ], [ [ "def pI_(p):\n if p==0:\n return 0\n else:\n return p*np.log2(1/p)\npI = np.vectorize(pI_) # numpy arrayを引数に取るため", "_____no_output_____" ] ], [ [ "## SLD. 43\n$p_y = P_Y(y)$", "_____no_output_____" ] ], [ [ "py = np.array([1/4, 0, 1/8, 1/2, 1/8])\nnp.sum(py)", "_____no_output_____" ] ], [ [ "$ p_y \\log_2 (1/p_y) $", "_____no_output_____" ] ], [ [ "print( pI(py) )", "[0.5 0. 0.375 0.5 0.375]\n" ] ], [ [ "$ \\displaystyle \\sum_{y} p_y \\log_2 (1/p_y) $", "_____no_output_____" ] ], [ [ "np.sum( pI(py) )", "_____no_output_____" ] ], [ [ "## SLD. 46 同時確率 $P_{X,Y}(x,y)$", "_____no_output_____" ] ], [ [ "pxy = np.array([[1/8, 0, 1/16, 1/4, 1/16], [1/16, 1/16, 0, 1/4, 1/8]])\nnp.sum(pxy)", "_____no_output_____" ], [ "print(pxy)", "[[0.125 0. 0.0625 0.25 0.0625]\n [0.0625 0.0625 0. 0.25 0.125 ]]\n" ] ], [ [ "### 同時エントロピー\n\n$ H(X,Y) $", "_____no_output_____" ] ], [ [ "np.sum( pI(pxy) )", "_____no_output_____" ] ], [ [ "## 周辺確率 $ P_Y(y) $ SLD. 48\n\n$P_Y(y)$", "_____no_output_____" ] ], [ [ "py = np.sum(pxy, axis=0) # 列方向に足す\nprint(py)", "[0.1875 0.0625 0.0625 0.5 0.1875]\n" ], [ "np.sum( pI(py) )", "_____no_output_____" ] ], [ [ "# 周辺確率 $ P_X(x) $", "_____no_output_____" ] ], [ [ "print(np.sum(pxy, axis=1)) # 行方向に足す", "[0.5 0.5]\n" ] ], [ [ "## 条件付き確率 $P_{X|Y}(x|y) $ SLD. 50", "_____no_output_____" ] ], [ [ "px1y = pxy/py\nprint(px1y)", "[[0.66666667 0. 1. 0.5 0.33333333]\n [0.33333333 1. 0. 0.5 0.66666667]]\n" ], [ "print( pI(px1y) )", "[[0.389975 0. 0. 0.5 0.52832083]\n [0.52832083 0. 0. 0.5 0.389975 ]]\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec7fe0c5f8d72310b23dd4e77d49745c09f440b0
671,333
ipynb
Jupyter Notebook
notebooks/lectures/lect4-unsupervised.ipynb
sivico26/ML-biocomp
3ffc68d7490c77f56b5656af0c6c0c6418d7a6b3
[ "MIT" ]
null
null
null
notebooks/lectures/lect4-unsupervised.ipynb
sivico26/ML-biocomp
3ffc68d7490c77f56b5656af0c6c0c6418d7a6b3
[ "MIT" ]
null
null
null
notebooks/lectures/lect4-unsupervised.ipynb
sivico26/ML-biocomp
3ffc68d7490c77f56b5656af0c6c0c6418d7a6b3
[ "MIT" ]
null
null
null
470.780505
111,992
0.943183
[ [ [ "## Last class", "_____no_output_____" ], [ "![](img/fig6.png)", "_____no_output_____" ], [ "# Unsupervised Learning\n**by: Santiago Hincapie-Potes**", "_____no_output_____" ], [ "## Unsupervised vs Supervised\n### Supervised\n* More accurate\n* Labeled data required\n* Requieres human in the loop\n### Unsupervised\n* Less accurate\n* No labeled data required\n* Minimal human effort", "_____no_output_____" ], [ "## Today\n* Clustering\n* Dimensionality Reduction", "_____no_output_____" ], [ "## Cluster Analysis\nFinding groups of objects such that the objects in a group will be similar (or related) to one another and different from (or unrelated to) the objects in other groups", "_____no_output_____" ], [ "Central to all of the goals of cluster analysis is the notion of the degree of similarity (or dissimilarity) between the individual objects being clustered.", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.filterwarnings('ignore')\nimport Utils.mlutils as mlutils\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import silhouette_samples, silhouette_score\nfrom sklearn.datasets import make_blobs, make_moons\nfrom sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering, DBSCAN\nfrom IPython.display import HTML", "_____no_output_____" ], [ "X, y = make_blobs(400, centers=2, cluster_std=1.2)\nplt.scatter(X[:, 0], X[:, 1]);", "_____no_output_____" ] ], [ [ "* What clusters would you do with the following data? \n* What would the process be like?", "_____no_output_____" ], [ "## Centroid-based clustering\nClusters are represented by a central vector, which may not necessarily be a member of the data set.", "_____no_output_____" ], [ "### K-means clustering\nDefines a cluster in terms of a centroid, which is usually the mean of a group of points, and is typically applied to objects in a continous $n-$dimensional space.", "_____no_output_____" ], [ "### Algorithm\n\n```python\ndef KMeans(data, K):\n centroids = select_initial(data, K)\n while centroids_do_not_change(centroids):\n clusters = form_clusters(K) # by assigning each point to its closest centroid\n centroids = [centroid(i) for i in clusters]\n return centroids, clusters\n```", "_____no_output_____" ], [ "* Live coding? 
\n* Just continue", "_____no_output_____" ] ], [ [ "from sklearn.cluster import k_means\n\nK = 2\n\nkm = KMeans(K)\nkm.fit(X)\n\ny = km.predict(X)\nprint(y.shape, np.unique(y))", "(400,) [0 1]\n" ], [ "km.cluster_centers_", "_____no_output_____" ], [ "from Utils.lect4 import * \n\nplot_clusters(km, X, y)", "_____no_output_____" ] ], [ [ "## Evaluation\nMost common measure is Sum of Squared Error (SSE)\n* For each point, the error is the distance to the nearest cluster\n* To get SSE, we square these errors and sum them.\n$$ SSE = \\sum_{i=1}^K\\sum_{x\\in C_i} dist^2(m_i, x) $$\n* $x$ is a data point in cluster $C_i$ and $m_i$ is the representative point for cluster $C_i$\n* Given two sets of clusters, we prefer the one with the smallest error", "_____no_output_____" ] ], [ [ "X, y = make_moons(400, noise=0.03)\n\nK = 2\n\nkm = KMeans(K)\nkm.fit(X)\n\ny = km.predict(X)\n\nplot_clusters(km, X, y)", "_____no_output_____" ], [ "K = 6\n\nkm = KMeans(K)\nkm.fit(X)\n\ny = km.predict(X)\n\nplot_clusters(km, X, y)", "_____no_output_____" ] ], [ [ "## Limitations of K-means\n* K-means has problems when clusters are of differing\n + Sizes\n + Densities \n + Non-globular shapes\n\n* K-means has problems when the data contains outliers.", "_____no_output_____" ], [ "## Connectivity-based (Hierarchical) Clustering\nProduces a set of nested clusters organized as a hierarchical tree", "_____no_output_____" ], [ "### Strengths \n* Do not have to assume any particular number of clusters\n* They may correspond to meaningful taxonomies", "_____no_output_____" ], [ "Two main types of hierarchical clustering\n* **Agglomerative:** Start with the points as individual clusters and, at each step, merge the closest pair of clusters, a notion of cluster proximity is needed\n* **Divisive:** Start with one, all-inclusive cluster and, at each step slit a cluster until only sinpleton clusters of individual points remain. 
In this case we need to decide which cluster to split at each step and how to do the splitting\n\n![](img/fig7.png)", "_____no_output_____" ], [ "### Algorithm\n```python\ndef agglomerative(data):\n clusters = data\n M = proximity_matrix(clusters)\n while len(clusters) != 1:\n a, b = closest_clusters(M)\n clusters = merge(clusters, a, b)\n M = proximity_matrix(clusters)\n```", "_____no_output_____" ], [ "### Proximity between clusters\n* Graph theory rules\n + MIN\n + MAX\n + Average\n* Centroids\n + Distance between centroids\n* Ward: Similarity of two cluster is based on the increase in squared error when two are merged", "_____no_output_____" ], [ "### Key issues\n* Lack of a global objective function\n* Merging decisions are final\n* Different schemes have problems with one or more of the following:\n + Sensitive to noise and outliers\n + Difficulty handling clusters of different sizes\n + Breaking large clusters", "_____no_output_____" ], [ "#### Strengths and weaknesses\n* Typically used because the underlyning application.\n* Computational expensive\n* Merging are finals :c", "_____no_output_____" ] ], [ [ "from sklearn.cluster import AgglomerativeClustering\n\nX, y = make_moons(400, noise=0.03)\n\ncluster = AgglomerativeClustering(affinity='euclidean', linkage='average') \n\ncluster.fit(X)\n\nplt.scatter(X[:,0], X[:,1], c=cluster.labels_, cmap='rainbow')", "_____no_output_____" ], [ "cluster = AgglomerativeClustering(affinity='euclidean', linkage='single') \n\ncluster.fit(X)\n\nplt.scatter(X[:,0], X[:,1], c=cluster.labels_, cmap='rainbow')", "_____no_output_____" ] ], [ [ "## Density-based\nClustering locates regions of high density that are separated from one another by regions of low density.", "_____no_output_____" ], [ "# DBSCAN\n* Density = # of points within a $B(p, \\varepsilon)$\n* **core point**: it has at least a specified number of points $M$ within $\\varepsilon$, i.e., is the center of some ball\n* **border point** is not a core point, but is in the neighborhood of a core point, i.e., is in the ball\n* **noise point** is any point that is not a core point nor a border point", "_____no_output_____" ], [ "### Algorithm\n```python\n def DBSCAN(data, M, eps):\n labels = []\n for i in data:\n lb = label(i, M, eps) # as core, border, or noise\n if lb is noise:\n del i\n labels.append(lb)\n clusters = merge_core_point(within=eps, labels, data)\n return assign_border_points(clusters, eps, data, labels)\n``` ", "_____no_output_____" ], [ "### Strengths and weaknesses\n* Resistant to noise and can handle cluster of arbitrary shape and size\n* trouble when the clusters have widely varying densities\n* trouble with high-dimensional data\n* parameters!", "_____no_output_____" ] ], [ [ "from sklearn.cluster import DBSCAN\n\nX, y = make_moons(400, noise=0.03)\n\ncluster = DBSCAN(eps=0.2, min_samples=10) \n\ncluster.fit(X)\n\nplt.scatter(X[:,0], X[:,1], c=cluster.labels_, cmap='rainbow')", "_____no_output_____" ] ], [ [ "## Cluster evaluation\n1. Determining the clustering tendency of a set of data, i.e., distinguishing whether non-random structure actually exists in the data.\n2. Comparing the results of a cluster analysis to externally known results, e.g., to externally given class labels.\n3. Evaluating how well the results of a cluster analysis fit the data without reference to external information.-- Use only the data\n4. Comparing the results of two different sets of cluster analyses to determine which is better.\n5. 
Determining the ‘correct’ number of clusters", "_____no_output_____" ], [ "# Dimensionality Reduction", "_____no_output_____" ], [ "## The Curse of Dimensionality", "_____no_output_____" ], [ "It turns out that many things behave very differently in high-dimensional space. \n* If you pick a random point in a unit square (a $1 \\times 1$ square), it will have only about a $0.4\\%$ chance of being located less than $0.001$ from a border, but in a $10,000-$dimensional unit hypercube (a $1 \\times 1 \\times\\dots\\times 1$ cube, with ten thousand 1s), this probability is greater than $99.999999\\%$.\n\n* if you pick two points randomly in a unit square, the distance between these two points will be, on average, roughly 0.52. If you pick two random points in a unit 3D cube, the average distance will be roughly 0.66. But what about two points picked randomly in a 1,000,000-dimensional hypercube? Well, the average distance, believe it or not, will be about 408.25\n\n\nThis fact implies that high-dimensional datasets are at risk of being very sparse\n\n\nUnfortunately, in practice, the number of training instances required to reach a given density grows exponentially with the number of dimensions.", "_____no_output_____" ], [ "## Main Approaches for Dimensionality Reduction\n* Projection\n* Manifold Learning", "_____no_output_____" ], [ "### Projection\ntraining instances are not spread out uniformly across all dimensions. Many features are almost constant, while others are highly correlated As a result, all training instances actually lie within (or close to) a much lower-dimensional subspace of the high-dimensional space", "_____no_output_____" ], [ "### Manifold learning", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_swiss_roll\nfrom mpl_toolkits.mplot3d import Axes3D\n\nX, t = make_swiss_roll(1500)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nax.scatter(X[:, 0], X[:, 1], X[:, 2], c=t)", "_____no_output_____" ] ], [ [ "### Intuitive idea", "_____no_output_____" ] ], [ [ "np.random.seed(1)\nX = np.dot(np.random.random(size=(2, 2)), np.random.normal(size=(2, 200))).T+10\n\n# center data on 0,0\nX=X-np.mean(X, axis=0)\nprint(X.shape)\nplt.scatter(X[:,0], X[:,1])", "(200, 2)\n" ], [ "plt.figure(figsize=(15,3))\n\nunit_vector = lambda angle: np.array([np.cos(angle), np.sin(angle)])\n\nfor i in range(3):\n plt.subplot(1,3,i+1)\n angle = np.random.random()*np.pi*2 if i!=0 else 1.8\n v = unit_vector(angle)\n c = X @ (v.reshape(-1,1))/(np.linalg.norm(v)**2)\n Xp = np.repeat(v.reshape(-1,2),len(X),axis=0)*c\n\n plt.scatter(X[:,0], X[:,1], color=\"blue\", alpha=.5, label=\"original data\")\n plt.scatter(Xp[:,0], Xp[:,1], color=\"red\", alpha=.5, label=\"projected data\")\n plt.axvline(0, color=\"gray\")\n plt.axhline(0, color=\"gray\")\n plt.plot([0,v[0]], [0,v[1]], color=\"black\", lw=3, label=\"projection vector\")\n plt.axis('equal')\n plt.ylim(-2,2)\n plt.title(\"$\\\\alpha$=%.2f rads, proj std=%.3f\"%(angle, np.std(c)))\n if i==2:\n plt.legend(loc=\"center left\", bbox_to_anchor=(1.01,.5))", "_____no_output_____" ] ], [ [ "### Just brute force", "_____no_output_____" ], [ "### Preserving the Variance\nVariance measures how spread out some data is!", "_____no_output_____" ] ], [ [ "def get_maxmin_projections(X):\n stds = []\n angles = np.linspace(0,np.pi*2, 100)\n for a in angles:\n v = np.array([np.cos(a), np.sin(a)])\n c = X @ (v.reshape(-1,1))/(np.linalg.norm(v)**2)\n stds.append(np.std(c))\n v2 = unit_vector(angles[np.argmin(stds)])\n v1 = 
unit_vector(angles[np.argmax(stds)])\n \n return angles, stds, v1, v2\nangles, stds, v1, v2 = get_maxmin_projections(X)\n\nplt.plot(angles, stds)\nplt.xlabel(\"projection $\\\\alpha$ (in rads)\")\nplt.ylabel(\"projection std\")", "_____no_output_____" ], [ "plt.scatter(X[:,0], X[:,1], color=\"blue\", alpha=.5, label=\"original data\")\nplt.axvline(0, color=\"gray\")\nplt.axhline(0, color=\"gray\")\nplt.plot([0,v1[0]], [0,v1[1]], color=\"black\", lw=5, label=\"max std projection vector\")\nplt.plot([0,v2[0]], [0,v2[1]], color=\"black\", ls=\"--\", lw=2, label=\"min std projection vector\")\nplt.axis('equal')\nplt.ylim(-2,2)\nplt.legend(loc=\"center left\", bbox_to_anchor=(1.01,.5))", "_____no_output_____" ] ], [ [ "## PCA\nPrincipal Component Analysis (PCA) is by far the most popular dimensionality reduction algorithm. First it identifies the hyperplane that lies closest to the data, and then it projects the data onto it.", "_____no_output_____" ] ], [ [ "from sklearn.decomposition import PCA\npca = PCA(n_components=1) \npca.fit(X)\nprint(\"sklearn PCA components\")\nprint(pca.components_)\nprint(\"brute force components\")\nprint(v1)\nprint(v2)", "sklearn PCA components\n[[-0.94446029 -0.32862557]]\nbrute force components\n[-0.93969262 -0.34202014]\n[-0.32706796 0.94500082]\n" ], [ "c = pca.transform(X)\nprint(c.shape)", "(200, 1)\n" ] ], [ [ "### PCA for Compression", "_____no_output_____" ] ], [ [ "pca = PCA(n_components=1)\npca.fit(X)\nXt = pca.transform(X)[:,0]\nplt.scatter(X[:,0], X[:,1], color=\"blue\", alpha=.5, label=\"$\\mathbf{X}$: original data\")\nplt.scatter(Xt, [0]*len(Xt), color=\"red\", alpha=.5, label=\"$\\mathbf{X_t}$: reduced data\")\nplt.axis(\"equal\");\nplt.legend(loc=\"center left\", bbox_to_anchor=(1.01,.5))", "_____no_output_____" ], [ "v0 = pca.components_[0]\nc = X.dot(v0)\nXr = np.r_[[i*v0 for i in c]]\nplt.scatter(X[:,0], X[:,1], color=\"blue\", alpha=.5, label=\"original data\")\nplt.scatter(Xr[:,0], Xr[:,1], color=\"red\", alpha=.5, label=\"reconstructed data from largest component\")\nplt.legend(loc=\"center left\", bbox_to_anchor=(1.01,.5))", "_____no_output_____" ] ], [ [ "# PCA for Compression", "_____no_output_____" ] ], [ [ "mnist = pd.read_csv(\"data/mnist1.5k.csv.gz\", compression=\"gzip\", header=None).values\nd=mnist[:,1:785]\nc=mnist[:,0]\nprint(d.shape, c.shape)", "(1500, 784) (1500,)\n" ], [ "plt.imshow(d[9].reshape(28,28), cmap=plt.cm.gray)", "_____no_output_____" ], [ "perm = np.random.permutation(range(d.shape[0]))[0:50]\nrandom_imgs = d[perm]\nrandom_labels = c[perm] \nfig = plt.figure(figsize=(10,6))\nfor i in range(random_imgs.shape[0]):\n ax=fig.add_subplot(5,10,i+1)\n plt.imshow(random_imgs[i].reshape(28,28), interpolation=\"nearest\", cmap = plt.cm.Greys_r)\n ax.set_title(int(random_labels[i]))\n ax.set_xticklabels([])\n ax.set_yticklabels([])", "_____no_output_____" ], [ "mnist = pd.read_csv(\"data/mnist1.5k.csv.gz\", compression=\"gzip\", header=None).values\n\nX=mnist[:,1:785]\ny=mnist[:,0]\n\npca = PCA(n_components=60)\nXp = pca.fit_transform(X)", "_____no_output_____" ], [ "X.shape, y.shape", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\n\nXtr, Xts, ytr, yts = train_test_split(X,y,test_size=.3)\nXtr.shape, Xts.shape, ytr.shape, yts.shape", "_____no_output_____" ], [ "from sklearn.naive_bayes import GaussianNB\n\ndt = GaussianNB()\n\ndt.fit(Xtr, ytr)\ndt.score(Xtr, ytr), dt.score(Xts, yts)", "_____no_output_____" ], [ "from tqdm import tqdm\ndtr, dts = [], []\nfor n_components in tqdm(range(10, 
200, 5)):\n pca = PCA(n_components=n_components)\n pca.fit(Xtr)\n\n Xt_tr = pca.transform(Xtr)\n Xt_ts = pca.transform(Xts)\n\n dt.fit(Xt_tr,ytr)\n ypreds_tr = dt.predict(Xt_tr)\n ypreds_ts = dt.predict(Xt_ts)\n ypreds_tr.shape, ypreds_ts.shape\n dtr.append(np.mean(ytr==ypreds_tr))\n dts.append(np.mean(yts==ypreds_ts))", "100%|██████████| 38/38 [00:18<00:00, 1.50it/s]\n" ], [ "cs = np.arange(10, 200, 5)\nplt.plot(cs, dtr, label=\"train\")\nplt.plot(cs, dts, label=\"test\")\nplt.xlabel(\"n components\")\nplt.ylabel(\"% acierto\")\nplt.legend()", "_____no_output_____" ], [ "best_cs = cs[np.argmax(dts)]\nbest_cs", "_____no_output_____" ], [ "pca = PCA(n_components=best_cs)\npca.fit(Xtr)\n\nXt_tr = pca.transform(Xtr)\nXt_ts = pca.transform(Xts)\ndt.fit(Xt_tr,ytr)\nypreds_tr = dt.predict(Xt_tr)\nypreds_ts = dt.predict(Xt_ts)\nypreds_tr.shape, ypreds_ts.shape\nnp.mean(ytr==ypreds_tr),np.mean(yts==ypreds_ts)", "_____no_output_____" ], [ "from sklearn.pipeline import Pipeline\n\nestimator = Pipeline(((\"pca\", PCA(n_components=best_cs)), (\"naive\", dt)))\nestimator.fit(Xtr, ytr)\nestimator.score(Xtr, ytr), estimator.score(Xts, yts)", "_____no_output_____" ], [ "from sklearn.model_selection import cross_val_score\npip = Pipeline([(\"PCA\", PCA(n_components=best_cs)), (\"gaussian\", GaussianNB())])\nscores = cross_val_score(pip, X,y, cv=5 )\nprint(\"%.2f +/- %.4f\"%(np.mean(scores), np.std(scores)))", "0.83 +/- 0.0268\n" ], [ "cols=20\nplt.figure(figsize=(15,3))\nfor i in range(len(pca.components_)):\n plt.subplot(np.ceil(len(pca.components_)/15.),15,i+1)\n plt.imshow((pca.components_[i].reshape(28,28)), cmap = plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])", "_____no_output_____" ], [ "pca = PCA(n_components=best_cs)\npca.fit(Xtr)\nXp = pca.transform(X)", "_____no_output_____" ], [ "plt.figure(figsize=(10,6))\nfor i in range(6):\n plt.subplot(3,6,i+1)\n k = np.random.randint(len(X))\n plt.imshow((np.sum((pca.components_*Xp[k].reshape(-1,1)), axis=0)).reshape(28,28), cmap=plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])\n plt.subplot(3,6,6+i+1)\n plt.imshow(X[k].reshape(28,28), cmap=plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec7ff92d346854201f66423988206b6bfa694f35
134,896
ipynb
Jupyter Notebook
tests/scatter.ipynb
xblahoud/pandas2pgfplots
7b5622d9e6e78faa1670bd9456c1f8ef2422901d
[ "MIT" ]
1
2020-10-23T17:41:28.000Z
2020-10-23T17:41:28.000Z
tests/scatter.ipynb
xblahoud/pandas2pgfplots
7b5622d9e6e78faa1670bd9456c1f8ef2422901d
[ "MIT" ]
2
2020-06-26T04:27:48.000Z
2020-06-26T06:05:30.000Z
tests/scatter.ipynb
xblahoud/pandas2pgfplots
7b5622d9e6e78faa1670bd9456c1f8ef2422901d
[ "MIT" ]
null
null
null
120.121104
3,190
0.639811
[ [ [ "import pandas as pd\nimport numpy as np\n\nfrom pandas2pgfplots import scatter, Plot", "_____no_output_____" ], [ "df = pd.DataFrame(np.random.rand(50, 4), columns=['a', 'b', 'c', 'd'])\ndf.head()", "_____no_output_____" ], [ "plot = scatter(df, diagonal=True)\nplot", "_____no_output_____" ], [ "plot.tikzcode", "_____no_output_____" ], [ "axis_args = {\"title style\" : \"{font=\\\\tiny, color=red}\", \"title\":\"Random points\"}\naxis_args = {\"title style\" : {\"font\" : r\"\\tiny\", \"color\" : \"red\"}, \"title\":\"Random points\"}\naxis_args[\"colorbar style\"] = {\"opacity\" : .1}\nmarks_dict = {\"color\" : \"red\"}\nscatter(df, c=\"c\", colorbar=True, diagonal=True, marks_dict=marks_dict, **axis_args)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ec7ffa6ef1b0849fa51cf16da5c73ee35f387447
427,298
ipynb
Jupyter Notebook
Week 3 Capstone - 3.ipynb
Soumya44/Coursera_Capstone
71066179b762f0d3f2093bf54b720e8e5d4e0f17
[ "MIT" ]
1
2020-01-03T20:10:26.000Z
2020-01-03T20:10:26.000Z
Week 3 Capstone - 3.ipynb
Soumya44/Coursera_Capstone
71066179b762f0d3f2093bf54b720e8e5d4e0f17
[ "MIT" ]
1
2020-01-13T15:01:53.000Z
2020-01-13T15:01:53.000Z
Week 3 Capstone - 3.ipynb
Soumya44/Coursera_Capstone
71066179b762f0d3f2093bf54b720e8e5d4e0f17
[ "MIT" ]
12
2019-02-02T11:43:30.000Z
2020-08-01T19:17:51.000Z
74.898861
130,712
0.653125
[ [ [ "### Explore and cluster the neighborhoods in Toronto. You can decide to work with only boroughs that contain the word Toronto and then replicate the same analysis we did to the New York City data. It is up to you.\n\n### Just make sure:\n\n#### 1- to add enough Markdown cells to explain what you decided to do and to report any observations you make.\n#### 2-to generate maps to visualize your neighborhoods and how they cluster together.", "_____no_output_____" ] ], [ [ "# importing old libraries (also used in the two first parts)\nimport pandas as pd\nimport numpy as np\nfrom bs4 import BeautifulSoup\nimport requests\nfrom bs4 import BeautifulSoup as bs", "_____no_output_____" ], [ "# importing new libraries\nimport numpy as np # library to handle data in a vectorized manner\nimport pandas as pd # library for data analsysis\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\n\n# !conda install -c conda-forge geopy --yes # uncomment this line if you haven't completed the Foursquare API lab\nfrom geopy.geocoders import Nominatim # convert an address into latitude and longitude values\n\nimport requests # library to handle requests\n\n# !conda install -c conda-forge folium=0.5.0 --yes # uncomment this line if you haven't completed the Foursquare API lab\nimport folium # map rendering library\n\nprint('Libraries imported.')", "Libraries imported.\n" ] ], [ [ "### Load the pandas dataframe (created in part 2 of the assignment)", "_____no_output_____" ] ], [ [ "df_toronto = pd.read_csv('toronto_base.csv')\ndf_toronto.head(7)", "_____no_output_____" ] ], [ [ "### Create a map of Toronto", "_____no_output_____" ] ], [ [ "# for the city Toronto, latitude and longtitude are manually extracted via google search\ntoronto_latitude = 43.6532; toronto_longitude = -79.3832\nmap_toronto = folium.Map(location = [toronto_latitude, toronto_longitude], zoom_start = 10.7)\n\n# add markers to map\nfor lat, lng, borough, neighborhood in zip(df_toronto['Latitude'], df_toronto['Longitude'], df_toronto['Borough'], df_toronto['Neighbourhood']):\n label = '{}, {}'.format(neighborhood, borough)\n label = folium.Popup(label, parse_html=True)\n folium.CircleMarker(\n [lat, lng],\n radius=5,\n popup=label,\n color='blue',\n fill=True,\n fill_color='#3186cc',\n fill_opacity=0.7).add_to(map_toronto) \n \n\nmap_toronto", "_____no_output_____" ] ], [ [ "### Create a new data frame with neighborhoods in Scarborough ", "_____no_output_____" ] ], [ [ "# @hiddel_cell\nCLIENT_ID = '0MJA3NYYG3U2ZY1LTZN2OYEHS3Y3WVSON2GBSO3IL4EDYVIR' # your Foursquare ID\nCLIENT_SECRET = 'WGWSAF2TKVUQPE3PD0N3EOITFVBY5EYP1VCZI3BMUG0ROUS5' # your Foursquare Secret\nVERSION = '20180605' # Foursquare API version\n\n", "_____no_output_____" ], [ "scarborough_data = df_toronto[df_toronto['Borough'] == 'Scarborough'].reset_index(drop=True)\nscarborough_data.head(7)", "_____no_output_____" ] ], [ [ "### Create a map of Scarborough and its neighbourhoods", "_____no_output_____" ] ], [ [ "address_scar = 'Scarborough,Toronto'\nlatitude_scar = 43.773077\nlongitude_scar = -79.257774\nprint('The geograpical coordinate of Scarborough are {}, {}.'.format(latitude_scar, longitude_scar))", "The geograpical coordinate of Scarborough are 43.773077, -79.257774.\n" ], [ "map_scarb = folium.Map(location=[latitude_scar, longitude_scar], zoom_start=12)\n\n# add markers to map\nfor lat, lng, label in zip(scarborough_data['Latitude'], scarborough_data['Longitude'], scarborough_data['Neighbourhood']):\n label = folium.Popup(label, 
parse_html=True)\n folium.CircleMarker(\n [lat, lng],\n radius=5,\n popup=label,\n color='blue',\n fill=True,\n fill_color='#3186cc',\n fill_opacity=0.7).add_to(map_scarb) \n \nmap_scarb", "_____no_output_____" ] ], [ [ "### Get the top 100 venues in the neighborhood 'Steeles West', from Scarborough", "_____no_output_____" ] ], [ [ "neighborhood_latitude = scarborough_data.loc[0, 'Latitude'] # neighbourhood latitude value\nneighborhood_longitude = scarborough_data.loc[0, 'Longitude'] # neighbourhood longitude value\n\nneighborhood_name = scarborough_data.loc[0, 'Neighbourhood'] # neighbourhood name\n\nprint('Latitude and longitude values of \"{}\" are {}, {}.'.format(neighborhood_name, \n neighborhood_latitude, \n neighborhood_longitude))", "Latitude and longitude values of \"Steeles West\" are 43.799525200000005, -79.3183887.\n" ], [ "LIMIT = 100\nradius = 500\nurl = 'https://api.foursquare.com/v2/venues/explore?client_id={}&client_secret={}&ll={},{}&v={}&radius={}&limit={}'.format(CLIENT_ID, CLIENT_SECRET, latitude_scar, longitude_scar, VERSION, radius, LIMIT)", "_____no_output_____" ], [ "results = requests.get(url).json()\nresults", "_____no_output_____" ], [ "def get_category_type(row):\n try:\n categories_list = row['categories']\n except:\n categories_list = row['venue.categories']\n \n if len(categories_list) == 0:\n return None\n else:\n return categories_list[0]['name']", "_____no_output_____" ], [ "import json # library to handle JSON files\nfrom pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe\n\nvenues = results['response']['groups'][0]['items'] \nnearby_venues = json_normalize(venues) # flatten JSON\n\n# filter columns\nfiltered_columns = ['venue.name', 'venue.categories', 'venue.location.lat', 'venue.location.lng']\nnearby_venues =nearby_venues.loc[:, filtered_columns]\n\n# filter the category for each row\nnearby_venues['venue.categories'] = nearby_venues.apply(get_category_type, axis=1)\n\n# clean columns\nnearby_venues.columns = [col.split(\".\")[-1] for col in nearby_venues.columns]\n\nnearby_venues.head(10)", "_____no_output_____" ], [ "print('{} venues were returned by Foursquare.'.format(nearby_venues.shape[0]))", "43 venues were returned by Foursquare.\n" ], [ "def getNearbyVenues(names, latitudes, longitudes, radius=500):\n \n venues_list=[]\n for name, lat, lng in zip(names, latitudes, longitudes):\n print(name)\n \n # create the API request URL\n url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(\n CLIENT_ID, \n CLIENT_SECRET, \n VERSION, \n lat, \n lng, \n radius, \n LIMIT)\n \n # make the GET request\n results = requests.get(url).json()[\"response\"]['groups'][0]['items']\n \n # return only relevant information for each nearby venue\n venues_list.append([(\n name, \n lat, \n lng, \n v['venue']['name'], \n v['venue']['location']['lat'], \n v['venue']['location']['lng'], \n v['venue']['categories'][0]['name']) for v in results])\n\n nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])\n nearby_venues.columns = ['Neighborhood', \n 'Neighborhood Latitude', \n 'Neighborhood Longitude', \n 'Venue', \n 'Venue Latitude', \n 'Venue Longitude', \n 'Venue Category']\n \n return(nearby_venues)", "_____no_output_____" ] ], [ [ "### Get venues for each neighborhood in Scarborough", "_____no_output_____" ] ], [ [ "scarborough_venues = getNearbyVenues(names=scarborough_data['Neighbourhood'],\n latitudes=scarborough_data['Latitude'],\n 
longitudes=scarborough_data['Longitude']\n )", "Steeles West\nScarborough Village\nWoburn\nHighland Creek, Rouge Hill, Port Union\nBirch Cliff\nMaryvale, Wexford\nAgincourt North, Milliken\nCedarbrae\nTam O'Shanter\nCliffcrest, Cliffside\nMorningside, West Hill\nRouge, Malvern\nAgincourt\nIonview, Kennedy Park\nDorset Park, Scarborough Town Centre, Wexford Heights\nUpper Rouge\nClairlea, Golden Mile, Oakridge\n" ], [ "scarborough_venues.head(10)", "_____no_output_____" ], [ "scarborough_venues.tail(10)", "_____no_output_____" ], [ "scarborough_venues.groupby('Neighborhood').count()", "_____no_output_____" ], [ "print('There are {} uniques categories.'.format(len(scarborough_venues['Venue Category'].unique())))", "There are 56 uniques categories.\n" ], [ "# one hot encoding\nscarb_onehot = pd.get_dummies(scarborough_venues[['Venue Category']], prefix=\"\", prefix_sep=\"\")\n\n# add neighborhood column back to dataframe\nscarb_onehot['Neighborhood'] = scarborough_venues['Neighborhood'] \n\n# move neighborhood column to the first column\nfixed_columns = [scarb_onehot.columns[-1]] + list(scarb_onehot.columns[:-1])\nscarb_onehot = scarb_onehot[fixed_columns]\n\nscarb_onehot.head()", "_____no_output_____" ], [ "scarb_onehot.shape", "_____no_output_____" ], [ "scarb_grouped = scarb_onehot.groupby('Neighborhood').mean().reset_index()\nscarb_grouped.head(7)", "_____no_output_____" ] ], [ [ "### Get top 10 venues per neighborhood", "_____no_output_____" ] ], [ [ "def return_most_common_venues(row, num_top_venues):\n row_categories = row.iloc[1:]\n row_categories_sorted = row_categories.sort_values(ascending=False)\n \n return row_categories_sorted.index.values[0:num_top_venues]", "_____no_output_____" ], [ "num_top_venues = 10\n\nindicators = ['st', 'nd', 'rd']\n\n# create columns according to number of top venues\ncolumns = ['Neighborhood']\nfor ind in np.arange(num_top_venues):\n try:\n columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))\n except:\n columns.append('{}th Most Common Venue'.format(ind+1))\n\n# create a new dataframe\nneighborhoods_venues_sorted = pd.DataFrame(columns=columns)\nneighborhoods_venues_sorted['Neighborhood'] = scarb_grouped['Neighborhood']\n\nfor ind in np.arange(scarb_grouped.shape[0]):\n neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(scarb_grouped.iloc[ind, :], num_top_venues)\n\nneighborhoods_venues_sorted", "_____no_output_____" ] ], [ [ "### Run k-means to cluster the neighborhoods into 5 clusters", "_____no_output_____" ] ], [ [ "# import k-means from clustering stage\nfrom sklearn.cluster import KMeans\n\nscarb_data = scarborough_data.drop(16)\n# set number of clusters\nkclusters = 5\n\nscarb_grouped_clustering = scarb_grouped.drop('Neighborhood', 1)\n\n\n# run k-means clustering\nkmeans = KMeans(n_clusters=kclusters, random_state=0).fit(scarb_grouped_clustering)\n\n# check cluster labels generated for each row in the dataframe\nkmeans.labels_[0:10] \n#len(kmeans.labels_)#=16\n#scarborough_data.shape", "_____no_output_____" ] ], [ [ "### Include kmeans.labels_ into the original Scarborough dataframe", "_____no_output_____" ] ], [ [ "scarb_merged = scarb_data\n\n# add clustering labels\nscarb_merged['Cluster Labels'] = kmeans.labels_\n\n# merge toronto_grouped with toronto_data to add latitude/longitude for each neighborhood\nscarb_merged = scarb_merged.join(neighborhoods_venues_sorted.set_index('Neighborhood'), on='Neighbourhood')\n\nscarb_merged", "_____no_output_____" ] ], [ [ "### Visualize the clusters in the map", 
"_____no_output_____" ] ], [ [ "# Matplotlib and associated plotting modules\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\n\n# create map\nmap_clusters = folium.Map(location = [latitude_scar, longitude_scar], zoom_start=11)\n\n# set color scheme for the clusters\nx = np.arange(kclusters)\nys = [i+x+(i*x)**2 for i in range(kclusters)]\ncolors_array = cm.rainbow(np.linspace(0, 1, len(ys)))\nrainbow = [colors.rgb2hex(i) for i in colors_array]\n\n# add markers to the map\nmarkers_colors = []\nfor lat, lon, poi, cluster in zip(scarb_merged['Latitude'], scarb_merged['Longitude'], scarb_merged['Neighbourhood'], scarb_merged['Cluster Labels']):\n label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)\n folium.CircleMarker(\n [lat, lon],\n radius=5,\n popup=label,\n color=rainbow[cluster-1],\n fill=True,\n fill_color=rainbow[cluster-1],\n fill_opacity=0.7).add_to(map_clusters)\n \nmap_clusters", "_____no_output_____" ] ], [ [ "### Examine each of the five clusters", "_____no_output_____" ] ], [ [ "scarb_merged.loc[scarb_merged['Cluster Labels'] == 0, scarb_merged.columns[[1] + list(range(5, scarb_merged.shape[1]))]]", "_____no_output_____" ], [ "scarb_merged.loc[scarb_merged['Cluster Labels'] == 1, scarb_merged.columns[[1] + list(range(5, scarb_merged.shape[1]))]]", "_____no_output_____" ], [ "scarb_merged.loc[scarb_merged['Cluster Labels'] == 2, scarb_merged.columns[[1] + list(range(5, scarb_merged.shape[1]))]] ", "_____no_output_____" ], [ "scarb_merged.loc[scarb_merged['Cluster Labels'] == 3, scarb_merged.columns[[1] + list(range(5, scarb_merged.shape[1]))]]", "_____no_output_____" ], [ "scarb_merged.loc[scarb_merged['Cluster Labels'] == 4, scarb_merged.columns[[1] + list(range(5, scarb_merged.shape[1]))]]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec80036a5b872b82532b29e2d753a5f21eb52853
18,933
ipynb
Jupyter Notebook
testing/mpl_pydata_workshop-master/elaborate_example.ipynb
aravindhnivas/FELion-Spectrum-Analyser
430f16884482089b2f717ea7dd50625078971e48
[ "MIT" ]
null
null
null
testing/mpl_pydata_workshop-master/elaborate_example.ipynb
aravindhnivas/FELion-Spectrum-Analyser
430f16884482089b2f717ea7dd50625078971e48
[ "MIT" ]
null
null
null
testing/mpl_pydata_workshop-master/elaborate_example.ipynb
aravindhnivas/FELion-Spectrum-Analyser
430f16884482089b2f717ea7dd50625078971e48
[ "MIT" ]
1
2019-01-25T20:37:57.000Z
2019-01-25T20:37:57.000Z
32.144312
124
0.491998
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec8010a49a9e4aa39ff2f731faad10e09f1372c9
458,374
ipynb
Jupyter Notebook
notebooks/p4_Boston_AirBnB_with CRISP-DM_data_cleaning.ipynb
dalpengholic/Udacity_Boston-AirBNB-Data
ef918f4ddf8041a9f646e6fe786730f191746c2b
[ "MIT" ]
null
null
null
notebooks/p4_Boston_AirBnB_with CRISP-DM_data_cleaning.ipynb
dalpengholic/Udacity_Boston-AirBNB-Data
ef918f4ddf8041a9f646e6fe786730f191746c2b
[ "MIT" ]
null
null
null
notebooks/p4_Boston_AirBnB_with CRISP-DM_data_cleaning.ipynb
dalpengholic/Udacity_Boston-AirBNB-Data
ef918f4ddf8041a9f646e6fe786730f191746c2b
[ "MIT" ]
null
null
null
72.619455
115,156
0.589556
[ [ [ "## Boston AirBnB open data with the CRISP-DM (Cross Industry Process for Data Mining)", "_____no_output_____" ], [ "To get general AirBnB business insight, the Boston data from AirBnB will be analyzed based on the CRISP-DM. The subprocesses of the CRISP-DM are written below.\n\n`1`. Business Understanding\n\n`2`. Data Understanding\n\n`3`. Prepare Data\n\n`4`. Data Modeling\n\n`5`. Evaluate the Results\n\n`6`. Deploy", "_____no_output_____" ] ], [ [ "# import necessary libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns\nfrom pandas.tseries.holiday import USFederalHolidayCalendar as calendar #To check holidays in the U.S\nimport time\nimport copy\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\n", "_____no_output_____" ], [ "# Load the AirBnB files\ndf_listing = pd.read_csv('./listings.csv')\ndf_calendar = pd.read_csv('./calendar.csv')\ndf_reviews = pd.read_csv('./reviews.csv')", "_____no_output_____" ] ], [ [ "## 1. Business Understanding\n\nBusiness Questions \n - What are the most relevant features of the groups over the 90 percentile and under the 10 percentile?\n - What are the most relevant features to be cared for the two traveler group and more than four traveler group?\n - How is the price of AirBnb adjusted in terms of seasons and locations?", "_____no_output_____" ], [ "## 2. Data Understanding\n\nCheck and play around the dataframes loaded before to get some idea or to get the business quesations progressed.\n### 2.1. For df_listing", "_____no_output_____" ] ], [ [ "# Setting option for seeing the whole columns of dataframes\npd.set_option('display.max_columns', None) ", "_____no_output_____" ], [ "print(df_listing.shape)\ndf_listing.head(n=5)", "(3585, 95)\n" ], [ "df_listing.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3585 entries, 0 to 3584\nData columns (total 95 columns):\nid 3585 non-null int64\nlisting_url 3585 non-null object\nscrape_id 3585 non-null int64\nlast_scraped 3585 non-null object\nname 3585 non-null object\nsummary 3442 non-null object\nspace 2528 non-null object\ndescription 3585 non-null object\nexperiences_offered 3585 non-null object\nneighborhood_overview 2170 non-null object\nnotes 1610 non-null object\ntransit 2295 non-null object\naccess 2096 non-null object\ninteraction 2031 non-null object\nhouse_rules 2393 non-null object\nthumbnail_url 2986 non-null object\nmedium_url 2986 non-null object\npicture_url 3585 non-null object\nxl_picture_url 2986 non-null object\nhost_id 3585 non-null int64\nhost_url 3585 non-null object\nhost_name 3585 non-null object\nhost_since 3585 non-null object\nhost_location 3574 non-null object\nhost_about 2276 non-null object\nhost_response_time 3114 non-null object\nhost_response_rate 3114 non-null object\nhost_acceptance_rate 3114 non-null object\nhost_is_superhost 3585 non-null object\nhost_thumbnail_url 3585 non-null object\nhost_picture_url 3585 non-null object\nhost_neighbourhood 3246 non-null object\nhost_listings_count 3585 non-null int64\nhost_total_listings_count 3585 non-null int64\nhost_verifications 3585 non-null object\nhost_has_profile_pic 3585 non-null object\nhost_identity_verified 3585 non-null object\nstreet 3585 non-null object\nneighbourhood 3042 non-null object\nneighbourhood_cleansed 3585 non-null object\nneighbourhood_group_cleansed 0 non-null float64\ncity 3583 non-null object\nstate 3585 non-null object\nzipcode 3547 non-null 
object\nmarket 3571 non-null object\nsmart_location 3585 non-null object\ncountry_code 3585 non-null object\ncountry 3585 non-null object\nlatitude 3585 non-null float64\nlongitude 3585 non-null float64\nis_location_exact 3585 non-null object\nproperty_type 3582 non-null object\nroom_type 3585 non-null object\naccommodates 3585 non-null int64\nbathrooms 3571 non-null float64\nbedrooms 3575 non-null float64\nbeds 3576 non-null float64\nbed_type 3585 non-null object\namenities 3585 non-null object\nsquare_feet 56 non-null float64\nprice 3585 non-null object\nweekly_price 892 non-null object\nmonthly_price 888 non-null object\nsecurity_deposit 1342 non-null object\ncleaning_fee 2478 non-null object\nguests_included 3585 non-null int64\nextra_people 3585 non-null object\nminimum_nights 3585 non-null int64\nmaximum_nights 3585 non-null int64\ncalendar_updated 3585 non-null object\nhas_availability 0 non-null float64\navailability_30 3585 non-null int64\navailability_60 3585 non-null int64\navailability_90 3585 non-null int64\navailability_365 3585 non-null int64\ncalendar_last_scraped 3585 non-null object\nnumber_of_reviews 3585 non-null int64\nfirst_review 2829 non-null object\nlast_review 2829 non-null object\nreview_scores_rating 2772 non-null float64\nreview_scores_accuracy 2762 non-null float64\nreview_scores_cleanliness 2767 non-null float64\nreview_scores_checkin 2765 non-null float64\nreview_scores_communication 2767 non-null float64\nreview_scores_location 2763 non-null float64\nreview_scores_value 2764 non-null float64\nrequires_license 3585 non-null object\nlicense 0 non-null float64\njurisdiction_names 0 non-null float64\ninstant_bookable 3585 non-null object\ncancellation_policy 3585 non-null object\nrequire_guest_profile_picture 3585 non-null object\nrequire_guest_phone_verification 3585 non-null object\ncalculated_host_listings_count 3585 non-null int64\nreviews_per_month 2829 non-null float64\ndtypes: float64(18), int64(15), object(62)\nmemory usage: 2.6+ MB\n" ], [ "# Checking the columns consisting of missing values over 50%\ndf_missing = df_listing.isna().mean()\ndf_missing[df_missing>0.5].plot.bar()", "_____no_output_____" ], [ "print(\"# of id:\",df_listing.id.unique().size)\nprint(\"# of host_id:\",df_listing.host_id.unique().size)\n# There are some hosts who take care of more than two ids", "# of id: 3585\n# of host_id: 2181\n" ], [ "print(\"# of accommodates:\",df_listing.accommodates.unique())\ntmp = df_listing.accommodates.value_counts(ascending=False)/df_listing.accommodates.size\nprint(tmp)\nprint(\"Accomodates equal or less than 2 is {:5.2f} %\".format((tmp[1]+tmp[2])*100))\nprint(\"Accomodates more than 2 is {:5.2f} %\".format((tmp[3]+tmp[4]+tmp[5]+tmp[6])*100))\n# 96% of total id can be categorized into the group with less than 2 people or the group with more than 2 people\nplt.hist(df_listing.accommodates, bins=15)\nplt.show()", "# of accommodates: [ 4 2 3 5 1 8 6 16 7 9 10 12 11 14]\n2 0.413668\n4 0.181311\n1 0.122455\n3 0.119386\n5 0.068340\n6 0.056625\n8 0.015342\n7 0.010879\n10 0.005300\n9 0.002789\n12 0.001395\n11 0.001116\n14 0.000837\n16 0.000558\nName: accommodates, dtype: float64\nAccomodates equal or less than 2 is 53.61 %\nAccomodates more than 2 is 42.57 %\n" ], [ "print(\"# of bed_type:\",df_listing.bed_type.unique())", "# of bed_type: ['Real Bed' 'Pull-out Sofa' 'Futon' 'Airbed' 'Couch']\n" ] ], [ [ "### 2.2. 
For df_calendar", "_____no_output_____" ] ], [ [ "print(df_calendar.shape)\nprint(df_calendar.info())\ndf_calendar.head()\ndf_missing_calendar = df_calendar.isna().mean()\ndf_missing_calendar.plot.bar()", "(1308890, 4)\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1308890 entries, 0 to 1308889\nData columns (total 4 columns):\nlisting_id 1308890 non-null int64\ndate 1308890 non-null object\navailable 1308890 non-null object\nprice 643037 non-null object\ndtypes: int64(1), object(3)\nmemory usage: 39.9+ MB\nNone\n" ], [ "# Modify df_calendar for future work\n# Special event : marathon, new academic season\n\ndef modify_calendar(df_calendar):\n '''\n This function creates 'year', 'month', 'day', 'weekday', and 'week_number' columns from 'date' coulmn of df_calendar \n and remove '$' string from 'price' coulmn.\n \n Input : a Pandas dataframe having a date data column\n Output : a Pandas dataframe having year, month, day, weekday, us_holiday columns\n '''\n # Split date column into year, month,day, weekday columns\n # The day of the week with Monday=0, Sunday=6\n # Set the range of weekends from Friday to Sunday\n df_calendar['year'] = pd.DatetimeIndex(df_calendar['date']).year\n df_calendar['month'] = pd.DatetimeIndex(df_calendar['date']).month\n df_calendar['day'] = pd.DatetimeIndex(df_calendar['date']).day\n df_calendar['weekday'] = pd.DatetimeIndex(df_calendar['date']).weekday\n df_calendar['week_number'] = pd.DatetimeIndex(df_calendar['date']).week\n df_calendar['price']= df_calendar['price'].str.replace('$','')\n df_calendar['price']=df_calendar['price'].str.replace(',','')\n df_calendar['price'] = df_calendar['price'].astype(float)\n \n # Add us_holiday column\n cal = calendar()\n holidays = cal.holidays(start=df_calendar.date.min(), end=df_calendar.date.max())\n df_calendar['us_holiday'] = df_calendar.date.astype('datetime64').isin(holidays)\n \n # Add weekend column #Friday, Saturday\n weekend = [4,5]\n df_calendar['weekend'] = df_calendar.weekday.isin(weekend)\n \n # Replace values in weekday column \n df_calendar['weekday'].replace({0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday',4:'Friday', 5:'Saturday', 6:'Sunday'}, inplace=True)\n \n return df_calendar", "_____no_output_____" ], [ "df_cal_modified = modify_calendar(df_calendar)\nprint(df_cal_modified.shape[0]/365)\n\n# Each listing_id has 365 rows", "3586.0\n" ], [ "def add_availabledays_price(df_listing, df_cal_modified):\n '''\n This function creates columns of 'unavail_days', 'avail_days_weekends', \n 'avail_days_weekdays', 'price_weekend', and 'price_weekday' where calculated from df_cal_modified on df_listing.\n \n Input : \n - A Pandas dataframe made from 'listings.csv' : df_listing\n - A pandas dataframe modified by modify_calendar() : df_cal_modified\n \n Output :\n - the modified df_listing dataframe with new 'unavail_days', 'avail_days_weekends',\n 'avail_days_weekdays', 'price_weekend', and 'price_weekday' columns \n '''\n id_list = df_listing.id[:]\n unavailable_days_array = np.array([])\n avail_days_weekends_array = np.array([])\n avail_days_weekdays_array = np.array([])\n price_weekend_array = np.array([])\n price_weekday_array = np.array([])\n\n for i in np.nditer(id_list):\n tmp = df_cal_modified[(df_cal_modified.listing_id == i)] # Make a dataframe coming from df_listing with a certain id\n available_dict = tmp.available.value_counts().to_dict()\n if 'f' in available_dict:\n unavailable_days = tmp[tmp.available == 'f'].shape[0]\n else:\n unavailable_days = 0\n\n if 't' in available_dict:\n 
available_weekends = tmp[(tmp.available == 't') & (tmp.weekend == True)].shape[0]\n available_weekdays = tmp[(tmp.available == 't') & (tmp.weekend == False)].shape[0]\n price_weekend = tmp[(tmp.weekend == True) & (tmp.available == 't')].price.astype(float).describe()['mean']\n price_weekday = tmp[(tmp.weekend == False) & (tmp.available == 't')].price.astype(float).describe()['mean']\n\n else:\n available_weekends = 0\n available_weekdays = 0\n price_weekend = np.nan\n price_weekday = np.nan\n\n\n unavailable_days_array = np.append(unavailable_days_array, unavailable_days)\n avail_days_weekends_array = np.append(avail_days_weekends_array, available_weekends)\n avail_days_weekdays_array = np.append(avail_days_weekdays_array, available_weekdays)\n price_weekend_array = np.append(price_weekend_array, price_weekend)\n price_weekday_array = np.append(price_weekday_array, price_weekday)\n\n df_listing['unavail_days'] = pd.Series(unavailable_days_array)\n df_listing['avail_days_weekends'] = pd.Series(avail_days_weekends_array)\n df_listing['avail_days_weekdays'] = pd.Series(avail_days_weekdays_array)\n df_listing['price_weekend'] = pd.Series(price_weekend_array)\n df_listing['price_weekday'] = pd.Series(price_weekday_array)\n", "_____no_output_____" ], [ "start = time.time() \nadd_availabledays_price(df_listing, df_cal_modified)\nprint(\"time :\", time.time() - start)\ndf_listing.tail(n=10)", "time : 52.92951488494873\n" ], [ "print(df_listing.shape)\ndf_listing.info()", "(3585, 100)\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3585 entries, 0 to 3584\nData columns (total 100 columns):\nid 3585 non-null int64\nlisting_url 3585 non-null object\nscrape_id 3585 non-null int64\nlast_scraped 3585 non-null object\nname 3585 non-null object\nsummary 3442 non-null object\nspace 2528 non-null object\ndescription 3585 non-null object\nexperiences_offered 3585 non-null object\nneighborhood_overview 2170 non-null object\nnotes 1610 non-null object\ntransit 2295 non-null object\naccess 2096 non-null object\ninteraction 2031 non-null object\nhouse_rules 2393 non-null object\nthumbnail_url 2986 non-null object\nmedium_url 2986 non-null object\npicture_url 3585 non-null object\nxl_picture_url 2986 non-null object\nhost_id 3585 non-null int64\nhost_url 3585 non-null object\nhost_name 3585 non-null object\nhost_since 3585 non-null object\nhost_location 3574 non-null object\nhost_about 2276 non-null object\nhost_response_time 3114 non-null object\nhost_response_rate 3114 non-null object\nhost_acceptance_rate 3114 non-null object\nhost_is_superhost 3585 non-null object\nhost_thumbnail_url 3585 non-null object\nhost_picture_url 3585 non-null object\nhost_neighbourhood 3246 non-null object\nhost_listings_count 3585 non-null int64\nhost_total_listings_count 3585 non-null int64\nhost_verifications 3585 non-null object\nhost_has_profile_pic 3585 non-null object\nhost_identity_verified 3585 non-null object\nstreet 3585 non-null object\nneighbourhood 3042 non-null object\nneighbourhood_cleansed 3585 non-null object\nneighbourhood_group_cleansed 0 non-null float64\ncity 3583 non-null object\nstate 3585 non-null object\nzipcode 3547 non-null object\nmarket 3571 non-null object\nsmart_location 3585 non-null object\ncountry_code 3585 non-null object\ncountry 3585 non-null object\nlatitude 3585 non-null float64\nlongitude 3585 non-null float64\nis_location_exact 3585 non-null object\nproperty_type 3582 non-null object\nroom_type 3585 non-null object\naccommodates 3585 non-null int64\nbathrooms 3571 non-null 
float64\nbedrooms 3575 non-null float64\nbeds 3576 non-null float64\nbed_type 3585 non-null object\namenities 3585 non-null object\nsquare_feet 56 non-null float64\nprice 3585 non-null object\nweekly_price 892 non-null object\nmonthly_price 888 non-null object\nsecurity_deposit 1342 non-null object\ncleaning_fee 2478 non-null object\nguests_included 3585 non-null int64\nextra_people 3585 non-null object\nminimum_nights 3585 non-null int64\nmaximum_nights 3585 non-null int64\ncalendar_updated 3585 non-null object\nhas_availability 0 non-null float64\navailability_30 3585 non-null int64\navailability_60 3585 non-null int64\navailability_90 3585 non-null int64\navailability_365 3585 non-null int64\ncalendar_last_scraped 3585 non-null object\nnumber_of_reviews 3585 non-null int64\nfirst_review 2829 non-null object\nlast_review 2829 non-null object\nreview_scores_rating 2772 non-null float64\nreview_scores_accuracy 2762 non-null float64\nreview_scores_cleanliness 2767 non-null float64\nreview_scores_checkin 2765 non-null float64\nreview_scores_communication 2767 non-null float64\nreview_scores_location 2763 non-null float64\nreview_scores_value 2764 non-null float64\nrequires_license 3585 non-null object\nlicense 0 non-null float64\njurisdiction_names 0 non-null float64\ninstant_bookable 3585 non-null object\ncancellation_policy 3585 non-null object\nrequire_guest_profile_picture 3585 non-null object\nrequire_guest_phone_verification 3585 non-null object\ncalculated_host_listings_count 3585 non-null int64\nreviews_per_month 2829 non-null float64\nunavail_days 3585 non-null float64\navail_days_weekends 3585 non-null float64\navail_days_weekdays 3585 non-null float64\nprice_weekend 2868 non-null float64\nprice_weekday 2892 non-null float64\ndtypes: float64(23), int64(15), object(62)\nmemory usage: 2.7+ MB\n" ] ], [ [ "## 3. 
Prepare data\n\n## Strategies\n - Remove irrelevant columns\n - Change object type columns to numeric columns or manipulate them using one hot encoding\n - Nan 채우기\n - integrated_score 만들기\n", "_____no_output_____" ] ], [ [ "def clean_listing_df(df_listing):\n '''\n This function removes irrelevant columns in the df_listing dataframe.\n\n Input : \n - A Pandas dataframe made from 'listings.csv' : df_listing\n\n Output :\n - Cleaned df_listing\n '''\n # Drop columns having 50% of nan value\n df_missing = df_listing.isna().mean()\n df_listing_modi1 = df_listing.drop(df_missing[df_missing>0.5].index.to_list(), axis=1)\n # Drop columns related with urls and other irrelevant columns\n remove_list1 = ['listing_url', 'scrape_id', 'last_scraped', 'thumbnail_url', 'medium_url', 'picture_url', 'xl_picture_url', 'host_url', \n 'host_thumbnail_url', 'host_picture_url', 'country_code', 'country']\n df_listing_modi1.drop(remove_list1, axis=1, inplace=True)\n # Drop the columns because of data overlap [city, smart_location], Only one value [state], \n # Wrong data [market, calendar_last_scraped]\n remove_list2 = ['smart_location', 'state', 'name', 'summary', 'space', 'description','neighborhood_overview',\n 'transit','access','market','calendar_last_scraped']\n df_listing_modi1.drop(remove_list2, axis=1, inplace=True)\n \n # Modify 'house_rules' columns to 'house_rules_exist_tf' having True value if there is a rule.\n # False value, if there is no rule.\n df_listing_modi1['house_rules_exist_tf']= pd.notna(df_listing_modi1.house_rules)\n df_listing_modi1.drop(['house_rules'], axis=1, inplace=True)\n # Remove columns having 1000 unique string valuses and irrelevant data\n remove_list3 = ['interaction', 'host_name', 'host_since', 'host_about', 'street','first_review','experiences_offered','requires_license',\n 'last_review','host_location','neighbourhood_cleansed','experiences_offered','requires_license']\n df_listing_modi2 = df_listing_modi1.drop(remove_list3, axis=1)\n\n # Change the columns 'host_response_rate', 'host_acceptance_rate' to float type\n columns_change_type = ['host_response_rate','host_acceptance_rate', 'price', 'cleaning_fee']\n for i in columns_change_type:\n df_listing_modi2[i] = df_listing_modi2[i].str.replace('%','')\n df_listing_modi2[i] = df_listing_modi2[i].str.replace('$','')\n df_listing_modi2[i] = df_listing_modi2[i].str.replace(',','')\n df_listing_modi2[i] = df_listing_modi2[i].astype(float)\n \n # Modify and Split values in 'amenities' column\n df_listing_modi2.amenities = df_listing_modi2.amenities.str.replace(\"[{}]\", \"\")\n df_amenities = df_listing_modi2.amenities.str.get_dummies(sep = \",\")\n df_amenities = df_amenities.add_prefix('amenities_')\n df_listing_modi2 = pd.concat([df_listing_modi2, df_amenities], axis=1)\n df_listing_modi2 = df_listing_modi2.drop('amenities', axis=1)\n \n # Use get_dummies for columns having unique values less then 10\n columns_of_object_less10 =[]\n for i,j in zip(df_listing_modi2.columns.to_list(), df_listing_modi2.dtypes.to_list()):\n if j == object and len(df_listing_modi2[i].value_counts()) < 10 :\n columns_of_object_less10.append(i)\n df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_less10, prefix=columns_of_object_less10, \n dummy_na=True)\n \n # Modify 'extra_people' coulmn to get boolean type of 'extra_people_fee_tf'\n df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(str)\n df_listing_modi2['extra_people']= df_listing_modi2['extra_people'].str.replace('$','')\n 
df_listing_modi2['extra_people']=df_listing_modi2['extra_people'].str.replace(',','')\n df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(float)\n df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].replace(to_replace=0, value=np.nan)\n df_listing_modi2['extra_people_fee_tf']= pd.notna(df_listing_modi2.extra_people)\n df_listing_modi2 = df_listing_modi2.drop('extra_people', axis=1)\n \n # Modify and Split values in 'host_verifications' column\n df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace(\"[\", \"\")\n df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace(\"]\", \"\")\n df_host_verifications = df_listing_modi2.host_verifications.str.get_dummies(sep = \",\")\n df_host_verifications = df_host_verifications.add_prefix('host_verification_')\n df_listing_modi2 = pd.concat([df_listing_modi2, df_host_verifications], axis=1)\n df_listing_modi2 = df_listing_modi2.drop(['host_verifications'], axis=1)\n df_listing_modi2 = df_listing_modi2.drop(['host_neighbourhood'], axis=1)\n \n # Modify 'calendar_updated' column\n df_listing_modi2[\"calendar_updated_1weekago\"] = np.where(df_listing_modi2['calendar_updated'].str.contains(\n \"days|yesterday|today|a week ago\")==True, 'yes', 'more_than_1week')\n df_listing_modi2 = df_listing_modi2.drop(['calendar_updated'], axis=1)\n \n # Use get_dummies for the columns 'neighbourhood', 'city', 'zipcode', 'property_type'\n tmp = df_listing_modi2.columns.to_list()\n tmp1 = df_listing_modi2.dtypes.to_list()\n columns_of_object_over10 =[]\n for i,j in zip(tmp,tmp1):\n if j == object and len(df_listing_modi2[i].value_counts()) > 10 :\n columns_of_object_over10.append(i)\n \n df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_over10, \n prefix=columns_of_object_over10, dummy_na=True)\n \n df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['calendar_updated_1weekago','house_rules_exist_tf','extra_people_fee_tf'], \n prefix=['calendar_updated_1weekago','house_rules_exist_tf','extra_people_fee_tf'], dummy_na=True)\n \n df_listing_modi2[\"host_response_rate_100\"] = np.where(df_listing_modi2['host_response_rate'] ==100, True, False)\n df_listing_modi2[\"host_acceptance_rate_100\"] = np.where(df_listing_modi2['host_acceptance_rate'] ==100, True, False)\n df_listing_modi2 = df_listing_modi2.drop(['host_response_rate','host_acceptance_rate','reviews_per_month'], axis=1)\n \n #bathrooms, bedrooms, beds, cleaning_fee, review_scores_rating, review_... 
: : fillna with mean value\n columns1 = ['bathrooms','bedrooms','beds','cleaning_fee','review_scores_rating','review_scores_accuracy','review_scores_cleanliness','review_scores_checkin',\n 'review_scores_communication','review_scores_location','review_scores_value']\n df_listing_modi2[columns1] = df_listing_modi2[columns1].fillna(df_listing_modi2.mean())\n df_listing_modi2.price_weekend.fillna(df_listing_modi2.price, inplace=True)\n df_listing_modi2.price_weekday.fillna(df_listing_modi2.price, inplace=True)\n df_listing_modi2['integrated_score_log'] = np.log(df_listing_modi2['review_scores_rating']*df_listing_modi2['number_of_reviews']+1)\n \n df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['host_response_rate_100','host_acceptance_rate_100'], \n prefix=['host_response_rate_100','host_acceptance_rate_100'])\n df_listing_modi2 = df_listing_modi2.drop(['id', 'host_id', 'latitude', 'longitude','price','host_listings_count','host_total_listings_count'], axis=1)\n \n \n \n return df_listing_modi2\n\n\n \n ", "_____no_output_____" ], [ "# columns_change_type = ['host_response_rate','host_acceptance_rate', 'price', 'cleaning_fee']\n# for i in columns_change_type:\n# print(df_listing[i].head())\ndf_listing_modi2 = clean_listing_df(df_listing) \nprint(df_listing_modi2.shape)\n", "(3585, 269)\n" ], [ "plt.figure(figsize=(14, 10))\nsns.heatmap(df_listing_modi2.isnull(), cbar=False)", "_____no_output_____" ], [ "check_null=list(df_listing_modi2.isnull().sum())\nprint(check_null)", "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n" ], [ "df_listing_modi2.head()", "_____no_output_____" ], [ "# df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['host_response_rate_100','host_acceptance_rate_100'], \n# prefix=['host_response_rate_100','host_acceptance_rate_100'])\n# df_listing_modi2.head()\n", "_____no_output_____" ], [ "df_listing_modi2.integrated_score_log.describe()\n", "_____no_output_____" ] ], [ [ "## PCA", "_____no_output_____" ] ], [ [ "# Apply feature scaling to the general population demographics data.\nscaler = StandardScaler()\ncolumn_list = list(df_listing_modi2.columns)\nscaled_values = scaler.fit_transform(df_listing_modi2.values)\ndf_listing_scaled = pd.DataFrame(scaled_values, index=df_listing_modi2.index, columns=df_listing_modi2.columns)", "_____no_output_____" ], [ "df_listing_scaled.shape", "_____no_output_____" ], [ "# Apply PCA to the data.\npca = PCA(n_components=200)\npca.fit(df_listing_scaled)", "_____no_output_____" ], [ "# Investigate the variance accounted for by each principal component.\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlabel('number of components')\nplt.ylabel('cumulative explained variance');", "_____no_output_____" ], [ "# Investigate the variance accounted for by each principal 
component.\ntmp1 = 0\ntmp2 = 0\nfor i in pca.explained_variance_ratio_:\n tmp1 += 1\n tmp2 += i\n print(tmp1, tmp2)\n if tmp2 > 0.9:\n break", "1 0.03670592718880253\n2 0.06957964715502717\n3 0.095498949239883\n4 0.11322620039040884\n5 0.12933080306599654\n6 0.14472075693121944\n7 0.15920398544760736\n8 0.1720295450386972\n9 0.1846363683255139\n10 0.196885751107622\n11 0.20841242454715872\n12 0.21943541406819456\n13 0.2302851961441375\n14 0.24094162577776132\n15 0.25142656554258025\n16 0.26166022347420503\n17 0.27138815585945\n18 0.28088458662824173\n19 0.2902634800354276\n20 0.2995606771375635\n21 0.3085161966553548\n22 0.31738463561690944\n23 0.32616530119323284\n24 0.3348104656086806\n25 0.3434486916680912\n26 0.35193637382894116\n27 0.3603394303865326\n28 0.3685601164667137\n29 0.3766587322254779\n30 0.38469309049093425\n31 0.39260849653520447\n32 0.4005003755648436\n33 0.4082029125573909\n34 0.41580325967469184\n35 0.42314870303548063\n36 0.4304307026883423\n37 0.43759680745615676\n38 0.4447333254322031\n39 0.4518004105574423\n40 0.45866704337810965\n41 0.46546981671754933\n42 0.4721892977071554\n43 0.4787524779091076\n44 0.4852064881057457\n45 0.4915576086933389\n46 0.49775260889790646\n47 0.5039174499032298\n48 0.5099856478098629\n49 0.5158991450779125\n50 0.5217720936160274\n51 0.5274778229021082\n52 0.5331162524103684\n53 0.5386668781926637\n54 0.5441311552400551\n55 0.5495398626639688\n56 0.5549062298956071\n57 0.5601906106093232\n58 0.5653509651345321\n59 0.5704313244824166\n60 0.575495632096402\n61 0.580506731086826\n62 0.5854287433348291\n63 0.5903220641526226\n64 0.595107316639754\n65 0.5998398858909542\n66 0.6045408463233864\n67 0.6092113161504422\n68 0.6137705524777777\n69 0.6183025313675307\n70 0.6228127211975003\n71 0.6272438130832054\n72 0.631627116253471\n73 0.6359953538983849\n74 0.6403216693720432\n75 0.6446173879072629\n76 0.6489109021629048\n77 0.6531324274366581\n78 0.6573383278861089\n79 0.6615175747809201\n80 0.6656700681169118\n81 0.6698092153055167\n82 0.6739203484279096\n83 0.6780234277753842\n84 0.6820920321571003\n85 0.6861497925373468\n86 0.6901825392085416\n87 0.6941823424739825\n88 0.6981635542185688\n89 0.7021326504286779\n90 0.7060878917705679\n91 0.7100392142686899\n92 0.7139849184853232\n93 0.7179130549420316\n94 0.7218382807829353\n95 0.7257468340893704\n96 0.7296470048548233\n97 0.7335338503749359\n98 0.7374093494496686\n99 0.7412796576834416\n100 0.7451475027336392\n101 0.7489980575193956\n102 0.7528208096722594\n103 0.7566239977179247\n104 0.7604105919801807\n105 0.7641866135861585\n106 0.7679515585633304\n107 0.7716984234413239\n108 0.7754221241127703\n109 0.7791286226988747\n110 0.7828049031395119\n111 0.7864718686651757\n112 0.7901073459738895\n113 0.7937330263265318\n114 0.7973307231645207\n115 0.8009178418630148\n116 0.8044910963398363\n117 0.8080190149425597\n118 0.8115267066291275\n119 0.8150020542711249\n120 0.8184532613016472\n121 0.821886815947919\n122 0.8253081857563191\n123 0.8286832401619104\n124 0.8320321118535632\n125 0.8353568024010886\n126 0.8386553104986146\n127 0.8419233994150839\n128 0.8451611460831558\n129 0.8483726546980461\n130 0.8515691830567973\n131 0.8546893660953662\n132 0.8577744532619191\n133 0.8608292437335368\n134 0.8638438307293016\n135 0.8668429940026494\n136 0.8698062924763694\n137 0.8727667044121327\n138 0.8756886820130603\n139 0.8785757675372542\n140 0.8814301764015073\n141 0.8842613541741439\n142 0.8870743327392789\n143 0.8898357825100643\n144 0.8925547388142951\n145 0.8952635881705144\n146 
0.8979204715386341\n147 0.9005179114567118\n" ], [ "# Apply PCA to the data.\npca = PCA(n_components=150)\npca.fit(df_listing_scaled)", "_____no_output_____" ], [ "def investigate(df_listing_scaled, pca, i):\n feature_names = list(df_listing_scaled.columns)\n weights_pca = copy.deepcopy(pca.components_[i])\n combined = list(zip(feature_names, weights_pca))\n combined_sorted= sorted(combined, key=lambda tup: tup[1], reverse=True)\n tmp_list = [list(x) for x in combined_sorted]\n tmp_list = [(x[0],\"{0:.3f}\".format(x[1])) for x in tmp_list]\n print(\"positive to pca{}:\".format(i), tmp_list[0:5])\n print()\n print(\"negative to pca{}:\".format(i), tmp_list[-1:-5:-1])\n print()\n\n\n# return tmp_list\n# print(\"Accomodates equal or less than 2 is {:5.2f} %\".format((tmp[1]+tmp[2])*100))", "_____no_output_____" ], [ "np.set_printoptions(precision=4)\ninvestigate(df_listing_scaled, pca, 0)\ninvestigate(df_listing_scaled, pca, 1)\ninvestigate(df_listing_scaled, pca, 2)\ninvestigate(df_listing_scaled, pca, 3)", "positive to pca0: [('calculated_host_listings_count', '0.223'), ('price_weekday', '0.211'), ('price_weekend', '0.211'), ('room_type_Entire home/apt', '0.202'), ('amenities_Gym', '0.187')]\n\nnegative to pca0: [('room_type_Private room', '-0.194'), ('require_guest_phone_verification_f', '-0.121'), ('cancellation_policy_flexible', '-0.118'), ('host_acceptance_rate_100_True', '-0.098')]\n\npositive to pca1: [('host_response_rate_100_False', '0.190'), ('extra_people_fee_tf_False', '0.157'), ('host_response_time_nan', '0.155'), ('calendar_updated_1weekago_more_than_1week', '0.152'), ('house_rules_exist_tf_False', '0.149')]\n\nnegative to pca1: [('host_response_rate_100_True', '-0.190'), ('integrated_score_log', '-0.174'), ('host_response_time_within an hour', '-0.172'), ('extra_people_fee_tf_True', '-0.157')]\n\npositive to pca2: [('avail_days_weekends', '0.288'), ('availability_365', '0.287'), ('avail_days_weekdays', '0.287'), ('availability_90', '0.248'), ('availability_60', '0.235')]\n\nnegative to pca2: [('unavail_days', '-0.287'), ('review_scores_rating', '-0.162'), ('review_scores_value', '-0.155'), ('review_scores_accuracy', '-0.144')]\n\npositive to pca3: [('instant_bookable_t', '0.176'), ('amenities_Hangers', '0.173'), ('require_guest_phone_verification_f', '0.159'), ('amenities_\"Hair Dryer\"', '0.158'), ('amenities_Iron', '0.156')]\n\nnegative to pca3: [('instant_bookable_f', '-0.176'), ('review_scores_rating', '-0.175'), ('require_guest_phone_verification_t', '-0.159'), ('review_scores_value', '-0.154')]\n\n" ], [ "df_listing_scaled_transformed = pca.fit_transform(df_listing_scaled)", "_____no_output_____" ], [ "num_clusters = list(range(4, 17, 1))\nscores = []\ncenters =[]\n\nfor i in num_clusters:\n cluster = KMeans(n_clusters=i, n_jobs=-1)\n cluster.fit(df_listing_scaled_transformed)\n print(\"n_clusters:\" ,i)\n print(cluster.score(df_listing_scaled_transformed))\n print(cluster.inertia_)\n scores.append(cluster.inertia_)\n centers.append(i)\n \nplt.plot(centers, scores);\nplt.xlabel('number of clusters');\nplt.ylabel('inertia');\n\n", "n_clusters: 4\n-777954.558148264\n777954.5581482641\nn_clusters: 5\n-769332.6153632149\n769332.615363215\nn_clusters: 6\n-759951.1882842379\n759951.1882842375\nn_clusters: 7\n-754398.7982820966\n754398.7982820967\nn_clusters: 8\n-748741.3078266245\n748741.3078266247\nn_clusters: 9\n-738210.9685577614\n738210.9685577612\nn_clusters: 10\n-732188.0484510039\n732188.0484510037\nn_clusters: 11\n-725581.8696258496\n725581.8696258495\nn_clusters: 
12\n-720134.4853530797\n720134.4853530801\nn_clusters: 13\n-711894.3953626296\n711894.3953626298\nn_clusters: 14\n-703302.5061006399\n703302.5061006402\nn_clusters: 15\n-696139.1161808383\n696139.1161808384\nn_clusters: 16\n-692633.241583536\n692633.2415835363\n" ], [ "cluster = KMeans(n_clusters=16, n_jobs=-1)\ncluster.fit(df_listing_scaled_transformed)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec801afce967a925e27c4551d6b2129964baa0e1
6,788
ipynb
Jupyter Notebook
docs/contents/Quick_Guide.ipynb
dprada/evidence
d8400fe1a3c662be01f6f9f658fc5b92b894556d
[ "MIT" ]
null
null
null
docs/contents/Quick_Guide.ipynb
dprada/evidence
d8400fe1a3c662be01f6f9f658fc5b92b894556d
[ "MIT" ]
null
null
null
docs/contents/Quick_Guide.ipynb
dprada/evidence
d8400fe1a3c662be01f6f9f658fc5b92b894556d
[ "MIT" ]
1
2021-11-06T16:03:46.000Z
2021-11-06T16:03:46.000Z
18.699725
296
0.475545
[ [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "# Quick Guide\n\n*-Brief tutorial for those in a hurry-*\n\nEvidency allows you to work with more than a physical units library in python -such as pint, unyt or openmm.unit- with a unique API. PyUnitWizard works as the man in the middle between your code ", "_____no_output_____" ] ], [ [ "import evidence as evi", "_____no_output_____" ], [ "datum1 = evi.Evidence(3.12)\ndatum1.add_reference({'database':'DOI', 'id':'AAA'})\ndatum1.add_reference({'database':'PubMed', 'id':'BBB'})", "_____no_output_____" ], [ "datum1", "_____no_output_____" ], [ "print(datum1)", "3.12 <DOI: AAA, PubMed: BBB>\n" ], [ "datum1.value", "_____no_output_____" ], [ "datum1.references", "_____no_output_____" ], [ "type(datum1.references[0])", "_____no_output_____" ], [ "datum1.references[0].database", "_____no_output_____" ], [ "datum1.references[0].id", "_____no_output_____" ], [ "datum2 = evi.Evidence(3.12)\ndatum2.add_reference({'database':'PubMed', 'id':'BBB'})\n\ndatum3 = evi.Evidence(3.12)\ndatum3.add_reference({'database':'PubMed', 'id':'CCC'})\n\ndatum4 = evi.Evidence(3.12)\ndatum4.add_reference({'database':'UniProtKB', 'id':'DDD'})\ndatum4.add_reference({'database':'PDB', 'id':'EEE'})", "_____no_output_____" ], [ "evi.identity(datum1, datum1)", "_____no_output_____" ], [ "evi.identity(datum1, datum2)", "_____no_output_____" ], [ "evi.is_subset(datum2, datum1)", "_____no_output_____" ], [ "evi.is_subset(datum1, datum2)", "_____no_output_____" ], [ "evi.same_value([datum1, datum2, datum3, datum4])", "_____no_output_____" ], [ "datum = evi.join([datum1, datum2, datum3, datum4])", "_____no_output_____" ], [ "datum", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec801c876e9b266044b4d406d3ec91a995d4ddf7
137,464
ipynb
Jupyter Notebook
notebooks/Mappings Similarity Analysis.ipynb
ComPath/compath-resources
e8da7b511c2b558b8fd0bf38888b512008ac1ba3
[ "MIT" ]
3
2020-11-24T18:43:59.000Z
2021-06-04T20:52:10.000Z
notebooks/Mappings Similarity Analysis.ipynb
ComPath/compath-resources
e8da7b511c2b558b8fd0bf38888b512008ac1ba3
[ "MIT" ]
13
2020-03-28T13:36:32.000Z
2021-01-19T15:00:07.000Z
notebooks/Mappings Similarity Analysis.ipynb
ComPath/resources
e8da7b511c2b558b8fd0bf38888b512008ac1ba3
[ "MIT" ]
1
2021-12-01T09:49:59.000Z
2021-12-01T09:49:59.000Z
221.003215
39,512
0.901807
[ [ [ "# Similarity analysis of mapped pathways\n\nThis notebook shows explores the similarity of the pathways mapped across KEGG, Reactome, and WikiPathways. For that, we look at the overlap of genes between the curated mappings. Similarity is calculated using the [Szymkiewicz–Simpson coefficient](https://en.wikipedia.org/wiki/Overlap_coefficient).\n\nAuthor: [Daniel Domingo-Fernández](https://github.com/ddomingof)", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\nfrom bio2bel_kegg.manager import Manager as KeggManager\nfrom bio2bel_wikipathways.manager import Manager as WikiPathwaysManager\nfrom bio2bel_reactome.manager import Manager as ReactomeManager\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "Load Established Mappings", "_____no_output_____" ] ], [ [ "kegg_wikipathways_df = pd.read_excel(\n 'https://github.com/ComPath/curation/raw/master/mappings/kegg_wikipathways.xlsx',\n index_col=0\n)\nkegg_reactome_df = pd.read_excel(\n 'https://github.com/ComPath/curation/raw/master/mappings/kegg_reactome.xlsx',\n index_col=0\n)\nwikipathways_reactome_df = pd.read_excel(\n 'https://github.com/ComPath/curation/raw/master/mappings/wikipathways_reactome.xlsx',\n index_col=0\n)", "_____no_output_____" ] ], [ [ "Call the Database Managers to query gene sets", "_____no_output_____" ] ], [ [ "kegg_manager = KeggManager()\nwikipathways_manager = WikiPathwaysManager()\nreactome_manager = ReactomeManager()", "_____no_output_____" ] ], [ [ "Methods to calculate similarity and process mappings", "_____no_output_____" ] ], [ [ "def calculate_szymkiewicz_simpson_coefficient(set_1, set_2):\n \"\"\"Calculate Szymkiewicz-Simpson coefficient between two sets.\n \n :param set set_1: set 1\n :param set set_2: set 2\n :returns similarity\n :rtype: float\n \"\"\"\n \n intersection = len(set_1.intersection(set_2))\n smaller_set = min(len(set_1), len(set_2))\n \n return intersection/smaller_set\n\ndef get_gene_set_from_pathway_model(pathway_model):\n \"\"\"Return the gene set given a Bio2BEL ComPath Pathway model.\n \n :rtype: set\n \"\"\"\n return {\n gene.hgnc_symbol\n for gene in pathway_model.proteins\n }\n\ndef get_pathway_models(reference_manager, compared_manager, pathway_1_name, pathway_2_name):\n \"\"\"Return the pathway models from their correspondent managers.\n \n :rtype: tuple(Pathway, Pathway)\n \"\"\"\n pathway_1 = reference_manager.get_pathway_by_name(pathway_1_name)\n \n pathway_2 = compared_manager.get_pathway_by_name(pathway_2_name)\n \n if pathway_1 is None:\n raise ValueError(\"Not Valid Pathway Name: {}\".format(pathway_1_name))\n \n if pathway_2 is None:\n raise ValueError(\"Not Valid Pathway Name: {}\".format(pathway_2_name))\n \n return pathway_1, pathway_2\n \ndef get_overlap(reference_manager, compared_manager, reference_pathway_name, compared_pathway_name):\n \"\"\"Calculate the overlap between two pathways\"\"\"\n \n pathway_1, pathway_2 = get_pathway_models(reference_manager, compared_manager, reference_pathway_name, compared_pathway_name)\n \n return calculate_szymkiewicz_simpson_coefficient(get_gene_set_from_pathway_model(pathway_1), get_gene_set_from_pathway_model(pathway_2))\n \ndef get_pathways_from_statement(mapping_statement, mapping_type):\n \"\"\"Return the subject, object of the mapping.\n \n :param str mapping_statement: statement\n :param str mapping_type: type of relationship\n :rtype: tuple[str,str]\n \"\"\"\n _pathways = 
mapping_statement.split(mapping_type)\n \n return _pathways[0].strip(), _pathways[1].strip()\n\n\ndef remove_star_from_pathway_name(pathway_name):\n \"\"\"Remove the star that label the reference pathway in isPartOf statements.\n \n :param str statements: pathway name\n \"\"\"\n return pathway_name.replace(\"*\", \"\").strip()\n\n \ndef get_pathways_from_is_part_of_mapping(mapping_statement):\n \"\"\"Return the pathways of a hierarchical mapping.\"\"\"\n\n pathway_1, pathway_2 = get_pathways_from_statement(mapping_statement, 'isPartOf')\n\n if \"*\" in pathway_1:\n\n pathway_1 = remove_star_from_pathway_name(pathway_1)\n return pathway_1, pathway_2\n\n\n else:\n pathway_2 = remove_star_from_pathway_name(pathway_2)\n return pathway_2, pathway_1\n\n\ndef parse_equivalent_to(df, reference_manager, compared_manager):\n \"\"\"Parse the column corresponding to equivalentTo mappings in the excel sheet.\n \n :returns: list of overlaps and name of pathways with equivalentTo mappings\n :rtype: tuple(list, set)\n \"\"\"\n \n equivalent_pathways = set()\n equivalent_ids= set()\n \n overlaps = []\n \n for index, row in df.iterrows(): \n\n equivalent_to_mappings = row['equivalentTo Mappings']\n\n if pd.isnull(equivalent_to_mappings):\n continue\n\n for mapping_statement in equivalent_to_mappings.split(\"\\n\"):\n \n if mapping_statement == '':\n continue\n \n reference_pathway, compared_pathway = get_pathways_from_statement(mapping_statement, \"equivalentTo\")\n\n similarity = get_overlap(reference_manager, compared_manager, reference_pathway, compared_pathway)\n\n overlaps.append(similarity)\n\n equivalent_pathways.add(reference_pathway)\n equivalent_pathways.add(compared_pathway) \n \n pathway_1, pathway_2 = get_pathway_models(reference_manager, compared_manager, reference_pathway, compared_pathway)\n equivalent_ids.add(pathway_1.resource_id) \n equivalent_ids.add(pathway_2.resource_id)\n\n# if similarity < 0.2:\n# print('{} = {} has a similarity of {}'.format(reference_pathway, compared_pathway, similarity))\n \n return overlaps, equivalent_pathways, equivalent_ids\n\n\ndef parse_is_part_of(df, reference_manager, compared_manager):\n \"\"\"Parse the column corresponding to isPartOf mappings in the excel sheet.\n \n :returns: list of overlaps and name of pathways with isPartOf mappings\n :rtype: tuple(list, list, set)\n \"\"\"\n \n is_part_of_pathways = []\n overlaps = []\n\n for index, row in df.iterrows(): \n \n is_part_of_mappings = row['isPartOf Mappings']\n\n if pd.isnull(is_part_of_mappings):\n continue\n\n for mapping_statement in is_part_of_mappings.split('\\n'):\n \n if mapping_statement == '':\n continue\n\n reference_pathway, compared_pathway = get_pathways_from_is_part_of_mapping(mapping_statement)\n \n similarity = get_overlap(reference_manager, compared_manager, reference_pathway, compared_pathway)\n\n overlaps.append(similarity)\n \n # List of pathways with mappings\n is_part_of_pathways.append(reference_pathway)\n is_part_of_pathways.append(compared_pathway)\n\n# if similarity < 0.5:\n# print('{} = {} has a similarity of {}'.format(reference_pathway, compared_pathway, similarity))\n \n return overlaps, set(is_part_of_pathways), is_part_of_pathways\n", "_____no_output_____" ] ], [ [ "# Distribution of mappings based on similarity\n\n## Equivalent pathways distribution based on similarity\n\nIn the following plots, we compare similar are content the equivalent pathways across the multiple pathway databases comparisons. 
The histogram plots the distribution of mappings (y axis) versus similarity (x axis). The distributions show how most of the equivalent mappings share high similarity based on content (left-skewed distribution).", "_____no_output_____" ] ], [ [ "print(\"############### KEGG vs WikiPathways #################\\n\")\n\nkegg_wikipathways_equivalent_overlaps, kegg_wikipathways_equivalents, kegg_wikipathways_mappings_ids = parse_equivalent_to(\n kegg_wikipathways_df,\n kegg_manager,\n wikipathways_manager\n)\n\nprint(\"{} equivalentTo Overlaps. avg = {}\\n\".format(len(kegg_wikipathways_equivalent_overlaps), np.mean(kegg_wikipathways_equivalent_overlaps))) \n\nprint(\"############### KEGG vs Reactome #################\\n\")\n\nkegg_reactome_equivalent_overlaps, kegg_reactome_equivalents, kegg_reactome_mappings_ids = parse_equivalent_to(\n kegg_reactome_df,\n kegg_manager,\n reactome_manager\n)\n\nprint(\"{} equivalentTo Overlaps. avg = {}\\n\".format(len(kegg_reactome_equivalent_overlaps), np.mean(kegg_reactome_equivalent_overlaps))) \n\nprint(\"############### WikiPathways vs Reactome #################\\n\")\n\nwikipathways_reactome_equivalent_overlaps, wikipathways_reactome_equivalents, wikipathways_reactome_mappings_ids = parse_equivalent_to(\n wikipathways_reactome_df,\n wikipathways_manager,\n reactome_manager\n)\n\nprint(\"{} equivalentTo Overlaps. avg = {}\\n\".format(len(wikipathways_reactome_equivalent_overlaps), np.mean(wikipathways_reactome_equivalent_overlaps))) \n", "############### KEGG vs WikiPathways #################\n\n55 equivalentTo Overlaps. avg = 0.7943990591926416\n\n############### KEGG vs Reactome #################\n\n58 equivalentTo Overlaps. avg = 0.6154507314258154\n\n############### WikiPathways vs Reactome #################\n\n64 equivalentTo Overlaps. avg = 0.6702750950726711\n\n" ], [ "fig = plt.figure(figsize=(25, 6))\n\nax = fig.add_subplot(1, 3, 1)\nax.set_title('Distribution KEGG and WikiPathways ({} mappings)'.format(len(kegg_wikipathways_equivalent_overlaps)), fontsize=18)\nax.set_xlabel('Szymkiewicz-Simpson coefficient', fontsize=18)\nax.set_ylabel('Frequency', fontsize=18)\nax.set_xlim(0, 1)\nsns.distplot(kegg_wikipathways_equivalent_overlaps, kde=False, rug=True, bins=20, color='orange')\n\nax = fig.add_subplot(1, 3, 2)\nax.set_title('Distribution KEGG and Reactome ({} mappings)'.format(len(kegg_reactome_equivalent_overlaps)), fontsize=18)\nax.set_xlabel('Szymkiewicz-Simpson coefficient', fontsize=18)\nax.set_ylabel('Frequency', fontsize=18)\nax.set_xlim(0, 1)\nsns.distplot(kegg_reactome_equivalent_overlaps, kde=False, rug=True, bins=20, color='green')\n\nax = fig.add_subplot(1, 3, 3)\nax.set_title('Distribution WikiPathways and Reactome ({} mappings)'.format(len(wikipathways_reactome_equivalent_overlaps)), fontsize=18)\nax.set_xlabel('Szymkiewicz-Simpson coefficient', fontsize=18)\nax.set_ylabel('Frequency', fontsize=18)\nax.set_xlim(0, 1)\nsns.distplot(wikipathways_reactome_equivalent_overlaps, kde=False, rug=True, bins=20, color='blue')\n\nplt.show()", "_____no_output_____" ] ], [ [ "The distributions present us with interesting results:\n\n- The number of equivalent pathways between the three is fairly consistent (56, 58, and 64 shared pathways).\n- KEGG and WikiPathways share the highest similarity within the shared pathways (left-skewed distribution where the average is equal to 0.787 and the mode to 1). 
The reason for this might be related to the fact that Reactome granularity (smaller pathways in general, see plots at the bottom of the notebook) is different from that of KEGG and WikiPathways, which contain mostly medium-sized pathways (~100 genes).\n- Few pathways share a similarity lower than 0.4 (40% overlap).\n\n_Note that there is one outlier on the left side of each histogram (equivalent pathways with less than 0.2 overlap)._\n\n1. _In the first histogram (KEGG-WikiPathways): Steroid biosynthesis with 0.1 overlap (10%)_\n2. _In the middle histogram (KEGG-Reactome): Linoleic acid metabolism does not share any gene with its two pathways._\n3. _In the last histogram (WikiPathways-Reactome): TGF-B Signaling in Thyroid Cells for Epithelial-Mesenchymal Transition with 0.0625 overlap._\n\nIt would be interesting to see what is the reason behind this low similarity between the equivalent pathways.", "_____no_output_____", "## Hierarchical pathways distribution based on similarity\n\nIn the following plots, we compare the similarity of pathways related by hierarchical mappings across the multiple pathway databases comparisons. The histogram plots the distribution of mappings (y axis) versus similarity (x axis). Similarity is calculated using the [Szymkiewicz–Simpson coefficient](https://en.wikipedia.org/wiki/Overlap_coefficient). The distributions show how most of the hierarchical mappings share high similarity based on content (distribution is asymmetrical and right skewed). However, the number of mappings with low similarity is higher than the equivalent mappings.", "_____no_output_____" ] ], [ [ "print(\"############### KEGG vs WikiPathways #################\\n\")\n\nkegg_wikipathways_is_part_of_overlaps, kegg_wikipathways_is_part_of_set , kegg_wikipathways_is_part_of_list = parse_is_part_of(\n kegg_wikipathways_df,\n kegg_manager,\n wikipathways_manager\n)\n \nprint(\"{} isPartOf Overlaps. avg = {}\\n\".format(len(kegg_wikipathways_is_part_of_overlaps), np.mean(kegg_wikipathways_is_part_of_overlaps)))\n\nprint(\"############### KEGG vs Reactome #################\\n\")\n\nkegg_reactome_is_part_of_overlaps, kegg_reactome_is_part_of_set , kegg_reactome_is_part_of_list = parse_is_part_of(\n kegg_reactome_df,\n kegg_manager,\n reactome_manager\n)\n \nprint(\"{} isPartOf Overlaps. avg = {}\\n\".format(len(kegg_reactome_is_part_of_overlaps), np.mean(kegg_reactome_is_part_of_overlaps)))\n\nprint(\"############### WikiPathways vs Reactome #################\\n\")\n\nwikipathways_reactome_is_part_of_overlaps, wikipathways_reactome_is_part_of_set , wikipathways_reactome_is_part_of_list = parse_is_part_of(\n wikipathways_reactome_df,\n wikipathways_manager,\n reactome_manager\n)\n \nprint(\"{} isPartOf Overlaps. avg = {}\\n\".format(len(wikipathways_reactome_is_part_of_overlaps), np.mean(wikipathways_reactome_is_part_of_overlaps))) ", "############### KEGG vs WikiPathways #################\n\n247 isPartOf Overlaps. avg = 0.6089752808565702\n\n############### KEGG vs Reactome #################\n\n598 isPartOf Overlaps. avg = 0.6683946518700807\n\n############### WikiPathways vs Reactome #################\n\n564 isPartOf Overlaps. 
avg = 0.672734107485201\n\n" ], [ "fig = plt.figure(figsize=(25, 6))\n\nax = fig.add_subplot(1, 3, 1)\nax.set_title('Distribution KEGG and WikiPathways ({} mappings)'.format(len(kegg_wikipathways_is_part_of_overlaps)), fontsize=18)\nax.set_xlabel('Szymkiewicz-Simpson coefficient', fontsize=18)\nax.set_ylabel('Frequency', fontsize=18)\nax.set_xlim(0, 1)\nsns.distplot(kegg_wikipathways_is_part_of_overlaps, kde=False, rug=True, bins=20, color='orange')\n\nax = fig.add_subplot(1, 3, 2)\nax.set_title('Distribution KEGG and Reactome ({} mappings)'.format(len(kegg_reactome_is_part_of_overlaps)), fontsize=18)\nax.set_xlabel('Szymkiewicz-Simpson coefficient', fontsize=18)\nax.set_ylabel('Frequency', fontsize=18)\nax.set_xlim(0, 1)\nsns.distplot(kegg_reactome_is_part_of_overlaps, kde=False, rug=True, bins=20, color='green')\n\nax = fig.add_subplot(1, 3, 3)\nax.set_title('Distribution WikiPathways and Reactome ({} mappings)'.format(len(wikipathways_reactome_is_part_of_overlaps)), fontsize=18)\nax.set_xlabel('Szymkiewicz-Simpson coefficient', fontsize=18)\nax.set_ylabel('Frequency', fontsize=18)\nax.set_xlim(0, 1)\nsns.distplot(wikipathways_reactome_is_part_of_overlaps, kde=False, rug=True, bins=20, color='blue')\n\nplt.show()\n", "_____no_output_____" ] ], [ [ "The distributions of hierarchical mappings are consistent across all inter-database mappings. As expected, hierarchical mappings share high similarity (peak of the distribution is located around 1.0). \n\n_It is important to mention that even more mappings are stored in the database thanks to the ComPath inference system. For example, when a KEGG/WikiPathways pathway is assigned as equivalent to a Reactome pathway, ComPath uses the Reactome hierarchy to infer new hierarchical mappings and map the super/sub pathways of the Reactome pathway to its corresponding KEGG/WikiPathways pathway. This plot is only for illustration purposes, because inferred mappings were not included here. 
However, those mappings will not change the overall picture because they will share high similarity with the other pathway (since they are completely embedded in the other pathways)._", "_____no_output_____", "## Mapping Statistics", "_____no_output_____", "- | KEGG | Reactome | WikiPathways\n---- |---- |----------|-------------\nKEGG | - | 58 equivalentTo mappings / 597 isPartOf mappings | 55 equivalentTo mappings / 247 isPartOf mappings\nReactome | - | - | 64 equivalentTo mappings / 564 isPartOf mappings\nWikiPathways | - | - | -\n\nTable Summarizing the Mappings between Databases", "_____no_output_____", "### Extra: Distribution of pathway size for each database", "_____no_output_____" ] ], [ [ "kegg_distribution = kegg_manager.get_pathway_size_distribution()\nreactome_distribution = reactome_manager.get_pathway_size_distribution()\nwikipathways_distribution = wikipathways_manager.get_pathway_size_distribution()", "_____no_output_____" ], [ "fig = plt.figure(figsize=(25, 6))\n\nax = fig.add_subplot(1, 3, 1)\nax.set_title('Pathway sizes in KEGG (Total {})'.format(len(kegg_distribution)), fontsize=18)\nax.set_xlabel('Number of genes in pathway', fontsize=18)\nax.set_ylabel('Frequency', fontsize=18)\nsns.distplot(list(kegg_distribution.values()), kde=False, rug=True, bins=50, color='orange')\n\nax = fig.add_subplot(1, 3, 2)\nax.set_title('Pathway sizes in Reactome (Total {})'.format(len(reactome_distribution)), fontsize=18)\nax.set_xlabel('Number of genes in pathway', fontsize=18)\nax.set_ylabel('Frequency', fontsize=18)\nsns.distplot(list(reactome_distribution.values()), kde=False, rug=True, bins=100, color='green')\n\nax = fig.add_subplot(1, 3, 3)\nax.set_title('Pathway sizes in WikiPathways (Total {})'.format(len(wikipathways_distribution)), fontsize=18)\nax.set_xlabel('Number of genes in pathway', fontsize=18)\nax.set_ylabel('Frequency', fontsize=18)\nsns.distplot(list(wikipathways_distribution.values()), kde=False, rug=True, bins=50, color='blue')\n\nplt.show()", "_____no_output_____" ] ], [ [ "__Distribution of pathway size for each database__. Note that KEGG and WikiPathways present a similar pathway landscape, where most of the pathways fall in the range of 25-100 genes, in contrast to Reactome where most of the pathways contain less than 50 genes. The rationale for this is that Reactome pathways are more specific, and therefore smaller in size.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec8021593225acdbda9d843ec59eee5293f5a33a
59,190
ipynb
Jupyter Notebook
tensorflow_examples/lite/model_maker/third_party/efficientdet/det_advprop_tutorial.ipynb
lukjung/examples
86e95a594ffd7a74016a2b15991d2294725d7e28
[ "Apache-2.0" ]
6,484
2019-02-13T21:32:29.000Z
2022-03-31T20:50:20.000Z
tensorflow_examples/lite/model_maker/third_party/efficientdet/det_advprop_tutorial.ipynb
lukjung/examples
86e95a594ffd7a74016a2b15991d2294725d7e28
[ "Apache-2.0" ]
988
2020-03-17T02:53:40.000Z
2022-03-17T19:34:10.000Z
tensorflow_examples/lite/model_maker/third_party/efficientdet/det_advprop_tutorial.ipynb
lukjung/examples
86e95a594ffd7a74016a2b15991d2294725d7e28
[ "Apache-2.0" ]
7,222
2019-02-13T21:39:34.000Z
2022-03-31T22:23:54.000Z
91.625387
2,376
0.663068
[ [ [ "# Det-AdvProp Tutorial: eval \n\n\n\n<table align=\"left\"><td>\n <a target=\"_blank\" href=\"https://github.com/google/automl/blob/master/efficientdet/Det-AdvProp-tutorial.ipynb\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on github\n </a>\n</td><td>\n <a target=\"_blank\" href=\"https://colab.sandbox.google.com/github/google/automl/blob/master/efficientdet/det-advprop-tutorial.ipynb\">\n <img width=32px src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n</td></table>", "_____no_output_____" ], [ "# 0. Install and view graph.", "_____no_output_____" ], [ "## 0.1 Install package and download source code/image.\n\n", "_____no_output_____" ] ], [ [ "%%capture\n#@title\nimport os\nimport sys\nimport tensorflow.compat.v1 as tf\n\n# Download source code.\nif \"efficientdet\" not in os.getcwd():\n !git clone --depth 1 https://github.com/google/automl\n os.chdir('automl/efficientdet')\n sys.path.append('.')\n !pip install -r requirements.txt\n !pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'\nelse:\n !git pull", "_____no_output_____" ], [ "MODEL = 'efficientdet-d1' #@param\n\ndef download(m):\n if m not in os.listdir():\n !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/advprop/{m}.tar.gz\n !tar zxf {m}.tar.gz\n ckpt_path = os.path.join(os.getcwd(), m)\n return ckpt_path\n\n# Download checkpoint.\nckpt_path = download(MODEL)\nprint('Use model in {}'.format(ckpt_path))\n\n# Prepare image and visualization settings.\nimage_url = 'https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png'#@param\nimage_name = 'img.png' #@param\n!wget {image_url} -O img.png\nimport os\nimg_path = os.path.join(os.getcwd(), 'img.png')\n\nmin_score_thresh = 0.35 #@param\nmax_boxes_to_draw = 200 #@param\nline_thickness = 2#@param\n\nimport PIL\n# Get the largest of height/width and round to 128.\nimage_size = max(PIL.Image.open(img_path).size)", "--2021-05-10 20:07:35-- https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/advprop/efficientdet-d1.tar.gz\nResolving storage.googleapis.com (storage.googleapis.com)... 74.125.71.128, 64.233.184.128, 74.125.133.128, ...\nConnecting to storage.googleapis.com (storage.googleapis.com)|74.125.71.128|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 149546361 (143M) [application/octet-stream]\nSaving to: ‘efficientdet-d1.tar.gz’\n\nefficientdet-d1.tar 100%[===================>] 142.62M 39.4MB/s in 3.6s \n\n2021-05-10 20:07:40 (39.4 MB/s) - ‘efficientdet-d1.tar.gz’ saved [149546361/149546361]\n\nUse model in /content/automl/efficientdet/efficientdet-d1\n--2021-05-10 20:07:42-- https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png\nResolving user-images.githubusercontent.com (user-images.githubusercontent.com)... 185.199.109.133, 185.199.111.133, 185.199.108.133, ...\nConnecting to user-images.githubusercontent.com (user-images.githubusercontent.com)|185.199.109.133|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 4080549 (3.9M) [image/png]\nSaving to: ‘img.png’\n\nimg.png 100%[===================>] 3.89M --.-KB/s in 0.1s \n\n2021-05-10 20:07:42 (38.4 MB/s) - ‘img.png’ saved [4080549/4080549]\n\n" ] ], [ [ "## 0.2 View graph in TensorBoard", "_____no_output_____" ] ], [ [ "!python model_inspect.py --model_name={MODEL} --logdir=logs &> /dev/null\n%load_ext tensorboard\n%tensorboard --logdir logs", "_____no_output_____" ] ], [ [ "# 1. COCO evaluation", "_____no_output_____" ], [ "## 1.1 COCO evaluation on validation set.", "_____no_output_____" ] ], [ [ "if 'val2017' not in os.listdir():\n !wget http://images.cocodataset.org/zips/val2017.zip\n !wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip\n !unzip -q val2017.zip\n !unzip annotations_trainval2017.zip\n\n !mkdir tfrecord\n !PYTHONPATH=\".:$PYTHONPATH\" python dataset/create_coco_tfrecord.py \\\n --image_dir=val2017 \\\n --caption_annotations_file=annotations/captions_val2017.json \\\n --output_file_prefix=tfrecord/val \\\n --num_shards=32", "--2021-05-10 19:43:48-- http://images.cocodataset.org/zips/val2017.zip\nResolving images.cocodataset.org (images.cocodataset.org)... 52.216.27.28\nConnecting to images.cocodataset.org (images.cocodataset.org)|52.216.27.28|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 815585330 (778M) [application/zip]\nSaving to: ‘val2017.zip’\n\nval2017.zip 100%[===================>] 777.80M 36.0MB/s in 22s \n\n2021-05-10 19:44:10 (35.1 MB/s) - ‘val2017.zip’ saved [815585330/815585330]\n\n--2021-05-10 19:44:11-- http://images.cocodataset.org/annotations/annotations_trainval2017.zip\nResolving images.cocodataset.org (images.cocodataset.org)... 52.217.92.164\nConnecting to images.cocodataset.org (images.cocodataset.org)|52.217.92.164|:80... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 252907541 (241M) [application/zip]\nSaving to: ‘annotations_trainval2017.zip’\n\nannotations_trainva 100%[===================>] 241.19M 37.0MB/s in 7.2s \n\n2021-05-10 19:44:18 (33.6 MB/s) - ‘annotations_trainval2017.zip’ saved [252907541/252907541]\n\nArchive: annotations_trainval2017.zip\n inflating: annotations/instances_train2017.json \n inflating: annotations/instances_val2017.json \n inflating: annotations/captions_train2017.json \n inflating: annotations/captions_val2017.json \n inflating: annotations/person_keypoints_train2017.json \n inflating: annotations/person_keypoints_val2017.json \n2021-05-10 19:44:33.210219: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\nI0510 19:44:35.059431 140632818317184 create_coco_tfrecord.py:285] writing to output path: tfrecord/val\nI0510 19:44:35.159756 140632818317184 create_coco_tfrecord.py:237] Building caption index.\nI0510 19:44:35.166594 140632818317184 create_coco_tfrecord.py:249] 0 images are missing captions.\nI0510 19:44:36.985283 140632818317184 create_coco_tfrecord.py:323] On image 0 of 5000\nI0510 19:44:37.145407 140632818317184 create_coco_tfrecord.py:323] On image 100 of 5000\nI0510 19:44:37.295032 140632818317184 create_coco_tfrecord.py:323] On image 200 of 5000\nI0510 19:44:37.435169 140632818317184 create_coco_tfrecord.py:323] On image 300 of 5000\nI0510 19:44:37.580727 140632818317184 create_coco_tfrecord.py:323] On image 400 of 5000\nI0510 19:44:37.715295 140632818317184 create_coco_tfrecord.py:323] On image 500 of 5000\nI0510 19:44:37.862454 140632818317184 create_coco_tfrecord.py:323] On image 600 of 5000\nI0510 19:44:37.993027 140632818317184 create_coco_tfrecord.py:323] On image 700 of 5000\nI0510 19:44:38.239493 140632818317184 create_coco_tfrecord.py:323] On image 800 of 5000\nI0510 19:44:38.386704 140632818317184 create_coco_tfrecord.py:323] On image 900 of 5000\nI0510 19:44:38.555314 140632818317184 create_coco_tfrecord.py:323] On image 1000 of 5000\nI0510 19:44:38.714288 140632818317184 create_coco_tfrecord.py:323] On image 1100 of 5000\nI0510 19:44:38.871498 140632818317184 create_coco_tfrecord.py:323] On image 1200 of 5000\nI0510 19:44:39.004296 140632818317184 create_coco_tfrecord.py:323] On image 1300 of 5000\nI0510 19:44:39.177606 140632818317184 create_coco_tfrecord.py:323] On image 1400 of 5000\nI0510 19:44:39.347730 140632818317184 create_coco_tfrecord.py:323] On image 1500 of 5000\nI0510 19:44:39.513748 140632818317184 create_coco_tfrecord.py:323] On image 1600 of 5000\nI0510 19:44:39.655339 140632818317184 create_coco_tfrecord.py:323] On image 1700 of 5000\nI0510 19:44:39.812914 140632818317184 create_coco_tfrecord.py:323] On image 1800 of 5000\nI0510 19:44:39.939900 140632818317184 create_coco_tfrecord.py:323] On image 1900 of 5000\nI0510 19:44:40.087029 140632818317184 create_coco_tfrecord.py:323] On image 2000 of 5000\nI0510 19:44:40.226217 140632818317184 create_coco_tfrecord.py:323] On image 2100 of 5000\nI0510 19:44:40.365591 140632818317184 create_coco_tfrecord.py:323] On image 2200 of 5000\nI0510 19:44:40.499703 140632818317184 create_coco_tfrecord.py:323] On image 2300 of 5000\nI0510 19:44:40.638760 140632818317184 create_coco_tfrecord.py:323] On image 2400 of 5000\nI0510 19:44:41.050563 140632818317184 create_coco_tfrecord.py:323] On image 2500 of 5000\nI0510 19:44:41.431067 140632818317184 create_coco_tfrecord.py:323] On image 2600 of 5000\nI0510 19:44:41.786906 140632818317184 create_coco_tfrecord.py:323] On 
image 2700 of 5000\nI0510 19:44:42.212045 140632818317184 create_coco_tfrecord.py:323] On image 2800 of 5000\nI0510 19:44:42.739339 140632818317184 create_coco_tfrecord.py:323] On image 2900 of 5000\nI0510 19:44:43.172071 140632818317184 create_coco_tfrecord.py:323] On image 3000 of 5000\nI0510 19:44:43.651128 140632818317184 create_coco_tfrecord.py:323] On image 3100 of 5000\nI0510 19:44:44.109982 140632818317184 create_coco_tfrecord.py:323] On image 3200 of 5000\nI0510 19:44:44.504407 140632818317184 create_coco_tfrecord.py:323] On image 3300 of 5000\nI0510 19:44:44.679261 140632818317184 create_coco_tfrecord.py:323] On image 3400 of 5000\nI0510 19:44:44.843348 140632818317184 create_coco_tfrecord.py:323] On image 3500 of 5000\nI0510 19:44:45.013960 140632818317184 create_coco_tfrecord.py:323] On image 3600 of 5000\nI0510 19:44:45.209258 140632818317184 create_coco_tfrecord.py:323] On image 3700 of 5000\nI0510 19:44:45.453207 140632818317184 create_coco_tfrecord.py:323] On image 3800 of 5000\nI0510 19:44:45.758646 140632818317184 create_coco_tfrecord.py:323] On image 3900 of 5000\nI0510 19:44:50.063420 140632818317184 create_coco_tfrecord.py:323] On image 4000 of 5000\nI0510 19:44:50.098973 140632818317184 create_coco_tfrecord.py:323] On image 4100 of 5000\nI0510 19:44:50.144104 140632818317184 create_coco_tfrecord.py:323] On image 4200 of 5000\nI0510 19:44:50.187048 140632818317184 create_coco_tfrecord.py:323] On image 4300 of 5000\nI0510 19:44:50.278038 140632818317184 create_coco_tfrecord.py:323] On image 4400 of 5000\nI0510 19:44:50.474480 140632818317184 create_coco_tfrecord.py:323] On image 4500 of 5000\nI0510 19:44:50.887932 140632818317184 create_coco_tfrecord.py:323] On image 4600 of 5000\nI0510 19:44:51.437325 140632818317184 create_coco_tfrecord.py:323] On image 4700 of 5000\nI0510 19:44:51.890431 140632818317184 create_coco_tfrecord.py:323] On image 4800 of 5000\nI0510 19:44:52.091532 140632818317184 create_coco_tfrecord.py:323] On image 4900 of 5000\nI0510 19:44:52.366421 140632818317184 create_coco_tfrecord.py:335] Finished writing, skipped 0 annotations.\n" ], [ "# Evalute on validation set (takes about 10 mins for efficientdet-d0)\n!python main.py --mode=eval \\\n --model_name={MODEL} --model_dir={ckpt_path} \\\n --val_file_pattern=tfrecord/val* \\\n --val_json_file=annotations/instances_val2017.json \\\n --hparams=\"mean_rgb=0.0,stddev_rgb=1.0,scale_range=True\"", "2021-05-10 20:07:49.603102: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\nI0510 20:07:52.228707 139834041194368 main.py:264] {'name': 'efficientdet-d1', 'act_type': 'swish', 'image_size': (640, 640), 'target_size': None, 'input_rand_hflip': True, 'jitter_min': 0.1, 'jitter_max': 2.0, 'autoaugment_policy': None, 'grid_mask': False, 'sample_image': None, 'map_freq': 5, 'num_classes': 90, 'seg_num_classes': 3, 'heads': ['object_detection'], 'skip_crowd_during_training': True, 'label_map': None, 'max_instances_per_image': 100, 'regenerate_source_id': False, 'min_level': 3, 'max_level': 7, 'num_scales': 3, 'aspect_ratios': [1.0, 2.0, 0.5], 'anchor_scale': 4.0, 'is_training_bn': True, 'momentum': 0.9, 'optimizer': 'sgd', 'learning_rate': 0.08, 'lr_warmup_init': 0.008, 'lr_warmup_epoch': 1.0, 'first_lr_drop_epoch': 200.0, 'second_lr_drop_epoch': 250.0, 'poly_lr_power': 0.9, 'clip_gradients_norm': 10.0, 'num_epochs': 300, 'data_format': 'channels_last', 'mean_rgb': 0.0, 'stddev_rgb': 1.0, 'scale_range': True, 'label_smoothing': 0.0, 'alpha': 
0.25, 'gamma': 1.5, 'delta': 0.1, 'box_loss_weight': 50.0, 'iou_loss_type': None, 'iou_loss_weight': 1.0, 'weight_decay': 4e-05, 'strategy': None, 'mixed_precision': False, 'loss_scale': None, 'model_optimizations': {}, 'box_class_repeats': 3, 'fpn_cell_repeats': 4, 'fpn_num_filters': 88, 'separable_conv': True, 'apply_bn_for_resampling': True, 'conv_after_downsample': False, 'conv_bn_act_pattern': False, 'drop_remainder': True, 'nms_configs': {'method': 'gaussian', 'iou_thresh': None, 'score_thresh': 0.0, 'sigma': None, 'pyfunc': False, 'max_nms_inputs': 0, 'max_output_size': 100}, 'tflite_max_detections': 100, 'fpn_name': None, 'fpn_weight_method': None, 'fpn_config': None, 'survival_prob': None, 'img_summary_steps': None, 'lr_decay_method': 'cosine', 'moving_average_decay': 0.9998, 'ckpt_var_scope': None, 'skip_mismatch': True, 'backbone_name': 'efficientnet-b1', 'backbone_config': None, 'var_freeze_expr': None, 'use_keras_model': True, 'dataset_type': None, 'positives_momentum': None, 'grad_checkpoint': False, 'verbose': 1, 'save_freq': 'epoch', 'model_name': 'efficientdet-d1', 'iterations_per_loop': 100, 'model_dir': '/content/automl/efficientdet/efficientdet-d1', 'num_shards': 8, 'num_examples_per_epoch': 120000, 'backbone_ckpt': '', 'ckpt': None, 'val_json_file': 'annotations/instances_val2017.json', 'testdev_dir': None, 'profile': False, 'mode': 'eval'}\nINFO:tensorflow:Using config: {'_model_dir': '/content/automl/efficientdet/efficientdet-d1', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_checkpoint_save_graph_def': True, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\nI0510 20:07:52.287737 139834041194368 estimator.py:191] Using config: {'_model_dir': '/content/automl/efficientdet/efficientdet-d1', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_checkpoint_save_graph_def': True, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\nINFO:tensorflow:Using config: {'_model_dir': '/content/automl/efficientdet/efficientdet-d1', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, 
'_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_checkpoint_save_graph_def': True, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\nI0510 20:07:52.288912 139834041194368 estimator.py:191] Using config: {'_model_dir': '/content/automl/efficientdet/efficientdet-d1', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_checkpoint_save_graph_def': True, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\nINFO:tensorflow:Waiting for new checkpoint at /content/automl/efficientdet/efficientdet-d1\nI0510 20:07:52.289403 139834041194368 checkpoint_utils.py:139] Waiting for new checkpoint at /content/automl/efficientdet/efficientdet-d1\nINFO:tensorflow:Found new checkpoint at /content/automl/efficientdet/efficientdet-d1/model\nI0510 20:07:52.292483 139834041194368 checkpoint_utils.py:148] Found new checkpoint at /content/automl/efficientdet/efficientdet-d1/model\nI0510 20:07:52.292706 139834041194368 main.py:344] Starting to evaluate.\n2021-05-10 20:07:52.489416: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set\n2021-05-10 20:07:52.490577: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n2021-05-10 20:07:52.508703: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-05-10 20:07:52.509616: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: \npciBusID: 0000:00:04.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0\ncoreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s\n2021-05-10 20:07:52.509660: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n2021-05-10 20:07:52.512220: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n2021-05-10 20:07:52.512295: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n2021-05-10 20:07:52.514123: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n2021-05-10 20:07:52.514558: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n2021-05-10 20:07:52.516516: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusolver.so.10\n2021-05-10 20:07:52.517094: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] 
Successfully opened dynamic library libcusparse.so.11\n2021-05-10 20:07:52.517346: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n2021-05-10 20:07:52.517450: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-05-10 20:07:52.518379: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-05-10 20:07:52.519223: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1862] Adding visible gpu devices: 0\nINFO:tensorflow:Calling model_fn.\nI0510 20:07:53.088464 139834041194368 estimator.py:1162] Calling model_fn.\nI0510 20:07:53.093623 139834041194368 efficientnet_builder.py:215] global_params= GlobalParams(batch_norm_momentum=0.99, batch_norm_epsilon=0.001, dropout_rate=0.2, data_format='channels_last', num_classes=1000, width_coefficient=1.0, depth_coefficient=1.1, depth_divisor=8, min_depth=None, survival_prob=0.8, relu_fn=functools.partial(<function activation_fn at 0x7f2d4fb79cb0>, act_type='swish'), batch_norm=<class 'utils.BatchNormalization'>, use_se=True, local_pooling=None, condconv_num_experts=None, clip_projection_output=False, blocks_args=['r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25', 'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25', 'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25', 'r1_k3_s11_e6_i192_o320_se0.25'], fix_head_stem=None, grad_checkpoint=False)\nI0510 20:07:53.595246 139834041194368 efficientdet_keras.py:760] fnode 0 : {'feat_level': 6, 'inputs_offsets': [3, 4]}\nI0510 20:07:53.596441 139834041194368 efficientdet_keras.py:760] fnode 1 : {'feat_level': 5, 'inputs_offsets': [2, 5]}\nI0510 20:07:53.597646 139834041194368 efficientdet_keras.py:760] fnode 2 : {'feat_level': 4, 'inputs_offsets': [1, 6]}\nI0510 20:07:53.598873 139834041194368 efficientdet_keras.py:760] fnode 3 : {'feat_level': 3, 'inputs_offsets': [0, 7]}\nI0510 20:07:53.600070 139834041194368 efficientdet_keras.py:760] fnode 4 : {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}\nI0510 20:07:53.601099 139834041194368 efficientdet_keras.py:760] fnode 5 : {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}\nI0510 20:07:53.602200 139834041194368 efficientdet_keras.py:760] fnode 6 : {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}\nI0510 20:07:53.603379 139834041194368 efficientdet_keras.py:760] fnode 7 : {'feat_level': 7, 'inputs_offsets': [4, 11]}\nI0510 20:07:53.606173 139834041194368 efficientdet_keras.py:760] fnode 0 : {'feat_level': 6, 'inputs_offsets': [3, 4]}\nI0510 20:07:53.607471 139834041194368 efficientdet_keras.py:760] fnode 1 : {'feat_level': 5, 'inputs_offsets': [2, 5]}\nI0510 20:07:53.608979 139834041194368 efficientdet_keras.py:760] fnode 2 : {'feat_level': 4, 'inputs_offsets': [1, 6]}\nI0510 20:07:53.610496 139834041194368 efficientdet_keras.py:760] fnode 3 : {'feat_level': 3, 'inputs_offsets': [0, 7]}\nI0510 20:07:53.611800 139834041194368 efficientdet_keras.py:760] fnode 4 : {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}\nI0510 20:07:53.613338 139834041194368 efficientdet_keras.py:760] fnode 5 : {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}\nI0510 20:07:53.615097 139834041194368 efficientdet_keras.py:760] fnode 6 : {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}\nI0510 20:07:53.617069 139834041194368 
efficientdet_keras.py:760] fnode 7 : {'feat_level': 7, 'inputs_offsets': [4, 11]}\nI0510 20:07:53.619574 139834041194368 efficientdet_keras.py:760] fnode 0 : {'feat_level': 6, 'inputs_offsets': [3, 4]}\nI0510 20:07:53.620916 139834041194368 efficientdet_keras.py:760] fnode 1 : {'feat_level': 5, 'inputs_offsets': [2, 5]}\nI0510 20:07:53.622064 139834041194368 efficientdet_keras.py:760] fnode 2 : {'feat_level': 4, 'inputs_offsets': [1, 6]}\nI0510 20:07:53.623369 139834041194368 efficientdet_keras.py:760] fnode 3 : {'feat_level': 3, 'inputs_offsets': [0, 7]}\nI0510 20:07:53.624463 139834041194368 efficientdet_keras.py:760] fnode 4 : {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}\nI0510 20:07:53.625886 139834041194368 efficientdet_keras.py:760] fnode 5 : {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}\nI0510 20:07:53.627297 139834041194368 efficientdet_keras.py:760] fnode 6 : {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}\nI0510 20:07:53.628497 139834041194368 efficientdet_keras.py:760] fnode 7 : {'feat_level': 7, 'inputs_offsets': [4, 11]}\nI0510 20:07:53.630418 139834041194368 efficientdet_keras.py:760] fnode 0 : {'feat_level': 6, 'inputs_offsets': [3, 4]}\nI0510 20:07:53.631624 139834041194368 efficientdet_keras.py:760] fnode 1 : {'feat_level': 5, 'inputs_offsets': [2, 5]}\nI0510 20:07:53.632795 139834041194368 efficientdet_keras.py:760] fnode 2 : {'feat_level': 4, 'inputs_offsets': [1, 6]}\nI0510 20:07:53.633984 139834041194368 efficientdet_keras.py:760] fnode 3 : {'feat_level': 3, 'inputs_offsets': [0, 7]}\nI0510 20:07:53.635079 139834041194368 efficientdet_keras.py:760] fnode 4 : {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}\nI0510 20:07:53.636193 139834041194368 efficientdet_keras.py:760] fnode 5 : {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}\nI0510 20:07:53.637408 139834041194368 efficientdet_keras.py:760] fnode 6 : {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}\nI0510 20:07:53.638536 139834041194368 efficientdet_keras.py:760] fnode 7 : {'feat_level': 7, 'inputs_offsets': [4, 11]}\nI0510 20:07:53.750490 139834041194368 efficientnet_model.py:735] Built stem stem : (1, 320, 320, 32)\nI0510 20:07:53.750730 139834041194368 efficientnet_model.py:756] block_0 survival_prob: 1.0\nI0510 20:07:53.751127 139834041194368 efficientnet_model.py:374] Block blocks_0 input shape: (1, 320, 320, 32)\nI0510 20:07:53.780265 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 320, 320, 32)\nI0510 20:07:53.809039 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 32)\nI0510 20:07:53.836266 139834041194368 efficientnet_model.py:414] Project shape: (1, 320, 320, 16)\nI0510 20:07:53.836619 139834041194368 efficientnet_model.py:756] block_1 survival_prob: 0.991304347826087\nI0510 20:07:53.837053 139834041194368 efficientnet_model.py:374] Block blocks_1 input shape: (1, 320, 320, 16)\nI0510 20:07:53.871055 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 320, 320, 16)\nI0510 20:07:53.900743 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 16)\nI0510 20:07:53.930487 139834041194368 efficientnet_model.py:414] Project shape: (1, 320, 320, 16)\nI0510 20:07:53.930959 139834041194368 efficientnet_model.py:756] block_2 survival_prob: 0.9826086956521739\nI0510 20:07:53.931439 139834041194368 efficientnet_model.py:374] Block blocks_2 input shape: (1, 320, 320, 16)\nI0510 20:07:53.961296 139834041194368 efficientnet_model.py:390] Expand shape: (1, 320, 320, 96)\nI0510 20:07:53.993832 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 160, 160, 96)\nI0510 
20:07:54.025872 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 96)\nI0510 20:07:54.059447 139834041194368 efficientnet_model.py:414] Project shape: (1, 160, 160, 24)\nI0510 20:07:54.059780 139834041194368 efficientnet_model.py:756] block_3 survival_prob: 0.9739130434782609\nI0510 20:07:54.060328 139834041194368 efficientnet_model.py:374] Block blocks_3 input shape: (1, 160, 160, 24)\nI0510 20:07:54.088966 139834041194368 efficientnet_model.py:390] Expand shape: (1, 160, 160, 144)\nI0510 20:07:54.121675 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 160, 160, 144)\nI0510 20:07:54.150545 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 144)\nI0510 20:07:54.182002 139834041194368 efficientnet_model.py:414] Project shape: (1, 160, 160, 24)\nI0510 20:07:54.182313 139834041194368 efficientnet_model.py:756] block_4 survival_prob: 0.9652173913043478\nI0510 20:07:54.182894 139834041194368 efficientnet_model.py:374] Block blocks_4 input shape: (1, 160, 160, 24)\nI0510 20:07:54.211359 139834041194368 efficientnet_model.py:390] Expand shape: (1, 160, 160, 144)\nI0510 20:07:54.241073 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 160, 160, 144)\nI0510 20:07:54.271023 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 144)\nI0510 20:07:54.299235 139834041194368 efficientnet_model.py:414] Project shape: (1, 160, 160, 24)\nI0510 20:07:54.299630 139834041194368 efficientnet_model.py:756] block_5 survival_prob: 0.9565217391304348\nI0510 20:07:54.300070 139834041194368 efficientnet_model.py:374] Block blocks_5 input shape: (1, 160, 160, 24)\nI0510 20:07:54.328530 139834041194368 efficientnet_model.py:390] Expand shape: (1, 160, 160, 144)\nI0510 20:07:54.358716 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 80, 80, 144)\nI0510 20:07:54.389357 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 144)\nI0510 20:07:54.418314 139834041194368 efficientnet_model.py:414] Project shape: (1, 80, 80, 40)\nI0510 20:07:54.418715 139834041194368 efficientnet_model.py:756] block_6 survival_prob: 0.9478260869565217\nI0510 20:07:54.419190 139834041194368 efficientnet_model.py:374] Block blocks_6 input shape: (1, 80, 80, 40)\nI0510 20:07:54.451548 139834041194368 efficientnet_model.py:390] Expand shape: (1, 80, 80, 240)\nI0510 20:07:54.486734 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 80, 80, 240)\nI0510 20:07:54.516430 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 240)\nI0510 20:07:54.543974 139834041194368 efficientnet_model.py:414] Project shape: (1, 80, 80, 40)\nI0510 20:07:54.544296 139834041194368 efficientnet_model.py:756] block_7 survival_prob: 0.9391304347826087\nI0510 20:07:54.544718 139834041194368 efficientnet_model.py:374] Block blocks_7 input shape: (1, 80, 80, 40)\nI0510 20:07:54.573758 139834041194368 efficientnet_model.py:390] Expand shape: (1, 80, 80, 240)\nI0510 20:07:54.607336 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 80, 80, 240)\nI0510 20:07:54.636443 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 240)\nI0510 20:07:54.663759 139834041194368 efficientnet_model.py:414] Project shape: (1, 80, 80, 40)\nI0510 20:07:54.664189 139834041194368 efficientnet_model.py:756] block_8 survival_prob: 0.9304347826086956\nI0510 20:07:54.664561 139834041194368 efficientnet_model.py:374] Block blocks_8 input shape: (1, 80, 80, 40)\nI0510 20:07:54.696891 139834041194368 efficientnet_model.py:390] Expand shape: (1, 80, 80, 240)\nI0510 
20:07:54.726265 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 40, 40, 240)\nI0510 20:07:54.755805 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 240)\nI0510 20:07:54.786793 139834041194368 efficientnet_model.py:414] Project shape: (1, 40, 40, 80)\nI0510 20:07:54.787131 139834041194368 efficientnet_model.py:756] block_9 survival_prob: 0.9217391304347826\nI0510 20:07:54.787655 139834041194368 efficientnet_model.py:374] Block blocks_9 input shape: (1, 40, 40, 80)\nI0510 20:07:54.815595 139834041194368 efficientnet_model.py:390] Expand shape: (1, 40, 40, 480)\nI0510 20:07:54.844414 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 40, 40, 480)\nI0510 20:07:54.874174 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 480)\nI0510 20:07:54.902137 139834041194368 efficientnet_model.py:414] Project shape: (1, 40, 40, 80)\nI0510 20:07:54.902514 139834041194368 efficientnet_model.py:756] block_10 survival_prob: 0.9130434782608696\nI0510 20:07:54.902967 139834041194368 efficientnet_model.py:374] Block blocks_10 input shape: (1, 40, 40, 80)\nI0510 20:07:54.931709 139834041194368 efficientnet_model.py:390] Expand shape: (1, 40, 40, 480)\nI0510 20:07:54.962241 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 40, 40, 480)\nI0510 20:07:54.993660 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 480)\nI0510 20:07:55.021482 139834041194368 efficientnet_model.py:414] Project shape: (1, 40, 40, 80)\nI0510 20:07:55.021827 139834041194368 efficientnet_model.py:756] block_11 survival_prob: 0.9043478260869565\nI0510 20:07:55.022259 139834041194368 efficientnet_model.py:374] Block blocks_11 input shape: (1, 40, 40, 80)\nI0510 20:07:55.057896 139834041194368 efficientnet_model.py:390] Expand shape: (1, 40, 40, 480)\nI0510 20:07:55.091666 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 40, 40, 480)\nI0510 20:07:55.121704 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 480)\nI0510 20:07:55.149158 139834041194368 efficientnet_model.py:414] Project shape: (1, 40, 40, 80)\nI0510 20:07:55.149526 139834041194368 efficientnet_model.py:756] block_12 survival_prob: 0.8956521739130435\nI0510 20:07:55.150015 139834041194368 efficientnet_model.py:374] Block blocks_12 input shape: (1, 40, 40, 80)\nI0510 20:07:55.178953 139834041194368 efficientnet_model.py:390] Expand shape: (1, 40, 40, 480)\nI0510 20:07:55.209172 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 40, 40, 480)\nI0510 20:07:55.238858 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 480)\nI0510 20:07:55.267069 139834041194368 efficientnet_model.py:414] Project shape: (1, 40, 40, 112)\nI0510 20:07:55.267507 139834041194368 efficientnet_model.py:756] block_13 survival_prob: 0.8869565217391304\nI0510 20:07:55.268048 139834041194368 efficientnet_model.py:374] Block blocks_13 input shape: (1, 40, 40, 112)\nI0510 20:07:55.296053 139834041194368 efficientnet_model.py:390] Expand shape: (1, 40, 40, 672)\nI0510 20:07:55.331140 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 40, 40, 672)\nI0510 20:07:55.360097 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 672)\nI0510 20:07:55.391819 139834041194368 efficientnet_model.py:414] Project shape: (1, 40, 40, 112)\nI0510 20:07:55.392213 139834041194368 efficientnet_model.py:756] block_14 survival_prob: 0.8782608695652174\nI0510 20:07:55.392651 139834041194368 efficientnet_model.py:374] Block blocks_14 input shape: (1, 40, 40, 112)\nI0510 
20:07:55.422633 139834041194368 efficientnet_model.py:390] Expand shape: (1, 40, 40, 672)\nI0510 20:07:55.452295 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 40, 40, 672)\nI0510 20:07:55.482393 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 672)\nI0510 20:07:55.512572 139834041194368 efficientnet_model.py:414] Project shape: (1, 40, 40, 112)\nI0510 20:07:55.512948 139834041194368 efficientnet_model.py:756] block_15 survival_prob: 0.8695652173913044\nI0510 20:07:55.513380 139834041194368 efficientnet_model.py:374] Block blocks_15 input shape: (1, 40, 40, 112)\nI0510 20:07:55.542927 139834041194368 efficientnet_model.py:390] Expand shape: (1, 40, 40, 672)\nI0510 20:07:55.574158 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 40, 40, 672)\nI0510 20:07:55.605713 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 672)\nI0510 20:07:55.633415 139834041194368 efficientnet_model.py:414] Project shape: (1, 40, 40, 112)\nI0510 20:07:55.633908 139834041194368 efficientnet_model.py:756] block_16 survival_prob: 0.8608695652173913\nI0510 20:07:55.634328 139834041194368 efficientnet_model.py:374] Block blocks_16 input shape: (1, 40, 40, 112)\nI0510 20:07:55.663101 139834041194368 efficientnet_model.py:390] Expand shape: (1, 40, 40, 672)\nI0510 20:07:55.696253 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 20, 20, 672)\nI0510 20:07:55.728366 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 672)\nI0510 20:07:55.756615 139834041194368 efficientnet_model.py:414] Project shape: (1, 20, 20, 192)\nI0510 20:07:55.756961 139834041194368 efficientnet_model.py:756] block_17 survival_prob: 0.8521739130434782\nI0510 20:07:55.757386 139834041194368 efficientnet_model.py:374] Block blocks_17 input shape: (1, 20, 20, 192)\nI0510 20:07:55.792434 139834041194368 efficientnet_model.py:390] Expand shape: (1, 20, 20, 1152)\nI0510 20:07:55.829316 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 20, 20, 1152)\nI0510 20:07:55.860946 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 1152)\nI0510 20:07:55.888971 139834041194368 efficientnet_model.py:414] Project shape: (1, 20, 20, 192)\nI0510 20:07:55.889437 139834041194368 efficientnet_model.py:756] block_18 survival_prob: 0.8434782608695652\nI0510 20:07:55.889867 139834041194368 efficientnet_model.py:374] Block blocks_18 input shape: (1, 20, 20, 192)\nI0510 20:07:55.924473 139834041194368 efficientnet_model.py:390] Expand shape: (1, 20, 20, 1152)\nI0510 20:07:55.968310 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 20, 20, 1152)\nI0510 20:07:56.009804 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 1152)\nI0510 20:07:56.041101 139834041194368 efficientnet_model.py:414] Project shape: (1, 20, 20, 192)\nI0510 20:07:56.041589 139834041194368 efficientnet_model.py:756] block_19 survival_prob: 0.8347826086956522\nI0510 20:07:56.042091 139834041194368 efficientnet_model.py:374] Block blocks_19 input shape: (1, 20, 20, 192)\nI0510 20:07:56.093295 139834041194368 efficientnet_model.py:390] Expand shape: (1, 20, 20, 1152)\nI0510 20:07:56.136825 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 20, 20, 1152)\nI0510 20:07:56.168875 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 1152)\nI0510 20:07:56.195683 139834041194368 efficientnet_model.py:414] Project shape: (1, 20, 20, 192)\nI0510 20:07:56.196047 139834041194368 efficientnet_model.py:756] block_20 survival_prob: 0.8260869565217391\nI0510 
20:07:56.196481 139834041194368 efficientnet_model.py:374] Block blocks_20 input shape: (1, 20, 20, 192)\nI0510 20:07:56.234993 139834041194368 efficientnet_model.py:390] Expand shape: (1, 20, 20, 1152)\nI0510 20:07:56.272101 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 20, 20, 1152)\nI0510 20:07:56.306418 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 1152)\nI0510 20:07:56.335123 139834041194368 efficientnet_model.py:414] Project shape: (1, 20, 20, 192)\nI0510 20:07:56.335595 139834041194368 efficientnet_model.py:756] block_21 survival_prob: 0.8173913043478261\nI0510 20:07:56.336132 139834041194368 efficientnet_model.py:374] Block blocks_21 input shape: (1, 20, 20, 192)\nI0510 20:07:56.374043 139834041194368 efficientnet_model.py:390] Expand shape: (1, 20, 20, 1152)\nI0510 20:07:56.412933 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 20, 20, 1152)\nI0510 20:07:56.446010 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 1152)\nI0510 20:07:56.475004 139834041194368 efficientnet_model.py:414] Project shape: (1, 20, 20, 320)\nI0510 20:07:56.475410 139834041194368 efficientnet_model.py:756] block_22 survival_prob: 0.808695652173913\nI0510 20:07:56.476164 139834041194368 efficientnet_model.py:374] Block blocks_22 input shape: (1, 20, 20, 320)\nI0510 20:07:56.514158 139834041194368 efficientnet_model.py:390] Expand shape: (1, 20, 20, 1920)\nI0510 20:07:56.550471 139834041194368 efficientnet_model.py:393] DWConv shape: (1, 20, 20, 1920)\nI0510 20:07:56.586025 139834041194368 efficientnet_model.py:195] Built SE se : (1, 1, 1, 1920)\nI0510 20:07:56.615339 139834041194368 efficientnet_model.py:414] Project shape: (1, 20, 20, 320)\nI0510 20:08:00.628787 139834041194368 det_model_fn.py:81] LR schedule method: cosine\nI0510 20:08:01.077699 139834041194368 postprocess.py:90] use max_nms_inputs for pre-nms topk.\nI0510 20:08:02.011834 139834041194368 det_model_fn.py:476] Eval val with groudtruths annotations/instances_val2017.json.\nI0510 20:08:02.068171 139834041194368 det_model_fn.py:553] Load EMA vars with ema_decay=0.999800\nINFO:tensorflow:Done calling model_fn.\nI0510 20:08:03.211773 139834041194368 estimator.py:1164] Done calling model_fn.\nINFO:tensorflow:Starting evaluation at 2021-05-10T20:08:03Z\nI0510 20:08:03.234877 139834041194368 evaluation.py:255] Starting evaluation at 2021-05-10T20:08:03Z\nINFO:tensorflow:Graph was finalized.\nI0510 20:08:04.095394 139834041194368 monitored_session.py:246] Graph was finalized.\n2021-05-10 20:08:04.096106: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set\n2021-05-10 20:08:04.096396: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-05-10 20:08:04.097427: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: \npciBusID: 0000:00:04.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0\ncoreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s\n2021-05-10 20:08:04.097507: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n2021-05-10 20:08:04.097573: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n2021-05-10 20:08:04.097644: I 
tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n2021-05-10 20:08:04.097691: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n2021-05-10 20:08:04.097739: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n2021-05-10 20:08:04.097786: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusolver.so.10\n2021-05-10 20:08:04.097891: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n2021-05-10 20:08:04.097938: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n2021-05-10 20:08:04.098070: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-05-10 20:08:04.099073: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-05-10 20:08:04.099983: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1862] Adding visible gpu devices: 0\n2021-05-10 20:08:04.100061: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n2021-05-10 20:08:04.802699: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1261] Device interconnect StreamExecutor with strength 1 edge matrix:\n2021-05-10 20:08:04.802772: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1267] 0 \n2021-05-10 20:08:04.802798: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1280] 0: N \n2021-05-10 20:08:04.803063: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-05-10 20:08:04.804184: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-05-10 20:08:04.805216: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-05-10 20:08:04.806026: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:39] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. 
Original config value was 0.\n2021-05-10 20:08:04.806089: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1406] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 14975 MB memory) -> physical GPU (device: 0, name: Tesla P100-PCIE-16GB, pci bus id: 0000:00:04.0, compute capability: 6.0)\nINFO:tensorflow:Restoring parameters from /content/automl/efficientdet/efficientdet-d1/model\nI0510 20:08:04.806891 139834041194368 saver.py:1292] Restoring parameters from /content/automl/efficientdet/efficientdet-d1/model\n2021-05-10 20:08:04.944665: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:196] None of the MLIR optimization passes are enabled (registered 0 passes)\n2021-05-10 20:08:05.123288: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2299995000 Hz\nINFO:tensorflow:Running local_init_op.\nI0510 20:08:06.682386 139834041194368 session_manager.py:505] Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nI0510 20:08:06.769455 139834041194368 session_manager.py:508] Done running local_init_op.\n2021-05-10 20:08:07.729877: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:116] None of the MLIR optimization passes are enabled (registered 2)\n2021-05-10 20:08:10.942540: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n2021-05-10 20:08:12.145170: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n2021-05-10 20:08:12.421326: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\nINFO:tensorflow:Evaluation [500/5000]\nI0510 20:09:43.242785 139834041194368 evaluation.py:167] Evaluation [500/5000]\nINFO:tensorflow:Evaluation [1000/5000]\nI0510 20:11:13.138355 139834041194368 evaluation.py:167] Evaluation [1000/5000]\nINFO:tensorflow:Evaluation [1500/5000]\nI0510 20:12:42.090077 139834041194368 evaluation.py:167] Evaluation [1500/5000]\nINFO:tensorflow:Evaluation [2000/5000]\nI0510 20:14:10.714957 139834041194368 evaluation.py:167] Evaluation [2000/5000]\nINFO:tensorflow:Evaluation [2500/5000]\nI0510 20:15:39.312114 139834041194368 evaluation.py:167] Evaluation [2500/5000]\nINFO:tensorflow:Evaluation [3000/5000]\nI0510 20:17:08.327113 139834041194368 evaluation.py:167] Evaluation [3000/5000]\nINFO:tensorflow:Evaluation [3500/5000]\nI0510 20:18:37.902203 139834041194368 evaluation.py:167] Evaluation [3500/5000]\nINFO:tensorflow:Evaluation [4000/5000]\nI0510 20:20:06.601424 139834041194368 evaluation.py:167] Evaluation [4000/5000]\nINFO:tensorflow:Evaluation [4500/5000]\nI0510 20:21:35.209655 139834041194368 evaluation.py:167] Evaluation [4500/5000]\nINFO:tensorflow:Evaluation [5000/5000]\nI0510 20:23:04.230208 139834041194368 evaluation.py:167] Evaluation [5000/5000]\nloading annotations into memory...\nDone (t=0.76s)\ncreating index...\nindex created!\nLoading and preparing results...\nConverting ndarray to lists...\n(500000, 7)\n0/500000\nDONE (t=4.48s)\ncreating index...\nindex created!\nRunning per image evaluation...\nEvaluate annotation type *bbox*\nDONE (t=90.24s).\nAccumulating evaluation results...\nDONE (t=14.09s).\n Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.408\n Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.599\n Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.440\n Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.214\n 
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.463\n Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.591\n Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.328\n Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.520\n Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.551\n Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.326\n Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.624\n Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.748\nINFO:tensorflow:Inference Time : 1013.67302s\nI0510 20:24:56.908151 139834041194368 evaluation.py:273] Inference Time : 1013.67302s\nINFO:tensorflow:Finished evaluation at 2021-05-10-20:24:56\nI0510 20:24:56.908450 139834041194368 evaluation.py:276] Finished evaluation at 2021-05-10-20:24:56\nINFO:tensorflow:Saving dict for global step 0: AP = 0.40841353, AP50 = 0.5991094, AP75 = 0.4402199, APl = 0.59102863, APm = 0.46323815, APs = 0.21414877, ARl = 0.74805754, ARm = 0.62390226, ARmax1 = 0.32788682, ARmax10 = 0.52038383, ARmax100 = 0.55101603, ARs = 0.32568377, box_loss = 0.0, cls_loss = 39.20923, global_step = 0, loss = 39.2865\nI0510 20:24:56.908696 139834041194368 estimator.py:2066] Saving dict for global step 0: AP = 0.40841353, AP50 = 0.5991094, AP75 = 0.4402199, APl = 0.59102863, APm = 0.46323815, APs = 0.21414877, ARl = 0.74805754, ARm = 0.62390226, ARmax1 = 0.32788682, ARmax10 = 0.52038383, ARmax100 = 0.55101603, ARs = 0.32568377, box_loss = 0.0, cls_loss = 39.20923, global_step = 0, loss = 39.2865\nINFO:tensorflow:Saving 'checkpoint_path' summary for global step 0: /content/automl/efficientdet/efficientdet-d1/model\nI0510 20:24:58.433520 139834041194368 estimator.py:2127] Saving 'checkpoint_path' summary for global step 0: /content/automl/efficientdet/efficientdet-d1/model\nI0510 20:24:58.434658 139834041194368 main.py:351] /content/automl/efficientdet/efficientdet-d1/model has no global step info: stop!\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
ec803507f7bd219324511b63c1faa1314e82a9e0
23,219
ipynb
Jupyter Notebook
notebooks/python/transactions_rmw_pattern.ipynb
artanderson/interactive-notebooks
73a4744eeabe53dfdfeb6a97d72d3969f9389700
[ "MIT" ]
11
2020-09-28T08:00:57.000Z
2021-07-21T01:40:08.000Z
notebooks/python/transactions_rmw_pattern.ipynb
artanderson/interactive-notebooks
73a4744eeabe53dfdfeb6a97d72d3969f9389700
[ "MIT" ]
19
2020-10-02T16:35:32.000Z
2022-02-12T22:46:04.000Z
notebooks/python/transactions_rmw_pattern.ipynb
artanderson/interactive-notebooks
73a4744eeabe53dfdfeb6a97d72d3969f9389700
[ "MIT" ]
17
2020-09-29T16:55:38.000Z
2022-03-22T15:03:10.000Z
43.31903
3,508
0.61889
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Implementing-Read-Write-Transactions-with-R-M-W-Pattern\" data-toc-modified-id=\"Implementing-Read-Write-Transactions-with-R-M-W-Pattern-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Implementing Read-Write Transactions with R-M-W Pattern</a></span><ul class=\"toc-item\"><li><span><a href=\"#Introduction\" data-toc-modified-id=\"Introduction-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Introduction</a></span></li><li><span><a href=\"#Prerequisites\" data-toc-modified-id=\"Prerequisites-1.2\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>Prerequisites</a></span></li><li><span><a href=\"#Initialization\" data-toc-modified-id=\"Initialization-1.3\"><span class=\"toc-item-num\">1.3&nbsp;&nbsp;</span>Initialization</a></span><ul class=\"toc-item\"><li><span><a href=\"#Ensure-database-is-running\" data-toc-modified-id=\"Ensure-database-is-running-1.3.1\"><span class=\"toc-item-num\">1.3.1&nbsp;&nbsp;</span>Ensure database is running</a></span></li><li><span><a href=\"#Connect-to-database.\" data-toc-modified-id=\"Connect-to-database.-1.3.2\"><span class=\"toc-item-num\">1.3.2&nbsp;&nbsp;</span>Connect to database.</a></span></li><li><span><a href=\"#Populate-database-with-test-data.\" data-toc-modified-id=\"Populate-database-with-test-data.-1.3.3\"><span class=\"toc-item-num\">1.3.3&nbsp;&nbsp;</span>Populate database with test data.</a></span></li></ul></li></ul></li><li><span><a href=\"#The-Problem-of-Lost-Writes\" data-toc-modified-id=\"The-Problem-of-Lost-Writes-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>The Problem of Lost Writes</a></span><ul class=\"toc-item\"><li><span><a href=\"#Test-Framework\" data-toc-modified-id=\"Test-Framework-2.1\"><span class=\"toc-item-num\">2.1&nbsp;&nbsp;</span>Test Framework</a></span></li><li><span><a href=\"#Simple-RMW-Function\" data-toc-modified-id=\"Simple-RMW-Function-2.2\"><span class=\"toc-item-num\">2.2&nbsp;&nbsp;</span>Simple RMW Function</a></span></li><li><span><a href=\"#Test-Results\" data-toc-modified-id=\"Test-Results-2.3\"><span class=\"toc-item-num\">2.3&nbsp;&nbsp;</span>Test Results</a></span></li></ul></li><li><span><a href=\"#Using-Generation-Check\" data-toc-modified-id=\"Using-Generation-Check-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Using Generation Check</a></span><ul class=\"toc-item\"><li><span><a href=\"#RMW-Function-with-Version-Check-and-Retries\" data-toc-modified-id=\"RMW-Function-with-Version-Check-and-Retries-3.1\"><span class=\"toc-item-num\">3.1&nbsp;&nbsp;</span>RMW Function with Version Check and Retries</a></span></li><li><span><a href=\"#Test-Results\" data-toc-modified-id=\"Test-Results-3.2\"><span class=\"toc-item-num\">3.2&nbsp;&nbsp;</span>Test Results</a></span></li></ul></li><li><span><a href=\"#Takeaways\" data-toc-modified-id=\"Takeaways-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>Takeaways</a></span></li><li><span><a href=\"#Clean-up\" data-toc-modified-id=\"Clean-up-5\"><span class=\"toc-item-num\">5&nbsp;&nbsp;</span>Clean up</a></span></li><li><span><a href=\"#Further-Exploration-and-Resources\" data-toc-modified-id=\"Further-Exploration-and-Resources-6\"><span class=\"toc-item-num\">6&nbsp;&nbsp;</span>Further Exploration and Resources</a></span><ul class=\"toc-item\"><li><span><a href=\"#Next-steps\" data-toc-modified-id=\"Next-steps-6.1\"><span class=\"toc-item-num\">6.1&nbsp;&nbsp;</span>Next 
steps</a></span></li></ul></li></ul></div>", "_____no_output_____" ], [ "# Implementing Read-Write Transactions with R-M-W Pattern \nThis tutorial explains how to use the Read-Modify-Write pattern in order to ensure atomicity and isolation for read-write single-record transactions. \n\nThis notebook requires Aerospike datbase running on localhost and that python and the Aerospike python client have been installed (`pip install aerospike`). Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) for additional details and the docker container.", "_____no_output_____" ], [ "## Introduction\nIn Aerospike, the transactional boundaries are \"single request, single record\". While multiple operations may be specified in a single request on a single record, each such operation can involve a single bin and only certain write operations are allowed. Therefore, neither updates involving multiple bins (e.g, \"a=a+b\") nor general logic (e.g., \"concatenate alternate letters and append\") are possible as server-side operations. Of course, UDFs allow complex logic in a transactional update of a single record, however they are not suitable for all situations for various reasons such as performance and ease. Therefore most updates entail the R-M-W pattern or Reading the record, Modifying bins on the client side, and then Writing the record updates back to the server. \n\nThe tutorial first demonstrates how read-write operations can result in lost writes in a concurrent multi-client environment. \n\nThen we show how to specify conditional writes with version check to address the problem by disallowing intereaved read-write and thus protecting against lost writes.", "_____no_output_____" ], [ "## Prerequisites\nThis tutorial assumes familiarity with the following topics:\n\n[Provide topics and links. For example:]\n- [Hello World](hello_world.ipynb)\n- [Aerospike Basic Operations](basic_operations.ipynb)", "_____no_output_____" ], [ "## Initialization", "_____no_output_____" ], [ "### Ensure database is running\nThis notebook requires that Aerospike datbase is running. \n[Include the right code cell for Java or Python from the two cells below.] 
", "_____no_output_____" ] ], [ [ "!asd >& /dev/null\n!pgrep -x asd >/dev/null && echo \"Aerospike database is running!\" || echo \"**Aerospike database is not running!**\"", "Aerospike database is running!\r\n" ] ], [ [ "### Connect to database.", "_____no_output_____" ] ], [ [ "# import the modules\nimport sys\nimport aerospike\n\n# Configure the client\nconfig = {\n 'hosts': [ ('127.0.0.1', 3000) ],\n 'policy' : {'key': aerospike.POLICY_KEY_SEND}\n}\n\n# Create a client and connect it to the cluster\ntry:\n client = aerospike.client(config).connect()\nexcept:\n print(\"failed to connect to the cluster with\", config['hosts'])\n sys.exit(1)\nprint('Client successfully connected to the database.')", "Client successfully connected to the database.\n" ] ], [ [ "### Populate database with test data.\nWe create one record with an integer bin \"gen-times-2\" (the names will become clear below), initialized to 1.", "_____no_output_____" ] ], [ [ "namespace = 'test'\ntutorial_set = 'rmw-tutorial-set'\nuser_key = 'id-1'\n# Records are addressable via a tuple of (namespace, set, user_key)\nrec_key = (namespace, tutorial_set, user_key)\nrmw_bin = 'gen-times-2'\ntry:\n # Create the record\n client.put(rec_key, {rmw_bin: 1})\nexcept Exception as e:\n print(\"error: {0}\".format(e), file=sys.stderr)\n\nprint('Test data populated.')", "Test data populated.\n" ] ], [ [ "# The Problem of Lost Writes\nIn a concurrent setting, multiple clients may be performaing Read-Modify-Write on the same record in a way that get in each other's way. Since various R-M-W transactions can interleave, a transaction can be lost, if another client updates the record without reading the transaction's update.\n\nTo demonstrate this, we make use of a record's \"generation\" or version, that is available as the record metadata, and is automatically incremented on each successful update of the record.\n\nThe integer bin “gen-times-2” holds the value that is 2 times the value of the current generation of the record. A client first reads the current generation of the record, and then updates the bin value 2 times that value.\n\nIn the case of a single client, there are no issues in maintaining the semantics of the bin. However when there are multiple clients, the interleaving of reads and writes of different transactions can violate the semantics. By updating the bin using an older generation value, it may not be 2 times the current generation, which is the constraint that we want to preserve.\n\nFirst, we will show how transaction writes are lost in a simple concurrent case by observing whether the relationship between record's current generation and the bin value is maintained. Then we will show how the problem is solved using a conditional write with version check.\n", "_____no_output_____" ], [ "## Test Framework\nWe spawn multiple (num_threads) threads to simulate concurrent access. Each thread repeatedly (num_txns) does the following:\n- waits for a random duration (with average of txn_wait_ms) \n- executes a passed-in R-M-W function that returns the failure type (string, null if success).\n\nAt the end the thread prints out the aggregate counts for each error type. 
In aggregate, they signify the likelihood of a read-write transaction failing.", "_____no_output_____" ] ], [ [ "import threading\nimport time\nimport random\n\nnum_txns = 10\ntxn_wait_ms = 500\n\ndef thread_fn(thread_id, rmw_fn):\n random.seed(thread_id)\n lost_writes_count = 0\n failures = {}\n for i in range(num_txns):\n failure = rmw_fn()\n if failure:\n if not failure in failures:\n failures[failure] = 1\n else: \n failures[failure] += 1 \n print('\\tThead {0} failures: {1}'.format(thread_id, failures))\n return\n \n \ndef run_test(num_threads, rmw_fn):\n threads = list()\n print('{0} threads, {1} transcations per thread:'.format(num_threads, num_txns))\n for thread_index in range(num_threads):\n thread = threading.Thread(target=thread_fn, args=(thread_index, rmw_fn))\n threads.append(thread)\n thread.start()\n for thread in threads:\n thread.join()\n return", "_____no_output_____" ] ], [ [ "## Simple RMW Function\nNext we implement a simple RMW function simple_rmw_fn to pass into the above framework. The function: \n- Reads the record.\n- Computes new value of gen_times_2 (= 2 * read generation). Then waits for a random duration, with average of write_wait_ms average to simulate the application computation time between read and write.\n- Writes the new bin value. In the same (multi-op) request, reads back the record for the record's new generation value.\n- Returns \"lost writes\" if the updated value of gen_times_2/2 is smaller than the new gen. If they are the same, it returns None.", "_____no_output_____" ] ], [ [ "import aerospike_helpers.operations.operations as op_helpers\n\nwrite_wait_ms = 50\n\ndef rmw_simple():\n #read\n _, meta, bins = client.get(rec_key)\n # wait before write to simulate computation time\n time.sleep(random.uniform(0,2*write_wait_ms/1000.0))\n # modify \n read_gen = meta['gen']\n new_rmw_bin_value = 2*(read_gen+1)\n # write and read back bin_inc to compare\n ops = [op_helpers.write(rmw_bin, new_rmw_bin_value),\n op_helpers.read(rmw_bin)]\n try:\n _, meta, bins = client.operate(rec_key, ops)\n except Exception as e:\n print(\"error: {0}\".format(e), file=sys.stderr)\n exit(-1)\n # compare new_rmw_bin_value//2 and new gen; if different return 'lost writes'\n new_gen = meta['gen']\n if new_rmw_bin_value//2 != new_gen: \n #print('gen: {0}, bin: {1}, lost: {2}'.format(new_gen, new_rmw_bin_value//2, new_gen-new_rmw_bin_value//2))\n return 'lost writes'\n return None", "_____no_output_____" ] ], [ [ "## Test Results\nFor various values of concurrency (num_threads), we can see that with greater concurrent updates, a larger percentage of read-write transactions are lost, meaning greater likelihood of the semantics of the gen_times_2 bin not being preserved.", "_____no_output_____" ] ], [ [ "run_test(num_threads=1, rmw_fn=rmw_simple)\nrun_test(num_threads=2, rmw_fn=rmw_simple)\nrun_test(num_threads=3, rmw_fn=rmw_simple)\nrun_test(num_threads=4, rmw_fn=rmw_simple)", "1 threads, 10 transcations per thread:\n\tThead 0 failures: {}\n2 threads, 10 transcations per thread:\n\tThead 0 failures: {'lost writes': 5}\n\tThead 1 failures: {'lost writes': 6}\n3 threads, 10 transcations per thread:\n\tThead 0 failures: {'lost writes': 4}\n\tThead 1 failures: {'lost writes': 8}\n\tThead 2 failures: {'lost writes': 7}\n4 threads, 10 transcations per thread:\n\tThead 0 failures: {'lost writes': 9}\n\tThead 3 failures: {'lost writes': 8}\n\tThead 1 failures: {'lost writes': 8}\n\tThead 2 failures: {'lost writes': 8}\n" ] ], [ [ "# Using Generation Check\nTo solve the problem of lost 
writes, the simple R-M-W is modified with how the Write is done: by making it conditional on the record not having been modified since the Read. It is a \"check-and-set (CAS)\" like operation that succeeds if the record generation (version) is still the same as at the time of Read. Otherwise it fails, and the client must retry the whole R-M-W pattern. The syntax and usage is shown in the code below.", "_____no_output_____" ], [ "## RMW Function with Version Check and Retries\nIn the rmw_with_gen_check function below, a failed read-write due to generation mismatch is retried for max_retries attempts or until the write is successful. Each retry is attempted after a exponential backoff wait of (retry_number ** 2) * retry_wait_ms.\n\nA write can still fail after max_retries attempts, and the client can suitably handle it. However no writes are overwritten or lost, and the intended semantics of the gen-times-2 bin is always preserved.\n\nWe perform the same concurrent test with the version check at Write. We expect no interleaved_writes reported in any thread.", "_____no_output_____" ] ], [ [ "from aerospike_helpers.operations import operations as op_helpers\nfrom aerospike import exception as ex\n\nmax_retries = 3\nretry_wait_ms = 20\n\ndef rmw_with_gen_check():\n retryRMWCount = 0\n done = False\n while (not done):\n #read\n _, meta, bins = client.get(rec_key)\n # wait before write to simulate computation time\n time.sleep(random.uniform(0,2*write_wait_ms/1000.0))\n # modify \n read_gen = meta['gen']\n new_rmw_bin_value = 2*(read_gen+1)\n # write and read back bin_inc to compare\n ops = [op_helpers.write(rmw_bin, new_rmw_bin_value),\n op_helpers.read(rmw_bin)]\n write_policy = { 'gen': aerospike.POLICY_GEN_EQ }\n try:\n _, meta, bins = client.operate(rec_key, ops, meta={'gen': read_gen}, policy=write_policy)\n except ex.RecordGenerationError as e:\n if retryRMWCount < max_retries:\n retryRMWCount += 1\n time.sleep((2**retryRMWCount)*retry_wait_ms/1000.0) \n else:\n return 'max retries exceeded' \n except Exception as e:\n print(\"error: {0}\".format(e), file=sys.stderr)\n exit(-1)\n else:\n done = True \n # compare new_rmw_bin_value//2 and new gen; if different \n new_gen = meta['gen']\n if new_rmw_bin_value//2 != new_gen: \n return 'lost writes'\n return None", "_____no_output_____" ] ], [ [ "## Test Results\nLet's execute for various levels of concurrency and see the results. We expect to see no lost writes. 
Even when max-retries are exceeded, transaction and database integrity is preserved.", "_____no_output_____" ] ], [ [ "run_test(num_threads=2, rmw_fn=rmw_with_gen_check)\nrun_test(num_threads=3, rmw_fn=rmw_with_gen_check)\nrun_test(num_threads=4, rmw_fn=rmw_with_gen_check)", "2 threads, 10 transcations per thread:\n\tThead 1 failures: {}\n\tThead 0 failures: {}\n3 threads, 10 transcations per thread:\n\tThead 1 failures: {}\n\tThead 0 failures: {}\n\tThead 2 failures: {}\n4 threads, 10 transcations per thread:\n\tThead 0 failures: {}\n\tThead 3 failures: {'max retries exceeded': 1}\n\tThead 2 failures: {'max retries exceeded': 1}\n\tThead 1 failures: {'max retries exceeded': 2}\n" ] ], [ [ "# Takeaways\nIn the tutorial we showed:\n- the need for read-write transactions in Aerospike to use the R-M-W pattern \n- how writes can be overwritten and lost in a concurrent environment if performed simply\n- how the developer can ensure atomicity and isolation of a read-write transaction by using version check logic and syntax.", "_____no_output_____" ], [ "# Clean up\nRemove data and close connection.", "_____no_output_____" ] ], [ [ "client.truncate(namespace, tutorial_set, 0)\n# Close the connection to the Aerospike cluster\nclient.close()\nprint('Removed tutorial data. Connection closed.')", "Removed tutorial data. Connection closed.\n" ] ], [ [ "# Further Exploration and Resources\nFor further exploration of transactions support in Aerospike, check out the following resources:\n\n- Blog posts\n - [Developers: Understanding Aerospike Transactions](https://www.aerospike.com/blog/developers-understanding-aerospike-transactions/)\n - [Twelve Do's of Consistency in Aerospike](https://www.aerospike.com/blog/twelve-dos-of-consistency-in-aerospike/)\n- Video\n - [Strong Consistency in Databases. What does it actually guarantee?](https://www.aerospike.com/resources/videos/strong-consistency-in-databases-what-does-it-actually-guarantee/)", "_____no_output_____" ], [ "## Next steps\n\nVisit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) to run additional Aerospike notebooks. To run a different notebook, download the notebook from the repo to your local machine, and then click on File->Open, and select Upload.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec8055f51743bbd339bff5ada13b984eab51bf1b
23,162
ipynb
Jupyter Notebook
data_handler/motifs/motifs_for_link_prediction_hetero-to-homo.ipynb
amazon-research/panrep
57e6f71bb70c0908f3db28be97af0d818a863e19
[ "Apache-2.0" ]
10
2020-12-18T22:53:43.000Z
2021-12-13T19:07:25.000Z
data_handler/motifs/motifs_for_link_prediction_hetero-to-homo.ipynb
amazon-research/panrep
57e6f71bb70c0908f3db28be97af0d818a863e19
[ "Apache-2.0" ]
null
null
null
data_handler/motifs/motifs_for_link_prediction_hetero-to-homo.ipynb
amazon-research/panrep
57e6f71bb70c0908f3db28be97af0d818a863e19
[ "Apache-2.0" ]
1
2021-10-30T12:33:55.000Z
2021-10-30T12:33:55.000Z
32.853901
535
0.531776
[ [ [ "# %load imdb_data_to_graph.py\nimport numpy as np\nimport dgl\nimport pickle\nimport os\nfrom sklearn.preprocessing import MultiLabelBinarizer\nimport torch\nimport sys\nsys.path.append('../../panrep/') \nimport load_data\nimport argparse\nimport copy\nimport itertools", "_____no_output_____" ], [ " parser = argparse.ArgumentParser(description='PanRep')\n parser.add_argument(\"--dropout\", type=float, default=0.2,\n help=\"dropout probability\")\n parser.add_argument(\"--n-hidden\", type=int, default=60,\n help=\"number of hidden units\") # use 16, 2 for debug\n parser.add_argument(\"--gpu\", type=int, default=0,\n help=\"gpu\")\n parser.add_argument(\"--lr\", type=float, default=1e-2,\n help=\"learning rate\")\n parser.add_argument(\"--n-bases\", type=int, default=20,\n help=\"number of filter weight matrices, default: -1 [use all]\")\n parser.add_argument(\"--n-layers\", type=int, default=3,\n help=\"number of propagation rounds\")\n parser.add_argument(\"-e\", \"--n-epochs\", type=int, default=50,\n help=\"number of training epochs for decoder\")\n parser.add_argument(\"-ec\", \"--n-cepochs\", type=int, default=400,\n help=\"number of training epochs for classification\")\n parser.add_argument(\"-num_masked\", \"--n-masked-nodes\", type=int, default=100,\n help=\"number of masked nodes\")\n parser.add_argument(\"-pct_masked_links\", \"--pct-masked-links\", type=int, default=0.5,\n help=\"number of masked links\")\n parser.add_argument(\"-negative_rate\", \"--negative-rate\", type=int, default=4,\n help=\"number of negative examples per masked link\")\n\n\n parser.add_argument(\"-d\", \"--dataset\", type=str, required=True,\n help=\"dataset to use\")\n parser.add_argument(\"-en\", \"--encoder\", type=str, required=True,\n help=\"Encoder to use\")\n parser.add_argument(\"--l2norm\", type=float, default=0.0000,\n help=\"l2 norm coef\")\n parser.add_argument(\"--relabel\", default=False, action='store_true',\n help=\"remove untouched nodes and relabel\")\n parser.add_argument(\"--use-self-loop\", default=False, action='store_true',\n help=\"include self feature as a special relation\")\n parser.add_argument(\"--use-infomax-loss\", default=False, action='store_true',\n help=\"use infomax task supervision\")\n parser.add_argument(\"--use-reconstruction-loss\", default=True, action='store_true',\n help=\"use feature reconstruction task supervision\")\n parser.add_argument(\"--node-masking\", default=False, action='store_true',\n help=\"mask a subset of node features\")\n parser.add_argument(\"--loss-over-all-nodes\", default=True, action='store_true',\n help=\"compute the feature reconstruction loss over all nods or just the masked\")\n parser.add_argument(\"--link-prediction\", default=False, action='store_true',\n help=\"use link prediction as supervision task\")\n parser.add_argument(\"--mask-links\", default=True, action='store_true',\n help=\"mask the links to be predicted\")\n\n parser.add_argument(\"--batch-size\", type=int, default=100,\n help=\"Mini-batch size. 
If -1, use full graph training.\")\n parser.add_argument(\"--model_path\", type=str, default=None,\n help='path for save the model')\n parser.add_argument(\"--fanout\", type=int, default=10,\n help=\"Fan-out of neighbor sampling.\")\n\n fp = parser.add_mutually_exclusive_group(required=False)\n fp.add_argument('--validation', dest='validation', action='store_true')\n fp.add_argument('--testing', dest='validation', action='store_false')\n parser.set_defaults(validation=True)\n dataset='wn18'\n args = parser.parse_args(['--dataset', dataset,'--encoder', 'RGCN'])", "_____no_output_____" ], [ "train_edges, test_edges, valid_edges, train_g, valid_g, test_g, featless_node_types=\\\n load_data.load_hetero_link_pred_data(args)", "Using device cuda:0\n" ], [ "mapping_dict={}\nedge_lists={}\nreverse_mapping_dict={}", "_____no_output_____" ], [ "g_without_features=copy.deepcopy(train_g)\nfor ntype in g_without_features.ntypes:\n del g_without_features.nodes[ntype].data['h_f']\nhomo_g=dgl.to_homo(g_without_features)\n# return the mapping among id of the homogenous graph and id and ntype of the hetero graph\n#not needed probabily\n#mapping_dict=[(homo_g.ndata['_ID'][i],g.ntypes[int(homo_g.ndata['_TYPE'][i])]) for i in range(len(homo_g.ndata['_TYPE']))]\n\nu,v=homo_g.all_edges()\n\nu=u.data.numpy()\nv=v.data.numpy()\nu=u.astype(int)\nv=v.astype(int)\nedges=np.concatenate((np.expand_dims(u, axis=0),np.expand_dims(v, axis=0)),axis=0).transpose()\n", "_____no_output_____" ], [ "import pickle\ndata_folder=\"../../data/kg/wn18/\"", "_____no_output_____" ], [ "np.savetxt(data_folder+\"lp_edge_list_complete.txt\",edges,fmt='%i')", "_____no_output_____" ], [ "motif_features={}", "_____no_output_____" ] ], [ [ "For the IMDB type of graph where a lot of relationships are among different entinties the motif representation\nwill be quite poor. Consider combining several relationtypes to get more interesting signals...", "_____no_output_____" ] ], [ [ "folder='../../../../PGD-orbit-master/'\netype='complete'\ndataset='wn18_lp'\n\n# the row index corresponds to the original id and the value corresponds to the internal id used by nasreen\nvertex_mapping_2_nasreens_ids=np.loadtxt(folder+\"vertex_mapping.txt\") \nmapping_from_nasreens_ids={int(vertex_mapping_2_nasreens_ids[i]): i for i in range(len(vertex_mapping_2_nasreens_ids))}\nmotif_per_node=np.loadtxt(folder+dataset+'_'+etype+\"-node.txt\",skiprows=1,delimiter=',')\n\nfor ntype in train_g.ntypes:\n train_g.nodes[ntype].data['motifs']=torch.zeros((train_g.number_of_nodes(ntype),motif_per_node.shape[1]-1)).int()", "_____no_output_____" ] ], [ [ "The following code extracts the motifs per node and then maps it as node features in the original graph by using some id mapping. 
It also performs some check to validate that the predicted degree is the same with the actual degree and hence the vertex mapping is correct.", "_____no_output_____" ] ], [ [ "#To check consistency see if deggrees same.\nedges_dict={}\nfor e in edges:\n #if e[0]==3 or e[1]==3:\n # print(e)\n e=tuple(e)\n if e not in edges_dict:\n edges_dict[e]=1\nedges_li=list(edges_dict.keys())\n\ndglonelistgraph=dgl.heterograph(data_dict={('0','1','0'):edges_li})\nntg= dgl.to_networkx(dglonelistgraph)\ndgl_grp=dgl.DGLGraph(ntg)\nbidercted_g=dgl.transform.to_bidirected(dgl_grp)", "_____no_output_____" ], [ "\nc=0\nfor i in range(motif_per_node.shape[0]):\n homo_id=int(mapping_from_nasreens_ids[motif_per_node[i,0]-1])\n #print('--'+str(homo_id))\n if ((bidercted_g.in_degree(homo_id)))==(int(motif_per_node[i,1])):\n c+=1\n else:\n print(bidercted_g.in_degree(homo_id))\n print(int(motif_per_node[i,1]))\n ntype=train_g.ntypes[homo_g.ndata['_TYPE'][homo_id]]\n ntype_id=homo_g.ndata['_ID'][homo_id]\n train_g.nodes[ntype].data['motifs'][ntype_id]=torch.tensor(motif_per_node[i,1:]).int()\nprint(c==motif_per_node.shape[0])", "4\n3\n3\n2\n4\n3\n3\n2\n5\n4\n3\n2\n2\n1\nFalse\n" ], [ "#save graph with features\ndata_folder=\"../../data/kg/wn18/\"\n", "_____no_output_____" ], [ "train_g.nodes['word'].data\n", "_____no_output_____" ] ], [ [ "The following code retrieves the motif characteristics of each edge in the graph", "_____no_output_____" ] ], [ [ "etype='complete'\nmotif_per_edge=np.loadtxt(folder+dataset+'_'+etype+\"-edge.txt\",skiprows=1,delimiter=',')\n\n\n ", "_____no_output_____" ] ], [ [ "Some nodes are disconnected, for this the mapping from nasreen will be -1", "_____no_output_____" ] ], [ [ "print(len(np.unique(vertex_mapping_2_nasreens_ids)))\nprint(len(vertex_mapping_2_nasreens_ids))\nprint(homo_g.number_of_nodes())\ncondition = (vertex_mapping_2_nasreens_ids==-1)\nprint(len(vertex_mapping_2_nasreens_ids[condition]))", "40943\n40943\n40943\n0\n" ] ], [ [ "Nasreens code returns bidirectional graph by considering both directions of the edges of the original directed graphs.\nHence the returned graph has some edges that do not exist in the original graph. 
For now I just skip these.", "_____no_output_____" ], [ "The following creates a new edge, called motif that has the motif data.", "_____no_output_____" ] ], [ [ "# currently works for one node type\n\nedata=[]\nsrc_id=[]\ndest_id=[]\n#dict_motif_edges={}\n#dict_motif_edata={}\nfor i in range(motif_per_edge.shape[0]):\n \n homo_id_dest=int(mapping_from_nasreens_ids[motif_per_edge[i,0]-1])\n homo_id_src=int(mapping_from_nasreens_ids[motif_per_edge[i,1]-1])\n #print('--'+str(homo_id))\n \n ntype_src=train_g.ntypes[homo_g.ndata['_TYPE'][homo_id_src]]\n ntype_id_src=homo_g.ndata['_ID'][homo_id_src]\n ntype_dest=train_g.ntypes[homo_g.ndata['_TYPE'][homo_id_dest]]\n ntype_id_dest=homo_g.ndata['_ID'][homo_id_dest]\n homo_e_id=homo_g.edge_id(homo_id_src,homo_id_dest)\n homo_e_id=homo_e_id.data.cpu().numpy()\n n_etype=(ntype_src,'motif_edge',ntype_dest)\n src_id+=[int(ntype_id_src.data.cpu().numpy())]\n dest_id+=[int(ntype_id_dest.data.cpu().numpy())]\n edata+=[torch.tensor(motif_per_edge[i,2:]).int()]\nedata=torch.stack(edata)\nsrc_id=torch.tensor(src_id).int()\ndest_id=torch.tensor(dest_id).int()\n", "_____no_output_____" ], [ "edata", "_____no_output_____" ], [ "\ndata={\"train_edges\":train_edges,\"test_edges\":test_edges,\"valid_edges\":valid_edges, \"train_g\":train_g,\"valid_g\":valid_g,\n \"test_g\":test_g,\"featless_node_types\": featless_node_types, \"src_id\":src_id,\"dest_id\":dest_id,\"edata\":edata}\npickle.dump(data, open(os.path.join(data_folder, \"data_lp_motifs.pickle\"), \"wb\"),\n protocol=4);", "_____no_output_____" ], [ "sum(train_g.nodes['word'].data['motifs'])", "_____no_output_____" ], [ "np.where(~train_g.nodes['word'].data['motifs'].data.cpu().numpy().any(axis=0))[0]", "_____no_output_____" ] ], [ [ "Check that the eid for the first edge in dict_homo_edge is 1 so that the dict_homo_edata are correctly aligned", "_____no_output_____" ] ], [ [ "print(len(np.where(~dict_motif_edata[e].data.cpu().numpy()[:,:].any(axis=1))[0]))", "_____no_output_____" ] ], [ [ "Here we have multiple edge ids in the hetero graph that possibly map to the same id in the graph of nasreen. The\nfollowing creates the motif edata for each of the existing links. 
Since the current design treats it as homogenous\nfocus on above.", "_____no_output_____" ] ], [ [ "\n\n", "_____no_output_____" ], [ "result=True\nfor etype in g.etypes:\n g.edges[etype].data['motifs']=torch.zeros((g.number_of_edges(etype),motif_per_edge.shape[1]-2)).int()\nfor i in range(motif_per_edge.shape[0]):\n \n homo_id_dest=int(mapping_from_nasreens_ids[motif_per_edge[i,0]-1])\n homo_id_src=int(mapping_from_nasreens_ids[motif_per_edge[i,1]-1])\n #print('--'+str(homo_id))\n \n #print(str(homo_id_src)+','+str(homo_id_dest))\n ntype_src=g.ntypes[homo_g.ndata['_TYPE'][homo_id_src]]\n ntype_id_src=homo_g.ndata['_ID'][homo_id_src]\n ntype_dest=g.ntypes[homo_g.ndata['_TYPE'][homo_id_dest]]\n ntype_id_dest=homo_g.ndata['_ID'][homo_id_dest]\n homo_e_id=homo_g.edge_id(homo_id_src,homo_id_dest)\n homo_e_id=homo_e_id.data.cpu().numpy()\n\n \n if len(homo_e_id)!=0:\n homo_e_id=homo_e_id[0]\n #print('homo_id '+str(homo_e_id))\n cetype=g.etypes[homo_g.edata['_TYPE'][homo_e_id]]\n hetero_e_id=homo_g.edata['_ID'][homo_e_id]\n # TODO probably here we need to add the features for \n # all edge types that may contain this specific src-dest pair\n \n het_e_id=g.edge_id(ntype_id_src,ntype_id_dest,etype=(ntype_src,cetype,ntype_dest))\n het_e_id=het_e_id.data.cpu().numpy()\n \n print('hetero_id '+str(het_e_id))\n #print(cetype)\n if len(het_e_id)<=1:\n het_e_id=int(het_e_id)\n result=result and (hetero_e_id==het_e_id)\n #print(result)\n g.edges[cetype].data['motifs'][het_e_id]=torch.tensor(motif_per_edge[i,2:]).int()\n else:\n # for some edge type ( participated by ) we may have multiple egdes of the same type among actor-movies.\n for eid in het_e_id:\n g.edges[cetype].data['motifs'][eid]=torch.tensor(motif_per_edge[i,2:]).int()\nprint(result)", "_____no_output_____" ] ], [ [ "SOS The same edge that corresponds to different edge types is counted multiple timed by the dgl.graph.in_degrees implementation. On the other hand multiple edges are ignored in Nasreens code. This may lead to a discrepancy in the degree reported by her code and dgl. ", "_____no_output_____" ] ], [ [ "#(torch.sum(,1))\netype='written_by'\nprint(len(g.edges[etype].data['motifs']))\nprint(len(np.where(~g.edges[etype].data['motifs'].data.cpu().numpy().any(axis=1))[0]))\nsum(g.edges[etype].data['motifs'].data.cpu().numpy())", "_____no_output_____" ], [ "\npickle.dump(g, open(os.path.join(data_folder, \"graph_reduced_m.pickle\"), \"wb\"),\n protocol=4);", "_____no_output_____" ], [ "import pickle\ndata_folder=\"../data/kg/wn18/\"", "_____no_output_____" ], [ "g=pickle.load(open(os.path.join(data_folder, \"graph_reduced_m.pickle\"), \"rb\"))", "_____no_output_____" ], [ "g.edges['12'].data", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec80581a0f69f31552d0b45d661d146f7b2062f8
77,672
ipynb
Jupyter Notebook
datasets/cil-gdpcir/ensemble.ipynb
RadiantMLHub/PlanetaryComputerExamples
cd7f7f2f19a369d51f8fe991cb7103e560c74e22
[ "MIT" ]
null
null
null
datasets/cil-gdpcir/ensemble.ipynb
RadiantMLHub/PlanetaryComputerExamples
cd7f7f2f19a369d51f8fe991cb7103e560c74e22
[ "MIT" ]
1
2022-03-29T21:08:22.000Z
2022-03-30T22:24:44.000Z
datasets/cil-gdpcir/ensemble.ipynb
RadiantMLHub/PlanetaryComputerExamples
cd7f7f2f19a369d51f8fe991cb7103e560c74e22
[ "MIT" ]
1
2022-03-24T17:23:17.000Z
2022-03-24T17:23:17.000Z
50.600651
4,550
0.515398
[ [ [ "## Creating a cross-model ensemble using STAC", "_____no_output_____" ], [ "This tutorial builds a cross-collection ensemble of GDPCIR bias corrected and downscaled data, and plots a single variable time series for the ensemble.", "_____no_output_____" ] ], [ [ "# required to locate and authenticate with the stac collection\nimport planetary_computer\nimport pystac_client\nimport pystac\n\n# required to load a zarr array using xarray\nimport xarray as xr\n\n# optional imports used in this notebook\nimport pandas as pd\nfrom dask.diagnostics import ProgressBar\nfrom tqdm.auto import tqdm", "_____no_output_____" ] ], [ [ "### Understanding the GDPCIR collections\n\nThe [CIL-GDPCIR datsets](https://planetarycomputer-staging.microsoft.com/dataset/group/cil-gdpcir) are grouped into several collections, depending on the license the data are provided under.\n\n- [CIL-GDPCIR-CC0](https://planetarycomputer-staging.microsoft.com/dataset/cil-gdpcir-cc0) - provided in public domain using a [CC 1.0 Universal Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/)\n- [CIL-GDPCIR-CC-BY](https://planetarycomputer-staging.microsoft.com/dataset/cil-gdpcir-cc-by) - provided under a [CC Attribution 4.0 License](https://creativecommons.org/licenses/by/4.0/)\n- [CIL-GDPCIR-CC-BY-SA](https://planetarycomputer-staging.microsoft.com/dataset/cil-gdpcir-cc-by-sa) - provided under a [CC Attribution-ShareAlike 4.0 License](https://creativecommons.org/licenses/by-sa/4.0/)\n\nNote that the first group, CC0, places no restrictions on the data. The second two (CC-BY and CC-BY-SA) require citations of the climate models these datasets are derived from, and the third group, shared under a CC-BY-SA 4.0 license, requires that derived works (that means your work!) be shared under the same license. See the [ClimateImpactLab/downscaleCMIP6 README](github.com/ClimateImpactLab/downscaleCMIP6) for the citation information for each GCM.\n\nAlso, note that none of the descriptions of these licenses on this page, in this repository, and associated with this repository constitute legal advice. We are highlighting some of the key terms of these licenses, but this information should not be considered a replacement for the actual license terms, which are provided on the Creative Commons website at the links above.\n\n### Structure of the STAC collection\n\nThe data assets in this collection are a set of [Zarr](https://zarr.readthedocs.io/) groups which can be opend by tools like [xarray](https://xarray.pydata.org/). Each Zarr group contains a single data variable (either `pr`, `tasmax`, or `tasmin`). The Planetary Computer provides a single STAC item per experiment, and each STAC item has one asset per data variable.\n\nAltogether, the collection is just over 21TB, with 247,997 individual files. The STAC collection is here to help search and make sense of this huge archive!\n\nFor example, let's take a look at the CC0 collection:", "_____no_output_____" ] ], [ [ "collection_cc0 = pystac.read_file(\n \"https://planetarycomputer-staging.microsoft.com/api/stac/v1/collections/cil-gdpcir-cc0\" # noqa\n)\ncollection_cc0", "_____no_output_____" ], [ "collection_cc0.summaries.to_dict()", "_____no_output_____" ] ], [ [ "The CC0 (Public Domain) collection has three models, from which a historical scenario and four future simulations are available. \n\n*Note that not all models provide all simulations. 
See the [ClimateImpactLab/downscaleCMIP6 README](https://github.com/ClimateImpactLab/downscaleCMIP6) for a list of the available model/scenario/variable combinations.*", "_____no_output_____" ], [ "### Querying the STAC API\n\nUse the Planetary Computer STAC API to find the exact data you want. You'll most likely want to query on the controlled vocabularies fields, under the `cmip6:` prefix.. See the collection summary for the set of allowed values for each of those.", "_____no_output_____" ] ], [ [ "catalog = pystac_client.Client.open(\n \"https://planetarycomputer-staging.microsoft.com/api/stac/v1\"\n)", "_____no_output_____" ] ], [ [ "### Combining collections to form a custom ensemble", "_____no_output_____" ], [ "As an example, if you would like to use both the CC0 and CC-BY collections, you can combine them as follows:", "_____no_output_____" ] ], [ [ "search = catalog.search(\n collections=[\"cil-gdpcir-cc0\", \"cil-gdpcir-cc-by\"],\n query={\"cmip6:experiment_id\": {\"eq\": \"ssp370\"}},\n)\nensemble = search.get_all_items()\nlen(ensemble)", "_____no_output_____" ], [ "import collections\n\ncollections.Counter(x.collection_id for x in ensemble)", "_____no_output_____" ] ], [ [ "### Reading a single variable across models into xarray", "_____no_output_____" ] ], [ [ "# select this variable ID for all models in a collection\nvariable_id = \"tasmax\"\n\ndatasets_by_model = []\n\nfor item in tqdm(ensemble):\n signed = planetary_computer.sign(item)\n asset = signed.assets[variable_id]\n datasets_by_model.append(\n xr.open_dataset(asset.href, **asset.extra_fields[\"xarray:open_kwargs\"])\n )\n\nall_datasets = xr.concat(\n datasets_by_model,\n dim=pd.Index([ds.attrs[\"source_id\"] for ds in datasets_by_model], name=\"model\"),\n combine_attrs=\"drop_conflicts\",\n)", "_____no_output_____" ], [ "all_datasets", "_____no_output_____" ] ], [ [ "### Subsetting the data\n\nNow that the metadata has been loaded into xarray, you can use xarray's methods for [Indexing and Selecting Data](https://xarray.pydata.org/en/latest/user-guide/indexing.html) to extract the subset the arrays to the portions meaningful to your analysis.\n\nNote that the data has not been read yet - this is simply working with the coordinates to schedule the task graph using [dask](https://docs.xarray.dev/en/latest/user-guide/dask.html).", "_____no_output_____" ] ], [ [ "# let's select a subset of the data for the first five days of 2020 over Japan.\n# Thanks to https://gist.github.com/graydon/11198540 for the bounding box!\nsubset = all_datasets.tasmax.sel(\n lon=slice(129.408463169, 145.543137242),\n lat=slice(31.0295791692, 45.5514834662),\n time=slice(\"2020-01-01\", \"2020-01-05\"),\n)", "_____no_output_____" ], [ "with ProgressBar():\n subset = subset.compute()", "[########################################] | 100% Completed | 12.3s\n" ], [ "subset", "_____no_output_____" ] ], [ [ "At this point, you could do anything you like with the data. See the great [xarray getting started guide](https://xarray.pydata.org/en/latest/getting-started-guide/quick-overview.html#) for more information. For now, we'll plot it all!", "_____no_output_____" ] ], [ [ "subset.plot(row=\"model\", col=\"time\");", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec807628c9fe64153cf55a48aa9da27e4c9eeafe
164,473
ipynb
Jupyter Notebook
example_notebooks/predicting_vib_thermo.ipynb
ml-evs/modnet
14e391eea664c4d65d49432d451588278c643060
[ "MIT" ]
null
null
null
example_notebooks/predicting_vib_thermo.ipynb
ml-evs/modnet
14e391eea664c4d65d49432d451588278c643060
[ "MIT" ]
null
null
null
example_notebooks/predicting_vib_thermo.ipynb
ml-evs/modnet
14e391eea664c4d65d49432d451588278c643060
[ "MIT" ]
1
2020-06-19T12:05:26.000Z
2020-06-19T12:05:26.000Z
334.294715
38,028
0.913311
[ [ [ "# Predicting vibrational thermodynamics\n\nThe vibrational entropy, enthalpy, free energy and specific heat will be predicted for a series of 5 semiconductors from the Materials Project with following MP IDs: mp-14363, mp-988, mp-38487, mp-559200, mp-4661.\n\nThe predicted results will be compared with DFPT computes values.", "_____no_output_____" ] ], [ [ "import sys\nfrom modnet.models import MODNetModel\nfrom modnet.preprocessing import MODData", "_____no_output_____" ] ], [ [ "## Load the model", "_____no_output_____" ] ], [ [ "model = MODNetModel.load('../pretrained/vib_thermo')", "_____no_output_____" ] ], [ [ "## Create MODData\nIn order to predict the thermodynamics for the above mentioned compounds, a MODData object should be constructed.\nIn order to do so, 2 steps are required: (i) creation of a MODData object with structures and optionally MP IDs and (ii) featurization by invoking the featurize() method. The structures were querried from the MP Rester API.", "_____no_output_____" ] ], [ [ "import pandas as pd\ndf = pd.read_pickle('data/df_thermo_new.pkl')\ndf['natoms'] = [8,7,14,7,3]\ndf", "_____no_output_____" ], [ "md = MODData(df['structure'],mpids = df.index)", "_____no_output_____" ], [ "md.featurize(fast=True)", "Computing features, this can take time...\nFast featurization on, retrieving from database...\nRetrieved features for 5 out of 5 materials\nData has successfully been featurized!\n" ] ], [ [ "## Predict", "_____no_output_____" ] ], [ [ "df_predictions = model.predict(md)", "_____no_output_____" ] ], [ [ "## Plot of the results, compared with DFPT", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\n\nfor mpid in df_predictions.index:\n temp = range(5,801,20)\n S_true = df.loc[mpid,['S_{}'.format(T) for T in temp]].values\n S_pred = df_predictions.loc[mpid,['S_{}_atom'.format(T) for T in temp]].values*df.loc[mpid,'natoms']\n \n C_v_true = df.loc[mpid,['C_v_{}'.format(T) for T in temp]].values\n C_v_pred = df_predictions.loc[mpid,['C_v_{}_atom'.format(T) for T in temp]].values*df.loc[mpid,'natoms']\n \n H_true = df.loc[mpid,['H_{}'.format(T) for T in temp]].values/1000\n H_pred = df_predictions.loc[mpid,['H_{}_atom'.format(T) for T in temp]].values*df.loc[mpid,'natoms']/1000\n \n U_true = df.loc[mpid,['U_{}'.format(T) for T in temp]].values/1000\n U_pred = df_predictions.loc[mpid,['U_{}_atom'.format(T) for T in temp]].values*df.loc[mpid,'natoms']/1000\n \n fig,ax = plt.subplots()\n ax.set_title(mpid)\n ax.plot(temp,S_true)\n ax.plot(temp,S_pred,'--')\n \n ax.plot(temp,C_v_true)\n ax.plot(temp,C_v_pred,'--')\n \n ax.plot(temp,H_true)\n ax.plot(temp,H_pred,'--')\n \n ax.plot(temp,U_true)\n ax.plot(temp,U_pred,'--')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec8089aac02799c4be76115e233fefc3f931e8fa
166,709
ipynb
Jupyter Notebook
_exercise_answers/LNM_class_06-solved.ipynb
Fikrahelhifzi/Python_Numerical_Method
538713303d4493f32e447137ac2a707cbc2a146f
[ "MIT" ]
null
null
null
_exercise_answers/LNM_class_06-solved.ipynb
Fikrahelhifzi/Python_Numerical_Method
538713303d4493f32e447137ac2a707cbc2a146f
[ "MIT" ]
null
null
null
_exercise_answers/LNM_class_06-solved.ipynb
Fikrahelhifzi/Python_Numerical_Method
538713303d4493f32e447137ac2a707cbc2a146f
[ "MIT" ]
null
null
null
182.995609
142,017
0.866438
[ [ [ "![nerw_zad6_stopka.jpg](attachment:nerw_zad6_stopka.jpg)\n\n# Programming Language With Numerical Methods\n<heads>\nJoanna Kozuchowska, Msc\n \n## Class 06. Functions", "_____no_output_____" ], [ "**Whenever you learn a new feature, you should try it out in interactive mode and make errors on purpose to see what goes wrong and what types of errors you run into.**", "_____no_output_____" ], [ "**Exercise 1**\n\nWrite a function calculating an area of a circle, given its radius.", "_____no_output_____" ] ], [ [ "import math as m # we need value of pi from math library\n\ndef area_circle(radius):\n return radius**2 * m.pi\n\nprint(\"Area of a circle with radius 1 is equal\", area_circle(1))", "Area of a circle with radius 1 is equal 3.141592653589793\n" ] ], [ [ "**Exercise 2**\n\nMove around the lines of the program: move the definition of a function after the function call. Run the program to see what error messages you get.\n\nWhile running in Jupyter Notebooks, restart kernel first (to clear all the existing variables).", "_____no_output_____" ], [ "**Exercise 3**\n\nWrite a function computing an area of an arbitrary triangle given the lengths of its edges. Write a function which computes area of a triangle, given its vertices. Remember to check, if the edges create a triangle.", "_____no_output_____" ] ], [ [ "def is_triangle(a, b, c):\n if a + c > b and a + b > c and b + c > a:\n return True\n else:\n return False\n\n\ndef area_triangle(a, b, c):\n # before computing the area of triangle it's recommended to check if a triangle exists\n # if it exists\n if is_triangle(a, b, c):\n # heron's formula\n s = (a + b + c) / 2 \n p = m.sqrt(s * (s - a) * (s - b) * (s - c))\n return p\n else:\n # the triangle does not exist\n return None\n \n \np1 = area_triangle(3, 4, 5) \np2 = area_triangle(1, 2, 3)\nprint(p1, p2)", "6.0 None\n" ], [ "# based on vertices\n# computing the lengths of sides first\n\ndef distance(A, B):\n # distance between the points A = (xa, ya) and B = (xb, yb)\n return m.sqrt((A[0] - B[0])**2 + (A[1] - B[1])**2)\n\ndef area_tr_vertices(A, B, C):\n # A, B and C are coordinates of vertices\n a = distance(A, B)\n b = distance(B, C)\n c = distance(A, C)\n return area_triangle(a, b, c)\n\np1 = area_tr_vertices([0, 0], [0, 4], [3, 0]) \np2 = area_tr_vertices([0, 1], [2, 0], [0, 0])\nprint(p1, p2)", "4.0 5.0 3.0\n2.23606797749979 2.0 1.0\n6.0 0.9999999999999999\n" ] ], [ [ "**Exercise 4**\n\nWrite a function `is_between(x, y, z)` that returns `True` if $x \\leq y \\leq z$ or `False` otherwise.\n", "_____no_output_____" ] ], [ [ "def is_between(x, y, z):\n if x <= y <= z:\n return True\n # else is not needed, there is no other way to go, since we've exited the function when condidtion was True\n return False\n\nprint(is_between(1, 2, 3))\nprint(is_between(2, 1, 3))", "True\nFalse\n" ] ], [ [ "**Exercise 5**\n\n Write a function `diff(f, x, h=1E-6)` for numerical differentiation:\n $$\n f'(x) \\approx \\frac{f(x+h) -f(x-h)}{2h} \n $$\nthat returns an approximation of a derivative of a function $f$ ($f$ should be defined in Python).", "_____no_output_____" ] ], [ [ "def f(x):\n #example function, polynomial 2x**2 - x + 1\n return 2 * x**2 - x + 1\n\n# we create a function for numerical derivative, \n# but to compute a derivative, a function is needed \n# function f is passed as an argument to use the formula above\ndef diff(f, x, h = 1e-6):\n derivative = (f(x+h) - f(x-h)) / (2*h)\n return derivative\n\nder = diff(f, 2)\n# the derivative of f(x) = 2x**2 - x + 1 is f'(x) = 4*x - 1; 
for x=2, f'(x) = 7\nprint(der)", "7.000000000090267\n" ] ], [ [ "**Exercise 6**\n\nWrite a function returning the maximum value in a sequence.", "_____no_output_____" ] ], [ [ "def maximum(sequence):\n # first, we establish that the first encountered value is the maximum, (we need to start with somehting...)\n # then, we go through all the remaining elements of a sequence\n # if the analysed element is greater than the current maximum, \n # looks like we have a new maximum, so we assign a new value to max_el\n # after we check all the elements in a sequence, the maximum value found is returned\n max_el = sequence[0]\n for element in sequence:\n if element > max_el:\n max_el = element\n return max_el\n\nlist_a = [2, 3, 1, 5, 2]\nm = maximum(list_a)\nprint(m)\n\n# works for letters as well\nlist_b = list(\"apple\")\nprint(maximum(list_b))", "5\np\n" ] ], [ [ "**Exercise 7**\n\nWrite a function returning the minimal value among the given values. Number of values can be arbitrary:\n\n minimum(1, 2, 0) returns 0\n minimum(2, 7, 1, 9, 4, -1, 4) returns -1\n minimum(0) returns 0", "_____no_output_____" ] ], [ [ "# Solution 1: Use arbitrary arguments to pass any number of values you want\n# minimum is found in the same way as the maximum in the previous exercise\ndef minimum(*sequence):\n if len(sequence)>0:\n min_el = sequence[0]\n for element in sequence:\n if element < min_el:\n min_el = element\n return min_el\n else:\n # no elements\n return None\n\na = (-9, 20, 45, 21, -100, -45)\nm = minimum(*a)\nprint(m)\n\nm = minimum(4, 5, -6, 7, 23, -1000, 34.34)\nprint(m)", "-100\n-1000\n" ], [ "# Solution 2: Use a sequence to pass the arbitrary number of arguments\n# use a built-in method min to find the minimum\ndef minimum2(sequence):\n return min(sequence)\n\na = (-9, 20, 45, 21, -100, -45)\nm = minimum2(a)\nprint(m)\n\nm = minimum2([4, 5, -6, 7, 23, -1000, 34.34])\nprint(m)", "-100\n-1000\n" ] ], [ [ "**Exercise 8**\n\nWrite a recursive function for computing the $i$-th element in a Fibonacci sequence. What happens if a negative value of $i$ is given? What happens if the $i$ is not an integer?", "_____no_output_____" ] ], [ [ "# we start Fibonacci sequence with two values: a_1 = 1 and a_2 = 2\n\ndef fibonacci(n):\n if n < 0 or type(n) != int:\n print(\"Wrong value given!\")\n else:\n if n <= 2:\n return 1\n else:\n element = fibonacci(n-1) + fibonacci(n-2)\n return element\n\n# Fibonacci sequence: 1, 1, 2, 3, 5, 8, 13, 21, 34 etc. \nfibonacci(5)\n#fibonacci(34)", "_____no_output_____" ], [ "# iterative approach\n# we start Fibonacci sequence with two values: a_1 = 1 and a_2 = 2\n# another version, keeping only the last two values of Fibonacci sequence to copute the current one in an iterative approach\n\ndef fibonacci_i(n):\n # start with the first two terms of the sequence\n fib = [1, 1]\n # if a_1 and a_2 is defined, we can start computation from a_3 -> in Python indexing: 2\n if n < 2:\n return fib[n-1] # lower the index, because Python starts from 0\n # in other cases:\n for i in range(2, n+1):\n # sum the previous elements and put the result inside the list with the last coefficients\n fib.append(sum(fib))\n # get rid of the first element -> it won't be needed any longer\n fib.pop(0)\n # the requested element is the last one in the list \n return fib[-1]\n\n# Fibonacci sequence: 1, 1, 2, 3, 5, 8, 13, 21, 34 etc. 
\nfibonacci(5)", "_____no_output_____" ], [ "# iterative approach without removing the elements\n# we start Fibonacci sequence with two values: a_1 = 1 and a_2 = 2\n# another version, keeping only the last two values of Fibonacci sequence to copute the current one in an iterative approach\n\ndef fibonacci_i2(n):\n # start with the first two terms of the sequence\n fib = [1, 1]\n # if a_1 and a_2 is defined, we can start computation from a_3 -> in Python indexing: 2\n if n < 2:\n return fib[n-1] # lower the index, because Python starts from 0\n # in other cases:\n for i in range(2, n+1):\n # sum the previous elements and put the result inside the list with the last coefficients\n new_element = fib[-1] + fib[-2]\n fib.append(new_element)\n # the requested element is the last one in the list \n return fib[-1]\n\n# Fibonacci sequence: 1, 1, 2, 3, 5, 8, 13, 21, 34 etc. \nfibonacci(5)", "_____no_output_____" ] ], [ [ "You can compare the execution time of both versions using `%%time` magic function; put `%%time` in the beginning of the cell to measure the time of cell execution; Makes sense, to compare bigger values of indices (17?, 30), but not too big, as the sequence grows pretty fast.\n\n*There is also a bit more flexible method called `%%timeit%%`*", "_____no_output_____" ] ], [ [ "%%time\nfibonacci(30)", "CPU times: user 259 ms, sys: 28 µs, total: 259 ms\nWall time: 258 ms\n" ], [ "%%time \nfibonacci_i(30)", "CPU times: user 22 µs, sys: 0 ns, total: 22 µs\nWall time: 25 µs\n" ], [ "%%time\nfibonacci_i2(30)", "CPU times: user 13 µs, sys: 1 µs, total: 14 µs\nWall time: 16.5 µs\n" ] ], [ [ "**Exercise 9**\n\nExamine the code used for computing satellite position from broadcast ephemerides or from the almanach. Think how can it be transformed into a function. What should be the arguments? What will be a return value?", "_____no_output_____" ], [ "**Exercise 10**\n\nWrite a function computing the factorial of a natural number. Use either recursive or iterative approach.", "_____no_output_____" ] ], [ [ "def factorial_it(n):\n # iterarive approach\n if n < 0:\n print(\"Wrong number!\")\n return\n else:\n factorial = 1\n for i in range(1, n+1):\n factorial *= i\n return factorial\n \ndef factorial_r(n):\n # recursive\n if n < 0:\n print(\"Wrong number!\")\n return\n else:\n if n == 0:\n factorial = 1\n else:\n factorial = n * factorial_r(n-1)\n return factorial\n\n\nprint(factorial_it(3))\nprint(factorial_r(3))", "6\n6\n" ] ], [ [ "**Exercise 11**\n\nWrite a piece of code that reads the integer values (user's input) to a six-element list. 
Then write a fucntion, that:\n - checks if the values in a list are sorted in ascending or descending order (returns True/False).\n - checks if a list is symmetrical (like a palindrome) (returns True/False).\n - checks if there is a repetition of at least one element in the list (returns True/False).\n\nCan the above function be used for a list containing characters (letters/numbers)?", "_____no_output_____" ] ], [ [ "x = 5\n1 if x ==1 else 0", "_____no_output_____" ], [ "def is_sorted(sequence):\n # returns True if a sequence is sorted in an ascending or descending order, False if not sorted\n \n i = 0\n order = 0\n while i < len(sequence)-1: \n if sequence[i] <= sequence[i+1] and order >= 0: #equal 0 for the first element\n order = 1 # ascending order\n elif sequence[i] >= sequence[i+1] and order <= 0:\n order = -1 # descending order\n else:\n order = 0\n break # elements are mixed\n i += 1\n return True if order != 0 else False\n\ndef list_checking(a):\n symmetry = False\n sorting = False\n repetition = False\n \n #sort (using the code from class 04)\n sorting = is_sorted(a)\n \n if a[:int(len(a)/2)] == a[:int(len(a)/2)-1:-1]: #symmetry\n symmetry = True\n if len(a) > len(set(a)): # removing duplicates with set\n repetition = True\n return sorting, symmetry, repetition\n \ni = 0\nb = []\nwhile i < 6:\n a = int(input(\"integer value, please: \"))\n b.append(a)\n i += 1\n \ns, sym, r = list_checking(b)\nprint(s, sym, r)", "integer value, please: 8\ninteger value, please: 7\ninteger value, please: 6\ninteger value, please: 5\ninteger value, please: 4\ninteger value, please: 3\nTrue False False\n" ] ], [ [ "**Exercise 12**\n\nCreate a function computing value of a polynomial of the order $n$ for value $x$. Arguments of the function are coefficients of the polynomial and $x$. ", "_____no_output_____" ] ], [ [ "#assuming: a0 + a1 * x + a2 * x**2 + ... + an * x**n\n# the coefficients can be given as an arbitrary arguments or as a sequence\n \n# Solution 1: arbitrary arguments \ndef polynomial_sol_1(x, *an):\n \"\"\"\n coefficients = (a_0, a_1, ..., a_n)\n 1. define starting value: 0\n 2. looping through coefficients with enumerate, in each iteration:\n 2.1. compute x**i * coefficient \n 2.2. add the result of 3.1 to your result\n \"\"\"\n poly = 0\n for i, a_i in enumerate(an): # using the element and the index, index is used for power of x \n poly += x**i * a_i\n return poly\n\nx = 1\nprint(polynomial_sol_1(x, 1, 1, 2, 5, 6))\n\ncoef = list(range(20))\nprint(polynomial_sol_1(x, *coef)) # asterisk means passing elements of a list as separate values", "15\n190\n" ], [ "# Solution 2: a sequence of arguments \ndef polynomial_sol_2(x, coefficients):\n \"\"\"\n coefficients = (a_0, a_1, ..., a_n)\n 1. define starting value: 0\n 2. looping through coefficients with enumerate, in each iteration:\n 2.1. compute x**i * coefficient \n 2.2. add the result of 3.1 to your result\n \"\"\"\n poly = 0\n for i, a_i in enumerate(coefficients): # using the element and the index, index is used for power of x \n poly += x**i * a_i\n return poly\n\nx = 1\nprint(polynomial_sol_2(x, (1, 1, 2, 5, 6)))\n\ncoef = list(range(20))\nprint(polynomial_sol_2(x, coef)) # asterisk means passing elements of a list as separate values", "15\n190\n" ], [ "# Solution 3: list of coefficients, passes in the order a_n, a_n-1, a_n-2, ..., a_1, a_0 \ndef polynomial_sol_3(x, *an):\n \"\"\"\n coefficients = (a_n, a_n-1, a_n-2, ..., a_1, a_0)\n 1. n = len(coefficients) - 1\n 2. define starting value: 0\n 3. 
looping through coefficients, in each iteration:\n    3.1. compute x**n * coefficient \n    3.2. add the result of 3.1 to your result\n    \"\"\"\n    coefficients = an  # the coefficients arrive as the tuple of extra positional arguments\n    poly = 0\n    n = len(coefficients) - 1 # current power of x\n    for a_i in coefficients: # highest-order coefficient comes first\n        poly += x**n * a_i\n        n -= 1 # decrease the power for next iteration\n    return poly\n\nx = 1\nprint(polynomial_sol_3(x, *[1, 1, 2, 5, 6])) # asterisk unpacks the list into separate arguments\n\ncoef = list(range(20))\nprint(polynomial_sol_3(x, *coef))", "_____no_output_____" ] ], [ [ "**Exercise 13**\n\nWrite a function `product_all` that takes an arbitrary number of arguments and returns their product.", "_____no_output_____" ] ], [ [ "def product_all(*args):\n    \"\"\"\n    Compute the product of an arbitrary number of values\n    \"\"\"\n    # check if all the values are int or float, otherwise, they cannot be multiplied\n    check_types = [isinstance(x, float) or isinstance(x, int) for x in args] # True for numeric, False otherwise\n    # check if there is at least one False value, \n    # so checking if all(check_types) would return False if not all values are true; \n    # to enter the if branch, we need to negate it\n    if not all(check_types):\n        print(\"not all numeric\")\n        return\n    \n    product = 1\n    for element in args:\n        product *= element\n    return product\n\nprint(product_all(1, 2, 3, 4, 5))\nprint(product_all(1, 2, 0, \"a\"))", "120\nnot all numeric\nNone\n" ] ] ]
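A possible companion to Exercise 12 (not part of the original notebook): Horner's method evaluates the same polynomial without computing explicit powers of x, using one multiplication and one addition per coefficient. The coefficient order below follows `polynomial_sol_2` (a_0 first); the function name is ours.

```python
def polynomial_horner(x, coefficients):
    """coefficients = (a_0, a_1, ..., a_n), same convention as polynomial_sol_2."""
    poly = 0
    for a_i in reversed(coefficients):  # start from a_n and fold down to a_0
        poly = poly * x + a_i
    return poly

print(polynomial_horner(1, (1, 1, 2, 5, 6)))   # 15, same result as polynomial_sol_2
print(polynomial_horner(1, list(range(20))))   # 190
```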
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec8089c06518fddff9418f3034729f4a3ab8a51b
19,339
ipynb
Jupyter Notebook
log35.ipynb
DRKWang/cutgeneratingfunctions_new
9044a5864d3f279bfd6ad11d0531173f746ab303
[ "MIT" ]
1
2021-09-27T17:33:50.000Z
2021-09-27T17:33:50.000Z
log35.ipynb
DRKWang/cutgeneratingfunctions_new
9044a5864d3f279bfd6ad11d0531173f746ab303
[ "MIT" ]
null
null
null
log35.ipynb
DRKWang/cutgeneratingfunctions_new
9044a5864d3f279bfd6ad11d0531173f746ab303
[ "MIT" ]
null
null
null
49.083756
2,042
0.596773
[ [ [ "# Experimental codes for testing potential arithematic precision errors.", "_____no_output_____" ], [ "The following results only take rtree into account, not including libspatialindex, since libspatialindex is not more directly used than rtree.", "_____no_output_____" ], [ "# 1. The dimension of index database should always be greater than 1. (0 or 1 dimension will fail).", "_____no_output_____" ] ], [ [ "from rtree import index", "_____no_output_____" ], [ "#points-to-point intersection test in 1 dimension\np = index.Property()\np.dimension = 1\np.dat_extension = 'data'\np.idx_extension = 'index'\ndb = index.Index(properties = p, interleaved = False)\ndb.insert(1, (0, 0),obj = ['a']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0.0, 0.0),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0.0, 0.00000000),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\nhits = list(db.intersection((0, 0),objects = True))\n[(item.object, item.bbox) for item in hits]", "_____no_output_____" ] ], [ [ "# 2. The arithematic precision is 16 decimal digits, which comes from the double type's precision. ( The program will only takes the top 16 decimal digits of the number as itself, no matter how large or small this number is.)", "_____no_output_____" ], [ "For the top one, the retrieving will not consider they are the same one, since the last digit can be distinguished.\nFor the bottom one, the retrieving will consider they are the same, since the last digit can not be distinguished because 16 decimal digits is largest number they can capture.", "_____no_output_____" ] ], [ [ "# intervals-to-point intersection test in 4 dimension\nprint('----------------')\np = index.Property()\np.dimension = 4\np.dat_extension = 'data'\np.idx_extension = 'index'\ndb = index.Index(properties = p, interleaved = False)\n# 16 decimal digits\ndb.insert(1, (0, 0, 0, 0, 0, 0, 9999999999999998,9999999999999998),obj = ['a']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 9999999999999998,9999999999999998),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 9999999999999998,9999999999999998),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\nhits = list(db.intersection((0, 0, 0, 0, 0, 0, 9999999999999999,9999999999999999),objects = True))\n[(item.object, item.bbox) for item in hits] \nprint('----------------')\np = index.Property()\np.dimension = 4\np.dat_extension = 'data'\np.idx_extension = 'index'\n# 17 decimal digits\ndb = index.Index(properties = p, interleaved = False)\ndb.insert(1, (0, 0, 0, 0, 0, 0, 99999999999999998,99999999999999998),obj = ['a']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 99999999999999998,99999999999999998),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 99999999999999998,99999999999999998),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\nhits = list(db.intersection((0, 0, 0, 0, 0, 0, 99999999999999999,99999999999999999),objects = True))\n[(item.object, item.bbox) for item in hits] ", "----------------\n----------------\n" ] ], [ [ "# 3. 
The limit of arithmetic precision is 324 decimal digits, which comes from the smallest positive magnitude that Python can distinguish from zero.", "_____no_output_____" ] ], [ [ "((10**(-323) == 0.0),(10**(-324) == 0.0))", "_____no_output_____" ], [ "# intervals-to-point intersection test in 4 dimensions\np = index.Property() \np.dimension = 4\np.dat_extension = 'data'\np.idx_extension = 'index'\ndb = index.Index(properties = p, interleaved = False)\ndb.insert(1, (0, 0, 0, 0, 0, 0, 0-10**(-323), 0),obj = ['a']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 0-10**(-323), 0),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 0-10**(-324), 0),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\nhits = list(db.intersection((0, 0, 0, 0, 0, 0, 0-10**(-323),0-10**(-324)),objects = True))\n[(item.object, item.bbox) for item in hits]", "_____no_output_____" ] ], [ [ "# 4. The intersection used for retrieving is a closed intersection, in contrast with an open one. (-0.000) is considered the same number as 0, but both representations are displayed at the same time.", "_____no_output_____" ] ], [ [ "#points-to-point intersection test in 3 dimensions\np = index.Property()\np.dimension = 3\np.dat_extension = 'data'\np.idx_extension = 'index'\ndb = index.Index(properties = p, interleaved = False)\ndb.insert(1, (0, 0,0,0, 0.000000, -0.0000),obj = ['a']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0.0, 0.0, 0,0, -0.000000, -0.0000),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0.0, 0.00000000, 0,0, -0.000000, -0.0000),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\nhits = list(db.intersection((0, -0, 0,0, -0.000000, 0.0000),objects = True))\n[(item.object, item.bbox) for item in hits]", "_____no_output_____" ] ], [ [ "# 5. Round test. 
The round processing is not stable.", "_____no_output_____" ] ], [ [ "print('----------------')\np = index.Property()\np.dimension = 4\np.dat_extension = 'data'\np.idx_extension = 'index'\ndb = index.Index(properties = p, interleaved = False)\n# 16 decimal digits\ndb.insert(1, (0, 0, 0, 0, 0, 0, 9999999999999991,9999999999999991),obj = ['a']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 9999999999999994,9999999999999994),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 9999999999999996,9999999999999996),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 9999999999999998,9999999999999998),obj = ['c']) #(x_min, y_min, z_min, x_max, y_max, z_max)\nhits = list(db.intersection((0, 0, 0, 0, 0, 0, 9999999999999995,9999999999999995),objects = True))\n[(item.object, item.bbox) for item in hits] ", "----------------\n" ], [ "print('----------------')\np = index.Property()\np.dimension = 4\np.dat_extension = 'data'\np.idx_extension = 'index'\ndb = index.Index(properties = p, interleaved = False)\n# 16 decimal digits\ndb.insert(1, (0, 0, 0, 0, 0, 0, 9999999999999991,9999999999999991),obj = ['a']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 9999999999999994,9999999999999994),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 9999999999999996,9999999999999996),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 9999999999999998,9999999999999998),obj = ['c']) #(x_min, y_min, z_min, x_max, y_max, z_max)\nhits = list(db.intersection((0, 0, 0, 0, 0, 0, 9999999999999993,9999999999999993),objects = True))\n[(item.object, item.bbox) for item in hits] ", "----------------\n" ], [ "print('----------------')\np = index.Property()\np.dimension = 4\np.dat_extension = 'data'\np.idx_extension = 'index'\ndb = index.Index(properties = p, interleaved = False)\n# 15 decimal digits\ndb.insert(1, (0, 0, 0, 0, 0, 0, 999999999999991,999999999999991),obj = ['a']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 999999999999994,999999999999994),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 999999999999996,999999999999996),obj = ['b']) #(x_min, y_min, z_min, x_max, y_max, z_max)\ndb.insert(2, (0, 0, 0, 0, 0, 0, 999999999999998,999999999999998),obj = ['c']) #(x_min, y_min, z_min, x_max, y_max, z_max)\nhits = list(db.intersection((0, 0, 0, 0, 0, 0, 999999999999993,999999999999993),objects = True))\n[(item.object, item.bbox) for item in hits] ", "----------------\n" ] ], [ [ "The advantage of this library is that it is positive in the memory and storage, but not good at the precision control.", "_____no_output_____" ] ] ]
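A small companion check (not in the original notebook): the 16-significant-digit and ~1e-324 cut-offs observed above match the properties of IEEE-754 double precision, which is what Python floats use and, presumably, what the coordinates handed to rtree are stored as. Note that `math.ulp` requires Python 3.9 or newer.

```python
import sys
import math

print(sys.float_info.dig)            # 15 guaranteed decimal digits (~16 significant digits overall)
print(sys.float_info.min)            # ~2.2e-308, smallest normalised double
print(5e-324)                        # smallest subnormal double that is still non-zero
print(1e-324 == 0.0)                 # True: anything smaller underflows to exactly 0
print(math.ulp(9999999999999998.0))  # spacing between adjacent doubles near 1e16 is 2.0
```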
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
ec808f0142044f87a9b363a300b22e20251bdc4d
11,737
ipynb
Jupyter Notebook
notebooks/01.0-custom-parsing/11.0-woodpecker-custom-parsing.ipynb
xingjeffrey/avgn_paper
412e95dabc7b7b13a434b85cc54a21c06efe4e2b
[ "MIT" ]
null
null
null
notebooks/01.0-custom-parsing/11.0-woodpecker-custom-parsing.ipynb
xingjeffrey/avgn_paper
412e95dabc7b7b13a434b85cc54a21c06efe4e2b
[ "MIT" ]
null
null
null
notebooks/01.0-custom-parsing/11.0-woodpecker-custom-parsing.ipynb
xingjeffrey/avgn_paper
412e95dabc7b7b13a434b85cc54a21c06efe4e2b
[ "MIT" ]
null
null
null
26.554299
259
0.509415
[ [ [ "### woodpecker custom parsing\n- This dataset consists of 1669 wav files most of which consists of several syllables grouped into either 'song', 'call type' or 'drumming'. There are seven different species of vocalizers. The dataset is taken from XenoCanto The dataset consists of:\n - WAV files for vocalization that contains labels for species and vocalization. \n- This notebook creates a JSON corresponding to each WAV file.\n- Dataset origin:\n - https://zenodo.org/record/574438#.XOnxJ9NKhTY", "_____no_output_____" ] ], [ [ "from avgn.utils.general import prepare_env", "_____no_output_____" ], [ "prepare_env()", "env: CUDA_VISIBLE_DEVICES=GPU\n" ] ], [ [ "### Import relevant packages", "_____no_output_____" ] ], [ [ "from joblib import Parallel, delayed\nfrom tqdm.autonotebook import tqdm\nimport pandas as pd\npd.options.display.max_columns = None\nimport librosa\nfrom datetime import datetime\nimport numpy as np", "/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n" ], [ "import avgn\nfrom avgn.custom_parsing.picidae_woodpecker import generate_json\nfrom avgn.utils.paths import DATA_DIR", "_____no_output_____" ] ], [ [ "### Load data in original format", "_____no_output_____" ] ], [ [ "# create a unique datetime identifier for the files output by this notebook\nDT_ID = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\nDT_ID", "_____no_output_____" ], [ "DSLOC = avgn.utils.paths.Path('/mnt/cube/Datasets/Picidae/PicidaeDataset/')\nDSLOC", "_____no_output_____" ], [ "all_wavs = list(DSLOC.expanduser().glob('*/*.wav'))\nall_wavs = [i for i in all_wavs if i.stem[0] != '.']\nlen(all_wavs)", "_____no_output_____" ], [ "wav_df = pd.DataFrame(columns = ['species', 'call_type', 'wavloc', 'origin'])\nfor wav_loc in tqdm(all_wavs):\n if wav_loc.parent.stem == 'Silence': continue\n species, call_type = wav_loc.parent.stem.split('-')\n wav_df.loc[len(wav_df)] = [species, call_type, wav_loc, wav_loc.stem.split('-')[0]]", "_____no_output_____" ], [ "wav_df[:3]", "_____no_output_____" ], [ "wav_df.species.unique()", "_____no_output_____" ] ], [ [ "### create json for wavs", "_____no_output_____" ] ], [ [ "with Parallel(n_jobs=-1, verbose=10) as parallel:\n parallel(\n delayed(generate_json)(\n row,\n DT_ID\n )\n for idx, row in tqdm(wav_df.iterrows(), total=len(wav_df))\n );", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec80968cdf8c14ff4b6ecac77ddf4c30c489e20d
8,832
ipynb
Jupyter Notebook
clustering_fftx2.ipynb
Geson-anko/autokikitori6
6d60b6bb91b49e85c720ca4b131ba7ca8b8b0668
[ "MIT" ]
null
null
null
clustering_fftx2.ipynb
Geson-anko/autokikitori6
6d60b6bb91b49e85c720ca4b131ba7ca8b8b0668
[ "MIT" ]
null
null
null
clustering_fftx2.ipynb
Geson-anko/autokikitori6
6d60b6bb91b49e85c720ca4b131ba7ca8b8b0668
[ "MIT" ]
null
null
null
30.040816
107
0.54212
[ [ [ "from torch_KMeans import torch_KMeans\r\nkmeans = torch_KMeans(0)\r\nimport h5py\r\nimport torch\r\nimport numpy as np\r\nimport config\r\nuse = 100000\r\nwith h5py.File('data/encoded_fftx2.h5','r') as f:\r\n data = f['data'][:use]\r\nnp.random.shuffle(data)\r\ndata = torch.from_numpy(data).type(torch.float32)\r\nprint(data.size())", "torch.Size([8755, 65])\n" ], [ "from datetime import datetime\r\npick_init = 2000\r\nnum_cluster = 32\r\nnow = datetime.now().strftime('%Y-%m-%d %H_%M_%S')\r\ndata = data.to('cuda')\r\ncentroids = kmeans.init_centroids_pp(n_cluster=num_cluster,data=data[:pick_init])\r\nmode = 'k_means++'\r\ncentroids = kmeans.KMeans(num_cluster,data,default_centroids=centroids)\r\nname = f'params/centroids_fftx2_euclid_{num_cluster}_datasize{data.size(1)}_{now}_{mode}.tensor'\r\ntorch.save(centroids.to('cpu'),name)\r\ntorch.cuda.empty_cache()", "getting centroids 100.0%\ngot centroids!\nProgress : 20.7%\nfinished!\n" ], [ "centroids", "_____no_output_____" ], [ "#centroids = torch.load('centroids/centroids')", "_____no_output_____" ], [ "classes = kmeans.clustering(centroids,data)\nfor i in torch.unique(classes):\n cls = torch.sum(classes==i)\n print(f'class {i} : {cls}')", "class 0 : 22594\nclass 1 : 4590\nclass 2 : 626\nclass 3 : 972\nclass 4 : 3552\nclass 5 : 3761\nclass 6 : 394\nclass 7 : 203\nclass 8 : 6349\nclass 9 : 3679\nclass 10 : 183\nclass 11 : 304\nclass 12 : 1234\nclass 13 : 624\nclass 14 : 1812\nclass 15 : 1370\nclass 16 : 690\nclass 17 : 496\nclass 18 : 207\nclass 19 : 147\nclass 20 : 1010\nclass 21 : 218\nclass 22 : 219\nclass 23 : 249\nclass 24 : 283\nclass 25 : 3286\nclass 26 : 708\nclass 27 : 1487\nclass 28 : 229\nclass 29 : 210\nclass 30 : 157\nclass 31 : 4489\nclass 32 : 1\nclass 33 : 191\nclass 34 : 180\nclass 35 : 798\nclass 36 : 155\nclass 37 : 184\nclass 38 : 666\nclass 39 : 173\nclass 40 : 2508\nclass 41 : 274\nclass 42 : 2688\nclass 43 : 3180\nclass 44 : 197\nclass 45 : 1116\nclass 46 : 214\nclass 47 : 143\nclass 48 : 406\nclass 49 : 1633\nclass 50 : 273\nclass 51 : 1\nclass 52 : 1383\nclass 53 : 854\nclass 54 : 172\nclass 55 : 249\nclass 56 : 157\nclass 57 : 10261\nclass 58 : 237\nclass 59 : 272\nclass 60 : 450\nclass 61 : 220\nclass 62 : 4199\nclass 63 : 233\n" ], [ "for i in torch.unique(classes):\n _d = data[i==classes]\n mean = torch.mean(_d,dim=0)\n d = ( _d - mean.repeat(_d.size(0),1))**2\n d = torch.sum(d,dim=0) / d.size(0)\n m = torch.mean(d)\n print(f'class {i} : bunsan {m}')", "class 0 : bunsan 4.870586053584702e-05\nclass 1 : bunsan 0.0011764869559556246\nclass 2 : bunsan 0.017029806971549988\nclass 3 : bunsan 0.005839901510626078\nclass 4 : bunsan 0.0036741592921316624\nclass 5 : bunsan 0.010533011518418789\nclass 6 : bunsan 0.02113567292690277\nclass 7 : bunsan 0.028071686625480652\nclass 8 : bunsan 0.0005765333189629018\nclass 9 : bunsan 0.0015446300385519862\nclass 10 : bunsan 0.03498466685414314\nclass 11 : bunsan 0.025068825110793114\nclass 12 : bunsan 0.009739413857460022\nclass 13 : bunsan 0.026207221671938896\nclass 14 : bunsan 0.01795353926718235\nclass 15 : bunsan 0.01853634975850582\nclass 16 : bunsan 0.01971522718667984\nclass 17 : bunsan 0.017726805061101913\nclass 18 : bunsan 0.03015371598303318\nclass 19 : bunsan 0.03408198431134224\nclass 20 : bunsan 0.012217341922223568\nclass 21 : bunsan 0.037715643644332886\nclass 22 : bunsan 0.02988167479634285\nclass 23 : bunsan 0.027566581964492798\nclass 24 : bunsan 0.025606075301766396\nclass 25 : bunsan 0.0028511786367744207\nclass 26 : bunsan 0.01301640272140503\nclass 
27 : bunsan 0.0026138448156416416\nclass 28 : bunsan 0.025882070884108543\nclass 29 : bunsan 0.0317978598177433\nclass 30 : bunsan 0.043748050928115845\nclass 31 : bunsan 0.007529634516686201\nclass 32 : bunsan 0.0\nclass 33 : bunsan 0.030756065621972084\nclass 34 : bunsan 0.039574697613716125\nclass 35 : bunsan 0.03145770728588104\nclass 36 : bunsan 0.042543891817331314\nclass 37 : bunsan 0.040766552090644836\nclass 38 : bunsan 0.011428123340010643\nclass 39 : bunsan 0.04220009595155716\nclass 40 : bunsan 0.01517494022846222\nclass 41 : bunsan 0.0775597020983696\nclass 42 : bunsan 0.012874213047325611\nclass 43 : bunsan 0.0019115813774988055\nclass 44 : bunsan 0.026950960978865623\nclass 45 : bunsan 0.004175466485321522\nclass 46 : bunsan 0.028481055051088333\nclass 47 : bunsan 0.03893148899078369\nclass 48 : bunsan 0.047626715153455734\nclass 49 : bunsan 0.0033888474572449923\nclass 50 : bunsan 0.02927730791270733\nclass 51 : bunsan 0.0\nclass 52 : bunsan 0.0069242180325090885\nclass 53 : bunsan 0.02485284022986889\nclass 54 : bunsan 0.037020791321992874\nclass 55 : bunsan 0.041652437299489975\nclass 56 : bunsan 0.03842499479651451\nclass 57 : bunsan 0.00032035724143497646\nclass 58 : bunsan 0.03675074502825737\nclass 59 : bunsan 0.02693254128098488\nclass 60 : bunsan 0.021654164418578148\nclass 61 : bunsan 0.04983598738908768\nclass 62 : bunsan 0.005430541001260281\nclass 63 : bunsan 0.032410331070423126\n" ] ] ]
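The two reporting loops above can be restated more compactly. This is only a sketch, assuming `classes` is the integer tensor returned by `kmeans.clustering` and `data` is the tensor defined earlier; the per-cluster value matches the hand-written variance ("bunsan") computation.

```python
import torch

sizes = torch.bincount(classes)                            # number of points per cluster id
for i, n in enumerate(sizes.tolist()):
    if n == 0:
        continue
    members = data[classes == i]
    spread = members.var(dim=0, unbiased=False).mean()     # population variance per feature, averaged
    print(f"class {i}: size {n}, variance {spread.item():.6f}")
```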
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
ec80a11e967ba6b1031cd3bbb7594e451fb6687b
362,670
ipynb
Jupyter Notebook
examples/notebooks/16QAM_tutorial.ipynb
aff3ct/py_aff3ct
8afb7e6b1db1b621db0ae4153b29a2e848e09fcf
[ "MIT" ]
15
2021-01-24T11:59:04.000Z
2022-03-23T07:23:44.000Z
examples/notebooks/16QAM_tutorial.ipynb
aff3ct/py_aff3ct
8afb7e6b1db1b621db0ae4153b29a2e848e09fcf
[ "MIT" ]
8
2021-05-24T18:22:45.000Z
2022-03-11T09:48:05.000Z
examples/notebooks/16QAM_tutorial.ipynb
aff3ct/py_aff3ct
8afb7e6b1db1b621db0ae4153b29a2e848e09fcf
[ "MIT" ]
4
2021-01-26T19:18:21.000Z
2021-12-07T17:02:34.000Z
967.12
262,546
0.660909
[ [ [ "# Simulate a noisy 16-QAM\n\nThis tutorial presents the steps for simulating a noisy 16-QAM modulation scheme with [AFF3CT](https://aff3ct.github.io/). At the end of this, tutorial you will have built the following communication sequence.\n\n<a id='com_chain'></a>\n![16QAM_sequence](https://aff3ct.github.io/images/doc_py_aff3ct/16QAM_chain.svg)", "_____no_output_____" ], [ "## Load the `py_aff3ct` library\nBefore going farther, we should import the `py_aff3ct` library. The sys library is requiered to add the library path to your path (you can also simply copy the library into the current directory if you prefer). \n\nFor this example, we will also need the `numpy` and the `matplotlib` libraries. You can install these libraries with `pip` as follow on Ubuntu 20.04:\n```bash\n$ sudo apt install python3-pip\n$ pip3 install --user -r requirements.txt\n```", "_____no_output_____" ] ], [ [ "import sys \nsys.path.insert(0, '../build/lib')\n\nimport py_aff3ct\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "%qtconsole", "_____no_output_____" ] ], [ [ "## Build the `modules`\nWe start by building all the necessary objects for our simulation:\n* a `Source_random` for generating the bits\n* an `Encoder_repetition_sys`for encoding the message, here using a repetition code\n* a `Modem_generic`for modulating/demodulating the bits as 16-QAM symbols\n* a `Channel_AWGN_LLR` that adds a White Gaussian Noise (AWGN channel)\n* a `Decoder_NO` that decide the demodulation output to recover the bits\n* a `Monitor_BFER_AR` that counts the number of errors and compute the Binary Error Rate (BER) and Frame Error Rate (FER) and perform Automatic Reduction (for enabling multi-threaded simulations)", "_____no_output_____" ] ], [ [ "K = 2048 # Message size in bits\nN = 4096 # Packet length in bits\nbps = 4 # Bits per Symbols \nNs = N//bps # Packet size in symbols\n\nsrc = py_aff3ct.module.source.Source_random(K) # Build source\nenc = py_aff3ct.module.encoder.Encoder_repetition_sys(K,N) # Build the encoder\ncstl = py_aff3ct.tools.constellation.Constellation_QAM(bps) # Setup constellation tool\nmdm = py_aff3ct.module.modem.Modem_generic(N, cstl) # Build the modem\nchn = py_aff3ct.module.channel.Channel_AWGN_LLR(2*Ns) # Build the channel (Ns complex symbols = 2Ns real dims)\ndec = py_aff3ct.module.decoder.Decoder_repetition_std(K,N) # Build the decoder\nmnt = py_aff3ct.module.monitor.Monitor_BFER_AR(K,100) # Build the monitor (counting errors)", "_____no_output_____" ] ], [ [ "## Bind the `tasks`\nNow we can bind the different tasks in order to create the processing sequence described in [this picture](#com_chain).", "_____no_output_____" ] ], [ [ "enc[ 'encode::U_K' ].bind(src[ 'generate::U_K' ]) # Source -> Encoder\nmdm[ 'modulate::X_N1'].bind(enc[ 'encode::X_N' ]) # Encoder -> Modulation\nchn[ 'add_noise::X_N' ].bind(mdm[ 'modulate::X_N2']) # Modulation -> Channel\nmdm[ 'demodulate::Y_N1'].bind(chn[ 'add_noise::Y_N' ]) # Channel -> Demodulation\ndec[ 'decode_siho::Y_N' ].bind(mdm[ 'demodulate::Y_N2']) # Demodulation -> Decoder\nmnt['check_errors::U' ].bind(src[ 'generate::U_K' ]) # Source -> Monitor\nmnt['check_errors::V' ].bind(dec['decode_siho::V_K' ]) # Decoder -> Monitor", "_____no_output_____" ] ], [ [ "<a id='sigma_def'></a>\nIf you look at the information for tasks `add_noise` and `demodulate`, you will found out that a socket named `CP` should be filled. 
`CP` stands for \"Channel Parameter\", which for Additive White Gaussian Noise (AWGN) channel, represents the noise standard deviation. Let now fill this socket with a user defined value. ", "_____no_output_____" ] ], [ [ "sigma = np.ndarray(shape = (1,1), dtype = np.float32)\nsigma[0] = 0.05\n\nchn[ 'add_noise::CP'].bind(sigma)\nmdm['demodulate::CP'].bind(sigma)\nprint(\"Channel Parameter (CP) for 'chn':\", chn[ 'add_noise::CP'][:])\nprint(\"Channel Parameter (CP) for 'mdm':\", mdm['demodulate::CP'][:])", "Channel Parameter (CP) for 'chn': [[0.05]]\nChannel Parameter (CP) for 'mdm': [[0.05]]\n" ] ], [ [ "## Execute the sequence of `tasks`\nNow that the binding is performed, we can execute the our sequence.", "_____no_output_____" ] ], [ [ "src('generate' ).exec()\nenc('encode' ).exec()\nmdm('modulate' ).exec()\nchn('add_noise' ).exec()\nmdm('demodulate' ).exec()\ndec('decode_siho').exec()", "_____no_output_____" ] ], [ [ "As in the Hello World tutorial, we can recover `socket` data and then display its constellation diagram using `matplotlib`.", "_____no_output_____" ] ], [ [ "sent_signal = mdm[ 'modulate::X_N2'][:]\nrecv_signal = mdm['demodulate::Y_N1'][:]", "_____no_output_____" ], [ "plt.plot(recv_signal[0, ::2], recv_signal[0, 1::2], '.')\nplt.plot(sent_signal[0, ::2], sent_signal[0, 1::2], '+')\nplt.title(\"16-QAM constellation diagram\")\nplt.xlabel(\"In-phase\")\nplt.ylabel(\"Quadrature\")\nplt.grid()", "_____no_output_____" ], [ "mnt('check_errors').debug = True\nmnt('check_errors').set_debug_limit(8)\nmnt('check_errors').exec()\nmnt('check_errors').debug = False", "# Monitor_BFER_AR::check_errors(const int32 U[2048], const int32 V[2048])\n# {IN} U = [ 0, 1, 1, 0, 1, 1, 1, 1, ...]\n# {IN} V = [ 0, 1, 1, 0, 1, 1, 1, 1, ...]\n# Returned status: [0 'SUCCESS']\n#\n" ] ], [ [ "Changing the value of `sigma` [here](#sigma2_def) and re-executing the following cells should change the returned status of the `Monitor_BFER` module.", "_____no_output_____" ], [ "## Compute BER/FER as a function of Eb/N0\nA common way for validating a communication chain is to compute Bit Error Rates or Frame Error Rates as a function of SNR (here we choose the SNR per information bit named Eb/N0). This operation includes most of the operations performed in the first part of this tutorial.\n\nSo we first specify the vector of Eb/N0 values that we want to consider and convert these values to `sigma` values.", "_____no_output_____" ] ], [ [ "import math\n\nebn0_min = 0\nebn0_max = 15.0\nebn0_step = 0.5\n\nebn0 = np.arange(ebn0_min,ebn0_max,ebn0_step)\nesn0 = ebn0 + 10 * math.log10(K/Ns)\nsigma_vals = 1/(math.sqrt(2) * 10 ** (esn0 / 20))\n\nfer = np.zeros(len(ebn0))\nber = np.zeros(len(ebn0))", "_____no_output_____" ] ], [ [ "Because BER/FER simulations are often time consuming, AFF3CT provide a tool that enables multithreading. This tool is called `sequence`. The second advantages of sequences is that execution is performed automatically at the sequence level(it is deduced from the binding steps), we do not need to execute each task independently as before. A sequence can be built from the initial `task`, the final `task` and the desired number of threads for the simulation.", "_____no_output_____" ] ], [ [ "n_threads = 8\nseq = py_aff3ct.tools.sequence.Sequence(src(\"generate\"), mnt(\"check_errors\"), n_threads)", "_____no_output_____" ] ], [ [ "We loop over all `sigma` values and just execute the sequence. The monitor is reset each time to reset the error counter. 
Once the execution is over, we simply store the monitor BER and FER.", "_____no_output_____" ] ], [ [ "for i in range(len(sigma_vals)):\n mnt.reset()\n sigma[:] = sigma_vals[i]\n seq.exec()\n\n ber[i] = mnt.get_ber()\n fer[i] = mnt.get_fer()", "_____no_output_____" ] ], [ [ "Finally we display the BER/FER vs Eb/N0 curves using `matplotlib`", "_____no_output_____" ] ], [ [ "fig = plt.figure()\nplt.title(\"16-QAM BER/FER vs Eb/N0\")\nplt.xlabel(\"Eb/N0\")\nplt.ylabel(\"BER/FER\")\nplt.grid()\nplt.semilogy(ebn0, fer, 'r-', ebn0, ber, 'b--')\nplt.show()", "_____no_output_____" ] ] ]
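A small helper (not part of the AFF3CT tutorial itself) restating the Eb/N0-to-sigma conversion used above; it assumes unit-energy symbols and the per-real-dimension noise convention expected by `Channel_AWGN_LLR`.

```python
import math

def ebn0_to_sigma(ebn0_db, K, Ns):
    """K information bits carried by Ns complex symbols (here K/Ns = 2 info bits per symbol)."""
    esn0_db = ebn0_db + 10 * math.log10(K / Ns)          # move from energy per info bit to energy per symbol
    return 1.0 / (math.sqrt(2) * 10 ** (esn0_db / 20))   # noise standard deviation per real dimension

print(ebn0_to_sigma(0.0, 2048, 1024))   # ~0.5, matching sigma_vals[0] computed above
```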
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec80a5f58f8ccf7721a660b1b2a355763984ae82
3,588
ipynb
Jupyter Notebook
notebooks/4-MultiLayer-Perceptron.ipynb
jeisonsrz/Curso-Deep-Learning-CreaTIC
8ad15622c21575f3b4fe71255ce88e18a2ac331f
[ "Apache-2.0" ]
2
2019-07-18T15:22:44.000Z
2019-07-24T19:43:11.000Z
notebooks/4-MultiLayer-Perceptron.ipynb
Gxbx/deeplearningcourse
b734af3c744e5d52f060aad70ab57ff6741cabc7
[ "Apache-2.0" ]
15
2019-12-04T23:35:18.000Z
2022-02-10T08:17:53.000Z
notebooks/4-MultiLayer-Perceptron.ipynb
jeisonsrz/Curso-Deep-Learning-CreaTIC
8ad15622c21575f3b4fe71255ce88e18a2ac331f
[ "Apache-2.0" ]
1
2019-07-28T03:40:17.000Z
2019-07-28T03:40:17.000Z
36.612245
125
0.549331
[ [ [ "import tensorflow as tf", "_____no_output_____" ], [ "#Implemantar la clase con el modelo base de Logistic Classifier\nclass MLP:\n def __init__(self, seq_max_len, state_size, vocab_size, num_class):\n self.seq_max_len = seq_max_len\n self.state_size = state_size\n self.vocab_size = vocab_size\n self.num_classes = num_classes\n def build (self):\n self.x = tf.placeholder (shape=[None, self.seq_max_len], dtype=tf.float32)\n x_one_hot_en = tf.one_hot(self.x, self.vocab_size)\n \n self.y = tf.placeholder (shape=[None], dtype=tf.float32)\n y_one_hot_en = tf.one_hot(self.y, self.num_classes)\n self.batch_size = tf.placeholder(tf.int32, [], name='batch_size')\n \n weights = {\n 'layer_0': tf.Variable(tf.random_normal ([self.seq_max_len*self.vocab_size, self.num_classes])),\n 'layer_1': tf.Variable(tf.random_normal ([self.state_size, self.state_size ])),\n 'layer_2': tf.Variable(tf.random_normal ([self.seq_max_len, self.num_classes]))\n }\n # y = wx + b\n biases = {\n 'layer_0': tf.Variable(tf.random_normal ([self.state_size])),\n 'layer_1': tf.Variable(tf.random_normal ([self.state_size])),\n 'layer_2': tf.Variable(tf.random_normal ([self.num_classes]))\n }\n \n x_input = tf.reshape(x_one_hot_en, [-1, self.seq_max_len*selft.vocab_size])\n hidden_layer = tf.matmul(x_input, weights['layer_0']) + biases['layer_0']\n hidden_layer = tf.nn.sigmoid(hidden_layer)\n hidden_layer = tf.matmul(hidden_layer, weights['layer_1']) + biases['layer_1']\n hidden_layer = tf.nn.sigmoid(hidden_layer)\n self.logits = tf.matmul(hidden_layer, weights['layer_2']) + biases['layer_2']\n #Normalizar los valores que arroja la red\n self.probs = tf.nn.softmax(self.logits, axis=1)\n \n self.correct_preds = tf.equal (tf.argmax(self.probs, axis =1), tf.argmax(self.y_one_hot_en, axis=1))\n self.precission = tf.reduce_mean(correct_preds)\n return \n \n def step_training (self, learning_rate=0.1):\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_one_hot_en, logits=self.logits))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n return loss, optimizer\n \n \n \n ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
ec80c3c19c2fe14b3c72e6945ddedff8041843a2
165,618
ipynb
Jupyter Notebook
boston_housing/boston_housing.ipynb
lcdutramartins/UdacityML
a3f7dde1722fa25ce3607d0ee20d1e6cbeb1e654
[ "MIT" ]
1
2017-02-18T14:42:55.000Z
2017-02-18T14:42:55.000Z
boston_housing/boston_housing.ipynb
lcdutramartins/UdacityML
a3f7dde1722fa25ce3607d0ee20d1e6cbeb1e654
[ "MIT" ]
null
null
null
boston_housing/boston_housing.ipynb
lcdutramartins/UdacityML
a3f7dde1722fa25ce3607d0ee20d1e6cbeb1e654
[ "MIT" ]
null
null
null
203.712177
77,720
0.882392
[ [ [ "# Machine Learning Engineer Nanodegree\n## Model Evaluation & Validation\n## Project: Predicting Boston Housing Prices\n\nWelcome to the first project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!\n\nIn addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. \n\n>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.", "_____no_output_____" ], [ "## Getting Started\nIn this project, you will evaluate the performance and predictive power of a model that has been trained and tested on data collected from homes in suburbs of Boston, Massachusetts. A model trained on this data that is seen as a *good fit* could then be used to make certain predictions about a home — in particular, its monetary value. This model would prove to be invaluable for someone like a real estate agent who could make use of such information on a daily basis.\n\nThe dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Housing). The Boston housing data was collected in 1978 and each of the 506 entries represent aggregated data about 14 features for homes from various suburbs in Boston, Massachusetts. For the purposes of this project, the following preprocessing steps have been made to the dataset:\n- 16 data points have an `'MEDV'` value of 50.0. These data points likely contain **missing or censored values** and have been removed.\n- 1 data point has an `'RM'` value of 8.78. This data point can be considered an **outlier** and has been removed.\n- The features `'RM'`, `'LSTAT'`, `'PTRATIO'`, and `'MEDV'` are essential. The remaining **non-relevant features** have been excluded.\n- The feature `'MEDV'` has been **multiplicatively scaled** to account for 35 years of market inflation.\n\nRun the code cell below to load the Boston housing dataset, along with a few of the necessary Python libraries required for this project. 
You will know the dataset loaded successfully if the size of the dataset is reported.", "_____no_output_____" ] ], [ [ "# Import libraries necessary for this project\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cross_validation import ShuffleSplit\n\n# Import supplementary visualizations code visuals.py\nimport visuals as vs\n\n# Pretty display for notebooks\n%matplotlib inline\n\n# Load the Boston housing dataset\ndata = pd.read_csv('housing.csv')\nprices = data['MEDV']\nfeatures = data.drop('MEDV', axis = 1)\n \n# Success\nprint \"Boston housing dataset has {} data points with {} variables each.\".format(*data.shape)", "Boston housing dataset has 489 data points with 4 variables each.\n" ] ], [ [ "## Data Exploration\nIn this first section of this project, you will make a cursory investigation about the Boston housing data and provide your observations. Familiarizing yourself with the data through an explorative process is a fundamental practice to help you better understand and justify your results.\n\nSince the main goal of this project is to construct a working model which has the capability of predicting the value of houses, we will need to separate the dataset into **features** and the **target variable**. The **features**, `'RM'`, `'LSTAT'`, and `'PTRATIO'`, give us quantitative information about each data point. The **target variable**, `'MEDV'`, will be the variable we seek to predict. These are stored in `features` and `prices`, respectively.", "_____no_output_____" ], [ "### Implementation: Calculate Statistics\nFor your very first coding implementation, you will calculate descriptive statistics about the Boston housing prices. Since `numpy` has already been imported for you, use this library to perform the necessary calculations. These statistics will be extremely important later on to analyze various prediction results from the constructed model.\n\nIn the code cell below, you will need to implement the following:\n- Calculate the minimum, maximum, mean, median, and standard deviation of `'MEDV'`, which is stored in `prices`.\n - Store each calculation in their respective variable.", "_____no_output_____" ] ], [ [ "# Minimum price of the data\nminimum_price = np.amin(prices)\n\n# Maximum price of the data\nmaximum_price = np.amax(prices)\n\n# Mean price of the data\nmean_price = np.mean(prices)\n\n# Median price of the data\nmedian_price = np.median(prices)\n\n# Standard deviation of prices of the data\nstd_price = np.std(prices)\n\n# Show the calculated statistics\nprint \"Statistics for Boston housing dataset:\\n\"\nprint \"Minimum price: ${:,.2f}\".format(minimum_price)\nprint \"Maximum price: ${:,.2f}\".format(maximum_price)\nprint \"Mean price: ${:,.2f}\".format(mean_price)\nprint \"Median price ${:,.2f}\".format(median_price)\nprint \"Standard deviation of prices: ${:,.2f}\".format(std_price)", "Statistics for Boston housing dataset:\n\nMinimum price: $105,000.00\nMaximum price: $1,024,800.00\nMean price: $454,342.94\nMedian price $438,900.00\nStandard deviation of prices: $165,171.13\n" ] ], [ [ "### Question 1 - Feature Observation\nAs a reminder, we are using three features from the Boston housing dataset: `'RM'`, `'LSTAT'`, and `'PTRATIO'`. 
For each data point (neighborhood):\n- `'RM'` is the average number of rooms among homes in the neighborhood.\n- `'LSTAT'` is the percentage of homeowners in the neighborhood considered \"lower class\" (working poor).\n- `'PTRATIO'` is the ratio of students to teachers in primary and secondary schools in the neighborhood.\n\n_Using your intuition, for each of the three features above, do you think that an increase in the value of that feature would lead to an **increase** in the value of `'MEDV'` or a **decrease** in the value of `'MEDV'`? Justify your answer for each._ \n**Hint:** Would you expect a home that has an `'RM'` value of 6 be worth more or less than a home that has an `'RM'` value of 7?", "_____no_output_____" ], [ "**Answer: **\n\nFor the average number of rooms among homes in a neighborhood, 'RM', it is appropriate to have an expectation that home prices are directly proportional to this feature (the higher the number of rooms is, more expensive is a home). For having more rooms, it is expected that a home is bigger and bigger homes tend to be more expensive.\n\nFor the percentage of \"lower class\" homeowners, 'LSTAT', an inverse relation with home prices is expected. People that earn less incomes tend to live in cheaper homes. Therefore, the higher the percentage of \"lower class\" homeowners is, cheaper is a home.\n\nFor the ratio of students to teachers in schools in the neighborhood, 'PTRATIO', it is possible to expect that the lower the ratio is, more expensive is a home. This expectation is based on a belief that better schools have classes with fewer students per teacher. Hence, if schools are better, their fees tend to be more expensive and affordable only for richer families. If these families live next to the school, home prices may also be higher.", "_____no_output_____" ], [ "----\n\n## Developing a Model\nIn this second section of the project, you will develop the tools and techniques necessary for a model to make a prediction. Being able to make accurate evaluations of each model's performance through the use of these tools and techniques helps to greatly reinforce the confidence in your predictions.", "_____no_output_____" ], [ "### Implementation: Define a Performance Metric\nIt is difficult to measure the quality of a given model without quantifying its performance over training and testing. This is typically done using some type of performance metric, whether it is through calculating some type of error, the goodness of fit, or some other useful measurement. For this project, you will be calculating the [*coefficient of determination*](http://stattrek.com/statistics/dictionary.aspx?definition=coefficient_of_determination), R<sup>2</sup>, to quantify your model's performance. The coefficient of determination for a model is a useful statistic in regression analysis, as it often describes how \"good\" that model is at making predictions. \n\nThe values for R<sup>2</sup> range from 0 to 1, which captures the percentage of squared correlation between the predicted and actual values of the **target variable**. A model with an R<sup>2</sup> of 0 is no better than a model that always predicts the *mean* of the target variable, whereas a model with an R<sup>2</sup> of 1 perfectly predicts the target variable. Any value between 0 and 1 indicates what percentage of the target variable, using this model, can be explained by the **features**. 
_A model can be given a negative R<sup>2</sup> as well, which indicates that the model is **arbitrarily worse** than one that always predicts the mean of the target variable._\n\nFor the `performance_metric` function in the code cell below, you will need to implement the following:\n- Use `r2_score` from `sklearn.metrics` to perform a performance calculation between `y_true` and `y_predict`.\n- Assign the performance score to the `score` variable.", "_____no_output_____" ] ], [ [ "# Import 'r2_score'\nfrom sklearn.metrics import r2_score\n\ndef performance_metric(y_true, y_predict):\n \"\"\" Calculates and returns the performance score between \n true and predicted values based on the metric chosen. \"\"\"\n \n # Calculate the performance score between 'y_true' and 'y_predict'\n score = r2_score(y_true, y_predict)\n \n # Return the score\n return score", "_____no_output_____" ] ], [ [ "### Question 2 - Goodness of Fit\nAssume that a dataset contains five data points and a model made the following predictions for the target variable:\n\n| True Value | Prediction |\n| :-------------: | :--------: |\n| 3.0 | 2.5 |\n| -0.5 | 0.0 |\n| 2.0 | 2.1 |\n| 7.0 | 7.8 |\n| 4.2 | 5.3 |\n*Would you consider this model to have successfully captured the variation of the target variable? Why or why not?* \n\nRun the code cell below to use the `performance_metric` function and calculate this model's coefficient of determination.", "_____no_output_____" ] ], [ [ "# Calculate the performance of this model\nscore = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])\nprint \"Model has a coefficient of determination, R^2, of {:.3f}.\".format(score)", "Model has a coefficient of determination, R^2, of 0.923.\n" ] ], [ [ "**Answer:** This model successfully captures the variation of the target variable. The coefficient of determination demonstrates that 92.3% of the variance of the target variable is predictable by the model. For all given data points, the true values and the predicted values present similar variation although there may be some small error due bias and variance.", "_____no_output_____" ], [ "### Implementation: Shuffle and Split Data\nYour next implementation requires that you take the Boston housing dataset and split the data into training and testing subsets. Typically, the data is also shuffled into a random order when creating the training and testing subsets to remove any bias in the ordering of the dataset.\n\nFor the code cell below, you will need to implement the following:\n- Use `train_test_split` from `sklearn.cross_validation` to shuffle and split the `features` and `prices` data into training and testing sets.\n - Split the data into 80% training and 20% testing.\n - Set the `random_state` for `train_test_split` to a value of your choice. 
This ensures results are consistent.\n- Assign the train and testing splits to `X_train`, `X_test`, `y_train`, and `y_test`.", "_____no_output_____" ] ], [ [ "# Import 'train_test_split'\nfrom sklearn.model_selection import train_test_split\n\n# Shuffle and split the data into training and testing subsets\nX_train, X_test, y_train, y_test = train_test_split(features, prices, train_size=0.8, random_state=0)\n\n# Success\nprint \"Training and testing split was successful.\"", "Training and testing split was successful.\n" ] ], [ [ "### Question 3 - Training and Testing\n*What is the benefit to splitting a dataset into some ratio of training and testing subsets for a learning algorithm?* \n**Hint:** What could go wrong with not having a way to test your model?", "_____no_output_____" ], [ "**Answer: ** If a model is not tested, it is difficult to assess its performance. If all the avaliable data is used for training, the model cannot be tested using independent data and the performance result may be masked, i.e., it will not demonstrate how good or bad the model is in reality. Then, there is a risk of overfitting the model, since it may perfectly predict the target variable considering features from training data, but the predictions may have large error for new data.", "_____no_output_____" ], [ "----\n\n## Analyzing Model Performance\nIn this third section of the project, you'll take a look at several models' learning and testing performances on various subsets of training data. Additionally, you'll investigate one particular algorithm with an increasing `'max_depth'` parameter on the full training set to observe how model complexity affects performance. Graphing your model's performance based on varying criteria can be beneficial in the analysis process, such as visualizing behavior that may not have been apparent from the results alone.", "_____no_output_____" ], [ "### Learning Curves\nThe following code cell produces four graphs for a decision tree model with different maximum depths. Each graph visualizes the learning curves of the model for both training and testing as the size of the training set is increased. Note that the shaded region of a learning curve denotes the uncertainty of that curve (measured as the standard deviation). The model is scored on both the training and testing sets using R<sup>2</sup>, the coefficient of determination. \n\nRun the code cell below and use these graphs to answer the following question.", "_____no_output_____" ] ], [ [ "# Produce learning curves for varying training set sizes and maximum depths\nvs.ModelLearning(features, prices)", "_____no_output_____" ] ], [ [ "### Question 4 - Learning the Data\n*Choose one of the graphs above and state the maximum depth for the model. What happens to the score of the training curve as more training points are added? What about the testing curve? Would having more training points benefit the model?* \n**Hint:** Are the learning curves converging to particular scores?", "_____no_output_____" ], [ "**Answer: ** \n\nConsidering the model whose maximum depth is 3, the training score diminishes and the testing score increases as more training points are added until the training set has 300 data points. For training sets having more than 300 data points, the training and testing scores have very little variation.\n\nConsidering the model whose maximum depth is 6, the same observation for a model whose maximum depth is 3 applies. 
But there is an importante difference for the former results: training and testing scores converge to different values as more training poins are considered. This may indicate the model capacity of generalization may be better when maximum depth is 3 than when maximum depth is 6 because training scores are very high but testing scores are lower by a reasonable margin.\n\nIn general, having more training points benefits the model until a certain limit is reached. By trespassing this limit, there is a risk of overfitting the model. This limit could be 300 data points for the model whose maximum depth is 3.", "_____no_output_____" ], [ "### Complexity Curves\nThe following code cell produces a graph for a decision tree model that has been trained and validated on the training data using different maximum depths. The graph produces two complexity curves — one for training and one for validation. Similar to the **learning curves**, the shaded regions of both the complexity curves denote the uncertainty in those curves, and the model is scored on both the training and validation sets using the `performance_metric` function. \n\nRun the code cell below and use this graph to answer the following two questions.", "_____no_output_____" ] ], [ [ "vs.ModelComplexity(X_train, y_train)", "_____no_output_____" ] ], [ [ "### Question 5 - Bias-Variance Tradeoff\n*When the model is trained with a maximum depth of 1, does the model suffer from high bias or from high variance? How about when the model is trained with a maximum depth of 10? What visual cues in the graph justify your conclusions?* \n**Hint:** How do you know when a model is suffering from high bias or high variance?", "_____no_output_____" ], [ "**Answer: **\n\nWhen the model is trained with a maximum depth of 1, it suffers from high bias because training and validation scores are pretty similar. Also, the scores are not approximately equal to 1, which indicates that a considerable part of the variance of the target variable is not explained by the model. \n\nWhen the model is trained with a maximum depth of 10, it suffers from high variance because training and validation scores are pretty different. The model perfectly predicts training points but its validation score is not high.", "_____no_output_____" ], [ "### Question 6 - Best-Guess Optimal Model\n*Which maximum depth do you think results in a model that best generalizes to unseen data? What intuition lead you to this answer?*", "_____no_output_____" ], [ "**Answer: **\nIt is probable that the model having maximum depth equal to 3 best generalizes to unseen data. \n\nConsidering the learning curves, a model having maximum depth higher than 4 present lower validation scores and the difference between training and validation scores indicate that predictions' variance gets higher as higher is the maximum depth.\n\nThe model whose maximum depth is equal to 4 has better scores than the model whose maximum depth is 3. 
But, again, the higher difference between training and validation scores may prejudice the capacity of the model of generalizing new data.\n\nA model having maximum depth lower than 3 have lower scores which is a sign that predictions tend to be biased.", "_____no_output_____" ], [ "-----\n\n## Evaluating Model Performance\nIn this final section of the project, you will construct a model and make a prediction on the client's feature set using an optimized model from `fit_model`.", "_____no_output_____" ], [ "### Question 7 - Grid Search\n*What is the grid search technique and how it can be applied to optimize a learning algorithm?*", "_____no_output_____" ], [ "**Answer: **\n\nGrid Search is a method for optimizing hyperparameters of an estimator by exhaustive searching the hyperparameter's space (i.e., all possible combinations of hyperparameter's values) and evaluating the performance of each candidate estimator (usually, using cross-validation method).\n\nConsidering a SVM (support vector machine) estimator, the grid search method may be applied for defining the estimator parameters. Assuming, for example, that the estimator parameters may have the following values:\n* kernel: *RBF* or *linear*;\n* penalty (C): *0* or *10*;\n\nthe grid search will perform the following operations:\n1. Fit the estimator considering *RBF* kernel and C = *0*;\n2. Perform predictions;\n3. Evaluate performance using a method like cross-validation;\n4. Repeat previous steps but considering another combination of parameters (*RBF* kernel and C = *10*, *linear* kernel and C = *0* and *linear* kernel and C = *0*);\n5. Compare performance results for all parameter combinations and choose the best one.", "_____no_output_____" ], [ "### Question 8 - Cross-Validation\n*What is the k-fold cross-validation training technique? What benefit does this technique provide for grid search when optimizing a model?* \n**Hint:** Much like the reasoning behind having a testing set, what could go wrong with using grid search without a cross-validated set?", "_____no_output_____" ], [ "**Answer: **\n\nK-fold cross-validation training technique consists of splitting the available data into *k* subsets. Then, *k* cross-validation experiments are realized considering every time a different subset as validation data and the others as training data. Finally, the performance of the model is calculated as the average of the *k* results.\n\nThis technique lowers the risk of overfitting a model even if this model is optimized using a method like Grid Search. When defining the hyperparameters of an estimator, training and testing data set are necessary for defining the estimator configuration. Then, if another data set is not used for evaluating the real performance of the definite estimator, there is a risk that the estimator has good prediction results but only for known data. For avoiding the need of a third data set, k-fold cross-validation technique enables the use of only two data sets: one designated for evaluating the best combination of hyperparameters and one for final validation. The first set is the one used in k-fold cross-validation technique.\n\nIn a bried description, the k-fold cross-validation technique allows that more data is available for training and testing by running many cross-validation experiments.", "_____no_output_____" ], [ "### Implementation: Fitting a Model\nYour final implementation requires that you bring everything together and train a model using the **decision tree algorithm**. 
To ensure that you are producing an optimized model, you will train the model using the grid search technique to optimize the `'max_depth'` parameter for the decision tree. The `'max_depth'` parameter can be thought of as how many questions the decision tree algorithm is allowed to ask about the data before making a prediction. Decision trees are part of a class of algorithms called *supervised learning algorithms*.\n\nIn addition, you will find your implementation is using `ShuffleSplit()` for an alternative form of cross-validation (see the `'cv_sets'` variable). While it is not the K-Fold cross-validation technique you describe in **Question 8**, this type of cross-validation technique is just as useful!. The `ShuffleSplit()` implementation below will create 10 (`'n_splits'`) shuffled sets, and for each shuffle, 20% (`'test_size'`) of the data will be used as the *validation set*. While you're working on your implementation, think about the contrasts and similarities it has to the K-fold cross-validation technique.\n\nFor the `fit_model` function in the code cell below, you will need to implement the following:\n- Use [`DecisionTreeRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html) from `sklearn.tree` to create a decision tree regressor object.\n - Assign this object to the `'regressor'` variable.\n- Create a dictionary for `'max_depth'` with the values from 1 to 10, and assign this to the `'params'` variable.\n- Use [`make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html) from `sklearn.metrics` to create a scoring function object.\n - Pass the `performance_metric` function as a parameter to the object.\n - Assign this scoring function to the `'scoring_fnc'` variable.\n- Use [`GridSearchCV`](http://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html) from `sklearn.grid_search` to create a grid search object.\n - Pass the variables `'regressor'`, `'params'`, `'scoring_fnc'`, and `'cv_sets'` as parameters to the object. \n - Assign the `GridSearchCV` object to the `'grid'` variable.", "_____no_output_____" ] ], [ [ "# Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import GridSearchCV, ShuffleSplit\n\ndef fit_model(X, y):\n \"\"\" Performs grid search over the 'max_depth' parameter for a \n decision tree regressor trained on the input data [X, y]. \"\"\"\n \n # Create cross-validation sets from the training data\n cv_sets = ShuffleSplit(n_splits=10, test_size=0.20, random_state=0)\n\n # Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = { 'max_depth': range(1, 11) }\n\n # Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = make_scorer(performance_metric)\n\n # Create the grid search object\n grid = GridSearchCV(regressor, params, scoring_fnc, cv=cv_sets)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "_____no_output_____" ] ], [ [ "### Making Predictions\nOnce a model has been trained on a given set of data, it can now be used to make predictions on new sets of input data. 
In the case of a *decision tree regressor*, the model has learned *what the best questions to ask about the input data are*, and can respond with a prediction for the **target variable**. You can use these predictions to gain information about data where the value of the target variable is unknown — such as data the model was not trained on.", "_____no_output_____" ], [ "### Question 9 - Optimal Model\n_What maximum depth does the optimal model have? How does this result compare to your guess in **Question 6**?_ \n\nRun the code block below to fit the decision tree regressor to the training data and produce an optimal model.", "_____no_output_____" ] ], [ [ "#Performing Nexp experiments for evaluating if optimal max_depth varies according different data splits\nNexp = 100\nresults = np.empty([Nexp, 1])\nfor i in range(Nexp):\n # Shuffle and split the data into training and testing subsets\n X_train, X_test, y_train, y_test = train_test_split(features, prices, train_size=0.8, random_state=0)\n # Fit the training data to the model using grid search\n reg = fit_model(X_train, y_train)\n # Produce the value for 'max_depth'\n results[i] = reg.get_params()['max_depth']\n \n#Histogram of optimal max_depth results\nimport matplotlib.pyplot as plt\n\nplt.hist(results, bins = 10, align = 'left')\nplt.xlabel('max_depth')\nplt.ylabel('Occurrences')\nplt.title(r'$\\mathrm{Histogram\\ of\\ max_depth}\\ $')\nplt.grid(True)", "_____no_output_____" ] ], [ [ "**Answer: ** The optimal model has maximum depth of 4. This result is not equal to the answer given in Question 6 since it was considered that a model whose maximum depth is 4 would generate predictions with higher variance.", "_____no_output_____" ], [ "### Question 10 - Predicting Selling Prices\nImagine that you were a real estate agent in the Boston area looking to use this model to help price homes owned by your clients that they wish to sell. You have collected the following information from three of your clients:\n\n| Feature | Client 1 | Client 2 | Client 3 |\n| :---: | :---: | :---: | :---: |\n| Total number of rooms in home | 5 rooms | 4 rooms | 8 rooms |\n| Neighborhood poverty level (as %) | 17% | 32% | 3% |\n| Student-teacher ratio of nearby schools | 15-to-1 | 22-to-1 | 12-to-1 |\n*What price would you recommend each client sell his/her home at? Do these prices seem reasonable given the values for the respective features?* \n**Hint:** Use the statistics you calculated in the **Data Exploration** section to help justify your response. \n\nRun the code block below to have your optimized model make predictions for each client's home.", "_____no_output_____" ] ], [ [ "# Produce a matrix for client data\nclient_data = [[5, 17, 15], # Client 1\n [4, 32, 22], # Client 2\n [8, 3, 12]] # Client 3\n\n# Show predictions\nfor i, price in enumerate(reg.predict(client_data)):\n print \"Predicted selling price for Client {}'s home: ${:,.2f}\".format(i+1, price)\n\n# Histogram of predicted prices\nimport matplotlib.pyplot as plt\nplt.hist(prices, bins = 30)\nfor price in reg.predict(client_data):\n plt.axvline(price, c = 'r', lw = 3)", "Predicted selling price for Client 1's home: $391,183.33\nPredicted selling price for Client 2's home: $189,123.53\nPredicted selling price for Client 3's home: $942,666.67\n" ] ], [ [ "**Answer: ** Considering results obtained from data exploration, estimated prices seem to be reasonable.\n\nThe features of Client 2's home clearly indicate that it has a smaller size and it is situated at a poorer neighborhood. 
As the estimated price is closer to the minimum price, it is possible to affirm that the prediction is coherent with the features. \n\nThe features of Client 3's home clearly indicate that it has a good size and it is situated in a richer neighborhood. As the estimated price is closer to the maximum price, it is possible to affirm that the prediction is coherent with the features.\n\nThe features of Client 1's home seem to be an \"average\" of the features of the other clients, especially the neighborhood poverty level. The number of rooms for Client 1's home is almost equal to the number of rooms of Client 2's cheap home, while the student-teacher ratio is similar to Client 3's expensive home. As the price is relatively close to the mean and the median values, it is possible to affirm that the prediction is coherent with the features.", "_____no_output_____" ], [ "### Sensitivity\nAn optimal model is not necessarily a robust model. Sometimes, a model is either too complex or too simple to sufficiently generalize to new data. Sometimes, a model could use a learning algorithm that is not appropriate for the structure of the data given. Other times, the data itself could be too noisy or contain too few samples to allow a model to adequately capture the target variable — i.e., the model is underfitted. Run the code cell below to run the `fit_model` function ten times with different training and testing sets to see how the prediction for a specific client changes with the data it's trained on.", "_____no_output_____" ] ], [ [ "vs.PredictTrials(features, prices, fit_model, client_data)", "Trial 1: $391,183.33\nTrial 2: $419,700.00\nTrial 3: $415,800.00\nTrial 4: $420,622.22\nTrial 5: $418,377.27\nTrial 6: $411,931.58\nTrial 7: $399,663.16\nTrial 8: $407,232.00\nTrial 9: $351,577.61\nTrial 10: $413,700.00\n\nRange in prices: $69,044.61\n" ] ], [ [ "### Question 11 - Applicability\n*In a few sentences, discuss whether the constructed model should or should not be used in a real-world setting.* \n**Hint:** Some questions worth answering:\n- *How relevant today is data that was collected from 1978?*\n- *Are the features present in the data sufficient to describe a home?*\n- *Is the model robust enough to make consistent predictions?*\n- *Would data collected in an urban city like Boston be applicable in a rural city?*", "_____no_output_____" ], [ "**Answer: **\n\nAlthough the constructed model seems to make consistent predictions, it should not be used in a real-world setting.\n\nThe first reason for not using the model is that the data was collected almost 40 years ago, which is a period long enough for the market to change. Preferences for homes may vary according to the size of families, lifestyle, growth in different regions of the city, etc. Another important aspect is the economy of the city and the country, which may cause price fluctuations depending on the willingness of clients to buy a home.\n\nThe second reason is that the features present in the data cannot completely describe a home. The current features may not provide accurate information about a home that is relevant for defining its price. The number of rooms, for example, does not guarantee that a home is larger just because it has more rooms, and it does not indicate whether there is a garage or a yard. In addition, there are other important features absent, like how old the home is or how long it has been since the last renovation.\n\nThe third reason is similar to the first, but considering the place where the data was collected. 
The Boston housing market may be similar to other large urban centers, but it probably has different characteristics from rural places. Even among urban centers, there may be peculiarities that cause big differences in their housing markets.", "_____no_output_____" ], [ "> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.", "_____no_output_____" ] ] ]
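The cross-validation discussion in Question 8 of the notebook above stays in prose. The sketch below is not part of the original notebook; it illustrates the same k-fold idea with scikit-learn's `KFold` and `cross_val_score`, where the toy regression data, `k = 5`, and `max_depth = 3` are assumptions made only for the illustration.

```python
# Illustrative sketch only (not from the original notebook): k-fold cross-validation
# as described in Question 8, using scikit-learn utilities.
from sklearn.datasets import make_regression
from sklearn.model_selection import KFold, cross_val_score
from sklearn.tree import DecisionTreeRegressor

# Toy data standing in for the notebook's features/prices.
X, y = make_regression(n_samples=200, n_features=3, noise=10.0, random_state=0)

# Split the data into k = 5 folds; each fold serves as validation data exactly once.
kf = KFold(n_splits=5, shuffle=True, random_state=0)
model = DecisionTreeRegressor(max_depth=3)

# cross_val_score runs the k experiments and returns one score (R^2 here) per fold;
# the reported performance is their average.
scores = cross_val_score(model, X, y, cv=kf)
print(scores, scores.mean())
```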
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
ec80c8b589dc9d147a08ee1f086f7438c4c6bbf6
12,474
ipynb
Jupyter Notebook
_solved/00-jupyter_introduction.ipynb
jorisvandenbossche/ICES-python-data
63864947657f37cb26cb4e2dcd67ff106dffe9cd
[ "BSD-3-Clause" ]
1
2022-03-02T17:41:46.000Z
2022-03-02T17:41:46.000Z
_solved/00-jupyter_introduction.ipynb
jorisvandenbossche/ICES-python-data
63864947657f37cb26cb4e2dcd67ff106dffe9cd
[ "BSD-3-Clause" ]
1
2022-03-14T15:15:53.000Z
2022-03-14T15:15:53.000Z
_solved/00-jupyter_introduction.ipynb
jorisvandenbossche/ICES-python-data
63864947657f37cb26cb4e2dcd67ff106dffe9cd
[ "BSD-3-Clause" ]
null
null
null
21.250426
267
0.513789
[ [ [ "<p><font size=\"6\"><b>Jupyter notebook INTRODUCTION </b></font></p>\n\n> *© 2021, Joris Van den Bossche and Stijn Van Hoey (<mailto:[email protected]>, <mailto:[email protected]>). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*\n\n---", "_____no_output_____" ] ], [ [ "from IPython.display import Image\nImage(url='http://python.org/images/python-logo.gif')", "_____no_output_____" ] ], [ [ "<big><center>To run a cell: push the start triangle in the menu or type **SHIFT + ENTER/RETURN**\n![](../img/shiftenter.jpg)", "_____no_output_____" ], [ "# Notebook cell types", "_____no_output_____" ], [ "We will work in **Jupyter notebooks** during this course. A notebook is a collection of `cells`, that can contain different content:", "_____no_output_____" ], [ "## Code", "_____no_output_____" ] ], [ [ "# Code cell, then we are using python\nprint('Hello DS')", "_____no_output_____" ], [ "DS = 10\nprint(DS + 5) # Yes, we advise to use Python 3 (!)", "_____no_output_____" ] ], [ [ "Writing code is what you will do most during this course!", "_____no_output_____" ], [ "## Markdown", "_____no_output_____" ], [ "Text cells, using Markdown syntax. With the syntax, you can make text **bold** or *italic*, amongst many other things...", "_____no_output_____" ], [ "* list\n* with\n* items\n\n[Link to interesting resources](https://www.youtube.com/watch?v=z9Uz1icjwrM) or images: ![images](https://listame.files.wordpress.com/2012/02/bender-1.jpg)\n\n> Blockquotes if you like them\n> This line is part of the same blockquote.", "_____no_output_____" ], [ "Mathematical formulas can also be incorporated (LaTeX it is...)\n$$\\frac{dBZV}{dt}=BZV_{in} - k_1 .BZV$$\n$$\\frac{dOZ}{dt}=k_2 .(OZ_{sat}-OZ) - k_1 .BZV$$", "_____no_output_____" ], [ "Or tables:\n\ncourse | points\n --- | --- \n Math | 8\n Chemistry | 4\n\nor tables with Latex..\n\n Symbool | verklaring\n --- | --- \n $$BZV_{(t=0)}$$\t | initiële biochemische zuurstofvraag (7.33 mg.l-1)\n $$OZ_{(t=0)}$$\t | initiële opgeloste zuurstof (8.5 mg.l-1)\n $$BZV_{in}$$\t\t | input BZV(1 mg.l-1.min-1)\n $$OZ_{sat}$$\t\t | saturatieconcentratie opgeloste zuurstof (11 mg.l-1)\n $$k_1$$\t\t | bacteriële degradatiesnelheid (0.3 min-1)\n $$k_2$$\t\t | reäeratieconstante (0.4 min-1)", "_____no_output_____" ], [ "Code can also be incorporated, but than just to illustrate:", "_____no_output_____" ], [ "```python\nBOT = 12\nprint(BOT)\n```", "_____no_output_____" ], [ "See also: https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet", "_____no_output_____" ], [ "## HTML", "_____no_output_____" ], [ "You can also use HTML commands, just check this cell:\n<h3> html-adapted titel with &#60;h3&#62; </h3> <p></p>\n<b> Bold text &#60;b&#62; </b> of <i>or italic &#60;i&#62; </i>", "_____no_output_____" ], [ "## Headings of different sizes: section\n### subsection\n#### subsubsection", "_____no_output_____" ], [ "## Raw Text", "_____no_output_____" ] ], [ [ "Cfr. 
any text editor", "_____no_output_____" ] ], [ [ "# Notebook handling ESSENTIALS", "_____no_output_____" ], [ "## Completion: TAB\n![](../img/tabbutton.jpg)", "_____no_output_____" ], [ "* The **TAB** button is essential: It provides you all **possible actions** you can do after loading in a library *AND* it is used for **automatic autocompletion**:", "_____no_output_____" ] ], [ [ "import os\nos.mkdir", "_____no_output_____" ], [ "my_very_long_variable_name = 3", "_____no_output_____" ] ], [ [ "my_ + TAB", "_____no_output_____" ] ], [ [ "## Help: SHIFT + TAB\n![](../img/shift-tab.png)", "_____no_output_____" ], [ "* The **SHIFT-TAB** combination is ultra essential to get information/help about the current operation", "_____no_output_____" ] ], [ [ "round(3.2)", "_____no_output_____" ], [ "import os\nos.mkdir", "_____no_output_____" ], [ "# An alternative is to put a question mark behind the command\nos.mkdir?", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n <b>EXERCISE</b>: What happens if you put two question marks behind the command?\n</div>", "_____no_output_____" ] ], [ [ "import glob\nglob.glob??", "_____no_output_____" ] ], [ [ "## *edit* mode to *command* mode\n\n* *edit* mode means you're editing a cell, i.e. with your cursor inside a cell to type content\n* *command* mode means you're NOT editing(!), i.e. NOT with your cursor inside a cell to type content\n\nTo start editing, click inside a cell or \n<img src=\"../img/enterbutton.png\" alt=\"Key enter\" style=\"width:150px\">\n\nTo stop editing,\n<img src=\"../img/keyescape.png\" alt=\"Key A\" style=\"width:150px\">", "_____no_output_____" ], [ "## new cell A-bove\n<img src=\"../img/keya.png\" alt=\"Key A\" style=\"width:150px\">\n\nCreate a new cell above with the key A... when in *command* mode", "_____no_output_____" ], [ "## new cell B-elow\n<img src=\"../img/keyb.png\" alt=\"Key B\" style=\"width:150px\">\n\nCreate a new cell below with the key B... 
when in *command* mode", "_____no_output_____" ], [ "## CTRL + SHIFT + C", "_____no_output_____" ], [ "Just do it!", "_____no_output_____" ], [ "## Trouble...", "_____no_output_____" ], [ "<div class=\"alert alert-danger\">\n <b>NOTE</b>: When you're stuck, or things do crash: \n <ul>\n <li> first try <code>Kernel</code> > <code>Interrupt</code> -> your cell should stop running\n <li> if no succes -> <code>Kernel</code> > <code>Restart</code> -> restart your notebook\n </ul>\n</div>", "_____no_output_____" ], [ "* **Stackoverflow** is really, really, really nice!\n\n http://stackoverflow.com/questions/tagged/python", "_____no_output_____" ], [ "* Google search is with you!", "_____no_output_____" ], [ "<big><center>**REMEMBER**: To run a cell: <strike>push the start triangle in the menu or</strike> type **SHIFT + ENTER**\n![](../img/shiftenter.jpg)", "_____no_output_____" ], [ "# some MAGIC...", "_____no_output_____" ], [ "## `%psearch`", "_____no_output_____" ] ], [ [ "%psearch os.*dir", "_____no_output_____" ] ], [ [ "## `%%timeit`", "_____no_output_____" ] ], [ [ "%%timeit\n\nmylist = range(1000)\nfor i in mylist:\n i = i**2", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "%%timeit\n\nnp.arange(1000)**2", "_____no_output_____" ] ], [ [ "## `%whos`", "_____no_output_____" ] ], [ [ "%whos", "_____no_output_____" ] ], [ [ "## `%lsmagic`", "_____no_output_____" ] ], [ [ "%lsmagic", "_____no_output_____" ] ], [ [ "# Let's get started!", "_____no_output_____" ] ], [ [ "from IPython.display import FileLink, FileLinks", "_____no_output_____" ], [ "FileLinks('.', recursive=False)", "_____no_output_____" ] ], [ [ "The follow-up notebooks provide additional background (largely adopted from the [scientific python notes](http://www.scipy-lectures.org/), which you can explore on your own to get more background on the Python syntax if specific elements would not be clear. \n\nFor now, we will work on the `python_rehearsal.ipynb` notebook, which is a short summary version of the other notebooks to quickly grasp the main elements", "_____no_output_____" ] ] ]
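The `%%timeit` cells in the notebook above rely on IPython magics. As a side note that is not part of the original notebook, the same loop-versus-NumPy comparison can be timed with the standard-library `timeit` module outside IPython:

```python
# Illustrative sketch only: timing the two computations without IPython magics.
import timeit

loop_stmt = "for i in range(1000): i ** 2"
numpy_stmt = "np.arange(1000) ** 2"

print("python loop:", timeit.timeit(loop_stmt, number=1000))
print("numpy      :", timeit.timeit(numpy_stmt, setup="import numpy as np", number=1000))
```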
[ "markdown", "code", "markdown", "code", "markdown", "raw", "markdown", "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "raw" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "raw" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec80c9b6a3836cb7a6b725e45e0a658bbc717c57
45,142
ipynb
Jupyter Notebook
lectures/1_image_filters.ipynb
zeroth/skimage-tutorials
0c7f20f7d21ef935caebbd36b0347871cebb0569
[ "CC-BY-4.0" ]
4
2020-07-22T15:24:19.000Z
2020-08-14T08:10:48.000Z
lectures/1_image_filters.ipynb
zeroth/skimage-tutorials
0c7f20f7d21ef935caebbd36b0347871cebb0569
[ "CC-BY-4.0" ]
null
null
null
lectures/1_image_filters.ipynb
zeroth/skimage-tutorials
0c7f20f7d21ef935caebbd36b0347871cebb0569
[ "CC-BY-4.0" ]
1
2020-11-30T15:52:47.000Z
2020-11-30T15:52:47.000Z
27.193976
502
0.561384
[ [ [ "%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n%gui qt", "_____no_output_____" ] ], [ [ "# Image filtering", "_____no_output_____" ], [ "## Image filtering theory", "_____no_output_____" ], [ "Filtering is one of the most basic and common image operations in image processing. You can filter an image to remove noise or to enhance features; the filtered image could be the desired result or just a preprocessing step. Regardless, filtering is an important topic to understand.", "_____no_output_____" ], [ "### Local filtering", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\n\nplt.rcParams['image.cmap'] = 'gray'", "_____no_output_____" ] ], [ [ "The \"local\" in local filtering simply means that a pixel is adjusted by values in some surrounding neighborhood. These surrounding elements are identified or weighted based on a \"footprint\", \"structuring element\", or \"kernel\".\n\nLet's go to back to basics and look at a 1D step-signal", "_____no_output_____" ] ], [ [ "step_signal = np.zeros(100)\nstep_signal[50:] = 1\nfig, ax = plt.subplots()\nax.plot(step_signal)\nax.margins(y=0.1)", "_____no_output_____" ] ], [ [ "Now add some noise to this signal:", "_____no_output_____" ] ], [ [ "# Just to make sure we all see the same results\nnp.random.seed(0)\n\n\nnoisy_signal = (step_signal\n + np.random.normal(0, 0.35, step_signal.shape))\nfig, ax = plt.subplots()\nax.plot(noisy_signal);", "_____no_output_____" ] ], [ [ "The simplest way to recover something that looks a bit more like the original signal is to take the average between neighboring \"pixels\":", "_____no_output_____" ] ], [ [ "# Take the mean of neighboring pixels\nsmooth_signal = (noisy_signal[:-1] + noisy_signal[1:]) / 2.0\nfig, ax = plt.subplots()\nax.plot(smooth_signal);", "_____no_output_____" ] ], [ [ "What happens if we want to take the *three* neighboring pixels? We can do the same thing:", "_____no_output_____" ] ], [ [ "smooth_signal3 = (noisy_signal[:-2] + noisy_signal[1:-1]\n + noisy_signal[2:]) / 3\nfig, ax = plt.subplots()\nax.plot(smooth_signal, label='mean of 2')\nax.plot(smooth_signal3, label='mean of 3')\nax.legend(loc='upper left');", "_____no_output_____" ] ], [ [ "For averages of more points, the expression keeps getting hairier. And you have to worry more about what's going on in the margins. Is there a better way?\n\nIt turns out there is. This same concept, nearest-neighbor averages, can be expressed as a *convolution* with an *averaging kernel*. Note that the operation we did with `smooth_signal3` can be expressed as follows:\n\n* Create an output array called `smooth_signal3`, of the same length as `noisy_signal`.\n* At each element in `smooth_signal3` starting at point 1, and ending at point -2, place the average of the sum of: 1/3 of the element to the left of it in `noisy_signal`, 1/3 of the element at the same position, and 1/3 of the element to the right.\n* discard the leftmost and rightmost elements.\n\nThis is called a *convolution* between the input image and the array `[1/3, 1/3, 1/3]`. 
(We'll give a more in-depth explanation of convolution in the next section).", "_____no_output_____" ] ], [ [ "# Same as above, using a convolution kernel\n# Neighboring pixels multiplied by 1/3 and summed\nmean_kernel3 = np.full((3,), 1/3)\nsmooth_signal3p = np.convolve(noisy_signal, mean_kernel3,\n mode='valid')\nfig, ax = plt.subplots()\nax.plot(smooth_signal3p)\n\nprint('smooth_signal3 and smooth_signal3p are equal:',\n np.allclose(smooth_signal3, smooth_signal3p))", "_____no_output_____" ], [ "def convolve_demo(signal, kernel):\n ksize = len(kernel)\n convolved = np.correlate(signal, kernel)\n def filter_step(i):\n fig, ax = plt.subplots()\n ax.plot(signal, label='signal')\n ax.plot(convolved[:i+1], label='convolved')\n ax.legend()\n ax.scatter(np.arange(i, i+ksize),\n signal[i : i+ksize])\n ax.scatter(i, convolved[i])\n return filter_step\n\nfrom ipywidgets import interact, widgets\n\ni_slider = widgets.IntSlider(min=0, max=len(noisy_signal) - 3,\n value=0)\n\ninteract(convolve_demo(noisy_signal, mean_kernel3),\n i=i_slider);", "_____no_output_____" ] ], [ [ "The advantage of convolution is that it's just as easy to take the average of 11 points as 3:", "_____no_output_____" ] ], [ [ "mean_kernel11 = np.full((11,), 1/11)\nsmooth_signal11 = np.convolve(noisy_signal, mean_kernel11,\n mode='valid')\nfig, ax = plt.subplots()\nax.plot(smooth_signal11);", "_____no_output_____" ], [ "i_slider = widgets.IntSlider(min=0, max=len(noisy_signal) - 11,\n value=0)\n\ninteract(convolve_demo(noisy_signal, mean_kernel11),\n i=i_slider);", "_____no_output_____" ] ], [ [ "Of course, to take the mean of 11 values, we have to move further and further away from the edges, and this starts to be noticeable. You can use `mode='same'` to pad the edges of the array and compute a result of the same size as the input:", "_____no_output_____" ] ], [ [ "smooth_signal3same = np.convolve(noisy_signal, mean_kernel3,\n mode='same')\nsmooth_signal11same = np.convolve(noisy_signal, mean_kernel11,\n mode='same')\n\nfig, ax = plt.subplots(1, 2)\nax[0].plot(smooth_signal3p)\nax[0].plot(smooth_signal11)\nax[0].set_title('mode=valid')\nax[1].plot(smooth_signal3same)\nax[1].plot(smooth_signal11same)\nax[1].set_title('mode=same');", "_____no_output_____" ] ], [ [ "But now we see edge effects on the ends of the signal...\n\nThis is because `mode='same'` actually pads the signal with 0s and then applies `mode='valid'` as before.", "_____no_output_____" ] ], [ [ "def convolve_demo_same(signal, kernel):\n ksize = len(kernel)\n padded_signal = np.pad(signal, ksize // 2,\n mode='constant')\n convolved = np.correlate(padded_signal, kernel)\n def filter_step(i):\n fig, ax = plt.subplots()\n x = np.arange(-ksize // 2,\n len(signal) + ksize // 2)\n ax.plot(signal, label='signal')\n ax.plot(convolved[:i+1], label='convolved')\n ax.legend()\n start, stop = i, i + ksize\n ax.scatter(x[start:stop]+1,\n padded_signal[start : stop])\n ax.scatter(i, convolved[i])\n ax.set_xlim(-ksize // 2,\n len(signal) + ksize // 2)\n return filter_step\n\n\ni_slider = widgets.IntSlider(min=0, max=len(noisy_signal)-1,\n value=0)\n\ninteract(convolve_demo_same(noisy_signal, mean_kernel11),\n i=i_slider);", "_____no_output_____" ] ], [ [ "**Exercise** Look up the documentation of `scipy.ndimage.convolve`. 
Apply the same convolution, but using a different `mode=` keyword argument to avoid the edge effects we see here.", "_____no_output_____" ], [ "#### A difference filter", "_____no_output_____" ], [ "Let's look again at our simplest signal, the step signal from before:", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nax.plot(step_signal)\nax.margins(y=0.1) ", "_____no_output_____" ] ], [ [ "**Exercise:** Can you predict what a convolution with the kernel `[-1, 0, 1]` does? Try thinking about it before running the cells below.", "_____no_output_____" ] ], [ [ "result_corr = np.correlate(step_signal, np.array([-1, 0, 1]),\n mode='valid')", "_____no_output_____" ], [ "result_conv = np.convolve(step_signal, np.array([-1, 0, 1]),\n mode='valid')", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.plot(step_signal, label='signal')\nax.plot(result_conv, linestyle='dashed', label='convolved')\nax.plot(result_corr, linestyle='dashed', label='correlated',\n color='C3')\nax.legend(loc='upper left')\nax.margins(y=0.1) ", "_____no_output_____" ] ], [ [ "(For technical signal processing reasons, convolutions actually occur \"back to front\" between the input array and the kernel. Correlations occur in the signal order, so we'll use correlate from now on.)", "_____no_output_____" ], [ "Whenever neighboring values are close, the filter response is close to 0. Right at the boundary of a step, we're subtracting a small value from a large value and and get a spike in the response. This spike \"identifies\" our edge.", "_____no_output_____" ], [ "#### Commutativity and assortativity of filters", "_____no_output_____" ], [ "What if we try the same trick with our noisy signal?", "_____no_output_____" ] ], [ [ "noisy_change = np.correlate(noisy_signal, np.array([-1, 0, 1]))\nfig, ax = plt.subplots()\nax.plot(noisy_signal, label='signal')\nax.plot(noisy_change, linestyle='dashed', label='change')\nax.legend(loc='upper left')\nax.margins(0.1)", "_____no_output_____" ] ], [ [ "Oops! We lost our edge!\n\nBut recall that we smoothed the signal a bit by taking its neighbors. Perhaps we can do the same thing here. Actually, it turns out that we can do it *in any order*, so we can create a filter that combines both the difference and the mean:", "_____no_output_____" ] ], [ [ "mean_diff = np.correlate([-1, 0, 1], [1/3, 1/3, 1/3], mode='full')\nprint(mean_diff)", "_____no_output_____" ] ], [ [ "*Note:* we use `np.convolve` here, because it has the option to output a *wider* result than either of the two inputs.", "_____no_output_____" ], [ "Now we can use this to find our edge even in a noisy signal:", "_____no_output_____" ] ], [ [ "smooth_change = np.correlate(noisy_signal, mean_diff,\n mode='same')\nfig, ax = plt.subplots()\nax.plot(noisy_signal, label='signal')\nax.plot(smooth_change, linestyle='dashed', label='change')\nax.margins(0.1)\nax.hlines([-0.5, 0.5], 0, 100, linewidth=0.5, color='gray');", "_____no_output_____" ] ], [ [ "**Exercise:** The Gaussian filter with variance $\\sigma^2$ is given by:\n\n$$\nk_i = \\frac{1}{\\sqrt{2\\pi}\\sigma}\\exp{\\left(-\\frac{(x_i - x_0)^2}{2\\sigma^2}\\right)}\n$$\n\nBelow, we have created an array `x` containing $x_i \\in {0, 1, ..., 8}$, a value `x0` of 4 (the centre of those values), and a value `sigma=1`. Use these values to:\n\n1. Create this filter (for example, with width 9, center 4, sigma 1). (Plot it)\n2. Convolve it with the difference filter (with appropriate mode). (Plot the result)\n3. Convolve it with the noisy signal. 
(Plot the result)", "_____no_output_____" ] ], [ [ "xi = np.arange(9)\nx0 = 9 // 2 # 4\nx = xi - x0\nsigma = 1\n... # complete this code", "_____no_output_____" ] ], [ [ "## Local filtering of images", "_____no_output_____" ], [ "Now let's apply all this knowledge to 2D images instead of a 1D signal. Let's start with an incredibly simple image:", "_____no_output_____" ] ], [ [ "import numpy as np\n\nbright_square = np.zeros((7, 7), dtype=float)\nbright_square[2:5, 2:5] = 1", "_____no_output_____" ] ], [ [ "This gives the values below:", "_____no_output_____" ] ], [ [ "print(bright_square)", "_____no_output_____" ] ], [ [ "and looks like a white square centered on a black square:", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nax.imshow(bright_square);", "_____no_output_____" ] ], [ [ "### The mean filter", "_____no_output_____" ], [ "For our first example of a filter, consider the following filtering array, which we'll call a \"mean kernel\". For each pixel, a kernel defines which neighboring pixels to consider when filtering, and how much to weight those pixels.", "_____no_output_____" ] ], [ [ "mean_kernel = np.full((3, 3), 1/9)\n\nprint(mean_kernel)", "_____no_output_____" ] ], [ [ "Now, let's take our mean kernel and apply it to every pixel of the image.", "_____no_output_____" ], [ "Applying a (linear) filter essentially means:\n* Center a kernel on a pixel\n* Multiply the pixels *under* that kernel by the values *in* the kernel\n* Sum all the those results\n* Replace the center pixel with the summed result", "_____no_output_____" ], [ "This process is known as convolution.", "_____no_output_____" ], [ "Let's take a look at the numerical result:", "_____no_output_____" ] ], [ [ "import scipy.ndimage as ndi\n\n%precision 2\nprint(bright_square)\nprint(ndi.correlate(bright_square, mean_kernel))", "_____no_output_____" ] ], [ [ "The meaning of \"mean kernel\" should be clear now: Each pixel was replaced with the mean value within the 3x3 neighborhood of that pixel. When the kernel was over `n` bright pixels, the pixel in the kernel's center was changed to n/9 (= n * 0.111). When no bright pixels were under the kernel, the result was 0.", "_____no_output_____" ], [ "This filter is a simple smoothing filter and produces two important results:\n1. The intensity of the bright pixel decreased.\n2. 
The intensity of the region near the bright pixel increased.", "_____no_output_____" ], [ "Let's see a convolution in action.\n\n(Execute the following cell, but don't try to read it; its purpose is to generate an example.)", "_____no_output_____" ] ], [ [ "#--------------------------------------------------------------------------\n# Convolution Demo\n#--------------------------------------------------------------------------\nfrom skimage import color\nfrom scipy import ndimage as ndi\nfrom matplotlib import patches\n\ndef mean_filter_demo(image, vmax=1):\n mean_factor = 1.0 / 9.0 # This assumes a 3x3 kernel.\n iter_kernel_and_subimage = iter_kernel(image)\n\n image_cache = []\n\n def mean_filter_step(i_step):\n while i_step >= len(image_cache):\n filtered = image if i_step == 0 else image_cache[-1][-1][-1]\n filtered = filtered.copy()\n\n (i, j), mask, subimage = next(iter_kernel_and_subimage)\n filter_overlay = color.label2rgb(mask, image, bg_label=0,\n colors=('cyan', 'red'))\n filtered[i, j] = np.sum(mean_factor * subimage)\n image_cache.append(((i, j), (filter_overlay, filtered)))\n\n (i, j), images = image_cache[i_step]\n fig, axes = plt.subplots(1, len(images), figsize=(10, 5))\n \n for ax, imc in zip(axes, images):\n ax.imshow(imc, vmax=vmax)\n rect = patches.Rectangle([j - 0.5, i - 0.5], 1, 1, color='yellow', fill=False)\n ax.add_patch(rect)\n \n plt.show()\n return mean_filter_step\n\n\ndef mean_filter_interactive_demo(image):\n from ipywidgets import IntSlider, interact\n mean_filter_step = mean_filter_demo(image)\n step_slider = IntSlider(min=0, max=image.size-1, value=0)\n interact(mean_filter_step, i_step=step_slider)\n\n\ndef iter_kernel(image, size=1):\n \"\"\" Yield position, kernel mask, and image for each pixel in the image.\n\n The kernel mask has a 2 at the center pixel and 1 around it. The actual\n width of the kernel is 2*size + 1.\n \"\"\"\n width = 2*size + 1\n for (i, j), pixel in iter_pixels(image):\n mask = np.zeros(image.shape, dtype='int16')\n mask[i, j] = 1\n mask = ndi.grey_dilation(mask, size=width)\n #mask[i, j] = 2\n subimage = image[bounded_slice((i, j), image.shape[:2], size=size)]\n yield (i, j), mask, subimage\n\n\ndef iter_pixels(image):\n \"\"\" Yield pixel position (row, column) and pixel intensity. \"\"\"\n height, width = image.shape[:2]\n for i in range(height):\n for j in range(width):\n yield (i, j), image[i, j]\n\n\ndef bounded_slice(center, xy_max, size=1, i_min=0):\n slices = []\n for i, i_max in zip(center, xy_max):\n slices.append(slice(max(i - size, i_min), min(i + size + 1, i_max)))\n return tuple(slices)\n\n", "_____no_output_____" ], [ "mean_filter_interactive_demo(bright_square)", "_____no_output_____" ] ], [ [ "Incidentally, the above filtering is the exact same principle behind the *convolutional neural networks*, or CNNs, that you might have heard much about over the past few years. The only difference is that while above, the simple mean kernel is used, in CNNs, the values inside the kernel are *learned* to find a specific feature, or accomplish a specific task. Time permitting, we'll demonstrate this in an exercise at the end of the notebook.", "_____no_output_____" ], [ "### Downsampled image", "_____no_output_____" ], [ "Let's consider a real image now. It'll be easier to see some of the filtering we're doing if we downsample the image a bit. 
We can slice into the image using the \"step\" argument to sub-sample it (don't scale images using this method for real work; use `skimage.transform.rescale`):", "_____no_output_____" ] ], [ [ "from skimage import data\n\nimage = data.camera()\npixelated = image[::10, ::10]\nfig, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 5))\nax0.imshow(image)\nax1.imshow(pixelated);", "_____no_output_____" ] ], [ [ "Here we use a step of 10, giving us every tenth column and every tenth row of the original image. You can see the highly pixelated result on the right.", "_____no_output_____" ], [ "### Mean filter on a real image", "_____no_output_____" ], [ "Now we can apply the filter to this downsampled image:", "_____no_output_____" ] ], [ [ "filtered = ndi.correlate(pixelated, np.ones((3, 3)) / 9)", "_____no_output_____" ], [ "import napari\n\n\nviewer = napari.view_image(np.stack([pixelated, filtered]), channel_axis=0,\n name=['pixelated', 'mean filtered'], colormap='gray')\nviewer.grid_view()", "_____no_output_____" ] ], [ [ "We are actually going to be using the pattern of plotting multiple images side by side quite often, so we are going to make the following helper function:", "_____no_output_____" ] ], [ [ "def view_images(*images, names=None):\n viewer = napari.view_image(np.stack(images), channel_axis=0,\n name=names, colormap='gray', blending='translucent')\n viewer.grid_view()\n return viewer", "_____no_output_____" ] ], [ [ "Comparing the filtered image to the pixelated image, we can see that this filtered result is smoother: Sharp edges (which are just borders between dark and bright pixels) are smoothed because dark pixels reduce the intensity of neighboring pixels and bright pixels do the opposite.", "_____no_output_____" ], [ "## Essential filters", "_____no_output_____" ], [ "If you read through the last section, you're already familiar with the essential concepts of image filtering. But, of course, you don't have to create custom filter kernels for all of your filtering needs. There are many standard filter kernels pre-defined from half a century of image and signal processing.", "_____no_output_____" ], [ "### Gaussian filter", "_____no_output_____" ], [ "The classic image filter is the Gaussian filter. This is similar to the mean filter, in that it tends to smooth images. The Gaussian filter, however, doesn't weight all values in the neighborhood equally. 
Instead, pixels closer to the center are weighted more than those farther away.", "_____no_output_____" ] ], [ [ "from skimage import filters\n\nsmooth_mean = ndi.correlate(bright_square, mean_kernel)\nsigma = 1\nsmooth = filters.gaussian(bright_square, sigma)\nview_images(bright_square, smooth_mean, smooth,\n names=['original', 'mean filter', 'gaussian filter'])", "_____no_output_____" ] ], [ [ "For the Gaussian filter, `sigma`, the standard deviation, defines the size of the neighborhood.\n\nFor a real image, we get the following:", "_____no_output_____" ] ], [ [ "from skimage import img_as_float\n# The Gaussian filter returns a float image, regardless of input.\n# Cast to float so the images have comparable intensity ranges.\npixelated_float = img_as_float(pixelated)\nsmooth = filters.gaussian(pixelated_float, sigma=1)\nview_images(pixelated_float, smooth);", "_____no_output_____" ] ], [ [ "This doesn't look drastically different than the mean filter, but the Gaussian filter is typically preferred because of the distance-dependent weighting, and because it does not have any sharp transitions (consider what happens in the Fourier domain!). For a more detailed image and a larger filter, you can see artifacts in the mean filter since it doesn't take distance into account:", "_____no_output_____" ] ], [ [ "size = 20\nstructuring_element = np.ones((3*size, 3*size))\nsmooth_mean = filters.rank.mean(image, structuring_element)\nsmooth_gaussian = filters.gaussian(image, size)\ntitles = ['mean', 'gaussian']\nview_images(smooth_mean, smooth_gaussian, names=titles)", "_____no_output_____" ] ], [ [ "(Above, we've tweaked the size of the structuring element used for the mean filter and the standard deviation of the Gaussian filter to produce an approximately equal amount of smoothing in the two results.)", "_____no_output_____" ], [ "Incidentally, for reference, let's have a look at what the Gaussian filter actually looks like. Technically, the value of the kernel at a pixel that is $r$ rows and $c$ cols from the center is:\n\n$$\nk_{r, c} = \\frac{1}{2\\pi \\sigma^2} \\exp{\\left(-\\frac{r^2 + c^2}{2\\sigma^2}\\right)}\n$$\n\nPractically speaking, this value is pretty close to zero for values more than $4\\sigma$ away from the center, so practical Gaussian filters are truncated at about $4\\sigma$:", "_____no_output_____" ] ], [ [ "sidelen = 45\nsigma = (sidelen - 1) // 2 // 4\nspot = np.zeros((sidelen, sidelen), dtype=float)\nspot[sidelen // 2, sidelen // 2] = 1\nkernel = filters.gaussian(spot, sigma=sigma)\n\nview_images(spot, kernel / np.max(kernel));", "_____no_output_____" ] ], [ [ "**Exercise** (Chapter 0 reminder!) Plot the profile of the gaussian kernel at its midpoint, i.e. the values under the line shown here:", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\n\nax.imshow(kernel, cmap='inferno')\nax.vlines(22, -100, 100, color='C9')\nax.set_ylim((sidelen - 1, 0))", "_____no_output_____" ], [ "... # add your plotting code here", "_____no_output_____" ] ], [ [ "### Basic edge filtering", "_____no_output_____" ], [ "For images, edges are boundaries between light and dark values. The detection of edges can be useful on its own, or it can be used as preliminary step in other algorithms (which we'll see later).", "_____no_output_____" ], [ "#### Difference filters in 2D", "_____no_output_____" ], [ "For images, you can think of an edge as points where the gradient is large in one direction. 
We can approximate gradients with difference filters.", "_____no_output_____" ] ], [ [ "vertical_kernel = np.array([\n [-1],\n [ 0],\n [ 1],\n])\n\ngradient_vertical = ndi.correlate(pixelated.astype(float),\n vertical_kernel)\nfig, ax = plt.subplots()\nax.imshow(gradient_vertical, cmap='twilight');", "_____no_output_____" ] ], [ [ "## <span style=\"color:cornflowerblue\">Exercise:</span>", "_____no_output_____" ], [ "- Add a horizontal kernel to the above example to also compute the horizontal gradient, $g_y$\n- Compute the magnitude of the image gradient at each point: $\\left|g\\right| = \\sqrt{g_x^2 + g_y^2}$", "_____no_output_____" ] ], [ [ "... # add your horizontal and gradient magnitude code here", "_____no_output_____" ] ], [ [ "### Sobel edge filter", "_____no_output_____" ], [ "The Sobel filter, the most commonly used edge filter, should look pretty similar to what you developed above. Take a look at the vertical and horizontal components of the Sobel kernel to see how they differ from your earlier implementation:", "_____no_output_____" ], [ "* http://scikit-image.org/docs/dev/api/skimage.filters.html#skimage.filters.sobel_v\n* http://scikit-image.org/docs/dev/api/skimage.filters.html#skimage.filters.sobel_h", "_____no_output_____" ] ], [ [ "view_images(bright_square, filters.sobel(bright_square))", "_____no_output_____" ] ], [ [ "Notice that the size of the output matches the input, and the edges aren't preferentially shifted to a corner of the image. Furthermore, the weights used in the Sobel filter produce diagonal edges with reponses that are comparable to horizontal or vertical edges.\n\nLike any derivative, noise can have a strong impact on the result:", "_____no_output_____" ] ], [ [ "pixelated_gradient = filters.sobel(pixelated)\nview_images(pixelated, pixelated_gradient)", "_____no_output_____" ] ], [ [ "Smoothing is often used as a preprocessing step in preparation for feature detection and image-enhancement operations because sharp features can distort results.", "_____no_output_____" ] ], [ [ "gradient = filters.sobel(smooth)\ntitles = ['gradient before smoothing', 'gradient after smoothing']\n# Scale smoothed gradient up so they're of comparable brightness.\nview_images(pixelated_gradient, gradient*1.8, names=titles)", "_____no_output_____" ] ], [ [ "Notice how the edges look more continuous in the smoothed image.", "_____no_output_____" ], [ "**Exercise: the simplest neural network.** Let's pretend we have an image and a \"ground truth\" image of what we want to detect:", "_____no_output_____" ] ], [ [ "target = (filters.sobel_h(image) > 0.07)\nview_images(image, target, names=['source', 'target'])", "_____no_output_____" ] ], [ [ "Can we use machine learning to find a 3x3 convolutional filter that recovers this target?\n\n- use `skimage.util.view_as_windows` and `np.reshape` to view the image as a set of (approximately) `npixels` 3x3 patches. (Hint: why is it only approximate? Think of `mode=valid` convolutions.)\n- use `np.reshape` again to see it as `npixels` \"linear\" patches of 9 pixels.\n- Now you have an `(npixels, 9)` \"feature\" matrix, `X`.\n- Use slicing and `np.ravel` to get an `npixels`-length array of target values.\n- Use `sklearn.linear_model.LogisticRegression` to learn the relationship between our pixel neighborhoods (of size 9) and the target.\n- Look at your `model.coef_`. 
How do they compare to the Sobel coefficients?", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "## Denoising filters", "_____no_output_____" ], [ "At this point, we make a distinction. The earlier filters were implemented as a *linear dot-product* of values in the filter kernel and values in the image. The following kernels implement an *arbitrary* function of the local image neighborhood. Denoising filters in particular are filters that preserve the sharpness of edges in the image.\n\nAs you can see from our earlier examples, mean and Gaussian filters smooth an image rather uniformly, including the edges of objects in an image. When denoising, however, you typically want to preserve features and just remove noise. The distinction between noise and features can, of course, be highly situation-dependent and subjective.", "_____no_output_____" ], [ "### Median Filter", "_____no_output_____" ], [ "The median filter is the classic edge-preserving filter. As the name implies, this filter takes a set of pixels (i.e. the pixels within a kernel or \"structuring element\") and returns the median value within that neighborhood. Because regions near a sharp edge will have many dark values and many light values (but few values in between) the median at an edge will most likely be either light or dark, rather than some value in between. In that way, we don't end up with edges that are smoothed.", "_____no_output_____" ] ], [ [ "from skimage.morphology import disk\nneighborhood = disk(radius=1) # \"selem\" is often the name used for \"structuring element\"\nmedian = filters.rank.median(pixelated, neighborhood)\ntitles = ['image', 'gaussian', 'median']\nview_images(pixelated, smooth, median, names=titles)", "_____no_output_____" ] ], [ [ "This difference is more noticeable with a more detailed image.", "_____no_output_____" ] ], [ [ "neighborhood = disk(10)\ncoins = data.coins()\nmean_coin = filters.rank.mean(coins, neighborhood)\nmedian_coin = filters.rank.median(coins, neighborhood)\ntitles = ['image', 'mean', 'median']\nview_images(coins, mean_coin, median_coin, names=titles)", "_____no_output_____" ] ], [ [ "Notice how the edges of coins are preserved after using the median filter.", "_____no_output_____" ], [ "## Further reading", "_____no_output_____" ], [ "`scikit-image` also provides more sophisticated denoising filters:", "_____no_output_____" ] ], [ [ "from skimage.restoration import denoise_tv_bregman\ndenoised = denoise_tv_bregman(image, 4)\nd = disk(4)\nmedian = filters.rank.median(image, d)\ntitles = ['image', 'median', 'denoised']\nview_images(image, median, denoised, names=titles)", "_____no_output_____" ] ], [ [ "* [Denoising examples](http://scikit-image.org/docs/dev/auto_examples/plot_denoise.html)\n* [Rank filters example](http://scikit-image.org/docs/dev/auto_examples/applications/plot_rank_filters.html)\n* [Restoration API](http://scikit-image.org/docs/stable/api/skimage.restoration.html)\n\nTake a look at this [neat feature](https://github.com/scikit-image/scikit-image/pull/2647) merged last year:\n\n![cycle spinning](../images/cycle_spin.png)", "_____no_output_____" ] ] ]
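The horizontal-gradient exercise in the image-filtering notebook above is left open. The sketch below is one possible answer, not the tutorial's reference solution; the kernel orientation and variable names are assumptions made for the illustration.

```python
# One possible answer (not the tutorial's own solution) to the horizontal-gradient /
# gradient-magnitude exercise.
import numpy as np
from scipy import ndimage as ndi
from skimage import data

pixelated = data.camera()[::10, ::10].astype(float)

vertical_kernel = np.array([[-1], [0], [1]])   # difference along rows
horizontal_kernel = vertical_kernel.T          # the same difference along columns

g_x = ndi.correlate(pixelated, vertical_kernel)
g_y = ndi.correlate(pixelated, horizontal_kernel)

# Gradient magnitude |g| = sqrt(g_x**2 + g_y**2) at every pixel.
gradient_magnitude = np.sqrt(g_x**2 + g_y**2)
print(gradient_magnitude.shape)
```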
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ec80dc8bec2cabac2e80f92292f5bc6d710308aa
11,522
ipynb
Jupyter Notebook
algorithmique/tri_selection/Tris_Selection.ipynb
herve-vasseur/python-lycee
8921557ff6da6082bcb216aba621d04421445db7
[ "CC0-1.0" ]
null
null
null
algorithmique/tri_selection/Tris_Selection.ipynb
herve-vasseur/python-lycee
8921557ff6da6082bcb216aba621d04421445db7
[ "CC0-1.0" ]
null
null
null
algorithmique/tri_selection/Tris_Selection.ipynb
herve-vasseur/python-lycee
8921557ff6da6082bcb216aba621d04421445db7
[ "CC0-1.0" ]
null
null
null
30.725333
331
0.547735
[ [ [ "<div class =\"alert alert-warning\"> \n \n \nnotebook consultable, exécutable, modifiable et téléchargeable en ligne : \n \n\t\n- se rendre à : https://github.com/nsi-acot/continuite_pedagogique_premiere\n- cliquer sur l'icone \"launch binder\" en bas de page\n- patienter quelques secondes que le serveur Jupyter démarre\n- naviguer dans le dossier `\"./algorithmique/tri_selection/\"`\n- cliquer sur le nom de ce notebook\n</div>", "_____no_output_____" ], [ "# <center> TRIER </center>", "_____no_output_____" ], [ "<center><b>Le tri est une activité fondamentale et omniprésente de l'informatique.</b></center>\n\n<img src='tri.jpg' width=200> \n\n* On l'utilise régulièrement, par exemple :\n * Lorsque l'on consulte sur Internet des listes de produits que l'on souhaite afficher par prix croissant ou décroissant, par popularité...\n * Lorsque dans un tableur on souhaite trier des noms par ordre alphabétique\n * Lorsque l'on affiche ses photos par date\n\n\n* Cela permet d'étudier des concepts algorithmiques puissants et efficaces\n\n\n* <b>Pour simplifier, on cherche ici à trier des liste d'entiers dans l'ordre croissant. Le but d’un algorithme de tri est ainsi de calculer une nouvelle liste, ou\nde modifier la liste initiale, de manière à ce qu’elle contienne les mêmes\nnombres que la liste de départ, mais que ces éléments soient ordonnés.</b>", "_____no_output_____" ] ], [ [ "#Générer une liste d'entiers aléatoires\nfrom random import *\nL=[randint(-32,32) for i in range(5)]\nprint(L)", "[12, -16, -21, -10, -9]\n" ] ], [ [ "## 1. De quoi dispose un ordinateur pour trier ?\n\n* Une fonction de comparaison ($<$ , $>$ ), pour comparer deux valeurs.\n* Des zones de stockage pour mémoriser des emplacements(à l'aide de l'index des éléments d'une liste), déplacer des valeurs...\n* Il existe des dizaines d'algorithmes de tri, nous allons en étudier 2 cette année : \n * **Le tri par sélection.**\n * **Le tri par insertion.**", "_____no_output_____" ], [ "## 2. Le tri par sélection\n\n<img src='Selection-Sort.gif' width='400'>\n\n### Présentation \n* On commence par chercher, parmi les nombres à trier, un élément plus petit que tous les autres. Cet élément sera le premier de la liste triée. \n* On cherche ensuite, parmi ceux qui restent, un élément plus petit que tous les autres, qui sera le deuxième du tableau trié\n* On recommence pour trouver le troisième élément trié et ainsi de suite jusqu'à ce que toute la liste soit triée.\n\n\n#### Exemple : \n\nOn cherche à trier la liste suivante : `[29, -6, 12, -11, 10]`\n\n* Le plus petit élement est $-11$, on le place au premier index 0\n* Que fait-on du $29$ ? Il prend la place de $-11$, la liste devient donc : `[-11,-6,12,29,10]`\n* Le plus petit des élements restants à trier est $-6$, il est déjà bien placé à l'index 1.\n* Le plus petit des élements restants à trier est $10$, on le place en troisième position (index 2).\n* Que fait-on du $12$ ? Il prend la place de $10$, la liste devient donc : `[-11,-6,10,29,12]`\n* Le plus petit des élements restants à trier est $12$, on le place en quatrième position (index 3).\n* Que fait-on du $29$ ? Il prend la place de $12$, la liste devient donc : `[-11,-6,10,12,29]`\n* Le dernier élément est nécéssairement le plus grand, la liste est triée.", "_____no_output_____" ], [ "#### Exercice 1 :\n1. En indiquant les listes intermédiaires, trier la liste `[5, -27, -14, 10, 7]` par sélection\n \n * \n * \n * \n * \n \n\n2. 
Même exercice avec la liste `[30, 24, -7, -19, 26, 21, -18]`\n * \n * \n * \n * \n * ", "_____no_output_____" ], [ "### Complexité\n#### Exercice 2 : \nOn cherche à trier la liste suivante par sélection : `[12, 31, -2, -14, 8]`\n1. Au premier parcours, la liste devient `[-14, 31, -2, 12, 8]`. Combien de comparaisons ont été nécéssaires pour identifier que $-14$ est le plus petit élement ?\n2. Au deuxième parcours la liste devient `[-14, -2, 31, 12, 8]`. Combien de comparaisons ont été nécéssaires pour identifier que $-2$ est le plus petit des éléments restant à trier ?\n3. Combien de comparaisons sont nécéssaires pour trier toute la liste ?\n4. Est-ce vrai pour toutes les listes de cinq éléments ?", "_____no_output_____" ], [ "Réponse :\n\n1. \n2. \n3. \n4. \n", "_____no_output_____" ], [ "#### Exercice 3 :\n1. Combien de comparaisons sont nécéssaires pour trier par sélection une liste de 7 éléments ?\n2. Et pour une liste de 10 éléments ?\n3. Et pour $n$ éléments ?", "_____no_output_____" ], [ "Réponse :\n \n1. \n2. \n3. ", "_____no_output_____" ], [ "#### A retenir\n<img style='float:right;' src='selection.png' width=400><br><br>Quelque soit le nombre d'éléments $n$ d'éléments à trier et dans tous les cas( par exemple même quand la liste est partiellement triée), le nombre de comparaisons est exactement $\\frac{n\\times(n-1)}{2}=\\frac{n^{2}-n}{2}$, c'est à dire de l'ordre de $n^2$.\n\nOn dit que cet algorithme est de complexité quadratique en $\\mathcal{O}(n^2)$.\n\nPar exemple , une liste de $25000$ éléments à trier nécéssite de l'ordre de $25000^2=625000000$ comparaisons.", "_____no_output_____" ], [ "### Programmation", "_____no_output_____" ], [ "#### Recherche du minimum et échange\nLa première étape de l'algorithme est de rechercher le minimum de la liste et d'échanger sa place avec le premier élément: ", "_____no_output_____" ], [ "$(1)\\; L \\; est\\;une\\;liste\\\\\n(2)\\; indexmin \\leftarrow 0\\\\\n(3)\\; Pour\\; j\\; de \\;1\\;à \\; index\\;du\\;dernier\\;élément\\;de\\;L \\\\\n(4)\\; \\; \\; \\; \\; Si\\; L[j]<L[indexmin] \\\\\n(5) \\; \\; \\; \\; \\; \\; \\; \\;\\;indexmin \\leftarrow j \\\\\n(6)\\; L[0],L[indexmin]=L[indexmin],L[0]$\n", "_____no_output_____" ], [ "#### Exercice 4 :\n\n1. Que représente la variable `indexmin` ?\n2. Expliquer la ligne 6 de l'algorithme.\n3. En remplaçant les commentaires, programmer cet algorithme.", "_____no_output_____" ], [ "Réponses :\n1. \n2. ", "_____no_output_____" ] ], [ [ "L=[randint(-32,32) for i in range(5)]\nprint(L)\n\n#ligne2\n#ligne3\n#ligne4\n#ligne5\n#ligne6\n\nprint(L)", "_____no_output_____" ] ], [ [ "#### Exercice 5 :\nOn suppose que le minimum de la liste est désormais le premier élément. L'étape suivante est d'identifier le plus petit élément parmi ceux restant à trier et de le placer en deuxième position.\n\n1. Recopier et modifier l'algorithme précédent en ce sens. On utilisera la liste`[-29, 24, 31, -15, 8]`. l'instruction`print(L)` doit renvoyer `[-29, -15, 31, 24, 8]`.\n2. Qu'a-t-on modifié précisement ?\n3. Que doit-on modifier pour trouver le troisième élement de la liste triée ?", "_____no_output_____" ] ], [ [ "#1\nL=[-29, 24, 31, -15, 8]\n\n#ligne2\n#ligne3\n#ligne4\n#ligne5\n\nprint(L)", "_____no_output_____" ] ], [ [ "2. \n3. 
Pour trouver le troisième élément trié , il faut :\n * \n * \n * ", "_____no_output_____" ], [ "#### Algorithme du tri par sélection", "_____no_output_____" ], [ "#### Exercice 6 :\nLa fonction `tri_select(L)` prend en paramètre une liste `L` d'entiers et doit renvoyer cette liste triée par sélection.\nEn vous aidant des résultats précédents, compléter cette fonction en remplaçant les `?`.", "_____no_output_____" ] ], [ [ "#Algorithme de tri par sélection\ndef tri_select(L):\n for i in ?:\n imin = ?\n for j in range(?,len(L)):\n if L[j]<L[imin]:\n imin=j\n \n L[?],L[imin]=L[imin],L[?]\n \n return L\n \nL=[randint(-32,32) for i in range(5)]\nprint (L)\nprint(tri_select(L)) ", "_____no_output_____" ] ], [ [ "Remarques :\n* On est sûr que l'algorithme s'arrête car on sort des boucles `for` une fois le parcours terminé.\n* A la fin de l'étape $i$, les $i$ premiers éléments du tableau sont triés, ce qui prouve que cet algorithme est correct (il fait ce que l'on attend de lui)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ec80f002b3829bf34a46bf977b2bd667f2dce6e1
18,976
ipynb
Jupyter Notebook
openmdao/docs/openmdao_book/features/building_blocks/components/exec_comp.ipynb
sebasgo/OpenMDAO
b78d840780b73209dc3a00a2fb3dbf729bfeb8d5
[ "Apache-2.0" ]
null
null
null
openmdao/docs/openmdao_book/features/building_blocks/components/exec_comp.ipynb
sebasgo/OpenMDAO
b78d840780b73209dc3a00a2fb3dbf729bfeb8d5
[ "Apache-2.0" ]
null
null
null
openmdao/docs/openmdao_book/features/building_blocks/components/exec_comp.ipynb
sebasgo/OpenMDAO
b78d840780b73209dc3a00a2fb3dbf729bfeb8d5
[ "Apache-2.0" ]
null
null
null
30.508039
168
0.495679
[ [ [ "try:\n import openmdao.api as om\nexcept ImportError:\n !python -m pip install openmdao[notebooks]\n import openmdao.api as om", "_____no_output_____" ] ], [ [ "# ExecComp\n\n\n`ExecComp` is a component that provides a shortcut for building an ExplicitComponent that\nrepresents a set of simple mathematical relationships between inputs and outputs. The ExecComp\nautomatically takes care of all of the component API methods, so you just need to instantiate\nit with an equation or a list of equations.\n\n## ExecComp Options\n", "_____no_output_____" ] ], [ [ "om.show_options_table(\"openmdao.components.exec_comp.ExecComp\")", "_____no_output_____" ] ], [ [ "## ExecComp Constructor\n\nThe call signature for the `ExecComp` constructor is:\n\n```{eval-rst}\n .. automethod:: openmdao.components.exec_comp.ExecComp.__init__\n :noindex:\n```", "_____no_output_____" ], [ "The values of the `kwargs` can be `dicts` which define the initial value for the variables along with\nother metadata. For example,\n\n```\n ExecComp('xdot=x/t', x={'units': 'ft'}, t={'units': 's'}, xdot={'units': 'ft/s')\n```\n\nHere is a list of the possible metadata that can be assigned to a variable in this way. The **Applies To** column indicates\nwhether the metadata is appropriate for input variables, output variables, or both.\n\n```{eval-rst}\n================ ====================================================== ============================================================= ============== ========\nName Description Valid Types Applies To Default\n================ ====================================================== ============================================================= ============== ========\nvalue Initial value in user-defined units float, list, tuple, ndarray input & output 1\nshape Variable shape, only needed if not an array int, tuple, list, None input & output None\nshape_by_conn Determine variable shape based on its connection bool input & output False\ncopy_shape Determine variable shape based on named variable str input & output None\nunits Units of variable str, None input & output None\ndesc Description of variable str input & output \"\"\nres_units Units of residuals str, None output None\nref Value of variable when scaled value is 1 float, ndarray output 1\nref0 Value of variable when scaled value is 0 float, ndarray output 0\nres_ref Value of residual when scaled value is 1 float, ndarray output 1\nlower Lower bound of variable float, list, tuple, ndarray, Iterable, None output None\nupper Lower bound of variable float, list, tuple, ndarray, Iterable, None output None\nsrc_indices Global indices of the variable int, list of ints, tuple of ints, int ndarray, Iterable, None input None\nflat_src_indices If True, src_indices are indices into flattened source bool input None\ntags Used to tag variables for later filtering str, list of strs input & output None\n================ ====================================================== ============================================================= ============== ========\n```\n\nThese metadata are passed to the `Component` methods `add_input` and `add_output`.\nFor more information about these metadata, see the documentation for the arguments to these Component methods:\n\n- [add_input](../../../_srcdocs/packages/core/component.html#openmdao.core.component.Component.add_input)\n\n- [add_output](../../../_srcdocs/packages/core/component.html#openmdao.core.component.Component.add_output)\n\n## Registering User Functions\n\nTo get your own functions added to 
the internal namespace of ExecComp so you can call them\nfrom within an ExecComp expression, you can use the `ExecComp.register` function.\n\n```{eval-rst}\n .. automethod:: openmdao.components.exec_comp.ExecComp.register\n :noindex:\n```\n\nNote that you're required, when registering a new function, to indicate whether that function\nis complex safe or not.\n\n\nExecComp Example: Simple\n\nFor example, here is a simple component that takes the input and adds one to it.", "_____no_output_____" ] ], [ [ "prob = om.Problem()\nmodel = prob.model\n\nmodel.add_subsystem('comp', om.ExecComp('y=x+1.'))\n\nmodel.set_input_defaults('comp.x', 2.0)\n\nprob.setup()\n\nprob.run_model()\n\nprint(prob.get_val('comp.y'))", "_____no_output_____" ], [ "from openmdao.utils.assert_utils import assert_near_equal\n\nassert_near_equal(prob.get_val('comp.y'), 3.0, 0.00001)", "_____no_output_____" ] ], [ [ "## ExecComp Example: Multiple Outputs\n\nYou can also create an ExecComp with multiple outputs by placing the expressions in a list.", "_____no_output_____" ] ], [ [ "prob = om.Problem()\nmodel = prob.model\n\nmodel.add_subsystem('comp', om.ExecComp(['y1=x+1.', 'y2=x-1.']), promotes=['x'])\n\nprob.setup()\n\nprob.set_val('x', 2.0)\n\nprob.run_model()\n\nprint(prob.get_val('comp.y1'))\nprint(prob.get_val('comp.y2'))", "_____no_output_____" ], [ "assert_near_equal(prob.get_val('comp.y1'), 3.0, 0.00001)\nassert_near_equal(prob.get_val('comp.y2'), 1.0, 0.00001)", "_____no_output_____" ] ], [ [ "## ExecComp Example: Arrays\n\nYou can declare an ExecComp with arrays for inputs and outputs, but when you do, you must also\npass in a correctly-sized array as an argument to the ExecComp call, or set the 'shape' metadata\nfor that variable as described earlier. If specifying the value directly, it can be the initial value\nin the case of unconnected inputs, or just an empty array with the correct size.", "_____no_output_____" ] ], [ [ "import numpy as np\n\nprob = om.Problem()\nmodel = prob.model\n\nmodel.add_subsystem('comp', om.ExecComp('y=x[1]',\n x=np.array([1., 2., 3.]),\n y=0.0))\n\nprob.setup()\n\nprob.run_model()\n\nprint(prob.get_val('comp.y'))", "_____no_output_____" ], [ "assert_near_equal(prob.get_val('comp.y'), 2.0, 0.00001)", "_____no_output_____" ] ], [ [ "## ExecComp Example: Math Functions\n\nFunctions from the math library are available for use in the expression strings.", "_____no_output_____" ] ], [ [ "import numpy as np\n\nprob = om.Problem()\nmodel = prob.model\n\nmodel.add_subsystem('comp', om.ExecComp('z = sin(x)**2 + cos(y)**2'))\n\nprob.setup()\n\nprob.set_val('comp.x', np.pi/2.0)\nprob.set_val('comp.y', np.pi/2.0)\n\nprob.run_model()\n\nprint(prob.get_val('comp.z'))", "_____no_output_____" ], [ "assert_near_equal(prob.get_val('comp.z'), 1.0, 0.00001)", "_____no_output_____" ] ], [ [ "## ExecComp Example: Variable Properties\n\nYou can also declare properties like 'units', 'upper', or 'lower' on the inputs and outputs. 
In this\nexample we declare all our inputs to be inches to trigger conversion from a variable expressed in feet\nin one connection source.", "_____no_output_____" ] ], [ [ "prob = om.Problem()\nmodel = prob.model\n\nmodel.add_subsystem('comp', om.ExecComp('z=x+y',\n x={'value': 0.0, 'units': 'inch'},\n y={'value': 0.0, 'units': 'inch'},\n z={'value': 0.0, 'units': 'inch'}))\n\nprob.setup()\n\nprob.set_val('comp.x', 12.0, units='inch')\nprob.set_val('comp.y', 1.0, units='ft')\n\nprob.run_model()\n\nprint(prob.get_val('comp.z'))", "_____no_output_____" ], [ "assert_near_equal(prob.get_val('comp.z'), 24.0, 0.00001)", "_____no_output_____" ] ], [ [ "## ExecComp Example: Diagonal Partials\n\nIf all of your ExecComp's array inputs and array outputs are the same size and happen to have\ndiagonal partials, you can make computation of derivatives for your ExecComp faster by specifying a\n`has_diag_partials=True` argument\nto `__init__` or via the component options. This will cause the ExecComp to solve for its partials\nby complex stepping all entries of an array input at once instead of looping over each entry individually.", "_____no_output_____" ] ], [ [ "import numpy as np\n\np = om.Problem()\nmodel = p.model\n\nmodel.add_subsystem('comp', om.ExecComp('y=3.0*x + 2.5',\n has_diag_partials=True,\n x=np.ones(5), y=np.ones(5)))\n\np.setup()\n\np.set_val('comp.x', np.ones(5))\n\np.run_model()\n\nJ = p.compute_totals(of=['comp.y'], wrt=['comp.x'], return_format='array')\n\nprint(J)", "_____no_output_____" ], [ "from numpy.testing import assert_almost_equal\n\nassert_almost_equal(J, np.eye(5)*3., decimal=6)", "_____no_output_____" ] ], [ [ "## ExecComp Example: Options\n\nOther options that can apply to all the variables in the component are variable shape and units.\nThese can also be set as a keyword argument in the constructor or via the component options. In the\nfollowing example the variables all share the same shape, which is specified in the constructor, and\ncommon units that are specified by setting the option.", "_____no_output_____" ] ], [ [ "model = om.Group()\n\nxcomp = model.add_subsystem('comp', om.ExecComp('y=2*x', shape=(2,)))\n\nxcomp.options['units'] = 'm'\n\nprob = om.Problem(model)\nprob.setup()\n\nprob.set_val('comp.x', [100., 200.], units='cm')\n\nprob.run_model()\n\nprint(prob.get_val('comp.y'))", "_____no_output_____" ], [ "assert_near_equal(prob.get_val('comp.y'), [2., 4.], 0.00001)", "_____no_output_____" ] ], [ [ "## ExecComp Example: User function registration\n\nIf the function is complex safe, then you don't need to do anything differently than you\nwould for any other ExecComp.", "_____no_output_____" ] ], [ [ "try: \n om.ExecComp.register(\"myfunc\", lambda x: x * x, complex_safe=True)\nexcept NameError:\n pass\np = om.Problem()\ncomp = p.model.add_subsystem(\"comp\", om.ExecComp(\"y = 2 * myfunc(x)\"))\n\np.setup()\np.run_model()\nJ = p.compute_totals(of=['comp.y'], wrt=['comp.x'])\nprint(J['comp.y', 'comp.x'][0][0])", "_____no_output_____" ], [ "assert_near_equal(J['comp.y', 'comp.x'][0][0], 4., 1e-10)", "_____no_output_____" ] ], [ [ "## ExecComp Example: Complex unsafe user function registration\n\nIf the function isn't complex safe, then derivatives involving that function\nwill have to be computed using finite difference instead of complex step. 
The way to specify\nthat `fd` should be used for a given derivative is to call `declare_partials`.", "_____no_output_____" ] ], [ [ "try:\n om.ExecComp.register(\"unsafe\", lambda x: x * x, complex_safe=False)\nexcept NameError:\n pass\np = om.Problem()\ncomp = p.model.add_subsystem(\"comp\", om.ExecComp(\"y = 2 * unsafe(x)\"))\n\n# because our function is complex unsafe, we must declare that the partials\n# with respect to 'x' use 'fd' instead of 'cs'\ncomp.declare_partials('*', 'x', method='fd')\n\np.setup()\np.run_model()\nJ = p.compute_totals(of=['comp.y'], wrt=['comp.x'])\nprint(J['comp.y', 'comp.x'][0][0])", "_____no_output_____" ], [ "assert_near_equal(J['comp.y', 'comp.x'][0][0], 4., 1e-5)", "_____no_output_____" ] ], [ [ "## ExecComp Example: Adding Expressions\n\nYou can add additional expressions to an `ExecComp` with the \"add_expr\" method.", "_____no_output_____" ] ], [ [ "import numpy as np\n\nclass ConfigGroup(om.Group):\n def setup(self):\n excomp = om.ExecComp('y=x',\n x={'value' : 3.0, 'units' : 'mm'},\n y={'shape' : (1, ), 'units' : 'cm'})\n\n self.add_subsystem('excomp', excomp, promotes=['*'])\n\n def configure(self):\n self.excomp.add_expr('z = 2.9*x',\n z={'shape' : (1, ), 'units' : 's'})\n\np = om.Problem()\np.model.add_subsystem('sub', ConfigGroup(), promotes=['*'])\np.setup()\np.run_model()\n\nprint(p.get_val('z'))\nprint(p.get_val('y'))", "_____no_output_____" ], [ "assert_almost_equal(p.get_val('z'), 8.7, 1e-8)\nassert_almost_equal(p.get_val('y'), 3.0, 1e-8)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec80f618e90a7c04378ca7389793e51a6b1e1d0b
11,845
ipynb
Jupyter Notebook
standardiser2/docs/Hydroxy_pyridine_within_ring.ipynb
EMVGaron/standardiser_2
0c69234bd9a240b052010f680f405c600b818dcb
[ "Apache-2.0" ]
49
2015-03-09T16:02:44.000Z
2022-02-23T13:05:12.000Z
standardiser2/docs/Hydroxy_pyridine_within_ring.ipynb
EMVGaron/standardiser_2
0c69234bd9a240b052010f680f405c600b818dcb
[ "Apache-2.0" ]
11
2016-07-08T19:33:44.000Z
2021-06-03T02:23:29.000Z
standardiser2/docs/Hydroxy_pyridine_within_ring.ipynb
EMVGaron/standardiser_2
0c69234bd9a240b052010f680f405c600b818dcb
[ "Apache-2.0" ]
24
2015-05-28T08:01:52.000Z
2022-01-31T09:59:20.000Z
58.063725
2,142
0.779654
[ [ [ "%run notebook_setup.py\n\nsys.path.append('../..')", "_____no_output_____" ], [ "from standardiser import rules\n\nfrom standardiser.rules_demo import rules_table, show_change", "_____no_output_____" ], [ "rules.logger.setLevel('DEBUG')", "_____no_output_____" ] ], [ [ "### 4-hydroxy pyridines\n\nIf the rule '[4-hydroxy pyridine -> 4-pyridone (within-ring)](03_rules.ipynb#4-hydroxy_pyridine_-_4-pyridone_within-ring)' is not enabled, the more general rule '[4-hydroxy pyridine -> 4-pyridone (any)](03_rules.ipynb#4-hydroxy_pyridine_-_4-pyridone_any)' alone can give undesirable effects.\n\nThe following molecule is an example (note that the 'within-ring' version of the rule is temporarily disbled for this demo)...", "_____no_output_____" ] ], [ [ "mol = Chem.MolFromSmiles(\"C[n+]1ccc)Ncc1\")\n\nmol", "_____no_output_____" ], [ "# Temporarily remove rule '4-hydroxy pyridine -> 4-pyridone (within-ring)', then apply rules...\n\noriginal_rules = rules.rule_set\n\nrules.rule_set = [x for x in original_rules if x['name'] != '4-hydroxy pyridine -> 4-pyridone (within-ring)']\n\nrules.apply(mol)", "[2016/Mar/24 16:26:11 DEBUG ] apply> mol = 'Oc1c2ccccc2nc2ccncc12'\n[2016/Mar/24 16:26:11 DEBUG ] apply> starting pass 1...\n[2016/Mar/24 16:26:11 DEBUG ] rule 4 '4-hydroxy pyridine -> 4-pyridone (any)' applied on pass 1\n[2016/Mar/24 16:26:11 DEBUG ] ...total of 1 hits in pass: will continue...\n[2016/Mar/24 16:26:11 DEBUG ] apply> starting pass 2...\n[2016/Mar/24 16:26:11 DEBUG ] ...total of 0 hits in pass: finished.\n" ] ], [ [ "Note that the 'across-ring' product had been produced, instead of the desired 'within-ring' product. \n\nIf the restrictive 'within-ring' version of the rule (which is applied before the more general version) is re-enabled, the desired product is obtained...", "_____no_output_____" ] ], [ [ "# Restore rule '4-hydroxy pyridine -> 4-pyridone (within-ring)', then apply rules...\n\nrules.rule_set = original_rules\n\nrules.apply(mol)", "[2016/Mar/24 16:26:11 DEBUG ] apply> mol = 'Oc1c2ccccc2nc2ccncc12'\n[2016/Mar/24 16:26:11 DEBUG ] apply> starting pass 1...\n[2016/Mar/24 16:26:11 DEBUG ] rule 3 '4-hydroxy pyridine -> 4-pyridone (within-ring)' applied on pass 1\n[2016/Mar/24 16:26:11 DEBUG ] ...total of 1 hits in pass: will continue...\n[2016/Mar/24 16:26:11 DEBUG ] apply> starting pass 2...\n[2016/Mar/24 16:26:11 DEBUG ] ...total of 0 hits in pass: finished.\n" ] ], [ [ "Note that the desired 'within-ring' product has been produced, instead of the 'across-ring' product.", "_____no_output_____" ], [ "An example of where this problem manifested is [CHEMBL348887](https://www.ebi.ac.uk/chembl/compound/inspect/CHEMBL348887).\n\nNote that if only the more general rule is enabled, the problem might not be obvious. This is because which substructure (_i.e._ the within-ring or across-ring version) is affected is arbitrary, depending on the order of the atoms in the input molecule. ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec81144dd40b0a0eda657207ddd2cea4a1807c9d
2,145
ipynb
Jupyter Notebook
Quiz/m3_funds_etfs_portfolio_optimization/l3_portfolio_risk_and_return/m3l3_covariance_solution.ipynb
scumabo/AI4Trading
9a36e18fc25e849b80718c3a462637b086089945
[ "Apache-2.0" ]
98
2020-05-22T00:41:23.000Z
2022-03-24T12:57:15.000Z
Quiz/m3_funds_etfs_portfolio_optimization/l3_portfolio_risk_and_return/m3l3_covariance_solution.ipynb
kevingoh/AI-for-Trading
9d8e85c0753e41fec6b55b5803cdfd34668d8f71
[ "Apache-2.0" ]
1
2020-01-04T05:32:35.000Z
2020-01-04T18:22:21.000Z
Quiz/m3_funds_etfs_portfolio_optimization/l3_portfolio_risk_and_return/m3l3_covariance_solution.ipynb
kevingoh/AI-for-Trading
9d8e85c0753e41fec6b55b5803cdfd34668d8f71
[ "Apache-2.0" ]
74
2020-05-05T16:44:42.000Z
2022-03-23T06:59:09.000Z
19.5
78
0.493706
[ [ [ "# Covariance Matrix", "_____no_output_____" ], [ "## Install libraries", "_____no_output_____" ] ], [ [ "import sys\n!{sys.executable} -m pip install -r requirements.txt", "_____no_output_____" ] ], [ [ "## Imports", "_____no_output_____" ] ], [ [ "import numpy as np\nimport quiz_tests", "_____no_output_____" ] ], [ [ "# Quiz Solution", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef covariance_matrix(returns):\n \"\"\"\n Create a function that takes the return series of a set of stocks\n and calculates the covariance matrix.\n \n Parameters\n ----------\n returns : numpy.ndarray\n 2D array containing stock return series in each row.\n \n Returns\n -------\n x : np.ndarray\n A numpy ndarray containing the covariance matrix\n \"\"\"\n cov = np.cov(returns) \n return cov\n\nquiz_tests.test_covariance_matrix(covariance_matrix)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec8120efc0f4b37ba78a7b23f5f4c80d67122ec5
319,460
ipynb
Jupyter Notebook
Image Classifier Project.ipynb
manaskainth/image_classifier_pytorch
ad9bbeb84b05fd54068c02d992147087648112c0
[ "MIT" ]
null
null
null
Image Classifier Project.ipynb
manaskainth/image_classifier_pytorch
ad9bbeb84b05fd54068c02d992147087648112c0
[ "MIT" ]
null
null
null
Image Classifier Project.ipynb
manaskainth/image_classifier_pytorch
ad9bbeb84b05fd54068c02d992147087648112c0
[ "MIT" ]
null
null
null
402.342569
160,256
0.930295
[ [ [ "# Developing an AI application\n\nGoing forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications. \n\nIn this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below. \n\n<img src='assets/Flowers.png' width=500px>\n\nThe project is broken down into multiple steps:\n\n* Load and preprocess the image dataset\n* Train the image classifier on your dataset\n* Use the trained classifier to predict image content\n\nWe'll lead you through each part which you'll implement in Python.\n\nWhen you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.\n\nFirst up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.", "_____no_output_____" ] ], [ [ "# Imports here\n\n\nimport torch\nfrom torch import nn , optim\nfrom torchvision import datasets, transforms, models\nimport numpy as np\nfrom PIL import Image\nimport json\n\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict", "_____no_output_____" ], [ "# Making sure which device is available and used\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n", "cuda:0\n" ] ], [ [ "## Load the data\n\nHere you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.\n\nThe validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.\n\nThe pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. 
For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.\n ", "_____no_output_____" ] ], [ [ "data_dir = 'flowers'\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'", "_____no_output_____" ], [ "# Defining transforms for the training, validation, and testing sets\ndata_transforms = {'train_data' : transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.RandomRotation(30),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485,0.456,0.406],\n [0.229,0.224,0.225]) ]),\n 'test_data' : transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485,0.456,0.406],\n [0.229,0.224,0.225])\n ])\n }\n\n# Loading the datasets with ImageFolder\n\ntrain_data = datasets.ImageFolder(train_dir,transform = data_transforms['train_data'])\nvalid_data = datasets.ImageFolder(valid_dir,transform = data_transforms['test_data'])\ntest_data = datasets.ImageFolder(test_dir, transform= data_transforms['test_data'])\n\n\n# Using the image datasets and the trainforms, define the dataloaders\n\ntrainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)\nvalidloader = torch.utils.data.DataLoader(valid_data,batch_size=32,shuffle=True)\ntestloader = torch.utils.data.DataLoader(test_data,batch_size=32,shuffle=True)\n\n", "_____no_output_____" ] ], [ [ "### Label mapping\n\nYou'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.", "_____no_output_____" ] ], [ [ "\nwith open('cat_to_name.json', 'r') as f:\n cat_to_name = json.load(f)\n", "_____no_output_____" ] ], [ [ "# Building and training the classifier\n\nNow that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.\n\nWe're going to leave this part up to you. If you want to talk through it with someone, chat with your fellow students! You can also ask questions on the forums or join the instructors in office hours.\n\nRefer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:\n\n* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)\n* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout\n* Train the classifier layers using backpropagation using the pre-trained network to get the features\n* Track the loss and accuracy on the validation set to determine the best hyperparameters\n\nWe've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. 
Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!\n\nWhen training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.", "_____no_output_____" ] ], [ [ "# Build and train your network", "_____no_output_____" ], [ "# Importing a pre-trained model - VGG16\n\ndef create_model():\n    \n    model = models.vgg16(pretrained=True)\n\n\n    # freezing model parameters\n    for param in model.parameters():\n        param.requires_grad = False\n    \n    # Creating the classifier for the required flower classes (102)\n    #features = model.classifier.in_features\n    \n    classifier = nn.Sequential(OrderedDict([\n                          ('fc1', nn.Linear(25088,4096)),\n                          ('relu', nn.ReLU()),\n                          ('drop', nn.Dropout(p=0.2)),\n                          ('fc3', nn.Linear(4096, 102)),\n                          ('output', nn.LogSoftmax(dim=1))\n                          ]))\n\n\n    model.classifier = classifier\n    return model\n", "_____no_output_____" ], [ "# Creating the models\nmodel = create_model()\n", "_____no_output_____" ], [ "# Setting optimizer and Criterion\n\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.classifier.parameters(),lr=0.0001)\nepochs = 2\n", "_____no_output_____" ], [ "# Validation function\n\ndef validation(model,data,criterian):\n    model.eval()\n    model.to(device)\n    accuracy = 0\n    loss = 0\n\n    for images,labels in data:\n\n        images,labels = images.to(device),labels.to(device)\n        output = model.forward(images)\n        loss+= criterian(output,labels).item()\n\n        ps = torch.exp(output)\n        eq = (labels.data == ps.max(dim=1)[1])\n        accuracy += eq.type(torch.FloatTensor).mean()\n\n    return accuracy,loss", "_____no_output_____" ], [ "# Training the model\n\n\n\ndef trainer(model,train_data,criterion,optimizer,valid_data,epoch=5):\n    running_loss = 0\n\n    # Using GPU if available else CPU\n    model.to(device)\n\n    for i in range(epoch):\n        model.train()\n        for loop ,(inputs, labels) in enumerate(train_data):\n            loop = loop+1\n            inputs,labels = inputs.to(device), labels.to(device)\n\n            optimizer.zero_grad()\n\n            #Forwarding and BackPropagating\n\n            output = model.forward(inputs)\n            loss = criterion(output,labels)\n            loss.backward()\n            optimizer.step()\n\n            running_loss += loss.item()\n\n            #defining statistics\n\n            if loop % 32 == 0:\n\n                # Evaluation Mode\n                model.eval()\n                \n                with torch.no_grad():\n                    accuracy,validation_loss = validation(model,valid_data,criterion)\n                \n                print(\"Epoch: {}/{} \".format(i+1,epoch),\n                      \"Running Loss: {:.3f}\".format(running_loss/32),\n                      \"Validation Loss: {:.3f}\".format(validation_loss/len(valid_data)),\n                      \"Validation Accuracy: {:.2f} %\".format((accuracy/len(valid_data))*100) \n                     )\n\n                running_loss = 0\n\n                # Training mode\n                model.train()\n", "_____no_output_____" ], [ "# Training the network\n\ntrainer(model,trainloader,criterion,optimizer,validloader,2)", "Epoch: 1/2  Running Loss: 3.870 Validation Loss: 2.924 Validation Accuracy: 34.52 %\nEpoch: 1/2  Running Loss: 2.422 Validation Loss: 1.879 Validation Accuracy: 56.28 %\nEpoch: 1/2  Running Loss: 1.806 Validation Loss: 1.394 Validation Accuracy: 65.72 %\nEpoch: 1/2  Running Loss: 1.422 Validation Loss: 1.056 Validation Accuracy: 74.00 %\nEpoch: 1/2  Running Loss: 1.166 Validation Loss: 0.936 
Validation Accuracy: 78.35 %\nEpoch: 1/2 Running Loss: 1.086 Validation Loss: 0.839 Validation Accuracy: 79.53 %\nEpoch: 2/2 Running Loss: 1.062 Validation Loss: 0.737 Validation Accuracy: 81.25 %\nEpoch: 2/2 Running Loss: 0.601 Validation Loss: 0.728 Validation Accuracy: 81.66 %\nEpoch: 2/2 Running Loss: 0.540 Validation Loss: 0.810 Validation Accuracy: 78.55 %\nEpoch: 2/2 Running Loss: 0.624 Validation Loss: 0.700 Validation Accuracy: 81.90 %\nEpoch: 2/2 Running Loss: 0.617 Validation Loss: 0.712 Validation Accuracy: 81.45 %\nEpoch: 2/2 Running Loss: 0.596 Validation Loss: 0.642 Validation Accuracy: 84.28 %\n" ] ], [ [ "## Testing your network\n\nIt's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well.", "_____no_output_____" ] ], [ [ "# Performing validation on the test set i.e checking accuracy on test set\n\nwith torch.no_grad():\n \n test_accuracy,test_loss = validation(model,testloader,criterion)\n \n print(\"Test Loss : {:.3f}\".format(test_loss/len(testloader)),\n \"Test Accuracy: {:.3f}\".format(test_accuracy/len(testloader))\n )\n", "Test Loss : 0.666 Test Accuracy: 0.822\n" ] ], [ [ "## Save the checkpoint\n\nNow that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.\n\n```model.class_to_idx = image_datasets['train'].class_to_idx```\n\nRemember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.", "_____no_output_____" ] ], [ [ "# TODO: Save the checkpoint \n# save index mapping/optimizer/epochs\ndef save_checkpoint(state, filename='checkpoint.pth'):\n torch.save(state, filename)\n\nstate = {\n 'epoch': epochs,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'mapping': train_data.class_to_idx,\n \n }\n\n\n", "_____no_output_____" ], [ "# Saving the model\nsave_checkpoint(state,'check1.pth')", "_____no_output_____" ] ], [ [ "## Loading the checkpoint\n\nAt this point it's good to write a function that can load a checkpoint and rebuild the model. 
That way you can come back to this project and keep working on it without having to retrain the network.", "_____no_output_____" ] ], [ [ "# TODO: Write a function that loads a checkpoint and rebuilds the model\ndef load_checkpoint(path):\n \n \n model = create_model() \n checkpoint = torch.load(path,map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint[\"state_dict\"])\n model.optimizer = optim.Adam(model.classifier.parameters())\n model.optimizer.load_state_dict(checkpoint['optimizer'])\n model.class_to_idx = checkpoint[\"mapping\"]\n model.epoch = checkpoint[\"epoch\"]\n return model\n\n\n ", "_____no_output_____" ], [ "loaded_network = load_checkpoint('check1.pth')", "_____no_output_____" ], [ "# Testing the loaded network\n\n\n\nloaded_network.eval()\nloaded_network.to(device)\n\naccuracy,loss = validation(loaded_network,testloader,criterion)\nprint(\"Validation Loss: {:.3f}\".format(loss/len(testloader)),\n \"Validation Accuracy: {:.2f} %\".format((accuracy/len(testloader))*100) \n )\n\n", "Validation Loss: 0.665 Validation Accuracy: 82.24 %\n" ] ], [ [ "# Inference for classification\n\nNow you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like \n\n```python\nprobs, classes = predict(image_path, model)\nprint(probs)\nprint(classes)\n> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]\n> ['70', '3', '45', '62', '55']\n```\n\nFirst you'll need to handle processing the input image such that it can be used in your network. \n\n## Image Preprocessing\n\nYou'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training. \n\nFirst, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.\n\nColor channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.\n\nAs before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation. \n\nAnd finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). 
The color channel needs to be first and retain the order of the other two dimensions.", "_____no_output_____" ] ], [ [ "def process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n \n \n # Processing a PIL image for use in a PyTorch model\n \n transform = transforms.Compose([\n transforms.Resize(256), \n transforms.CenterCrop(224), \n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n \n pil_img = Image.open(image)\n \n return transform(pil_img).numpy() \n ", "_____no_output_____" ] ], [ [ "To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).", "_____no_output_____" ] ], [ [ "def imshow(image, ax=None, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n \n # PyTorch tensors assume the color channel is the first dimension\n # but matplotlib assumes is the third dimension\n image = image.transpose((1, 2, 0))\n \n # Undo preprocessing\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n \n # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\n image = np.clip(image, 0, 1)\n \n ax.imshow(image)\n \n return ax", "_____no_output_____" ], [ "imshow(process_image(test_dir+'/19/image_06155.jpg'));", "_____no_output_____" ] ], [ [ "## Class Prediction\n\nOnce you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.\n\nTo get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). 
Make sure to invert the dictionary so you get a mapping from index to class as well.\n\nAgain, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.\n\n```python\nprobs, classes = predict(image_path, model)\nprint(probs)\nprint(classes)\n> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]\n> ['70', '3', '45', '62', '55']\n```", "_____no_output_____" ] ], [ [ "def predict(image_path, model, topk=5):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n \n image = torch.unsqueeze(torch.Tensor(process_image(image_path)),0)\n \n \n with torch.no_grad():\n \n model.to(device)\n \n model.eval()\n \n image = image.to(device)\n \n output = model.forward(image)\n\n result = torch.exp(output.cpu()).topk(topk)\n \n probs = result[0][0].numpy()\n \n indices = result[1][0].numpy() \n \n idx_to_class = { v : k for k,v in model.class_to_idx.items()}\n\n classes = [idx_to_class[x] for x in indices]\n\n return probs, classes\n\n\n \n \n", "_____no_output_____" ], [ "# Predicting probabilites of classes\n\nprob, classes =predict(test_dir+'/19/image_06170.jpg',loaded_network)\nprint(prob,classes)\n", "[ 0.54435593 0.19689633 0.11856464 0.03405151 0.02139659] ['19', '82', '51', '76', '77']\n" ] ], [ [ "## Sanity Checking\n\nNow that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:\n\n<img src='assets/inference_example.png' width=300px>\n\nYou can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.", "_____no_output_____" ] ], [ [ "# TODO: Display an image along with the top 5 classes\n\ndef display(img_path, probs,label_map):\n \n\n img_label = img_path.split('/')[-2]\n labels = [label_map[x].title() for x in classes]\n print(labels)\n fig, (i_plot, p_plot) = plt.subplots(nrows=2,ncols=1,figsize=(4,8))\n\n i_plot.axis('off')\n i_plot.set_title(label_map[img_label].title())\n i_plot.imshow(Image.open(img_path))\n\n\n\n ypos = np.arange(len(probs))\n p_plot.barh(ypos,probs)\n p_plot.set_yticks(ypos)\n p_plot.set_yticklabels(labels)\n p_plot.invert_yaxis() \n \n", "_____no_output_____" ], [ "display(test_dir+'/19/image_06170.jpg',prob,cat_to_name)", "['Balloon Flower', 'Clematis', 'Petunia', 'Morning Glory', 'Passion Flower']\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec81273c031abe3f069eed0c0d308c626aca6e4f
1,361
ipynb
Jupyter Notebook
strings.ipynb
camara94/python-data-analysis
15e07afdafa761c307ce7d50e1f779da92c9c4c3
[ "MIT" ]
null
null
null
strings.ipynb
camara94/python-data-analysis
15e07afdafa761c307ce7d50e1f779da92c9c4c3
[ "MIT" ]
null
null
null
strings.ipynb
camara94/python-data-analysis
15e07afdafa761c307ce7d50e1f779da92c9c4c3
[ "MIT" ]
1
2022-01-11T18:13:36.000Z
2022-01-11T18:13:36.000Z
20.938462
93
0.410727
[ [ [ "\n## Python More on Strings\n---\n\n", "_____no_output_____" ] ], [ [ "sales_records = {\n 'price': 3.24,\n 'num_items': 4,\n 'person': 'Chris'\n}\n\nsales_statements = '{} bought items(s) at a price of {} each for a total of {} '\nprint( sales_statements.format(\n sales_records['person'],\n sales_records['price'],\n sales_records['num_items']*sales_records['price']\n) )", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
ec81310e8405a7a136d81ba780dd9a8ca2fa26d3
239,067
ipynb
Jupyter Notebook
notebooks/countermeasures_national_aggregates.ipynb
braadbaart/covid19
ffe22cd5ed7e8409c99ac27805e33eea941f031c
[ "MIT" ]
null
null
null
notebooks/countermeasures_national_aggregates.ipynb
braadbaart/covid19
ffe22cd5ed7e8409c99ac27805e33eea941f031c
[ "MIT" ]
4
2020-11-13T18:45:25.000Z
2022-02-10T01:26:47.000Z
notebooks/countermeasures_national_aggregates.ipynb
braadbaart/covid19
ffe22cd5ed7e8409c99ac27805e33eea941f031c
[ "MIT" ]
null
null
null
209.892011
139,512
0.865623
[ [ [ "Countermeasures\n---------------------\n\nThis notebook contains two sets of analyses:\n\n* [Exploratory analyses](#Exploratory-analysis): preliminary analysis of the John Hoplkins containment measures database.\n* [Country-level aggregates](#National-countermeasures): causal impact of individual countermeasures on new confirmed cases in national aggregate data.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport networkx as nx\nimport seaborn as sns\n\n%matplotlib inline\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "pd.options.display.max_colwidth = 200\npd.options.display.max_columns = 50", "_____no_output_____" ] ], [ [ "#### John Hopkins containment measures database\n\nThe data is made available as part of the John Hopkins [Containment Measures Database](http://epidemicforecasting.org/containment). See the website in the link for a description of the data sources.", "_____no_output_____" ] ], [ [ "containment_df = pd.read_csv(\"data/countermeasures_db_johnshopkins_2020_03_30.csv\")", "_____no_output_____" ] ], [ [ "### Exploratory analysis", "_____no_output_____" ] ], [ [ "containment_df.columns", "_____no_output_____" ], [ "print(containment_df[\"Country\"].unique())", "['Austria' 'Germany' 'United Kingdom' 'Vietnam' 'South Korea' 'Singapore'\n 'Israel' 'Japan' 'Sweden' 'San Marino' 'Slovenia' 'Canada' 'Taiwan'\n 'Macau' 'Hong Kong' 'China' 'Thailand' 'Italy' 'Czechia' 'Australia'\n 'Trinidad and Tobago' 'Qatar' 'New Zealand' 'Colombia' 'Romania' 'France'\n 'Portugal' 'Spain' 'Belgium' 'Luxembourg' 'Albania' 'Andorra'\n 'Azerbaijan' 'Belarus' 'Bosnia and Herzegovina' 'Bulgaria' 'Denmark'\n 'Estonia' 'Cyprus' 'Croatia' 'Finland' 'Georgia' 'Hungary' 'Latvia'\n 'Lithuania' 'Greece' 'Moldova' 'Malta' 'Monaco' 'Netherlands' 'Iceland'\n 'Ireland' 'Kosovo' 'Kazakhstan' 'Poland' 'Turkey' 'Ukraine' 'Slovakia'\n 'Serbia' 'Switzerland' 'Norway' 'Montenegro' 'Iran' 'Liechtenstein'\n 'Russia' 'Mexico' 'Egypt' 'Malaysia' 'Nepal' 'Afghanistan' 'Iraq'\n 'Philippines' 'Kuwait' 'South Africa' 'Armenia' 'Pakistan' 'Brazil'\n 'Costa Rica' 'Panama' 'India' 'Bahrain' 'United Arab Emirates'\n 'Kyrgyzstan' 'Indonesia' 'Namibia' 'Uganda']\n" ], [ "other_cm_cols = ['Unnamed: 0', 'Resumption', 'Diagnostic criteria tightened', 'Diagnostic criteria loosened',\n 'Testing criteria', 'Date', 'Country', 'Confirmed Cases', 'Deaths']", "_____no_output_____" ], [ "countermeasures = list(filter(lambda m: m not in other_cm_cols, containment_df.columns))", "_____no_output_____" ], [ "cm_df = containment_df[countermeasures + ['Date', 'Country']].fillna(0)", "_____no_output_____" ], [ "cm_df[countermeasures] = cm_df[countermeasures].mask(cm_df[countermeasures] > 0, 1)", "_____no_output_____" ], [ "cm_df.groupby(\"Date\").sum().plot(figsize=(16,8), title=\"Number of countries implementing measure by date\")\\\n.legend(bbox_to_anchor=(1,1))", "_____no_output_____" ] ], [ [ "#### Countermeasure dataset", "_____no_output_____" ] ], [ [ "df = containment_df[countermeasures + [\"Date\", \"Country\", \"Confirmed Cases\", \"Deaths\"]].fillna(0)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df[countermeasures] = df[countermeasures].mask(df[countermeasures] > 0, 1.0)", "_____no_output_____" ] ], [ [ "## National countermeasures\n\nExploration of the impact of individual and packages of countermeasures on the incidence of reported new confirmed cases of COVID-19 in a single country.", "_____no_output_____" ] ], [ [ "country = \"Germany\"\ntreatment = 
\"Nonessential business suspension\"", "_____no_output_____" ], [ "country_df = df[df[\"Country\"] == country].sort_values(by=\"Date\")", "_____no_output_____" ], [ "country_df[[\"New Confirmed Cases\", \"Reported Deaths\"]] = country_df[[\"Confirmed Cases\", \"Deaths\"]].diff()", "_____no_output_____" ], [ "country_df = country_df.fillna(0)", "_____no_output_____" ], [ "country_df.loc[country_df[treatment].idxmax(1.0):, [\"Date\", \"New Confirmed Cases\", \"Reported Deaths\"]]", "_____no_output_____" ], [ "country_df.head()", "_____no_output_____" ] ], [ [ "## Causal impact of individual countermeasures\n\n[Causal impact](https://github.com/tcassou/causal_impact) provides causal inference using Bayesian structural time-series models. It was used here to analyse the effect of different COVID-19 countermeasures (`treatment`) on the number of new confirmed cases per day. Since most countermeasures were implemented as a package (e.g. *Nonessential business suspension* will often be done one or two days before or after *School closure*), the treatment start date should be interpreted as a proxy of a package of measures. A next step will be to do cross-country comparisons of different packages (combinations of measures) to see which package had the most positive impact relative to country population size and mortality rate.", "_____no_output_____" ] ], [ [ "ci_df = country_df[[\"Date\", \"Reported Deaths\", \"New Confirmed Cases\"]].set_index(\"Date\")", "_____no_output_____" ], [ "ci_df = ci_df.rename(columns={\"New Confirmed Cases\": \"y\"})", "_____no_output_____" ], [ "measure_start_date = country_df.loc[country_df[treatment].idxmax(1.0):, ['Date']].values[0][0]", "_____no_output_____" ], [ "print(f\"Date that the '{treatment}' treatment was first implemented in {country}: {measure_start_date}.\")", "Date that the 'Nonessential business suspension' treatment was first implemented in Germany: 2020-03-14.\n" ], [ "measure_keys = country_df.loc[country_df[treatment].idxmax(1.0):, countermeasures].keys()\nmeasures_on_start_date = list(country_df.loc[country_df[treatment].idxmax(1.0):, countermeasures].values[0])\nactive_measures = list(filter(lambda m: m[1] == 1.0 and m[0] != treatment, \n list(zip(measure_keys, measures_on_start_date))))", "_____no_output_____" ], [ "print(f\"Other measures active in {country} on {measure_start_date}:\\n\\n{', '.join(list(map(lambda am: am[0], active_measures)))}.\")", "Other measures active in Germany on 2020-03-14:\n\nInternational travel restriction, Gatherings banned, Public education and incentives, Public interaction reduction, School closure, Activity cancellation.\n" ], [ "from causal_impact.causal_impact import CausalImpact\n\nci = CausalImpact(ci_df, country_df.loc[country_df[treatment].idxmax(1.0):, [\"Date\"]].values[0][0])", "_____no_output_____" ], [ "ci.run()", "_____no_output_____" ], [ "ci.plot()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec81421928ac53e5f0977c6dbbecfe63c22dda5b
1,446
ipynb
Jupyter Notebook
DAY 301 ~ 400/DAY384_[BaekJoon] 직각삼각형 (Python).ipynb
SOMJANG/CODINGTEST_PRACTICE
1a7304e9063579441b8a67765175c82b0ad93ac9
[ "MIT" ]
15
2020-03-17T01:18:33.000Z
2021-12-24T06:31:06.000Z
DAY 301 ~ 400/DAY384_[BaekJoon] 직각삼각형 (Python).ipynb
SOMJANG/CODINGTEST_PRACTICE
1a7304e9063579441b8a67765175c82b0ad93ac9
[ "MIT" ]
null
null
null
DAY 301 ~ 400/DAY384_[BaekJoon] 직각삼각형 (Python).ipynb
SOMJANG/CODINGTEST_PRACTICE
1a7304e9063579441b8a67765175c82b0ad93ac9
[ "MIT" ]
10
2020-03-17T01:18:34.000Z
2022-03-30T10:53:07.000Z
21.58209
125
0.482019
[ [ [ "## 2021년 6월 3일 목요일\n### BaekJoon - 직각삼각형 (Python)\n### 문제 : https://www.acmicpc.net/problem/4153\n### 블로그 : https://somjang.tistory.com/entry/BaekJoon-4153%EB%B2%88-%EC%A7%81%EA%B0%81%EC%82%BC%EA%B0%81%ED%98%95-Python", "_____no_output_____" ], [ "### Solution", "_____no_output_____" ] ], [ [ "import math\n\nwhile True:\n input_nums = input()\n \n if input_nums == \"0 0 0\":\n break\n \n triangle = list(map(int, input_nums.split()))\n \n triangle.sort()\n \n if int(math.sqrt(pow(triangle[0], 2) + pow(triangle[1], 2))) == triangle[2]:\n print(\"right\")\n else:\n print(\"wrong\")", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ] ]
ec8142e3267cdf6f11aa8ab5cd61b852fb421663
264,450
ipynb
Jupyter Notebook
docs/archive/nonlinear_classifiers_feather_pipeline_tutorial.ipynb
alishakodibagkar/brainlit
2dc12eac3ea71412a36ecace3bab2deebd2ef29c
[ "Apache-2.0" ]
null
null
null
docs/archive/nonlinear_classifiers_feather_pipeline_tutorial.ipynb
alishakodibagkar/brainlit
2dc12eac3ea71412a36ecace3bab2deebd2ef29c
[ "Apache-2.0" ]
null
null
null
docs/archive/nonlinear_classifiers_feather_pipeline_tutorial.ipynb
alishakodibagkar/brainlit
2dc12eac3ea71412a36ecace3bab2deebd2ef29c
[ "Apache-2.0" ]
null
null
null
220.742905
234,616
0.899894
[ [ [ "import numpy as np\nimport pandas as pd\nimport brainlit\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom brainlit.algorithms.regression import *\n\nfrom brainlit.preprocessing.features import neighborhood as nbrhood", "Using TensorFlow backend.\n" ] ], [ [ "# Tutorial for running the classifiers on extracted features - 3x3 neighborhoods", "_____no_output_____" ], [ "Loading extracted neighborhood features from csv file(This file will be changed to include the whole data).<br />\nThe neighborhood here is a 3x3x3 with 27 features and 150 samples, subsampled from a larger neighborhood of 41x41x41.", "_____no_output_____" ] ], [ [ "from zipfile import ZipFile\nfile_name = \"feather_files.zip\"\nX = []\ny_all = []\norig_neigh_size = (41,41,41)\nsub_size = (3,3,3)\nwith ZipFile(file_name, 'r') as zip: \n filelist = zip.namelist()\n for i in range(0,len(filelist)):\n try:\n f = zip.extract(filelist[i])\n dat = pd.read_feather(f)\n a1 = dat.iloc[:, 3:].to_numpy()\n ###subneighborhood\n X1 = nbrhood.subsample(np.transpose(a1), orig_neigh_size, sub_size)\n X.append(X1.transpose());\n y1 = dat['Label']\n y_all.append(y1)\n except:\n print(\"gone\" + filelist[i])\n pass\nnp.array(X).shape,np.array(y_all).shape", "_____no_output_____" ], [ "n_features=sub_size[0]*sub_size[1]*sub_size[2]\nX_all = X\nno_samps = np.array(X_all).shape[0]*np.array(X_all).shape[1]\nX_all = np.array(X_all)\nX_all.resize(no_samps,n_features)\ny_all = np.array(y_all).ravel()\nX_all.shape, y_all.shape", "_____no_output_____" ] ], [ [ "Generating Labels", "_____no_output_____" ] ], [ [ "y_all = np.zeros(len(X_all))\nfor i in range(0,len(y_all),2):\n y_all[i] = 1\ny_all = np.array(y_all)\nX_all.shape,y_all.shape", "_____no_output_____" ] ], [ [ "Normalizing and splitting the dataset", "_____no_output_____" ] ], [ [ "X_all = StandardScaler().fit_transform(X_all)\nprint(np.array(X_all).shape)\nX_sel, X_test, y_sel, y_test = train_test_split(\n X_all, y_all, test_size=10, random_state=42)", "(150, 27)\n" ] ], [ [ "Setting up classifiers to train:<br />\n1) Multilayer Perceptron with 4 hiddenlayer, all layers having sigmoid activation.<br />\n2) Simple Logistic Regression classifier<br />\n3) Two layer Neural Network with output Logistic Regression layer with sigmoid activation and input layer is a fullyconnected layer with ReLU activation.<br />", "_____no_output_____" ] ], [ [ "classifiers = [\n MLPClassifier(hidden_layer_sizes=4, activation=\"logistic\", alpha=1, max_iter=1000),\n LogisticRegression(max_iter=1000),\n MLP_LR_NN(X_sel, y_sel, n_features),\n]", "_____no_output_____" ] ], [ [ "Running the classifiers on the train and test data to get accuracy of each classifier.<br />\nThe accuracy, train time, test time, classifier name and number of iteration is stored in a csv file : \"test.csv\"", "_____no_output_____" ] ], [ [ "names = {\"MLP-LR\": \"black\", \"LR\": \"blue\", \"MLP-relu-LR\": \"red\"}\nrun_classifiers(\n X_sel, y_sel, X_test, y_test, classifiers, names, filename=\"Dataset_nonlinear_features/tests/test_feather.csv\")", " 0%| | 0/4 [00:00<?, ?it/s]\n 0%| | 0/10 [00:00<?, ?it/s]\u001b[A\n\n 0%| | 0/3 [00:00<?, ?it/s]\u001b[A\u001b[A\n\n 33%|███▎ | 1/3 [00:00<00:00, 7.25it/s]\u001b[A\u001b[A" ] ], [ [ "The data from \"test.csv\" can be plotted using the plot_data function as follows", "_____no_output_____" ] ], [ [ "names = {\"MLP-LR\": \"black\", 
\"LR\": \"blue\", \"MLP-relu-LR\": \"red\"}\nfig,ax = plot_data(\"Dataset_nonlinear_features/tests/test_feather.csv\", names, \"Accuracy\", \"Accuracy\", \"MLP-LR vs LR classification - neighborhood\")", "C:\\Users\\sanik\\Anaconda3\\envs\\brainlight2\\lib\\site-packages\\brainlit\\algorithms\\regression\\log_regression.py:209: MatplotlibDeprecationWarning: Unrecognized location 'bottom right'. Falling back on 'best'; valid locations are\n\tbest\n\tupper right\n\tupper left\n\tlower left\n\tlower right\n\tright\n\tcenter left\n\tcenter right\n\tlower center\n\tupper center\n\tcenter\nThis will raise an exception in 3.3.\n plt.legend(loc=\"bottom right\", title=\"Algorithm\")\n" ], [ "fig", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ec8147157f8a8b4ea02264ec83a6941a1c43c4ce
22,920
ipynb
Jupyter Notebook
notebooks/Analyse-picks.ipynb
XiangmengCai/cryoem-analysis-tools
fcc027cdecbc91e3d1a31a65fa6b72e1d03b3d6a
[ "MIT" ]
null
null
null
notebooks/Analyse-picks.ipynb
XiangmengCai/cryoem-analysis-tools
fcc027cdecbc91e3d1a31a65fa6b72e1d03b3d6a
[ "MIT" ]
null
null
null
notebooks/Analyse-picks.ipynb
XiangmengCai/cryoem-analysis-tools
fcc027cdecbc91e3d1a31a65fa6b72e1d03b3d6a
[ "MIT" ]
null
null
null
60.634921
8,160
0.798124
[ [ [ "# Cryo-EM Ribosome Nearest Distance Neighbour Project", "_____no_output_____" ], [ "## 1. Introduction", "_____no_output_____" ], [ "## 2. Data used for this analysis", "_____no_output_____" ], [ "### Preprocessing the data", "_____no_output_____" ], [ "### Data Structure", "_____no_output_____" ], [ "Here we first define a function read_csfile(), which reads the input dataset and output the header and content.", "_____no_output_____" ] ], [ [ "def read_csfile(csfile):\n content = np.array(np.load(csfile).tolist())\n dtype = np.load(csfile).dtype\n header = []\n for key in dtype.fields.keys():\n header.append(key)\n header = np.array(header)\n return header, content", "_____no_output_____" ], [ "particle_file = '/Users/Expo/Desktop/Cryo-EM/Project/cryoem-analysis-tools/data/P8_J60_passthrough_particles.cs'\nheader, content = read_csfile(particle_file)", "_____no_output_____" ] ], [ [ "The shape of the content array is (244798, 28), which means there are 244,798 data points represented as arrays in the content array, and each data point (array) has 28 elements. ", "_____no_output_____" ] ], [ [ "print(content.shape)", "(244798, 28)\n" ] ], [ [ "The header contains the information of how the data is organized in the content array. For example, we see the first element of every data point is the unique id, which is used to identify a specific group of micrographs. The information most relevant to us is the 'location/center_x_frac' and 'location/center_y_frac' element.", "_____no_output_____" ] ], [ [ "print(header)", "['uid' 'location/micrograph_uid' 'location/exp_group_id'\n 'location/micrograph_path' 'location/micrograph_shape'\n 'location/center_x_frac' 'location/center_y_frac' 'pick_stats/ncc_score'\n 'pick_stats/power' 'pick_stats/template_idx' 'pick_stats/angle_rad'\n 'ctf/type' 'ctf/exp_group_id' 'ctf/accel_kv' 'ctf/cs_mm'\n 'ctf/amp_contrast' 'ctf/df1_A' 'ctf/df2_A' 'ctf/df_angle_rad'\n 'ctf/phase_shift_rad' 'ctf/scale' 'ctf/scale_const' 'ctf/shift_A'\n 'ctf/tilt_A' 'ctf/trefoil_A' 'ctf/tetra_A' 'ctf/anisomag' 'ctf/bfactor']\n" ] ], [ [ "## 3. 
Implementation", "_____no_output_____" ] ], [ [ "import numpy as np\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.neighbors import NearestNeighbors", "_____no_output_____" ], [ "def read_csfile(csfile):\n content = np.array(np.load(csfile).tolist())\n dtype = np.load(csfile).dtype\n header = []\n for key in dtype.fields.keys():\n header.append(key)\n header = np.array(header)\n return header, content", "_____no_output_____" ], [ "particle_file = '/Users/Expo/Desktop/Cryo-EM/Project/cryoem-analysis-tools/data/P8_J60_passthrough_particles.cs'", "_____no_output_____" ], [ "header, content = read_csfile(particle_file)", "_____no_output_____" ], [ "header", "_____no_output_____" ], [ "content.shape", "_____no_output_____" ], [ "content[:10,1]", "_____no_output_____" ], [ "mic1 = np.where(content[:,1]==content[0,1])", "_____no_output_____" ], [ "plt.scatter(content[mic1,5],content[mic1,6])\nplt.show()", "_____no_output_____" ], [ "# Using sklearn.metrics.pairwise.euclidean_distances function\ndef findMinDistance(mic, min_distance):\n loc = list(zip(content[mic,5][0], content[mic,6][0]))\n distances = euclidean_distances(loc, loc)\n\n for x in distances:\n minval = np.min(x[np.nonzero(x)])\n min_distances.append(minval)", "_____no_output_____" ], [ "# Array of distance to the nearest neighbor\nmin_distances=[]\n\n# Find the unique ID of the pictures\nuniqID = np.unique(content[:,1])\n\nfor i in uniqID:\n mic = np.where(content[:,1]==i)\n if(mic[0].shape[0]>1):\n findMinDistance(mic, min_distances)", "_____no_output_____" ], [ "sns.distplot(min_distances, hist=True, kde = False, bins = 100);\nprint(len(min_distances))", "244667\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec817ae9c1292c96fa2777b08125d25420867852
30,161
ipynb
Jupyter Notebook
examples/EpsteinCivilViolence/Epstein Civil Violence.ipynb
joshainglis/mesa
1c2cfc7f417af9698dbc94da9babbba194fbbaea
[ "Apache-2.0" ]
null
null
null
examples/EpsteinCivilViolence/Epstein Civil Violence.ipynb
joshainglis/mesa
1c2cfc7f417af9698dbc94da9babbba194fbbaea
[ "Apache-2.0" ]
null
null
null
examples/EpsteinCivilViolence/Epstein Civil Violence.ipynb
joshainglis/mesa
1c2cfc7f417af9698dbc94da9babbba194fbbaea
[ "Apache-2.0" ]
null
null
null
226.774436
26,880
0.911309
[ [ [ "This example implements the first model from \"Modeling civil violence: An agent-based computational approach,\" by Joshua Epstein. The paper (pdf) can be found [here](http://www.uvm.edu/~pdodds/files/papers/others/2002/epstein2002a.pdf).\n\nThe model consists of two types of agents: \"Citizens\" (called \"Agents\" in the paper) and \"Cops.\" Agents decide whether or not to rebel by weighing their unhappiness ('grievance') against the risk of rebelling, which they estimate by comparing the local ratio of rebels to cops. \n\n\n", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom EpsteinCivilViolence import Citizen, Cop, CivilViolenceModel ", "_____no_output_____" ], [ "model = CivilViolenceModel(height=40, \n width=40, \n citizen_density=.7, \n cop_density=.074, \n citizen_vision=7, \n cop_vision=7, \n legitimacy=.8, \n max_jail_term=1000, \n max_iters=1000) # cap the number of steps the model takes\nmodel.run_model()", "_____no_output_____" ] ], [ [ "The model's data collector counts the number of citizens who are Active (in rebellion), Jailed, or Quiescent after each step.", "_____no_output_____" ] ], [ [ "model_out = model.dc.get_model_vars_dataframe()", "_____no_output_____" ], [ "ax = model_out.plot()\nax.set_title('Citizen Condition Over Time')\nax.set_xlabel('Step')\nax.set_ylabel('Number of Citizens')\n_ = ax.legend(bbox_to_anchor=(1.35, 1.025))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec817ff2a9cc2a1dcd402491b0717c5cab990c9d
34,751
ipynb
Jupyter Notebook
dev/_42_tabular_rapids.ipynb
tianjianjiang/fastai_dev
cc8e2d64c330c1a93dd84c854b12e700c7d68a8b
[ "Apache-2.0" ]
null
null
null
dev/_42_tabular_rapids.ipynb
tianjianjiang/fastai_dev
cc8e2d64c330c1a93dd84c854b12e700c7d68a8b
[ "Apache-2.0" ]
null
null
null
dev/_42_tabular_rapids.ipynb
tianjianjiang/fastai_dev
cc8e2d64c330c1a93dd84c854b12e700c7d68a8b
[ "Apache-2.0" ]
1
2019-08-30T14:34:07.000Z
2019-08-30T14:34:07.000Z
34.305035
365
0.468159
[ [ [ "#export\nfrom local.imports import *\nfrom local.test import *\nfrom local.core import *\nfrom local.data.all import *\nfrom local.tabular.core import *\nfrom local.notebook.showdoc import show_doc", "_____no_output_____" ], [ "#default_exp tabular.core", "_____no_output_____" ] ], [ [ "# Tabular with rapids\n\n> Basic functions to preprocess tabular data before assembling it in a `DataBunch` on the GPU.", "_____no_output_____" ] ], [ [ "#export\ntry: import cudf,nvcategory\nexcept: print(\"This requires rapids, see https://rapids.ai/ for installation details\")", "_____no_output_____" ] ], [ [ "## TabularProcessors", "_____no_output_____" ] ], [ [ "#export\nclass CategorifyGPU(TabularProc):\n \"Transform the categorical variables to that type.\"\n order = 1\n def setup(self, df, trn_idx=None):\n self.categories = {}\n for n in self.cat_names: \n col = df[n] if trn_idx is None else df.loc[trn_idx, n]\n if col.dtype != \"object\": col = col.astype(\"str\")\n self.categories[n] = nvcategory.from_strings(col.data).keys()\n \n def __call__(self, df):\n for n in self.cat_names:\n if df[n].dtype != \"object\": df[n] = df[n].astype(\"str\")\n df[n] = nvcategory.from_strings(df[n].data).set_keys(self.categories[n]).values()", "_____no_output_____" ], [ "show_doc(CategorifyGPU, title_level=3)", "_____no_output_____" ], [ "cat = CategorifyGPU(cat_names='a')\ndf = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,0,2]}))\ncat.setup(df)\ntest_eq(list(cat.categories['a'].to_host()), ['0','1','2'])\ncat(df)\ntest_eq(df['a'].to_array(), np.array([0,1,2,0,2]))\ndf1 = cudf.from_pandas(pd.DataFrame({'a':[1,0,3,-1,2]}))\ncat(df1)\n#Values that weren't in the training df are sent to -1 (na)\ntest_eq(df1['a'].to_array(), np.array([1,0,-1,-1,2]))", "_____no_output_____" ], [ "cat = CategorifyGPU(cat_names='a')\ndf = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,3,2]}))\ncat.setup(df, trn_idx=[0,1,2])\ntest_eq(list(cat.categories['a'].to_host()), [\"0\",\"1\",\"2\"])\ncat(df)\ntest_eq(df['a'].to_array(), np.array([0,1,2,-1,2]))", "_____no_output_____" ], [ "#export\nclass NormalizeGPU(TabularProc):\n \"Normalize the continuous variables.\"\n order = 2\n def setup(self, df, trn_idx=None):\n self.means,self.stds = {},{}\n for n in self.cont_names:\n col = (df[n] if trn_idx is None else df.loc[trn_idx,n])\n self.means[n],self.stds[n] = col.mean(),col.std(ddof=0)\n \n def __call__(self, df):\n for n in self.cont_names: df[n] = (df[n]-self.means[n]) / (1e-7 + self.stds[n])", "_____no_output_____" ], [ "show_doc(NormalizeGPU, title_level=3)", "_____no_output_____" ], [ "norm = NormalizeGPU(cont_names='a')\ndf = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,3,4]}))\nnorm.setup(df)\nx = np.array([0,1,2,3,4])\nm,s = x.mean(),x.std()\ntest_eq(norm.means, {'a': m})\ntest_close(norm.stds['a'], s)\nnorm(df)\ntest_close(df['a'].to_array(), (x-m)/s)\ndf1 = cudf.from_pandas(pd.DataFrame({'a':[5,6,7]}))\nnorm(df1)\ntest_close(df1['a'].to_array(), (np.array([5,6,7])-m)/s)", "_____no_output_____" ], [ "norm = NormalizeGPU(cont_names='a')\ndf = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,3,4]}))\nnorm.setup(df, trn_idx=[0,1,2])\nx = np.array([0,1,2])\nm,s = x.mean(),x.std()\ntest_eq(norm.means, {'a': m})\ntest_close(norm.stds['a'], s)\nnorm(df)\ntest_close(df['a'].to_array(), (np.array([0,1,2,3,4])-m)/s)", "_____no_output_____" ], [ "#export\ndef get_median(col):\n \"Get the median of a cudf Series `col`\"\n col = col.dropna().reset_index(drop=True)\n return col.sort_values()[len(col)//2]", "_____no_output_____" ], [ "#export\nclass 
FillMissingGPU(TabularProc):\n \"Fill the missing values in continuous columns.\"\n def __init__(self, cat_names=None, cont_names=None, fill_strategy=FillStrategy.median, add_col=True, fill_val=0.):\n super().__init__(cat_names, cont_names)\n self.fill_strategy,self.add_col,self.fill_val = fill_strategy,add_col,fill_val\n \n def setup(self, df, trn_idx=None):\n self.na_dict = {}\n for n in self.cont_names:\n col = df[n] if trn_idx is None else df.loc[trn_idx,n]\n if col.isnull().any():\n if self.fill_strategy == FillStrategy.median: filler = get_median(col)\n elif self.fill_strategy == FillStrategy.constant: filler = self.fill_val\n else: filler = col.dropna().value_counts().index[0]\n self.na_dict[n] = filler\n if self.add_col:\n df[n+'_na'] = df[n].isnull()\n if n+'_na' not in self.cat_names: self.cat_names.append(n+'_na')\n\n def __call__(self, df):\n for n in self.cont_names:\n if n in self.na_dict:\n if self.add_col: df[n+'_na'] = df[n].isnull()\n df[n] = df[n].fillna(self.na_dict[n])\n elif df[n].isnull().sum() != 0:\n raise Exception(f\"\"\"There are nan values in field {n} but there were none in the training set given at setup. \n Please fix those manually.\"\"\")", "_____no_output_____" ], [ "show_doc(FillMissingGPU, title_level=3)", "_____no_output_____" ], [ "fill1,fill2,fill3 = (FillMissingGPU(cont_names='a', fill_strategy=s) \n for s in [FillStrategy.median, FillStrategy.constant, FillStrategy.most_common])\ndf = cudf.from_pandas(pd.DataFrame({'a':[0,1,np.nan,1,2,3,4]}))\ndf1 = df.copy(); df2 = df.copy()\nfill1.setup(df); fill2.setup(df1); fill3.setup(df2)\ntest_eq(fill1.na_dict, {'a': 2.})\ntest_eq(fill2.na_dict, {'a': 0})\ntest_eq(fill3.na_dict, {'a': 1.0})\nfor f in [fill1, fill2, fill3]: test_eq(f.cat_names, ['a_na'])\n\nfill1(df); fill2(df1); fill3(df2)\nfor df_,v in zip([df, df1, df2], [2., 0., 1.]):\n test_eq(df_['a'].to_array(), np.array([0, 1, v, 1, 2, 3, 4]))\n test_eq(df_['a_na'].to_array(), np.array([0, 0, 1, 0, 0, 0, 0]))\n \ndfa = cudf.from_pandas(pd.DataFrame({'a':[np.nan,0,np.nan]}))\ndfa1 = dfa.copy(); dfa2 = dfa.copy()\nfill1(dfa); fill2(dfa1); fill3(dfa2)\nfor df_,v in zip([dfa, dfa1, dfa2], [2., 0., 1.]):\n test_eq(df_['a'].to_array(), np.array([v, 0, v]))\n test_eq(df_['a_na'].to_array(), np.array([1, 0, 1]))", "_____no_output_____" ] ], [ [ "## TabularProcessor -", "_____no_output_____" ] ], [ [ "#export\nclass TabularPreprocessorGPU():\n \"An object that will preprocess dataframes using `procs`\"\n def __init__(self, procs, cat_names=None, cont_names=None, cat_y=None, inplace=True):\n self.cat_names,self.cont_names,self.cat_y,self.inplace = L(cat_names),L(cont_names),L(cat_y),inplace\n self.procs = L(p if isinstance(p, type) else partial(TabularProc, func=p) for p in procs).sorted(key='order')\n \n def __call__(self, df, trn_idx=None):\n \"Call each of `self.procs` on `df`, setup on `df[trn_idx]` if not None\"\n df = df if self.inplace else df.copy()\n if trn_idx is None:\n for p in self.procs: p(df)\n else:\n self.procs,procs = [],self.procs\n for p in procs: \n p_ = p(cat_names=self.cat_names + self.cat_y if p==CategorifyGPU else self.cat_names, cont_names=self.cont_names)\n p_.setup(df, trn_idx=trn_idx)\n p_(df)\n if p!= CategorifyGPU: self.cat_names,self.cont_names = p_.cat_names,p_.cont_names\n else:\n self.classes = {n:'#na#'+L(p_.categories[n].to_host(), use_list=True) for n in self.cat_names + self.cat_y}\n self.procs.append(p_)\n for p in self.procs:\n if isinstance(p, Normalize): self.means,self.stds = p.means,p.stds\n return df", 
"_____no_output_____" ], [ "procs = [NormalizeGPU, CategorifyGPU, FillMissingGPU, noop]\nproc = TabularPreprocessorGPU(procs, 'a', 'b', inplace=False)\n\n#Test reordering and partialize\ntest_eq(proc.procs, [FillMissingGPU, proc.procs[1], CategorifyGPU, NormalizeGPU])\ntest_eq(proc.procs[1].func, TabularProc)\ntest_eq(proc.procs[1].keywords, {'func': noop})\n\ndf = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,1,1,2,0], 'b':[0,1,np.nan,1,2,3,4]}))\n\n#Test setup and apply on df_trn\ndf1 = proc(df, trn_idx=range_of(df))\ntest_eq(df1['a'].to_array(), [0,1,2,1,1,2,0])\ntest_eq(df1['b_na'].to_array(), [0,0,1,0,0,0,0])\nx = np.array([0,1,2,1,2,3,4])\nm,s = x.mean(),x.std()\ntest_close(df1['b'].to_array(), (x-m)/s)\ntest_eq(proc.classes, {'a': ['#na#','0','1','2'], 'b_na': ['#na#','False','True']})\n\n#Test apply on df_val\ndf = cudf.from_pandas(pd.DataFrame({'a':[2,1,3], 'b':[4,5,np.nan]}))\ndf1 = proc(df)\ntest_eq(proc.classes, {'a': ['#na#','0','1','2'], 'b_na': ['#na#','False','True']})\ntest_eq(df1['a'].to_array(), [2,1,-1])\ntest_eq(df1['b_na'].to_array(), [0,0,1])\nx = np.array([4, 5, 2])\ntest_close(df1['b'].to_array(), (x-m)/s)\n\n#Test apply on cat_y\nprocs = [NormalizeGPU, CategorifyGPU, FillMissingGPU, noop]\nproc = TabularPreprocessorGPU(procs, 'a', 'b', cat_y='c', inplace=False)\n\ndf = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,1,1,2,0], 'b':[0,1,np.nan,1,2,3,4], 'c': ['b','a','b','a','a','b','a']}))\ndf1 = proc(df, trn_idx=range_of(df))\ntest_eq(proc.cat_names, ['a', 'b_na'])\ntest_eq(df1['a'].to_array(), [0,1,2,1,1,2,0])\ntest_eq(df1['b_na'].to_array(), [0,0,1,0,0,0,0])\ntest_eq(df1['c'].to_array(), [1,0,1,0,0,1,0])\nx = np.array([0,1,2,1,2,3,4])\nm,s = x.mean(),x.std()\ntest_close(df1['b'].to_array(), (x-m)/s)\ntest_eq(proc.classes, {'a': ['#na#','0','1','2'], 'b_na': ['#na#','False','True'], 'c': ['#na#','a','b']})", "_____no_output_____" ], [ "#export\ndef process_df_gpu(df, splits, procs, cat_names=None, cont_names=None, cat_y=None, inplace=True):\n \"Process `df` with `procs` and returns the processed dataframe and the `TabularProcessorGPU` associated\"\n proc = TabularPreprocessorGPU(procs, cat_names, cont_names, cat_y, inplace=inplace)\n res = proc(df, trn_idx=splits[0])\n return res,proc", "_____no_output_____" ] ], [ [ "Pass the same `splits` as you will use for splitting the data, so that the setup is only done on the training set. `cat_names` are the names of the categorical variables, `cont_names` the continous ones, `cat_y` are the names of the dependent variables that are categories. 
If `inplace=True`, processing is applied inplace, otherwis it creates a copy of `df`.", "_____no_output_____" ] ], [ [ "#export\nclass TabularLine(pd.Series):\n \"A line of a dataframe that knows how to show itself\"\n def show(self, ctx=None, **kwargs):\n if ctx is None: return self\n else: return ctx.append(self)", "_____no_output_____" ], [ "#export\nclass TensorTabular(tuple):\n \n def get_ctxs(self, max_n=10, **kwargs):\n n_samples = min(self[0].shape[0], max_n)\n df = pd.DataFrame(index = range(n_samples))\n return [df.iloc[i] for i in range(n_samples)]\n \n def display(self, ctxs): display_df(pd.DataFrame(ctxs))", "_____no_output_____" ], [ "#export\nclass ReadTabLine(ItemTransform):\n def __init__(self, proc): \n self.proc = proc\n self.o2is = {n: defaultdict(int, {v:i for i,v in enumerate(proc.classes[n])}) for n in proc.cat_names}\n \n def encodes(self, row): \n cats = [self.o2is[n][row[n]] for n in self.proc.cat_names]\n conts = [row[n] for n in self.proc.cont_names]\n return TensorTabular((tensor(cats).long(),tensor(conts).float()))\n \n def decodes(self, o):\n dic = {c: self.proc.classes[c][v] for v,c in zip(o[0], self.proc.cat_names)}\n ms = getattr(self.proc, 'means', {c:0 for c in self.proc.cont_names})\n ss = getattr(self.proc, 'stds', {c:1 for c in self.proc.cont_names})\n dic.update({c: (v*ss[c] + ms[c]).item() for v,c in zip(o[1], self.proc.cont_names)})\n return TabularLine(pd.Series(dic))", "_____no_output_____" ], [ "#export\nclass ReadTabTarget(ItemTransform):\n def __init__(self, proc): \n self.proc = proc\n assert len(proc.cat_y) == 1\n self.o2i = defaultdict(int, {v:i for i,v in enumerate(proc.classes[proc.cat_y[0]])})\n \n def encodes(self, row): return self.o2i[row[self.proc.cat_y[0]]]-1\n def decodes(self, o) : return Category(self.proc.classes[self.proc.cat_y[0]][o+1])", "_____no_output_____" ], [ "tds = TfmdDS(df1, tfms=[[ReadTabLine(proc)], ReadTabTarget(proc)], use_list=None)", "_____no_output_____" ], [ "enc = tds[1]\ntest_eq(enc[0][0], tensor([2,1]))\ntest_close(enc[0][1], tensor([-0.628828]))\ntest_eq(enc[1], 0)\n\ndec = tds.decode(enc)\nassert isinstance(dec[0], TabularLine)\ntest_close(dec[0], pd.Series({'a': 1, 'b_na': False, 'b': 1}))\ntest_eq(dec[1], 'a')\n\ntest_stdout(lambda: print(tds.show_at(1)), \"\"\"a 1\nb_na False\nb 1\ncategory a\ndtype: object\"\"\")", "_____no_output_____" ] ], [ [ "## Integration example", "_____no_output_____" ] ], [ [ "path = untar_data(URLs.ADULT_SAMPLE)\ndf = pd.read_csv(path/'adult.csv')\ndf.head()", "_____no_output_____" ], [ "cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']\ncont_names = ['age', 'fnlwgt', 'education-num']\nprocs = [Categorify, FillMissing, Normalize]", "_____no_output_____" ], [ "splits = RandomSplitter()(range_of(df))\ndf1,proc = process_df(df, splits, procs=procs, cat_names=cat_names, cont_names=cont_names, cat_y=\"salary\", inplace=False)", "_____no_output_____" ], [ "dsrc = DataSource(df1, filts=splits, tfms=[[ReadTabLine(proc)], [ReadTabTarget(proc)]])", "_____no_output_____" ], [ "dbch = dsrc.databunch(bs=64)\ndbch.show_batch()", "_____no_output_____" ] ], [ [ "## Export -", "_____no_output_____" ] ], [ [ "#hide\nfrom local.notebook.export import notebook2script\nnotebook2script(all_fs=True)", "Converted 00_test.ipynb.\nConverted 01_core.ipynb.\nConverted 01a_dataloader.ipynb.\nConverted 01a_script.ipynb.\nConverted 02_transforms.ipynb.\nConverted 03_pipeline.ipynb.\nConverted 04_data_external.ipynb.\nConverted 05_data_core.ipynb.\nConverted 
06_data_source.ipynb.\nConverted 07_vision_core.ipynb.\nConverted 08_pets_tutorial.ipynb.\nConverted 09_vision_augment.ipynb.\nConverted 11_layers.ipynb.\nConverted 12_optimizer.ipynb.\nConverted 13_learner.ipynb.\nConverted 14_callback_schedule.ipynb.\nConverted 15_callback_hook.ipynb.\nConverted 16_callback_progress.ipynb.\nConverted 17_callback_tracker.ipynb.\nConverted 18_callback_fp16.ipynb.\nConverted 19_callback_mixup.ipynb.\nConverted 20_metrics.ipynb.\nConverted 21_tutorial_imagenette.ipynb.\nConverted 30_text_core.ipynb.\nConverted 31_text_data.ipynb.\nConverted 32_text_models_awdlstm.ipynb.\nConverted 33_test_models_core.ipynb.\nConverted 34_callback_rnn.ipynb.\nConverted 35_tutorial_wikitext.ipynb.\nConverted 36_text_models_qrnn.ipynb.\nConverted 40_tabular_core.ipynb.\nConverted 41_tabular_model.ipynb.\nConverted 50_data_block.ipynb.\nConverted 60_vision_models_xresnet.ipynb.\nConverted 90_notebook_core.ipynb.\nConverted 91_notebook_export.ipynb.\nConverted 92_notebook_showdoc.ipynb.\nConverted 93_notebook_export2html.ipynb.\nConverted 94_index.ipynb.\nConverted 95_synth_learner.ipynb.\nConverted notebook2jekyll.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec818215e8699ab0617e319c44d5f0fc8bb26b5a
10,865
ipynb
Jupyter Notebook
notebooks/4.0-bri-inference-nb.ipynb
KidElectric/rfcx_kaggle
95e57354166fa70f2b59b225ed7dd3205c103f4e
[ "MIT" ]
null
null
null
notebooks/4.0-bri-inference-nb.ipynb
KidElectric/rfcx_kaggle
95e57354166fa70f2b59b225ed7dd3205c103f4e
[ "MIT" ]
null
null
null
notebooks/4.0-bri-inference-nb.ipynb
KidElectric/rfcx_kaggle
95e57354166fa70f2b59b225ed7dd3205c103f4e
[ "MIT" ]
null
null
null
29.931129
164
0.547078
[ [ [ "# from fastai import learner\nfrom fastai.vision.all import *\nimport numpy as np\nimport os\nimport pandas as pd\nimport librosa as lb\nimport librosa.display\nimport soundfile as sf\nimport matplotlib.patches as patch\nimport matplotlib.pyplot as plt\nimport IPython.display as ipd\nfrom pathlib import Path\nfrom tqdm.notebook import tqdm\nfrom scipy.io import wavfile\nfrom torch import cuda\nimport shutil\n\nCC_ROOT=Path(\"D:\\\\KidElectric\\\\rfcx_kaggle\") #Cookiecutter datascience-style project\nCC_DATA_TEST=CC_ROOT.joinpath('data').joinpath('interim').joinpath('test')\nCC_MODEL_ROOT=CC_ROOT.joinpath('models').joinpath('fit')\n\nDATA_ROOT = CC_ROOT.joinpath('data')\nTRAIN_AUDIO_ROOT = DATA_ROOT.joinpath('raw').joinpath('train')#Update to point to cookiecutter data/raw/train\nTEST_AUDIO_ROOT = DATA_ROOT.joinpath('raw').joinpath('test')#Update to point to cookiecutter data/raw/test\n\n\ndf_train = pd.DataFrame({\n \"recording_id\": [path.stem for path in TRAIN_AUDIO_ROOT.glob(\"*.flac\")],\n})\n\ndf_test = pd.DataFrame({\n \"recording_id\": [path.stem for path in TEST_AUDIO_ROOT.glob(\"*.flac\")],\n})\n\n\ndf_tp=pd.read_csv(CC_ROOT.joinpath('references').joinpath('train_tp.csv')).set_index('recording_id')\n\ndf_fp=pd.read_csv(CC_ROOT.joinpath('references').joinpath('train_fp.csv')).set_index('recording_id')\n", "_____no_output_____" ], [ "cuda.empty_cache()", "_____no_output_____" ], [ "#Load a model:\ndef which_singer(x): return x.parts[-1].split('_')[1] #Need to define this function first\n# model_name='rn101_50ep_512_clip_full_aug2_noflip_FB_120320_mixup0p5.pkl'\nmodel_name='rn101_50ep_lr8p32e-10_434_clip_TP_FP_NP_aug2_noflip_120420_mixup0p5.pkl'\nlearn=load_learner(CC_MODEL_ROOT.joinpath(model_name))", "_____no_output_____" ], [ "# Predict many images at once \nimgs=[png for png in CC_DATA_TEST.joinpath('clip_decomp').glob('*.png')]\ndl = learn.dls.test_dl(imgs, item_tfms=Resize(512,ResizeMethod.Squish),num_workers=0,bs=64)\na,b=learn.get_preds(dl=dl) # or torch.softmax?\nprob_order= learn.dls.vocab\nrecs=df_test['recording_id']\ndf_prob=pd.DataFrame(index=df_test['recording_id'],columns=['s%d' % i for i in range(0,24,1)])\nm = nn.Softmax(dim=1)\nprobs=m(a)\nfor rec in recs:\n temp=np.zeros((len(pngs),len(prob_order)))\n n=0\n for i,img in enumerate(imgs):\n if rec == img.parts[-1].split('_')[0]:\n temp[n,:]=probs[i,:]\n n += 1\n mp=np.mean(temp,axis=0) #Or should it be mean? 
-- doesn't seem to matter!\n for i,spec in enumerate(prob_order):\n if int(spec) < 24:\n df_prob['s%s' % spec][rec]=float(mp[i])\ndf_prob.to_csv(CC_MODEL_ROOT.parent.joinpath('predictions').joinpath('%s-sum.csv' % model_name[0:-4]))\nprint('Finished')", "_____no_output_____" ], [ "# Predict many images from many models:\nrs = 33\nnmodels=5\npath =CC_ROOT.joinpath('data').joinpath('processed').joinpath('resamp_%d' % rs)\nimgs=[png for png in CC_DATA_TEST.joinpath('clip_decomp').glob('*.png')]\nim_size=512 # 224\nnspec=24\ndef which_singer(x): return x.parts[-1].split('_')[1] #Need to define this function first\nfor ii in [0,1,3,4]: # range(nmodels):\n# model_name='rn50_50ep_%d_resamp%d_TP_FP_NP_aug2_noflip_%d.pkl' % (im_size,rs,ii) #Each of these models was pretty bad on their own (~0.65 error rate)\n# model_name='rn101_50ep_%d_resamp%d_TP_FP_NP_aug2_noflip_mixup0p5_%d.pkl' % (im_size,rs,ii)\n# model_name='rn18_30ep_%d_resamp%d_TP_aug1_noflip_%d.pkl' %(im_size,rs,ii)\n model_name='rn101_50ep_%d_resamp%d_TP_aug2_noflip_mixup0p5_%d.pkl' % (im_size,rs,ii)\n mpath=path.joinpath('models')\n print('Loading model %s' % mpath.joinpath(model_name))\n learn=load_learner(mpath.joinpath(model_name))\n dl = learn.dls.test_dl(imgs, item_tfms=Resize(im_size,ResizeMethod.Squish),num_workers=0,bs=64)\n print('\\tPredicting...')\n a,b=learn.get_preds(dl=dl) # or torch.softmax?\n prob_order= learn.dls.vocab\n recs=df_test['recording_id']\n df_prob=pd.DataFrame(index=df_test['recording_id'],columns=['s%d' % i for i in range(0,24,1)])\n m = nn.Softmax(dim=1)\n probs=m(a)\n for rec in recs:\n temp=np.zeros((len(imgs),len(prob_order)))\n n=0\n for i,img in enumerate(imgs):\n if rec == img.parts[-1].split('_')[0]:\n temp[n,:]=probs[i,:]\n n += 1\n mp=np.mean(temp,axis=0) #Or should it be mean? -- doesn't seem to matter!\n for i,spec in enumerate(prob_order):\n if int(spec) < nspec:\n df_prob['s%s' % spec][rec]=float(mp[i])\n print('\\tSaving')\n df_prob.to_csv(mpath.joinpath('%s.csv' % model_name[0:-4]))\n \n#Load in multiple .csv and take the mean:\ncsv_fns=mpath.glob( model_name[0:-7] + '*.csv')\ndf_load={}\nfor i,fn in enumerate(csv_fns):\n df_load[i]= pd.read_csv(fn)\nb=df_load[0]\nfor col in df_load[0].columns:\n if col != 'recording_id':\n for k in df_load.keys():\n b[col]=b[col] + df_load[k][col]\n b[col]= b[col]/nspec\nb.head()\nb.to_csv(mpath.joinpath('%s_%d_averaged.csv' % (model_name[0:-4],len(df_load))), index=False)\nprint('Finished')", "Loading model D:\\KidElectric\\rfcx_kaggle\\data\\processed\\resamp_33\\models\\rn101_50ep_512_resamp33_TP_aug2_noflip_mixup0p5_0.pkl\n\tPredicting...\n" ], [ "#Load in multiple .csv and take the mean:\ncsv_fns=mpath.glob( model_name[0:-7] + '*.csv')\ndf_load={}\nfor i,fn in enumerate(csv_fns):\n df_load[i]= pd.read_csv(fn)\nb=df_load[0]\nfor col in df_load[0].columns:\n if col != 'recording_id':\n for k in df_load.keys():\n b[col]=b[col] + df_load[k][col]\n b[col]= b[col]/nspec\nb.head()\nb.to_csv(mpath.joinpath('%s_%d_averaged.csv' % (model_name[0:-4],len(df_load))), index=False)\nprint('Finished')", "Finished\n" ], [ "len(df_load)", "_____no_output_____" ], [ "mpath", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec81877c8cb61892d3193def47095f5c155d0f47
85,111
ipynb
Jupyter Notebook
content/labs/lab02/notebook/cs109a_lab2_more_pandas.ipynb
byrne0319/2019-CS109A
b7733c58fa51cf1022247f72a8ed0a575951ceae
[ "MIT" ]
null
null
null
content/labs/lab02/notebook/cs109a_lab2_more_pandas.ipynb
byrne0319/2019-CS109A
b7733c58fa51cf1022247f72a8ed0a575951ceae
[ "MIT" ]
null
null
null
content/labs/lab02/notebook/cs109a_lab2_more_pandas.ipynb
byrne0319/2019-CS109A
b7733c58fa51cf1022247f72a8ed0a575951ceae
[ "MIT" ]
null
null
null
73.945265
1,619
0.676023
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec8188f3bc309ae0becda60c69726be710000e38
556,464
ipynb
Jupyter Notebook
clean.ipynb
ronaldokun/wino
e86127f1d1633e22c9c3e6e20deeeabdb9eea381
[ "Apache-2.0" ]
null
null
null
clean.ipynb
ronaldokun/wino
e86127f1d1633e22c9c3e6e20deeeabdb9eea381
[ "Apache-2.0" ]
null
null
null
clean.ipynb
ronaldokun/wino
e86127f1d1633e22c9c3e6e20deeeabdb9eea381
[ "Apache-2.0" ]
null
null
null
153.719337
91,600
0.831872
[ [ [ "import pandas_profiling as pp\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\nimport seaborn as sns\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "def CalcOutliers(df_num): \n '''\n \n Leonardo Ferreira 20/10/2018\n Set a numerical value and it will calculate the upper, lower and total number of outliers\n It will print a lot of statistics of the numerical feature that you set on input\n \n '''\n # calculating mean and std of the array\n data_mean, data_std = np.mean(df_num), np.std(df_num)\n\n # seting the cut line to both higher and lower values\n # You can change this value\n cut = data_std * 3\n\n #Calculating the higher and lower cut values\n lower, upper = data_mean - cut, data_mean + cut\n\n # creating an array of lower, higher and total outlier values \n outliers_lower = [x for x in df_num if x < lower]\n outliers_higher = [x for x in df_num if x > upper]\n outliers_total = [x for x in df_num if x < lower or x > upper]\n\n # array without outlier values\n outliers_removed = [x for x in df_num if x > lower and x < upper]\n \n print('Identified lowest outliers: %d' % len(outliers_lower)) # printing total number of values in lower cut of outliers\n print('Identified upper outliers: %d' % len(outliers_higher)) # printing total number of values in higher cut of outliers\n print('Identified outliers: %d' % len(outliers_total)) # printing total number of values outliers of both sides\n print('Non-outlier observations: %d' % len(outliers_removed)) # printing total number of non outlier values\n print(\"Total percentual of Outliers: \", round((len(outliers_total) / len(outliers_removed) )*100, 4)) # Percentual of outliers in points\n \n return", "_____no_output_____" ], [ "def dist(df, col, title, x, y):\n\n # define the size of figures that I will build\n plt.figure(figsize=(16,5))\n\n g = sns.countplot(x=col, data=df, color='forestgreen') # seting the seaborn countplot to known the points distribuition\n g.set_title(title, fontsize=20) # seting title and size of font\n g.set_xlabel(x, fontsize=15) # seting xlabel and size of font\n g.set_ylabel(y, fontsize=15) # seting ylabel and size of font\n plt.show() #rendering the graphs", "_____no_output_____" ], [ "def analise_cat_1(df,var,label = '',fl_ordena=0, num = False, q = 0, q2 = 0, y = 'ratingValue_flag'):\n if label == '':\n label = var\n df_ = df.copy()\n if num:\n if q == 0:\n if q2 == 0:\n df_[var+'_cat'] = pd.qcut(df_[var],q= [0.0,0.2,0.5,0.8,1.0],retbins=True,duplicates='drop')[0].cat.add_categories('missing')\n var= var+'_cat'\n else:\n df_[var+'_cat'] = pd.qcut(df_[var],q= q2,retbins=True,duplicates='drop')[0].cat.add_categories('missing')\n var= var+'_cat'\n else :\n df_[var+'_cat'] = pd.cut(df_[var],bins=q).cat.add_categories('missing')\n var= var+'_cat'\n print('\\n')\n print('Análise da variável {}'.format(var))\n print('# valores distintos {}'.format(df_[var].nunique()))\n try:\n df_[var] = df_[var].fillna('missing')\n except :\n df_[var] = df_[var].cat.add_categories('missing').fillna('missing')\n total_maus = df_[y].sum()\n total_bons = df_.shape[0] - df_[y].sum()\n def tx(x):\n return sum(x)*100/sum(1 -x +x)\n def pct(x):\n return sum(1-x+x)*1.0/ (df_.shape[0])\n def pct_maus(x):\n return (sum(x)/total_maus )\n def rr(x):\n return (sum(x)/total_maus )/(sum(1-x)/total_bons)\n def WoE(x):\n return np.log(rr(x))\n def IV(x):\n return -(sum(1-x)/total_bons - sum(x)/total_maus)*WoE(x)\n s = df_.groupby(var).agg({y: [np.size,pct,pct_maus, tx,rr,WoE,IV]})[y]\n def 
color_negative_green(val):\n color = 'red' if val < 0 else 'black'\n return 'color: %s' % color\n print('IV : {:.3f}'.format(s.IV.sum()))\n #print('Beta : {:.3f}'.format(dic_beta[var]))\n t = s.style.applymap(color_negative_green)\n display(pd.DataFrame(df_[var].value_counts()).join(pd.DataFrame(df_[var].value_counts(normalize= True)*100), lsuffix = 'k').rename(columns = {var+'k': '#', var: '%'}))\n display(t)\n by_var = df_.groupby(var).agg({y:[np.size, np.mean]})[y]\n if fl_ordena ==1 :\n by_var = by_var.sort_values(by = 'mean')\n Y1 = by_var['size']\n Y2 = by_var['mean']\n Y_mean = np.ones(shape=(len(Y1.index)))* df_[y].mean()\n index = np.arange(len(Y1.index))\n #with plt.style.context('my_custom_style'):\n if True:\n plt.bar(index,Y1,alpha = 0.3, color= 'gray')\n plt.grid(False)\n plt.xticks(rotation = 20 if var[:2] not in ( 'fl', 'cd') else 0)\n plt.ylabel('# registros')\n plt.twinx()\n plt.gca().set_xticks(index)\n plt.gca().set_xticklabels([Y1.index[i] for i in index], rotation = 40)\n plt.plot(index,Y_mean,label= 'tx. média evento')\n plt.plot(index,Y2,marker = 'o',label= 'tx. evento')\n plt.gca().set_yticklabels([ ' {:.2f}%'.format( i*100) for i in plt.gca().get_yticks()])\n plt.grid(False)\n plt.title('Bivariada {}'.format(label))\n plt.ylabel('tx. evento')\n plt.xlabel(label)\n if var[:2] in ( 'fl', 'cd'):\n plt.legend(loc= 9,bbox_to_anchor=(0.5, -0.1))\n else:\n plt.legend(loc= 9,bbox_to_anchor=(0.5, -0.35))\n plt.show()\n print('\\n')\n return label,np.round(s.IV.sum(),3), np.round(s.rr.mean(),3)", "_____no_output_____" ], [ "df = pd.read_csv('all_wines.csv')", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.columns = ['Link', 'Nome', 'País', 'Preço_Sócio', 'Preço_Normal', 'Pontuação',\n 'Avaliações', 'Somelier', 'Decantação', 'Origem', 'Olfativo',\n 'Amadurecimento', 'Uvas', 'Gustativo', 'Visual', 'Harmonização',\n 'Temperatura', 'Safra', 'Classificação', 'Teor_Alcoólico',\n 'Vinícola', 'Tipo', 'Potencial_Guarda']", "_____no_output_____" ] ], [ [ "Remover kits de vinhos, estes não representam amostras de interesse. 
", "_____no_output_____" ] ], [ [ "kit = ((df.Nome.str.contains('Kit')) | (df.Nome.str.contains(\"KIT\")) | (df.Nome.str.contains('WineBox')))", "_____no_output_____" ], [ "df[kit].shape", "_____no_output_____" ], [ "df = df[~kit]", "_____no_output_____" ], [ "df[df.Nome.str.contains('Experiencias')].shape", "_____no_output_____" ], [ "df = df[~df.Nome.str.contains('Experiencias')]", "_____no_output_____" ], [ "df = df[~df.Nome.str.contains(\"Vinho Misterioso\")]", "_____no_output_____" ] ], [ [ "Transformar Strings em Dados Numéricos", "_____no_output_____" ] ], [ [ "df.Temperatura.unique()", "_____no_output_____" ], [ "df['Temperatura'] = df.Temperatura.str.replace(\" °C\", '').astype(float)", "_____no_output_____" ], [ "df.Temperatura.unique()", "_____no_output_____" ], [ "df.Teor_Alcoólico.unique()", "_____no_output_____" ], [ "df['Teor_Alcoólico'] = df.Teor_Alcoólico.str.replace(\"% ABV\", '').astype(float)", "_____no_output_____" ], [ "df.Teor_Alcoólico.unique()", "_____no_output_____" ], [ "df.Potencial_Guarda.unique()", "_____no_output_____" ], [ "df['Potencial_Guarda'] = df.Potencial_Guarda.str.replace(\" anos\", '')\ndf['Potencial_Guarda'] = df.Potencial_Guarda.str.replace(\" ano\", '').astype(float)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3780 entries, 0 to 3905\nData columns (total 23 columns):\nLink 3780 non-null object\nNome 3780 non-null object\nPaís 0 non-null float64\nPreço_Sócio 765 non-null float64\nPreço_Normal 765 non-null float64\nPontuação 2623 non-null float64\nAvaliações 2631 non-null float64\nSomelier 3750 non-null object\nDecantação 901 non-null object\nOrigem 3014 non-null object\nOlfativo 3757 non-null object\nAmadurecimento 3400 non-null object\nUvas 3753 non-null object\nGustativo 3758 non-null object\nVisual 3756 non-null object\nHarmonização 3756 non-null object\nTemperatura 3737 non-null float64\nSafra 3401 non-null float64\nClassificação 3566 non-null object\nTeor_Alcoólico 3747 non-null float64\nVinícola 3778 non-null object\nTipo 3753 non-null object\nPotencial_Guarda 3727 non-null float64\ndtypes: float64(9), object(14)\nmemory usage: 708.8+ KB\n" ], [ "df['País'] = df.Origem.apply(lambda x : str(x).split('-')[0])", "_____no_output_____" ], [ "df['Região'] = df.Origem.fillna('-').apply(lambda x : str(x).split('-')[1])", "_____no_output_____" ], [ "df.loc[df.País == 'nan', 'País'] = None", "_____no_output_____" ], [ "df.loc[df.Região == '', 'Região'] = None", "_____no_output_____" ], [ "df = df.drop('Origem', axis=1)", "_____no_output_____" ], [ "df.set_index('Nome', inplace=True)", "_____no_output_____" ], [ "import re\ndef match(x):\n match = re.match('\\d+', str(x))\n if match:\n return match.group()\n return None", "_____no_output_____" ], [ "df['Decantação'] = df.Decantação.apply(match)", "_____no_output_____" ], [ "df['Decantação'] = df.Decantação.astype(float)", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nIndex: 3780 entries, Maison Le Star Rosé On Ice to Canepa Reserva Famiglia Carménère 2017\nData columns (total 22 columns):\nLink 3780 non-null object\nPaís 3014 non-null object\nPreço_Sócio 765 non-null float64\nPreço_Normal 765 non-null float64\nPontuação 2623 non-null float64\nAvaliações 2631 non-null float64\nSomelier 3750 non-null object\nDecantação 890 non-null float64\nOlfativo 3757 non-null object\nAmadurecimento 3400 non-null object\nUvas 3753 non-null object\nGustativo 3758 non-null object\nVisual 3756 
non-null object\nHarmonização 3756 non-null object\nTemperatura 3737 non-null float64\nSafra 3401 non-null float64\nClassificação 3566 non-null object\nTeor_Alcoólico 3747 non-null float64\nVinícola 3778 non-null object\nTipo 3753 non-null object\nPotencial_Guarda 3727 non-null float64\nRegião 3014 non-null object\ndtypes: float64(9), object(13)\nmemory usage: 679.2+ KB\n" ], [ "NUM = ['Preço_Sócio', 'Preço_Normal', 'Pontuação', 'Avaliações', 'Decantação', 'Temperatura', 'Safra', 'Teor_Alcoólico', 'Potencial_Guarda']", "_____no_output_____" ] ], [ [ "## Preços\nA distribuição de preços possui alta amplitude, porém dispersão muito menor (IQR), como é de se esperar para vinhos. Um intervalo de preços de aproximadamente ~ `R$ 12.100` e uma amplitude interquartil de `R$ 119,43`\n\n * Vinho mais barato: R$ 22,24\n\n * Vinho mais caro: R$ 12.121,06\n\nClaro que a maioria do catálogo tem um preço mais modesto, nesse caso a mediana e não a média é muito mais robusta a outliers.\n\n * Media: R$ 329,95\n\n * Mediana; R$ 75,18", "_____no_output_____" ] ], [ [ "CalcOutliers(df['Preço_Normal'])", "Identified lowest outliers: 0\nIdentified upper outliers: 15\nIdentified outliers: 15\nNon-outlier observations: 750\nTotal percentual of Outliers: 2.0\n" ], [ "preços = df.Preço_Normal.notna()", "_____no_output_____" ], [ "plt.figure(figsize=(12,5))\n\ng = sns.distplot(df[df.Preço_Normal < 300].Preço_Normal, color='darkgreen')\ng.set_title(\"Distribuição de Preços Abaixo de 300 Reais\", fontsize=20)\ng.set_xlabel(\"Preços(R$)\", fontsize=15)\ng.set_ylabel(\"Distribuição da Frequência\", fontsize=15)\n\n\nplt.show()", "_____no_output_____" ], [ "pd.qcut(df[preços].Preço_Normal, 3, retbins=True)", "_____no_output_____" ], [ "df.loc[preços, 'Preços_Cat'] = pd.qcut(df[preços].Preço_Normal, 3, labels=[1, 2, 3])", "_____no_output_____" ], [ "df['Preços_Cat']", "_____no_output_____" ], [ "total = len(df[preços])\nplt.figure(figsize=(14,6))\n\ng = sns.countplot(x='Preços_Cat', color='darkgreen',\n data=df[preços])\ng.set_title(\"Categoria de Preços\", fontsize=20)\ng.set_xlabel(\"Categorias \", fontsize=15)\ng.set_ylabel(\"Total\", fontsize=15)\n\nsizes=[]\n\nfor p in g.patches:\n height = p.get_height()\n sizes.append(height)\n g.text(p.get_x()+p.get_width()/2.,\n height + 3,\n '{:1.2f}%'.format((height/total)*100),\n ha=\"center\", fontsize=14) \n \ng.set_ylim(0, max(sizes) * 1.15)\n\nplt.show()", "_____no_output_____" ] ], [ [ "Como é de se esperar, os outliers existentes são superiores, i.e. 
Vinhos raros e/ou altamente premiados.", "_____no_output_____" ] ], [ [ "#profile = pp.ProfileReport(df[NUM])", "_____no_output_____" ], [ "#profile", "_____no_output_____" ] ], [ [ "## Pontuação\n\nOs vinhos no geral são bem pontuados:\n * Média: 3.88\n * Mediana: 4.0", "_____no_output_____" ], [ "Adaptado de https://www.kaggle.com/kabure/wine-review-s-eda-recommend-systems", "_____no_output_____" ] ], [ [ "# define the size of figures that I will build\nplt.figure(figsize=(16,5))\n\nplt.subplot(1,2,1) # this will create a grid of 1 row and 2 columns; this is the first graphic\ng = sns.countplot(x='Pontuação', data=df, color='forestgreen') # seting the seaborn countplot to known the points distribuition\ng.set_title(\"Distribuição da Contagem dos Pontos\", fontsize=20) # seting title and size of font\ng.set_xlabel(\"Pontos\", fontsize=15) # seting xlabel and size of font\ng.set_ylabel(\"Contagem\", fontsize=15) # seting ylabel and size of font\n\nplt.subplot(1,2,2) # this will set the second graphic of our grid\nplt.scatter(range(df.shape[0]), np.sort(df.Pontuação.values), color='forestgreen') # creating a cumulative distribution\nplt.xlabel('Index', fontsize=15) # seting xlabel and size of font\nplt.ylabel('Distribuição dos Pontos', fontsize=15) # seting ylabel and size of font\nplt.title(\"Distribuição dos Pontos\", fontsize=20) # seting title and size of font\n\nplt.show() #rendering the graphs", "_____no_output_____" ] ], [ [ "A mediana é próxima da média e portanto a distribuição se aproxima a uma distribuição normal em torno de 4 pontos. Poucos vinhos possuem pontuação menor que 3.\n\nNo entanto há algumas ressalvas a serem consideradas como o número de diferentes avaliações para o mesmo vinho, i.e. a interpretação da pontuação deve ser considerada pelo número de avaliações recebidas. Em outras palavras, a confiabilidade na pontuação é diretamente proporcional à contagem de avaliações distintas. Um vinho com 5 pontos e somente uma avaliação não possui o mesmo peso que um vinho com 4.5 porém com 100 avaliações. A confiabilidade do 2º caso é muito maior.\n\nVamos analisar a contagem de avaliações a seguir", "_____no_output_____" ], [ "## # de Avaliações\nO Número de Avaliações por vinho é altamente \"skewed\". Somente cerca de 1/3 dos vinhos somente foram avaliados e mesmo dentre esses o número de avaliações é muito baixo. 
Existem poucos vinhos extremamente populares com um grande número de avaliações.\n* Média: 16.5\n* Mediana: 6\n* IQR: 16\n* Máximo", "_____no_output_____" ] ], [ [ "df['Avaliações'] = df.Avaliações.fillna(0)", "_____no_output_____" ], [ "pd.qcut(df[df.Avaliações != 0].Avaliações.values, 5) # , labels=['muito_baixo', 'baixo', 'médio', 'bom', 'alto'], retbins=True)", "_____no_output_____" ], [ "def cat_avaliações(x):\n if x == 0:\n return 0\n elif 0 < x <10:\n return 1\n #elif 5 < x <=10:\n # return 2\n else:\n return 2", "_____no_output_____" ], [ "df[df.Avaliações.notna()].shape", "_____no_output_____" ], [ "df['Pontuação_Cat'] = df.Avaliações.apply(cat_avaliações) ", "_____no_output_____" ], [ "total = len(df)\nplt.figure(figsize=(14,6))\n\ng = sns.countplot(x='Pontuação_Cat', color='darkgreen',\n data=df)\ng.set_title(\"Categoria da Pontuação Ponderada pelo número de Avaliações\", fontsize=20)\ng.set_xlabel(\"Categories \", fontsize=15)\ng.set_ylabel(\"Total Count\", fontsize=15)\n\nsizes=[]\n\nfor p in g.patches:\n height = p.get_height()\n sizes.append(height)\n g.text(p.get_x()+p.get_width()/2.,\n height + 3,\n '{:1.2f}%'.format((height/total)*100),\n ha=\"center\", fontsize=14) \n \ng.set_ylim(0, max(sizes) * 1.15)\n\nplt.show()", "_____no_output_____" ], [ "guarda = df.Potencial_Guarda.notna()\npd.qcut(df[guarda].Potencial_Guarda, 3)", "_____no_output_____" ], [ "def estoque_cat(points):\n if points < 5:\n return 1\n elif 5 <= points <= 10:\n return 2\n else:\n return 3 ", "_____no_output_____" ], [ "df['Estoque_Cat'] = df.Potencial_Guarda.apply(estoque_cat)", "_____no_output_____" ], [ "total = len(df)\nplt.figure(figsize=(14,6))\n\ng = sns.countplot(x='Estoque_Cat', color='darkgreen',\n data=df)\ng.set_title(\"Categoria dos Vinhos Ponderada pelo Potencial de Guarda\", fontsize=20)\ng.set_xlabel(\"Categorias\", fontsize=15)\ng.set_ylabel(\"Total\", fontsize=15)\n\nsizes=[]\n\nfor p in g.patches:\n height = p.get_height()\n sizes.append(height)\n g.text(p.get_x()+p.get_width()/2.,\n height + 3,\n '{:1.2f}%'.format((height/total)*100),\n ha=\"center\", fontsize=14) \n \ng.set_ylim(0, max(sizes) * 1.15)\n\nplt.show()", "_____no_output_____" ] ], [ [ "Vinhos ", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ], [ "df.sort_values('Avaliações', ascending=False)", "_____no_output_____" ], [ "df.Classificação.unique()", "_____no_output_____" ], [ "df.Classificação.value_counts()", "_____no_output_____" ], [ "df['Seco'] = 0", "_____no_output_____" ], [ "df.loc[(df.Classificação.notna()) & (df.Classificação == 'Seco'), 'Seco'] = 1", "_____no_output_____" ], [ "df.loc[(df.Classificação.notna()) & (df.Classificação == 'Extra Dry'), 'Seco'] = 1", "_____no_output_____" ], [ "df.Seco.value_counts()", "_____no_output_____" ], [ "df.Tipo.value_counts()", "_____no_output_____" ], [ "df.groupby('País')['Link'].count().sort_values(ascending=False).cumsum().apply(lambda x : x / total)", "_____no_output_____" ], [ "plt.figure(figsize=(14,6))\n\ncountry = df.País.value_counts()\n\ng = sns.countplot(x='País', \n data=df,\n color='darkgreen')\ng.set_title(\"Contagem de Vinhos por País\", fontsize=20)\ng.set_xlabel(\"País \", fontsize=15)\ng.set_ylabel(\"Contagem\", fontsize=15)\ng.set_xticklabels(g.get_xticklabels(),rotation=45)\n\nplt.show()", "_____no_output_____" ], [ "df['Pontuação'] = df.Pontuação.fillna(0)", "_____no_output_____" ], [ "df['Pontos_Total'] = df['Pontuação'] * df['Avaliações']", "_____no_output_____" ], [ "plt.figure(figsize=(16,12))\n\nplt.subplot(2,1,1)\n\ng = 
sns.boxplot(x='País', y=np.log(df.Preço_Normal.values), data=df, color='darkgreen')\ng.set_title(\"Preço por País de Origem (log)\", fontsize=20)\ng.set_xlabel(\"País\", fontsize=15)\ng.set_ylabel(\"Distribuição de Preços\", fontsize=15)\ng.set_xticklabels(g.get_xticklabels(), rotation=45)\n\nplt.subplot(2, 1, 2)\n\ng = sns.boxplot(x='País', y=np.log(df[df.Pontos_Total != 0.0].Pontos_Total), data=df[df.Pontos_Total != 0.0], color='darkgreen')\ng.set_title(\"Pontos por País de Origem (log)\", fontsize=20)\ng.set_xlabel(\"País\", fontsize=15)\ng.set_ylabel(\"Distribuição de Pontos\", fontsize=15)\ng.set_xticklabels(g.get_xticklabels(), rotation=45)\n\nplt.subplots_adjust(hspace=0.6, top=0.9)\n\nplt.show()\n\n", "_____no_output_____" ] ], [ [ "Interessante que o país de origem mais abundante, França, não possui vinhos com preço extremamente alto no nosso conjunto de dados, i.e. considerados outliers. ", "_____no_output_____" ] ], [ [ "df.groupby('País').Pontuação.median().sort_values(ascending=False)", "_____no_output_____" ], [ "df.groupby('País').Preço_Normal.median().sort_values(ascending=False)", "_____no_output_____" ], [ "for row in df.itertuples():\n df.loc[row.Index, 'Desc_Sócio'] = 1 - (row.Preço_Sócio / row.Preço_Normal) ", "_____no_output_____" ], [ "df.Desc_Sócio.mean()", "_____no_output_____" ], [ "plt.figure(figsize=(16,12))\n\nplt.subplot(2,1,1)\n\ng = sns.boxplot(x='Tipo', y=np.log(df.Preço_Normal.values), data=df, color='darkgreen')\ng.set_title(\"Preço por Tipo de Vinho (log)\", fontsize=20)\ng.set_xlabel(\"Tipo\", fontsize=15)\ng.set_ylabel(\"Distribuição de Preços\", fontsize=15)\ng.set_xticklabels(g.get_xticklabels(), rotation=45)\n\nplt.subplot(2, 1, 2)\n\ng = sns.boxplot(x='Tipo', y=np.log(df[df.Pontos_Total != 0.0].Pontos_Total), data=df[df.Pontos_Total != 0.0], color='darkgreen')\ng.set_title(\"Pontos por Tipo de Vinho (log)\", fontsize=20)\ng.set_xlabel(\"Tipo\", fontsize=15)\ng.set_ylabel(\"Distribuição de Pontos\", fontsize=15)\ng.set_xticklabels(g.get_xticklabels(), rotation=45)\nplt.subplots_adjust(hspace=0.6, top=0.9)\nplt.show()\n", "_____no_output_____" ] ], [ [ "* Dado que a imensa maioria do catálogo é de vinhos tintos, não é surpresa que tenha o maior número de outliers. 
Mas os vinhos espumantes e licorosos possuem mediana de preços maior, ou seja, no geral são vinhos bem mais caros do catálogo.\n* Os vinhos frisantes são os que possuem menor preço e maior pontuação na média, claro que possuem pouquíssimos vinhos disponíveis então não devemos levar essa estatística ao pé da letra.", "_____no_output_____" ] ], [ [ "df['Uvas'] = df.Uvas.str.split(\",\")", "_____no_output_____" ], [ "uvas = df.explode(column='Uvas')[['Uvas']]", "_____no_output_____" ], [ "uvas = uvas.reset_index()", "_____no_output_____" ], [ "import re\nfor row in uvas[uvas.Uvas.notna()].itertuples():\n    m = re.match(r\"([\\w+ ]+)\\s*\\(?(\\d*)\\.?\\d*%?\\)?\", row.Uvas.strip(), re.UNICODE)\n    uvas.loc[row.Index, 'Uva'] = m.group(1).rstrip()\n    conc = m.group(2).rstrip()\n    uvas.loc[row.Index, 'Concentração'] = conc if conc != '' else 100 / len(uvas[uvas.Nome == row.Nome])", "_____no_output_____" ], [ "uvas = uvas.drop('Uvas', axis=1)", "_____no_output_____" ], [ "uvas", "_____no_output_____" ], [ "top_20 = uvas.Uva.value_counts()[:30]\n\ntop_20 = pd.DataFrame(top_20).reset_index()\n\ntop_20.columns = ['Uva', 'Count']", "_____no_output_____" ], [ "top_20", "_____no_output_____" ], [ "plt.figure(figsize=(16,8))\n\n\ng = sns.barplot(x='Uva',\n                y='Count',\n                data=top_20,\n                color='blue')\ng.set_title(\"Distribuição por Tipos de Uvas\", fontsize=20)\ng.set_xlabel(\"Uvas\", fontsize=15)\ng.set_ylabel(\"Contagem\", fontsize=15)\ng.set_xticklabels(g.get_xticklabels(),rotation=90)\n\nfor p in g.patches:\n    height = p.get_height()\n    g.text(p.get_x()+p.get_width()/2.,\n            height + 3,\n            int(height),\n            ha=\"center\", fontsize=10) \n    \nplt.show()", "_____no_output_____" ], [ "uvas['Concentração'] = uvas.Concentração.astype(float)\nuvas.Concentração.value_counts()", "_____no_output_____" ], [ "df['Puro'] = 0", "_____no_output_____" ], [ "for row in uvas[uvas.Concentração == 100].itertuples():\n    df.loc[row.Nome, 'Puro'] = 1", "_____no_output_____" ], [ "df.index", "_____no_output_____" ], [ "df.to_csv('all_wines_cleaned.csv')", "_____no_output_____" ], [ "df = pd.read_csv('all_wines_cleaned.csv')", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec8198c082b930dd561f4a95849313c5e9c9c8ae
56,316
ipynb
Jupyter Notebook
week_1_intro/notebooks/09-pandas.ipynb
razekmaiden/ml_zoomcamp
fe26bb2fc611cb22b5da6178544c4f1dfd30f9ee
[ "MIT" ]
null
null
null
week_1_intro/notebooks/09-pandas.ipynb
razekmaiden/ml_zoomcamp
fe26bb2fc611cb22b5da6178544c4f1dfd30f9ee
[ "MIT" ]
null
null
null
week_1_intro/notebooks/09-pandas.ipynb
razekmaiden/ml_zoomcamp
fe26bb2fc611cb22b5da6178544c4f1dfd30f9ee
[ "MIT" ]
1
2021-10-11T12:18:36.000Z
2021-10-11T12:18:36.000Z
26.984188
264
0.354287
[ [ [ "# Machine Learning Zoomcamp\n\n## 1.9 Introduction to Pandas\n\nPlan:\n\n* Data Frames\n* Series\n* Index\n* Accessing elements\n* Element-wise operations\n* Filtering\n* String operations\n* Summarizing operations\n* Missing values\n* Grouping\n* Getting the NumPy arrays", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## DataFrames", "_____no_output_____" ] ], [ [ "data = [\n ['Nissan', 'Stanza', 1991, 138, 4, 'MANUAL', 'sedan', 2000],\n ['Hyundai', 'Sonata', 2017, None, 4, 'AUTOMATIC', 'Sedan', 27150],\n ['Lotus', 'Elise', 2010, 218, 4, 'MANUAL', 'convertible', 54990],\n ['GMC', 'Acadia', 2017, 194, 4, 'AUTOMATIC', '4dr SUV', 34450],\n ['Nissan', 'Frontier', 2017, 261, 6, 'MANUAL', 'Pickup', 32340],\n]\n\ncolumns = [\n 'Make', 'Model', 'Year', 'Engine HP', 'Engine Cylinders',\n 'Transmission Type', 'Vehicle_Style', 'MSRP'\n]", "_____no_output_____" ], [ "df = pd.DataFrame(data, columns=columns)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "data = [\n {\n \"Make\": \"Nissan\",\n \"Model\": \"Stanza\",\n \"Year\": 1991,\n \"Engine HP\": 138.0,\n \"Engine Cylinders\": 4,\n \"Transmission Type\": \"MANUAL\",\n \"Vehicle_Style\": \"sedan\",\n \"MSRP\": 2000\n },\n {\n \"Make\": \"Hyundai\",\n \"Model\": \"Sonata\",\n \"Year\": 2017,\n \"Engine HP\": None,\n \"Engine Cylinders\": 4,\n \"Transmission Type\": \"AUTOMATIC\",\n \"Vehicle_Style\": \"Sedan\",\n \"MSRP\": 27150\n },\n {\n \"Make\": \"Lotus\",\n \"Model\": \"Elise\",\n \"Year\": 2010,\n \"Engine HP\": 218.0,\n \"Engine Cylinders\": 4,\n \"Transmission Type\": \"MANUAL\",\n \"Vehicle_Style\": \"convertible\",\n \"MSRP\": 54990\n },\n {\n \"Make\": \"GMC\",\n \"Model\": \"Acadia\",\n \"Year\": 2017,\n \"Engine HP\": 194.0,\n \"Engine Cylinders\": 4,\n \"Transmission Type\": \"AUTOMATIC\",\n \"Vehicle_Style\": \"4dr SUV\",\n \"MSRP\": 34450\n },\n {\n \"Make\": \"Nissan\",\n \"Model\": \"Frontier\",\n \"Year\": 2017,\n \"Engine HP\": 261.0,\n \"Engine Cylinders\": 6,\n \"Transmission Type\": \"MANUAL\",\n \"Vehicle_Style\": \"Pickup\",\n \"MSRP\": 32340\n }\n]", "_____no_output_____" ], [ "df = pd.DataFrame(data)\ndf", "_____no_output_____" ], [ "df.head(n=2)", "_____no_output_____" ] ], [ [ "## Series", "_____no_output_____" ] ], [ [ "df.Engine HP", "_____no_output_____" ], [ "df['Engine HP']", "_____no_output_____" ], [ "df[['Make', 'Model', 'MSRP']]", "_____no_output_____" ], [ "df['id'] = [1, 2, 3, 4, 5]", "_____no_output_____" ], [ "df['id'] = [10, 20, 30, 40, 50]", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "del df['id']", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "## Index\n", "_____no_output_____" ] ], [ [ "df.index", "_____no_output_____" ], [ "df.Year.index", "_____no_output_____" ], [ "df.index = ['a', 'b', 'c', 'd', 'e']", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.iloc[[1, 2, 4]]", "_____no_output_____" ], [ "df = df.reset_index(drop=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "## Accessing elements", "_____no_output_____" ], [ "## Element-wise operations", "_____no_output_____" ] ], [ [ "df['Engine HP'] * 2", "_____no_output_____" ], [ "df['Year'] >= 2015", "_____no_output_____" ] ], [ [ "## Filtering", "_____no_output_____" ] ], [ [ "df[\n df['Make'] == 'Nissan'\n]", "_____no_output_____" ], [ "df[\n (df['Make'] == 'Nissan') & (df['Year'] >= 2015)\n]", "_____no_output_____" ] ], [ [ "## String operations", "_____no_output_____" ] ], [ [ "'machine 
learning zoomcamp'.replace(' ', '_')", "_____no_output_____" ], [ "df['Vehicle_Style'].str.lower()", "_____no_output_____" ], [ "df['Vehicle_Style'] = df['Vehicle_Style'].str.replace(' ', '_').str.lower()", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "## Summarizing operations", "_____no_output_____" ] ], [ [ "df.describe().round(2)", "_____no_output_____" ], [ "df.nunique()", "_____no_output_____" ] ], [ [ "## Missing values\n", "_____no_output_____" ] ], [ [ "df.isnull().sum()", "_____no_output_____" ] ], [ [ "## Grouping\n", "_____no_output_____" ], [ "```\nSELECT \n transmission_type,\n AVG(MSRP)\nFROM\n cars\nGROUP BY\n transmission_type\n```", "_____no_output_____" ] ], [ [ "df.groupby('Transmission Type').MSRP.max()", "_____no_output_____" ] ], [ [ "## Getting the NumPy arrays", "_____no_output_____" ] ], [ [ "df.MSRP.values", "_____no_output_____" ], [ "df.to_dict(orient='records')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ec819950874739c9ee483bf21a313d8087ad6cfb
30,221
ipynb
Jupyter Notebook
notebooks/HyperparamTuning.ipynb
knathanieltucker/bit-of-data-science-and-scikit-learn
66219307cddfda9f9e6243557e4b8ee05c0590e8
[ "MIT" ]
120
2017-06-22T05:19:52.000Z
2022-03-11T17:22:15.000Z
notebooks/HyperparamTuning.ipynb
CharlieBlogg/bit-of-data-science-and-scikit-learn
66219307cddfda9f9e6243557e4b8ee05c0590e8
[ "MIT" ]
null
null
null
notebooks/HyperparamTuning.ipynb
CharlieBlogg/bit-of-data-science-and-scikit-learn
66219307cddfda9f9e6243557e4b8ee05c0590e8
[ "MIT" ]
153
2017-07-17T13:17:32.000Z
2022-03-07T14:35:16.000Z
41.455418
466
0.527315
[ [ [ "# Tuning the hyper-parameters of an estimator\n\nHyper-parameters are parameters that are not directly learnt within estimators. In scikit-learn they are passed as arguments to the constructor of the estimator classes. Typical examples include C, kernel and gamma for Support Vector Classifier, alpha for Lasso, etc.\n\nIt is possible and recommended to search the hyper-parameter space for the best Cross-validation: evaluating estimator performance score.\n\nAny parameter provided when constructing an estimator may be optimized in this manner. Specifically, to find the names and current values for all parameters for a given estimator, use:\n\n`estimator.get_params()`\n\nA search consists of:\n* an estimator (regressor or classifier such as sklearn.svm.SVC());\n* a parameter space;\n* a method for searching or sampling candidates;\n* a cross-validation scheme; and\n* a score function.\n\nSome models allow for specialized, efficient parameter search strategies, outlined below. Two generic approaches to sampling search candidates are provided in scikit-learn: for given values, GridSearchCV exhaustively considers all parameter combinations, while RandomizedSearchCV can sample a given number of candidates from a parameter space with a specified distribution. After describing these tools we detail best practice applicable to both approaches.\n\nNote that it is common that a small subset of those parameters can have a large impact on the predictive or computation performance of the model while others can be left to their default values. It is recommend to read the docstring of the estimator class to get a finer understanding of their expected behavior, possibly by reading the enclosed reference to the literature.", "_____no_output_____" ], [ "## GridSearch\n\nThe grid search provided by GridSearchCV exhaustively generates candidates from a grid of parameter values specified with the param_grid parameter. 
For instance, the following param_grid:", "_____no_output_____" ] ], [ [ "param_grid = [\n {'C': [1, 10, 100, 1000], 'kernel': ['linear']},\n {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},\n ]", "_____no_output_____" ] ], [ [ "specifies that two grids should be explored: one with a linear kernel and C values in [1, 10, 100, 1000], and the second one with an RBF kernel, and the cross-product of C values ranging in [1, 10, 100, 1000] and gamma values in [0.001, 0.0001].\n\nThe GridSearchCV instance implements the usual estimator API: when “fitting” it on a dataset all the possible combinations of parameter values are evaluated and the best combination is retained.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\n\nGridSearchCV?", "_____no_output_____" ], [ "from sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.svm import SVC\n\ndigits = datasets.load_digits()\n\nn_samples = len(digits.images)\nX = digits.images.reshape((n_samples, -1))\ny = digits.target\n\n# Split the dataset in two equal parts\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.5, random_state=0)\n\ntuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],\n 'C': [1, 10, 100, 1000]},\n {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]\n\n\n\nclf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,\n scoring='f1_macro')\nclf.fit(X_train, y_train)", "_____no_output_____" ], [ "clf.best_params_\n", "_____no_output_____" ], [ "clf.cv_results_", "_____no_output_____" ], [ "y_true, y_pred = y_test, clf.predict(X_test)\nprint classification_report(y_true, y_pred)", " precision recall f1-score support\n\n 0 1.00 1.00 1.00 89\n 1 0.97 1.00 0.98 90\n 2 0.99 0.98 0.98 92\n 3 1.00 0.99 0.99 93\n 4 1.00 1.00 1.00 76\n 5 0.99 0.98 0.99 108\n 6 0.99 1.00 0.99 89\n 7 0.99 1.00 0.99 78\n 8 1.00 0.98 0.99 92\n 9 0.99 0.99 0.99 92\n\navg / total 0.99 0.99 0.99 899\n\n" ], [ "clf.cv_results_.keys()", "_____no_output_____" ], [ "for param, score in zip(clf.cv_results_['params'], clf.cv_results_['mean_test_score']):\n print param, score", "{'kernel': 'rbf', 'C': 1, 'gamma': 0.001} 0.985584530844\n{'kernel': 'rbf', 'C': 1, 'gamma': 0.0001} 0.957017352561\n{'kernel': 'rbf', 'C': 10, 'gamma': 0.001} 0.986932256371\n{'kernel': 'rbf', 'C': 10, 'gamma': 0.0001} 0.980973238881\n{'kernel': 'rbf', 'C': 100, 'gamma': 0.001} 0.986932256371\n{'kernel': 'rbf', 'C': 100, 'gamma': 0.0001} 0.981150421585\n{'kernel': 'rbf', 'C': 1000, 'gamma': 0.001} 0.986932256371\n{'kernel': 'rbf', 'C': 1000, 'gamma': 0.0001} 0.981150421585\n{'kernel': 'linear', 'C': 1} 0.972738260762\n{'kernel': 'linear', 'C': 10} 0.972738260762\n{'kernel': 'linear', 'C': 100} 0.972738260762\n{'kernel': 'linear', 'C': 1000} 0.972738260762\n" ] ], [ [ "## Randomized Search\n\nWhile using a grid of parameter settings is currently the most widely used method for parameter optimization, other search methods have more favourable properties. RandomizedSearchCV implements a randomized search over parameters, where each setting is sampled from a distribution over possible parameter values. 
This has two main benefits over an exhaustive search:\n\n* A budget can be chosen independent of the number of parameters and possible values.\n* Adding parameters that do not influence the performance does not decrease efficiency.\n\nSpecifying how parameters should be sampled is done using a dictionary, very similar to specifying parameters for GridSearchCV. Additionally, a computation budget, being the number of sampled candidates or sampling iterations, is specified using the n_iter parameter. For each parameter, either a distribution over possible values or a list of discrete choices (which will be sampled uniformly) can be specified:", "_____no_output_____" ] ], [ [ "import scipy\n\nparams = {'C': scipy.stats.expon(scale=100), 'gamma': scipy.stats.expon(scale=.1),\n 'kernel': ['rbf'], 'class_weight':['balanced', None]}", "_____no_output_____" ] ], [ [ "This example uses the scipy.stats module, which contains many useful distributions for sampling parameters, such as expon, gamma, uniform or randint. In principle, any function can be passed that provides a rvs (random variate sample) method to sample a value. A call to the rvs function should provide independent random samples from possible parameter values on consecutive calls.\n\nFor continuous parameters, such as C above, it is important to specify a continuous distribution to take full advantage of the randomization. This way, increasing n_iter will always lead to a finer search.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import RandomizedSearchCV\n\nRandomizedSearchCV?", "_____no_output_____" ], [ "clf = RandomizedSearchCV(SVC(), params, cv=5,\n scoring='f1_macro')\nclf.fit(X_train, y_train)", "/Users/nate/Desktop/scikit-learn-tutorial/env/lib/python2.7/site-packages/sklearn/metrics/classification.py:1113: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n" ], [ "clf.best_params_\n", "_____no_output_____" ], [ "clf.cv_results_", "_____no_output_____" ], [ "y_true, y_pred = y_test, clf.predict(X_test)\nprint classification_report(y_true, y_pred)", " precision recall f1-score support\n\n 0 1.00 0.98 0.99 89\n 1 1.00 0.98 0.99 90\n 2 1.00 0.95 0.97 92\n 3 0.99 0.95 0.97 93\n 4 0.99 1.00 0.99 76\n 5 1.00 0.94 0.97 108\n 6 1.00 0.94 0.97 89\n 7 1.00 0.99 0.99 78\n 8 0.80 0.98 0.88 92\n 9 0.94 0.98 0.96 92\n\navg / total 0.97 0.97 0.97 899\n\n" ], [ "for param, score in zip(clf.cv_results_['params'], clf.cv_results_['mean_test_score']):\n print param, score", "{'kernel': 'rbf', 'C': 197.43804316699695, 'gamma': 0.06685665237888809, 'class_weight': None} 0.0209367845031\n{'kernel': 'rbf', 'C': 66.457135213559567, 'gamma': 0.052735834620258396, 'class_weight': None} 0.0209367845031\n{'kernel': 'rbf', 'C': 177.34863184477223, 'gamma': 0.012859019553229155, 'class_weight': None} 0.402592233575\n{'kernel': 'rbf', 'C': 246.38577363335213, 'gamma': 0.024434327856707762, 'class_weight': None} 0.0725820179991\n{'kernel': 'rbf', 'C': 106.14275113094149, 'gamma': 0.055239367053976862, 'class_weight': 'balanced'} 0.0209367845031\n{'kernel': 'rbf', 'C': 77.66272238664412, 'gamma': 0.005068411787780897, 'class_weight': 'balanced'} 0.952387358855\n{'kernel': 'rbf', 'C': 544.70767720523997, 'gamma': 0.047295948666685128, 'class_weight': 'balanced'} 0.0251636863296\n{'kernel': 'rbf', 'C': 17.176008353499572, 'gamma': 0.047877537276124066, 'class_weight': 'balanced'} 0.0251636863296\n{'kernel': 'rbf', 'C': 97.301699633849807, 
'gamma': 0.065282190141343288, 'class_weight': None} 0.0209367845031\n{'kernel': 'rbf', 'C': 5.0170210293950577, 'gamma': 0.19937148031345198, 'class_weight': None} 0.0209367845031\n" ] ], [ [ "Don't forget the old _CV classes that are faster than gridsearch! And also don't forget about OOB error that can be a great proxy", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ec81a3007a6580b753d5e92c8bee0e1c402183f5
80,364
ipynb
Jupyter Notebook
Notebooks/.ipynb_checkpoints/DataViz-NLP_Frequency-Words-checkpoint.ipynb
hannahkidwell/NLP-Spotify-Genres
cb1404882ce62c77689840017bd24c947b78f175
[ "MIT" ]
1
2020-12-04T03:05:49.000Z
2020-12-04T03:05:49.000Z
Notebooks/.ipynb_checkpoints/DataViz-NLP_Frequency-Words-checkpoint.ipynb
hannahkidwell/NLP-Spotify-Genres
cb1404882ce62c77689840017bd24c947b78f175
[ "MIT" ]
1
2021-01-13T22:42:25.000Z
2021-01-13T22:42:25.000Z
Notebooks/.ipynb_checkpoints/DataViz-NLP_Frequency-Words-checkpoint.ipynb
hannahkidwell/NLP-Spotify-Genres
cb1404882ce62c77689840017bd24c947b78f175
[ "MIT" ]
2
2020-12-02T04:04:30.000Z
2020-12-04T03:15:21.000Z
76.319088
14,352
0.571064
[ [ [ "from collections import Counter\nimport pandas as pd\nimport numpy as np\nimport nltk\n#nltk.download('stopwords')\n\n# Bokeh\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.io import output_notebook, push_notebook, show\n\ntop_N = 10\n\ndf = pd.read_csv('../Data/nlp_df.csv',\n usecols=['category','filtered'])\n\nstopwords = nltk.corpus.stopwords.words('english')\n# RegEx for stopwords\nRE_stopwords = r'\\b(?:{})\\b'.format('|'.join(stopwords))\n# replace '|'-->' ' and drop all stopwords\nwords = (df.filtered\n .str.lower()\n .replace([r'\\|', RE_stopwords], [' ', ''], regex=True)\n .str.cat(sep=' ')\n .split()\n)\n\n# generate DF out of Counter\nrslt = pd.DataFrame(Counter(words).most_common(top_N),\n columns=['Word', 'Frequency']).set_index('Word')\nprint(rslt)\n\n# plot\nrslt.plot.bar(rot=0, figsize=(16,10), width=0.8)", " Frequency\nWord \n'', 100727\n'im', 25553\n'dont', 18057\n'?', 16652\n'like', 16063\n'oh', 15220\n'know', 15164\n'yeah', 14767\n'love', 14196\n'got', 11350\n" ], [ "frequency_df = rslt\nfrequency_df.head()", "_____no_output_____" ], [ "# Create DataFrame from CSV file\nnlp_df = pd.read_csv('../Data/nlp_df.csv')\nnlp_df.head(10)", "_____no_output_____" ], [ "for ", "_____no_output_____" ], [ "# prepare some data\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.models import ColumnDataSource\n#from bokeh.models.tools import HoverTool\nimport pandas_bokeh\n\n# output to static HTML file\n# output_file(\"log_lines.html\")\noutput_notebook()\n\n\nfrequency_df.plot_bokeh(\n kind='bar',\n xlabel='Word',\n ylabel='Frequency',\n title='Words by Frequency'\n)\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ec81b0c7e7ebbde0af1df8bb6343595e6d0c5686
14,979
ipynb
Jupyter Notebook
image-classification/image_classification-train.ipynb
snegirigens/DLND
bc6f03fc9578d9fdabe51806739ae9beabc0352a
[ "MIT" ]
null
null
null
image-classification/image_classification-train.ipynb
snegirigens/DLND
bc6f03fc9578d9fdabe51806739ae9beabc0352a
[ "MIT" ]
104
2020-01-28T21:53:40.000Z
2022-03-11T23:15:45.000Z
image-classification/image_classification-train.ipynb
snegirigens/DLND
bc6f03fc9578d9fdabe51806739ae9beabc0352a
[ "MIT" ]
null
null
null
33.139381
191
0.561519
[ [ [ "from urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\nimport problem_unittests as tests\nimport tarfile\n\ncifar10_dataset_folder_path = 'cifar-10-batches-py'\n", "_____no_output_____" ] ], [ [ "# Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport pickle\nimport problem_unittests as tests\nimport helper\n\n# Load the Preprocessed Validation data\nvalid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))", "_____no_output_____" ], [ "import tensorflow as tf\n\ndef neural_net_image_input(image_shape):\n \"\"\"\n Return a Tensor for a batch of image input\n : image_shape: Shape of the images\n : return: Tensor for image input.\n \"\"\"\n # TODO: Implement Function\n shape = list(image_shape)\n shape.insert (0, None)\n return tf.placeholder (tf.float32, shape=shape, name='x')\n\n\ndef neural_net_label_input(n_classes):\n \"\"\"\n Return a Tensor for a batch of label input\n : n_classes: Number of classes\n : return: Tensor for label input.\n \"\"\"\n # TODO: Implement Function\n return tf.placeholder (tf.float32, shape=[None, n_classes], name='y')\n\n\ndef neural_net_keep_prob_input():\n \"\"\"\n Return a Tensor for keep probability\n : return: Tensor for keep probability.\n \"\"\"\n # TODO: Implement Function\n return tf.placeholder (tf.float32, name='keep_prob')\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntf.reset_default_graph()\ntests.test_nn_image_inputs(neural_net_image_input)\ntests.test_nn_label_inputs(neural_net_label_input)\ntests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)", "_____no_output_____" ], [ "def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):\n \"\"\"\n Apply convolution then max pooling to x_tensor\n :param x_tensor: TensorFlow Tensor\n :param conv_num_outputs: Number of outputs for the convolutional layer\n :param conv_ksize: kernal size 2-D Tuple for the convolutional layer\n :param conv_strides: Stride 2-D Tuple for convolution\n :param pool_ksize: kernal size 2-D Tuple for pool\n :param pool_strides: Stride 2-D Tuple for pool\n : return: A tensor that represents convolution and max pooling of x_tensor\n \"\"\"\n # TODO: Implement Function\n input_shape = [conv_ksize[0], conv_ksize[1], x_tensor.get_shape().as_list()[3], conv_num_outputs]\n weights = tf.Variable (tf.truncated_normal (input_shape, mean=0.0, stddev=0.10, dtype=tf.float32))\n biases = tf.Variable (tf.zeros (conv_num_outputs, dtype=tf.float32))\n strides = [1, conv_strides[0], conv_strides[1], 1]\n \n conv = tf.nn.conv2d (x_tensor, weights, strides=strides, padding='SAME')\n conv = tf.nn.bias_add (conv, biases)\n conv = tf.nn.relu (conv)\n \n ksize = [1, pool_ksize[0], pool_ksize[1], 1]\n strides = [1, pool_strides[0], pool_strides[1], 1]\n maxpool = tf.nn.max_pool (conv, ksize=ksize, strides=strides, padding='SAME')\n return maxpool \n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_con_pool(conv2d_maxpool)", "_____no_output_____" ], [ "import operator\nimport functools\n\ndef flatten(x_tensor):\n \"\"\"\n Flatten x_tensor to (Batch Size, Flattened Image Size)\n : x_tensor: A tensor of size (Batch Size, ...), where ... 
are the image dimensions.\n : return: A tensor of size (Batch Size, Flattened Image Size).\n \"\"\"\n # TODO: Implement Function\n image_size = functools.reduce(operator.mul, x_tensor.get_shape().as_list()[1:], 1)\n return tf.reshape (x_tensor, [-1, image_size])\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_flatten(flatten)", "_____no_output_____" ], [ "def fully_conn(x_tensor, num_outputs):\n \"\"\"\n Apply a fully connected layer to x_tensor using weight and bias\n : x_tensor: A 2-D tensor where the first dimension is batch size.\n : num_outputs: The number of output that the new tensor should be.\n : return: A 2-D tensor where the second dimension is num_outputs.\n \"\"\"\n # TODO: Implement Function\n weights = tf.Variable (tf.truncated_normal ([x_tensor.get_shape().as_list()[1], num_outputs], mean=0.0, stddev=0.10, dtype=tf.float32))\n biases = tf.Variable (tf.zeros (num_outputs, dtype=tf.float32))\n activations = tf.add (tf.matmul (x_tensor, weights), biases)\n return tf.nn.relu (activations)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_fully_conn(fully_conn)", "_____no_output_____" ], [ "def output(x_tensor, num_outputs):\n \"\"\"\n Apply a output layer to x_tensor using weight and bias\n : x_tensor: A 2-D tensor where the first dimension is batch size.\n : num_outputs: The number of output that the new tensor should be.\n : return: A 2-D tensor where the second dimension is num_outputs.\n \"\"\"\n # TODO: Implement Function\n weights = tf.Variable (tf.truncated_normal ([x_tensor.get_shape().as_list()[1], num_outputs], mean=0.0, stddev=0.10, dtype=tf.float32))\n biases = tf.Variable (tf.zeros (num_outputs, dtype=tf.float32))\n return tf.add (tf.matmul (x_tensor, weights), biases)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_output(output)", "_____no_output_____" ], [ "def conv_net(x, keep_prob):\n \"\"\"\n Create a convolutional neural network model\n : x: Placeholder tensor that holds image data.\n : keep_prob: Placeholder tensor that hold dropout keep probability.\n : return: Tensor that represents logits\n \"\"\"\n # TODO: Apply 1, 2, or 3 Convolution and Max Pool layers\n conv1 = conv2d_maxpool (x, conv_num_outputs=64, conv_ksize=(8,8), conv_strides=(2,2), pool_ksize=(2,2), pool_strides=(2,2))\n conv2 = conv2d_maxpool (conv1, conv_num_outputs=128, conv_ksize=(6,6), conv_strides=(2,2), pool_ksize=(2,2), pool_strides=(2,2))\n\n flat = flatten (conv2)\n \n conn1 = fully_conn (flat, 2048)\n conn1 = tf.nn.dropout (conn1, keep_prob)\n conn2 = fully_conn (conn1, 1024)\n conn2 = tf.nn.dropout (conn2, keep_prob)\n \n out = output (conn2, 10)\n return out\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n\n##############################\n## Build the Neural Network ##\n##############################\n\n# Remove previous weights, bias, inputs, etc..\ntf.reset_default_graph()\n\n# Inputs\nx = neural_net_image_input((32, 32, 3))\ny = neural_net_label_input(10)\nkeep_prob = neural_net_keep_prob_input()\n\n# Model\nlogits = conv_net(x, keep_prob)\n\n# Name logits Tensor, so that is can be loaded from disk after training\nlogits = tf.identity(logits, name='logits')\n\n# Loss and Optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))\noptimizer = tf.train.AdamOptimizer().minimize(cost)\n\n# Accuracy\ncorrect_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\naccuracy = 
tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n\ntests.test_conv_net(conv_net)", "_____no_output_____" ], [ "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n \"\"\"\n Optimize the session on a batch of images and labels\n : session: Current TensorFlow session\n : optimizer: TensorFlow optimizer function\n : keep_probability: keep probability\n : feature_batch: Batch of Numpy image data\n : label_batch: Batch of Numpy label data\n \"\"\"\n # TODO: Implement Function\n session.run (optimizer, feed_dict={x:feature_batch, y:label_batch, keep_prob:keep_probability})\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_train_nn(train_neural_network)", "_____no_output_____" ], [ "def print_stats(session, feature_batch, label_batch, cost, accuracy):\n \"\"\"\n Print information about loss and validation accuracy\n : session: Current TensorFlow session\n : feature_batch: Batch of Numpy image data\n : label_batch: Batch of Numpy label data\n : cost: TensorFlow cost function\n : accuracy: TensorFlow accuracy function\n \"\"\"\n # TODO: Implement Function\n loss = session.run (cost, feed_dict={x:feature_batch, y:label_batch, keep_prob:1.0})\n valid_acc = session.run (accuracy, feed_dict={x:valid_features, y:valid_labels, keep_prob:1.0})\n print ('Loss = {:8.6f}, Validation Accuracy = {:.4f}'.format (loss, valid_acc))\n ", "_____no_output_____" ], [ "# TODO: Tune Parameters\nepochs = 20\nbatch_size = 128\nkeep_probability = 0.75", "_____no_output_____" ] ], [ [ "### Fully Train the Model\nNow that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nsave_model_path = './image_classification'\n\nprint('Training...')\nwith tf.Session() as sess:\n print (\"Initializing the variables\")\n sess.run(tf.global_variables_initializer())\n \n # Training cycle\n for epoch in range(epochs):\n # Loop over all batches\n n_batches = 5\n for batch_i in range(1, n_batches + 1):\n for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):\n train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)\n print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')\n print_stats(sess, batch_features, batch_labels, cost, accuracy)\n \n if (epoch + 1) % 100 == 0:\n # Save Model\n print (\"Save model...\")\n saver = tf.train.Saver()\n save_path = saver.save(sess, save_model_path + '-' + str(epoch + 1)) \n \n # Save Model\n saver = tf.train.Saver()\n save_path = saver.save(sess, save_model_path + '-' + str(epoch + 1))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec81b176a80857268befd99bd169a06b92276426
58,261
ipynb
Jupyter Notebook
language_analysis.ipynb
allisonhonold/spark-blog-tfidf
817c631462a0390cf03ca3ea9cd09e953ced6d4c
[ "MIT" ]
4
2020-02-08T06:08:24.000Z
2021-09-13T09:24:58.000Z
language_analysis.ipynb
allisonhonold/spark-blog-tfidf
817c631462a0390cf03ca3ea9cd09e953ced6d4c
[ "MIT" ]
null
null
null
language_analysis.ipynb
allisonhonold/spark-blog-tfidf
817c631462a0390cf03ca3ea9cd09e953ced6d4c
[ "MIT" ]
3
2020-02-07T16:44:21.000Z
2021-08-28T03:10:50.000Z
29.559107
263
0.456721
[ [ [ "# TF-IDF with PySpark: Investigating the consumer financial complaint database", "_____no_output_____" ], [ "## John Snow Spark NLP package\nSee [this Quick Start Guide](https://nlp.johnsnowlabs.com/docs/en/quickstart)\n\nand/or [this quick start guide](https://github.com/JohnSnowLabs/spark-nlp)\n\nI used aspects of each, personally. Some highlights:\n\nIf you haven't already installed PySpark (note: PySpark version 2.4.4 is the only supported version):\n$ conda install pyspark==2.4.4\n$ conda install -c johnsnowlabs spark-nlp\n\nIf you already have PySpark, make sure to install spark-nlp in the same channel as PySpark. In my case PySpark is in my conda-forge channel, so I used:\n$ conda install -c johnsnowlabs spark-nlp --channel conda-forge\n\nI already had PySpark installed and set up for use with jupyter notebooks, but if you don't, you may need to set some additional environment variables in the terminal (as mentioned in the second quick start guide, but not the first, so I'm not positive):\n\nexport SPARK_HOME=/path/to/your/spark/folder\n\nexport PYSPARK_PYTHON=python3\n\nexport PYSPARK_DRIVER_PYTHON=jupyter\n\nexport PYSPARK_DRIVER_PYTHON_OPTS=notebook", "_____no_output_____" ] ], [ [ "# if you don't need to access the SparkSession:\n# import sparknlp\n# sparknlp.start()", "_____no_output_____" ] ], [ [ "since i need the spark session to load the data from my parquet file, I'll start a \"custom\" SparkSession", "_____no_output_____" ] ], [ [ "from pyspark.sql import SparkSession\n\n# start spark session configured for spark nlp\nspark = SparkSession.builder \\\n .master('local[*]') \\\n .appName('Spark NLP') \\\n .config(\"spark.jars.packages\", \"com.johnsnowlabs.nlp:spark-nlp_2.11:2.3.5\") \\\n .getOrCreate()\n\n# set to pandas-like output\nspark.conf.set(\"spark.sql.repl.eagerEval.enabled\", True)", "_____no_output_____" ], [ "# get the list of stopwords from nltk\nfrom nltk.corpus import stopwords\n\neng_stopwords = stopwords.words('english')\neng_stopwords.append('xxxx')", "_____no_output_____" ] ], [ [ "### Set up pipeline", "_____no_output_____" ] ], [ [ "from sparknlp.base import Finisher, DocumentAssembler\nfrom sparknlp.annotator import (Tokenizer, Normalizer, \n LemmatizerModel, StopWordsCleaner)\nfrom pyspark.ml import Pipeline", "_____no_output_____" ] ], [ [ "Start with the documentAssembler, then see the annotator [docs](https://nlp.johnsnowlabs.com/docs/en/annotators) to for the available annotators. Convert back to human-readable form at the end with a Finnisher.", "_____no_output_____" ] ], [ [ "documentAssembler = DocumentAssembler() \\\n .setInputCol('consumer_complaint_narrative') \\\n .setOutputCol('document')\n\ntokenizer = Tokenizer() \\\n .setInputCols(['document']) \\\n .setOutputCol('token')\n\n# note normalizer defaults to changing all words to lowercase.\n# Use .setLowercase(False) to maintain input case.\nnormalizer = Normalizer() \\\n .setInputCols(['token']) \\\n .setOutputCol('normalized') \\\n .setLowercase(True)\n\n# note that lemmatizer needs a dictionary. 
So I used the pre-trained\n# model (note that it defaults to english)\nlemmatizer = LemmatizerModel.pretrained() \\\n .setInputCols(['normalized']) \\\n .setOutputCol('lemma') \\\n\nstopwords_cleaner = StopWordsCleaner() \\\n .setInputCols(['lemma']) \\\n .setOutputCol('clean_lemma') \\\n .setCaseSensitive(False) \\\n .setStopWords(eng_stopwords)\n\n# finisher converts tokens to human-readable output\nfinisher = Finisher() \\\n .setInputCols(['clean_lemma']) \\\n .setCleanAnnotations(False)", "lemma_antbnc download started this may take some time.\nApproximate size to download 907.6 KB\n[OK!]\n" ] ], [ [ "#### Defining the pipeline", "_____no_output_____" ] ], [ [ "pipeline = Pipeline() \\\n .setStages([\n documentAssembler,\n tokenizer,\n normalizer,\n lemmatizer,\n stopwords_cleaner,\n finisher\n ])", "_____no_output_____" ] ], [ [ "## Import data and apply pipeline", "_____no_output_____" ], [ "Note: info on how to get this parquet file here:\n\ndata set: https://catalog.data.gov/dataset/consumer-complaint-database\n\nmy transformation from csv to parquet: https://github.com/allisonhonold/spark-blog/blob/master/pyspark_blog.ipynb", "_____no_output_____" ] ], [ [ "# import data\ndf = spark.read.load('../data/consumer_complaints.parquet',\n inferSchema=\"true\", header=\"true\")", "_____no_output_____" ], [ "# select equifax data as test\ndata = df.filter((df['company'] == 'EQUIFAX, INC.') \n & (df['consumer_complaint_narrative'].isNull() == False))", "_____no_output_____" ], [ "# transform text with the pipeline\nequifax = pipeline.fit(data).transform(data)", "_____no_output_____" ], [ "equifax.columns", "_____no_output_____" ], [ "equifax.select('finished_clean_lemma')", "_____no_output_____" ], [ "# expand the \"finished_clean_lemma\" column so that the words are not in a list\nfrom pyspark.sql.functions import explode, col\n\nequifax_words = equifax.withColumn(\"exploded_text\", explode(col(\"finished_clean_lemma\")))", "_____no_output_____" ], [ "equifax_words.columns", "_____no_output_____" ], [ "counts = equifax_words.groupby('exploded_text').count()", "_____no_output_____" ], [ "counts_pd = counts.toPandas()", "_____no_output_____" ], [ "counts_pd", "_____no_output_____" ], [ "counts_pd.shape", "_____no_output_____" ], [ "{counts_pd.loc[i, 'exploded_text']: counts_pd.loc[i, 'count'] for i in range(counts_pd.shape[0])}", "_____no_output_____" ], [ "companies = ['EQUIFAX, INC.',\n 'Experian Information Solutions Inc.',\n 'TRANSUNION INTERMEDIATE HOLDINGS, INC.',\n 'BANK OF AMERICA, NATIONAL ASSOCIATION',\n 'WELLS FARGO & COMPANY',\n 'JPMORGAN CHASE & CO.',\n 'CITIBANK, N.A.',\n 'CAPITAL ONE FINANCIAL CORPORATION',\n 'Navient Solutions, LLC.',\n 'Ocwen Financial Corporation',\n 'SYNCHRONY FINANCIAL',\n 'NATIONSTAR MORTGAGE',\n 'U.S. 
BANCORP',\n 'AMERICAN EXPRESS COMPANY',\n 'Ditech Financial LLC',\n 'PNC Bank N.A.',\n 'ENCORE CAPITAL GROUP INC.',\n 'PORTFOLIO RECOVERY ASSOCIATES INC',\n 'DISCOVER BANK',\n 'TD BANK US HOLDING COMPANY']", "_____no_output_____" ], [ "from pyspark.sql.functions import explode, col\n\n# initialize {company: {word counts}} dictionary\ncompany_complaint_word_counts_dict = {company: {} for company in companies}\n\nfor company in companies:\n print(company)\n # get complaint narratives\n company_df = df.filter((df['company'] == company) \n & (df['consumer_complaint_narrative'].isNull() == False))\n data = company_df.select('consumer_complaint_narrative')\n \n # process narratives into counts dictionary\n clean_data = pipeline.fit(data).transform(data)\n clean_data_exploded = clean_data.withColumn(\"exploded_text\", explode(col(\"finished_clean_lemma\")))\n counts = clean_data_exploded.groupby('exploded_text').count().toPandas()\n counts_dict = {counts.loc[i, 'exploded_text']: counts.loc[i, 'count'] for i in range(counts.shape[0])}\n \n # add counts to dictionary\n company_complaint_word_counts_dict[company] = counts_dict", "EQUIFAX, INC.\nExperian Information Solutions Inc.\nTRANSUNION INTERMEDIATE HOLDINGS, INC.\nBANK OF AMERICA, NATIONAL ASSOCIATION\nWELLS FARGO & COMPANY\nJPMORGAN CHASE & CO.\nCITIBANK, N.A.\nCAPITAL ONE FINANCIAL CORPORATION\nNavient Solutions, LLC.\nOcwen Financial Corporation\nSYNCHRONY FINANCIAL\nNATIONSTAR MORTGAGE\nU.S. BANCORP\nAMERICAN EXPRESS COMPANY\nDitech Financial LLC\nPNC Bank N.A.\nENCORE CAPITAL GROUP INC.\nPORTFOLIO RECOVERY ASSOCIATES INC\nDISCOVER BANK\nTD BANK US HOLDING COMPANY\n" ] ], [ [ "## Tf-idf", "_____no_output_____" ] ], [ [ "def term_frequency(BoW_dict):\n tot_words = sum(BoW_dict.values())\n freq_dict = {word: BoW_dict[word]/tot_words for word in BoW_dict.keys()}\n return freq_dict", "_____no_output_____" ], [ "from math import log\n\ndef inverse_document_frequency(list_of_dicts):\n tot_docs = len(list_of_dicts)\n words = set([w for w_dict in list_of_dicts for w in w_dict.keys()])\n idf_dict = {word: log(float(tot_docs)/(1.0+ sum([1 for w_dict in list_of_dicts if word in w_dict.keys()]))) for word in words}\n return idf_dict", "_____no_output_____" ], [ "def tf_idf(list_of_dicts):\n words = set([w for w_dict in list_of_dicts for w in w_dict.keys()])\n tf_idf_dicts = []\n idfs = inverse_document_frequency(list_of_dicts)\n for w_dict in list_of_dicts:\n w_dict.update({word: 0 for word in words if word not in w_dict.keys()})\n tf = term_frequency(w_dict)\n tf_idf_dicts.append({word: tf[word]*idfs[word] for word in words})\n return tf_idf_dicts", "_____no_output_____" ], [ "list_of_word_dicts = [company_complaint_word_counts_dict[company] for company in companies]\ntf_idf_by_company_list = tf_idf(list_of_word_dicts)\ntf_idf_by_company_dict = {c: tf_dict for c, tf_dict in zip(companies, tf_idf_by_company_list)}", "_____no_output_____" ] ], [ [ "## what's unique about each of our top companies (in terms of most compaints)?", "_____no_output_____" ] ], [ [ "import heapq", "_____no_output_____" ], [ "from nltk.corpus import words\n\neng_words = words.words()\n\nfor company in companies[0:10]:\n print (\"\\n\", company)\n tf_idf_dict = tf_idf_by_company_dict[company]\n t100 = heapq.nlargest(100, tf_idf_dict, key=tf_idf_dict.get)\n t100 = {word: tf_idf_dict[word] for word in t100}\n t100 = [(k, v) for k, v in zip(t100.keys(), t100.values())]\n t100 = sorted(t100, key=lambda x: x[1], reverse=True)\n \n counter = 0\n for word, tfidf in t100:\n if counter 
< 10:\n if word in eng_words:\n counter += 1\n print (word, tfidf)", "\n EQUIFAX, INC.\nreseller 0.0002688113448994784\ntobe 0.00011921055656363865\naccuser 6.927943339562246e-05\nreinsertion 6.873224017786062e-05\ncertifiably 6.287826619016647e-05\ncertifiable 5.514274765382813e-05\nrunner 4.9931123168016185e-05\ncompliantly 4.7578859720163194e-05\ncounteraction 4.0577767192584324e-05\nrejectable 3.6200064296811736e-05\n\n Experian Information Solutions Inc.\nreseller 0.00027501965154673653\ntobe 0.00014687622941775372\naccuser 8.571715639653474e-05\ncertifiably 7.710990986287158e-05\ncertifiable 6.866771862714405e-05\nrunner 6.281202094071141e-05\nreinsertion 5.664692062940039e-05\ncompliantly 5.587557352215855e-05\ncounteraction 4.989434077751539e-05\nrejectable 4.4865729243365284e-05\n\n TRANSUNION INTERMEDIATE HOLDINGS, INC.\nreseller 0.0003042826362853188\ntobe 0.00018786806000262364\ntu 0.0001447630541930031\naccuser 0.0001098803905320385\ncertifiably 9.867512105377535e-05\ncertifiable 8.755074575739848e-05\ncompliantly 7.476791926979552e-05\nreinsertion 7.393924034451927e-05\ncounteraction 6.71101733310887e-05\nautomotive 6.59765529228018e-05\n\n BANK OF AMERICA, NATIONAL ASSOCIATION\nboa 0.002601791649485743\nlynch 0.00021317211441412916\nbac 8.268148058932168e-05\nmerchant 4.679212901191259e-05\nforeclose 4.5737780238272726e-05\nteller 4.080119277432391e-05\nplatinum 3.0403277615214078e-05\nmellon 2.973787348481885e-05\nfirearm 2.8966243483422735e-05\nmesne 2.6773531766979178e-05\n\n WELLS FARGO & COMPANY\npreservation 0.00011058691165804388\nforeclose 5.753781062766643e-05\nappraisal 3.9504738341510795e-05\npreservationist 3.163435178085603e-05\nfaro 3.119630797032966e-05\nteller 3.0855199973996694e-05\ndealer 2.8840062699925677e-05\nwels 2.8796591972611992e-05\nharp 2.627888546665399e-05\nyen 2.094920839524432e-05\n\n JPMORGAN CHASE & CO.\nmorgan 0.0018633127860989127\nsapphire 0.00042155429663852476\nsouthwest 0.00010974924949395894\nmerchant 6.305054087767063e-05\nairway 4.9920903549298976e-05\nexplorer 3.315799740490192e-05\nteller 3.110680856345583e-05\nmileage 2.9911618110749452e-05\nforeclose 2.862552616831636e-05\nchip 2.6531947912280743e-05\n\n CITIBANK, N.A.\ndepot 0.00030378309613007765\npromotional 9.267216727888594e-05\ngoodyear 8.178787351660182e-05\nprestige 6.454169576756428e-05\nmerchant 6.021662641408726e-05\ncit 5.619833352167643e-05\ndividend 5.3841648114814206e-05\nfirearm 4.578598930519518e-05\nsear 3.997459383517256e-05\nplatinum 3.951041951838869e-05\n\n CAPITAL ONE FINANCIAL CORPORATION\nkohl 0.001678411867304545\nquicksilver 0.00024730629638458683\nsavor 8.383264284223283e-05\norchard 5.870988013205198e-05\nmerchant 5.849909422914206e-05\nventure 4.975123629540618e-05\nrebill 4.045101320669507e-05\nplatinum 3.6035707906554444e-05\nspark 2.9218141545854956e-05\nsaver 2.6755386558840852e-05\n\n Navient Solutions, LLC.\npioneer 0.0003673137645965303\nmae 0.0003399697271674276\nunsubsidized 0.00026885770523046995\ndiploma 5.7210786923799285e-05\nrecertify 5.38658296551885e-05\nsally 5.328508048956544e-05\nsalle 5.328508048956544e-05\ngraduation 5.164994361446035e-05\nsubsidize 4.538174657387051e-05\nreconsolidate 4.34521184309761e-05\n\n Ocwen Financial Corporation\nhomeward 0.0004597371937048256\nforeclose 0.00015073465073065314\nindy 0.00011407013076886652\nsuspense 6.934210110075154e-05\nhooligan 6.222007132847264e-05\nsam 6.113523603137288e-05\naffiant 4.29440511897045e-05\nduplex 3.641965405987751e-05\nunderwater 3.627725314946916e-05\nappraisal 
3.318793066953237e-05\n" ], [ "for company in companies:\n tf_idf_dict = tf_idf_by_company_dict[company]\n n_words = len([v for v in tf_idf_dict.values() if v > 0])\n print (f\"{company}'s # of words: {n_words}\")", "EQUIFAX, INC.'s # of words: 24241\nExperian Information Solutions Inc.'s # of words: 22881\nTRANSUNION INTERMEDIATE HOLDINGS, INC.'s # of words: 21200\nBANK OF AMERICA, NATIONAL ASSOCIATION's # of words: 19820\nWELLS FARGO & COMPANY's # of words: 21038\nJPMORGAN CHASE & CO.'s # of words: 19433\nCITIBANK, N.A.'s # of words: 17386\nCAPITAL ONE FINANCIAL CORPORATION's # of words: 13188\nNavient Solutions, LLC.'s # of words: 13078\nOcwen Financial Corporation's # of words: 10713\nSYNCHRONY FINANCIAL's # of words: 9969\nNATIONSTAR MORTGAGE's # of words: 9990\nU.S. BANCORP's # of words: 8568\nAMERICAN EXPRESS COMPANY's # of words: 8114\nDitech Financial LLC's # of words: 6520\nPNC Bank N.A.'s # of words: 5405\nENCORE CAPITAL GROUP INC.'s # of words: 3876\nPORTFOLIO RECOVERY ASSOCIATES INC's # of words: 3699\nDISCOVER BANK's # of words: 6392\nTD BANK US HOLDING COMPANY's # of words: 5094\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec81b401f9d3cc0cc0c73f25a655310ab972cc27
6,962
ipynb
Jupyter Notebook
examples/template.ipynb
jacklu2016/recommenders
bded904a04c63dd83369ba6813c2c334363002b1
[ "MIT" ]
null
null
null
examples/template.ipynb
jacklu2016/recommenders
bded904a04c63dd83369ba6813c2c334363002b1
[ "MIT" ]
null
null
null
examples/template.ipynb
jacklu2016/recommenders
bded904a04c63dd83369ba6813c2c334363002b1
[ "MIT" ]
null
null
null
21.893082
198
0.544815
[ [ [ "<i>Copyright (c) Microsoft Corporation. All rights reserved.</i>\n\n<i>Licensed under the MIT License.</i>", "_____no_output_____" ], [ "# Template\n\nTitle of the notebooks should be concise and it's at heading-1 level, i.e., with one \"#\" in the markdown code.", "_____no_output_____" ], [ "Right under the notebook title, a brief introduction of the notebook is placed. Usually this will be what technical/business problems that the technical contents in this notebook try to solve.", "_____no_output_____" ], [ "**Example**:", "_____no_output_____" ], [ "This notebook shows how to set a version check for `papermill`.", "_____no_output_____" ], [ "## 0 Global settings", "_____no_output_____" ], [ "Heading-2 level is for each sections in the notebook. It starts from 0, where it is usually about global settings such as module imports, global variable definitions, etc. \nName of the section starts with a capital letter. ", "_____no_output_____" ], [ "**Examples:**", "_____no_output_____" ], [ "- Module imports", "_____no_output_____" ] ], [ [ "import papermill as pm", "/anaconda/envs/recommender/lib/python3.6/importlib/_bootstrap.py:205: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n return f(*args, **kwds)\n" ] ], [ [ "- Global variables", "_____no_output_____" ], [ "For the convenience of parameterizing notebook tests, tagging of \"parameters\" can be added to the cell such that variables in the cell can be found by `papermill` in testing. ", "_____no_output_____" ] ], [ [ "PM_VERSION = \"0.15.1\"", "_____no_output_____" ] ], [ [ "## 1 Section1", "_____no_output_____" ], [ "Each of the sections can be hierarchical. Level numbers are connect by \".\". ", "_____no_output_____" ], [ "### 1.1 Sub-section1", "_____no_output_____" ], [ "Note that \n1. The Python codes in the notebook should follow PEP standard.\n2. It is preferable to put comments of codes in cell into a standalone text cell.", "_____no_output_____" ], [ "**Example:**", "_____no_output_____" ], [ "Here we want to check version of `papermill`. ", "_____no_output_____" ] ], [ [ "def check_version(version):\n current_pm_version = pm.__version__\n if version < current_pm_version:\n print(\"Error: version checked {} is smaller than library version {}\".format(version, current_pm_version))\n raise ValueError(\"Error\")\n else:\n return True", "_____no_output_____" ], [ "checked_version = check_version(PM_VERSION)", "_____no_output_____" ] ], [ [ "Codes in a notebook are tested with `papermill`. Below the example shows how to record a variable for testing purpose.", "_____no_output_____" ] ], [ [ "pm.record(\"checked_version\", checked_version)", "_____no_output_____" ] ], [ [ "#### 1.1.1 Sub-sub-section", "_____no_output_____" ], [ "### 1.2 Sub-section2", "_____no_output_____" ], [ "## 2 Section2", "_____no_output_____" ], [ "## References\n\nIt is highly encouraged to have references for technical explanations in the notebooks for people to easily understand theories and reproduce codes. ", "_____no_output_____" ], [ "**Example:**\n \n1. Jianxu Lian et al, \"xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems\", Proc. ACM KDD, London, UK, 2018, pp. 1754-1763.\n2. PySpark MLlib evaluation metrics, url: https://spark.apache.org/docs/2.3.0/mllib-evaluation-metrics.html", "_____no_output_____" ], [ "Note this section, which is not the body sections of the notebook, does not have to be numbered in section name. 
", "_____no_output_____" ] ], [ [ "import time\n# 字符类型的时间\ntss1 = '2230-10-10'\n # 转为时间数组\ntimeArray = time.strptime(tss1, \"%Y-%m-%d\")\ntimeArray \n # timeArray可以调用tm_year等\ntimeArray.tm_year # 2013\n # 转为时间戳\ntimeStamp = int(time.mktime(timeArray))\ntimeStamp # 1381419600", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
ec81b4fc99f1523596a8be10ec37d3da8c3608fe
23,484
ipynb
Jupyter Notebook
notebooks/kubeflow_pipelines/pipelines/labs/kfp_pipeline.ipynb
Jonathanpro/asl-ml-immersion
c461aa215339a6816810dfef5a92a6e375f9bc66
[ "Apache-2.0" ]
11
2021-09-08T05:39:02.000Z
2022-03-25T14:35:22.000Z
notebooks/kubeflow_pipelines/pipelines/labs/kfp_pipeline.ipynb
Jonathanpro/asl-ml-immersion
c461aa215339a6816810dfef5a92a6e375f9bc66
[ "Apache-2.0" ]
118
2021-08-28T03:09:44.000Z
2022-03-31T00:38:44.000Z
notebooks/kubeflow_pipelines/pipelines/labs/kfp_pipeline.ipynb
Jonathanpro/asl-ml-immersion
c461aa215339a6816810dfef5a92a6e375f9bc66
[ "Apache-2.0" ]
110
2021-09-02T15:01:35.000Z
2022-03-31T12:32:48.000Z
33.596567
394
0.587208
[ [ [ "# Continuous training pipeline with KFP and Cloud AI Platform", "_____no_output_____" ], [ "**Learning Objectives:**\n1. Learn how to use KF pre-build components (BiqQuery, CAIP training and predictions)\n1. Learn how to use KF lightweight python components\n1. Learn how to build a KF pipeline with these components\n1. Learn how to compile, upload, and run a KF pipeline with the command line\n\n\nIn this lab, you will build, deploy, and run a KFP pipeline that orchestrates **BigQuery** and **Cloud AI Platform** services to train, tune, and deploy a **scikit-learn** model.\n", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "# Set `PATH` to include the directory containing TFX CLI and skaffold.\nPATH = %env PATH\n%env PATH=/home/jupyter/.local/bin:{PATH}", "_____no_output_____" ] ], [ [ "## Understanding the pipeline design\n", "_____no_output_____" ], [ "The workflow implemented by the pipeline is defined using a Python based Domain Specific Language (DSL). The pipeline's DSL is in the `covertype_training_pipeline.py` file that we will generate below.\n\nThe pipeline's DSL has been designed to avoid hardcoding any environment specific settings like file paths or connection strings. These settings are provided to the pipeline code through a set of environment variables.\n", "_____no_output_____" ] ], [ [ "!grep 'BASE_IMAGE =' -A 5 pipeline/covertype_training_pipeline.py", "_____no_output_____" ] ], [ [ "The pipeline uses a mix of custom and pre-build components.\n\n- Pre-build components. The pipeline uses the following pre-build components that are included with the KFP distribution:\n - [BigQuery query component](https://github.com/kubeflow/pipelines/tree/0.2.5/components/gcp/bigquery/query)\n - [AI Platform Training component](https://github.com/kubeflow/pipelines/tree/0.2.5/components/gcp/ml_engine/train)\n - [AI Platform Deploy component](https://github.com/kubeflow/pipelines/tree/0.2.5/components/gcp/ml_engine/deploy)\n- Custom components. The pipeline uses two custom helper components that encapsulate functionality not available in any of the pre-build components. The components are implemented using the KFP SDK's [Lightweight Python Components](https://www.kubeflow.org/docs/pipelines/sdk/lightweight-python-components/) mechanism. The code for the components is in the `helper_components.py` file:\n - **Retrieve Best Run**. This component retrieves a tuning metric and hyperparameter values for the best run of a AI Platform Training hyperparameter tuning job.\n - **Evaluate Model**. 
This component evaluates a *sklearn* trained model using a provided metric and a testing dataset.\n ", "_____no_output_____" ], [ "### Exercise\n\nComplete the TODOs the pipeline file below.", "_____no_output_____" ] ], [ [ "%%writefile ./pipeline/covertype_training_pipeline.py\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"KFP pipeline orchestrating BigQuery and Cloud AI Platform services.\"\"\"\n\nimport os\n\nimport kfp\nfrom helper_components import evaluate_model, retrieve_best_run\nfrom jinja2 import Template\nfrom kfp.components import func_to_container_op\nfrom kfp.gcp import use_gcp_secret\n\n# Defaults and environment settings\nBASE_IMAGE = os.getenv(\"BASE_IMAGE\")\nTRAINER_IMAGE = os.getenv(\"TRAINER_IMAGE\")\nRUNTIME_VERSION = os.getenv(\"RUNTIME_VERSION\")\nPYTHON_VERSION = os.getenv(\"PYTHON_VERSION\")\nCOMPONENT_URL_SEARCH_PREFIX = os.getenv(\"COMPONENT_URL_SEARCH_PREFIX\")\nUSE_KFP_SA = os.getenv(\"USE_KFP_SA\")\n\nTRAINING_FILE_PATH = \"datasets/training/data.csv\"\nVALIDATION_FILE_PATH = \"datasets/validation/data.csv\"\nTESTING_FILE_PATH = \"datasets/testing/data.csv\"\n\n# Parameter defaults\nSPLITS_DATASET_ID = \"splits\"\nHYPERTUNE_SETTINGS = \"\"\"\n{\n \"hyperparameters\": {\n \"goal\": \"MAXIMIZE\",\n \"maxTrials\": 6,\n \"maxParallelTrials\": 3,\n \"hyperparameterMetricTag\": \"accuracy\",\n \"enableTrialEarlyStopping\": True,\n \"params\": [\n {\n \"parameterName\": \"max_iter\",\n \"type\": \"DISCRETE\",\n \"discreteValues\": [500, 1000]\n },\n {\n \"parameterName\": \"alpha\",\n \"type\": \"DOUBLE\",\n \"minValue\": 0.0001,\n \"maxValue\": 0.001,\n \"scaleType\": \"UNIT_LINEAR_SCALE\"\n }\n ]\n }\n}\n\"\"\"\n\n\n# Helper functions\ndef generate_sampling_query(source_table_name, num_lots, lots):\n \"\"\"Prepares the data sampling query.\"\"\"\n\n sampling_query_template = \"\"\"\n SELECT *\n FROM\n `{{ source_table }}` AS cover\n WHERE\n MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), {{ num_lots }}) IN ({{ lots }})\n \"\"\"\n query = Template(sampling_query_template).render(\n source_table=source_table_name, num_lots=num_lots, lots=str(lots)[1:-1]\n )\n\n return query\n\n\n# Create component factories\ncomponent_store = # TODO\n\nbigquery_query_op = # TODO - use the pre-built bigquery/query component\nmlengine_train_op = # TODO - use the pre-built ml_engine/train\nmlengine_deploy_op = # TODO - use the pre-built ml_engine/deploy component\nretrieve_best_run_op = # TODO - package the retrieve_best_run function into a lightweight component\nevaluate_model_op = # TODO - package the evaluate_model function into a lightweight component\n\n\[email protected](\n name=\"Covertype Classifier Training\",\n description=(\n \"The pipeline training and deploying the Covertype \"\n \"classifierpipeline_yaml\"\n ),\n)\ndef covertype_train(\n project_id,\n region,\n source_table_name,\n gcs_root,\n dataset_id,\n evaluation_metric_name,\n evaluation_metric_threshold,\n model_id,\n version_id,\n replace_existing_version,\n 
hypertune_settings=HYPERTUNE_SETTINGS,\n dataset_location=\"US\",\n):\n \"\"\"Orchestrates training and deployment of an sklearn model.\"\"\"\n\n # Create the training split\n query = generate_sampling_query(\n source_table_name=source_table_name, num_lots=10, lots=[1, 2, 3, 4]\n )\n\n training_file_path = f\"{gcs_root}/{TRAINING_FILE_PATH}\"\n\n create_training_split = bigquery_query_op(\n query=query,\n project_id=project_id,\n dataset_id=dataset_id,\n table_id=\"\",\n output_gcs_path=training_file_path,\n dataset_location=dataset_location,\n )\n\n # Create the validation split\n query = generate_sampling_query(\n source_table_name=source_table_name, num_lots=10, lots=[8]\n )\n\n validation_file_path = f\"{gcs_root}/{VALIDATION_FILE_PATH}\"\n\n create_validation_split = # TODO - use the bigquery_query_op\n\n # Create the testing split\n query = generate_sampling_query(\n source_table_name=source_table_name, num_lots=10, lots=[9])\n\n testing_file_path = f\"{gcs_root}/{TESTING_FILE_PATH}\"\n\n create_testing_split = # TODO - use the bigquery_query_op\n\n # Tune hyperparameters\n tune_args = [\n \"--training_dataset_path\",\n create_training_split.outputs[\"output_gcs_path\"],\n \"--validation_dataset_path\",\n create_validation_split.outputs[\"output_gcs_path\"],\n \"--hptune\",\n \"True\",\n ]\n\n job_dir = f\"{gcs_root}/jobdir/hypertune/{kfp.dsl.RUN_ID_PLACEHOLDER}\"\n\n hypertune = # TODO - use the mlengine_train_op\n\n # Retrieve the best trial\n get_best_trial = retrieve_best_run_op(\n project_id, hypertune.outputs['job_id'])\n\n # Train the model on a combined training and validation datasets\n job_dir = f\"{gcs_root}/jobdir/{kfp.dsl.RUN_ID_PLACEHOLDER}\"\n\n train_args = [\n \"--training_dataset_path\",\n create_training_split.outputs[\"output_gcs_path\"],\n \"--validation_dataset_path\",\n create_validation_split.outputs[\"output_gcs_path\"],\n \"--alpha\",\n get_best_trial.outputs[\"alpha\"],\n \"--max_iter\",\n get_best_trial.outputs[\"max_iter\"],\n \"--hptune\",\n \"False\",\n ]\n\n train_model = # TODO - use the mlengine_train_op\n\n # Evaluate the model on the testing split\n eval_model = evaluate_model_op(\n dataset_path=str(create_testing_split.outputs[\"output_gcs_path\"]),\n model_path=str(train_model.outputs[\"job_dir\"]),\n metric_name=evaluation_metric_name,\n )\n\n # Deploy the model if the primary metric is better than threshold\n with kfp.dsl.Condition(\n eval_model.outputs[\"metric_value\"] > evaluation_metric_threshold\n ):\n deploy_model = mlengine_deploy_op( # pylint: disable=unused-variable\n model_uri=train_model.outputs[\"job_dir\"],\n project_id=project_id,\n model_id=model_id,\n version_id=version_id,\n runtime_version=RUNTIME_VERSION,\n python_version=PYTHON_VERSION,\n replace_existing_version=replace_existing_version,\n )\n\n # Configure the pipeline to run using the service account defined\n # in the user-gcp-sa k8s secret\n if USE_KFP_SA == \"True\":\n kfp.dsl.get_pipeline_conf().add_op_transformer(\n use_gcp_secret(\"user-gcp-sa\")\n )", "_____no_output_____" ] ], [ [ "The custom components execute in a container image defined in `base_image/Dockerfile`.", "_____no_output_____" ] ], [ [ "!cat base_image/Dockerfile", "_____no_output_____" ] ], [ [ "The training step in the pipeline employes the AI Platform Training component to schedule a AI Platform Training job in a custom training container. 
The custom training image is defined in `trainer_image/Dockerfile`.", "_____no_output_____" ] ], [ [ "!cat trainer_image/Dockerfile", "_____no_output_____" ] ], [ [ "## Building and deploying the pipeline\n\nBefore deploying to AI Platform Pipelines, the pipeline DSL has to be compiled into a pipeline runtime format, also refered to as a pipeline package. The runtime format is based on [Argo Workflow](https://github.com/argoproj/argo), which is expressed in YAML. \n", "_____no_output_____" ], [ "### Configure environment settings\n\nUpdate the below constants with the settings reflecting your lab environment. \n\n- `REGION` - the compute region for AI Platform Training and Prediction\n- `ARTIFACT_STORE` - the GCS bucket created during installation of AI Platform Pipelines. The bucket name ends with the `-kubeflowpipelines-default` suffix.\n- `ENDPOINT` - set the `ENDPOINT` constant to the endpoint to your AI Platform Pipelines instance. Then endpoint to the AI Platform Pipelines instance can be found on the [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) page in the Google Cloud Console.\n\n1. Open the *SETTINGS* for your instance\n2. Use the value of the `host` variable in the *Connect to this Kubeflow Pipelines instance from a Python client via Kubeflow Pipelines SKD* section of the *SETTINGS* window.", "_____no_output_____" ] ], [ [ "REGION = \"us-central1\"\nENDPOINT = \"337dd39580cbcbd2-dot-us-central2.pipelines.googleusercontent.com\"\nARTIFACT_STORE_URI = (\n \"gs://qwiklabs-gcp-04-406b0039d298-kubeflowpipelines-default\"\n)\nPROJECT_ID = !(gcloud config get-value core/project)\nPROJECT_ID = PROJECT_ID[0]", "_____no_output_____" ] ], [ [ "### Build the trainer image", "_____no_output_____" ] ], [ [ "IMAGE_NAME = \"trainer_image\"\nTAG = \"latest\"\nTRAINER_IMAGE = f\"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}\"", "_____no_output_____" ], [ "!gcloud builds submit --timeout 15m --tag $TRAINER_IMAGE trainer_image", "_____no_output_____" ] ], [ [ "### Build the base image for custom components", "_____no_output_____" ] ], [ [ "IMAGE_NAME = \"base_image\"\nTAG = \"latest\"\nBASE_IMAGE = f\"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}\"", "_____no_output_____" ], [ "!gcloud builds submit --timeout 15m --tag $BASE_IMAGE base_image", "_____no_output_____" ] ], [ [ "### Compile the pipeline\n\nYou can compile the DSL using an API from the **KFP SDK** or using the **KFP** compiler.\n\nTo compile the pipeline DSL using the **KFP** compiler.", "_____no_output_____" ], [ "#### Set the pipeline's compile time settings\n\nThe pipeline can run using a security context of the GKE default node pool's service account or the service account defined in the `user-gcp-sa` secret of the Kubernetes namespace hosting Kubeflow Pipelines. 
If you want to use the `user-gcp-sa` service account you change the value of `USE_KFP_SA` to `True`.\n\nNote that the default AI Platform Pipelines configuration does not define the `user-gcp-sa` secret.", "_____no_output_____" ] ], [ [ "USE_KFP_SA = False\n\nCOMPONENT_URL_SEARCH_PREFIX = (\n \"https://raw.githubusercontent.com/kubeflow/pipelines/0.2.5/components/gcp/\"\n)\nRUNTIME_VERSION = \"1.15\"\nPYTHON_VERSION = \"3.7\"\n\n%env USE_KFP_SA={USE_KFP_SA}\n%env BASE_IMAGE={BASE_IMAGE}\n%env TRAINER_IMAGE={TRAINER_IMAGE}\n%env COMPONENT_URL_SEARCH_PREFIX={COMPONENT_URL_SEARCH_PREFIX}\n%env RUNTIME_VERSION={RUNTIME_VERSION}\n%env PYTHON_VERSION={PYTHON_VERSION}", "_____no_output_____" ] ], [ [ "#### Use the CLI compiler to compile the pipeline", "_____no_output_____" ], [ "### Exercise\n\nCompile the `covertype_training_pipeline.py` with the `dsl-compile` command line:", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "The result is the `covertype_training_pipeline.yaml` file. ", "_____no_output_____" ] ], [ [ "!head covertype_training_pipeline.yaml", "_____no_output_____" ] ], [ [ "### Deploy the pipeline package", "_____no_output_____" ], [ "### Exercise\n\nUpload the pipeline to the Kubeflow cluster using the `kfp` command line:", "_____no_output_____" ] ], [ [ "PIPELINE_NAME = \"covertype_continuous_training\"\n\n# TODO", "_____no_output_____" ] ], [ [ "## Submitting pipeline runs\n\nYou can trigger pipeline runs using an API from the KFP SDK or using KFP CLI. To submit the run using KFP CLI, execute the following commands. Notice how the pipeline's parameters are passed to the pipeline run.\n\n### List the pipelines in AI Platform Pipelines", "_____no_output_____" ] ], [ [ "!kfp --endpoint $ENDPOINT pipeline list", "_____no_output_____" ] ], [ [ "### Submit a run\n\nFind the ID of the `covertype_continuous_training` pipeline you uploaded in the previous step and update the value of `PIPELINE_ID` .\n", "_____no_output_____" ] ], [ [ "PIPELINE_ID = \"0918568d-758c-46cf-9752-e04a4403cd84\"", "_____no_output_____" ], [ "EXPERIMENT_NAME = \"Covertype_Classifier_Training\"\nRUN_ID = \"Run_001\"\nSOURCE_TABLE = \"covertype_dataset.covertype\"\nDATASET_ID = \"splits\"\nEVALUATION_METRIC = \"accuracy\"\nEVALUATION_METRIC_THRESHOLD = \"0.69\"\nMODEL_ID = \"covertype_classifier\"\nVERSION_ID = \"v01\"\nREPLACE_EXISTING_VERSION = \"True\"\n\nGCS_STAGING_PATH = f\"{ARTIFACT_STORE_URI}/staging\"", "_____no_output_____" ] ], [ [ "### Exercise\n\n1. Create BigQuery Dataset with DATASET_ID variable by using the `bq mk --force` command.\n2. Run the pipeline using the `kfp` command line. Here are some of the variable\nyou will have to use to pass to the pipeline:\n\n- EXPERIMENT_NAME is set to the experiment used to run the pipeline. You can choose any name you want. If the experiment does not exist it will be created by the command\n- RUN_ID is the name of the run. You can use an arbitrary name\n- PIPELINE_ID is the id of your pipeline. Use the value retrieved by the `kfp pipeline list` command\n- GCS_STAGING_PATH is the URI to the GCS location used by the pipeline to store intermediate files. By default, it is set to the `staging` folder in your artifact store.\n- REGION is a compute region for AI Platform Training and Prediction.", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "### Monitoring the run\n\nYou can monitor the run using KFP UI. 
Follow the instructor who will walk you through the KFP UI and monitoring techniques.\n\nTo access the KFP UI in your environment use the following URI:\n\nhttps://[ENDPOINT]\n\n", "_____no_output_____" ], [ "Copyright 2021 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec81c2f36e09806bb619823557a68e936fbc4e6e
170,290
ipynb
Jupyter Notebook
calibrate_from_spec.ipynb
tinoetzold/KostalPlenticoreData-for-PVLIB
3ee2f43745daf3d4ed26ebb39d70a8d98f948295
[ "MIT" ]
null
null
null
calibrate_from_spec.ipynb
tinoetzold/KostalPlenticoreData-for-PVLIB
3ee2f43745daf3d4ed26ebb39d70a8d98f948295
[ "MIT" ]
null
null
null
calibrate_from_spec.ipynb
tinoetzold/KostalPlenticoreData-for-PVLIB
3ee2f43745daf3d4ed26ebb39d70a8d98f948295
[ "MIT" ]
null
null
null
327.480769
31,510
0.693235
[ [ [ "# Kostal Plenticor Plus 4.2 - Data setup for PVLIB\n\nTo use the Plenticore Kostal 4.2 inverter in PVLIB calculations, the data needs to be collected manually since the standard SAM library for cec converters maintained by Sandia Labs does not include this item yet. \n\n[Here is the link to pvlib documentation.](https://pvlib-python.readthedocs.io/en/stable/generated/pvlib.pvsystem.retrieve_sam.html)\n\nThe following sources have been used:\n\n- DB Plenticore Technical Specification (DB_PLENTICORE-plus_en.pdf)\n- Performance Model for Grid-Connected Photovoltaic Inverters (Perf_Model_inverters.pdf)\n- SAM Photovoltaic Model Technical Reference Update (SAM_PV_Model.pdf)\n\n", "_____no_output_____" ] ], [ [ "my_data = {}\n\n# Nominal voltage --> taken from grid connection [V]\nmy_data[\"Vac\"] = 400.0\n\n# Maximum AC Power [W]\nmy_data[\"Paco\"] = 4200.0\n\n# Maximum DC Power [W]\nmax_efficiency = 0.971\nmy_data[\"Pdco\"] = my_data[\"Paco\"] / max_efficiency\n\n# Nomial DC Voltage [V]\nmy_data[\"Vdco\"] = 570.0\n\n# Power consumption during operation [W]\n# = DC Power required to start the conversion process --> from own experience\nmy_data[\"Pso\"] = 20.0\n\n# Power consumption at night [W] (including 24 h comumption measurements)\nmy_data[\"Pnt\"] = 7.9\n\n# Maximum DC Voltage [V]\nmy_data[\"Vdcmax\"] = 900.0\n\n# Maximum DC Current [A]\nmy_data[\"Idcmax\"] = 13.0\n\n# Minimum MPPT DC Voltage [V]\nmy_data[\"Mppt_low\"] = 120.0\n\n# Maximum MPPT DC Voltage [V]\nmy_data[\"Mppt_high\"] = 720.0\n\n# Nomial AC Voltage [V]\nmy_data[\"Vaco\"] = 460.0", "_____no_output_____" ] ], [ [ "## Converter efficiency for different voltage levels\n\nAs shown in the Plenticore specification, the efficiency for different voltage levels looks like this:\n\n![](pictures/Efficiency_Data.png?raw=true)\n\nData for reference voltage (570V), low voltage (180V) and high voltage (720V) is added manually to following lists of tuples, extracted from aboves graphic.", "_____no_output_____" ] ], [ [ "# Offset used at DC Power Output = 0W for low volatage curve (guess, is not in spec):\noffset_pso_low = 20.0 # W\noffset_pso_high = 50.0 # W\n\n\n# Definition of the Tuple: (Power DC, Power AC = Power DC * Efficiency(Pac/Pac,r))\n\n# Ref. 
Voltage: 570 V\nrated_power_ref = [(my_data[\"Pso\"], 0.0),\n (0.05 * my_data[\"Pdco\"], 0.05 * 0.879 * my_data[\"Pdco\"]), # 10%\n (0.10 * my_data[\"Pdco\"], 0.10 * 0.908 * my_data[\"Pdco\"]), # 10%\n (0.15 * my_data[\"Pdco\"], 0.15 * 0.930 * my_data[\"Pdco\"]), # 15%\n (0.20 * my_data[\"Pdco\"], 0.20 * 0.942 * my_data[\"Pdco\"]), # 20%\n (0.25 * my_data[\"Pdco\"], 0.25 * 0.949 * my_data[\"Pdco\"]), # 25%\n (0.30 * my_data[\"Pdco\"], 0.30 * 0.953 * my_data[\"Pdco\"]), # 30%\n (0.35 * my_data[\"Pdco\"], 0.35 * 0.956 * my_data[\"Pdco\"]), # 35%\n (0.40 * my_data[\"Pdco\"], 0.40 * 0.96 * my_data[\"Pdco\"]), # 40%\n (0.5 * my_data[\"Pdco\"], 0.5 * 0.965 * my_data[\"Pdco\"]), # 50%\n (0.6 * my_data[\"Pdco\"], 0.6 * 0.967 * my_data[\"Pdco\"]), # 60%\n (0.75 * my_data[\"Pdco\"], 0.75 * 0.969 * my_data[\"Pdco\"]), # 75%\n (0.82 * my_data[\"Pdco\"], 0.82 * 0.970 * my_data[\"Pdco\"]), # 82%\n (1.00 * my_data[\"Pdco\"], 1.0 * 0.971 * my_data[\"Pdco\"]), # 100%\n ]\n\n# Low Voltage: 180 V\nrated_power_low = [(my_data[\"Pso\"] + offset_pso_low, 0.0),\n (0.05 * my_data[\"Pdco\"], 0.05 * 0.87 * my_data[\"Pdco\"]), # 10%\n (0.10 * my_data[\"Pdco\"], 0.10 * 0.903 * my_data[\"Pdco\"]), # 10%\n (0.15 * my_data[\"Pdco\"], 0.15 * 0.925 * my_data[\"Pdco\"]), # 15%\n (0.20 * my_data[\"Pdco\"], 0.20 * 0.938 * my_data[\"Pdco\"]), # 20%\n (0.25 * my_data[\"Pdco\"], 0.25 * 0.944 * my_data[\"Pdco\"]), # 25%\n (0.30 * my_data[\"Pdco\"], 0.30 * 0.948 * my_data[\"Pdco\"]), # 30%\n (0.35 * my_data[\"Pdco\"], 0.35 * 0.952 * my_data[\"Pdco\"]), # 35%\n (0.40 * my_data[\"Pdco\"], 0.40 * 0.954 * my_data[\"Pdco\"]), # 40%\n (0.5 * my_data[\"Pdco\"], 0.5 * 0.958 * my_data[\"Pdco\"]), # 50%\n (0.6 * my_data[\"Pdco\"], 0.6 * 0.960 * my_data[\"Pdco\"]), # 60%\n (0.75 * my_data[\"Pdco\"], 0.75 * 0.962 * my_data[\"Pdco\"]), # 75%\n (0.82 * my_data[\"Pdco\"], 0.82 * 0.962 * my_data[\"Pdco\"]), # 82%\n (1.00 * my_data[\"Pdco\"], 1.0 * 0.963 * my_data[\"Pdco\"]), # 100%\n]\n\n# high Voltage: 720 V\nrated_power_high = [(my_data[\"Pso\"] + offset_pso_high , 0.0),\n (0.10 * my_data[\"Pdco\"], 0.10 * 0.870 * my_data[\"Pdco\"]), # 10%\n (0.15 * my_data[\"Pdco\"], 0.15 * 0.904 * my_data[\"Pdco\"]), # 15%\n (0.20 * my_data[\"Pdco\"], 0.20 * 0.925 * my_data[\"Pdco\"]), # 20%\n (0.25 * my_data[\"Pdco\"], 0.25 * 0.935 * my_data[\"Pdco\"]), # 25%\n (0.30 * my_data[\"Pdco\"], 0.30 * 0.941 * my_data[\"Pdco\"]), # 30%\n (0.35 * my_data[\"Pdco\"], 0.35 * 0.945 * my_data[\"Pdco\"]), # 35%\n (0.40 * my_data[\"Pdco\"], 0.40 * 0.950 * my_data[\"Pdco\"]), # 40%\n (0.5 * my_data[\"Pdco\"], 0.5 * 0.958 * my_data[\"Pdco\"]), # 50%\n (0.6 * my_data[\"Pdco\"], 0.6 * 0.960 * my_data[\"Pdco\"]), # 60%\n (0.75 * my_data[\"Pdco\"], 0.75 * 0.963 * my_data[\"Pdco\"]), # 75%\n (0.82 * my_data[\"Pdco\"], 0.82 * 0.964 * my_data[\"Pdco\"]), # 82%\n (1.00 * my_data[\"Pdco\"], 1.0 * 0.965 * my_data[\"Pdco\"]), # 100%\n ]\n", "_____no_output_____" ], [ "import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline", "_____no_output_____" ], [ "# Polynomial regression of data for reference voltage:\nCoeff = np.polyfit(*zip(*rated_power_ref),2)\nPolynom_ref = np.poly1d(np.polyfit(*zip(*rated_power_ref), 2))\nmy_data[\"C0\"] = Coeff[0]\n\nc0_ref = Coeff[0]\n\n# Polynomial regression of data for low voltage:\nCoeff = np.polyfit(*zip(*rated_power_low),2)\nPolynom_low = np.poly1d(np.polyfit(*zip(*rated_power_low), 2))\nc0_low = Coeff[1]\n\n# Polynomial regression of data for high voltage:\nCoeff = np.polyfit(*zip(*rated_power_high),2)\nPolynom_high 
= np.poly1d(np.polyfit(*zip(*rated_power_high), 2))\nc0_high = Coeff[1]\n", "_____no_output_____" ], [ "plt.scatter(*zip(*rated_power_ref))\nplt.scatter(*zip(*rated_power_low))\nplt.scatter(*zip(*rated_power_high))\n\nx=list()\ny=list()\nfor i in range(4300):\n xval = float(i)\n yval = Polynom_ref(xval)\n x.append(xval)\n y.append(yval)\n\nplt.plot(x,y,c=\"red\",alpha=0.5 )\nplt.xlabel(\"Power AC [W]\")\nplt.ylabel(\"Power DC [W]\")\nplt.show()", "_____no_output_____" ], [ "my_data", "_____no_output_____" ] ], [ [ "## Variation of DC input voltage @ Max. DC Power (Coefficient C1)", "_____no_output_____" ] ], [ [ "from scipy.stats import linregress\n\nc1_calibration_data = [(0, Polynom_ref(my_data[\"Pdco\"])),\n (180.0 - 570.0, Polynom_low(my_data[\"Pdco\"])),\n (720.0 - 570.0, Polynom_high(my_data[\"Pdco\"]))]\nb, a, r, p, std = linregress(*zip(*c1_calibration_data))\n\n# Since the computet b-Value does not make physical sense, it will be set to zero. Otherwise, PVLib results are not valid.\n#my_data[\"C1\"] = b\nmy_data[\"C1\"] = 0.0", "_____no_output_____" ], [ "plt.scatter(*zip(*c1_calibration_data))\nplt.plot([-400,130],[a,a+130*b],c=\"red\",alpha=0.5)\nplt.ylabel(\"Power AC [W]\")\nplt.xlabel(\"V_DC - V_nom [V]\")\nplt.show()", "_____no_output_____" ] ], [ [ "The Sandia-Approach is not best suiting for the Plenticore, since the linear regression model used for Parameter C1 does not fit to the characteristics of the Plenticore converter (the efficiency is max at the nominal voltage (570 V)). Therefore, C1 does not really repesent the characteristics of the converter.", "_____no_output_____" ], [ "## Variation of DC input voltage @ 0 DC Power (Coefficient C2)", "_____no_output_____" ] ], [ [ "c2_calibration_data = [(0, my_data[\"Pso\"]),\n (180.0 - 570.0,my_data[\"Pso\"] + offset_pso_low),\n (720.0 - 570.0,my_data[\"Pso\"] + offset_pso_high)]\nb, a, r, p, std = linregress(*zip(*c2_calibration_data))\n\n# Since the computet b-Value does not make physical sense, it will be set to zero. Otherwise, PVLib results are not valid.\n#my_data[\"C2\"] = b\nmy_data[\"C2\"] = 0.0", "_____no_output_____" ], [ "plt.scatter(*zip(*c2_calibration_data))\nplt.plot([-400,130],[a,a+130*b],c=\"red\",alpha=0.5)\nplt.ylabel(\"Power Pso [W]\")\nplt.xlabel(\"V_DC - V_nom [V]\")\nplt.show()", "_____no_output_____" ] ], [ [ "## Variation of C0 Coefficients for ref, low and high voltage (Coefficient C3)", "_____no_output_____" ] ], [ [ "c3_calibration_data = [(0, c0_ref),\n (180.0 - 570.0, c0_low),\n (720.0 - 570.0, c0_high)]\nb, a, r, p, std = linregress(*zip(*c3_calibration_data))\n\nmy_data[\"C3\"] = b", "_____no_output_____" ], [ "plt.scatter(*zip(*c3_calibration_data))\nplt.plot([-400,130],[a,a+130*b],c=\"red\",alpha=0.5)\nplt.ylabel(\"Coefficient C0\")\nplt.xlabel(\"V_DC - V_nom [V]\")\nplt.show()", "_____no_output_____" ] ], [ [ "# Summary - Converter Data", "_____no_output_____" ] ], [ [ "my_data", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "my_df = pd.Series(my_data)", "_____no_output_____" ], [ "my_df", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec81cde3d6f4d7d694c7433d4223fa80f9874b91
4,974
ipynb
Jupyter Notebook
docs/source/auto_examples/plot_accentuation_feature.ipynb
giovana-morais/carat
982af4dde63e8560ccc9120f3767f1420a545b67
[ "MIT" ]
11
2019-08-08T13:54:59.000Z
2021-12-09T19:15:12.000Z
docs/source/auto_examples/plot_accentuation_feature.ipynb
giovana-morais/carat
982af4dde63e8560ccc9120f3767f1420a545b67
[ "MIT" ]
42
2019-07-01T19:17:24.000Z
2022-03-15T19:46:09.000Z
docs/source/auto_examples/plot_accentuation_feature.ipynb
giovana-morais/carat
982af4dde63e8560ccc9120f3767f1420a545b67
[ "MIT" ]
3
2021-02-10T13:21:59.000Z
2022-02-17T11:47:06.000Z
26.741935
106
0.510253
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Plot accentuation feature\n\n\nThis example shows how to compute an accentuation feature from de audio waveform.\n\n", "_____no_output_____" ] ], [ [ "# Code source: Martín Rocamora\n# License: MIT", "_____no_output_____" ] ], [ [ "Imports\n - matplotlib for visualization\n\n\n", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport carat", "_____no_output_____" ] ], [ [ "The accentuation feature is based on the Spectral flux,\nthat consists in seizing the changes in the spectral magnitude\nof the audio signal along different frequency bands.\nIn principle, the feature value is high when a note has been\narticulated and close to zero otherwise.\n\nFirst, we'll load one of the audio files included in `carat`.\nWe get the path to the audio file example number 1, and load 10 seconds of the file.\n\n", "_____no_output_____" ] ], [ [ "audio_path = carat.util.example_audio_file(num_file=1)\n\ny, sr = carat.audio.load(audio_path, sr=None, duration=10.0)", "_____no_output_____" ] ], [ [ "Next, we'll load the annotations provided for the example audio file.\nWe get the path to the annotations file corresponding to example number 1,\nand then we load beats and downbeats, along with their labels.\n\n", "_____no_output_____" ] ], [ [ "annotations_path = carat.util.example_beats_file(num_file=1)\n\nbeats, beat_labs = carat.annotations.load_beats(annotations_path)\ndownbeats, downbeat_labs = carat.annotations.load_downbeats(annotations_path)", "_____no_output_____" ] ], [ [ "Then, we'll compute the accentuation feature.\n\n**Note:** This example is tailored towards the rhythmic patterns of the lowest\nsounding of the three drum types taking part in the recording, so the analysis\nfocuses on the low frequencies (20 to 200 Hz).\n\n", "_____no_output_____" ] ], [ [ "acce, times, _ = carat.features.accentuation_feature(y, sr, minfreq=20, maxfreq=200)", "_____no_output_____" ] ], [ [ "Finally we plot the audio waveform, the beat annotations and the accentuation feature values.\n\n", "_____no_output_____" ] ], [ [ "# plot waveform and accentuation feature\nplt.figure(figsize=(12, 6))\n# plot waveform\nax1 = plt.subplot(2, 1, 1)\ncarat.display.wave_plot(y, sr, ax=ax1, beats=beats, beat_labs=beat_labs)\n# plot accentuation feature\nax2 = plt.subplot(2, 1, 2, sharex=ax1)\ncarat.display.feature_plot(acce, times, ax=ax2, beats=beats, beat_labs=beat_labs)\nplt.tight_layout()\n\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec81d3b9b722d0dfa2b2f343f5a84d93569c4fde
204,770
ipynb
Jupyter Notebook
Kim_Lowry_DS_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(1).ipynb
hBar2013/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
9bf194528840a39f6fcb3babd238cf192e451df6
[ "MIT" ]
null
null
null
Kim_Lowry_DS_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(1).ipynb
hBar2013/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
9bf194528840a39f6fcb3babd238cf192e451df6
[ "MIT" ]
null
null
null
Kim_Lowry_DS_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(1).ipynb
hBar2013/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
9bf194528840a39f6fcb3babd238cf192e451df6
[ "MIT" ]
null
null
null
72.178357
60,200
0.658881
[ [ [ "<a href=\"https://colab.research.google.com/github/hBar2013/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/Kim_Lowry_DS_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(1).ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Data Science Unit 1 Sprint Challenge 2\n\n## Data Wrangling and Storytelling\n\nTaming data from its raw form into informative insights and stories.", "_____no_output_____" ], [ "## Data Wrangling\n\nIn this Sprint Challenge you will first \"wrangle\" some data from [Gapminder](https://www.gapminder.org/about-gapminder/), a Swedish non-profit co-founded by Hans Rosling. \"Gapminder produces free teaching resources making the world understandable based on reliable statistics.\"\n- [Cell phones (total), by country and year](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--cell_phones_total--by--geo--time.csv)\n- [Population (total), by country and year](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv)\n- [Geo country codes](https://github.com/open-numbers/ddf--gapminder--systema_globalis/blob/master/ddf--entities--geo--country.csv)\n\nThese two links have everything you need to successfully complete the first part of this sprint challenge.\n- [Pandas documentation: Working with Text Data](https://pandas.pydata.org/pandas-docs/stable/text.html) (one question)\n- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf) (everything else)", "_____no_output_____" ], [ "### Part 0. Load data\n\nYou don't need to add or change anything here. Just run this cell and it loads the data for you, into three dataframes.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ncell_phones = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--cell_phones_total--by--geo--time.csv')\n\npopulation = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv')\n\ngeo_country_codes = (pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv')\n .rename(columns={'country': 'geo', 'name': 'country'}))", "_____no_output_____" ], [ "cell_phones.head()", "_____no_output_____" ], [ "population.head()", "_____no_output_____" ], [ "geo_country_codes.head()", "_____no_output_____" ] ], [ [ "### Part 1. 
Join data", "_____no_output_____" ], [ "First, join the `cell_phones` and `population` dataframes (with an inner join on `geo` and `time`).\n\nThe resulting dataframe's shape should be: (8590, 4)", "_____no_output_____" ] ], [ [ "cell_pop = pd.merge(population, cell_phones, how='inner', on=['geo','time'])", "_____no_output_____" ], [ "cell_pop.shape", "_____no_output_____" ] ], [ [ "Then, select the `geo` and `country` columns from the `geo_country_codes` dataframe, and join with your population and cell phone data.\n\nThe resulting dataframe's shape should be: (8590, 5)", "_____no_output_____" ] ], [ [ "geo_country_codes = geo_country_codes[['geo','country']]", "_____no_output_____" ], [ "geo_country_codes.head()", "_____no_output_____" ], [ "final = pd.merge(cell_pop, geo_country_codes)", "_____no_output_____" ], [ "final.head()", "_____no_output_____" ], [ "final.shape", "_____no_output_____" ] ], [ [ "***Optional bonus for Part 1: Take initiative to join more data.***", "_____no_output_____" ], [ "### Part 2. Make features", "_____no_output_____" ], [ "Calculate the number of cell phones per person, and add this column onto your dataframe.\n\n(You've calculated correctly if you get 1.220 cell phones per person in the United States in 2017.)", "_____no_output_____" ] ], [ [ "no_zeros = final[final['cell_phones_total'] > 0]", "_____no_output_____" ], [ "no_zeros.head()", "_____no_output_____" ], [ "no_zeros['cell_per_capita'] = no_zeros['cell_phones_total']/no_zeros['population_total']", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "no_zeros.head()", "_____no_output_____" ] ], [ [ "Modify the `geo` column to make the geo codes uppercase instead of lowercase.", "_____no_output_____" ] ], [ [ "no_zeros['geo'] = no_zeros['geo'].str.upper()", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "no_zeros.head()", "_____no_output_____" ] ], [ [ "***Optional bonus for Part 2: Take initiative to make more features.***", "_____no_output_____" ], [ "### Part 3. Process data", "_____no_output_____" ], [ "Use the describe function, to describe your dataframe's numeric columns, and then its non-numeric columns.\n\n(You'll see the time period ranges from 1960 to 2017, and there are 195 unique countries represented.)", "_____no_output_____" ] ], [ [ "no_zeros.describe()", "_____no_output_____" ], [ "no_zeros.describe(exclude=np.number)", "_____no_output_____" ] ], [ [ "In 2017, what were the top 5 countries with the most cell phones total?\n\nYour list of countries should have these totals:\n\n| country | cell phones total |\n|:-------:|:-----------------:|\n| ? | 1,474,097,000 |\n| ? | 1,168,902,277 |\n| ? | 458,923,202 |\n| ? | 395,881,000 |\n| ? 
| 236,488,548 |\n\n", "_____no_output_____" ] ], [ [ "Cell_2017 = no_zeros[no_zeros['time'] == 2017]", "_____no_output_____" ], [ "Cell_2017.nlargest(5, ['cell_phones_total'])", "_____no_output_____" ], [ "# This optional code formats float numbers with comma separators\npd.options.display.float_format = '{:,}'.format", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "2017 was the first year that China had more cell phones than people.\n\nWhat was the first year that the USA had more cell phones than people?", "_____no_output_____" ] ], [ [ "usa_cell = no_zeros[(no_zeros['geo'] == 'USA') & (no_zeros['cell_per_capita'] > 1)]", "_____no_output_____" ], [ "usa_cell.head(1)", "_____no_output_____" ] ], [ [ "***Optional bonus for Part 3: Take initiative to do more exploratory data analysis.***", "_____no_output_____" ], [ "### (OPTIONAL) Part 4. Reshape data", "_____no_output_____" ], [ "*This part is not needed to pass the sprint challenge, only to get a 3! Only work on this after completing the other sections.*\n\nCreate a pivot table:\n- Columns: Years 2007—2017\n- Rows: China, India, United States, Indonesia, Brazil (order doesn't matter)\n- Values: Cell Phones Total\n\nThe table's shape should be: (5, 11)", "_____no_output_____" ] ], [ [ "geo_list = ('CHN', 'IND', 'IDN', 'USA', 'BRA')\nselect_pivot = no_zeros[no_zeros['geo'] == geo_list]\nselect_pivot(select_pivot['time']]", "_____no_output_____" ] ], [ [ "Sort these 5 countries, by biggest increase in cell phones from 2007 to 2017.\n\nWhich country had 935,282,277 more cell phones in 2017 versus 2007?", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "If you have the time and curiosity, what other questions can you ask and answer with this data?", "_____no_output_____" ], [ "## Data Storytelling\n\nIn this part of the sprint challenge you'll work with a dataset from **FiveThirtyEight's article, [Every Guest Jon Stewart Ever Had On ‘The Daily Show’](https://fivethirtyeight.com/features/every-guest-jon-stewart-ever-had-on-the-daily-show/)**!", "_____no_output_____" ], [ "### Part 0 — Run this starter code\n\nYou don't need to add or change anything here. Just run this cell and it loads the data for you, into a dataframe named `df`.\n\n(You can explore the data if you want, but it's not required to pass the Sprint Challenge.)", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nurl = 'https://raw.githubusercontent.com/fivethirtyeight/data/master/daily-show-guests/daily_show_guests.csv'\ndf = pd.read_csv(url).rename(columns={'YEAR': 'Year', 'Raw_Guest_List': 'Guest'})\n\ndef get_occupation(group):\n if group in ['Acting', 'Comedy', 'Musician']:\n return 'Acting, Comedy & Music'\n elif group in ['Media', 'media']:\n return 'Media'\n elif group in ['Government', 'Politician', 'Political Aide']:\n return 'Government and Politics'\n else:\n return 'Other'\n \ndf['Occupation'] = df['Group'].apply(get_occupation)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "### Part 1 — What's the breakdown of guests’ occupations per year?\n\nFor example, in 1999, what percentage of guests were actors, comedians, or musicians? What percentage were in the media? What percentage were in politics? What percentage were from another occupation?\n\nThen, what about in 2000? In 2001? 
And so on, up through 2015.\n\nSo, **for each year of _The Daily Show_, calculate the percentage of guests from each occupation:**\n- Acting, Comedy & Music\n- Government and Politics\n- Media\n- Other\n\n#### Hints:\nYou can make a crosstab. (See pandas documentation for examples, explanation, and parameters.)\n\nYou'll know you've calculated correctly when the percentage of \"Acting, Comedy & Music\" guests is 90.36% in 1999, and 45% in 2015.\n\n**Optional Bonus Challenge:** Do additional insightful data exploration.", "_____no_output_____" ] ], [ [ "ct = pd.crosstab(df['Year'], df['Occupation'], margins=True)", "_____no_output_____" ], [ "ct", "_____no_output_____" ], [ "ct = pd.crosstab(df['Year'], df['Occupation'], margins=True)", "_____no_output_____" ], [ "ct2 = pd.crosstab(df['Year'], df['Occupation'], normalize='index') * 100", "_____no_output_____" ], [ "ct2", "_____no_output_____" ] ], [ [ "### Part 2 — Recreate this explanatory visualization:", "_____no_output_____" ] ], [ [ "from IPython.display import display, Image\npng = 'https://fivethirtyeight.com/wp-content/uploads/2015/08/hickey-datalab-dailyshow.png'\nexample = Image(png, width=500)\ndisplay(example)", "_____no_output_____" ], [ "ct2 = ct2.drop(columns=['Other'])", "_____no_output_____" ], [ "year = ct2.index.tolist()", "_____no_output_____" ], [ "#!pip install --upgrade seaborn\nimport seaborn as sns\nplt.style.use('fivethirtyeight')\n\n\nfig, ax = plt.subplots(figsize=(8,6))\nplt.ylim(-5,110)\nax.set(yticks=range(0,125,25), xticks=range(2000,2016,4),\n xticklabels=('2000', \"'04\", \"'08\", \"'12\"))\nax.text(x=1997,y=120,s=\"Who Got to Be On 'The Daily Show'?\",\n fontsize=18,fontweight='bold')\n\nax.text(x=1997,y=114,s='Occupation of guests, by year',\n fontsize=15)\n\nax.text(x=2001,y=80,s='Acting, Comedy & Music',color='#0F95D7',\n fontsize=12, fontweight='bold')\n\nax.text(x=2007.5,y=55,s='Media',color='#810F7C',\n fontsize=12, fontweight='bold')\n\nax.text(x=2008.5,y=6,s='Government and Politics',color='#FF2700',\n fontsize=12, fontweight='bold')\n\nax1 = sns.lineplot(x=year, y=ct2['Acting, Comedy & Music'].tolist(), color='#0F95D7', lw=2.5)\nax2 = sns.lineplot(x=year, y=ct2['Government and Politics'].tolist(), color='#FF2700', lw=2.5)\nax3 = sns.lineplot(x=year, y=ct2['Media'].tolist(), color='#810F7C', lw=2.5)", "_____no_output_____" ] ], [ [ "**Hints:**\n- You can choose any Python visualization library you want. I've verified the plot can be reproduced with matplotlib, pandas plot, or seaborn. I assume other libraries like altair or plotly would work too.\n- If you choose to use seaborn, you may want to upgrade the version to 0.9.0.\n\n**Expectations:** Your plot should include:\n- 3 lines visualizing \"occupation of guests, by year.\" The shapes of the lines should look roughly identical to 538's example. Each line should be a different color. (But you don't need to use the _same_ colors as 538.)\n- Legend or labels for the lines. (But you don't need each label positioned next to its line or colored like 538.)\n- Title in the upper left: _\"Who Got To Be On 'The Daily Show'?\"_ with more visual emphasis than the subtitle. 
(Bolder and/or larger font.)\n- Subtitle underneath the title: _\"Occupation of guests, by year\"_\n\n**Optional Bonus Challenge:**\n- Give your plot polished aesthetics, with improved resemblance to the 538 example.\n- Any visual element not specifically mentioned in the expectations is an optional bonus.", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "### (OPTIONAL) Part 3 — Who were the top 10 guests on _The Daily Show_?\n\n*This part is not needed to pass the sprint challenge, only to get a 3! Only work on this after completing the other sections.*\n\n**Make a plot** that shows their names and number of appearances.\n\n**Add a title** of your choice.\n\n**Expectations:** It's ok to make a simple, quick plot: exploratory, instead of explanatory. \n\n**Optional Bonus Challenge:** You can change aesthetics and add more annotation. For example, in a relevant location, could you add the text \"19\" to show that Fareed Zakaria appeared 19 times on _The Daily Show_? (And so on, for each of the top 10 guests.)", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec81e05029dd08f49dbe022784a67c12c36fa18c
16,945
ipynb
Jupyter Notebook
Make_csv_file_for_make_ML_model.ipynb
ishancoderr/Rainfall_predictor
85246e5e96b72733b6928e1a5998c1866fd92e63
[ "MIT" ]
1
2021-10-01T01:48:43.000Z
2021-10-01T01:48:43.000Z
Make_csv_file_for_make_ML_model.ipynb
scumechanics/Rainfall_predictor
85246e5e96b72733b6928e1a5998c1866fd92e63
[ "MIT" ]
null
null
null
Make_csv_file_for_make_ML_model.ipynb
scumechanics/Rainfall_predictor
85246e5e96b72733b6928e1a5998c1866fd92e63
[ "MIT" ]
2
2021-10-01T01:48:45.000Z
2022-01-12T14:57:30.000Z
32.092803
325
0.326114
[ [ [ "<a href=\"https://colab.research.google.com/github/ishancoderr/Rainfall_predictor/blob/main/Make_csv_file_for_make_ML_model.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "https://www.scirp.org/journal/paperinformation.aspx?paperid=81276\n\n\nhttps://en.climate-data.org/asia/sri-lanka/uva/badulla-764256/\n\nhttps://books.google.lk/books?id=L3ckDwAAQBAJ&pg=PA270&lpg=PA270&dq=rainfall+data+with+cumulative+time+in+badulla&source=bl&ots=3YCsce_9-A&sig=ACfU3U1hUHgX5JoFKeC2LIYimb4GFj_CsA&hl=en&sa=X&ved=2ahUKEwjyy9av1aHzAhVBjeYKHXS_DAkQ6AF6BAglEAM#v=onepage&q=rainfall%20data%20with%20cumulative%20time%20in%20badulla&f=false", "_____no_output_____" ], [ "**Importing the libraries**", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd", "_____no_output_____" ] ], [ [ "**Importing Rainfall data** ", "_____no_output_____" ] ], [ [ "\nimport io\nimport requests\nurl='https://raw.githubusercontent.com/ishancoderr/Rainfall_predictor/main/Rainfall.csv'\ns=requests.get(url).content\ndataset=pd.read_csv(io.StringIO(s.decode('utf-8')))\n", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ], [ "column_names=dataset.columns.values.tolist()\nprint(column_names)", "['Year', 'Month', 'Day', 'Avg_Accumulation (mm)', 'Duration (hr)', 'Avg_intensity (mm/hr)', 'occur']\n" ] ], [ [ "**import Encoders**", "_____no_output_____" ] ], [ [ "from sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import LabelEncoder", "_____no_output_____" ], [ "labelencode=LabelEncoder()\ndataset['occur_']=labelencode.fit_transform(dataset['occur'])\nlabelencode=LabelEncoder()\ndataset['Month_']=labelencode.fit_transform(dataset['Month'])\nlabelencode=LabelEncoder()", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ] ], [ [ "**Make new data frame with this dataset**", "_____no_output_____" ] ], [ [ "new_dataset= dataset[[ 'Avg_Accumulation (mm)', 'Duration (hr)', 'Avg_intensity (mm/hr)', 'occur_']]", "_____no_output_____" ], [ "new_dataset.head()", "_____no_output_____" ], [ "new_dataset.to_csv('rainfall_data.csv')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec81e583dc2d3f24431ff4fc9374eda5dfec2ccd
15,221
ipynb
Jupyter Notebook
notebooks/neural_network_and_data_loading.ipynb
sscardapane/jax
21e5eb13dd879f92b6ff94e18bf33a24ed8cc2a7
[ "ECL-2.0", "Apache-2.0" ]
1
2021-12-10T13:38:18.000Z
2021-12-10T13:38:18.000Z
notebooks/neural_network_and_data_loading.ipynb
sscardapane/jax
21e5eb13dd879f92b6ff94e18bf33a24ed8cc2a7
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
notebooks/neural_network_and_data_loading.ipynb
sscardapane/jax
21e5eb13dd879f92b6ff94e18bf33a24ed8cc2a7
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
33.899777
442
0.529006
[ [ [ "##### Copyright 2018 Google LLC.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");", "_____no_output_____" ], [ "Licensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttps://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.", "_____no_output_____" ], [ "# Training a Simple Neural Network, with PyTorch Data Loading\n\n_Dougal Maclaurin, Peter Hawkins, Matthew Johnson, Roy Frostig, Alex Wiltschko, Chris Leary_\n\n![JAX](https://raw.githubusercontent.com/google/jax/master/images/jax_logo_250px.png)\n\nLet's combine everything we showed in the [quickstart notebook](https://colab.research.google.com/github/google/jax/blob/master/notebooks/quickstart.ipynb) to train a simple neural network. We will first specify and train a simple MLP on MNIST using JAX for the computation. We will use PyTorch's data loading API to load images and labels (because it's pretty great, and the world doesn't need yet another data loading library).\n\nOf course, you can use JAX with any API that is compatible with NumPy to make specifying the model a bit more plug-and-play. Here, just for explanatory purposes, we won't use any neural network libraries or special APIs for builidng our model.", "_____no_output_____" ] ], [ [ "!pip install --upgrade https://storage.googleapis.com/jax-wheels/cuda92/jaxlib-0.1.3-py3-none-linux_x86_64.whl\n!pip install --upgrade jax", "_____no_output_____" ], [ "from __future__ import print_function, division, absolute_import\nimport jax.numpy as np\nfrom jax import grad, jit, vmap\nfrom jax import random", "_____no_output_____" ] ], [ [ "### Hyperparameters\nLet's get a few bookkeeping items out of the way.", "_____no_output_____" ] ], [ [ "# A helper function to randomly initialize weights and biases\n# for a dense neural network layer\ndef random_layer_params(m, n, key, scale=1e-2):\n w_key, b_key = random.split(key)\n return scale * random.normal(w_key, (n, m)), scale * random.normal(b_key, (n,))\n\n# Initialize all layers for a fully-connected neural network with sizes \"sizes\"\ndef init_network_params(sizes, key):\n keys = random.split(key, len(sizes))\n return [random_layer_params(m, n, k) for m, n, k in zip(sizes[:-1], sizes[1:], keys)]\n\nlayer_sizes = [784, 512, 512, 10]\nparam_scale = 0.1\nstep_size = 0.001\nnum_epochs = 10\nbatch_size = 128\nn_targets = 10\nparams = init_network_params(layer_sizes, random.PRNGKey(0))", "_____no_output_____" ] ], [ [ "### Auto-batching predictions\n\nLet us first define our prediction function. Note that we're defining this for a _single_ image example. 
We're going to use JAX's `vmap` function to automatically handle mini-batches, with no performance penalty.", "_____no_output_____" ] ], [ [ "from jax.scipy.misc import logsumexp\n\ndef relu(x):\n return np.maximum(0, x)\n\ndef predict(params, image):\n # per-example predictions\n activations = image\n for w, b in params[:-1]:\n outputs = np.dot(w, activations) + b\n activations = relu(outputs)\n \n final_w, final_b = params[-1]\n logits = np.dot(final_w, activations) + final_b\n return logits - logsumexp(logits)", "_____no_output_____" ] ], [ [ "Let's check that our prediction function only works on single images.", "_____no_output_____" ] ], [ [ "# This works on single examples\nrandom_flattened_image = random.normal(random.PRNGKey(1), (28 * 28,))\npreds = predict(params, random_flattened_image)\nprint(preds.shape)", "_____no_output_____" ], [ "# Doesn't work with a batch\nrandom_flattened_images = random.normal(random.PRNGKey(1), (10, 28 * 28))\ntry:\n preds = predict(params, random_flattened_images)\nexcept TypeError:\n print('Invalid shapes!')", "_____no_output_____" ], [ "# Let's upgrade it to handle batches using `vmap`\n\n# Make a batched version of the `predict` function\nbatched_predict = vmap(predict, in_axes=(None, 0))\n\n# `batched_predict` has the same call signature as `predict`\nbatched_preds = batched_predict(params, random_flattened_images)\nprint(batched_preds.shape)", "_____no_output_____" ] ], [ [ "At this point, we have all the ingredients we need to define our neural network and train it. We've built an auto-batched version of `predict`, which we should be able to use in a loss function. We should be able to use `grad` to take the derivative of the loss with respect to the neural network parameters. Last, we should be able to use `jit` to speed up everything.", "_____no_output_____" ], [ "### Utility and loss functions", "_____no_output_____" ] ], [ [ "def one_hot(x, k, dtype=np.float32):\n \"\"\"Create a one-hot encoding of x of size k.\"\"\"\n return np.array(x[:, None] == np.arange(k), dtype)\n \ndef accuracy(params, images, targets):\n target_class = np.argmax(targets, axis=1)\n predicted_class = np.argmax(batched_predict(params, images), axis=1)\n return np.mean(predicted_class == target_class)\n\ndef loss(params, images, targets):\n preds = batched_predict(params, images)\n return -np.sum(preds * targets)\n\n@jit\ndef update(params, x, y):\n grads = grad(loss)(params, x, y)\n return [(w - step_size * dw, b - step_size * db)\n for (w, b), (dw, db) in zip(params, grads)]", "_____no_output_____" ] ], [ [ "### Data Loading with PyTorch\n\nJAX is laser-focused on program transformations and accelerator-backed NumPy, so we don't include data loading or munging in the JAX library. There are already a lot of great data loaders out there, so let's just use them instead of reinventing anything. 
We'll grab PyTorch's data loader, and make a tiny shim to make it work with NumPy arrays.", "_____no_output_____" ] ], [ [ "!pip install torch torchvision", "_____no_output_____" ], [ "import numpy as onp\nfrom torch.utils import data\nfrom torchvision.datasets import MNIST\n\ndef numpy_collate(batch):\n if isinstance(batch[0], onp.ndarray):\n return onp.stack(batch)\n elif isinstance(batch[0], (tuple,list)):\n transposed = zip(*batch)\n return [numpy_collate(samples) for samples in transposed]\n else:\n return onp.array(batch)\n\nclass NumpyLoader(data.DataLoader):\n def __init__(self, dataset, batch_size=1,\n shuffle=False, sampler=None,\n batch_sampler=None, num_workers=0,\n pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None):\n super(self.__class__, self).__init__(dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n sampler=sampler,\n batch_sampler=batch_sampler,\n num_workers=num_workers,\n collate_fn=numpy_collate,\n pin_memory=pin_memory,\n drop_last=drop_last,\n timeout=timeout,\n worker_init_fn=worker_init_fn)\n\nclass FlattenAndCast(object):\n def __call__(self, pic):\n return onp.ravel(onp.array(pic, dtype=np.float32))", "_____no_output_____" ], [ "# Define our dataset, using torch datasets\nmnist_dataset = MNIST('/tmp/mnist/', download=True, transform=FlattenAndCast())\ntraining_generator = NumpyLoader(mnist_dataset, batch_size=128, num_workers=0)", "_____no_output_____" ], [ "# Get the full train dataset (for checking accuracy while training)\ntrain_images = onp.array(mnist_dataset.train_data).reshape(len(mnist_dataset.train_data), -1)\ntrain_labels = one_hot(onp.array(mnist_dataset.train_labels), n_targets)\n\n# Get full test dataset\nmnist_dataset_test = MNIST('/tmp/mnist/', download=True, train=False)\ntest_images = np.array(mnist_dataset_test.test_data.numpy().reshape(len(mnist_dataset_test.test_data), -1), dtype=np.float32)\ntest_labels = one_hot(onp.array(mnist_dataset_test.test_labels), n_targets)", "_____no_output_____" ] ], [ [ "### Training Loop", "_____no_output_____" ] ], [ [ "import time\n\nfor epoch in range(num_epochs):\n start_time = time.time()\n for x, y in training_generator:\n y = one_hot(y, n_targets)\n params = update(params, x, y)\n epoch_time = time.time() - start_time\n\n train_acc = accuracy(params, train_images, train_labels)\n test_acc = accuracy(params, test_images, test_labels)\n print(\"Epoch {} in {:0.2f} sec\".format(epoch, epoch_time))\n print(\"Training set accuracy {}\".format(train_acc))\n print(\"Test set accuracy {}\".format(test_acc))", "_____no_output_____" ] ], [ [ "We've now used the whole of the JAX API: `grad` for derivatives, `jit` for speedups and `vmap` for auto-vectorization.\nWe used NumPy to specify all of our computation, and borrowed the great data loaders from PyTorch, and ran the whole thing on the GPU.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec81f032a424b8419dfb2b0293f0317670bbb4a4
55,400
ipynb
Jupyter Notebook
matrix_one/day3.ipynb
MaciejSE/dw_matrix
a15207b8efea0c0caf8fae409fe9a3f81b799e84
[ "MIT" ]
null
null
null
matrix_one/day3.ipynb
MaciejSE/dw_matrix
a15207b8efea0c0caf8fae409fe9a3f81b799e84
[ "MIT" ]
null
null
null
matrix_one/day3.ipynb
MaciejSE/dw_matrix
a15207b8efea0c0caf8fae409fe9a3f81b799e84
[ "MIT" ]
null
null
null
55,400
55,400
0.736227
[ [ [ "!pip install datadotworld\n!pip install datadotworld[pandas]", "Collecting datadotworld\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/eb/2d/564c9b9056c414528f7a91c48bc33f2243bd5323ac07d52269002bd3d6c6/datadotworld-1.7.0-py2.py3-none-any.whl (158kB)\n\u001b[K |████████████████████████████████| 163kB 1.3MB/s \n\u001b[?25hRequirement already satisfied: certifi>=2017.04.17 in /usr/local/lib/python3.6/dist-packages (from datadotworld) (2019.11.28)\nRequirement already satisfied: python-dateutil<3.0a,>=2.6.0 in /usr/local/lib/python3.6/dist-packages (from datadotworld) (2.6.1)\nCollecting tabulator>=1.22.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/56/5c/e57ce30b0751c9071f848e9eb3bda128a921fff13ea442fc1059206cceb2/tabulator-1.34.0-py2.py3-none-any.whl (65kB)\n\u001b[K |████████████████████████████████| 71kB 3.7MB/s \n\u001b[?25hRequirement already satisfied: urllib3<2.0a,>=1.15 in /usr/local/lib/python3.6/dist-packages (from datadotworld) (1.24.3)\nRequirement already satisfied: requests<3.0a,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from datadotworld) (2.21.0)\nRequirement already satisfied: six<2.0a,>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from datadotworld) (1.12.0)\nCollecting click<7.0a,>=6.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/34/c1/8806f99713ddb993c5366c362b2f908f18269f8d792aff1abfd700775a77/click-6.7-py2.py3-none-any.whl (71kB)\n\u001b[K |████████████████████████████████| 71kB 3.8MB/s \n\u001b[?25hCollecting datapackage<2.0a,>=1.6.2\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/b8/75/0a2e0853116d2fe3f6422f7844129eba9939e54c530706d6116545438401/datapackage-1.11.1-py2.py3-none-any.whl (84kB)\n\u001b[K |████████████████████████████████| 92kB 4.4MB/s \n\u001b[?25hCollecting configparser<4.0a,>=3.5.0\n Downloading https://files.pythonhosted.org/packages/ab/1a/ec151e5e703ac80041eaccef923611bbcec2b667c20383655a06962732e9/configparser-3.8.1-py2.py3-none-any.whl\nCollecting tableschema<2.0a,>=1.5.2\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/3c/a5/75ebdbf57b828edd1c7b059d3ff86b1c23d7bd69daae4c16199c124f4aa3/tableschema-1.12.5-py2.py3-none-any.whl (66kB)\n\u001b[K |████████████████████████████████| 71kB 4.2MB/s \n\u001b[?25hRequirement already satisfied: xlrd>=1.0 in /usr/local/lib/python3.6/dist-packages (from tabulator>=1.22.0->datadotworld) (1.1.0)\nCollecting jsonlines>=1.1\n Downloading https://files.pythonhosted.org/packages/4f/9a/ab96291470e305504aa4b7a2e0ec132e930da89eb3ca7a82fbe03167c131/jsonlines-1.2.0-py2.py3-none-any.whl\nRequirement already satisfied: sqlalchemy>=0.9.6 in /usr/local/lib/python3.6/dist-packages (from tabulator>=1.22.0->datadotworld) (1.3.13)\nCollecting ijson>=2.5\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/23/42/2066f77a714ab7221542ea23710b35a96c5dd398f1933429088afd888293/ijson-2.6.1-cp36-cp36m-manylinux1_x86_64.whl (65kB)\n\u001b[K |████████████████████████████████| 71kB 3.5MB/s \n\u001b[?25hCollecting linear-tsv>=1.0\n Downloading https://files.pythonhosted.org/packages/82/e5/03207a0f11e1d60df85b97b61704ed701b725a7c2feaf83f7bfbd0c2d83e/linear-tsv-1.1.0.tar.gz\nCollecting cchardet>=1.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/fa/4e/847feebfc3e71c773b23ee06c74687b8c50a5a6d6aaff452a0a4f4eb9a32/cchardet-2.1.5-cp36-cp36m-manylinux1_x86_64.whl (241kB)\n\u001b[K |████████████████████████████████| 245kB 4.2MB/s \n\u001b[?25hCollecting unicodecsv>=0.14\n Downloading 
https://files.pythonhosted.org/packages/6f/a4/691ab63b17505a26096608cc309960b5a6bdf39e4ba1a793d5f9b1a53270/unicodecsv-0.14.1.tar.gz\nCollecting openpyxl>=2.6\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/95/8c/83563c60489954e5b80f9e2596b93a68e1ac4e4a730deb1aae632066d704/openpyxl-3.0.3.tar.gz (172kB)\n\u001b[K |████████████████████████████████| 174kB 5.4MB/s \n\u001b[?25hRequirement already satisfied: boto3>=1.9 in /usr/local/lib/python3.6/dist-packages (from tabulator>=1.22.0->datadotworld) (1.11.10)\nRequirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3.0a,>=2.0.0->datadotworld) (2.8)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3.0a,>=2.0.0->datadotworld) (3.0.4)\nRequirement already satisfied: jsonschema>=2.5 in /usr/local/lib/python3.6/dist-packages (from datapackage<2.0a,>=1.6.2->datadotworld) (2.6.0)\nCollecting jsonpointer>=1.10\n Downloading https://files.pythonhosted.org/packages/18/b0/a80d29577c08eea401659254dfaed87f1af45272899e1812d7e01b679bc5/jsonpointer-2.0-py2.py3-none-any.whl\nCollecting rfc3986>=1.1.0\n Downloading https://files.pythonhosted.org/packages/00/8d/9d56bfe43997f1864fe0891be69bc239ded98e69c9f56eb9eaa5b1789660/rfc3986-1.3.2-py2.py3-none-any.whl\nCollecting isodate>=0.5.4\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/9b/9f/b36f7774ff5ea8e428fdcfc4bb332c39ee5b9362ddd3d40d9516a55221b2/isodate-0.6.0-py2.py3-none-any.whl (45kB)\n\u001b[K |████████████████████████████████| 51kB 4.5MB/s \n\u001b[?25hRequirement already satisfied: jdcal in /usr/local/lib/python3.6/dist-packages (from openpyxl>=2.6->tabulator>=1.22.0->datadotworld) (1.4.1)\nRequirement already satisfied: et_xmlfile in /usr/local/lib/python3.6/dist-packages (from openpyxl>=2.6->tabulator>=1.22.0->datadotworld) (1.0.1)\nRequirement already satisfied: s3transfer<0.4.0,>=0.3.0 in /usr/local/lib/python3.6/dist-packages (from boto3>=1.9->tabulator>=1.22.0->datadotworld) (0.3.2)\nRequirement already satisfied: jmespath<1.0.0,>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from boto3>=1.9->tabulator>=1.22.0->datadotworld) (0.9.4)\nRequirement already satisfied: botocore<1.15.0,>=1.14.10 in /usr/local/lib/python3.6/dist-packages (from boto3>=1.9->tabulator>=1.22.0->datadotworld) (1.14.10)\nRequirement already satisfied: docutils<0.16,>=0.10 in /usr/local/lib/python3.6/dist-packages (from botocore<1.15.0,>=1.14.10->boto3>=1.9->tabulator>=1.22.0->datadotworld) (0.15.2)\nBuilding wheels for collected packages: linear-tsv, unicodecsv, openpyxl\n Building wheel for linear-tsv (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for linear-tsv: filename=linear_tsv-1.1.0-cp36-none-any.whl size=7383 sha256=3d255845902fcf4731b33534aadbb3bd4594cbca07cbe4998891fff638b79bef\n Stored in directory: /root/.cache/pip/wheels/3f/8a/cb/38917fd1ef4356b9870ace7331b83417dc594bf2c029bd991f\n Building wheel for unicodecsv (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for unicodecsv: filename=unicodecsv-0.14.1-cp36-none-any.whl size=10768 sha256=9f2b7c2738057ac5100a75bf9837e6ed25a0c069ec6fe8bb8f7f93ade45b9775\n Stored in directory: /root/.cache/pip/wheels/a6/09/e9/e800279c98a0a8c94543f3de6c8a562f60e51363ed26e71283\n Building wheel for openpyxl (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for openpyxl: filename=openpyxl-3.0.3-py2.py3-none-any.whl size=241262 sha256=8ed4599023f6a96630d82af8e93dc32eb3a147486203122f9a88d9660b7b866f\n Stored in directory: /root/.cache/pip/wheels/b5/85/ca/e768ac132e57e75e645a151f8badac71cc0089e7225dddf76b\nSuccessfully built linear-tsv unicodecsv openpyxl\nInstalling collected packages: click, jsonlines, ijson, linear-tsv, cchardet, unicodecsv, openpyxl, tabulator, rfc3986, isodate, tableschema, jsonpointer, datapackage, configparser, datadotworld\n Found existing installation: Click 7.0\n Uninstalling Click-7.0:\n Successfully uninstalled Click-7.0\n Found existing installation: openpyxl 2.5.9\n Uninstalling openpyxl-2.5.9:\n Successfully uninstalled openpyxl-2.5.9\nSuccessfully installed cchardet-2.1.5 click-6.7 configparser-3.8.1 datadotworld-1.7.0 datapackage-1.11.1 ijson-2.6.1 isodate-0.6.0 jsonlines-1.2.0 jsonpointer-2.0 linear-tsv-1.1.0 openpyxl-3.0.3 rfc3986-1.3.2 tableschema-1.12.5 tabulator-1.34.0 unicodecsv-0.14.1\nRequirement already satisfied: datadotworld[pandas] in /usr/local/lib/python3.6/dist-packages (1.7.0)\nRequirement already satisfied: tableschema<2.0a,>=1.5.2 in /usr/local/lib/python3.6/dist-packages (from datadotworld[pandas]) (1.12.5)\nRequirement already satisfied: six<2.0a,>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from datadotworld[pandas]) (1.12.0)\nRequirement already satisfied: click<7.0a,>=6.0 in /usr/local/lib/python3.6/dist-packages (from datadotworld[pandas]) (6.7)\nRequirement already satisfied: configparser<4.0a,>=3.5.0 in /usr/local/lib/python3.6/dist-packages (from datadotworld[pandas]) (3.8.1)\nRequirement already satisfied: requests<3.0a,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from datadotworld[pandas]) (2.21.0)\nRequirement already satisfied: certifi>=2017.04.17 in /usr/local/lib/python3.6/dist-packages (from datadotworld[pandas]) (2019.11.28)\nRequirement already satisfied: urllib3<2.0a,>=1.15 in /usr/local/lib/python3.6/dist-packages (from datadotworld[pandas]) (1.24.3)\nRequirement already satisfied: datapackage<2.0a,>=1.6.2 in /usr/local/lib/python3.6/dist-packages (from datadotworld[pandas]) (1.11.1)\nRequirement already satisfied: tabulator>=1.22.0 in /usr/local/lib/python3.6/dist-packages (from datadotworld[pandas]) (1.34.0)\nRequirement already satisfied: python-dateutil<3.0a,>=2.6.0 in /usr/local/lib/python3.6/dist-packages (from datadotworld[pandas]) (2.6.1)\nCollecting numpy<=1.16.4; extra == \"pandas\"\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/87/2d/e4656149cbadd3a8a0369fcd1a9c7d61cc7b87b3903b85389c70c989a696/numpy-1.16.4-cp36-cp36m-manylinux1_x86_64.whl (17.3MB)\n\u001b[K |████████████████████████████████| 17.3MB 234kB/s \n\u001b[?25hCollecting pandas<0.25; extra == \"pandas\"\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/19/74/e50234bc82c553fecdbd566d8650801e3fe2d6d8c8d940638e3d8a7c5522/pandas-0.24.2-cp36-cp36m-manylinux1_x86_64.whl (10.1MB)\n\u001b[K |████████████████████████████████| 10.1MB 31.5MB/s \n\u001b[?25hRequirement already satisfied: jsonschema>=2.5 in /usr/local/lib/python3.6/dist-packages (from tableschema<2.0a,>=1.5.2->datadotworld[pandas]) (2.6.0)\nRequirement already satisfied: rfc3986>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tableschema<2.0a,>=1.5.2->datadotworld[pandas]) (1.3.2)\nRequirement already satisfied: isodate>=0.5.4 in /usr/local/lib/python3.6/dist-packages (from tableschema<2.0a,>=1.5.2->datadotworld[pandas]) (0.6.0)\nRequirement already 
satisfied: unicodecsv>=0.14 in /usr/local/lib/python3.6/dist-packages (from tableschema<2.0a,>=1.5.2->datadotworld[pandas]) (0.14.1)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3.0a,>=2.0.0->datadotworld[pandas]) (3.0.4)\nRequirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3.0a,>=2.0.0->datadotworld[pandas]) (2.8)\nRequirement already satisfied: cchardet>=1.0 in /usr/local/lib/python3.6/dist-packages (from datapackage<2.0a,>=1.6.2->datadotworld[pandas]) (2.1.5)\nRequirement already satisfied: jsonpointer>=1.10 in /usr/local/lib/python3.6/dist-packages (from datapackage<2.0a,>=1.6.2->datadotworld[pandas]) (2.0)\nRequirement already satisfied: sqlalchemy>=0.9.6 in /usr/local/lib/python3.6/dist-packages (from tabulator>=1.22.0->datadotworld[pandas]) (1.3.13)\nRequirement already satisfied: openpyxl>=2.6 in /usr/local/lib/python3.6/dist-packages (from tabulator>=1.22.0->datadotworld[pandas]) (3.0.3)\nRequirement already satisfied: jsonlines>=1.1 in /usr/local/lib/python3.6/dist-packages (from tabulator>=1.22.0->datadotworld[pandas]) (1.2.0)\nRequirement already satisfied: ijson>=2.5 in /usr/local/lib/python3.6/dist-packages (from tabulator>=1.22.0->datadotworld[pandas]) (2.6.1)\nRequirement already satisfied: boto3>=1.9 in /usr/local/lib/python3.6/dist-packages (from tabulator>=1.22.0->datadotworld[pandas]) (1.11.10)\nRequirement already satisfied: xlrd>=1.0 in /usr/local/lib/python3.6/dist-packages (from tabulator>=1.22.0->datadotworld[pandas]) (1.1.0)\nRequirement already satisfied: linear-tsv>=1.0 in /usr/local/lib/python3.6/dist-packages (from tabulator>=1.22.0->datadotworld[pandas]) (1.1.0)\nRequirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas<0.25; extra == \"pandas\"->datadotworld[pandas]) (2018.9)\nRequirement already satisfied: et-xmlfile in /usr/local/lib/python3.6/dist-packages (from openpyxl>=2.6->tabulator>=1.22.0->datadotworld[pandas]) (1.0.1)\nRequirement already satisfied: jdcal in /usr/local/lib/python3.6/dist-packages (from openpyxl>=2.6->tabulator>=1.22.0->datadotworld[pandas]) (1.4.1)\nRequirement already satisfied: botocore<1.15.0,>=1.14.10 in /usr/local/lib/python3.6/dist-packages (from boto3>=1.9->tabulator>=1.22.0->datadotworld[pandas]) (1.14.10)\nRequirement already satisfied: jmespath<1.0.0,>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from boto3>=1.9->tabulator>=1.22.0->datadotworld[pandas]) (0.9.4)\nRequirement already satisfied: s3transfer<0.4.0,>=0.3.0 in /usr/local/lib/python3.6/dist-packages (from boto3>=1.9->tabulator>=1.22.0->datadotworld[pandas]) (0.3.2)\nRequirement already satisfied: docutils<0.16,>=0.10 in /usr/local/lib/python3.6/dist-packages (from botocore<1.15.0,>=1.14.10->boto3>=1.9->tabulator>=1.22.0->datadotworld[pandas]) (0.15.2)\n\u001b[31mERROR: plotnine 0.6.0 has requirement pandas>=0.25.0, but you'll have pandas 0.24.2 which is incompatible.\u001b[0m\n\u001b[31mERROR: mizani 0.6.0 has requirement pandas>=0.25.0, but you'll have pandas 0.24.2 which is incompatible.\u001b[0m\n\u001b[31mERROR: google-colab 1.0.0 has requirement pandas~=0.25.0; python_version >= \"3.0\", but you'll have pandas 0.24.2 which is incompatible.\u001b[0m\n\u001b[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.\u001b[0m\n\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is 
incompatible.\u001b[0m\nInstalling collected packages: numpy, pandas\n Found existing installation: numpy 1.17.5\n Uninstalling numpy-1.17.5:\n Successfully uninstalled numpy-1.17.5\n Found existing installation: pandas 0.25.3\n Uninstalling pandas-0.25.3:\n Successfully uninstalled pandas-0.25.3\nSuccessfully installed numpy-1.16.4 pandas-0.24.2\n" ], [ "!dw configure", "API token (obtained at: https://data.world/settings/advanced): eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJwcm9kLXVzZXItY2xpZW50Om1hY2llanMxOTg4IiwiaXNzIjoiYWdlbnQ6bWFjaWVqczE5ODg6OmMyNzAzYjNkLWM5NDYtNDI4Ni05YjA2LTU5ZWEyMDI1YTJmMiIsImlhdCI6MTU4MTUyNzQ0OSwicm9sZSI6WyJ1c2VyX2FwaV9yZWFkIiwidXNlcl9hcGlfd3JpdGUiXSwiZ2VuZXJhbC1wdXJwb3NlIjp0cnVlLCJzYW1sIjp7fX0.cs922LL_Q8oO4Y5oBeEoSAOqbBTPWz5uCvm62dFcnTfIUkGrlYGsrIEaStw6XCoNUa_7HSLau0I9s-_Gn5UNsw\n" ], [ "from google.colab import drive\nimport pandas as pd\nimport numpy as np\n\nimport datadotworld as dw", "_____no_output_____" ], [ "drive.mount(\"/content/drive\")", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "ls", "\u001b[0m\u001b[01;34mdrive\u001b[0m/ \u001b[01;34msample_data\u001b[0m/\n" ], [ "cd \"drive/My Drive/Colab Notebooks/dw_matrix\"", "/content/drive/My Drive/Colab Notebooks/dw_matrix\n" ], [ "!mkdir data", "_____no_output_____" ], [ "!echo 'data' > .gitignore", "_____no_output_____" ], [ "!git add .gitignore", "_____no_output_____" ], [ "data = dw.load_dataset('datafiniti/mens-shoe-prices')", "_____no_output_____" ], [ "df = data.dataframes['7004_1']", "_____no_output_____" ], [ "df.sample(5)", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "df.prices_currency.unique()", "_____no_output_____" ], [ "df.prices_currency.value_counts(normalize=True)", "_____no_output_____" ], [ "df_usd = df[ df.prices_currency == 'USD' ].copy()\ndf_usd.shape", "_____no_output_____" ], [ "df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(np.float)\ndf_usd['prices_amountmin'].hist()", "_____no_output_____" ], [ "filter_max = np.percentile( df_usd['prices_amountmin'], 99)\nfilter_max", "_____no_output_____" ], [ "df_usd_filter = df_usd[ df_usd['prices_amountmin'] < filter_max ]", "_____no_output_____" ], [ "df_usd_filter.prices_amountmin.hist(bins=100)", "_____no_output_____" ], [ "ls", "\u001b[0m\u001b[01;34mdata\u001b[0m/ HelloGithub.ipynb LICENSE \u001b[01;34mmatrix_one\u001b[0m/ README.md\n" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
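A note on the outlier handling used in the notebook above: it clips the price distribution at the 99th percentile before plotting the histogram. A minimal, self-contained sketch of that pattern (the column name `prices_amountmin` follows the notebook; the synthetic prices below are purely illustrative, not the real data.world data):

```python
import numpy as np
import pandas as pd

# Synthetic stand-in for the USD-only slice of the shoe-price data
rng = np.random.default_rng(0)
df_usd = pd.DataFrame({"prices_amountmin": rng.lognormal(mean=4.0, sigma=0.8, size=10_000)})

# Clip the long right tail at the 99th percentile, as the notebook does,
# so the histogram is not dominated by a handful of extreme prices
filter_max = np.percentile(df_usd["prices_amountmin"], 99)
df_usd_filter = df_usd[df_usd["prices_amountmin"] < filter_max]

print(f"cutoff: {filter_max:.2f}, kept {len(df_usd_filter)} of {len(df_usd)} rows")
```

Clipping at a high percentile is a pragmatic choice here: it keeps the bulk of the distribution visible without modelling the heavy right tail.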
ec81f37ef1be0dc832cbed04bd6856a9d9bb49a9
283,055
ipynb
Jupyter Notebook
telco_churn.answers.ipynb
ttwange/ADS-Assignment-4
ddc88fc312b814680d18deff7b6634ca71e61162
[ "MIT" ]
null
null
null
telco_churn.answers.ipynb
ttwange/ADS-Assignment-4
ddc88fc312b814680d18deff7b6634ca71e61162
[ "MIT" ]
null
null
null
telco_churn.answers.ipynb
ttwange/ADS-Assignment-4
ddc88fc312b814680d18deff7b6634ca71e61162
[ "MIT" ]
null
null
null
101.30816
47,236
0.785123
[ [ [ "# Kaggle: Telco Customer Churn.\n", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport warnings\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\n", "_____no_output_____" ], [ "df = pd.read_csv(\"WA_Fn-UseC_-Telco-Customer-Churn.csv\")\ndf.head()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 7043 entries, 0 to 7042\nData columns (total 21 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 customerID 7043 non-null object \n 1 gender 7043 non-null object \n 2 SeniorCitizen 7043 non-null int64 \n 3 Partner 7043 non-null object \n 4 Dependents 7043 non-null object \n 5 tenure 7043 non-null int64 \n 6 PhoneService 7043 non-null object \n 7 MultipleLines 7043 non-null object \n 8 InternetService 7043 non-null object \n 9 OnlineSecurity 7043 non-null object \n 10 OnlineBackup 7043 non-null object \n 11 DeviceProtection 7043 non-null object \n 12 TechSupport 7043 non-null object \n 13 StreamingTV 7043 non-null object \n 14 StreamingMovies 7043 non-null object \n 15 Contract 7043 non-null object \n 16 PaperlessBilling 7043 non-null object \n 17 PaymentMethod 7043 non-null object \n 18 MonthlyCharges 7043 non-null float64\n 19 TotalCharges 7043 non-null object \n 20 Churn 7043 non-null object \ndtypes: float64(1), int64(2), object(18)\nmemory usage: 1.1+ MB\n" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "df = df.drop(['customerID'], axis = 1)\ndf.tail()", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "df.Partner.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ], [ "df.StreamingTV.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ], [ "df.TechSupport.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ], [ "df.DeviceProtection.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ], [ "df.OnlineBackup.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ], [ "df.OnlineSecurity.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ], [ "df.InternetService.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ], [ "df.PhoneService.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ], [ "df.MultipleLines.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ], [ "df.StreamingMovies.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ] ], [ [ "# 1. 
Demographics \n", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "#df.groupby(['gender']).size()\n\ndf[\"gender\"].value_counts().sort_values().plot(kind = 'barh',color= \"green\")", "_____no_output_____" ], [ "#df.groupby(['SeniorCitizen']).size()\ndf[\"SeniorCitizen\"].value_counts().sort_values().plot(kind = 'barh',color= \"blue\")", "_____no_output_____" ], [ "#df[\"Partner\"].value_counts()\ndf[\"Partner\"].value_counts().sort_values().plot(kind = 'barh',color= \"black\")", "_____no_output_____" ], [ "# Combine levels in a categorical variable by seeing their distribution\nJobRoleCrossTab = pd.crosstab(df['gender'], df['Partner'], margins=True)\nJobRoleCrossTab", "_____no_output_____" ], [ "sns.histplot(df, x=\"Partner\", hue=\"gender\", multiple=\"stack\")", "_____no_output_____" ], [ "# Combine levels in a categorical variable by seeing their distribution\nJobRoleCrossTab = pd.crosstab(df['SeniorCitizen'], df['gender'],margins=True)\nJobRoleCrossTab", "_____no_output_____" ], [ "sns.histplot(df, x=\"SeniorCitizen\", hue=\"gender\", multiple=\"stack\")", "_____no_output_____" ], [ "#df.Attrition.replace([\"Yes\",\"No\"],[1,0],inplace=True)", "_____no_output_____" ], [ "df[\"SeniorCitizen\"].value_counts().sort_values().plot(kind = 'barh',color= \"cyan\")", "_____no_output_____" ], [ "#The young population were the most popular customers of the brand, with senior citizens making up a small share. This can be attributed to young people being more savvy with their phones than senior citizens.\n#When it comes to partners it seems like almost a tie in use of the telco services, but those without a partner seem more popular with the brand\n#The marketing team should focus their efforts on the young generation, of both genders, regardless of whether they have a partner or not. The partner aspect makes very little difference.", "_____no_output_____" ] ], [ [ "# 2. Services:\na. Which 3 services are contributing to a higher monthly charge and resulting in customers churning? 
(Use visualizations in explaining your analysis)", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ], [ "# Plotting countplots for the categorical variables\nfig,ax = plt.subplots(3,2, figsize=(20,20)) \nplt.suptitle(\"Distribution of different factors\", fontsize=20)\nsns.countplot(df['OnlineSecurity'], ax = ax[0,0]) \nsns.countplot(df['OnlineBackup'], ax = ax[0,1]) \nsns.countplot(df['DeviceProtection'], ax = ax[1,0]) \nsns.countplot(df['TechSupport'], ax = ax[1,1])\nsns.countplot(df['StreamingTV'], ax = ax[2,0]) \nsns.countplot(df['PhoneService'], ax = ax[2,1]) \nplt.xticks(rotation=20)\nplt.subplots_adjust(bottom=0.4)\nplt.show()", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "sns.catplot(data=df, kind=\"bar\", x=\"MultipleLines\", y=\"MonthlyCharges\", hue=\"Churn\")", "_____no_output_____" ], [ "sns.catplot(data=df, kind=\"bar\", x=\"StreamingMovies\", y=\"MonthlyCharges\", hue=\"Churn\")", "_____no_output_____" ], [ "sns.catplot(data=df, kind=\"bar\", x=\"OnlineBackup\", y=\"MonthlyCharges\", hue=\"Churn\")", "_____no_output_____" ], [ "sns.catplot(data=df, kind=\"bar\", x=\"DeviceProtection\", y=\"MonthlyCharges\", hue=\"Churn\")", "_____no_output_____" ], [ "sns.catplot(data=df, kind=\"bar\", x=\"TechSupport\", y=\"MonthlyCharges\", hue=\"Churn\")", "_____no_output_____" ], [ "sns.catplot(data=df, kind=\"bar\", x=\"StreamingTV\", y=\"MonthlyCharges\", hue=\"Churn\")", "_____no_output_____" ], [ "#sns.histplot(df,x=\"PhoneService\", hue=\"Churn\", multiple=\"stack\")\nsns.catplot(data=df, kind=\"bar\", x=\"PhoneService\", y=\"MonthlyCharges\", hue=\"Churn\")", "_____no_output_____" ], [ "sns.catplot(data=df, kind=\"bar\", x=\"InternetService\", y=\"MonthlyCharges\", hue=\"Churn\")", "_____no_output_____" ], [ "sns.catplot(data=df, kind=\"bar\", x=\"OnlineSecurity\", y=\"MonthlyCharges\", hue=\"Churn\")", "_____no_output_____" ], [ "#StreamingMovies, StreamingTV and MultipleLines had the highest contribution to a higher monthly charge and to customers churning", "_____no_output_____" ] ], [ [ "# b. \nIf the telco was to offer Phone as a standalone service, which type of contract \nwould encourage customer retention?", "_____no_output_____" ] ], [ [ "\nsns.histplot(df, hue=\"PhoneService\", x=\"Contract\", multiple=\"stack\")", "_____no_output_____" ], [ "df[\"PhoneService\"].value_counts()", "_____no_output_____" ], [ "df[\"Contract\"].value_counts()", "_____no_output_____" ], [ "#Month-to-month contracts have the highest customer base. This shows that if Phone was to be offered as a standalone service, then the month-to-month contract would be the most profitable of all the contracts.", "_____no_output_____" ] ], [ [ "# 3. Payment: (Use visualizations in explaining your analysis)\na. If the company was to streamline all its services into 3 bouquet packages, what \nmonthly prices will be appropriate for the following packages to keep customers \nfrom churning:\ni. Basic\nii. Plus\niii. Premium", "_____no_output_____" ] ], [ [ "sns.boxplot(y=\"MonthlyCharges\", x=\"Churn\", data=df)\n", "_____no_output_____" ], [ "df[\"MonthlyCharges\"].describe()", "_____no_output_____" ], [ "#The company should price its packages as \n# i. Basic = 35.50\n# ii. Plus = 70.35\n# iii. 
Premium = 89.85", "_____no_output_____" ] ], [ [ "# b.\nShould the company strictly go paperless for their monthly billings as a technique \nof keeping their customers and why?", "_____no_output_____" ] ], [ [ "df[\"PaperlessBilling\"].value_counts()", "_____no_output_____" ], [ "sns.histplot(df, x=\"Churn\", hue=\"PaperlessBilling\", multiple=\"stack\")", "_____no_output_____" ], [ "#PaperlessBilling had the lowest churn among the customers. \n#It could be that customers find paperless billing cumbersome to work with and hence opt out of the telco customer base", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
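The pricing and churn conclusions in the telco notebook above are read off box plots and `describe()` output; the same numbers can be computed directly. A hedged sketch, assuming a frame with the notebook's `MonthlyCharges` and `Churn` columns (the toy values below are illustrative, not the real Kaggle data):

```python
import pandas as pd

# Tiny illustrative frame with the same columns the notebook uses
df = pd.DataFrame({
    "MonthlyCharges": [20.1, 35.4, 55.0, 70.3, 89.9, 105.2],
    "Churn": ["No", "No", "Yes", "No", "Yes", "Yes"],
})

# Quartiles of MonthlyCharges -> candidate Basic / Plus / Premium price points
quartiles = df["MonthlyCharges"].quantile([0.25, 0.50, 0.75])
print(quartiles)

# Overall churn rate, the headline number the EDA is trying to explain
churn_rate = (df["Churn"] == "Yes").mean()
print(f"churn rate: {churn_rate:.1%}")
```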
ec81f515b8345e7f2feb7c67c0da1155fc6686f9
65,684
ipynb
Jupyter Notebook
mobilenet_emotions/visualization_history.ipynb
HikkaV/OAHEGA
0396cac52a360940234fdb64f5ea9ee55dab579c
[ "MIT" ]
1
2021-10-04T07:43:58.000Z
2021-10-04T07:43:58.000Z
mobilenet_emotions/visualization_history.ipynb
HikkaV/OAHEGA
0396cac52a360940234fdb64f5ea9ee55dab579c
[ "MIT" ]
3
2021-02-08T13:30:46.000Z
2022-02-10T03:59:35.000Z
mobilenet_emotions/visualization_history.ipynb
HikkaV/OAHEGA
0396cac52a360940234fdb64f5ea9ee55dab579c
[ "MIT" ]
null
null
null
250.70229
26,107
0.597056
[ [ [ "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n%matplotlib notebook", "_____no_output_____" ], [ "import seaborn as sns\nsns.set()", "_____no_output_____" ], [ "training_history = pd.read_csv('../visualize/history_emotions.csv')", "_____no_output_____" ], [ "training_history.head()", "_____no_output_____" ], [ "training_history.columns", "_____no_output_____" ], [ "def visualize_k(training_history, x, y, k=5):\n hist_sample = training_history.groupby('experiment_id') \\\n .agg({y:[list,np.max], x:[list,np.max]}).sort_values(by=(y,'amax'),ascending=False)\n hist_sample = hist_sample[:k]\n plt.figure(figsize=(10,10))\n plt.title('Visualization of training',fontdict={'size':20})\n plt.xlabel(x,fontdict={'size':20})\n plt.ylabel(y,fontdict={'size':20})\n for i in hist_sample.index:\n plt.plot(hist_sample.loc[i][x]['list'],hist_sample.loc[i][y]['list'])\n plt.legend(hist_sample.index)\n var = 2*(np.std(hist_sample[y]['amax'])/len(hist_sample[y]['amax']))\n plt.yticks(np.arange(0,max(hist_sample[y]['amax']),var))\n plt.xticks(np.arange(0,max(hist_sample[x]['amax']),5))", "_____no_output_____" ], [ "visualize_k(training_history,'epoch','val_acc',20)", "_____no_output_____" ], [ "def get_last(x):\n return x.values[len(x.values)-1]", "_____no_output_____" ], [ "def precision_per_class(training_history,x,y,k=10):\n classes = ['Angry','Ahegao','Happy','Neutral','Sad','Surprise']\n columns = training_history.columns\n dict_funcs = dict([(i+'_'+y, get_last) for i in classes if i+'_'+y in columns])\n y_bar_labels = list(dict_funcs.keys())\n dict_funcs.update({ x:[list,np.max], 'val_{}'.format('acc') : np.max})\n hist_sample = training_history.groupby('experiment_id') \\\n .agg(dict_funcs).sort_values(by=('val_{}'.format('acc'),'amax'),ascending=False)[:k]\n fig, axes = plt.subplots(k,1, figsize=(10,10))\n axes = axes if not len(axes)==2 else [i for z in axes for i in z]\n bars = []\n fig.suptitle('Comparison between classes {0} for {1} experiments'.format(y, k),y=1)\n for i in zip(axes,hist_sample.index):\n bar = i[0].bar(range(len(y_bar_labels)),hist_sample.loc[i[1]][y_bar_labels].values)\n i[0].set_xticks(range(len(y_bar_labels)))\n i[0].set_xticklabels(y_bar_labels, rotation=10)\n i[0].title.set_text('experiment {}'.format(i[1]))\n \n bars.append((bar,i[0]))\n for i,z in bars:\n for bar in i:\n yval = bar.get_height()\n z.axes.get_yaxis().set_visible(False)\n z.text(bar.get_x(), yval + .005, str(yval)[:4])\n \n plt.tight_layout(pad=1, w_pad=0.5, h_pad=0.8)", "_____no_output_____" ], [ "precision_per_class(training_history,'epoch','precision',5)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
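The plotting helpers in the notebook above assume a long-format history file with one row per experiment per epoch. A sketch of that expected input and of the ranking step `visualize_k` performs (column names `experiment_id`, `epoch`, `val_acc` follow the notebook; the metric values are fabricated, and the string aggregator "max" is used instead of the notebook's `np.max` for robustness across pandas versions):

```python
import numpy as np
import pandas as pd

# Fabricated training history in the long format the plotting helpers expect:
# one row per (experiment_id, epoch) with the tracked metrics as columns
records = []
for exp in range(3):
    for epoch in range(10):
        records.append({
            "experiment_id": exp,
            "epoch": epoch,
            "val_acc": 0.5 + 0.04 * epoch + 0.02 * exp + 0.01 * np.random.rand(),
        })
training_history = pd.DataFrame(records)

# Same idea as visualize_k: per experiment, collect the curve as a list
# and keep the best value for ranking the top-k experiments
best = (training_history.groupby("experiment_id")
        .agg({"val_acc": [list, "max"]})
        .sort_values(by=("val_acc", "max"), ascending=False))
print(best[("val_acc", "max")])
```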
ec81f794fd9aa4e85a96a9f40da6d0357cc54b8b
22,207
ipynb
Jupyter Notebook
06_Stats/US_Baby_Names/Exercises.ipynb
thapaAshish/pandas_exercises
e234ea1534fb42173870cea70cf7962900f3efb9
[ "BSD-3-Clause" ]
1
2021-12-24T11:42:34.000Z
2021-12-24T11:42:34.000Z
06_Stats/US_Baby_Names/Exercises.ipynb
thapaAshish/pandas_exercises
e234ea1534fb42173870cea70cf7962900f3efb9
[ "BSD-3-Clause" ]
null
null
null
06_Stats/US_Baby_Names/Exercises.ipynb
thapaAshish/pandas_exercises
e234ea1534fb42173870cea70cf7962900f3efb9
[ "BSD-3-Clause" ]
null
null
null
25.350457
175
0.350385
[ [ [ "# US - Baby Names", "_____no_output_____" ], [ "### Introduction:\n\nWe are going to use a subset of [US Baby Names](https://www.kaggle.com/kaggle/us-baby-names) from Kaggle. \nIn the file it will be names from 2004 until 2014\n\n\n### Step 1. Import the necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n", "_____no_output_____" ] ], [ [ "### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv). ", "_____no_output_____" ], [ "### Step 3. Assign it to a variable called baby_names.", "_____no_output_____" ] ], [ [ "baby_names = pd.read_csv('https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv')", "_____no_output_____" ] ], [ [ "### Step 4. See the first 10 entries", "_____no_output_____" ] ], [ [ "baby_names.head(10)", "_____no_output_____" ] ], [ [ "### Step 5. Delete the column 'Unnamed: 0' and 'Id'", "_____no_output_____" ] ], [ [ "baby_names.drop(baby_names.columns[baby_names.columns.str.contains('Unnamed: 0',case=False)],axis='columns',inplace=True)", "_____no_output_____" ] ], [ [ "### Step 6. Is there more male or female names in the dataset?", "_____no_output_____" ] ], [ [ "baby_names.groupby('Gender').agg({'Gender': 'count'})", "_____no_output_____" ] ], [ [ "### Step 7. Group the dataset by name and assign to names", "_____no_output_____" ] ], [ [ "baby_names.groupby('Name')", "_____no_output_____" ] ], [ [ "### Step 8. How many different names exist in the dataset?", "_____no_output_____" ] ], [ [ "len(baby_names['Name'].unique())", "_____no_output_____" ] ], [ [ "### Step 9. What is the name with most occurrences?", "_____no_output_____" ] ], [ [ "spd= baby_names.groupby('Name').sum()\nspd.sort_values('Count',ascending=False)", "_____no_output_____" ] ], [ [ "### Step 10. How many different names have the least occurrences?", "_____no_output_____" ] ], [ [ "name_count.sort_values('names',ascending=False)", "_____no_output_____" ] ], [ [ "### Step 11. What is the median name occurrence?", "_____no_output_____" ] ], [ [ "name_count[name_count['names']==8.00]\n", "_____no_output_____" ] ], [ [ "### Step 12. What is the standard deviation of names?", "_____no_output_____" ] ], [ [ "name_count.std()", "_____no_output_____" ] ], [ [ "### Step 13. Get a summary with the mean, min, max, std and quartiles.", "_____no_output_____" ] ], [ [ "name_count.describe()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
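Steps 10–13 of the exercise notebook above reference a `name_count` object that is never defined, so those answers do not run as written. One way they are commonly completed, sketched here on a toy frame with the same `Name` and `Count` columns (the counts are made up; the real data comes from the Kaggle CSV):

```python
import pandas as pd

# Small stand-in for the US baby-names data (Name / Count columns as in the notebook)
baby_names = pd.DataFrame({
    "Name": ["Emma", "Emma", "Noah", "Olivia", "Noah", "Ava"],
    "Count": [120, 80, 95, 60, 40, 15],
})

# Total occurrences per name -- the object the later answers call `name_count`
name_count = baby_names.groupby("Name")["Count"].sum()

print(name_count.min())                         # least occurrences
print((name_count == name_count.min()).sum())   # how many names share that minimum
print(name_count.median())                      # median name occurrence
print(name_count.std())                         # standard deviation
print(name_count.describe())                    # mean, min, max, std and quartiles
```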
ec81fa76f41e74617930b93abce63f79896eab55
686,188
ipynb
Jupyter Notebook
notebooks/Syst3x2pt.ipynb
Lhior/TXPipe
58fd7612326779d4c1b0e499157dddc9e3b524c0
[ "BSD-3-Clause" ]
9
2018-03-17T02:07:52.000Z
2022-02-23T20:25:48.000Z
notebooks/Syst3x2pt.ipynb
Lhior/TXPipe
58fd7612326779d4c1b0e499157dddc9e3b524c0
[ "BSD-3-Clause" ]
162
2018-03-06T16:18:23.000Z
2022-03-21T18:11:37.000Z
notebooks/Syst3x2pt.ipynb
Lhior/TXPipe
58fd7612326779d4c1b0e499157dddc9e3b524c0
[ "BSD-3-Clause" ]
7
2018-07-26T11:49:46.000Z
2022-02-23T22:14:48.000Z
928.535859
131,688
0.951757
[ [ [ "# Building systematics maps for 3x2pt with Spark\n\n<br>Kernel: desc-pyspark\n<br>Owner: **S Plaszczynski** \n<br>Last Verified to Run: **2019-01-10**\n\nThe goal of this notebook is to show how to build (simply) Healpix maps from the DC2 \nDPDD output inorder to test for possible 3x2pt systematics.\nIt is illustrated on the current run1.2p production.\nIt also shows how Spark can be used for data analysis (for more details see: https://arxiv.org/abs/1807.03078)\nNote that the full power of Spark will reveal when more data will be available.\n\nThe advantages of using Spark are:\n- one can put the relevant variables in cache\n- computation automatically optimised (lazy evaluation)\n- the analysis will scale when more data will be available\n- Spark is available at NERSC (as this notebook shows). jupyter-dev is limited to running on 4 threads ie 8GB mem. For (much) more memory use the interactive or batch mode, see https://github.com/LSSTDESC/desc-spark\n", "_____no_output_____" ], [ "# reading the data", "_____no_output_____" ] ], [ [ "from pyspark.sql import SparkSession\n\n# Initialise our Spark session\nspark = SparkSession.builder.getOrCreate()\nprint(\"spark session started\")\n\n#usefull tool to benchmark\nfrom time import time\nclass Timer:\n \"\"\"\n a simple class for printing time (s) since last call\n \"\"\"\n def __init__(self):\n self.t0=time()\n \n def start(self):\n self.t0=time()\n \n def stop(self):\n t1=time()\n print(\"{:2.1f}s\".format(t1-self.t0))\n\ntimer=Timer()", "spark session started\n" ], [ "timer.start()\ndf_all=spark.read.parquet(\"/global/cscratch1/sd/plaszczy/Run1.2p/object_catalog/full_catalog.parquet\")\ndf_all.printSchema()\ntimer.stop()", "root\n |-- xErr: float (nullable = true)\n |-- magerr_z_cModel: double (nullable = true)\n |-- magerr_i_cModel: double (nullable = true)\n |-- Iyy_g: double (nullable = true)\n |-- snr_i_cModel: double (nullable = true)\n |-- Ixy_y: double (nullable = true)\n |-- mag_g: double (nullable = true)\n |-- Iyy_i: double (nullable = true)\n |-- objectId: long (nullable = true)\n |-- snr_r_cModel: double (nullable = true)\n |-- Iyy_u: double (nullable = true)\n |-- Ixy_r: double (nullable = true)\n |-- Ixx: double (nullable = true)\n |-- IxxPSF_g: double (nullable = true)\n |-- mag_z: double (nullable = true)\n |-- tract: long (nullable = true)\n |-- Iyy_z: double (nullable = true)\n |-- good: boolean (nullable = true)\n |-- mag_y: double (nullable = true)\n |-- psFlux_flag_i: boolean (nullable = true)\n |-- Ixy_z: double (nullable = true)\n |-- clean: boolean (nullable = true)\n |-- IyyPSF_u: double (nullable = true)\n |-- Ixx_z: double (nullable = true)\n |-- mag_r: double (nullable = true)\n |-- Ixx_y: double (nullable = true)\n |-- Ixy_i: double (nullable = true)\n |-- patch: string (nullable = true)\n |-- psNdata: float (nullable = true)\n |-- IxxPSF_z: double (nullable = true)\n |-- xy_flag: boolean (nullable = true)\n |-- Ixy: double (nullable = true)\n |-- I_flag_g: boolean (nullable = true)\n |-- Ixy_u: double (nullable = true)\n |-- mag_z_cModel: double (nullable = true)\n |-- psFluxErr_z: double (nullable = true)\n |-- mag_g_cModel: double (nullable = true)\n |-- IyyPSF_y: double (nullable = true)\n |-- I_flag_y: boolean (nullable = true)\n |-- psFluxErr_u: double (nullable = true)\n |-- psFlux_flag_y: boolean (nullable = true)\n |-- psFlux_flag_g: boolean (nullable = true)\n |-- IxyPSF_r: double (nullable = true)\n |-- mag_u: double (nullable = true)\n |-- Iyy_r: double (nullable = true)\n |-- mag_r_cModel: 
double (nullable = true)\n |-- psFlux_z: double (nullable = true)\n |-- IxyPSF: double (nullable = true)\n |-- magerr_u_cModel: double (nullable = true)\n |-- Iyy: double (nullable = true)\n |-- magerr_u: double (nullable = true)\n |-- I_flag_i: boolean (nullable = true)\n |-- IxxPSF: double (nullable = true)\n |-- magerr_z: double (nullable = true)\n |-- psf_fwhm_r: double (nullable = true)\n |-- mag_y_cModel: double (nullable = true)\n |-- magerr_y_cModel: double (nullable = true)\n |-- psFlux_flag_z: boolean (nullable = true)\n |-- yErr: float (nullable = true)\n |-- IxyPSF_i: double (nullable = true)\n |-- psf_fwhm_i: double (nullable = true)\n |-- psFluxErr_r: double (nullable = true)\n |-- mag_i: double (nullable = true)\n |-- IxyPSF_g: double (nullable = true)\n |-- snr_u_cModel: double (nullable = true)\n |-- psFlux_r: double (nullable = true)\n |-- magerr_g_cModel: double (nullable = true)\n |-- magerr_y: double (nullable = true)\n |-- snr_z_cModel: double (nullable = true)\n |-- ra: double (nullable = true)\n |-- psFlux_g: double (nullable = true)\n |-- IxyPSF_z: double (nullable = true)\n |-- psFluxErr_i: double (nullable = true)\n |-- psFluxErr_g: double (nullable = true)\n |-- psf_fwhm_z: double (nullable = true)\n |-- snr_g_cModel: double (nullable = true)\n |-- IyyPSF_i: double (nullable = true)\n |-- psFluxErr_y: double (nullable = true)\n |-- Iyy_y: double (nullable = true)\n |-- blendedness: double (nullable = true)\n |-- magerr_r_cModel: double (nullable = true)\n |-- psFlux_flag_r: boolean (nullable = true)\n |-- IxyPSF_y: double (nullable = true)\n |-- Ixx_g: double (nullable = true)\n |-- IxyPSF_u: double (nullable = true)\n |-- psf_fwhm_u: double (nullable = true)\n |-- IyyPSF_g: double (nullable = true)\n |-- psFlux_flag_u: boolean (nullable = true)\n |-- magerr_g: double (nullable = true)\n |-- magerr_i: double (nullable = true)\n |-- IyyPSF_r: double (nullable = true)\n |-- Ixy_g: double (nullable = true)\n |-- IxxPSF_y: double (nullable = true)\n |-- psf_fwhm_g: double (nullable = true)\n |-- Ixx_r: double (nullable = true)\n |-- dec: double (nullable = true)\n |-- parentObjectId: long (nullable = true)\n |-- IxxPSF_i: double (nullable = true)\n |-- IxxPSF_r: double (nullable = true)\n |-- I_flag: boolean (nullable = true)\n |-- I_flag_z: boolean (nullable = true)\n |-- I_flag_r: boolean (nullable = true)\n |-- IyyPSF_z: double (nullable = true)\n |-- extendedness: double (nullable = true)\n |-- psFlux_y: double (nullable = true)\n |-- psf_fwhm_y: double (nullable = true)\n |-- Ixx_u: double (nullable = true)\n |-- magerr_r: double (nullable = true)\n |-- I_flag_u: boolean (nullable = true)\n |-- IyyPSF: double (nullable = true)\n |-- psFlux_i: double (nullable = true)\n |-- IxxPSF_u: double (nullable = true)\n |-- snr_y_cModel: double (nullable = true)\n |-- mag_i_cModel: double (nullable = true)\n |-- psFlux_u: double (nullable = true)\n |-- x: double (nullable = true)\n |-- mag_u_cModel: double (nullable = true)\n |-- Ixx_i: double (nullable = true)\n |-- y: double (nullable = true)\n\n0.8s\n" ] ], [ [ "select interesting columns (for this example we will only use the i band)", "_____no_output_____" ] ], [ [ "# build selection by appending to string\ncols=[\"ra\",\"dec\",\"good\",\"clean\",\"extendedness\",\"blendedness\",\"mag_i_cModel\",\"magerr_i_cModel\",\"snr_i_cModel\",\\\n \"psf_fwhm_i\",\"Ixx_i\",\"Iyy_i\",\"Ixy_i\",\"IxxPSF_i\",\"IyyPSF_i\",\"IxyPSF_i\"]\nprint(cols)\n#use these columns\ndf=df_all.select(cols)", "['ra', 'dec', 'good', 'clean', 
'extendedness', 'blendedness', 'mag_i_cModel', 'magerr_i_cModel', 'snr_i_cModel', 'psf_fwhm_i', 'Ixx_i', 'Iyy_i', 'Ixy_i', 'IxxPSF_i', 'IyyPSF_i', 'IxyPSF_i']\n" ] ], [ [ "Apply some quality cuts", "_____no_output_____" ] ], [ [ "df=df.filter( (df.good==True)& \\\n (df.clean==True) & \\\n (df.extendedness>0.9) & \\\n (df.blendedness < 10**(-0.375)) &\\\n (df.mag_i_cModel< 24.5) &\\\n (df.snr_i_cModel>10))", "_____no_output_____" ] ], [ [ "Add a column of healpixels (mapReduce way)", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport healpy as hp\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType\n\nnside=2048\n#create the ang2pix user-defined-function. \n#we use pandas_udf because they are more efficient\n@pandas_udf('int', PandasUDFType.SCALAR)\ndef Ang2Pix(ra,dec):\n return pd.Series(hp.ang2pix(nside,np.radians(90-dec),np.radians(ra)))\n\n#add a column of healpix indices\ndf=df.withColumn(\"ipix\",Ang2Pix(\"ra\",\"dec\"))\n#groupby indices and count the number of elements in each group\ndf_map=df.groupBy(\"ipix\").count()", "_____no_output_____" ] ], [ [ "Drop all Nans and put in cache", "_____no_output_____" ] ], [ [ "timer.start()\ndf.na.drop().cache()\nprint(\"sample has {}M objects\".format(df.count()/1e6))\ntimer.stop()", "sample has 1.026842M objects\n2.6s\n" ] ], [ [ "## Mean counts", "_____no_output_____" ] ], [ [ "timer.start()\n#groupby indices and count the number of elements in each group\ndf_map=df.groupBy(\"ipix\").count()\n#statistics per pixel\ndf_map.describe(['count']).show() \n#back to python world\nmap_p=df_map.toPandas()\n#now data is reduced create the healpy map\nmap_c = np.zeros(hp.nside2npix(nside))\nmap_c[map_p['ipix'].values]=map_p['count'].values\n#map_c[map_c==0]=hp.UNSEEN\ntimer.stop()", "+-------+------------------+\n|summary| count|\n+-------+------------------+\n| count| 34445|\n| mean| 29.81106111191755|\n| stddev|13.829903847822989|\n| min| 1|\n| max| 106|\n+-------+------------------+\n\n8.7s\n" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.set_cmap('jet')\nhp.gnomview(map_c,rot=[55,-29.8],reso=hp.nside2resol(nside,arcmin=True),max=80,title='counts')", "_____no_output_____" ] ], [ [ "## Sky sigma", "_____no_output_____" ] ], [ [ "var=\"magerr_i_cModel\"\nvar_sys=\"avg(\"+var+\")\"\ndf_map=df.groupBy(\"ipix\").mean(var)\ndf_map.describe([var_sys]).show() \ndfp=df_map.toPandas()\nmap_s = np.zeros(hp.nside2npix(nside))\nmap_s[dfp['ipix'].values]=dfp[var_sys].values\nhp.gnomview(map_s,rot=[55,-29.8],reso=hp.nside2resol(nside,arcmin=True),title=var_sys)", "+-------+--------------------+\n|summary|avg(magerr_i_cModel)|\n+-------+--------------------+\n| count| 34445|\n| mean|0.024500885427180673|\n| stddev|0.012347323648925776|\n| min|2.134098291166544E-4|\n| max| 0.1084740582290262|\n+-------+--------------------+\n\n" ], [ "var='snr_i_cModel'\nvar_sys=\"avg(\"+var+\")\"\ndf_map=df.groupBy(\"ipix\").mean(var)\ndf_map.describe([var_sys]).show() \ndfp=df_map.toPandas()\nmap_s = np.zeros(hp.nside2npix(nside))\nmap_s[dfp['ipix'].values]=dfp[var_sys].values\nhp.gnomview(map_s,rot=[55,-29.8],reso=hp.nside2resol(nside,arcmin=True),min=10,max=500,title=var_sys)", "+-------+------------------+\n|summary| avg(snr_i_cModel)|\n+-------+------------------+\n| count| 34445|\n| mean|164.15670419096477|\n| stddev|169.24539569737587|\n| min|10.009178438459132|\n| max| 5087.564191641063|\n+-------+------------------+\n\n" ] ], [ [ "## Mean seeing", "_____no_output_____" ] ], [ [ 
"var=\"psf_fwhm_i\"\nvar_sys=\"avg(\"+var+\")\"\ndf_map=df.groupBy(\"ipix\").mean(var)\ndf_map.describe([var_sys]).show() \ndfp=df_map.toPandas()\nmap_s = np.zeros(hp.nside2npix(nside))\nmap_s[dfp['ipix'].values]=dfp[var_sys].values\nhp.gnomview(map_s,rot=[55,-29.8],reso=hp.nside2resol(nside,arcmin=True),min=0.45,max=1.,title=var_sys)", "+-------+-------------------+\n|summary| avg(psf_fwhm_i)|\n+-------+-------------------+\n| count| 34445|\n| mean| 0.7585912225874618|\n| stddev|0.02875550973296599|\n| min| 0.4634907660683619|\n| max| 1.2366712329163176|\n+-------+-------------------+\n\n" ] ], [ [ "## Ellipticities \n\n\ncompute distorsion (thanks to Javier). Note that we don't have redshifts\n\n### Signal", "_____no_output_____" ] ], [ [ "from pyspark.sql import functions as F\nQ11=\"IxxPSF_i\"\nQ22=\"IyyPSF_i\"\nQ12=\"IxyPSF_i\"\n\n# pre-compute denominator\ndf_shear=df.withColumn(\"denom\",F.col(Q11)+F.col(Q22))\n#read and img parts of shear\ndf_shear=df_shear.withColumn(\"R_E\",(F.col(Q11)-F.col(Q22))/F.col('denom')).\\\n withColumn(\"I_E\",(2*F.col(Q12))/F.col('denom'))\n# convert to amplitude and phase\ndf_shear=df_shear.withColumn(\"amp_E\",F.hypot(F.col(\"R_E\"),F.col(\"I_E\"))).\\\n withColumn(\"phase_E\",F.atan2(F.col(\"R_E\"),F.col(\"I_E\")))\ndf_shear.select(\"R_E\",\"I_E\",\"amp_E\",\"phase_E\").show(5)", "+--------------------+--------------------+--------------------+-------------------+\n| R_E| I_E| amp_E| phase_E|\n+--------------------+--------------------+--------------------+-------------------+\n|0.007246182544069873| 0.01418021293385543| 0.01592437126895325| 0.4724140760070876|\n|0.003568535569388947|0.014762145616105947| 0.01518734306260019| 0.2371853715455586|\n| 0.00869009643549297|0.013290660532789492| 0.01587952875421736| 0.5790768606800847|\n|0.003478605225655...|0.014664918534131778|0.015071845604592769|0.23290138725901982|\n|0.007994816523823053| 0.01396278968083379|0.016089642224760396| 0.5200138136124188|\n+--------------------+--------------------+--------------------+-------------------+\nonly showing top 5 rows\n\n" ], [ "var=\"amp_E\"\nvar_sys=\"avg(\"+var+\")\"\ndf_map=df_shear.groupBy(\"ipix\").mean(var)\ndf_map.describe([var_sys]).show() \ndfp=df_map.toPandas()\nmap_e = np.zeros(hp.nside2npix(nside))\nmap_e[dfp['ipix'].values]=dfp[var_sys].values\nhp.gnomview(map_e,rot=[55,-29.8],reso=hp.nside2resol(nside,arcmin=True),title=var_sys)", "+-------+--------------------+\n|summary| avg(amp_E)|\n+-------+--------------------+\n| count| 34445|\n| mean|0.017776996524925093|\n| stddev|0.013070370158505245|\n| min|1.333122569798362E-4|\n| max| 0.22244359041872053|\n+-------+--------------------+\n\n" ], [ "var=\"phase_E\"\nvar_sys=\"avg(\"+var+\")\"\ndf_map=df_shear.groupBy(\"ipix\").mean(var)\ndf_map.describe([var_sys]).show() \ndfp=df_map.toPandas()\nmap_e = np.zeros(hp.nside2npix(nside))\nmap_e[dfp['ipix'].values]=dfp[var_sys].values\nhp.gnomview(map_e,rot=[55,-29.8],reso=hp.nside2resol(nside,arcmin=True),title=var_sys)", "+-------+--------------------+\n|summary| avg(phase_E)|\n+-------+--------------------+\n| count| 34440|\n| mean|0.007071566530390142|\n| stddev| 0.6297128563006118|\n| min| -3.139029564064607|\n| max| 3.1409622795373204|\n+-------+--------------------+\n\n" ] ], [ [ "# Missing quantities", "_____no_output_____" ], [ "- redshift \n- airmass\n- HSM _e1/e2 (ext_shapeHSM_HsmShapeRegauss_e1 and ext_shapeHSM_HsmShapeRegauss_e2 availbale in GCR as native_quantities but not DPDD)\n- ?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
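The systematics-map notebook above pushes the ra/dec-to-healpix conversion into a Spark pandas UDF; the underlying logic can be prototyped without Spark. A sketch using only numpy and healpy (nside is reduced from the notebook's 2048 to keep the example light, and the coordinates are random stand-ins for the DPDD catalog):

```python
import numpy as np
import healpy as hp

nside = 512  # the notebook uses 2048; smaller here to keep the sketch cheap

# Fake ra/dec in degrees, standing in for the object catalogue
rng = np.random.default_rng(1)
ra = 50.0 + 10.0 * rng.random(100_000)
dec = -35.0 + 10.0 * rng.random(100_000)

# Same conversion the Ang2Pix pandas_udf performs: colatitude and longitude in radians
ipix = hp.ang2pix(nside, np.radians(90.0 - dec), np.radians(ra))

# Counts-in-pixels map: the plain-numpy analogue of df.groupBy("ipix").count()
map_c = np.zeros(hp.nside2npix(nside))
np.add.at(map_c, ipix, 1.0)

print("non-empty pixels:", int((map_c > 0).sum()), "max count:", int(map_c.max()))
```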
ec8210fa2b468b11c09b3a7517b6efa52057f4bb
113,720
ipynb
Jupyter Notebook
AWS_SQL/.ipynb_checkpoints/case2-checkpoint.ipynb
christineton/DSFA_WomensSummit
c7838c2c295e6f9a9a824fb7d1906345573b5f1f
[ "MIT" ]
1
2020-03-07T17:57:23.000Z
2020-03-07T17:57:23.000Z
AWS_SQL/case2.ipynb
christineton/DSFA_WomensSummit
c7838c2c295e6f9a9a824fb7d1906345573b5f1f
[ "MIT" ]
null
null
null
AWS_SQL/case2.ipynb
christineton/DSFA_WomensSummit
c7838c2c295e6f9a9a824fb7d1906345573b5f1f
[ "MIT" ]
null
null
null
38.998628
750
0.512232
[ [ [ "# Deriving customer insights with SQL and AWS", "_____no_output_____" ], [ "## Introduction (5 min)\n\nIn this case we'll learn about working with large databases that are stored on the cloud and accessed with a database manager. The case will be split into two parts: first, we'll learn the basics of SQL using a smaller data set locally. Then, we'll set up a larger relational database on Amazon Web Services and perform some analysis in the cloud.", "_____no_output_____" ], [ "# Part 1: Introduction to SQL\n\n**Business Context.** You are a data analyst at a large financial services firm that sells a diverse portfolio of products. In order to make these sales, the firm relies on a call center where sales agents make calls to current as well as prospective customers. The company would like you to dive into their data to devise strategies to increase their revenue or reduce their costs. Specifically, they would like to double down on their most reliable customers.\n\n**Business Problem.** The business would like to answer the following question: **\"What types of customers are most likely to buy our product?\"**\n\n**Analytical Context.** The data is split across 3 tables: \"Agents\", \"Calls\", and \"Customers\", which sit on CSV files. Unlike the last case though, we will first be reading these CSV files into a SQLite database created within Python. You will learn how this database differs from CSV files and how to interact with it using SQL to extract useful insights.", "_____no_output_____" ], [ "## Background (5 min)\n\n### Why databases?\nWhile we have been dealing with data sitting in CSV files so far, no serious data organization runs their operations off of CSV files on a single person's computer. This practice presents all sorts of hazards, including but not limited to:\n\n1. Destruction of that single device\n2. Destruction of the files on that device\n3. Inability to connect to that person's device from another device that requires the data\n4. Inability to store more than a limited amount of data (since a single device doesn't have that much memory)\n\nTherefore, our data should be stored elsewhere if we want to reliably access it in the future and, more importantly, share it and work on it with others. The **database** is the classic location where modern organizations have chosen to store their data for professional use. A couple of advantages that databases provide are:\n\n- Ability to query only certain records, instead of fetching and going through the entire CSV file\n- User-based access restrictions - specify what data each of your users can access from the database. This will strengthen the privacy of the data. \n\nDatabases have been a topic of research since the late 1960s. Many technology vendors picked up on this and developed database software for companies to consume. Some of these vendors and products are:\n\n1. Microsoft, initially with Microsoft Access and more recently with Microsoft SQL Server\n2. Oracle, with their Oracle database\n3. The “PostgreSQL Global Development Group”, with the open-source PostgreSQL", "_____no_output_____" ], [ "### Types of databases\n\nAt this point, you might believe that databases can be thought of as a collection of data. This is true, but unfortunately it is not that simple. Data cannot simply be thrown in a database the same way you throw your socks in your sock drawer. 
The data that you wish to store in your database must follow some patterns which are determined by the **database type** you wish to store.", "_____no_output_____" ], [ "#### Relational databases\n\nThe most common database type is called a **relational database**, and the systems that manage these kinds of databases are called **Relational Database Management Systems (RDBMS)**. Relational databases date back to the early 1970s and can be considered the first type of database ever conceived.\n\nRelational databases deal with “relational data”, which is a fancy way to say “tabular” data. This kind of dataset consists of rows and columns (i.e. tables) where each row corresponds to an observation and each column corresponds to an attribute of that observation. So, for example, if we go back to the example where we were keeping track of our friends and their phones, each row on the file (or table) represents one friend and each column represents the information we want to track about that friend (name and phone number). The cell on the intersection of the row and column contains the actual data. Relational data is manipulated using a specific language called **SQL (Structured Query Language)**, which we will learn about soon.\n\nA simple way to conceptualize a table inside a relational database is as a CSV file “copied” to the database. In fact, many databases offer that possibility (assuming your file is correctly formatted, of course).", "_____no_output_____" ], [ "#### NoSQL databases\n\nAround 20 years ago, with the advent of the internet and the necessity to store and process unstructured data (i.e. data that does not fit well in the row-by-column paradigm), developers started to discuss another type of database, which eventually ended up being referred to as a **NoSQL database**. As the name implies, these databases do not rely on SQL and are not relational. They are also built with more “relaxed” rules compared to their predecessors.", "_____no_output_____" ], [ "## SQL Basics (20 min)", "_____no_output_____" ], [ "### What is this \"SQL\" thing?\n\nJust like data can't really survive without a database, a database can't be utilized without SQL. SQL is used for a wide variety of tasks, including but not limited to extracting data, creating the internal structure of a database (in the form of tables), and reading and writing data to these tables.\n\nIn this case, we will be writing SQL queries using the `SQLAlchemy` package in Python. This allows you to directly interface with relational databases without exiting the Python environment, while using syntax that is identical to what you would write outside of Python. 
Run the code below to set up this framework:", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom sqlalchemy import create_engine, text\n#maximum number of rows to display\npd.options.display.max_rows = 10\n\nengine=create_engine('sqlite://')\ndf = pd.read_csv('customers.csv').to_sql('customers', engine, if_exists='replace', index=False)\ndf = pd.read_csv('agents.csv').to_sql('agents', engine, if_exists='replace', index=False)\ndf = pd.read_csv('calls.csv').to_sql('calls', engine, if_exists='replace', index=False)\n\ndef runQuery(sql):\n result = engine.connect().execute((text(sql)))\n return pd.DataFrame(result.fetchall(), columns=result.keys())", "_____no_output_____" ] ], [ [ "We can see what the tables look like:", "_____no_output_____" ] ], [ [ "# display the customers table\nqueryd1 = \"\"\"SELECT *\nFROM customers\n\"\"\"\nrunQuery(queryd1)", "_____no_output_____" ], [ "# display the calls table\nqueryd2 = \"\"\"SELECT *\nFROM calls\n\"\"\"\nrunQuery(queryd2)", "_____no_output_____" ] ], [ [ "### Exploring the `customers` table\n\nThe most important thing you will ever do in SQL is extract a subset of the data from a SQL table based on a set of rules. This is accomplished using the following statement syntax:\n\n1. Start with the keyword `SELECT`\n2. Follow with the names of the columns you want to select, separated by commas (alternatively, you can use the `*` symbol to indicate you wish to select all columns)\n3. Follow with the keyword `FROM`\n4. Finish with the name of the table you wish to select data from\n\t\nAdditionally, you can use the `WHERE` clause to only return results which satisfy certain conditions (similar to how code within Python if-then blocks only execute if the associated conditions are true). `WHERE` clauses immediately follow the table name you want to select data from.\n\nSince the firm wants to dig deeper into its customers, let's start by pulling some of their data out of our files; namely, information about customers who are not unemployed (and therefore are more likely to buy from us).", "_____no_output_____" ], [ "### Exercise 1: (5 min)\n\nWrite a query that selects the customer ID and name from the `customer` table, only showing results for customers who are not unemployed. Remember to write your query as a multi-line string (enclosed within a pair of triple quotes `\"\"\"`) and pass it to the `runQuery()` function defined in the framework above to check your work!", "_____no_output_____" ], [ "**Answer**. One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT customerid, name\nFROM customers\nWHERE occupation != 'Unemployed'\n```", "_____no_output_____" ] ], [ [ "query1 = \"\"\"SELECT customerid, name\nFROM customers\nWHERE occupation != 'Unemployed'\"\"\"\nrunQuery(query1)", "_____no_output_____" ] ], [ [ "Of course, for names, it's sensible to try to list them in alphabetical order. SQL allows us to do this rather easily with the `ORDER BY` statement. This is then followed by a comma-separated list of columns on which you want to order your results (columns that come first take priority in the subsequent ordering). Optionally, you can then append the keyword `ASC` or `DESC` (short for ascending and descending, respectively) after each column to determine the ordering type (e.g. alphabetical or reverse-alphabetical for a string column).\n\nWe can also use the `AS` statment to change the name of a column returned by your query. However, this change is only temporary and is only valid for that particular query. 
For example, we can rename the `name` column to `customername` and order it alphabetically. This operation is known as **aliasing**:", "_____no_output_____" ], [ "```SQL\nSELECT customerid, name AS customername\nFROM customers\nWHERE occupation != 'Unemployed'\nORDER BY customername\n```", "_____no_output_____" ] ], [ [ "query2 = \"\"\"SELECT customerid, name AS customername\nFROM customers\nWHERE occupation != 'Unemployed'\nORDER BY customername\"\"\"\nrunQuery(query2)", "_____no_output_____" ] ], [ [ "This is a great first step; however, while producing the list of customers that are not unemployed, you inevitably spend a lot of time looking at the different professions your customers have and realize how often engineers appear in your database. You know that engineering jobs tend to command higher salaries these days, so you decide to try to extract a list of all the unique types of engineering jobs that are represented in your database. To ensure that you don't get duplicate job titles in your query results, you'll need to write the keyword `DISTINCT` immediately after `SELECT` in your query.", "_____no_output_____" ], [ "### Exercise 2: (5 min)\n\nWrite a query which produces a list, in alphabetical order, of all the distinct occupations in the `customer` table that contain the word \"Engineer\".\n\n(Hint: The `LIKE` operator can be used when you want to look for similar values. It is included as part of a `WHERE` clause. It needs to be complemented with the `%` symbol, which is a wild card that represents zero, one, or multiple characters. For example, one valid `WHERE` clause utilizing the `LIKE` operator is `WHERE name LIKE 'Matt%'`, which would return any results where the person's name starts with the word \"Matt\"; e.g. \"Matt\" or \"Matteo\" or \"Matthew\", etc.)", "_____no_output_____" ], [ "**Answer.** One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT DISTINCT occupation\nFROM customers\nWHERE occupation LIKE '%Engineer%'\nORDER BY occupation\n```", "_____no_output_____" ] ], [ [ "query3 = \"\"\"SELECT DISTINCT occupation\nFROM customers\nWHERE occupation LIKE '%Engineer%'\nORDER BY occupation\"\"\"\nrunQuery(query3)", "_____no_output_____" ] ], [ [ "Now, one of your marketing colleagues tells you that people who are 30 or older will have a higher probability of buying your product (presumably because by that point they have more disposable income and savings). You don't want to take your colleague's word for granted, so you decide not to completely ignore people under 30, but instead to add that information on the report regarding the person’s age, so that the agent making the subsequent call can decide how they want to use that information. However, due to privacy concerns, you also cannot share the person's exact age.", "_____no_output_____" ], [ "### Exercise 3: (5 min)\n\nWrite a query that retuns the customer ID, their name, and a column `Over30` containing \"Yes\" if the customer is more than 30 years of age and \"No\" if not.\n\n(Hint: You will need to use the `CASE-END` clause. The `CASE-END` clause can be used to evaluate conditional statements and returns a value once a condition is met (similar to an if-then-else clause in Python). If no conditions are true, it returns the value in the ELSE clause (or NULL if there is no ELSE statement). 
For example:\n\n```SQL\nCASE\n WHEN name = \"Matt\" THEN 'Yes'\n WHEN name = \"Matteo\" THEN 'Maybe'\n ELSE 'No'\nEND\n```", "_____no_output_____" ], [ "**Answer.** One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT customerid, name,\n CASE\n WHEN age >= 30 THEN 'Yes'\n WHEN age < 30 THEN 'No'\n ELSE 'Missing Data'\n END AS Over30\nFROM customers\nORDER BY name DESC\n```", "_____no_output_____" ] ], [ [ "query4 = \"\"\"SELECT customerid, name,\n CASE\n WHEN age >= 30 THEN 'Yes'\n WHEN age < 30 THEN 'No'\n ELSE 'Missing Data'\n END AS Over30\nFROM customers\nORDER BY name DESC\"\"\"\nrunQuery(query4)", "_____no_output_____" ] ], [ [ "Let's now modify Exercise 3 so that the query only returns customers who work in an engineering profession:", "_____no_output_____" ], [ "```SQL\nSELECT customerid, name,\n CASE\n WHEN age >= 30 THEN 'Yes'\n WHEN age < 30 THEN 'No'\n ELSE 'Missing Data'\n END AS Over30\nFROM customers\nWHERE occupation LIKE '%Engineer%'\nORDER BY name DESC\n```", "_____no_output_____" ] ], [ [ "query5 = \"\"\"SELECT customerid, name,\n CASE\n WHEN age >= 30 THEN 'Yes'\n WHEN age < 30 THEN 'No'\n ELSE 'Missing Data'\n END AS Over30\nFROM customers\nWHERE occupation LIKE '%Engineer%'\nORDER BY name DESC\"\"\"\nrunQuery(query5)", "_____no_output_____" ] ], [ [ "## Investigating customer conversion rates (30 min)", "_____no_output_____" ], [ "In order to validate whether our hypotheses about engineers and age are true (for example, engineers exhibit higher product sales conversion rates, and perhaps engineers over 30 tend to exhibit an even higher conversion rate), we will need to use two tables: `calls` and `customers`. This is because the column `productsold` lies only in the `calls` table, yet information about customer professions and age only lie in the `customers` table.\n\n`SELECT` commands are not restricted to a single table. In fact, theoretically, there is no limit to the number of tables that you can extract data from in a single SQL query. Let's introduce some new concepts that are relevant once we go beyond a single table.\n\n**Primary and foreign keys** are very important concepts that need to be understood by any database professional. Primary keys:\n\n1. Uniquely identify a record in the table. Their name usually includes the word \"id\"\n * For example, `customerid` is the primary key of the `customers` table, `agentid` is the primary key of the `agents` table, and `callid` is the primary key of the `calls` table \n2. Do not accept null values. And they shouldn't, because they are being used to identify the record\n3. Are limited to one per table\n\nOn the other hand, foreign keys:\n\n1. Are a field in the table that is the primary key in another table\n2. Can accept null values\n3. Are not limited in any way per table\n * For example, the `calls` tables has 2 foreign keys: `agentid` and `customerid` pointing to the `agents` and `customers` tables, respectively", "_____no_output_____" ], [ "### Extracting call data for customers working in engineering professions (10 min)", "_____no_output_____" ], [ "Let's first extract the relevant data so we can perform this analysis. Here, a `JOIN` clause will come in handy. A `JOIN` clause consists of two parts:\n\n1. The base `JOIN` statement, which is of the form `[Table 1] JOIN [Table 2]`. This performs a Cartesian product on the 2 tables being joined. For example, if we have Table A with 5 rows, and Table 5 with 3 rows, their Cartesian product will return 15 rows (5 x 3)\n2. 
A `JOIN` criteria, which filters the Cartesian product's results, beginning with the `ON` keyword\n\nHere is an example of a `JOIN` criteria in action, which is telling us to only give combinations of rows where the agent ID matches in both tables:\n\n```SQL\nSELECT callid, a.agentid, name\nFROM calls c\nJOIN agents a ON c.agentid = a.agentid\nORDER BY name DESC\n```", "_____no_output_____" ] ], [ [ "query6 = \"\"\"SELECT callid, a.agentid, name\nFROM calls c\nJOIN agents a ON c.agentid = a.agentid\nORDER BY name DESC\"\"\"\nrunQuery(query6)", "_____no_output_____" ] ], [ [ "Note that:\n\n1. `c` and `a` are aliases to the `calls` and `agents` tables to avoid having to type the table name every time. Unlike with column aliasing earlier, we do not need the `AS` keyword here\n2. We write `a.agentid` instead of `agentid` in the SELECT statement – this is because the `agentid` column exists in both tables, so we have to tell the database which one to get the result from", "_____no_output_____" ], [ "### Exercise 4: (5 min)\n\nWrite a query which returns all calls made out to customers in the engineering profession, and shows whether they are over or under 30 as well as whether they ended up purchasing the product from that call.", "_____no_output_____" ], [ "**Answer.** One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT callid, cu.customerid, name, productsold,\n CASE\n WHEN age >= 30 THEN 'Yes'\n WHEN age < 30 THEN 'No'\n ELSE 'Missing Data'\n END AS Over30\nFROM customers cu\nJOIN calls ca ON ca.customerid = cu.customerid\nWHERE occupation LIKE '%Engineer%'\nORDER BY name DESC\n```", "_____no_output_____" ] ], [ [ "query7 = \"\"\"SELECT callid, cu.customerid, name, productsold,\n CASE\n WHEN age >= 30 THEN 'Yes'\n WHEN age < 30 THEN 'No'\n ELSE 'Missing Data'\n END AS Over30\nFROM customers cu\nJOIN calls ca ON ca.customerid = cu.customerid\nWHERE occupation LIKE '%Engineer%'\nORDER BY name DESC\"\"\"\nrunQuery(query7)", "_____no_output_____" ] ], [ [ "### Analyzing the call conversion data (20 min)\n\nNow, we want to determine whether or not customers in our desired cohort exhibit a higher sales conversion rate compared to the overall population of customers. A reasonable way to do this is to count the total number of calls to this cohort which resulted in a sale, and divide that by the total number of calls to this cohort (whether or not they resulted in a sale) to get a percentage, and then compare that with the percentage we compute from the `calls` table overall.\n\nHowever, to compute these figures, we'll need to learn a bit about **aggregation functions**. An aggregation function allows you to perform a calculation on a set of values to return a single value, essentially computing some sort of summary statistic.\n\nThe following are the most commonly used SQL aggregate functions:\n\n1. `AVG()` – calculates the average of a set of values\n2. `COUNT()` – counts rows in a specified table or view\n3. `MIN()` – gets the minimum value in a set of values\n4. `MAX()` – gets the maximum value in a set of values\n5. `SUM()` – calculates the sum of values", "_____no_output_____" ], [ "### Exercise 5: (10 min)\n\nWrite two queries: one that computes the total sales and total calls made to customers in the engineering profession, and one that computes the same metrics for the entire customer base. What can you conclude regarding the conversion rate within the engineering customers vs. 
the overall customer base?", "_____no_output_____" ], [ "**Answer.** One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT SUM(productsold) AS totalsales, COUNT(*) AS ncalls\nFROM customers cu\nJOIN calls ca ON ca.customerid = cu.customerid\nWHERE occupation LIKE '%Engineer%'\n```", "_____no_output_____" ] ], [ [ "query8 = \"\"\"SELECT SUM(productsold) AS totalsales, COUNT(*) AS ncalls\nFROM customers cu\nJOIN calls ca ON ca.customerid = cu.customerid\nWHERE occupation LIKE '%Engineer%'\"\"\"\nrunQuery(query8)", "_____no_output_____" ] ], [ [ "```SQL\nSELECT SUM(productsold) AS totalsales, COUNT(*) AS ncalls\nFROM customers cu\nJOIN calls ca ON ca.customerid = cu.customerid\n```", "_____no_output_____" ] ], [ [ "query9 = \"\"\"SELECT SUM(productsold) AS totalsales, COUNT(*) AS ncalls\nFROM customers cu\nJOIN calls ca ON ca.customerid = cu.customerid\"\"\"\nrunQuery(query9)", "_____no_output_____" ] ], [ [ "The conversion rate for both groups is ~20.9%, indicating that engineers are not more likely to purchase our products than the overall population.", "_____no_output_____" ], [ "### Exercise 6: (5 min)\n\nWrite a query that computes the total sales and total calls made to customers over the age of 30. Is there a notable difference between the conversion ratio here and that of the overall customer base?", "_____no_output_____" ], [ "**Answer.** One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT SUM(productsold) AS totalsales, COUNT(*) AS ncalls\nFROM customers cu\nJOIN calls ca ON ca.customerid = cu.customerid\nWHERE age >= 30\n```", "_____no_output_____" ] ], [ [ "query10 = \"\"\"SELECT SUM(productsold) AS totalsales, COUNT(*) AS ncalls\nFROM customers cu\nJOIN calls ca ON ca.customerid = cu.customerid\nWHERE age >= 30\"\"\"\nrunQuery(query10)", "_____no_output_____" ] ], [ [ "The conversion rate is ~21.1% vs. the overall ~20.9%. There may be some difference, but it is quite small so we would need to run statistical significance tests in order to validate this. Since that's not the focus of this case, we'll skip that for now.", "_____no_output_____" ], [ "### Exercise 7: (5 min)\n\nHow about if you look at the sales conversion rate for engineers over the age of 30?", "_____no_output_____" ], [ "**Answer.** One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT SUM(productsold) AS totalsales, COUNT(*) AS ncalls\nFROM customers cu\nJOIN calls ca ON ca.customerid = cu.customerid\nWHERE occupation LIKE '%Engineer%' AND age >= 30\n```", "_____no_output_____" ] ], [ [ "query11 = \"\"\"SELECT SUM(productsold) AS totalsales, COUNT(*) AS ncalls\nFROM customers cu\nJOIN calls ca ON ca.customerid = cu.customerid\nWHERE occupation LIKE '%Engineer%' AND age >= 30\"\"\"\nrunQuery(query11)", "_____no_output_____" ] ], [ [ "Here, we actually observe the opposite pattern – the conversion rate is only ~20.5%.\n\nFrom these numbers, we can conclude that a customer's status as an engineering professional has no positive effect on their conversion rate. On the other hand, having an age of at least 30 MAY have some effect; however, we would need to do more in-depth statistical testing to determine this.", "_____no_output_____" ], [ "# Transition (10 min)", "_____no_output_____" ], [ "## What is AWS?\n\nAmazon Web Services (AWS) is a cloud service from Amazon, which provides services in the form of building blocks. 
These building blocks can be used to create and deploy any type of application in the cloud.\n\nThese services are designed to work with each other, and result in applications which are sophisticated and highly scalable. The following are the most commonly used domains:\n\n- The **Compute** domain includes services related to compute workloads. Services in this domain can be used to run computationally intensive or repetitive tasks that you don't want to run locally\n- The **Database** domain is used for database-related workloads. Services in this domain provide cost-efficient and resizable capacity and can automate time-consuming administration tasks such as provisioning hardware, setting up the database, patching, and making backups\n- The **Migration** domain is used for transferring data to or from the AWS Infrastructure\n- The **Networking and Content Delivery** domain is used for isolating your network infrastructure, and content delivery is used for faster delivery of content\n- The **Management Tools** domain consists of services which are used to manage other services in AWS\n- The **Security & Identity, Compliance** domain consists of services which are used to manage authentication and provide security for your AWS resources\n- The **Messaging** domain consists of services which are used for queuing, notifying or emailing messages\n\nIn this case, we'll focus on the database domain, specifically the RDS service. You can find more information about other AWS services in the Additional Resources section at the end of the case.", "_____no_output_____" ], [ "## Log In! \n\nStart by signing in to [AWS](https://signin.aws.amazon.com/signin?redirect_uri=https%3A%2F%2Fconsole.aws.amazon.com%2Fconsole%2Fhome%3Fnc2%3Dh_ct%26src%3Dheader-signin%26state%3DhashArgs%2523%26isauthcode%3Dtrue&client_id=arn%3Aaws%3Aiam%3A%3A015428540659%3Auser%2Fhomepage&forceMobileApp=0) using your root user credentials. If you have not created your own AWS account, please consult with your TA to obtain the relevant instructions.", "_____no_output_____" ], [ "# Part Two: Analyzing Net Promoter Score (NPS) data with AWS", "_____no_output_____" ], [ "## Introduction (5 min)\n\n**Business Context.** You are a data scientist at a new but fast-growing startup. The startup released its first product 12 months ago and has been tracking Net Promoter Score (NPS) over its growing customer base since the product's launch.\n\nThe team assumes that the NPS score is correlated with product stability and feature-completeness and that the product has been getting more stable and complete over time. They also realize that there have been some hiccups along the way, and they assume that NPS has therefore fluctuated up and down.\n\n**Business Problem.** The startup wants you to investigate the data and answer the following question: **\"Has our NPS improved over time? And has our average NPS decreased in specific periods over the last 12 months?\"**\n\n**Analytical Context.** In this part of the case, you will be working with a large dataset – so large that your personal laptop is not powerful enough to run heavy SQL queries on it (the startup is stingy and doesn't provide employees with hardware – luckily they have free cloud credits though!). Instead, you will be working with a powerful PostgreSQL database in the cloud (on Amazon Web Services), and uploading the data there for remote processing. 
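\n\nMost of this part of the case sends SQL to the database through the `psql` command-line client, but the same queries can also be issued from Python if you prefer. The snippet below is only a minimal sketch of that idea, assuming the `psycopg2` package is installed; the endpoint and password are placeholders, and it presumes that the `nps_demo_db` database, the `nps_demo_user` user and the `score` table we create over the next few sections already exist.\n\n```python\nimport psycopg2  # assumed to be installed, e.g. via pip install psycopg2-binary\n\n# Placeholder connection details: substitute your own RDS endpoint and credentials\nconn = psycopg2.connect(\n    host=\"your-endpoint.rds.amazonaws.com\",\n    dbname=\"nps_demo_db\",\n    user=\"nps_demo_user\",\n    password=\"your-password\",\n)\nwith conn.cursor() as cur:\n    cur.execute(\"SELECT COUNT(*) FROM score;\")  # the query itself runs on the remote machine\n    print(cur.fetchone()[0])\nconn.close()\n```\n\n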
We'll connect to the remote database and have the remote machine run the resource-intensive queries.", "_____no_output_____" ], [ "## Understanding the Net Promoter Score (NPS) (10 min)\n\nNPS is a metric to measure customer satisfaction. You've probably seen pop-ups online, or received surveys via email, asking you \"Would you recommend [product] to a friend or family member?\" and giving you the option to respond with a number between 0 and 10. That's someone collecting information to calculate their NPS.\n\n![nps Example Survey](images/nps-example-survey.png)\n\nThe basic idea is simple - customers who respond with high ratings are more likely to promote your product to other potential customers. Customers who give low ratings are unhappy and are unlikely to help you grow your customer base. If you ask enough people at different time periods, you can track customer satisfaction over time and see how this correlates to product development and other aspects of your business that are within your control. \n\nNPS categorizes users into three groups based on the ratings that they leave. This is done as follows:\n\n1. Users who leave a rating of 0 - 6 are regarded as \"detractors\"\n2. Users who leave a rating of 7 or 8 are regarded as \"passives\"\n3. Users who leave a rating of 9 or 10 are regarded as \"promoters\"\n\nThe final NPS score for a given period is calculated as the percentage of total users who are promoters minus the percentage of total users who are detractors. This means that an NPS score can be anything from -100 to 100.", "_____no_output_____" ], [ "### Exercise 8: (5 min)\n\nIf you have the following scores left by your customers:\n\n| Date | CustomerId | Score | Group |\n| -------------- | ---------- | ----- | --------- |\n| 1 January 2018 | 562 | 1 | Detractor |\n| 1 January 2018 | 544 | 10 | Promoter |\n| 2 January 2018 | 333 | 9 | Promoter |\n| 2 January 2018 | 102 | 9 | Promoter |\n| 4 January 2018 | 267 | 9 | Promoter |\n| 5 January 2018 | 981 | 10 | Promoter |\n| 6 January 2018 | 105 | 6 | Detractor |\n| 6 January 2018 | 459 | 7 | Passive |\n| 6 January 2018 | 188 | 10 | Promoter |\n| 8 January 2018 | 982 | 8 | Passive |\n\nWhat is your NPS? How would you adjust your calculation of NPS if instead users had many opportunities to rate you in a short time period? What would you consider to be a \"good\" NPS? ", "_____no_output_____" ], [ "**Answer.** We have 10 responses: 6 promoters, 2 detractors and 2 passives. That is 60% promoters and 20% detractors, so our NPS is $60 - 20 = 40$.", "_____no_output_____" ], [ "If users could rate us many times in a short time span, a sensible adjustment would be to first average all of the responses per user, and use this averaged response to group each user into promoter, detractor, or passive. This is because no matter how many times a single user interacts with our product, they are likely still only paying us once for it. Thus, since they are not weighted more heavily in our revenue streams, they should not be weighted more heavily in our customer satisfaction schemes either.\n\nDefining what constitutes a good NPS depends on the specific line of business the company is in. It varies between [different industries](https://www.qualtrics.com/marketplace/nps-benchmarks/) with internet providers generally getting far lower scores than technology companies.", "_____no_output_____" ], [ "## Setting up a cloud database using RDS and importing data (30 min)\n\nOk! 
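\n\nBefore we touch AWS, here is a quick sanity check of the Exercise 8 arithmetic in plain Python (just a sketch using the hypothetical scores from the exercise table above):\n\n```python\n# Hypothetical scores from the Exercise 8 table (not real survey data)\nscores = [1, 10, 9, 9, 9, 10, 6, 7, 10, 8]\n\npromoters = sum(1 for s in scores if s >= 9)\ndetractors = sum(1 for s in scores if s <= 6)\nnps = 100 * promoters / len(scores) - 100 * detractors / len(scores)\nprint(nps)  # 40.0, matching the hand calculation\n```\n\n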
Let's set up a database and load in some NPS data so that we can analyze it using SQL. We'll use the code at [this repository](https://github.com/sixhobbits/nps-sample-data) to generate a large sample of fake NPS data and push it into a PostgreSQL instance running in the cloud. (Don't look at the source code that generates the data, as it will spoil the fun.)\n\n1. Log into your AWS account and select \"RDS\" from the service list. You should see a screen like the one below, where you can hit the \"Create database\" button:\n\n![Create Database](images/create_db.png)\n\n2. The next option you'll see asks you if you want to use \"standard create\" or \"easy create\". Easy might sound tempting, but **choose \"standard\"** as we'll have to set up our database for public use so we can connect to it locally.\n\n3. Choose \"PostgreSQL\" as the database type, leave the version at the default AWS has chosen for you (10.6-R1 at the time of writing), and choose \"Free Tier\"\n\n4. Under the next section, choose a name for your database instance. Remember this is the machine that is hosting the database software, not the database itself (one RDS instance can host many databases), so I'm calling mine `nps-demo-instance` to reflect this, although we'll only be creating a single database for now. \n\n5. You can leave the master username as `postgres` and ask RDS to autogenerate a password (we'll be able to see this password at the next step):\n\n![Set DB password](images/set_db_password.png)\n\n6. You can leave the next settings as their defaults until you get to the \"Connectivity\" section. Usually, you'll set up an RDS instance to play with other infrastructure within your AWS account, such as EC2 servers. In our case, we want to push data in and out of the database directly from our local machine as the client, so we'll have to set our database up for \"public access\". This is generally less secure, but we'll add some firewall rules in a bit to make sure that only we can access it:\n\n * Expand the \"Additional connectivity configuration\" section\n\n * Set \"publicly accessible\" to \"Yes\"\n\n * Under \"VPC security group\", choose to \"Create new\", and give it a name like `allow-local-access`. This will create a firewall rule that will allow you to connect to your database on port 5432 (the default for PostgreSQL) using your current IP address. If you are using public WiFi, a hotspot, or if you think your IP address is likely to change soon for any reason, note that you'll have to modify this security group any time your IP address changes:\n\n![Create Security Group](images/create-sec-group.png)\n\n7. Press the \"Create database\" button in the bottom right, and you'll be taken back to the overview page where you can see your database being created. At the top, there'll be a notification where you can press \"View credential details\" to access your master password that was automatically generated. Take note of this as you can only see it once. NOTE: this creates a database in the default VPC. If your default VPC is not configured for DNS connections, you will need to create a new VPC. Please see 'Appendix 1: Troubleshooting RDS creation' for instructions on how to do achieve this.\n\n![View credentials](images/view_creds.png)\n\n8. Once your database becomes \"available\" (you might need to press the \"refresh\" button indicated below to see the change), you can connect to it. 
Click on the name of the database (`nps-demo-instance` in our example), to find out the connection details:\n\n![DB available](images/db-available.png)\n\n9. Once you click on the database, you should see the endpoint that you need on a screen similar to the one shown below. You need this endpoint to connect to the database from your local machine.\n\n![DB Endpoint](images/db-endpoint.png)\n\n10. Locally, open a terminal and run the following command, substituting [endpoint] with the one that you noted from the RDS console above.\n\n```bash\npsql -h [endpoint] -U postgres\n```\n\nThis will connect to our instance's default database using the master username. It will prompt you for the password and you can enter the autogenerated password from above. You should now see a SQL prompt, similar to the image below:\n\n![PSQL prompt](images/psql-prompt.png)\n\nWe've successfully created a cloud database and connected to it!", "_____no_output_____" ], [ "### Setting up our NPS database (10 min)\n\nLet's proceed by setting up our database in Amazon RDS:\n\n1. In the SQL shell, run the following commands to create a database, create a user to manage our database, and give privileges on our new database to our new user. Replace [password] with your own choice of password:\n\n```SQL\ncreate database nps_demo_db;\ncreate user nps_demo_user with login encrypted password '[password]';\ngrant all privileges on database nps_demo_db to nps_demo_user;\n\\q\n```\n\nHere, `\\q` closes the connection so you can re-open it under a different user.\n\n2. Run the following command. It is similar to the one we used before to connect but now specifies both our custom user and our custom database. Once again, substitute [endpoint] with the one you see in the RDS console.\n\n```SQL\npsql -h [endpoint] -U nps_demo_user -d nps_demo_db\n```\n\n3. Put in the new password that you entered in the SQL statement in step 1 instead of the master password that AWS automatically generated for us. You'll see a very similar prompt, but with the `nps_demo_db=>` prompt instead of `postgres=>`:\n\n![nps demo prompt](images/nps-demo-prompt.png)\n\nThe next thing we need to do is to create tables to house our data. We'll use the data from [this repository](https://github.com/sixhobbits/nps-sample-data/), consisting of two tables: `customer` and `score`. 
There are some extra fields on `customer` (`is_premier` and `is_spam`) that we won't use right away, but we'll create our tables to match that format anyway to make the import easier.\n\nThe important context is that we are imagining a scenario where:\n\n* We have been running a new company for around one year.\n* The product has gone through different stages of feature improvement and stability but has overall shown growth and improvement.\n* Every day, new customers join and both new and old customers may or may not leave us a score between 0-10 to rate how likely they are to recommend our product to family and friends.\n* At the start and at some key points during the year, the product is unstable or lacking features and this affects the customer rating.", "_____no_output_____" ], [ "Now, lets use SQL to create two tables: one for customers, and one for the scores that our customers leave.\n\nTo create the `customer` table, we do the following:\n\n```SQL\ncreate table customer (id serial not null, created_at date, is_premier boolean, is_spam boolean, CONSTRAINT customer_pkey PRIMARY KEY (id));\n```\n\nThis creates a `customer` table with an ID, the date the customer first signed up (`created_at`), and two boolean flags that we don't need yet. It also adds a constraint to the ID field saying it is a primary key, meaning it has to be unique. \n\n\nTo create the table of scores that our customers leave, use the following command in the same prompt:\n\n```SQL\ncreate table score (id serial not null, customer_id integer references customer(id), created_at date, score integer, CONSTRAINT scores_pkey PRIMARY KEY (id));\n```\n\nThis is similar to the `customer` table, but has a `score` field to store the value between 0 and 10 that a customer leaves each time they complete a survey, and another date field to record when the survey was done. There is also a foreign key `customer_id` to link each score to a specific entity in the `customer` table. \n\nYou can close the connection again with `\\q`.", "_____no_output_____" ], [ "### Pushing sample data into RDS (10 min)\n\nLet's now push the NPS data onto RDS:\n\n1. Download the two CSV files (`score.csv` and `customer.csv`) from https://github.com/sixhobbits/nps-sample-data into your local working directory. Don't look at the README file.\n\n2. Run the command below, again substituting [endpoint] with the actual endpoint you used above. Make sure that the `customer.csv` file is located in the same directory that you run the `psql` command from:\n\n```bash\npsql -h [endpoint] -U nps_demo_user -d nps_demo_db -c \"\\copy customer from 'customer.csv' with (format csv, header true, delimiter ',');\"\n```\n\nThe first part of the command is the same one we used before to open a SQL shell. Here we also pass the `-c` flag which allows us to specify a SQL command to be run on the database. Because our shell has permissions to access our local file system, but our database doesn't, running the command like this means we won't have problems with permissions. In the `\\copy` command, we specify which table we want to populate (`customer`), where the local file is (`customer.csv`), that our file is in CSV format, that it has a header, and that we are using a comma as a delimiter. \n\nThis should prompt you for the password (again, use the one that you created for the `nps_demo_user`). It will then let you know how many rows it has successfully imported, similar to the image below:\n\n![Copy successful](images/copy-successful.png)\n\n3. 
Now we can add the scores data as well using the same method. The only things we need to change are the table name and the filename from which we source the data. The full command (don't forget to substitute your endpoint) is:\n\n```bash\npsql -h [endpoint] -U nps_demo_user -d nps_demo_db -c \"\\copy score from 'score.csv' with (format csv, header true, delimiter ',');\"\n```\n\nThere are a lot more sample scores than customers (as each customer can respond to the survey more than once), so this will take a bit longer than the previous command:\n\n![Import scores](images/import-scores.png)", "_____no_output_____" ], [ "## Analyzing our NPS data using SQL (40 min)\n\nNow we can proceed to the fun part. We have NPS scores left by a large number of customers over the past year, and we want to see how these scores change over time.\n\nWe only have raw data – numbers between 0 and 10 inclusive – so we'll use SQL to group this data in different ways and transform it into NPS data. If you remember how to define NPS from the first section, you can probably work out that the main things we need to do are:\n\n1. Break down our scores per customer for any given time period (here, we will look at this per week)\n2. Divide customers into promoters, passives or detractors, based on the scores they have left in that week\n3. Calculate the NPS per week and look at how this value changes week-by-week", "_____no_output_____" ], [ "### Counting customers and scores (10 min)\n\nWe saw how many customers and scores we had when we did the import step above. However, in a real-world setting, you would have gathered this data slowly, over time, so let's start by counting out customers, our survey responses (`scores`), and looking at how many surveys each customer responds to. For each of the following, you'll need to be connected to the SQL shell, so run the following first (using your endpoint) and any time you need to.\n\nWe'll show the output of each SQL command directly below – you only need to enter the command shown in the first section in each of the following examples.\n\n```bash\npsql -h [endpoint] -U nps_demo_user -d nps_demo_db\n```\n\n#### Counting customers\n\n```SQL\nSELECT COUNT(*) FROM customer;\n```\n\n```\n count\n--------\n 188323\n(1 row)\n```\n\nWe have nearly 200k customers, which is not bad for a product that's been running for one year!\n\n#### Counting scores\n\n```SQL\nSELECT COUNT(*) FROM score;\n```\n\n```\n count\n---------\n 1577578\n(1 row)\n```\n\nAnd we have over 1.5 million survey responses. That's just over 8 responses per customer if we assume an equal distribution. Let's use SQL to look at that.", "_____no_output_____" ], [ "### Exercise 9: (5 min)\n\nWrite a SQL query that outputs a table showing the 10 customers with the highest number of responses and their total response count, in descending order (customer with most responses at the top).", "_____no_output_____" ], [ "**Answer.** One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT customer_id, COUNT(score.id) AS cnt FROM score\nINNER JOIN customer ON customer_id = customer.id\nGROUP BY customer_id ORDER BY cnt DESC\nLIMIT 10;\n```\n\n```\n customer_id | count\n-------------+-------\n 31 | 38\n 928 | 38\n 4271 | 38\n 5333 | 37\n 1253 | 37\n 1259 | 36\n 1030 | 36\n 2327 | 36\n 564 | 36\n 2335 | 36\n```", "_____no_output_____" ], [ "We can also use SQL JOINs using commas and a `WHERE` clause as a shortcut. 
The above command is equivalent to the following one, but the earlier version is preferable in most contexts as it is more explicit:\n\n```SQL\nSELECT customer_id, COUNT(score.id) AS cnt FROM score, customer\nWHERE customer_id = customer.id\nGROUP BY customer_id ORDER BY cnt DESC\nLIMIT 10;\n```\n\n\n\nWe can also look at customers who have left very few responses by ordering by `ASC` instead of `DESC`:\n\n```SQL\nSELECT customer_id, COUNT(score.id) AS cnt FROM score\nINNER JOIN customer ON customer_id = customer.id\nGROUP BY customer_id ORDER BY cnt ASC\nLIMIT 10;\n```\n\n```\n customer_id | cnt\n-------------+-----\n 57565 | 1\n 62357 | 1\n 49021 | 1\n 57424 | 1\n 61891 | 1\n 62295 | 1\n 44796 | 1\n 44995 | 1\n 57286 | 1\n 62402 | 1\n```\n\nWe can see there are at least 10 customers who have left only a single response. Let's do a 'count of counts' query to get a better idea of how many responses most customers leave. We want to count how many customers have left exactly $x$ responses. ", "_____no_output_____" ], [ "### Exercise 10: (5 min)\n\nWrite a SQL query that outputs a table showing how many customers leave $x$ responses for any given integer $x$. Sort this table in descending order ($x$ with highest number of customers leaving $x$ responses at the top).\n\n(Hint: Use a **nested** `SELECT` statement. A nested statement is when you treat the results of one query as the input to another one.)", "_____no_output_____" ], [ "**Answer.** One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT cnt, COUNT(cnt) as count_of_count FROM\n(SELECT customer_id, count(score.id) AS cnt FROM score\nINNER JOIN customer ON customer_id = customer.id\nGROUP BY customer_id ) a\nGROUP BY cnt\nORDER BY count_of_count DESC\nLIMIT 100;\n```", "_____no_output_____" ], [ "Notice in the query above we have taken a query very similar to the one from Exercise 10 and nested it in parentheses. We have then given this intermediate query an **alias**, which comes immediately after the closing parenthesis; in this case we have chosen the alias `a`. It is a common convention to use aliases `a`, `b`, `c`, etc. as a shorthand if you are primarily interested only in the final result.\n\nFrom our previous queries, we already know that all the values have to fall between 1 and 38, so there can be a maximum of 38 rows returned in this query. Therefore there is no real need to add a LIMIT clause, but we add a `LIMIT 100` anyway. This is a good habit in case you make a wrong assumption to prevent the case where you accidentally try to pull thousands or millions of rows from a remote server. For brevity, we only included the first 15 rows of output below:\n\n```\n cnt | count_of_count\n-----+----------------\n 6 | 18779\n 5 | 17218\n 7 | 17094\n 4 | 15642\n 8 | 14108\n 3 | 12983\n 9 | 11978\n 10 | 10556\n 2 | 10191\n 11 | 9001\n 12 | 7833\n 13 | 6698\n 1 | 6302\n 14 | 5707\n 15 | 4908\n```\n\nWe can see that most customers leave between 2 and 10 responses so the maximum of 38 is an outlier. A fair number of people only leave one response.", "_____no_output_____" ], [ "### Average scores per week (10 min)\n\nHowever, we still have not looked at how scores are *changing*. 
Let's average all scores in each week and see how the scores go up and down over time:\n\n```SQL\nSELECT TO_CHAR(score.created_at, 'IYYY-IW') AS week, AVG(score) AS avg_score\nFROM score\nGROUP BY week\nORDER BY week ASC\nLIMIT 100;\n```\n\nAgain, we did not need to add a limit clause as we know there will only be 52 rows (the number of weeks in a year, which is the span of our dataset), but we do anyway for good measure and again include only the first 15 rows of output below:\n\n```\n week | avg_score\n---------+--------------------\n 2018-01 | 5.3618090452261307\n 2018-02 | 6.1577181208053691\n 2018-03 | 5.1405228758169935\n 2018-04 | 5.2256097560975610\n 2018-05 | 6.3962765957446809\n 2018-06 | 7.2065359477124183\n 2018-07 | 7.0110294117647059\n 2018-08 | 6.9827490261547023\n 2018-09 | 7.4689516129032258\n 2018-10 | 7.9564362001124227\n 2018-11 | 8.0201993704092340\n 2018-12 | 7.8336310283235519\n 2018-13 | 7.9298795180722892\n 2018-14 | 7.9583184257602862\n 2018-15 | 7.9876211782252051\n```\n\nWe can see that the scores start low and generally trend up over time, although they go down again around week 36 (not shown above). We use the [ISO Week](https://en.wikipedia.org/wiki/ISO_week_date) through PostgreSQL's `TO_CHAR` function to break down each of our dates into a specific week number and average the scores per week. \n\nThere are a couple issues with the above query, though:\n\n1. The `AVG` function shows a lot of decimal points by default which makes it more difficult to read the data\n2. Many customers leave a different number of responses and some might leave more than one response per week\n\nA good compromise is to calculate the average score per customer per week, then average all of these to get an average score across all customers per week. Let's do this and round off some decimal points to make our data easier to read.", "_____no_output_____" ], [ "### Exercise 11: (5 min)\n\nWrite a query to compute the average score across all customers per week, rounding off to two decimal places. (Hint: Use the `ROUND()` function, which takes two arguments: the quantity you are rounding, and how many decimals you are rounding off to.)", "_____no_output_____" ], [ "**Answer.** One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT week, ROUND(AVG(avg_week_score),2) as avg_score FROM\n(SELECT TO_CHAR(score.created_at, 'IYYY-IW') AS week, customer_id, AVG(score) as avg_week_score FROM score\nGROUP BY week, customer_id) a\nGROUP BY week\nORDER BY week\nLIMIT 100;\n```\n\n```\n week | avg_score\n---------+-----------\n 2018-01 | 5.12\n 2018-02 | 5.80\n 2018-03 | 5.74\n 2018-04 | 5.50\n 2018-05 | 6.33\n 2018-06 | 7.02\n 2018-07 | 7.01\n 2018-08 | 6.89\n 2018-09 | 7.38\n 2018-10 | 7.75\n 2018-11 | 7.80\n``` ", "_____no_output_____" ], [ "### Classifying our customers as promoters, passives, or detractors (10 min)\n\nNow, let's proceed to classifying our customers so we can calculate the NPS per week. We used a similar `SELECT` (two deep this time!) and a `CASE` statement. The `CASE` keyword acts as an if statement and returns specific values in specific cases. For us, anything larger than an 8 (i.e. 9 or 10) is a promoter, otherwise, anything larger than a 6 (i.e. 
7 or 8) is a passive and everything else is a detractor:\n\n```SQL\nSELECT * FROM\n(SELECT CASE\n WHEN avg_week_score > 8 THEN 'promoter'\n WHEN avg_week_score > 6 THEN 'passive'\n ELSE 'detractor'\nEND AS nps_class, week FROM\n(SELECT TO_CHAR(score.created_at, 'IYYY-IW') AS week, customer_id, AVG(score) as avg_week_score FROM score\nGROUP BY week, customer_id) a) b\nlimit 10;\n```\n\nWhich gives us the following output: a huge table with the nps_class and the week number:\n\n```\n nps_class | week\n-----------+---------\n detractor | 2018-01\n detractor | 2018-01\n promoter | 2018-01\n detractor | 2018-01\n promoter | 2018-01\n detractor | 2018-01\n detractor | 2018-01\n detractor | 2018-01\n detractor | 2018-01\n promoter | 2018-01\n```\n\nThis is closer to what we need, but not very useful in its current form. We can confirm that there are still nearly a million rows by using another `COUNT`:\n\n```SQL\nSELECT count(*) FROM\n(SELECT CASE\n WHEN avg_week_score > 8 THEN 'promoter'\n WHEN avg_week_score > 6 THEN 'passive'\n ELSE 'detractor'\nEND AS nps_class, week FROM\n(SELECT TO_CHAR(score.created_at, 'IYYY-IW') AS week, customer_id, AVG(score) as avg_week_score FROM score\nGROUP BY week, customer_id) a) b\nlimit 10;\n```\n\n```\n count\n--------\n 951289\n(1 row)\n```\n\nNow that we've broken our customers into specific categories, we want to count them. It's useful to \"pivot\" this data so that we can see the count of each class of people as a separate column. In a spreadsheet program like Microsoft Excel or Google Sheets, we would think of this as a pivot table, and there are plugins for PostgreSQL to allow you to use it in a similar way. In our case, though, we can count the number of each class each week using some more `CASE` statements and the `SUM` function as follows:\n\n```SQL\nSELECT week,\nSUM(CASE WHEN nps_class = 'promoter' THEN 1 ELSE 0 END) AS \"promoter\",\nSUM(CASE WHEN nps_class = 'passive' THEN 1 ELSE 0 END) AS \"passive\",\nSUM(CASE WHEN nps_class = 'detractor' THEN 1 ELSE 0 END) AS \"detractor\",\n COUNT(*) AS \"total\" FROM\n(SELECT CASE\n WHEN avg_week_score > 8 THEN 'promoter'\n WHEN avg_week_score > 6 THEN 'passive'\n ELSE 'detractor'\nEND AS nps_class, week FROM\n(SELECT TO_CHAR(score.created_at, 'IYYY-IW') AS week, customer_id, AVG(score) as avg_week_score FROM score\nGROUP BY week, customer_id) a) b\nGROUP BY week\nORDER BY week\nlimit 100;\n```\n\nWhich results in (truncated for brevity):\n\n```\n week | promoter | passive | detractor | total\n---------+----------+---------+-----------+-------\n 2018-01 | 26 | 0 | 39 | 65\n 2018-02 | 65 | 2 | 63 | 130\n 2018-03 | 71 | 7 | 70 | 148\n 2018-04 | 76 | 6 | 83 | 165\n 2018-05 | 186 | 23 | 135 | 344\n 2018-06 | 397 | 56 | 202 | 655\n 2018-07 | 471 | 72 | 238 | 781\n 2018-08 | 520 | 79 | 276 | 875\n 2018-09 | 771 | 102 | 300 | 1173\n 2018-10 | 1154 | 154 | 351 | 1659\n ...\n```\n\nNote that we also had to add another intermediate alias (`b`) to our SQL code, as we have yet another level of nested `SELECT`.", "_____no_output_____" ], [ "### Calculating NPS per week (10 min)\n\nWe now have all the pieces in place to calculate our NPS. 
To do this, we will have to use a *third* nested `SELECT` and yet another table alias `c`.", "_____no_output_____" ], [ "### Exercise 12: (10 min)\n\nGiven the above guidance, write the query to compute NPS per week.", "_____no_output_____" ], [ "**Answer.** One possible solution is given below:", "_____no_output_____" ], [ "```SQL\nSELECT *, ROUND(((CAST(promoter AS DECIMAL) / total) - (CAST(detractor AS DECIMAL) / total)) * 100, 0) AS nps FROM\n(SELECT week,\nSUM(CASE WHEN nps_class = 'promoter' THEN 1 ELSE 0 END) AS \"promoter\",\nSUM(CASE WHEN nps_class = 'passive' THEN 1 ELSE 0 END) AS \"passive\",\nSUM(CASE WHEN nps_class = 'detractor' THEN 1 ELSE 0 END) AS \"detractor\",\n COUNT(*) AS \"total\" FROM\n(SELECT CASE\n WHEN avg_week_score > 8 THEN 'promoter'\n WHEN avg_week_score > 6 THEN 'passive'\n ELSE 'detractor'\nEND AS nps_class, week FROM\n(SELECT TO_CHAR(score.created_at, 'IYYY-IW') AS week, customer_id, AVG(score) as avg_week_score FROM score\nGROUP BY week, customer_id) a) b\nGROUP BY week\nORDER BY week) c\nlimit 100;\n```\n\n\n\n```\n week | promoter | passive | detractor | total | nps\n---------+----------+---------+-----------+-------+-----\n 2018-01 | 26 | 0 | 39 | 65 | -20\n 2018-02 | 65 | 2 | 63 | 130 | 2\n 2018-03 | 71 | 7 | 70 | 148 | 1\n 2018-04 | 76 | 6 | 83 | 165 | -4\n 2018-05 | 186 | 23 | 135 | 344 | 15\n 2018-06 | 397 | 56 | 202 | 655 | 30\n 2018-07 | 471 | 72 | 238 | 781 | 30\n 2018-08 | 520 | 79 | 276 | 875 | 28\n 2018-09 | 771 | 102 | 300 | 1173 | 40\n 2018-10 | 1154 | 154 | 351 | 1659 | 48\n 2018-11 | 1313 | 180 | 394 | 1887 | 49\n 2018-12 | 1419 | 204 | 423 | 2046 | 49\n```\n\nThat first line is not pretty, but it works! We can now see the NPS, correctly rounded, for any given week.", "_____no_output_____" ], [ "## Conclusions (5 min)\n\nIn the first part of this case, you learned the basics of SQL and used it to optimize the sales operations of a financial services firm. We narrowed down our set of potentially interesting customer cohorts and were able to compute summary statistics on the sales conversion rates of those cohorts, particularly versus the mean. In particular, we learned that some of our \"no-brainer\" hypotheses did not pan out, which illustrates the importance of always investigating the data to validate our thoughts.\n\nIn the second part of this case, you learned about the Net Promoter Score (NPS) metric. You set up a cloud database using Amazon RDS, a service that makes it easy to manage and scale your databases with little to no work from your local machine. You also learned how to write complex queries in SQL that could be run directly on the cloud database. These queries used advanced features like nested `SELECT` statements and `CASE` statements which can be combined in intricate ways to get the results you need directly from your database.\n\nWe found that there was a general increase in NPS over time; however, starting in September there was a significant downturn in average NPS score. It is likely that the product encountered some significant bugs or outages during this time and going forward we should check if anything was recorded by the startup's product team to confirm this.", "_____no_output_____" ], [ "## Takeaways (5 min)\n\nSQL is a powerful tool that can help us navigate and understand data in ways that Python cannot. Sometimes, it can even serve as the first stage of an exploratory data analysis and can sometimes help us answer questions all by itself. 
Furthermore, SQL is the means through which we can create and persist data in databases for future, large-scale use. No data scientist's toolkit is complete without an understanding of how to interface with and store the raw data that they work with.\n\nAdditionally, cloud databases are a powerful and scalable way to analyze data if you have constraints on processor, memory or storage resources for your local hardware. You can do all sorts of things in-cloud that you could originally only do on your local machine, such as run complex SQL queries directly against a cloud database.", "_____no_output_____" ], [ "## Appendix 1: Troubleshooting RDS creation\n\nIf you cannot create your database using the RDS service and instead see the error below, you will need to create a new VPC instead of using the default one. \n\n![vpc dns error](images/vpc-rds-error.png)\n\n\nTo do this, scroll back up to the 'Connectivity' section, and choose 'create new VPC' from the dropdown as shown in the image below\n\n![create new vpc](images/create-new-vpc.png)\n\nAt the bottom of the page, press \"Create Database\" again, and you should see a notification briefly at the top of the page that confirms a new VPC has been created, as in the image below. Take a note of the ID.\n\n![view vpc](images/view-vpc-id.png)\n\nYou might now see another error, as follows. This is because the VPC created from the RDS console has no name.\n\n![vpc no name error](images/vpc-no-name-error.png)\n\n\nIf this is the case, you need to name your VPC. From the services dropdown at the top of the page, search for \"VPC\" and open the VPC page in a new tab.\n\n\n![view VPCs](images/services-select-vpc.png)\n\nFind the VPC that was recently created (it will have the same ID as the one you noted above). Mouse over the 'name' field to see the pencil 'edit' option appear, click on this, and give the VPC a name.\n\n\n![name VPC](images/name-vpc.png)\n\nNow that your VPC has a name, go back to the tab where you are creating the RDS instance, and scroll back up to the connectivity section, and choose the newly created VPC (you will see the name you chose displayed) from the dropdown.\n\nNow you can finally press \"Create database\" again (at the bottom of the page) and all should work.", "_____no_output_____" ] ] ]
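As an optional cross-check of the SQL pipeline above, the same weekly NPS figures can be reproduced locally with pandas, assuming the downloaded `score.csv` fits in memory and its columns match the `score` table defined earlier (`id`, `customer_id`, `created_at`, `score`). This is only a sketch under those assumptions:

```python
import pandas as pd

# Assumes score.csv (downloaded earlier from the sample-data repository) is in the working directory
score = pd.read_csv("score.csv", parse_dates=["created_at"])

# Average each customer's scores within an ISO week, then classify each customer-week
score["week"] = score["created_at"].dt.strftime("%G-%V")
weekly = score.groupby(["week", "customer_id"])["score"].mean().reset_index()
weekly["nps_class"] = pd.cut(weekly["score"], bins=[-1, 6, 8, 10],
                             labels=["detractor", "passive", "promoter"])

# NPS = % promoters - % detractors, per week
counts = pd.crosstab(weekly["week"], weekly["nps_class"])
nps = ((counts["promoter"] - counts["detractor"]) / counts.sum(axis=1) * 100).round()
print(nps.head(12))
```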
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec8213cd76da6ccf0edfcbe2cbf8e24e7830ac42
47,234
ipynb
Jupyter Notebook
01_utils.ipynb
Tom-TBT/TheOneRig
146583c6749317f8f40010f98f6e42a497fd4cec
[ "Apache-2.0" ]
1
2021-06-18T15:46:24.000Z
2021-06-18T15:46:24.000Z
01_utils.ipynb
Tom-TBT/TheOneRig
146583c6749317f8f40010f98f6e42a497fd4cec
[ "Apache-2.0" ]
2
2021-03-29T08:33:28.000Z
2021-04-08T14:52:55.000Z
01_utils.ipynb
Tom-TBT/TheOneRig
146583c6749317f8f40010f98f6e42a497fd4cec
[ "Apache-2.0" ]
4
2020-07-15T07:49:25.000Z
2021-03-10T16:11:10.000Z
42.900999
201
0.542575
[ [ [ "# default_exp utils", "_____no_output_____" ], [ "#hide\n%load_ext autoreload\n%autoreload 2\nfrom nbdev.test import test_eq\nfrom nbdev.showdoc import *", "_____no_output_____" ] ], [ [ "# Utils\n> Useful functions to reshape/arrange/reduce raw data into clean data to add to the record", "_____no_output_____" ] ], [ [ "#export\nimport numpy as np\nimport pandas as pd\nimport os\nimport glob\nimport re\nfrom typing import Dict, Tuple, Sequence, Union, Callable\nimport scipy.interpolate as interpolate\nfrom scipy.ndimage import convolve1d\nfrom scipy.signal import savgol_filter\nimport scipy.stats\nfrom scipy.ndimage import gaussian_filter\nimport matplotlib.pyplot as plt\nimport math\nfrom cmath import *\nfrom PIL import Image\n\nfrom theonerig.core import *", "_____no_output_____" ] ], [ [ "# Synchronisation utils", "_____no_output_____" ] ], [ [ "#export\ndef extend_sync_timepoints(timepoints:np.ndarray, signals:np.ndarray, \n up_bound, low_bound=0) -> Tuple[DataChunk, DataChunk]:\n \"\"\"\n Extend arrays of timepoints and signals (with identical shape) from the low_bound up to the up_bound.\n For example, the first timepoint could be 2000, and with a low_bound of 0, it would add the\n timepoints 0, 500, 1000, 1500 if the timepoint distance is of 500 (obtained by averaging the timepoints\n distances).\n \n params:\n - timepoints: Timepoints to extend\n - signals: Signals to extend\n - up_bound: Up bound to which to extend both timepoints and signals\n - low_bound: Low bound to which to extend both timepoints and signals\n \n returns:\n - timepoint: Extended timepoints\n - signals: The datachunk array is not modified, but the idx attribute is increased by the number\n of frames added with the low_bound.\n \"\"\"\n assert len(timepoints) == len(signals)\n timepoints = np.array(timepoints)\n signals = np.array(signals)\n spb = np.mean(timepoints[1:]-timepoints[:-1]) #spf: sample_per_bin\n \n #Left and right side are just prolongation of the sample_times up \n # from (0-sample_per_fr) to (len+sample_per_fr) so it covers all timepoints\n left_side = np.arange(timepoints[0]-spb , low_bound - spb, -spb)[::-1].astype(int)\n right_side = np.arange(timepoints[-1]+spb, up_bound + spb, spb).astype(int)\n\n new_timepoints = np.concatenate((left_side, \n timepoints, \n right_side))\n \n timepoint_chunk = DataChunk(data=new_timepoints, idx=0, group=\"sync\")\n signal_chunk = DataChunk(data=signals, idx=len(left_side), group=\"sync\")\n return (timepoint_chunk, signal_chunk)", "_____no_output_____" ], [ "#export\ndef align_sync_timepoints(timepoints:DataChunk, signals:DataChunk,\n ref_timepoints:DataChunk, ref_signals:DataChunk) -> Tuple[DataChunk, DataChunk, DataChunk]:\n \"\"\"\n Align the signals of a timepoints timeserie to a reference ref_timepoints with the corresponding\n ref_signals. 
ref_timepoints is extended to match ref_timepoints lenght.\n \n params:\n - timepoints: timepoints to align\n - signals: signals to align\n - ref_timepoints: reference timepoints\n - ref_signals: reference signals\n \n return:\n - Aligned timepoints (DataChunk)\n - Aligned signals (DataChunk)\n \"\"\" \n shift_left = ((np.where(ref_signals)[0][0] + ref_signals.idx) \n - (np.where(signals)[0][0] + signals.idx))\n shift_right = len(ref_timepoints) - (len(timepoints) + shift_left) \n\n spb = np.mean(timepoints[1:]-timepoints[:-1]) #spf: sample_per_bin\n spb_ref = np.mean(ref_timepoints[1:]-ref_timepoints[:-1]) #spf: sample_per_bin\n \n left_timepoints = np.zeros(0)\n left_timepoints_ref = np.zeros(0)\n right_timepoints = np.zeros(0)\n right_timepoints_ref = np.zeros(0)\n \n if shift_left > 0: #the ref started before, need to extend the other\n init = timepoints[0]-spb\n left_timepoints = np.arange(init , \n init-(spb*shift_left+1), \n -spb)[:shift_left][::-1].astype(int)\n else:\n shift_left = abs(shift_left)\n init = ref_timepoints[0]-spb_ref\n left_timepoints_ref = np.arange(init , \n init-(spb_ref*shift_left+1), \n -spb_ref)[:shift_left][::-1].astype(int)\n #We also need to shift the index of the ref signals since we increased the size of the ref_timepoints\n ref_signals.idx = ref_signals.idx + len(left_timepoints_ref)\n \n if shift_right > 0: #the ref ended after, need to extend the other\n init = timepoints[-1]+spb\n right_timepoints = np.arange(init , \n init+(spb*shift_right+1), \n spb)[:shift_right].astype(int)\n else:\n shift_right = abs(shift_right)\n init = ref_timepoints[-1]+spb_ref\n right_timepoints_ref = np.arange(init , \n init+(spb_ref*shift_right+1), \n spb_ref)[:shift_right].astype(int)\n \n timepoint = DataChunk(data=np.concatenate((left_timepoints, \n timepoints, \n right_timepoints)), idx=0, group=\"sync\")\n \n timepoint_ref = DataChunk(data=np.concatenate((left_timepoints_ref, \n ref_timepoints, \n right_timepoints_ref)), idx=0, group=\"sync\")\n \n return (timepoint, timepoint_ref, ref_signals)", "_____no_output_____" ], [ "#export\ndef resample_to_timepoints(timepoints:np.ndarray, data:np.ndarray, \n ref_timepoints:DataChunk, group=\"data\") -> DataChunk:\n \"\"\"\n Resample the data at timepoints to new timepoints given by ref_timepoints.\n Return a DataChunk of the resampled data belonging to a specified group.\n \n params:\n - timepoints: Original timepoints of the data\n - data: Data to resample of shape (t, ...)\n - ref_timepoints: Target timepoints for the resampling\n - group: Group assigned to the returned DataChunk\n \n return:\n - Resampled datachunk with appropriate idx.\n \"\"\"\n \n assert len(timepoints) == len(data)\n timepoints = np.array(timepoints)\n data = np.array(data)\n \n start_idx = np.argmax(ref_timepoints >= timepoints[0])\n stop_idx = np.argmax(ref_timepoints >= timepoints[-1])\n if stop_idx == 0:\n stop_idx = len(ref_timepoints)\n \n if len(ref_timepoints[start_idx:stop_idx]) < len(timepoints): #Downsampling\n distance = (np.argmax(timepoints>ref_timepoints[start_idx+1]) \n - np.argmax(timepoints>ref_timepoints[start_idx]))\n \n kernel = np.ones(distance)/distance\n data = convolve1d(data, kernel, axis=0) #Smooting to avoid weird sampling\n\n new_data = interpolate.interp1d(timepoints, data, axis=0)(ref_timepoints[start_idx:stop_idx])\n\n idx = ref_timepoints.idx + start_idx\n return DataChunk(data=new_data, idx = idx, group=group)", "_____no_output_____" ], [ "#export\ndef link_sync_timepoints(frame_tp_1, frame_sig_1, frame_tp_2, 
frame_sig_2):\n \"\"\"\n Creates timepoints between two timepoint arrays sampled at the same rate.\n This is useful for the LED dome, which cannot generate frames in between stimuli (due to ROM update)\n \n params:\n - frame_tp_1: Timepoints of the first part\n - frame_sig_1: Signals of the first part\n - frame_tp_2: Timepoints of the second part\n - frame_sig_2: Signals of the second part\n return:\n - (concatenated_frame_timepoints, concatenated_frame_signals)\n \"\"\"\n assert abs(np.diff(frame_tp_1).mean() - np.diff(frame_tp_2).mean())<10, \"The frame rates are different\"\n assert len(frame_tp_1)==len(frame_sig_1), \"The lengths of the first signals and timepoints do not match\"\n assert len(frame_tp_2)==len(frame_sig_2), \"The lengths of the second signals and timepoints do not match\"\n \n n_tp = np.diff(frame_tp_1).mean()\n n_new_frames = int(round((frame_tp_2[0] - frame_tp_1[-1])/n_tp) - 1)\n new_frames = np.linspace(int(frame_tp_1[-1]+n_tp), frame_tp_2[0], n_new_frames, endpoint=False).astype(int)\n \n concat_frame_tp = np.concatenate((frame_tp_1, new_frames, frame_tp_2))\n concat_frame_sig = np.concatenate((frame_sig_1, [0]*n_new_frames, frame_sig_2))\n \n return concat_frame_tp, concat_frame_sig", "_____no_output_____" ], [ "#export\ndef flip_stimulus(stim_inten, ud_inv, lr_inv):\n \"\"\"\n Flip QDSpy stimuli arrays to match the up/down left/right orientation of the stimulus displayed to \n the mouse. \n \n params:\n - stim_inten: Stimulus matrix to flip of shape (t, color, y, x)\n - ud_inv: Up down inversion boolean (1 to make the flip, 0 for no operation)\n - lr_inv: Left right inversion boolean (1 to make the flip, 0 for no operation)\n \n return:\n - Flipped stimulus array\n \"\"\"\n if lr_inv:\n stim_inten = np.flip(stim_inten, axis=3) # Axis 0:t 1:color 2:y 3:x\n if not ud_inv: \n #Numpy and QDSpy orientation are different. \n #This way reorients the stimulus appropriately for display with matplotlib and potential\n #eye tracking corrections\n stim_inten = np.flip(stim_inten, axis=2)\n return stim_inten\n\ndef flip_gratings(stim_shader, ud_inv, lr_inv):\n \"\"\"\n Flip gratings to match the up/down left/right orientation of the stimulus displayed to \n the mouse. A grating is encoded by an array of shape (t, 3(size, angle, speed)). 
\n Therefore the angles of the grating are modified to encode the \"flipped\" grating.\n \n params:\n - stim_shader: Grating matrix to flip of shape (t, 3(size, angle(degree), speed))\n - ud_inv: Up down inversion boolean (1 to make the flip, 0 for no operation)\n - lr_inv: Up down inversion boolean (1 to make the flip, 0 for no operation)\n \n return:\n - Flipped grating array\n \"\"\"\n mask_epochs = ~np.all(stim_shader==0,axis=1)\n if lr_inv:\n stim_shader[mask_epochs,1] = (360 + (180 - stim_shader[mask_epochs,1])) % 360 \n if ud_inv:\n stim_shader[mask_epochs,1] = (360 - stim_shader[mask_epochs,1]) % 360\n return stim_shader\n\ndef stim_to_dataChunk(stim_inten, stim_start_idx, reference:DataChunk) -> DataChunk:\n \"\"\"\n Factory function for DataChunk of a stimulus, that squeeze the stim_inten matrix.\n \n params:\n - stim_inten: Stimulus matrix of shape (t, ...)\n - stim_start_idx: Starting frame index of the stimulus\n - reference: DataChunk signal reference used to determine the starting index of the stimulus\n \n return:\n - Datachunk of the stimulus\n \"\"\"\n return DataChunk(data=np.squeeze(stim_inten), idx = (stim_start_idx + reference.idx), group=\"stim\")", "_____no_output_____" ], [ "#export\ndef phy_results_dict(phy_dir):\n \"\"\"\n Open the result arrays of spike sorting after manual merging with phy.\n\n params:\n - phy_dir: path to the phy results\n\n return:\n - Dictionnary of the phy arrays (amplitudes, channel_map, channel_positions, spike_clusters,\n spike_templates, spike_times, templates)\n \"\"\"\n res_dict = {}\n res_dict[\"amplitudes\"] = np.load(phy_dir+\"/amplitudes.npy\")\n res_dict[\"channel_map\"] = np.load(phy_dir+\"/channel_map.npy\")\n res_dict[\"channel_positions\"] = np.load(phy_dir+\"/channel_positions.npy\")\n res_dict[\"spike_clusters\"] = np.load(phy_dir+\"/spike_clusters.npy\")\n res_dict[\"spike_templates\"] = np.load(phy_dir+\"/spike_templates.npy\")\n res_dict[\"spike_times\"] = np.load(phy_dir+\"/spike_times.npy\")\n res_dict[\"templates\"] = np.load(phy_dir+\"/templates.npy\")\n if os.path.isfile(phy_dir+\"/channel_shanks.npy\"): #Newer version of phy/spyking-circus\n res_dict[\"channel_shanks\"] = np.load(phy_dir+\"/channel_shanks.npy\")\n res_dict[\"template_ind\"] = np.load(phy_dir+\"/template_ind.npy\")\n \n return res_dict\n\ndef spike_to_dataChunk(spike_timepoints, ref_timepoints:DataChunk) -> DataChunk:\n \"\"\"\n Factory function of a DataChunk for spiking count of cells from spike timepoints.\n \n params:\n - spike_timepoints: Dictionnary of the cells spike timepoints (list)\n - ref_timepoints: Reference DataChunk to align the newly created spike count Datachunk\n \n return:\n - Spike count datachunk of shape (t, n_cell)\n \"\"\"\n type_cast = type(list(spike_timepoints.keys())[0])\n cell_keys = sorted(map(int, \n spike_timepoints.keys()))\n cell_map = dict([ (cell_key, i) for i, cell_key in enumerate(cell_keys) ])\n spike_bins = np.zeros((ref_timepoints.shape[0], len(cell_keys)))\n bins = np.concatenate((ref_timepoints[:], [(ref_timepoints[-1]*2)-ref_timepoints[-2]]))\n\n for i, cell in enumerate(cell_keys):\n spike_bins[:, i] = np.histogram(spike_timepoints[type_cast(cell)], bins)[0]\n \n datachunk = DataChunk(data=spike_bins, idx = ref_timepoints.idx, group=\"cell\")\n datachunk.attrs[\"cell_map\"] = cell_map\n return datachunk", "_____no_output_____" ], [ "#export\ndef get_calcium_stack_lenghts(folder):\n \"\"\"\n Function to extract calcium stack lenghts from imageJ macro files associated to the stacks.\n \n params:\n - folder: 
path of the folder containing the IJ macros files\n \n return:\n - list of stack lenghts\n \"\"\"\n record_lenghts = []\n pattern_nFrame = r\".*number=(\\d*) .*\"\n for fn in glob.glob(folder+\"/*.txt\"):\n with open(fn) as f:\n line = f.readline()\n record_lenghts.append(int(re.findall(pattern_nFrame, line)[0]))\n return record_lenghts\n\ndef twoP_dataChunks(ref_timepoints:DataChunk, frame_timepoints, len_epochs, *args):\n \"\"\"\n Factory function for two photon data. \n \n params:\n - ref_timepoints: Reference timepoints to create the DataChunk\n - frame_timepoints: List of frame timepoints for each sequence of two photon frame recorded.\n - len_epochs: Lenght of the recorded epochs (<= than the corresponding frame_timepoints). Int of list\n - args: matrices of all frames detected by CaImAn. (give as many as you want to synchronise)\n \n return:\n - tuple containing the synchronised matrices in the order it was given\n \"\"\"\n assert len(args)>=1, \"no matrix to be synchronised was given\"\n res_l = [[] for i in range(len(args))]\n cursor = 0\n if isinstance(len_epochs, int):\n len_epochs = [len_epochs]\n for i, len_epoch in enumerate(len_epochs):\n start_idx = np.argmax(ref_timepoints>frame_timepoints[i][0])\n stop_idx = np.argmax(ref_timepoints>frame_timepoints[i][len_epoch-1])\n for k, matrix in enumerate(args):\n sub_mat = matrix.T[cursor:cursor+len_epoch]\n \n f = interpolate.interp1d(range(len_epoch), sub_mat, axis=0)\n res_l[k].append(DataChunk(data=f(np.linspace(0,len_epoch-1,stop_idx-start_idx)), \n idx=start_idx, \n group=\"cell\"))\n cursor += len_epoch\n \n return tuple(res_l)", "_____no_output_____" ] ], [ [ "# Modelling utils", "_____no_output_____" ] ], [ [ "#export\ndef img_2d_fit(shape, param_d, f):\n \"\"\"\n Helper function to generate the 2D image of a fit.\n \n params:\n - shape: Shape of the image in (y, x).\n - param_d: Fit dictionnary.\n - f: Function used of the fit.\n \"\"\"\n y_, x_ = shape\n xy = np.meshgrid(range(x_), range(y_))\n return f(xy, **param_d).reshape(y_, x_)", "_____no_output_____" ], [ "#export\ndef fill_nan(A):\n \"\"\"\n Fill nan values with interpolation. Credits to BRYAN WOODS@StackOverflow\n \"\"\"\n inds = np.arange(A.shape[0])\n good = np.where(np.isfinite(A))\n f = interpolate.interp1d(inds[good], A[good],bounds_error=False)\n B = np.where(np.isfinite(A),A,f(inds))\n return B", "_____no_output_____" ] ], [ [ "# Processing utils", "_____no_output_____" ] ], [ [ "#export\ndef stim_inten_norm(stim_inten):\n \"\"\"\n Normalize a stimulus with intensity in the 8bit range (0-255) to -1 to 1 range.\n \"\"\"\n stim_inten = stim_inten.astype(float)\n stim_inten -= np.min(stim_inten)\n stim_inten -= np.max(stim_inten)/2\n stim_inten /= np.max(np.abs(stim_inten))\n return np.round(stim_inten, 5)", "_____no_output_____" ], [ "#export\ndef group_direction_response(stim_prop, spike_counts, n_repeat, n_cond=32):\n \"\"\"\n Group the cells responses from shuffled grating stimulus repetitions. 
Retrieves a dictionnary\n with a key for each condition.\n \n params:\n - stim_prop: Grating array of shape (t, 3(size, angle, speed))\n - spike_counts: Spike counts response of the cells of shape (t, n_cell)\n - n_repeat: Number of repeat of each condition\n - n_cond: Total number of condition (speed/size condition * n_angle)\n \n return:\n - dictionnary of the spike counts for each condition (speed/size), with shape (n_angle, n_repeat, len, n_cell)\n \"\"\"\n \n n_cell = spike_counts.shape[-1]\n condition_repeat = stim_prop.reshape(n_repeat*n_cond,-1,3)[:,10,:] #Take the condition for each repeat\n # We take it at the 10th frame in case of frame replacement during synchronisation \n #(the 10th should be unchanged)\n \n #Reshape the spike response to (n_cond, len, n_cell)\n spike_resh = spike_counts.reshape(n_repeat*n_cond,-1,n_cell) \n\n angles = np.unique(condition_repeat[:,1]) \n\n data_dict = {}\n for cond in np.unique(condition_repeat, axis=0):\n spat_freq, angle, speed = tuple(cond)\n idx_cond = np.argwhere(np.all(condition_repeat==cond, axis=1))[:,0]\n\n cond_key = str(spat_freq)+\"@\"+str(round(speed,2))\n if cond_key not in data_dict.keys():\n data_dict[cond_key] = np.empty((len(angles), len(idx_cond), *spike_resh[0].shape))\n\n idx_angle = np.where(angle==angles)[0][0]\n data_dict[cond_key][idx_angle] = np.array([spike_resh[idx] for idx in idx_cond])\n return data_dict", "_____no_output_____" ], [ "#export\ndef group_chirp_bumps(stim_inten, spike_counts, n_repeat):\n \"\"\"\n Find the cells response to the OFF-ON-OFF initial parts of the chirps.\n \n params:\n - stim_inten: Stimulus intensity array\n - spike_counts: Spike counts array of shape (t, n_cell)\n - n_repeat: Number of repetitions of the chirp stimulus\n \n return:\n - Dictionnary of cells response to the different ON or OFF stimuli\n \"\"\"\n \n repeat = stim_inten.reshape(n_repeat,-1)[0]\n spike_counts = spike_counts.reshape(n_repeat,-1,spike_counts.shape[-1])\n epoch_l = [0]\n end_l = [len(repeat)]\n i = 1\n curr = repeat[0]\n\n while True:\n while repeat[i]==curr:\n i+=1\n epoch_l.append(i)\n curr = repeat[i]\n if curr==repeat[i+1]:\n continue\n else:\n break\n\n i = len(repeat)-2\n curr = repeat[-1]\n\n while True:\n while repeat[i]==curr:\n i-=1\n end_l.insert(0,i)\n curr = repeat[i]\n if curr==repeat[i-1]:\n continue\n else:\n break\n slices = [slice(epoch_l[i-1],epoch_l[i]) for i in range(1,len(epoch_l))]\n slices.extend([slice(end_l[i-1],end_l[i]) for i in range(1,len(end_l))])\n\n res_d = {}\n for slc in slices:\n key = str(stim_inten[slc.start])+\"@\"+str(slc.start)\n res_d[key] = spike_counts[:,slc]\n\n return res_d", "_____no_output_____" ], [ "#export\ndef get_repeat_corrected(stim_inten, spike_counts, n_repeats=10):\n \"\"\"\n Apply shifts (detected during synchro) to the chirp repetition.\n \n params:\n - stim_inten: Stimulus DataChunk (containing the shifts and frame replacements info)\n - spike_counts: Spike count matrix of shape (t, n_cell)\n - n_repeats: Number of repeats of the chirp\n \n return:\n - aligned cells response to stimulus, of shape (n_repeat, t, n_cell)\n - Number of duplicated frame per repetition.\n \"\"\"\n def count_repl_in_range(fr_replaced, _range):\n return sum([repl[0] in _range for repl in fr_replaced])\n \n signal_shifts = stim_inten.attrs[\"signal_shifts\"]\n frame_replacement = stim_inten.attrs[\"frame_replacement\"]\n \n spike_count_corr = spike_counts.copy()\n shift_cursor = 0\n prev_del = np.zeros((1, spike_counts.shape[1]))\n for shift, direction in signal_shifts:\n if 
direction==\"ins\":\n spike_count_corr[shift+1:] = spike_count_corr[shift:-1]\n prev_del = spike_count_corr[-1:]\n else:\n spike_count_corr[shift-1:-1] = spike_count_corr[shift:]\n spike_count_corr[-1:] = prev_del\n \n len_epoch = len(stim_inten)//n_repeats\n spike_counts_corrected = []\n errors_per_repeat = []\n for i in range(n_repeats):\n errors_per_repeat.append(count_repl_in_range(frame_replacement, range(len_epoch*i, len_epoch*(i+1))))\n spike_counts_corrected.append(spike_count_corr[len_epoch*i:len_epoch*(i+1)])\n return np.array(spike_counts_corrected), np.array(errors_per_repeat)", "_____no_output_____" ], [ "#export\ndef removeSlowDrift(traces, fps=60, window=80, percentile=8):\n \"\"\"\n Remove slow drifts from behavioral temporal traces such as locomotion speed obtained from the treadmill signal\n or pupil size obtained from the eye_tracking signal, by extracting a specified percentile within moving window from the signal.\n \n params:\n - traces: Behavioral temporal traces obtained from reM\n - fps: Sampling rate\n - window: Moving temporal window in seconds\n - percentile: Percentile to be extracted within moving window\n \n return: \n - Filtered temporal traces\n \"\"\"\n smoothed = np.zeros(len(traces))\n n = round(window * fps)-1\n if n%2 == 0:\n n = n+1\n \n nBefore = math.floor((n-1)/2)\n nAfter = n - nBefore - 1\n\n for k in range(len(traces)):\n idx1 = max(np.array([0,k-nBefore]))\n idx2 = min(len(traces)-1,k+nAfter)\n tmpTraces = traces[idx1:idx2]\n smoothed[k] = np.percentile(tmpTraces, percentile)\n \n smoothed = savgol_filter(smoothed, n, 3)\n\n filteredTraces = traces - smoothed\n return filteredTraces", "_____no_output_____" ], [ "#export\ndef time_shift_test_corr(spike_counts, behav_signal, n_tests = 500, seed = 1): \n \"\"\"\n Compute the null distribution of correlation between behavioral signal and spiking signal with a time shift test.\n \n params:\n - spike_counts: Array with spike counts for a specific neuron and data chunk from the reM\n - behav_signal: Array with behavioral signal for a specific neuron and data chunk from the reM\n - n_tests: number of used shifted signals to compute distribution\n - seed: seed for numpy function random.randint\n \n return: \n - null_dist_corr: Null distribution of correlation values \n \"\"\"\n \n np.random.seed(seed)\n \n null_dist_corr=[]\n for i in range(n_tests):\n #Generate time-shifted behavioral test signal for shifts between 0.05*len(behav_signal) and len(behav_signal)\n test_behav_signal = np.roll(behav_signal, np.random.randint(len(behav_signal)*0.05, len(behav_signal)))\n # Compute Pearson's correlation with behavioral time-shifted test signal and spiking signal\n null_dist_corr.append(scipy.stats.pearsonr(test_behav_signal, spike_counts)[0]) \n \n return null_dist_corr ", "_____no_output_____" ], [ "#export\ndef cross_corr_with_lag(spike_counts, behav_signal, behav, conversion_factor_treadmill=None, removeslowdrift=True, fps=60, seconds=30):\n \"\"\"\n Compute cross-correlation with lag between behavioral signal and spiking signal.\n Process signals, compute null distribution of the correlation with a time shift test and values .\n Return cross-correlation array, null-distribution array and values for plotting.\n \n params:\n - spike_counts: Array with spike counts for a specific neuron and data chunk from the reM\n - behav_signal: Array with behavioral signal for a specific neuron and data chunk from the reM\n - behav : String with name of behavioral signal to be analysed \n - conversion_factor : The value 
to convert the treadmill signal into cm/s\n - removeslowdrift: Boolean:\n False - doesn't remove slow drifts from the signal\n True - removes slow drifts by extracting a specified percentile within moving window from the signal\n - fps: Sampling rate\n - seconds: Window in seconds of the correlation lag\n \n return:\n - crosscorr: Cross-correlation with lag array between behavioral signal and spiking signal\n - corr_peak: Cross-correlation value at peak synchrony between behavioral signal and spiking signal\n - p_value_peak: P-value of the peak cross-correlation value\n - offset_peak: Temporal offset of the peak synchrony between behavioral signal and spiking signal in seconds\n - null_dist_corr: Null distribution of correlation values (output of 'utils.cross_corr_with_lag')\n \"\"\" \n\n if behav == \"treadmill\":\n #Convert treadmill signal to running speed (cm/s)\n behav_signal = behav_signal * conversion_factor_treadmill\n behav_signal_filtered = gaussian_filter(abs(behav_signal), sigma=60)\n else:\n behav_signal_filtered = gaussian_filter(behav_signal, sigma=60)\n \n #Convolve signals with gaussian window of 1 second/60 frame\n spike_counts_filtered = gaussian_filter(spike_counts, sigma=60)\n \n if removeslowdrift:\n #Remove slow drifts from treadmill, pupil size and spiking signal\n spike_counts_detrend = removeSlowDrift(spike_counts_filtered, fps=60, window=100, percentile=8)\n behav_signal_detrend = removeSlowDrift(behav_signal_filtered, fps=60, window=100, percentile=8)\n else:\n spike_counts_detrend = spike_counts_filtered\n behav_signal_detrend = behav_signal_filtered\n \n #Get null distribution for correlation between behav_signal and spike_counts signal\n null_dist_corr = time_shift_test_corr(spike_counts_detrend, behav_signal_detrend, n_tests = 500)\n\n #Compute cross-correlation with lag and values to plot\n d1 = pd.Series(behav_signal_detrend)\n d2 = pd.Series(spike_counts_detrend)\n crosscorr = [d1.corr(d2.shift(lag)) for lag in range(-int(seconds*fps),int(seconds*fps+1))]\n offset_peak = np.around((np.ceil(len(crosscorr)/2)-np.argmax(abs(np.array(crosscorr))))/fps, decimals=3)\n corr_peak = np.max(abs(np.array(crosscorr)))\n p_value_peak = round((100-scipy.stats.percentileofscore(abs(np.array(null_dist_corr)), abs(corr_peak), kind='strict'))/100,2)\n \n return crosscorr, corr_peak, p_value_peak, offset_peak, null_dist_corr", "_____no_output_____" ], [ "#export\ndef get_inception_generator(imageset_folder, len_set=25, width=500, height=281):\n \"\"\"\n Return a function to obtain inception loop images from their index.\n \n params:\n - imageset_folder: Path to the folder of the image sets\n - len_set: Number of images concatenated per set\n - width: image width\n return: \n - Function to obtain inception loop images from their index.\n \"\"\"\n imageset_l = []\n paths = glob.glob(os.path.join(imageset_folder,\"*.jpg\"))\n paths_sorted = sorted(paths, key=lambda i: int(os.path.splitext(os.path.basename(i))[0].split(\"_\")[-1]))\n \n for fn in paths_sorted: #Images accepted have the dimension (375,500)\n image = np.array(Image.open(fn))\n imageset_l.append(image)\n \n def image_yield(idx):\n if idx==-1:\n return np.zeros((height, width))+128\n set_idx = idx//25\n img_idx = idx%25\n return imageset_l[set_idx][:,width*img_idx:width*(img_idx+1), 1] #Returns a gray image\n \n return image_yield", "_____no_output_____" ], [ "#export\ndef group_omitted_epochs(stim_inten, spike_counts, n_fr_flash=4, n_fr_interflash=4, n_fr_isi=100):\n \"\"\"\n Group the cells reponse to the 
different omitted stimulus epochs conditions (n_flashes)\n \n params:\n - stim_inten: The intensities of the omitted stimulus in shape (t)\n - spike_counts: Spikes counts of the cells in shape (t, n_cell)\n - n_fr_flash: Duration of a flash (ON flash during OFF baseline, OFF flash during ON baseline)\n - n_fr_interflash: Number of frames between two flashes (during an epoch)\n - n_fr_isi: Number of frames between two epochs\n return:\n - response_d_ON, response_d_OFF: Dictionnaries of the cells responses for different number of flashes repetions. Each contain an array of shape (n_cell, n_repeats, len_epoch+n_fr_isi).\n \"\"\"\n starts_ON = []\n stops_ON = []\n n_flashes_ON = []\n\n counter = 1\n i = 0\n starts_ON.append(i)\n while i < len(stim_inten)-(n_fr_flash+n_fr_interflash):\n if stim_inten[i+(n_fr_flash*2+n_fr_interflash)]:\n break\n if stim_inten[i+(n_fr_flash+n_fr_interflash)]:\n counter += 1\n i+=(n_fr_flash+n_fr_interflash)\n else:\n stops_ON.append(i+(n_fr_flash+n_fr_interflash))\n n_flashes_ON.append(counter)\n counter = 1\n i += (n_fr_flash+n_fr_interflash+n_fr_isi)\n starts_ON.append(i)\n\n #Switching to the omitted OFF\n starts_OFF = [starts_ON.pop()]\n stops_OFF = []\n n_flashes_OFF = []\n while i < len(stim_inten)-(n_fr_flash+n_fr_interflash):\n if stim_inten[i+(n_fr_flash*2+n_fr_interflash)]==0:\n counter += 1\n i+=(n_fr_flash+n_fr_interflash)\n else:\n stops_OFF.append(i+(n_fr_flash+n_fr_interflash))\n n_flashes_OFF.append(counter)\n counter = 1\n i += (n_fr_flash+n_fr_interflash+n_fr_isi)\n starts_OFF.append(i)\n starts_OFF.pop()\n\n starts_ON = np.array(starts_ON)\n stops_ON = np.array(stops_ON)\n n_flashes_ON = np.array(n_flashes_ON)\n starts_OFF = np.array(starts_OFF)\n stops_OFF = np.array(stops_OFF)\n n_flashes_OFF = np.array(n_flashes_OFF)\n \n response_d_ON, response_d_OFF = {}, {}\n for n_repeat in set(n_flashes_ON):\n where_cond = np.where(n_flashes_ON==n_repeat)[0]\n tmp = np.array([spike_counts[start:stop+n_fr_isi] for start, stop in zip(starts_ON[where_cond], \n stops_ON[where_cond])])\n response_d_ON[n_repeat] = np.transpose(tmp, (2, 0, 1))\n for n_repeat in set(n_flashes_OFF):\n where_cond = np.where(n_flashes_OFF==n_repeat)[0]\n tmp = np.array([spike_counts[start:stop+n_fr_isi] for start, stop in zip(starts_OFF[where_cond], \n stops_OFF[where_cond])])\n response_d_OFF[n_repeat] = np.transpose(tmp, (2, 0, 1))\n \n return response_d_ON, response_d_OFF", "_____no_output_____" ] ], [ [ "# Plotting utils", "_____no_output_____" ] ], [ [ "#export\ndef get_shank_channels(channel_positions, shank_dist_th=80):\n \"\"\"\n Group the channels of a Buzsaki32 silicone probe into their shanks \n from the channel position.\n \n params:\n - channel_positions: List of channel positions\n - shank_dist_th: Distance between channels in X to rule if on same shank or not\n \n return:\n - array of grouped channel index of shape (n_shank(4), n_channel(8))\n \"\"\"\n found = np.zeros(len(channel_positions))\n shank_pos = []\n chann_pos = []\n\n while not np.all(found):\n next_idx = np.argmin(found)\n next_pos = channel_positions[next_idx][0] #getting the X position of the electrode\n this_shank = np.where(np.abs(channel_positions[:,0]-next_pos)<shank_dist_th)[0]\n chann_pos.append(this_shank)\n shank_pos.append(next_pos)\n found[this_shank] = 1\n\n shanks_idx = np.zeros((len(shank_pos), len(this_shank)), dtype=int) - 1 #Initialize with -1 in case of channel missing\n for i, order in enumerate(np.argsort(shank_pos)):\n shanks_idx[i,:len(chann_pos[order])] = chann_pos[order]\n 
return shanks_idx", "_____no_output_____" ], [ "#export\ndef format_pval(pval, significant_figures=2):\n \"\"\"\n Helper function to format pvalue into string.\n \"\"\"\n return '{:g}'.format(float('{:.{p}g}'.format(pval, p=significant_figures)))", "_____no_output_____" ], [ "#export\ndef stim_recap_df(reM):\n \"\"\"\n Extract stimuli parameters (originally from the Database) to put them into a\n dataframe that will be displayed in the recapitulation plot.\n \n params:\n - reM: RecordMaster to extract stimuli parameters from\n \n return:\n - dataframe with the stimuli important informations\n \"\"\"\n def parse_stim(stim_dc):\n param_d = {}\n param_d[\"hash\"] = stim_dc.attrs[\"md5\"][:10] #the first 10 letters are more than enough\n param_d[\"n frames\"] = len(stim_dc)\n param_d[\"stimulus\"] = stim_dc.attrs[\"name\"]\n\n if stim_dc.attrs[\"name\"] in [\"checkerboard\", \"fullfield_flicker\", \"flickering_bars\", \"flickering_bars_pr\"]:\n param_d[\"frequency\"] = stim_dc.attrs[\"refresh_rate\"]\n elif stim_dc.attrs[\"name\"] in [\"chirp_am\",\"chirp_fm\",\"chirp_freq_epoch\", \"chirp_co\"]:\n param_d[\"n ON\"] = int(stim_dc.attrs[\"tSteadyON_s\"]*60)\n param_d[\"n OFF\"] = int(stim_dc.attrs[\"tSteadyOFF_s\"]*60)\n param_d[\"n repeats\"] = int(stim_dc.attrs[\"n_repeat\"])\n if stim_dc.attrs[\"name\"] in [\"chirp_am\",\"chirp_co\"]:\n param_d[\"frequency\"] = stim_dc.attrs[\"contrast_frequency\"]\n elif stim_dc.attrs[\"name\"]==\"chirp_fm\":\n param_d[\"frequency\"] = stim_dc.attrs[\"max_frequency\"]\n elif stim_dc.attrs[\"name\"]==\"chirp_freq_epoch\":\n param_d[\"frequency\"] = str([round(60/nfr,2) for nfr in dc.attrs[\"n_frame_cycle\"]])\n elif stim_dc.attrs[\"name\"] in [\"fullfield_color_mix\"]:\n param_d[\"n ON\"] = int(stim_dc.attrs[\"n_frame_on\"])\n param_d[\"n OFF\"] = int(stim_dc.attrs[\"n_frame_off\"])\n param_d[\"n repeats\"] = int(stim_dc.attrs[\"n_repeat\"])\n elif stim_dc.attrs[\"name\"]==\"moving_gratings\":\n param_d[\"n repeats\"] = stim_dc.attrs[\"n_repeat\"]\n param_d[\"n ON\"] = stim_dc.attrs[\"n_frame_on\"]\n param_d[\"n OFF\"] = stim_dc.attrs[\"n_frame_off\"]\n param_d[\"speeds\"] = stim_dc.attrs[\"speeds\"]\n param_d[\"spatial frequencies\"] = stim_dc.attrs[\"spatial_frequencies\"]\n \n if \"frame_replacement\" in stim_dc.attrs:\n param_d[\"total drop\"] = len(stim_dc.attrs[\"frame_replacement\"])\n if \"signal_shifts\" in stim_dc.attrs:\n shift = 0\n for _, which_shift in stim_dc.attrs[\"signal_shifts\"]:\n if which_shift==\"ins\":\n shift += 1\n elif which_shift==\"del\":\n shift -= 1\n param_d[\"total shift\"] = shift\n\n return param_d\n\n df = pd.DataFrame(columns=[\"stimulus\", \"hash\", \"n frames\", \"n repeats\",\n \"frequency\", \"n ON\", \"n OFF\", \"speeds\", \"spatial frequencies\",\n \"total shift\", \"total drop\"])\n cursor = 0\n for seq in reM._sequences:\n for k, dc_l in seq:\n dc = dc_l[0]\n if dc.group == \"stim\":\n serie = pd.Series(data=parse_stim(dc), name=cursor)\n df = df.append(serie, ignore_index=False)\n cursor+=1\n\n df = df.fillna(\"\")\n return df", "_____no_output_____" ], [ "#hide\nfrom nbdev.export import *\nnotebook2script()", "Converted 00_core.ipynb.\nConverted 01_utils.ipynb.\nConverted 02_processing.ipynb.\nConverted 03_modelling.ipynb.\nConverted 04_plotting.ipynb.\nConverted 05_database.ipynb.\nConverted 06_eyetrack.ipynb.\nConverted 10_synchro.io.ipynb.\nConverted 11_synchro.extracting.ipynb.\nConverted 12_synchro.processing.ipynb.\nConverted 13_leddome.ipynb.\nConverted 99_testdata.ipynb.\nConverted index.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec82285ac4af1cdc1889fa94cc2457e306af6382
12,549
ipynb
Jupyter Notebook
005_bucles.ipynb
euribates/Jupyter-Intro
a199655436cc4ccd41ec22398a1c5212c541f24b
[ "MIT" ]
null
null
null
005_bucles.ipynb
euribates/Jupyter-Intro
a199655436cc4ccd41ec22398a1c5212c541f24b
[ "MIT" ]
null
null
null
005_bucles.ipynb
euribates/Jupyter-Intro
a199655436cc4ccd41ec22398a1c5212c541f24b
[ "MIT" ]
null
null
null
28.136771
465
0.557973
[ [ [ "## Estructuras de control: bucles\n\n", "_____no_output_____" ], [ "Por ahora nuestros programas han sido todos lineales: Una serie de instrucciones que se ejecutan una detras de la otra. Las estructuras de control nos permiten cambiar esto de varias formas, por ejemplo, podemos hacer que una serie de instrucciones se repita varias veces, o podemos hacer que distintas partes del código se ejecute en diferentes condiciones. \n\nLos bucles son la estructura de control que nos permite repetir algo muchas veces. En Python hay de dos tipos, los bucles _for_ y los bucles _While_. Las dos formas son igual de potentes, pero un algunos casos es más legible y sencillo usar uno que otro. En esta sección veremos solo el bucle `for`, y dejaremos la [explicación del while](./009_while.ipynb) para un poco más adelante.", "_____no_output_____" ], [ "## El bucle for\n\nLa primera estructura de control que veremos es el **bucle for**, que nos permite realizar una serie de instrucciones varias veces. Los bucles for están muy vinculados a las secuecias como las listas, porque funcionan repitiendo unas determinadas instrucciones _para cada elemento de la secuencia_. Su forma general es:\n\n<code>\n**for** _variable_ **in** _secuencia_:\n _instruccion 1_\n _instruccion 2_\n _etc..._\n</code>\n \nObservese que todas las instrucciones que se repiten están sangradas o **indentadas**, es decir, están más a la derecha que la línea for que empieza el bucle. En otrs lenguajes este sangrado o indentación es voluntario, porque se usan determinadas marcas para indicar el comienzo y el final de un bloque de código (símbolos como { y } en el caso de C y derivados, C++, C#, Java, JavaScript, etc... o con palabras reservadas como en Pascal `BEGIN` y `END`).\n\nEn Python, por el contrario, como en otros lenguajes (Haskell, Occam, ...) **el sangrado tiene significado y es, por tanto, obligatorio**. La forma que tiene el lenguaje de saber, en este caso, que líneas de programa tiene que ejecutar de forma repetida es ver que líneas están indentadas a un nivel superior.\n\n> Nota: _Indentación_ es un anglicismo (de la palabra inglesa indentation) de uso común en informática; no es un término reconocido por la Real Academia Española, por ahora. La Real Academia recomienda utilizar \"sangrado\". Este término significa mover un bloque\n\nVeamos un ejemplo, si queremos calcular la suma de los números en una lista dada:", "_____no_output_____" ] ], [ [ "lista_de_numeros = [1, 2, 3, 4, 5, 6]\nacc = 0\nfor num in lista_de_numeros:\n acc = acc + num\nprint(acc)", "21\n" ] ], [ [ "Veamos varias cosas nuevas en este codigo:\n\n - Usamos la función `print` para imprimir un resultado. Hasta ahora hemos puesto simplrmente la variable o expresión a representar al final de la celda, que Jupyter notebook la pinta por nosotros. Usando la función `print` podemos imprimir en cualquier parte del programa.\n \n - Usamos un acumulador (la variable `acc`) para ir sumando poco a poco los valores de la lista\n \n - El bucle for solo ejecuta una línea, porque solo hay una línea indentada a un mayor nivel que el propio for", "_____no_output_____" ], [ "**Ejercicio:** ¿Qué pasaría si el bucle for repitiera las dos líneas siguientes, no solo una? 
Es decir, si la línea `print(acc)` estuviera indentada", "_____no_output_____" ] ], [ [ "lista_de_numeros = [1, 2, 3, 4, 5, 6]\nacc = 0\nfor num in lista_de_numeros:\n acc = acc + num\n print(acc)", "1\n3\n6\n10\n15\n21\n" ] ], [ [ "**Otro ejercicio:** Calcular la media de los números de una lista\n \nPara este ejercicio, hay que usar una función llamada `len`. Esta función acepta como parámetro una secuencia (como por ejemplo, una lista) y nos devuelve el número de elementos particilares en la misma. Por ejemplo:", "_____no_output_____" ] ], [ [ "l = [0, 1, 2, 3, 4]\nprint(len(l)) # Debería ser 5", "5\n" ], [ "lista_de_numeros = [1, 2, 3, 4, 5, 6]\nacc = 0\nfor num in lista_de_numeros:\n acc = acc + num\n \nmedia = ... # Tu código aquí\n\nprint(acc, len(lista_de_numeros), media)\n", "21 6 Ellipsis\n" ] ], [ [ "Observese que la función `print` admite un **número variable de parámetros**. En este caso se le estan dado tres parámetros, y el simplemente los representa en pantalla uno después de otro.", "_____no_output_____" ], [ "¿Qué pasa si quiero ejecutar algo 1000 veces? Como hemos visto hasta ahora, el `for` depende de una secuencia para ejecutarse. Para ello recurrimos al ya conocido `range`. Esta función devuelve una secuencia de valores perfectamente recorribles por un `for`. Por ejemplo, vamos a imprimir los cuadrados de los 10 primeros números naturales:", "_____no_output_____" ] ], [ [ "for i in range(1, 11):\n print(i, i**2)", "1 1\n2 4\n3 9\n4 16\n5 25\n6 36\n7 49\n8 64\n9 81\n10 100\n" ] ], [ [ "### Cómo NO recorrer una lista\n\nA veces, la gente que viene de otros lenguajes de programación está acostumbrada\na una forma del bucle `for` en el que el valor que cambia en cada iteración no es el valor del elemento correspondiente de la secuencia, sino un índice númerico de la posición del elemento en la secuencia. Los programadores acostumbrados a ese tipo de bucles a veces hacen código de esta forma:", "_____no_output_____" ] ], [ [ "l = ['lunes', 'martes', 'miércoles', 'jueves', 'viernes', 'sábado', 'domingo']\nfor i in range(len(l)):\n dia = l[i]\n print(dia)", "lunes\nmartes\nmiércoles\njueves\nviernes\nsábado\ndomingo\n" ] ], [ [ "Hay que cambiar ese hábito, porque los bucles son así más lentos y el código, encima, más difícil de leer; comparen el código anterior con esta forma más _pythónica_:", "_____no_output_____" ] ], [ [ "l = ['lunes', 'martes', 'miércoles', 'jueves', 'viernes', 'sábado', 'domingo']\nfor dia in l:\n print(dia)", "lunes\nmartes\nmiércoles\njueves\nviernes\nsábado\ndomingo\n" ], [ "La excusa que dan a veces es ¿Y si mañana necesito el índice? 
por ejemplo, para mostrar el índice y el nombre del día:\n", "_____no_output_____" ], [ "l = ['lunes', 'martes', 'miércoles', 'jueves', 'viernes', 'sábado', 'domingo']\nfor i in range(len(l)):\n dia = l[i]\n print(i, dia)", "0 lunes\n1 martes\n2 miércoles\n3 jueves\n4 viernes\n5 sábado\n6 domingo\n" ] ], [ [ "Para resolver esto se usa una función llamada `enumerate`, que por cada elemento de la secuencia que le pasamos como entrada, nos devuelve una pareja de valores, siendo el primero de ellos el índice y el segundo el elemento en si, de forma que la solución _pythónica_ del caso anterior sería:", "_____no_output_____" ] ], [ [ "l = ['lunes', 'martes', 'miércoles', 'jueves', 'viernes', 'sábado', 'domingo']\nfor i, dia in enumerate(l):\n print(i, dia)", "0 lunes\n1 martes\n2 miércoles\n3 jueves\n4 viernes\n5 sábado\n6 domingo\n" ] ], [ [ "Que sigue siendo más rápida y legible que la alternativa no _pythónica_.", "_____no_output_____" ], [ "## Resumen y comentario\n\n- Los bucles nos permiten ejecutar un bloque de ĺíneas de código muchas veces\n\n- El bucle `for` es una forma especializada de bucle que está especialmente indicada\n para recorrer secuencias.\n \n- Si venimos de otros lenguajes de programación, podemos sentirnos tentados de hacer \n los bucles con for usando siempre el `range`. Esto no es recomendable porque el código \n resulta menos legible y es más lento", "_____no_output_____" ] ], [ [ "stop_words = '''\nel la los las por de del que se y que\na ante bajo con contra de desde en entre hacia hasta para por segun\nsin sobre tras durante mediante\n'''.split()\n\nstop_words", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
ec8233c83f70e3c24aa61535205e818d63c20c0e
302,258
ipynb
Jupyter Notebook
notebooks/003_human_mouse_hp_kmers.ipynb
olgabot/botryllus-mhc
8314a3b23bb044fba21db82561225c893b7573a4
[ "MIT" ]
null
null
null
notebooks/003_human_mouse_hp_kmers.ipynb
olgabot/botryllus-mhc
8314a3b23bb044fba21db82561225c893b7573a4
[ "MIT" ]
null
null
null
notebooks/003_human_mouse_hp_kmers.ipynb
olgabot/botryllus-mhc
8314a3b23bb044fba21db82561225c893b7573a4
[ "MIT" ]
null
null
null
67.019512
23,164
0.594975
[ [ [ "# ! wget http://www.informatics.jax.org/downloads/reports/HOM_MouseHumanSequence.rpt", "_____no_output_____" ], [ "import pandas as pd\nimport os\nimport itertools\nimport seaborn as sns\n\nimport matplotlib.pyplot as plt\n\nSYMBOL_SEPARATOR = '---'", "_____no_output_____" ], [ "human_mouse_homologs = pd.read_csv('HOM_MouseHumanSequence.rpt', sep='\\t')\nprint(human_mouse_homologs.shape)\nhuman_mouse_homologs.head()", "(43117, 13)\n" ], [ "gather_results_dir = '/Users/olgabot/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/gather_results__may2021/'", "_____no_output_____" ], [ "mouse_gather_results = pd.read_parquet(os.path.join(gather_results_dir, 'Mus_musculus.GRCm39.pep.all.chr17.fa__gather__human-chr6__moltype-hp__ksize-72.parquet'))\nprint(mouse_gather_results.shape)\nmouse_gather_results.head()", "(6010, 15)\n" ], [ "mouse_gather_results['mouse_gene_symbol'] = mouse_gather_results.query_name.str.extract('gene_symbol:([\\w\\d\\-]+)')\nmouse_gather_results['human_gene_symbol'] = mouse_gather_results.name.str.extract('gene_symbol:([\\w\\d\\-]+)')\nmouse_gather_results.head()", "_____no_output_____" ], [ "human_mouse_homologs[\"DB Class Key\"].nunique()", "_____no_output_____" ], [ "human_mouse_homologs[\"DB Class Key\"].nunique()", "_____no_output_____" ], [ "human_mouse_homologs.head()", "_____no_output_____" ], [ "lines = []\n\n# def get_human_mouse_symbol_product(df):\nfor class_key, df in human_mouse_homologs.groupby('DB Class Key'):\n organism = df[\"Common Organism Name\"]\n mouse_symbols = df.loc[organism == \"mouse, laboratory\", \"Symbol\"].tolist()\n human_symbols = df.loc[organism == \"human\", \"Symbol\"].tolist()\n lines.extend(itertools.product(mouse_symbols, human_symbols))\n # break\n# lines = human_mouse_homologs.groupby('DB Class Key').apply(get_human_mouse_symbol_product)\nhuman_mouse_pairs = pd.DataFrame(lines)\nhuman_mouse_pairs.head()", "_____no_output_____" ], [ "print(human_mouse_pairs.shape)", "(22520, 3)\n" ], [ "human_mouse_pairs.head()", "_____no_output_____" ], [ "human_mouse_pairs_in_gather_results = human_mouse_pairs.query(\n \"(mouse in @mouse_gather_results.mouse_gene_symbol)\"\n \"or (human in @mouse_gather_results.human_gene_symbol)\"\n)\nhuman_mouse_pairs_in_gather_results.shape", "_____no_output_____" ], [ "human_mouse_pairs.shape", "_____no_output_____" ], [ "human_mouse_pairs.tail()", "_____no_output_____" ], [ "human_mouse_pairs = human_mouse_pairs.rename(columns={0:'mouse', 1:'human'})\nhuman_mouse_pairs.head()", "_____no_output_____" ], [ "human_mouse_pairs['pair'] = human_mouse_pairs['mouse'] + SYMBOL_SEPARATOR + human_mouse_pairs['human']\nhuman_mouse_pairs.head()", "_____no_output_____" ], [ "mouse_gather_results.head()", "_____no_output_____" ], [ "mouse_gather_results['pair'] = mouse_gather_results['mouse_gene_symbol'] + SYMBOL_SEPARATOR + mouse_gather_results['human_gene_symbol']\nmouse_gather_results.head()", "_____no_output_____" ], [ "mouse_gather_results.shape", "_____no_output_____" ], [ "rows = mouse_gather_results.pair.isin(human_mouse_pairs.pair)\nmouse_gather_results_known = mouse_gather_results.loc[rows]\nmouse_gather_results_known.head()", "_____no_output_____" ], [ "mouse_gather_results_known.shape", "_____no_output_____" ], [ "mouse_gather_results['known_homolog'] = rows\nmouse_gather_results.groupby('known_homolog').intersect_bp.median()", "_____no_output_____" ], [ "mouse_gather_results.query('intersect_bp > 10').known_homolog.value_counts()", "_____no_output_____" ], [ 
"print(mouse_gather_results.sort_values('intersect_bp', ascending=False).head(20).to_markdown())", "| | query_name | intersect_bp | f_orig_query | f_match | f_unique_to_query | f_unique_weighted | average_abund | median_abund | std_abund | filename | name | md5 | f_match_orig | moltype | ksize | mouse_gene_symbol | human_gene_symbol | pair | known_homolog | intersect_kmer |\n|---:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------:|---------------:|----------:|--------------------:|--------------------:|----------------:|---------------:|------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------|---------------:|:----------|--------:|:--------------------|:--------------------|:------------------|:----------------|-----------------:|\n| 0 | ENSMUSP00000158051.2pep chromosome:GRCm39:17:30845965:31094238:1 gene:ENSMUSG00000033826.11 transcript:ENSMUST00000235390.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Dnah8 description:dynein, axonemal, heavy chain 8 [Source:MGI Symbol;Acc:MGI:107714] | 2810 | 0.582988 | 0.62306 | 0.582988 | 0.582988 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000333363.7pep chromosome:GRCh38:6:38715311:39030792:1 gene:ENSG00000124721.18 transcript:ENST00000327475.11 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:DNAH8 description:dynein axonemal heavy chain 8 [Source:HGNC Symbol;Acc:HGNC:2952] | 3b9a61241c99383f3782bc308cd01c14 | 0.62306 | hp | 72 | Dnah8 | DNAH8 | Dnah8---DNAH8 | True | 281 |\n| 0 | ENSMUSP00000157469.2pep chromosome:GRCm39:17:30845909:31096339:1 gene:ENSMUSG00000033826.11 transcript:ENSMUST00000236140.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Dnah8 description:dynein, axonemal, heavy chain 8 [Source:MGI Symbol;Acc:MGI:107714] | 2810 | 0.582988 | 0.62306 | 0.582988 | 0.582988 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000333363.7pep chromosome:GRCh38:6:38715311:39030792:1 gene:ENSG00000124721.18 transcript:ENST00000327475.11 gene_biotype:protein_coding 
transcript_biotype:protein_coding gene_symbol:DNAH8 description:dynein axonemal heavy chain 8 [Source:HGNC Symbol;Acc:HGNC:2952] | 3b9a61241c99383f3782bc308cd01c14 | 0.62306 | hp | 72 | Dnah8 | DNAH8 | Dnah8---DNAH8 | True | 281 |\n| 0 | ENSMUSP00000127878.2pep chromosome:GRCm39:17:30843328:31094238:1 gene:ENSMUSG00000033826.11 transcript:ENSMUST00000170651.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Dnah8 description:dynein, axonemal, heavy chain 8 [Source:MGI Symbol;Acc:MGI:107714] | 2810 | 0.582988 | 0.62306 | 0.582988 | 0.582988 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000333363.7pep chromosome:GRCh38:6:38715311:39030792:1 gene:ENSG00000124721.18 transcript:ENST00000327475.11 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:DNAH8 description:dynein axonemal heavy chain 8 [Source:HGNC Symbol;Acc:HGNC:2952] | 3b9a61241c99383f3782bc308cd01c14 | 0.62306 | hp | 72 | Dnah8 | DNAH8 | Dnah8---DNAH8 | True | 281 |\n| 0 | ENSMUSP00000038150.9pep chromosome:GRCm39:17:27276278:27341197:1 gene:ENSMUSG00000042644.10 transcript:ENSMUST00000049308.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Itpr3 description:inositol 1,4,5-triphosphate receptor 3 [Source:MGI Symbol;Acc:MGI:96624] | 2070 | 0.802326 | 0.824701 | 0.802326 | 0.802326 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000363435.4pep chromosome:GRCh38:6:33620365:33696574:1 gene:ENSG00000096433.11 transcript:ENST00000374316.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:ITPR3 description:inositol 1,4,5-trisphosphate receptor type 3 [Source:HGNC Symbol;Acc:HGNC:6182] | 1cde52e700599a850d1977ba5397dcd2 | 0.824701 | hp | 72 | Itpr3 | ITPR3 | Itpr3---ITPR3 | True | 207 |\n| 0 | ENSMUSP00000122082.3pep chromosome:GRCm39:17:34258446:34284782:1 gene:ENSMUSG00000024330.18 transcript:ENSMUST00000131134.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Col11a2 description:collagen, type XI, alpha 2 [Source:MGI Symbol;Acc:MGI:88447] | 1300 | 0.726257 | 0.730337 | 0.726257 | 0.726257 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000339915.2pep chromosome:GRCh38:6:33162694:33192467:-1 gene:ENSG00000204248.11 transcript:ENST00000341947.7 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:COL11A2 description:collagen type XI alpha 2 chain [Source:HGNC Symbol;Acc:HGNC:2187] | 187c655510eb683a17ca86ad4026e79c | 0.730337 | hp | 72 | Col11a2 | COL11A2 | Col11a2---COL11A2 | True | 130 |\n| 0 | ENSMUSP00000157723.2pep chromosome:GRCm39:17:34258629:34284782:1 
gene:ENSMUSG00000024330.18 transcript:ENSMUST00000237490.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Col11a2 description:collagen, type XI, alpha 2 [Source:MGI Symbol;Acc:MGI:88447] | 1300 | 0.730337 | 0.730337 | 0.730337 | 0.730337 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000339915.2pep chromosome:GRCh38:6:33162694:33192467:-1 gene:ENSG00000204248.11 transcript:ENST00000341947.7 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:COL11A2 description:collagen type XI alpha 2 chain [Source:HGNC Symbol;Acc:HGNC:2187] | 187c655510eb683a17ca86ad4026e79c | 0.730337 | hp | 72 | Col11a2 | COL11A2 | Col11a2---COL11A2 | True | 130 |\n| 0 | ENSMUSP00000141686.2pep chromosome:GRCm39:17:27160227:27191408:1 gene:ENSMUSG00000067629.14 transcript:ENSMUST00000194598.6 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Syngap1 description:synaptic Ras GTPase activating protein 1 homolog (rat) [Source:MGI Symbol;Acc:MGI:3039785] | 1280 | 0.948148 | 0.948148 | 0.948148 | 0.948148 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000486463.1pep chromosome:GRCh38:6:33420070:33453689:1 gene:ENSG00000197283.17 transcript:ENST00000629380.3 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:SYNGAP1 description:synaptic Ras GTPase activating protein 1 [Source:HGNC Symbol;Acc:HGNC:11497] | 34e197090b01fd28653e42aff06ba3cc | 0.948148 | hp | 72 | Syngap1 | SYNGAP1 | Syngap1---SYNGAP1 | True | 128 |\n| 0 | ENSMUSP00000157908.2pep chromosome:GRCm39:17:34258446:34284782:1 gene:ENSMUSG00000024330.18 transcript:ENSMUST00000235819.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Col11a2 description:collagen, type XI, alpha 2 [Source:MGI Symbol;Acc:MGI:88447] | 1280 | 0.748538 | 0.719101 | 0.748538 | 0.748538 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000339915.2pep chromosome:GRCh38:6:33162694:33192467:-1 gene:ENSG00000204248.11 transcript:ENST00000341947.7 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:COL11A2 description:collagen type XI alpha 2 chain [Source:HGNC Symbol;Acc:HGNC:2187] | 187c655510eb683a17ca86ad4026e79c | 0.719101 | hp | 72 | Col11a2 | COL11A2 | Col11a2---COL11A2 | True | 128 |\n| 0 | ENSMUSP00000109893.2pep chromosome:GRCm39:17:34258446:34284782:1 gene:ENSMUSG00000024330.18 transcript:ENSMUST00000114255.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Col11a2 description:collagen, type XI, alpha 2 [Source:MGI Symbol;Acc:MGI:88447] | 1280 | 0.748538 | 0.719101 | 0.748538 | 0.748538 | 0 | 0 | 0 | 
/mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000339915.2pep chromosome:GRCh38:6:33162694:33192467:-1 gene:ENSG00000204248.11 transcript:ENST00000341947.7 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:COL11A2 description:collagen type XI alpha 2 chain [Source:HGNC Symbol;Acc:HGNC:2187] | 187c655510eb683a17ca86ad4026e79c | 0.719101 | hp | 72 | Col11a2 | COL11A2 | Col11a2---COL11A2 | True | 128 |\n| 0 | ENSMUSP00000109890.2pep chromosome:GRCm39:17:34258446:34284782:1 gene:ENSMUSG00000024330.18 transcript:ENSMUST00000114252.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Col11a2 description:collagen, type XI, alpha 2 [Source:MGI Symbol;Acc:MGI:88447] | 1270 | 0.738372 | 0.713483 | 0.738372 | 0.738372 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000339915.2pep chromosome:GRCh38:6:33162694:33192467:-1 gene:ENSG00000204248.11 transcript:ENST00000341947.7 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:COL11A2 description:collagen type XI alpha 2 chain [Source:HGNC Symbol;Acc:HGNC:2187] | 187c655510eb683a17ca86ad4026e79c | 0.713483 | hp | 72 | Col11a2 | COL11A2 | Col11a2---COL11A2 | True | 127 |\n| 0 | ENSMUSP00000157425.2pep chromosome:GRCm39:17:34258446:34284782:1 gene:ENSMUSG00000024330.18 transcript:ENSMUST00000237989.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Col11a2 description:collagen, type XI, alpha 2 [Source:MGI Symbol;Acc:MGI:88447] | 1260 | 0.754491 | 0.763636 | 0.754491 | 0.754491 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000355123.1pep chromosome:GRCh38:6:33162692:33192468:-1 gene:ENSG00000204248.11 transcript:ENST00000361917.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:COL11A2 description:collagen type XI alpha 2 chain [Source:HGNC Symbol;Acc:HGNC:2187] | c7632049f9555241ee52122c53218f94 | 0.763636 | hp | 72 | Col11a2 | COL11A2 | Col11a2---COL11A2 | True | 126 |\n| 0 | ENSMUSP00000084772.5pep chromosome:GRCm39:17:34258411:34285659:1 gene:ENSMUSG00000024330.18 transcript:ENSMUST00000087497.11 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Col11a2 description:collagen, type XI, alpha 2 [Source:MGI Symbol;Acc:MGI:88447] | 1260 | 0.75 | 0.759036 | 0.75 | 0.75 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | 
ENSP00000363840.4pep chromosome:GRCh38:6:33162681:33192499:-1 gene:ENSG00000204248.11 transcript:ENST00000374708.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:COL11A2 description:collagen type XI alpha 2 chain [Source:HGNC Symbol;Acc:HGNC:2187] | 3c3c615173ac3f66311501e29f04b6a5 | 0.759036 | hp | 72 | Col11a2 | COL11A2 | Col11a2---COL11A2 | True | 126 |\n| 0 | ENSMUSP00000155085.2pep chromosome:GRCm39:17:27160356:27189575:1 gene:ENSMUSG00000067629.14 transcript:ENSMUST00000229490.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Syngap1 description:synaptic Ras GTPase activating protein 1 homolog (rat) [Source:MGI Symbol;Acc:MGI:3039785] | 1240 | 0.946565 | 0.946565 | 0.946565 | 0.946565 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000486431.1pep chromosome:GRCh38:6:33420265:33449167:1 gene:ENSG00000197283.17 transcript:ENST00000628646.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:SYNGAP1 description:synaptic Ras GTPase activating protein 1 [Source:HGNC Symbol;Acc:HGNC:11497] | a13c15941e468f238c31340192b9d6e7 | 0.946565 | hp | 72 | Syngap1 | SYNGAP1 | Syngap1---SYNGAP1 | True | 124 |\n| 0 | ENSMUSP00000080038.5pep chromosome:GRCm39:17:27171112:27189907:1 gene:ENSMUSG00000067629.14 transcript:ENSMUST00000081285.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Syngap1 description:synaptic Ras GTPase activating protein 1 homolog (rat) [Source:MGI Symbol;Acc:MGI:3039785] | 1240 | 0.946565 | 0.946565 | 0.946565 | 0.946565 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000412475.2pep chromosome:GRCh38:6:33431731:33452210:1 gene:ENSG00000197283.17 transcript:ENST00000428982.4 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:SYNGAP1 description:synaptic Ras GTPase activating protein 1 [Source:HGNC Symbol;Acc:HGNC:11497] | d66302d1d5fb33d10dc1043f3912b45c | 0.946565 | hp | 72 | Syngap1 | SYNGAP1 | Syngap1---SYNGAP1 | True | 124 |\n| 0 | ENSMUSP00000137587.2pep chromosome:GRCm39:17:27160426:27189515:1 gene:ENSMUSG00000067629.14 transcript:ENSMUST00000177932.7 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Syngap1 description:synaptic Ras GTPase activating protein 1 homolog (rat) [Source:MGI Symbol;Acc:MGI:3039785] | 1240 | 0.925373 | 0.946565 | 0.925373 | 0.925373 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000486431.1pep chromosome:GRCh38:6:33420265:33449167:1 gene:ENSG00000197283.17 transcript:ENST00000628646.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:SYNGAP1 description:synaptic Ras 
GTPase activating protein 1 [Source:HGNC Symbol;Acc:HGNC:11497] | a13c15941e468f238c31340192b9d6e7 | 0.946565 | hp | 72 | Syngap1 | SYNGAP1 | Syngap1---SYNGAP1 | True | 124 |\n| 0 | ENSMUSP00000119153.2pep chromosome:GRCm39:17:13980861:14124394:1 gene:ENSMUSG00000068036.17 transcript:ENSMUST00000137784.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Afdn description:afadin, adherens junction formation factor [Source:MGI Symbol;Acc:MGI:1314653] | 1230 | 0.66129 | 0.691011 | 0.66129 | 0.66129 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000252692.5pep chromosome:GRCh38:6:167826961:167970090:1 gene:ENSG00000130396.20 transcript:ENST00000351017.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:AFDN description:afadin, adherens junction formation factor [Source:HGNC Symbol;Acc:HGNC:7137] | d79a8175318f61602a88ad8eb2e1a6a3 | 0.691011 | hp | 72 | Afdn | AFDN | Afdn---AFDN | True | 123 |\n| 0 | ENSMUSP00000141245.3pep chromosome:GRCm39:17:27160473:27190095:1 gene:ENSMUSG00000067629.14 transcript:ENSMUST00000193200.6 gene_biotype:protein_coding transcript_biotype:nonsense_mediated_decay gene_symbol:Syngap1 description:synaptic Ras GTPase activating protein 1 homolog (rat) [Source:MGI Symbol;Acc:MGI:3039785] | 1220 | 0.96063 | 0.945736 | 0.96063 | 0.96063 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000293748.6pep chromosome:GRCh38:6:33420310:33452395:1 gene:ENSG00000197283.17 transcript:ENST00000293748.9 gene_biotype:protein_coding transcript_biotype:nonsense_mediated_decay gene_symbol:SYNGAP1 description:synaptic Ras GTPase activating protein 1 [Source:HGNC Symbol;Acc:HGNC:11497] | ca38a8483e58f472fa2b8d4b987dabed | 0.945736 | hp | 72 | Syngap1 | SYNGAP1 | Syngap1---SYNGAP1 | True | 122 |\n| 0 | ENSMUSP00000144248.3pep chromosome:GRCm39:17:27160416:27189555:1 gene:ENSMUSG00000067629.14 transcript:ENSMUST00000201702.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Syngap1 description:synaptic Ras GTPase activating protein 1 homolog (rat) [Source:MGI Symbol;Acc:MGI:3039785] | 1220 | 0.945736 | 0.945736 | 0.945736 | 0.945736 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000416519.4pep chromosome:GRCh38:6:33420255:33451842:1 gene:ENSG00000197283.17 transcript:ENST00000449372.7 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:SYNGAP1 description:synaptic Ras GTPase activating protein 1 [Source:HGNC Symbol;Acc:HGNC:11497] | 008431b814d84acb90859735e355aa7d | 0.945736 | hp | 72 | Syngap1 | SYNGAP1 | Syngap1---SYNGAP1 | True | 122 |\n| 0 | ENSMUSP00000118318.2pep 
chromosome:GRCm39:17:13980810:14126412:1 gene:ENSMUSG00000068036.17 transcript:ENSMUST00000139666.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Afdn description:afadin, adherens junction formation factor [Source:MGI Symbol;Acc:MGI:1314653] | 1220 | 0.659459 | 0.689266 | 0.659459 | 0.659459 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000404595.2pep chromosome:GRCh38:6:167827133:167969935:1 gene:ENSG00000130396.20 transcript:ENST00000447894.6 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:AFDN description:afadin, adherens junction formation factor [Source:HGNC Symbol;Acc:HGNC:7137] | 153cee9ca3a573d2afe3ab6e8c780c53 | 0.689266 | hp | 72 | Afdn | AFDN | Afdn---AFDN | True | 122 |\n| 0 | ENSMUSP00000154838.2pep chromosome:GRCm39:17:27171293:27189907:1 gene:ENSMUSG00000067629.14 transcript:ENSMUST00000228963.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Syngap1 description:synaptic Ras GTPase activating protein 1 homolog (rat) [Source:MGI Symbol;Acc:MGI:3039785] | 1200 | 0.944882 | 0.916031 | 0.944882 | 0.944882 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000412475.2pep chromosome:GRCh38:6:33431731:33452210:1 gene:ENSG00000197283.17 transcript:ENST00000428982.4 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:SYNGAP1 description:synaptic Ras GTPase activating protein 1 [Source:HGNC Symbol;Acc:HGNC:11497] | d66302d1d5fb33d10dc1043f3912b45c | 0.916031 | hp | 72 | Syngap1 | SYNGAP1 | Syngap1---SYNGAP1 | True | 120 |\n" ], [ "gather_unknown_homologs = mouse_gather_results.query('intersect_bp > 10 and known_homolog == False')\ngather_unknown_homologs.head()", "_____no_output_____" ], [ "gather_unknown_homologs.tail()", "_____no_output_____" ], [ "gather_unknown_homologs.sample(100)", "_____no_output_____" ], [ "gather_unknown_homologs.human_gene_symbol.value_counts()", "_____no_output_____" ] ], [ [ "## What are the top unknown homologs?", "_____no_output_____" ] ], [ [ "gather_unknown_homologs.sort_values('intersect_bp', ascending=False).head(20)", "_____no_output_____" ], [ "print(gather_unknown_homologs.sort_values('intersect_bp', ascending=False).head(20).to_markdown())", "| | query_name | intersect_bp | f_orig_query | f_match | f_unique_to_query | f_unique_weighted | average_abund | median_abund | std_abund | filename | name | md5 | f_match_orig | moltype | ksize | mouse_gene_symbol | human_gene_symbol | pair | known_homolog 
|\n|---:|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------:|---------------:|----------:|--------------------:|--------------------:|----------------:|---------------:|------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------|---------------:|:----------|--------:|:--------------------|:--------------------|:----------------------|:----------------|\n| 1 | ENSMUSP00000007248.4pep chromosome:GRCm39:17:35191679:35198261:1 gene:ENSMUSG00000007033.5 transcript:ENSMUST00000007248.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Hspa1l description:heat shock protein 1-like [Source:MGI Symbol;Acc:MGI:96231] | 460 | 0.613333 | 0.0140845 | 0.0133333 | 0.0133333 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000364801.3pep chromosome:GRCh38:6:31827738:31830254:1 gene:ENSG00000204388.7 transcript:ENST00000375650.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:HSPA1B description:heat shock protein family A (Hsp70) member 1B [Source:HGNC Symbol;Acc:HGNC:5233] | 33a5bba3f9f72d67af452966ff624595 | 0.647887 | hp | 72 | Hspa1l | HSPA1B | Hspa1l---HSPA1B | False |\n| 1 | ENSMUSP00000084586.3pep chromosome:GRCm39:17:35188166:35191132:-1 gene:ENSMUSG00000091971.4 transcript:ENSMUST00000087328.4 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Hspa1a description:heat shock protein 1A [Source:MGI Symbol;Acc:MGI:96244] | 440 | 0.666667 | 0.038961 | 0.0454545 | 0.0454545 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000364805.4pep chromosome:GRCh38:6:31809619:31815283:-1 gene:ENSG00000204390.10 transcript:ENST00000375654.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:HSPA1L description:heat shock protein family A (Hsp70) member 1 like [Source:HGNC Symbol;Acc:HGNC:5234] | e05474738a32633b2b9a5f0d75061169 | 0.571429 | hp | 72 | Hspa1a | HSPA1L | Hspa1a---HSPA1L | False |\n| 1 | ENSMUSP00000133815.2pep chromosome:GRCm39:17:35175412:35178214:-1 gene:ENSMUSG00000090877.4 transcript:ENSMUST00000172753.2 gene_biotype:protein_coding 
transcript_biotype:protein_coding gene_symbol:Hspa1b description:heat shock protein 1B [Source:MGI Symbol;Acc:MGI:99517] | 440 | 0.676923 | 0.038961 | 0.0461538 | 0.0461538 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000364805.4pep chromosome:GRCh38:6:31809619:31815283:-1 gene:ENSG00000204390.10 transcript:ENST00000375654.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:HSPA1L description:heat shock protein family A (Hsp70) member 1 like [Source:HGNC Symbol;Acc:HGNC:5234] | e05474738a32633b2b9a5f0d75061169 | 0.571429 | hp | 72 | Hspa1b | HSPA1L | Hspa1b---HSPA1L | False |\n| 0 | ENSMUSP00000117677.2pep chromosome:GRCm39:17:35075388:35101018:-1 gene:ENSMUSG00000092511.8 transcript:ENSMUST00000146299.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Gm20547 description:predicted gene 20547 [Source:MGI Symbol;Acc:MGI:5142012] | 300 | 0.258621 | 0.26087 | 0.258621 | 0.258621 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000410815.1pep chromosome:GRCh38:6:31927698:31952048:1 gene:ENSG00000244255.5 transcript:ENST00000456570.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:AL645922.1 description:novel complement component 2 (C2) and complement factor B (CFB) protein | 1f8cd4c6066cd1d1998fe0620b16ef26 | 0.26087 | hp | 72 | Gm20547 | AL645922 | Gm20547---AL645922 | False |\n| 0 | ENSMUSP00000071135.6pep chromosome:GRCm39:17:57387066:57394782:-1 gene:ENSMUSG00000062591.6 transcript:ENSMUST00000071135.6 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Tubb4a description:tubulin, beta 4A class IVA [Source:MGI Symbol;Acc:MGI:107848] | 280 | 0.903226 | 0.848485 | 0.903226 | 0.903226 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000339001.7pep chromosome:GRCh38:6:30720352:30725422:1 gene:ENSG00000196230.14 transcript:ENST00000327892.13 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:TUBB description:tubulin beta class I [Source:HGNC Symbol;Acc:HGNC:20778] | c4c4871668f98e3989e174ed58812676 | 0.848485 | hp | 72 | Tubb4a | TUBB | Tubb4a---TUBB | False |\n| 0 | ENSMUSP00000120864.2pep chromosome:GRCm39:17:35075402:35091483:-1 gene:ENSMUSG00000092511.8 transcript:ENSMUST00000129891.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Gm20547 description:predicted gene 20547 [Source:MGI Symbol;Acc:MGI:5142012] | 270 | 0.325301 | 0.267327 | 0.325301 | 0.325301 | 0 | 0 | 0 | 
/mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000418996.1pep chromosome:GRCh38:6:31927724:31952030:1 gene:ENSG00000244255.5 transcript:ENST00000477310.1 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:AL645922.1 description:novel complement component 2 (C2) and complement factor B (CFB) protein | 3c4828d198eaf1d0a3c0b6e6cf92a51d | 0.267327 | hp | 72 | Gm20547 | AL645922 | Gm20547---AL645922 | False |\n| 0 | ENSMUSP00000156878.2pep chromosome:GRCm39:17:27799754:27854196:-1 gene:ENSMUSG00000117338.2 transcript:ENSMUST00000233710.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Gm49804 description:predicted gene, 49804 [Source:MGI Symbol;Acc:MGI:6270466] | 240 | 0.827586 | 0.857143 | 0.827586 | 0.827586 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000491891.1pep chromosome:GRCh38:6:34286969:34426046:-1 gene:ENSG00000270800.3 transcript:ENST00000639877.1 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:RPS10-NUDT3 description:RPS10-NUDT3 readthrough [Source:HGNC Symbol;Acc:HGNC:49181] | 71e82290252f8944a210ee9e161a6913 | 0.857143 | hp | 72 | Gm49804 | RPS10-NUDT3 | Gm49804---RPS10-NUDT3 | False |\n| 0 | ENSMUSP00000119977.2pep chromosome:GRCm39:17:35075350:35081494:-1 gene:ENSMUSG00000090231.11 transcript:ENSMUST00000128767.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Cfb description:complement factor B [Source:MGI Symbol;Acc:MGI:105975] | 230 | 0.333333 | 0.2 | 0.333333 | 0.333333 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000410815.1pep chromosome:GRCh38:6:31927698:31952048:1 gene:ENSG00000244255.5 transcript:ENST00000456570.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:AL645922.1 description:novel complement component 2 (C2) and complement factor B (CFB) protein | 1f8cd4c6066cd1d1998fe0620b16ef26 | 0.2 | hp | 72 | Cfb | AL645922 | Cfb---AL645922 | False |\n| 0 | ENSMUSP00000025229.5pep chromosome:GRCm39:17:35075360:35081490:-1 gene:ENSMUSG00000090231.11 transcript:ENSMUST00000025229.11 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Cfb description:complement factor B [Source:MGI Symbol;Acc:MGI:105975] | 230 | 0.333333 | 0.2 | 0.333333 | 0.333333 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000410815.1pep 
chromosome:GRCh38:6:31927698:31952048:1 gene:ENSG00000244255.5 transcript:ENST00000456570.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:AL645922.1 description:novel complement component 2 (C2) and complement factor B (CFB) protein | 1f8cd4c6066cd1d1998fe0620b16ef26 | 0.2 | hp | 72 | Cfb | AL645922 | Cfb---AL645922 | False |\n| 0 | ENSMUSP00000115777.3pep chromosome:GRCm39:17:27192191:27255352:1 gene:ENSMUSG00000117819.2 transcript:ENSMUST00000133257.9 gene_biotype:protein_coding transcript_biotype:nonsense_mediated_decay gene_symbol:Gm50253 description:predicted gene, 50253 [Source:MGI Symbol;Acc:MGI:6303071] | 200 | 0.408163 | 0.408163 | 0.408163 | 0.408163 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000378503.2pep chromosome:GRCh38:6:33454576:33457544:1 gene:ENSG00000213588.6 transcript:ENST00000395064.3 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:ZBTB9 description:zinc finger and BTB domain containing 9 [Source:HGNC Symbol;Acc:HGNC:28323] | 45d5b29f3d9d1c3a5eba512b7c337f77 | 0.408163 | hp | 72 | Gm50253 | ZBTB9 | Gm50253---ZBTB9 | False |\n| 0 | ENSMUSP00000117838.2pep chromosome:GRCm39:17:29833850:29922333:1 gene:ENSMUSG00000098374.2 transcript:ENSMUST00000130871.2 gene_biotype:protein_coding transcript_biotype:nonsense_mediated_decay gene_symbol:Gm28043 description:predicted gene, 28043 [Source:MGI Symbol;Acc:MGI:5547779] | 180 | 0.290323 | 0.191489 | 0.290323 | 0.290323 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000362550.4pep chromosome:GRCh38:6:37433221:37481508:1 gene:ENSG00000137200.13 transcript:ENST00000373451.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:CMTR1 description:cap methyltransferase 1 [Source:HGNC Symbol;Acc:HGNC:21077] | 485f0e51924a108cf5a267809eb0f2db | 0.191489 | hp | 72 | Gm28043 | CMTR1 | Gm28043---CMTR1 | False |\n| 0 | ENSMUSP00000135660.3pep chromosome:GRCm39:17:35075360:35081490:-1 gene:ENSMUSG00000090231.11 transcript:ENSMUST00000176203.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Cfb description:complement factor B [Source:MGI Symbol;Acc:MGI:105975] | 170 | 0.257576 | 0.147826 | 0.257576 | 0.257576 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000410815.1pep chromosome:GRCh38:6:31927698:31952048:1 gene:ENSG00000244255.5 transcript:ENST00000456570.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:AL645922.1 description:novel complement component 2 (C2) and complement factor B (CFB) protein | 1f8cd4c6066cd1d1998fe0620b16ef26 | 0.147826 | hp | 72 | Cfb | AL645922 | Cfb---AL645922 | False |\n| 
0 | ENSMUSP00000120990.2pep chromosome:GRCm39:17:35075355:35081149:-1 gene:ENSMUSG00000090231.11 transcript:ENSMUST00000154526.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Cfb description:complement factor B [Source:MGI Symbol;Acc:MGI:105975] | 170 | 0.257576 | 0.147826 | 0.257576 | 0.257576 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000410815.1pep chromosome:GRCh38:6:31927698:31952048:1 gene:ENSG00000244255.5 transcript:ENST00000456570.5 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:AL645922.1 description:novel complement component 2 (C2) and complement factor B (CFB) protein | 1f8cd4c6066cd1d1998fe0620b16ef26 | 0.147826 | hp | 72 | Cfb | AL645922 | Cfb---AL645922 | False |\n| 0 | ENSMUSP00000158128.2pep chromosome:GRCm39:17:32432537:32503107:-1 gene:ENSMUSG00000024002.19 transcript:ENSMUST00000237692.2 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Brd4 description:bromodomain containing 4 [Source:MGI Symbol;Acc:MGI:1888520] | 150 | 0.283019 | 0.202703 | 0.283019 | 0.283019 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000363958.4pep chromosome:GRCh38:6:32968594:32981501:1 gene:ENSG00000204256.14 transcript:ENST00000374825.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:BRD2 description:bromodomain containing 2 [Source:HGNC Symbol;Acc:HGNC:1103] | 453fc13f390340bedba2044f3a66b560 | 0.202703 | hp | 72 | Brd4 | BRD2 | Brd4---BRD2 | False |\n| 0 | ENSMUSP00000003726.9pep chromosome:GRCm39:17:32415248:32503071:-1 gene:ENSMUSG00000024002.19 transcript:ENSMUST00000003726.16 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Brd4 description:bromodomain containing 4 [Source:MGI Symbol;Acc:MGI:1888520] | 150 | 0.12605 | 0.202703 | 0.12605 | 0.12605 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000363958.4pep chromosome:GRCh38:6:32968594:32981501:1 gene:ENSG00000204256.14 transcript:ENST00000374825.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:BRD2 description:bromodomain containing 2 [Source:HGNC Symbol;Acc:HGNC:1103] | 453fc13f390340bedba2044f3a66b560 | 0.202703 | hp | 72 | Brd4 | BRD2 | Brd4---BRD2 | False |\n| 1 | ENSMUSP00000067736.8pep chromosome:GRCm39:17:46811531:46857314:-1 gene:ENSMUSG00000040327.17 transcript:ENSMUST00000066026.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Cul9 description:cullin 9 [Source:MGI Symbol;Acc:MGI:1925559] | 150 | 0.0592885 | 0.0246914 | 0.0158103 | 0.0158103 | 0 | 0 | 0 | 
/mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000265348.4pep chromosome:GRCh38:6:43037617:43053851:-1 gene:ENSG00000044090.11 transcript:ENST00000265348.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:CUL7 description:cullin 7 [Source:HGNC Symbol;Acc:HGNC:21024] | 96dde9fc3356af8fa224c1b7d0ace4e1 | 0.0925926 | hp | 72 | Cul9 | CUL7 | Cul9---CUL7 | False |\n| 1 | ENSMUSP00000138418.2pep chromosome:GRCm39:17:46811531:46857314:-1 gene:ENSMUSG00000040327.17 transcript:ENSMUST00000182485.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Cul9 description:cullin 9 [Source:MGI Symbol;Acc:MGI:1925559] | 150 | 0.0595238 | 0.0246914 | 0.015873 | 0.015873 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000265348.4pep chromosome:GRCh38:6:43037617:43053851:-1 gene:ENSG00000044090.11 transcript:ENST00000265348.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:CUL7 description:cullin 7 [Source:HGNC Symbol;Acc:HGNC:21024] | 96dde9fc3356af8fa224c1b7d0ace4e1 | 0.0925926 | hp | 72 | Cul9 | CUL7 | Cul9---CUL7 | False |\n| 0 | ENSMUSP00000115163.2pep chromosome:GRCm39:17:32432584:32503696:-1 gene:ENSMUSG00000024002.19 transcript:ENSMUST00000127893.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Brd4 description:bromodomain containing 4 [Source:MGI Symbol;Acc:MGI:1888520] | 150 | 0.288462 | 0.202703 | 0.288462 | 0.288462 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000363958.4pep chromosome:GRCh38:6:32968594:32981501:1 gene:ENSG00000204256.14 transcript:ENST00000374825.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:BRD2 description:bromodomain containing 2 [Source:HGNC Symbol;Acc:HGNC:1103] | 453fc13f390340bedba2044f3a66b560 | 0.202703 | hp | 72 | Brd4 | BRD2 | Brd4---BRD2 | False |\n| 0 | ENSMUSP00000113070.2pep chromosome:GRCm39:17:32415248:32503083:-1 gene:ENSMUSG00000024002.19 transcript:ENSMUST00000121285.8 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Brd4 description:bromodomain containing 4 [Source:MGI Symbol;Acc:MGI:1888520] | 150 | 0.128205 | 0.202703 | 0.128205 | 0.128205 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000363958.4pep chromosome:GRCh38:6:32968594:32981501:1 gene:ENSG00000204256.14 transcript:ENST00000374825.9 
gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:BRD2 description:bromodomain containing 2 [Source:HGNC Symbol;Acc:HGNC:1103] | 453fc13f390340bedba2044f3a66b560 | 0.202703 | hp | 72 | Brd4 | BRD2 | Brd4---BRD2 | False |\n| 0 | ENSMUSP00000112474.3pep chromosome:GRCm39:17:32423884:32503083:-1 gene:ENSMUSG00000024002.19 transcript:ENSMUST00000120276.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:Brd4 description:bromodomain containing 4 [Source:MGI Symbol;Acc:MGI:1888520] | 150 | 0.238095 | 0.202703 | 0.238095 | 0.238095 | 0 | 0 | 0 | /mnt/data_sm/olga/botryllus/pipeline-results/kmermaid/april2021-singleton-fewer-species/sketches_peptide/molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true/sigs/Homo_sapiens.GRCh38.pep.all.chr6.fa__molecule-protein,dayhoff,hp__ksize-24,27,30,39,45,51,72,81__scaled-10__track_abundance-true.sig | ENSP00000363958.4pep chromosome:GRCh38:6:32968594:32981501:1 gene:ENSG00000204256.14 transcript:ENST00000374825.9 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:BRD2 description:bromodomain containing 2 [Source:HGNC Symbol;Acc:HGNC:1103] | 453fc13f390340bedba2044f3a66b560 | 0.202703 | hp | 72 | Brd4 | BRD2 | Brd4---BRD2 | False |\n" ], [ "gather_unknown_homologs.mouse_gene_symbol.value_counts()", "_____no_output_____" ], [ "# gather_unknown_homologs.str.", "_____no_output_____" ], [ "sns.catplot(data=mouse_gather_results, x='known_homolog', y='intersect_bp', alpha=0.5)\n", "_____no_output_____" ], [ "mouse_gather_results.sort_values('intersect_bp', ascending=False)", "_____no_output_____" ], [ "mouse_gather_results['intersect_kmer'] = mouse_gather_results['intersect_bp']/10", "_____no_output_____" ], [ "g = sns.catplot(data=mouse_gather_results, x='known_homolog', y='intersect_kmer', kind='boxen')\n# g.set(yscale='log')\nfor ax in g.axes.flatten():\n ax.axhline(1, linestyle='--', color='grey')\n", "_____no_output_____" ], [ "fig, axes = plt.subplots(nrows=2, figsize=(4,6))\nfor i, ax in enumerate(axes):\n sns.boxenplot(data=mouse_gather_results, x='known_homolog', y='intersect_kmer', ax=ax)\n if i == 1:\n ax.axhline(2, linestyle='--', color='grey')\n ax.set(ylim=(0, 10))\n sns.despine(ax=ax)\n else:\n ax.set(ylim=(10, 300), xlabel=None, xticks=[])\n sns.despine(ax=ax, bottom=True)\n", "_____no_output_____" ], [ "g = sns.catplot(data=mouse_gather_results, x='known_homolog', y='intersect_bp', kind='bar')\n# g.map(sns.stripplot, 'known_homolog', 'intersect_bp', hue='known_homolog')", "_____no_output_____" ], [ "mouse_gather_results.shape", "_____no_output_____" ] ], [ [ "# Compute significance of ovelrap", "_____no_output_____" ] ], [ [ "human_mouse_pairs_in_gather_results.shape", "_____no_output_____" ], [ "mouse_gather_results.query('intersect_bp > 10').known_homolog.value_counts()", "_____no_output_____" ] ], [ [ "## ROC Curve", "_____no_output_____" ] ], [ [ "from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_fscore_support", "_____no_output_____" ], [ "fpr, tpr, thresholds = roc_curve(\n mouse_gather_results[\"known_homolog\"], mouse_gather_results[\"intersect_bp\"]\n)", "_____no_output_____" ], [ "auc_score = roc_auc_score(\n mouse_gather_results[\"known_homolog\"], mouse_gather_results[\"intersect_bp\"]\n)\nauc_score", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.plot(fpr, tpr, label=f\"ROC curve (area = {auc_score:0.2f})\",)\nax.plot([0, 1], [0, 1], \"--\", color=\"grey\")\nax.legend()\nplt.title(\"ROC of Sourmash 
Gather Homology detection\")\nax.set(ylim=(0, 1), xlim=(0, 1), xlabel=\"False Positive Rate\", ylabel=\"True Positive Rate\")\nsns.despine()", "_____no_output_____" ] ], [ [ "## Set threshold to `intersect_bp > 10` --> What's the precision recall?", "_____no_output_____" ] ], [ [ "y_true = mouse_gather_results[\"known_homolog\"]\ny_pred = mouse_gather_results[\"intersect_bp\"] > 10\n\nprecision, recall, fscore, support = precision_recall_fscore_support(\n y_true, y_pred, average=\"binary\"\n)\nprint(f\"precision: {precision}\")\nprint(f\"recall: {recall}\")\nprint(f\"fscore: {fscore}\")\nprint(f\"support: {support}\")", "precision: 0.6032770605759682\nrecall: 0.9225512528473804\nfscore: 0.7295106574602221\nsupport: None\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec823bcae38a969bae1acace6fa4b54e027c41f7
130,849
ipynb
Jupyter Notebook
hwk2/.ipynb_checkpoints/CS 559 Hwk2-Copy1-checkpoint.ipynb
Dr-Spicy/Neural-Network
404e7a3134a108dc86959d3cf46081cd30efbbdd
[ "MIT" ]
null
null
null
hwk2/.ipynb_checkpoints/CS 559 Hwk2-Copy1-checkpoint.ipynb
Dr-Spicy/Neural-Network
404e7a3134a108dc86959d3cf46081cd30efbbdd
[ "MIT" ]
null
null
null
hwk2/.ipynb_checkpoints/CS 559 Hwk2-Copy1-checkpoint.ipynb
Dr-Spicy/Neural-Network
404e7a3134a108dc86959d3cf46081cd30efbbdd
[ "MIT" ]
null
null
null
151.796984
19,802
0.857339
[ [ [ "## Q.2", "_____no_output_____" ] ], [ [ "import os\nimport struct\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl", "_____no_output_____" ] ], [ [ "### (c)\nEach image is 28$\\times$28, so that we will have a neural network 28$\\times$28 = 784 nodes in the input layer,\nand 10 nodes in the output layer. We will ignore the biases. We wish to find 784 $\\times$ 10 = 7840\nweights such that the network outputs [1 0 0 ... 0]T if the input image corresponds to a 0,\n[0 1 0 ... 0]T if the input image corresponds to a 1, and so on.", "_____no_output_____" ] ], [ [ "# save the original binary MNIST data files in 0-255\ndef read(dataset = \"training\", path = \".\"):\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError( \"It needs to be between 'testing' and 'training'\")\n\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n # print(len(lbl))\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n image = np.fromfile(fimg, dtype=np.uint8) \n image = image.reshape(len(lbl), rows, cols)\n\n get_image = lambda idx: (lbl[idx], image[idx])\n\n for i in range(len(lbl)):\n yield get_image(i)\n\ntraining_data = list(read(dataset = \"training\",path = r'C:\\Users\\Han\\Desktop\\Box Sync\\CS 559\\hwk2'))\ntesting_data = list(read(dataset = \"testing\",path = r'C:\\Users\\Han\\Desktop\\Box Sync\\CS 559\\hwk2'))\ntraining_label = np.zeros((len(training_data),1))\ntraining_desiredout =np.zeros((len(training_data),10))\ntraining_image = np.zeros((len(training_data),28*28))\ntesting_label = np.zeros((len(testing_data),1))\ntesting_desiredout = np.zeros((len(testing_data),10))\ntesting_image = np.zeros((len(testing_data),28*28))\n\n# split the training and testing data to labels and images\nfor i in range(len(training_data)):\n temp = training_data[i]\n training_label[i] = temp[0]\n training_desiredout[i,temp[0]] = 1 \n training_image[i,] = temp[1].reshape(1,28*28)\n#training_label = training_label.reshape((1,60000))\nfor i in range(len(testing_data)):\n temp = testing_data[i]\n testing_label[i] = temp[0]\n testing_desiredout[i,temp[0]] = 1 \n testing_image[i,] = temp[1].reshape(1,28*28)\n#testing_label = testing_label.reshape((1,10000))", "_____no_output_____" ] ], [ [ "### (d)(e)(f)\nRun Steps (d) and (e) for n = 50, $\\eta$ = 1, and some very small $\\epsilon$ ($\\epsilon$ = 0 should also work). You\nshould observe that step (d) terminates with 0 errors eventually. So, we have 0% error according to\nour training samples. Plot the epoch number vs. the number of misclassification errors (including\nepoch 0). Now, run Step (e) and record the percentage of misclassified test samples (over all\n10000 test samples). 
", "_____no_output_____" ] ], [ [ "### Do (d) under (f)'s request\n\n## initialization\nnp.random.seed(1)\n# use first n samples from training data to train the NN\nn = 50\n# learning rate\neta = 1\n# convergence threshold\nepsi = 0\n# epoch number \nepoch = 0\nm = 9\n# initialize errors\nerrors = np.zeros((m+1,1))\n# initialize real outputs given the current w\ny = np.zeros((n,1))\n# initialize w0\nw = np.random.rand(10,28*28)\n# initialize condi\nconti = True\n\n# realize 3)\n\nwhile conti ==True:\n if epoch >m:\n print('Not converged yet, need more epoches.')\n print('But the results are saved.')\n break\n else:\n for i in range(n): # 3.1.1) this loop is where we count the misclassification errors\n v = w.dot(training_image[i,]) #compute the induced local field\n y[i,:] = np.argmax(v) # the output of image i by argmax() instead of using the real step funcion, considering multiple 1s or no 1\n diff = y[i,:] - training_label[i]\n if diff != 0:\n errors[epoch,:] += 1\n epoch += 1\n for i in range(n): # 3.1.3) (this loop is where we update the weights)\n v = w.dot(training_image[i,])\n w += eta*(training_desiredout[i].reshape(10,1)-np.heaviside(v.reshape(10,1),1)).dot(training_image[i,].reshape(1,28*28))\n conti = errors[(epoch-1),:]/n > epsi\n conti = conti.astype(bool)\n## Plot the epoch number vs. the number of misclassification errors (including epoch 0).\nx = np.arange(0,m+1,1)\nplt.plot(x,errors,'k-o',label = '$\\eta$ =1, n =50, $\\epsilon$ = 0')\nplt.xlabel('epoch')\nplt.title('Plot for (f)')\nplt.ylabel('number of misclassification')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "## Now, run Step (e) and record the percentage of misclassified test samples under (f).\n# Given W obtained from the multicategory PTA.\nW = w\n# Initialize errors = 0.\nerror_test = 0\n# loop on all testing samples\nfor i in range(10000):\n # Calculate the induced local fields with the current test sample and weights:\n vprime = W.dot(testing_image[i,])\n # Find the largest component of v0 = [v0', v1', ...v9']^T^\n predic_out = np.argmax(vprime)\n # If the prediceted output is different to the testing label, error +=1\n diff = predic_out - testing_label[i]\n if diff != 0:\n error_test += 1\nerror_test/10000", "_____no_output_____" ] ], [ [ "##### <span style = 'color:red'> Q: What is the error rate when n=50?.</span.>\n##### <span style = 'color:blue'>A: Error rate is 45.59%. </span>", "_____no_output_____" ], [ "##### <span style = 'color:red'> Q: Explain the discrepancy between the percentages of errors obtained through the training and test samples.</span.>\n#### <span style = 'color:blue'>A: Here we see the NN was tuned to have a 0 error on those 50 training samples, however, it was clearly an overfitting for these 10,000 testing samples. Thus, the error rates on two batches of samples are different. </span>", "_____no_output_____" ], [ "### (d)(e)(g)\nRun Steps (d) and (e) for n = 1000, $\\eta$ = 1, and some very small $\\epsilon$ ( $\\epsilon$ = 0 should also work).\nAgain, you should observe that step (d) terminates with 0 errors eventually. Repeat the same\ntasks as in Step (f). 
", "_____no_output_____" ] ], [ [ "### Do (d) under (g)'s request\n\n## initialization\nnp.random.seed(1)\n# use first n samples from training data to train the NN\nn = 1000\n# learning rate\neta = 1\n# convergence threshold\nepsi = 0\n# epoch number \nepoch = 0\nm = 39\n# initialize errors\nerrors = np.zeros((m+1,1))\n# initialize real outputs given the current w\ny = np.zeros((n,1))\n# initialize w0\nw = np.random.rand(10,28*28)\n# initialize condi\nconti = True\n\n# realize 3)\nwhile conti ==True:\n if epoch >=m:\n print('Not converged yet, need more epoches.')\n print('But the results are saved.')\n break\n else:\n for i in range(n): # 3.1.1) this loop is where we count the misclassification errors\n v = w.dot(training_image[i,]) #compute the induced local field\n y[i,:] = np.argmax(v) # the output of image i by argmax() instead of using the real step funcion, considering multiple 1s or no 1\n diff = y[i,:] - training_label[i]\n if diff != 0:\n errors[epoch,:] += 1\n epoch += 1\n for i in range(n): # 3.1.3) (this loop is where we update the weights)\n v = w.dot(training_image[i,])\n w += eta*(training_desiredout[i].reshape(10,1)-np.heaviside(v.reshape(10,1),1)).dot(training_image[i,].reshape(1,28*28))\n conti = errors[(epoch-1),:]/n > epsi\n conti = conti.astype(bool)\n \n## Plot the epoch number vs. the number of misclassification errors (including epoch 0).\nx = np.arange(0,epoch+1,1)\nplt.plot(x,errors,'k-o',label = '$\\eta$ =1, n =1000, $\\epsilon$ = 0')\nplt.xlabel('epoch')\nplt.ylabel('number of misclassification')\nplt.title('Plot for (g)')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "## Now, run Step (e) and record the percentage of misclassified test samples under (g).\n# Given W obtained from the multicategory PTA.\nW = w\n# Initialize errors = 0.\nerror_test = 0\n# loop on all testing samples\nfor i in range(10000):\n # Calculate the induced local fields with the current test sample and weights:\n vprime = W.dot(testing_image[i,])\n # Find the largest component of v0 = [v0', v1', ...v9']^T^\n predic_out = np.argmax(vprime)\n # If the prediceted output is different to the testing label, error +=1\n diff = predic_out - testing_label[i]\n if diff != 0:\n error_test += 1\nerror_test/10000", "_____no_output_____" ] ], [ [ "##### <span style = 'color:red'> Q: What is the error rate when n=1000?.</span.>\n##### <span style = 'color:blue'>A: Error rate is 17.72%. </span>", "_____no_output_____" ], [ "##### <span style = 'color:red'> Q: Compare what you obtain here with what you have obtained in Step (f). </span> \n#### <span style = 'color:blue'>A: Here we see the NN was tuned to have a 0 error on those 1,000 training samples, which was still an overfitting for these 10,000 testing samples. Thus, the error rates on training and testing are different. However, with the increased training sample size in this case, it will represent more patterns of thses 10,000 testing samples. Therefore, the level of overfitting would be less and the error rate becomes significantly lower. </span>", "_____no_output_____" ], [ "### (h)\nRun Step (d) for n = 60000 and $\\epsilon$ = 0. Make note of (i.e., plot) the errors as the number of\nepochs grow large, and note that the algorithm may not converge. 
", "_____no_output_____" ] ], [ [ "### Do (d) under (h)'s request\n\n## initialization\nnp.random.seed(1)\n# use first n samples from training data to train the NN\nn = 60000\n# learning rate\neta = 1\n# convergence threshold\nepsi = 0\n# epoch number \nepoch = 0\nm = 100\n# initialize errors\nerrors = np.zeros((m+1,1))\n# initialize real outputs given the current w\ny = np.zeros((n,1))\n# initialize w0\nw = np.random.rand(10,28*28)\n# initialize condi\nconti = True\n\n# realize 3)\nwhile conti ==True:\n if epoch >=m:\n print('Not converged yet, need more epoches.')\n print('But the results are saved.')\n break\n else:\n for i in range(n): # 3.1.1) this loop is where we count the misclassification errors\n v = w.dot(training_image[i,]) #compute the induced local field\n y[i,:] = np.argmax(v) # the output of image i by argmax() instead of using the real step funcion, considering multiple 1s or no 1\n diff = y[i,:] - training_label[i]\n if diff != 0:\n errors[epoch,:] += 1\n epoch += 1\n for i in range(n): # 3.1.3) (this loop is where we update the weights)\n v = w.dot(training_image[i,])\n w += eta*(training_desiredout[i].reshape(10,1)-np.heaviside(v.reshape(10,1),1)).dot(training_image[i,].reshape(1,28*28))\n conti = errors[(epoch-1),:]/n > epsi\n conti = conti.astype(bool)\n## Plot the epoch number vs. the number of misclassification errors (including epoch 0).\nx = np.arange(0,epoch+1,1)\nplt.plot(x,errors,'k-o',label = '$\\eta$ =1, n =60000, $\\epsilon$ = 0')\nplt.xlabel('epoch')\nplt.ylabel('number of misclassification')\nplt.title('Plot for (g)')\nplt.legend()\nplt.show()", "Not converged yet, need more epoches.\nBut the results are saved.\n" ], [ "## Now, run Step (e) and record the percentage of misclassified test samples under (g).\n# Given W obtained from the multicategory PTA.\nW = w\n# Initialize errors = 0.\nerror_test = 0\n# loop on all testing samples\nfor i in range(10000):\n # Calculate the induced local fields with the current test sample and weights:\n vprime = W.dot(testing_image[i,])\n # Find the largest component of v0 = [v0', v1', ...v9']^T^\n predic_out = np.argmax(vprime)\n # If the prediceted output is different to the testing label, error +=1\n diff = predic_out - testing_label[i]\n if diff != 0:\n error_test += 1\nerror_test/10000", "_____no_output_____" ] ], [ [ "##### <span style = 'color:red'> Q: What is the error rate when n=60000?.</span.>\n##### <span style = 'color:blue'>A: Error rate is 16.08%. </span>", "_____no_output_____" ], [ "##### <span style='color:red'> Q: Comment on the results.</span>\n##### <span style='color:blue'> A: At first, the selection of 0 as the $\\epsilon$ decides the tuned NN are predestined to be overfitted over the training data, regardless of the fact that the entire training sets are used in the training. Nevertheless, as the reason described in question (g) already, the inclusion of all training samples mitigates the influences of the overfitting to its minimal possible extension. Besides, it should be noted that after 100 epochs, the convergence was still not achieved and it seemed like the convergence was likely to be impossible. Therefore, the error rate on the testing samples was brought down to a new low level. </span>", "_____no_output_____" ], [ "### (i)\nUsing your observations in the previous step, pick some appropriate value for $\\epsilon$ (such that your\nalgorithm in (d) will eventually terminate). 
Repeat the following two subitems three times with\ndifferent initial weights and comment on the results: \nRun Step (d) for n = 60000, some $\\eta$ of your choice and the $\\epsilon$ you picked. \nRun Step (e) to with the W you obtained in the previous step. \n", "_____no_output_____" ] ], [ [ "### 1st intialized w\n### Do (d) under (i)'s request\n\n## initialization\nnp.random.seed(4)\n# use first n samples from training data to train the NN\nn = 60000\n# learning rate\neta = 1\n# convergence threshold\nepsi = 0.13\n# epoch number \nepoch = 0\nm = 100\n# initialize errors\nerrors = np.zeros((m,1))\n# initialize real outputs given the current w\ny = np.zeros((n,1))\n# initialize w0\nw = np.random.rand(10,28*28)\n# initialize condi\nconti = True\n\n# realize 3)\nwhile conti ==True:\n if epoch >=m:\n print('Not converged yet, need more epoches.')\n print('But the results are saved.')\n break\n else:\n for i in range(n): # 3.1.1) this loop is where we count the misclassification errors\n v = w.dot(training_image[i,]) #compute the induced local field\n y[i,:] = np.argmax(v) # the output of image i by argmax() instead of using the real step funcion, considering multiple 1s or no 1\n diff = y[i,:] - training_label[i]\n if diff != 0:\n errors[epoch,:] += 1\n epoch += 1\n for i in range(n): # 3.1.3) (this loop is where we update the weights)\n v = w.dot(training_image[i,])\n w += eta*(training_desiredout[i].reshape(10,1)-np.heaviside(v.reshape(10,1),1)).dot(training_image[i,].reshape(1,28*28))\n conti = errors[(epoch-1),:]/n > epsi\n conti = conti.astype(bool)\n\n ## Plot the epoch number vs. the number of misclassification errors (including epoch 0).\nx = np.arange(0,m,1)\nplt.plot(x[0:epoch],errors[0:epoch],'k-o',label = '$\\eta$ =1, n =60000, $\\epsilon$ = 0.13,random seed = 4')\nplt.xlabel('epoch')\nplt.ylabel('number of misclassification')\nplt.title('1st repeatition for (i)')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "## Now, run Step (e) and record the percentage of misclassified test samples under (g).\n# Given W obtained from the multicategory PTA.\nW = w\n# Initialize errors = 0.\nerror_test = 0\n# loop on all testing samples\nfor i in range(10000):\n # Calculate the induced local fields with the current test sample and weights:\n vprime = W.dot(testing_image[i,])\n # Find the largest component of v0 = [v0', v1', ...v9']^T^\n predic_out = np.argmax(vprime)\n # If the prediceted output is different to the testing label, error +=1\n diff = predic_out - testing_label[i]\n if diff != 0:\n error_test += 1\nerror_test/10000", "_____no_output_____" ], [ "### 2nd intialized w\n### Do (d) under (i)'s request\n## initialization\nnp.random.seed(3)\n# use first n samples from training data to train the NN\nn = 60000\n# learning rate\neta = 1\n# convergence threshold\nepsi = 0.13\n# epoch number \nepoch = 0\nm = 100\n# initialize errors\nerrors = np.zeros((m,1))\n# initialize real outputs given the current w\ny = np.zeros((n,1))\n# initialize w0\nw = np.random.rand(10,28*28)\n# initialize condi\nconti = True\n\n# realize 3)\nwhile conti ==True:\n if epoch >=m:\n print('Not converged yet, need more epoches.')\n print('But the results are saved.')\n break\n else:\n for i in range(n): # 3.1.1) this loop is where we count the misclassification errors\n v = w.dot(training_image[i,]) #compute the induced local field\n y[i,:] = np.argmax(v) # the output of image i by argmax() instead of using the real step funcion, considering multiple 1s or no 1\n diff = y[i,:] - training_label[i]\n if diff 
!= 0:\n errors[epoch,:] += 1\n epoch += 1\n for i in range(n): # 3.1.3) (this loop is where we update the weights)\n v = w.dot(training_image[i,])\n w += eta*(training_desiredout[i].reshape(10,1)-np.heaviside(v.reshape(10,1),1)).dot(training_image[i,].reshape(1,28*28))\n conti = errors[(epoch-1),:]/n > epsi\n conti = conti.astype(bool)\n\n ## Plot the epoch number vs. the number of misclassification errors (including epoch 0).\nx = np.arange(0,m,1)\nplt.plot(x[0:epoch],errors[0:epoch],'k-o',label = '$\\eta$ =1, n =60000, $\\epsilon$ = 0.13,random seed = 3')\nplt.xlabel('epoch')\nplt.ylabel('number of misclassification')\nplt.title('2nd repeatition for (i)')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "## Now, run Step (e) and record the percentage of misclassified test samples under (g).\n# Given W obtained from the multicategory PTA.\nW = w\n# Initialize errors = 0.\nerror_test = 0\n# loop on all testing samples\nfor i in range(10000):\n # Calculate the induced local fields with the current test sample and weights:\n vprime = W.dot(testing_image[i,])\n # Find the largest component of v0 = [v0', v1', ...v9']^T^\n predic_out = np.argmax(vprime)\n # If the prediceted output is different to the testing label, error +=1\n diff = predic_out - testing_label[i]\n if diff != 0:\n error_test += 1\nerror_test/10000", "_____no_output_____" ], [ "### 3rd intialized w\n### Do (d) under (i)'s request\n## initialization\nnp.random.seed(2)\n# use first n samples from training data to train the NN\nn = 60000\n# learning rate\neta = 1\n# convergence threshold\nepsi = 0.13\n# epoch number \nepoch = 0\nm = 100\n# initialize errors\nerrors = np.zeros((m,1))\n# initialize real outputs given the current w\ny = np.zeros((n,1))\n# initialize w0\nw = np.random.rand(10,28*28)\n# initialize condi\nconti = True\n# realize 3)\nwhile conti ==True:\n if epoch >=m:\n print('Not converged yet, need more epoches.')\n print('But the results are saved.')\n break\n else:\n for i in range(n): # 3.1.1) this loop is where we count the misclassification errors\n v = w.dot(training_image[i,]) #compute the induced local field\n y[i,:] = np.argmax(v) # the output of image i by argmax() instead of using the real step funcion, considering multiple 1s or no 1\n diff = y[i,:] - training_label[i]\n if diff != 0:\n errors[epoch,:] += 1\n epoch += 1\n for i in range(n): # 3.1.3) (this loop is where we update the weights)\n v = w.dot(training_image[i,])\n w += eta*(training_desiredout[i].reshape(10,1)-np.heaviside(v.reshape(10,1),1)).dot(training_image[i,].reshape(1,28*28))\n conti = errors[(epoch-1),:]/n > epsi\n conti = conti.astype(bool)\n\n ## Plot the epoch number vs. 
the number of misclassification errors (including epoch 0).\nx = np.arange(0,m,1)\nplt.plot(x[0:epoch],errors[0:epoch],'k-o',label = '$\\eta$ =1, n =60000, $\\epsilon$ = 0.13,random seed = 2')\nplt.xlabel('epoch')\nplt.ylabel('number of misclassification')\nplt.title('3rd repeatition for (i)')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "## Now, run Step (e) and record the percentage of misclassified test samples under (g).\n# Given W obtained from the multicategory PTA.\nW = w\n# Initialize errors = 0.\nerror_test = 0\n# loop on all testing samples\nfor i in range(10000):\n # Calculate the induced local fields with the current test sample and weights:\n vprime = W.dot(testing_image[i,])\n # Find the largest component of v0 = [v0', v1', ...v9']^T^\n predic_out = np.argmax(vprime)\n # If the prediceted output is different to the testing label, error +=1\n diff = predic_out - testing_label[i]\n if diff != 0:\n error_test += 1\nerror_test/10000", "_____no_output_____" ] ], [ [ "##### <span style='color:red'> Q: Comment on the results.</span>", "_____no_output_____" ], [ "##### <span style='color:blue'> A: By setting three different random seeds, we initialized the W matrix differently. Yet, with the same learning rate, only by relaxing the convergence threshold from 0 to 0.13, we discovered three record low error rates on the testing samples as 0.1425, 0.1587 and 0.1587 compared to the 0.1608 when $\\epsilon = 0$. Meanwhile, it should be noted that when $\\epsilon = 0$ the training won't converge whatsoever, but when $\\epsilon= 0.13$ was set, the convergence happened at the epoch 75, 2 and 44, which was another plus advantage. Hence, they both collaborated our previous argument on the overfitting on the training sample would compromise the tuned NN's performance on the testing samples. Therefore, the overfitting would be suppressed by setting an appropriate convergence threshold rather than 0 and the inclusion of a larger training sample size. </span>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
ec823d3700cd308df0fe5f156808654cc43f54a9
130,038
ipynb
Jupyter Notebook
Linear_Regression.ipynb
Manthan109/MachineLearning
33a09e6ada1799aaeb416cc698824390fa96b709
[ "MIT" ]
null
null
null
Linear_Regression.ipynb
Manthan109/MachineLearning
33a09e6ada1799aaeb416cc698824390fa96b709
[ "MIT" ]
null
null
null
Linear_Regression.ipynb
Manthan109/MachineLearning
33a09e6ada1799aaeb416cc698824390fa96b709
[ "MIT" ]
null
null
null
46.063762
22,456
0.522986
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd", "_____no_output_____" ], [ "file=pd.read_csv('simple_linear_data.csv')", "_____no_output_____" ], [ "file", "_____no_output_____" ], [ "file.insert(0,'Weights',1)", "_____no_output_____" ], [ "file", "_____no_output_____" ], [ "X=np.array(file[file.columns[:2]])\nX", "_____no_output_____" ], [ "Y=np.array(file[file.columns[-1:]])\nY", "_____no_output_____" ], [ "X_shape = X.shape\nX_type = type(X)\ny_shape = Y.shape\ny_type = type(Y)\nprint(f'X: Type-{X_type}, Shape-{X_shape}')\nprint(f'y: Type-{y_type}, Shape-{y_shape}')", "X: Type-<class 'numpy.ndarray'>, Shape-(395, 2)\ny: Type-<class 'numpy.ndarray'>, Shape-(395, 1)\n" ], [ "plt.scatter(X[:,1],Y)\nplt.title('Relation between second year grades and third year grades')\nplt.xlabel('G2 (X - Second year grades)')\nplt.ylabel('G3 (Y - Third year grades)')\nplt.show()", "_____no_output_____" ], [ "def predict(X, weights):\n y_pred = np.dot(X,weights)\n \n assert (y_pred.shape==(X.shape[0],1))\n \n return y_pred", "_____no_output_____" ], [ "def mean_squared_error(y_true, y_pred) : \n m = y_true.shape[0]\n loss = (1 / (2 * m)) * np.sum((y_true - y_pred) ** 2)\n return loss", "_____no_output_____" ], [ "def gradient(X, y_true, y_pred):\n\n grad = np.zeros((2,1))\n m = X.shape[0]\n grad[0][0] = (2 / m) * np.sum(np.dot(X[:, 0], (y_pred - y_true)))\n grad[1][0] = (2 / m) * np.sum(np.dot(X[:, 1], (y_pred - y_true)))\n \n return grad", "_____no_output_____" ], [ "def gradient_descent(X, y, learning_rate=0.0001, max_iterations=100):\n weights = np.random.rand(2,1) \n losses = []\n y_true = y.reshape(-1, 1)\n for i in range(max_iterations):\n y_pred = predict(X, weights)\n losses.append(mean_squared_error(y_true, y_pred))\n grad = gradient(X, y_true, y_pred)\n weights[0][0] -= learning_rate * grad[0][0]\n weights[1][0] -= learning_rate * grad[1][0] \n \n return weights, losses", "_____no_output_____" ], [ "weights = np.random.rand(2, 1)\nl = []\ny_true = Y.reshape(-1, 1)\nfor i in range(10):\n y_pred = predict(X, weights)\n l.append(mean_squared_error(y_true, y_pred))\n grad = gradient(X, y_true, y_pred)\n weights[0][0] -= 0.0001 * grad[0][0]\n weights[1][0] -= 0.0001 * grad[1][0]\n \n print(l[i])\n ", "845.15168939351\n150.65416030521928\n62.974328169531624\n51.90471379235622\n50.50707420602677\n50.33051320966528\n50.308112350803185\n50.30517408291269\n50.30469295539088\n50.30452204457292\n" ], [ "optimal_weights, losses = gradient_descent(X, Y)", "_____no_output_____" ], [ "print(\"Mean squared error:\", losses[-1])", "Mean squared error: 50.60594455097147\n" ], [ "plt.plot([i for i in range(len(losses))], losses)\nplt.title(\"Loss curve\")\nplt.xlabel(\"Iteration num\")\nplt.ylabel(\"Loss\")\nplt.show()", "_____no_output_____" ], [ "y_pred = predict(X, optimal_weights)", "_____no_output_____" ], [ "plt.scatter(X[:,1], Y, c='r', label='Actual scores')\nplt.plot(X[:,1], y_pred, c='g', label='Fitted line | predictions')\nplt.legend()\nplt.title(\"Linear regression fitted line\")\nplt.xlabel(\"Second year grades\")\nplt.ylabel(\"Third year grades\")\nplt.show()", "_____no_output_____" ] ], [ [ "# Task 2", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn import preprocessing", "_____no_output_____" ], [ "data=pd.read_csv('multiple_linear_data.csv')", "_____no_output_____" ], [ "orig_cols = data.columns", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ 
"data.isnull().sum()", "_____no_output_____" ], [ "label_encoder = preprocessing.LabelEncoder()", "_____no_output_____" ], [ "cat_list=list(orig_cols[1:3])+list(orig_cols[6:13])", "_____no_output_____" ], [ "for i in cat_list:\n data[i]=label_encoder.fit_transform(data[i])\n", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "data = pd.concat([data[data.columns[:19]],pd.get_dummies(data['reason'], prefix='reason'),data[data.columns[19]]],axis=1).drop(['reason'],axis=1)", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "X=np.array(data[data.columns[:22]])\nX\n", "_____no_output_____" ], [ "Y=np.array(data[data.columns[22]])\nY", "_____no_output_____" ], [ "model=LinearRegression()", "_____no_output_____" ], [ "model.fit(X,Y)", "_____no_output_____" ], [ "model.score(X,Y)", "_____no_output_____" ], [ "model.coef_", "_____no_output_____" ], [ "model.intercept_", "_____no_output_____" ], [ "Y.shape", "_____no_output_____" ], [ "y_pred=model.predict(X)", "_____no_output_____" ], [ "mse=mean_squared_error(Y,y_pred)\nprint(f\"\\nMSE: {mse}\")", "\nMSE: 85.27642516663595\n" ], [ "new_data=X[3:16]\nprint(\"Predicted grade:\",model.predict(new_data))", "Predicted grade: [67.95234221 45.76245862 79.43507255 59.09048924 20.98613666 90.32040177\n 74.90573128 39.70529185 56.59267706 72.47735631 50.00035112 78.0364424\n 71.57569049]\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec824932b78300055a02e9a69b6c68fcd77eeabe
25,671
ipynb
Jupyter Notebook
a03_Dask/Collection/dask-tutorial-scipy-2018/02-dask-arrays.ipynb
mindis/Big_Data_Analysis
b4aec3a0e285de5cac02ad7390712635a73a24db
[ "Apache-2.0" ]
2
2021-01-09T15:57:26.000Z
2021-11-29T01:44:21.000Z
a03_Dask/Collection/dask-tutorial-scipy-2018/02-dask-arrays.ipynb
mindis/Big_Data_Analysis
b4aec3a0e285de5cac02ad7390712635a73a24db
[ "Apache-2.0" ]
5
2019-11-15T02:00:26.000Z
2021-01-06T04:26:40.000Z
dask-tutorial-scipy-2018/02-dask-arrays.ipynb
sunny2309/scipy_conf_notebooks
30a85d5137db95e01461ad21519bc1bdf294044b
[ "MIT" ]
1
2021-06-22T10:18:14.000Z
2021-06-22T10:18:14.000Z
28.117196
292
0.574968
[ [ [ "<img src=\"http://dask.readthedocs.io/en/latest/_images/dask_horizontal.svg\"\n align=\"right\"\n width=\"30%\"\n alt=\"Dask logo\\\">", "_____no_output_____" ], [ "# Table of Contents\n* [Arrays](#Arrays)\n\t* [Blocked Algorithms](#Blocked-Algorithms)\n\t\t* [Exercise: Compute the mean using a blocked algorithm](#Exercise:--Compute-the-mean-using-a-blocked-algorithm)\n\t\t* [Exercise: Compute the mean](#Exercise:--Compute-the-mean)\n\t\t* [Example](#Example)\n\t\t* [Exercise: Meteorological data](#Exercise:--Meteorological-data)\n\t\t* [Exercise: Subsample and store](#Exercise:--Subsample-and-store)\n\t* [Example: Lennard-Jones potential](#Example:-Lennard-Jones-potential)\n\t\t* [Dask version](#Dask-version)\n\t* [Profiling](#Profiling)\n", "_____no_output_____" ], [ "# Arrays", "_____no_output_____" ], [ "<img src=\"http://dask.pydata.org/en/latest/_images/dask-array-black-text.svg\" width=\"50%\" align=\"right\">\n\nDask array provides a parallel, larger-than-memory, n-dimensional array using blocked algorithms. Simply put: distributed Numpy.\n\n* **Parallel**: Uses all of the cores on your computer\n* **Larger-than-memory**: Lets you work on datasets that are larger than your available memory by breaking up your array into many small pieces, operating on those pieces in an order that minimizes the memory footprint of your computation, and effectively streaming data from disk.\n* **Blocked Algorithms**: Perform large computations by performing many smaller computations\n\n**Related Documentation**\n\n* http://dask.readthedocs.io/en/latest/array.html\n* http://dask.readthedocs.io/en/latest/array-api.html", "_____no_output_____" ], [ "## Blocked Algorithms", "_____no_output_____" ], [ "A *blocked algorithm* executes on a large dataset by breaking it up into many small blocks.\n\nFor example, consider taking the sum of a billion numbers. We might instead break up the array into 1,000 chunks, each of size 1,000,000, take the sum of each chunk, and then take the sum of the intermediate sums.\n\nWe achieve the intended result (one sum on one billion numbers) by performing many smaller results (one thousand sums on one million numbers each, followed by another sum of a thousand numbers.)\n\nWe do exactly this with Python and NumPy in the following example:", "_____no_output_____" ], [ "**Create random dataset**", "_____no_output_____" ] ], [ [ "# create data if it doesn't already exist\nfrom prep_data import random_array\nrandom_array() \n\n# Load data with h5py\n# this gives the load prescription, but does no real work.\nimport h5py\nimport os\nf = h5py.File(os.path.join('data', 'random.hdf5'), mode='r')\ndset = f['/x']", "_____no_output_____" ] ], [ [ "**Compute sum using blocked algorithm**", "_____no_output_____" ], [ "Here we compute the sum of this large array on disk by \n\n1. Computing the sum of each 1,000,000 sized chunk of the array\n2. 
Computing the sum of the 1,000 intermediate sums\n\nNote that we are fetching every partial result from the cluster and summing them here, in the notebook kernel.", "_____no_output_____" ] ], [ [ "# Compute sum of large array, one million numbers at a time\nsums = []\nfor i in range(0, 1000000000, 1000000):\n chunk = dset[i: i + 1000000] # pull out numpy array\n sums.append(chunk.sum())\n\ntotal = sum(sums)\nprint(total)", "_____no_output_____" ] ], [ [ "### Exercise: Compute the mean using a blocked algorithm", "_____no_output_____" ], [ "Now that we've seen the simple example above try doing a slightly more complicated problem, compute the mean of the array. You can do this by changing the code above with the following alterations:\n\n1. Compute the sum of each block\n2. Compute the length of each block\n3. Compute the sum of the 1,000 intermediate sums and the sum of the 1,000 intermediate lengths and divide one by the other\n\nThis approach is overkill for our case but does nicely generalize if we don't know the size of the array or individual blocks beforehand.", "_____no_output_____" ] ], [ [ "# Compute the mean of the array", "_____no_output_____" ], [ "%load solutions/02-dask-arrays-blocked-mean.py", "_____no_output_____" ] ], [ [ "`dask.array` contains these algorithms\n--------------------------------------------\n\nDask.array is a NumPy-like library that does these kinds of tricks to operate on large datasets that don't fit into memory. It extends beyond the linear problems discussed above to full N-Dimensional algorithms and a decent subset of the NumPy interface.", "_____no_output_____" ], [ "**Create `dask.array` object**", "_____no_output_____" ], [ "You can create a `dask.array` `Array` object with the `da.from_array` function. This function accepts\n\n1. `data`: Any object that supports NumPy slicing, like `dset`\n2. `chunks`: A chunk size to tell us how to block up our array, like `(1000000,)`", "_____no_output_____" ] ], [ [ "import dask.array as da\nx = da.from_array(dset, chunks=(1000000,))", "_____no_output_____" ] ], [ [ "** Manipulate `dask.array` object as you would a numpy array**", "_____no_output_____" ], [ "Now that we have an `Array` we perform standard numpy-style computations like arithmetic, mathematics, slicing, reductions, etc..\n\nThe interface is familiar, but the actual work is different. dask_array.sum() does not do the same thing as numpy_array.sum().", "_____no_output_____" ], [ "**What's the difference?**", "_____no_output_____" ], [ "`dask_array.sum()` builds an expression of the computation. It does not do the computation yet. `numpy_array.sum()` computes the sum immediately.", "_____no_output_____" ], [ "*Why the difference?*", "_____no_output_____" ], [ "Dask arrays are split into chunks. Each chunk must have computations run on that chunk explicitly. If the desired answer comes from a small slice of the entire dataset, running the computation over all data would be wasteful of CPU and memory.", "_____no_output_____" ] ], [ [ "result = x.sum()\nresult", "_____no_output_____" ] ], [ [ "**Compute result**", "_____no_output_____" ], [ "Dask.array objects are lazily evaluated. Operations like `.sum` build up a graph of blocked tasks to execute. \n\nWe ask for the final result with a call to `.compute()`. This triggers the actual computation.", "_____no_output_____" ] ], [ [ "result.compute()", "_____no_output_____" ] ], [ [ "### Exercise: Compute the mean", "_____no_output_____" ], [ "And the variance, std, etc.. 
This should be a trivial change to the example above.\n\nLook at what other operations you can do with the Jupyter notebook's tab-completion.", "_____no_output_____" ], [ "Does this match your result from before?", "_____no_output_____" ], [ "Performance and Parallelism\n-------------------------------\n\n<img src=\"static/fail-case.gif\" width=\"40%\" align=\"right\">\n\nIn our first examples we used `for` loops to walk through the array one block at a time. For simple operations like `sum` this is optimal. However for complex operations we may want to traverse through the array differently. In particular we may want the following:\n\n1. Use multiple cores in parallel\n2. Chain operations on a single blocks before moving on to the next one\n\nDask.array translates your array operations into a graph of inter-related tasks with data dependencies between them. Dask then executes this graph in parallel with multiple threads. We'll discuss more about this in the next section.\n\n", "_____no_output_____" ], [ "### Example", "_____no_output_____" ], [ "1. Construct a 20000x20000 array of normally distributed random values broken up into 1000x1000 sized chunks\n2. Take the mean along one axis\n3. Take every 100th element", "_____no_output_____" ] ], [ [ "import numpy as np\nimport dask.array as da\n\nx = da.random.normal(10, 0.1, size=(20000, 20000), # 400 million element array \n chunks=(1000, 1000)) # Cut into 1000x1000 sized chunks\ny = x.mean(axis=0)[::100] # Perform NumPy-style operations", "_____no_output_____" ], [ "x.nbytes / 1e9 # Gigabytes of the input processed lazily", "_____no_output_____" ], [ "%%time\ny.compute() # Time to compute the result", "_____no_output_____" ] ], [ [ "Performance comparision\n---------------------------\n\nThe following experiment was performed on a heavy personal laptop. Your performance may vary. If you attempt the NumPy version then please ensure that you have more than 4GB of main memory.", "_____no_output_____" ], [ "**NumPy: 19s, Needs gigabytes of memory**", "_____no_output_____" ], [ "```python\nimport numpy as np\n\n%%time \nx = np.random.normal(10, 0.1, size=(20000, 20000)) \ny = x.mean(axis=0)[::100] \ny\n\nCPU times: user 19.6 s, sys: 160 ms, total: 19.8 s\nWall time: 19.7 s\n```", "_____no_output_____" ], [ "**Dask Array: 4s, Needs megabytes of memory**", "_____no_output_____" ], [ "```python\nimport dask.array as da\n\n%%time\nx = da.random.normal(10, 0.1, size=(20000, 20000), chunks=(1000, 1000))\ny = x.mean(axis=0)[::100] \ny.compute() \n\nCPU times: user 29.4 s, sys: 1.07 s, total: 30.5 s\nWall time: 4.01 s\n```", "_____no_output_____" ], [ "**Discussion**", "_____no_output_____" ], [ "Notice that the dask array computation ran in 4 seconds, but used 29.4 seconds of user CPU time. The numpy computation ran in 19.7 seconds and used 19.6 seconds of user CPU time.\n\nDask finished faster, but used more total CPU time because Dask was able to transparently parallelize the computation because of the chunk size.", "_____no_output_____" ], [ "*Questions*", "_____no_output_____" ], [ "* What happens if the dask chunks=(20000,20000)?\n * Will the computation run in 4 seconds?\n * How much memory will be used?\n* What happens if the dask chunks=(25,25)?\n * What happens to CPU and memory?", "_____no_output_____" ], [ "### Exercise: Meteorological data", "_____no_output_____" ], [ "There is 2GB of somewhat artifical weather data in HDF5 files in `data/weather-big/*.hdf5`. 
We'll use the `h5py` library to interact with this data and `dask.array` to compute on it.\n\nOur goal is to visualize the average temperature on the surface of the Earth for this month. This will require a mean over all of this data. We'll do this in the following steps\n\n1. Create `h5py.Dataset` objects for each of the days of data on disk (`dsets`)\n2. Wrap these with `da.from_array` calls \n3. Stack these datasets along time with a call to `da.stack`\n4. Compute the mean along the newly stacked time axis with the `.mean()` method\n5. Visualize the result with `matplotlib.pyplot.imshow`", "_____no_output_____" ] ], [ [ "from prep_data import weather # Prep data if it doesn't exist\nweather()", "_____no_output_____" ], [ "import h5py\nfrom glob import glob\nimport os\n\nfilenames = sorted(glob(os.path.join('data', 'weather-big', '*.hdf5')))\ndsets = [h5py.File(filename, mode='r')['/t2m'] for filename in filenames]\ndsets[0]", "_____no_output_____" ], [ "dsets[0][:5, :5] # Slicing into h5py.Dataset object gives a numpy array", "_____no_output_____" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\nfig = plt.figure(figsize=(16, 8))\nplt.imshow(dsets[0][::4, ::4], cmap='RdBu_r')", "_____no_output_____" ] ], [ [ "**Integrate with `dask.array`**", "_____no_output_____" ], [ "Make a list of `dask.array` objects out of your list of `h5py.Dataset` objects using the `da.from_array` function with a chunk size of `(500, 500)`.", "_____no_output_____" ] ], [ [ "%load solutions/02-dask-arrays-make-arrays.py", "_____no_output_____" ] ], [ [ "**Stack this list of `dask.array` objects into a single `dask.array` object with `da.stack`**", "_____no_output_____" ], [ "Stack these along the first axis so that the shape of the resulting array is `(31, 5760, 11520)`.", "_____no_output_____" ] ], [ [ "%load solutions/02-dask-arrays-stacked.py", "_____no_output_____" ] ], [ [ "**Plot the mean of this array along the time (`0th`) axis**", "_____no_output_____" ] ], [ [ "%load solutions/02-dask-arrays-weather-mean.py", "_____no_output_____" ] ], [ [ "**Plot the difference of the first day from the mean**", "_____no_output_____" ] ], [ [ "%load solutions/02-dask-arrays-weather-difference.py", "_____no_output_____" ] ], [ [ "### Exercise: Subsample and store", "_____no_output_____" ], [ "In the above exercise the result of our computation is small, so we can call `compute` safely. Sometimes our result is still too large to fit into memory and we want to save it to disk. In these cases you can use one of the following two functions\n\n1. `da.store`: Store dask.array into any object that supports numpy setitem syntax, e.g.\n\n f = h5py.File('myfile.hdf5')\n output = f.create_dataset(shape=..., dtype=...)\n \n da.store(my_dask_array, output)\n \n2. 
`da.to_hdf5`: A specialized function that creates and stores a `dask.array` object into an `HDF5` file.\n\n da.to_hdf5('data/myfile.hdf5', '/output', my_dask_array)\n \nThe task in this exercise is to **use numpy step slicing to subsample the full dataset by a factor of two in both the latitude and longitude direction and then store this result to disk** using one of the functions listed above.\n\nAs a reminder, Python slicing takes three elements\n\n start:stop:step\n\n >>> L = [1, 2, 3, 4, 5, 6, 7]\n >>> L[::3]\n [1, 4, 7]", "_____no_output_____" ] ], [ [ "%load solutions/02-dask-arrays-store.py", "_____no_output_____" ] ], [ [ "## Example: Lennard-Jones potential", "_____no_output_____" ], [ "The [Lennard-Jones](https://en.wikipedia.org/wiki/Lennard-Jones_potential) is used in partical simuluations in physics, chemistry and engineering. It is highly parallelizable.\n\nFirst, we'll run and profile the Numpy version on 7,000 particles.", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# make a random collection of particles\ndef make_cluster(natoms, radius=40, seed=1981):\n np.random.seed(seed)\n cluster = np.random.normal(0, radius, (natoms,3))-0.5\n return cluster\n\ndef lj(r2):\n sr6 = (1./r2)**3\n pot = 4.*(sr6*sr6 - sr6)\n return pot\n\n# build the matrix of distances\ndef distances(cluster):\n diff = cluster[:, np.newaxis, :] - cluster[np.newaxis, :, :]\n mat = (diff*diff).sum(-1)\n return mat\n\n# the lj function is evaluated over the upper traingle\n# after removing distances near zero\ndef potential(cluster):\n d2 = distances(cluster)\n dtri = np.triu(d2)\n energy = lj(dtri[dtri > 1e-6]).sum()\n return energy", "_____no_output_____" ], [ "cluster = make_cluster(int(7e3), radius=500)", "_____no_output_____" ], [ "%time potential(cluster)", "_____no_output_____" ] ], [ [ "Notice that the most time consuming function is `distances`.", "_____no_output_____" ] ], [ [ "%prun -s cumulative potential(cluster)", "_____no_output_____" ] ], [ [ "### Dask version", "_____no_output_____" ], [ "Here's the dask version. Only the `potential` function needs to be rewritten to best utilize Dask.\n\nNote that `da.nansum` has been used over the full $NxN$ distance matrix to improve parallel efficiency.", "_____no_output_____" ] ], [ [ "import dask.array as da\n\n# compute the potential on the entire\n# matrix of distances and ignore division by zero\ndef potential_dask(cluster):\n d2 = distances(cluster)\n energy = da.nansum(lj(d2))/2.\n return energy", "_____no_output_____" ] ], [ [ "Let's convert the NumPy array to a Dask array. Since the entire NumPy array fits in memory it is more computationally efficient to chunk the array by number of CPU cores.", "_____no_output_____" ] ], [ [ "from os import cpu_count\n\ndcluster = da.from_array(cluster, chunks=cluster.shape[0]//cpu_count())", "_____no_output_____" ] ], [ [ "This step should scale quite well with number of cores. The warnings are complaining about dividing by zero, which is why we used `da.nansum` in `potential_dask`.", "_____no_output_____" ] ], [ [ "e = potential_dask(dcluster)\n%time e.compute()", "_____no_output_____" ] ], [ [ "The distributed [dashboard](http://127.0.0.1:8787/tasks) shows the execution of the tasks, allowing a visualization of which is taking the most time.", "_____no_output_____" ], [ "Limitations\n-----------\n\nDask.array does not implement the entire numpy interface. Users expecting this\nwill be disappointed. Notably dask.array has the following failings:\n\n1. Dask does not implement all of ``np.linalg``. 
This has been done by a\n number of excellent BLAS/LAPACK implementations and is the focus of\n numerous ongoing academic research projects.\n2. Dask.array does not support some operation where the resulting shape\n depends on the values of the array. In order to form the dask graph we\n must be able to infer the shape of the array before actually executing the\n operation. Some operations that result in arrays with unknown shape are\n supported: e.g. indexing one dask array with another or operations like ``da.where``.\n3. Dask.array does not attempt operations like ``sort`` which are notoriously\n difficult to do in parallel and are of somewhat diminished value on very\n large data (you rarely actually need a full sort).\n Often we include parallel-friendly alternatives like ``topk``.\n4. Dask development is driven by immediate need, and so many lesser used\n functions, like ``np.full_like`` have not been implemented purely out of\n laziness. These would make excellent community contributions.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec824a1ba74af6d2dba00017b33d974774216c0d
29,684
ipynb
Jupyter Notebook
Plotting_Triangle_Count/advdb-plotting.ipynb
sindura93/Advanced-Databases-course-Final-project
191a4a265b1f996ecebdc0aa1858707025b2821b
[ "MIT" ]
1
2021-06-17T04:49:10.000Z
2021-06-17T04:49:10.000Z
Plotting_Triangle_Count/advdb-plotting.ipynb
sindura93/Advanced-Databases-course-Final-project
191a4a265b1f996ecebdc0aa1858707025b2821b
[ "MIT" ]
null
null
null
Plotting_Triangle_Count/advdb-plotting.ipynb
sindura93/Advanced-Databases-course-Final-project
191a4a265b1f996ecebdc0aa1858707025b2821b
[ "MIT" ]
null
null
null
25.700433
3,836
0.571217
[ [ [ "import folium", "_____no_output_____" ], [ "import json\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom folium.plugins import MarkerCluster\n", "_____no_output_____" ], [ "m = folium.Map(location=[51.5, -0.1], zoom_start=15)", "_____no_output_____" ], [ "m", "_____no_output_____" ], [ "accident_data = pd.read_csv(\"dataWith5000RNRecordsWithIDAndHeaders\")\naccident_data = accident_data.sample(n=5000, random_state=42)\naccident_data.dropna(subset=[\"Latitude\", \"Longitude\"], inplace=True)", "_____no_output_____" ], [ "accident_data.shape", "_____no_output_____" ], [ "def get_geojson_grid(upper_right, lower_left, n=6):\n \"\"\"Returns a grid of geojson rectangles, and computes the exposure in each section of the grid based on the vessel data.\n\n Parameters\n ----------\n upper_right: array_like\n The upper right hand corner of \"grid of grids\" (the default is the upper right hand [lat, lon] of the USA).\n\n lower_left: array_like\n The lower left hand corner of \"grid of grids\" (the default is the lower left hand [lat, lon] of the USA).\n\n n: integer\n The number of rows/columns in the (n,n) grid.\n\n Returns\n -------\n\n list\n List of \"geojson style\" dictionary objects \n \"\"\"\n\n all_boxes = []\n\n lat_steps = np.linspace(lower_left[0], upper_right[0], n+1)\n lon_steps = np.linspace(lower_left[1], upper_right[1], n+1)\n\n lat_stride = lat_steps[1] - lat_steps[0]\n lon_stride = lon_steps[1] - lon_steps[0]\n\n for lat in lat_steps[:-1]:\n for lon in lon_steps[:-1]:\n # Define dimensions of box in grid\n upper_left = [lon, lat + lat_stride]\n upper_right = [lon + lon_stride, lat + lat_stride]\n lower_right = [lon + lon_stride, lat]\n lower_left = [lon, lat]\n\n # Define json coordinates for polygon\n coordinates = [\n upper_left,\n upper_right,\n lower_right,\n lower_left,\n upper_left\n ]\n\n geo_json = {\"type\": \"FeatureCollection\",\n \"properties\":{\n \"lower_left\": lower_left,\n \"upper_right\": upper_right\n },\n \"features\":[]}\n\n grid_feature = {\n \"type\":\"Feature\",\n \"geometry\":{\n \"type\":\"Polygon\",\n \"coordinates\": [coordinates],\n }\n }\n\n geo_json[\"features\"].append(grid_feature)\n\n all_boxes.append(geo_json)\n\n return all_boxes", "_____no_output_____" ], [ " def generate_heat_gridmap(accident_data, htmlfilename):\n m = folium.Map(zoom_start = 5, location=[55, 0])\n\n # Generate GeoJson grid\n top_right = [58, 2]\n top_left = [49, -8]\n\n grid = get_geojson_grid(top_right, top_left, n=6)\n\n # Calculate exposures in grid\n popups = []\n regional_counts = []\n\n for box in grid:\n upper_right = box[\"properties\"][\"upper_right\"]\n lower_left = box[\"properties\"][\"lower_left\"]\n\n mask = (\n (accident_data.Latitude != upper_right[1]) & (accident_data.Latitude != lower_left[1]) &\n (accident_data.Longitude != upper_right[0]) & (accident_data.Longitude != lower_left[0])\n )\n\n region_incidents = len(accident_data[mask])\n regional_counts.append(region_incidents)\n\n total_vehicles = accident_data[mask].Number_of_Vehicles.sum()\n total_casualties = accident_data[mask].Number_of_Casualties.sum()\n content = \"total vehicles {:,.0f}, total casualties {:,.0f}\".format(total_vehicles, total_casualties)\n popup = folium.Popup(content)\n popups.append(popup)\n\n worst_region = max(regional_counts)\n\n # Add GeoJson to map\n for i, box in enumerate(grid):\n geo_json = json.dumps(box)\n\n color = plt.cm.Reds(regional_counts[i] / worst_region)\n color = mpl.colors.to_hex(color)\n\n gj = 
folium.GeoJson(geo_json,\n style_function=lambda feature, color=color: {\n 'fillColor': color,\n 'color':\"black\",\n 'weight': 2,\n 'dashArray': '5, 5',\n 'fillOpacity': 0.55,\n })\n\n gj.add_child(popups[i])\n m.add_child(gj)\n\n # Marker clusters\n locations = list(zip(accident_data.Latitude, accident_data.Longitude))\n icons = [folium.Icon(icon=\"car\", prefix=\"fa\") for _ in range(len(locations))]\n\n # Create popups\n popup_content = []\n for incident in accident_data.itertuples():\n number_of_vehicles = \"Number of vehicles: {} \".format(incident.Number_of_Vehicles)\n number_of_casualties = \"Number of casualties: {}\".format(incident.Number_of_Casualties)\n content = number_of_vehicles + number_of_casualties\n popup_content.append(content)\n\n popups = [folium.Popup(content) for content in popup_content]\n\n cluster = MarkerCluster(locations=locations, icons=icons, popups=popups)\n m.add_child(cluster)\n\n m.save(htmlfilename)", "_____no_output_____" ], [ "generate_heat_gridmap(accident_data, \"car_accidents_all.html\")", "_____no_output_____" ] ], [ [ "Uptill here, we saw the visualization for the whole dataset\nNow lets see how the data is spread based on various light and weather conditions", "_____no_output_____" ] ], [ [ "accident_data_whole = pd.read_csv(\"dataWith5000RNRecordsWithIDAndHeaders\")", "_____no_output_____" ], [ "sorted(accident_data_whole['Light_Conditions'].unique())", "_____no_output_____" ], [ "sorted(accident_data_whole['Weather_Conditions'].unique())", "_____no_output_____" ] ], [ [ "**Assigning separate dataframes with records based on separate light conditions", "_____no_output_____" ], [ "All we got to do now is, call the generate_heat_gridmap to plot the heatmap grid for various datasets below", "_____no_output_____" ], [ "Generating gridmap for Light Conditions == 1", "_____no_output_____" ] ], [ [ "#below one has a shape of (3699,32)\naccident_light1_df = accident_data_whole[accident_data_whole['Light_Conditions'] == 1]", "_____no_output_____" ], [ "accident_light1_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_light1_df, \"accident_light1.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Light Conditions == 4", "_____no_output_____" ] ], [ [ "#below one has a shape of (995,32)\naccident_light4_df = accident_data_whole[accident_data_whole['Light_Conditions'] == 4]", "_____no_output_____" ], [ "accident_light4_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_light4_df, \"accident_light4.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Light Conditions == 5", "_____no_output_____" ] ], [ [ "#below one has a shape of (33,32)\naccident_light5_df = accident_data_whole[accident_data_whole['Light_Conditions'] == 5]", "_____no_output_____" ], [ "accident_light5_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_light5_df, \"accident_light5.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Light Conditions == 6", "_____no_output_____" ] ], [ [ "#below one has a shape of (216,32)\naccident_light6_df = accident_data_whole[accident_data_whole['Light_Conditions'] == 6]", "_____no_output_____" ], [ "accident_light6_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_light6_df, \"accident_light6.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Light Conditions == 7", "_____no_output_____" ] ], [ [ "#below one has a shape of (57,32)\naccident_light7_df = accident_data_whole[accident_data_whole['Light_Conditions'] == 7]", 
"_____no_output_____" ], [ "accident_light7_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_light7_df, \"accident_light7.html\")", "_____no_output_____" ] ], [ [ "**Assigning separate dataframes with records based on same light and weather conditions", "_____no_output_____" ] ], [ [ "#below one has a shape of (3159,32)\naccident_light_weather = accident_data_whole[accident_data_whole['Light_Conditions'] == accident_data_whole['Weather_Conditions']]", "_____no_output_____" ], [ "accident_light_weather.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_light_weather, \"accident_light_weather.html\")", "_____no_output_____" ] ], [ [ "**Assigning separate dataframes with records based on separate weather conditions\nunique weather:[1, 2, 3, 4, 5, 6, 7, 8, 9]", "_____no_output_____" ], [ "Generating gridmap for Weather Conditions == 1", "_____no_output_____" ] ], [ [ "#below one has a shape of (4065, 32)\naccident_weather1_df = accident_data_whole[accident_data_whole['Weather_Conditions'] == 1]", "_____no_output_____" ], [ "accident_weather1_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_weather1_df, \"accident_weather1.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Weather Conditions == 2", "_____no_output_____" ] ], [ [ "#below one has a shape of (585, 32)\naccident_weather2_df = accident_data_whole[accident_data_whole['Weather_Conditions'] == 2]", "_____no_output_____" ], [ "accident_weather2_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_weather2_df, \"accident_weather2.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Weather Conditions == 3", "_____no_output_____" ] ], [ [ "#below one has a shape of (10, 32)\naccident_weather3_df = accident_data_whole[accident_data_whole['Weather_Conditions'] == 3]", "_____no_output_____" ], [ "accident_weather3_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_weather3_df, \"accident_weather3.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Weather Conditions == 4", "_____no_output_____" ] ], [ [ "#below one has a shape of (47, 32)\naccident_weather4_df = accident_data_whole[accident_data_whole['Weather_Conditions'] == 4]", "_____no_output_____" ], [ "accident_weather4_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_weather4_df, \"accident_weather4.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Weather Conditions == 5", "_____no_output_____" ] ], [ [ "#below one has a shape of (88, 32)\naccident_weather5_df = accident_data_whole[accident_data_whole['Weather_Conditions'] == 5]", "_____no_output_____" ], [ "accident_weather5_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_weather5_df, \"accident_weather5.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Weather Conditions == 6", "_____no_output_____" ] ], [ [ "#below one has a shape of (1, 32)\naccident_weather6_df = accident_data_whole[accident_data_whole['Weather_Conditions'] == 6]", "_____no_output_____" ], [ "accident_weather6_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_weather6_df, \"accident_weather6.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Weather Conditions == 7", "_____no_output_____" ] ], [ [ "#below one has a shape of (22, 32)\naccident_weather7_df = accident_data_whole[accident_data_whole['Weather_Conditions'] == 7]", "_____no_output_____" ], [ "accident_weather7_df.shape", "_____no_output_____" ], [ 
"generate_heat_gridmap(accident_weather7_df, \"accident_weather7.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Weather Conditions == 8", "_____no_output_____" ] ], [ [ "#below one has a shape of (92, 32)\naccident_weather8_df = accident_data_whole[accident_data_whole['Weather_Conditions'] == 8]", "_____no_output_____" ], [ "accident_weather8_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_weather8_df, \"accident_weather8.html\")", "_____no_output_____" ] ], [ [ "Generating gridmap for Weather Conditions == 9", "_____no_output_____" ] ], [ [ "#below one has a shape of (90, 32)\naccident_weather9_df = accident_data_whole[accident_data_whole['Weather_Conditions'] == 9]", "_____no_output_____" ], [ "accident_weather9_df.shape", "_____no_output_____" ], [ "generate_heat_gridmap(accident_weather9_df, \"accident_weather9.html\")", "_____no_output_____" ] ], [ [ "Rough Work below", "_____no_output_____" ], [ "accident_data_whole[accident_data_whole['Light_Conditions'] == 7][['Latitude', 'Longitude']].shape", "_____no_output_____" ], [ "accident_data_whole[accident_data_whole['Light_Conditions'] == accident_data_whole['Weather_Conditions']].shape", "_____no_output_____" ], [ "accident_data_whole[accident_data_whole['Weather_Conditions'] == 9][['Latitude', 'Longitude']].shape", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
ec82610bf070720d28718cde32aaafcea88cea4a
77,259
ipynb
Jupyter Notebook
migs_anytrading.ipynb
carlomigs/gym-anytrading
f27a5a1ee54baf483334a4cf852e305e87788589
[ "MIT" ]
null
null
null
migs_anytrading.ipynb
carlomigs/gym-anytrading
f27a5a1ee54baf483334a4cf852e305e87788589
[ "MIT" ]
null
null
null
migs_anytrading.ipynb
carlomigs/gym-anytrading
f27a5a1ee54baf483334a4cf852e305e87788589
[ "MIT" ]
null
null
null
152.685771
21,430
0.79306
[ [ [ "<a href=\"https://colab.research.google.com/github/carlomigs/gym-anytrading/blob/master/migs_anytrading.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "!git clone https://github.com/AminHP/gym-anytrading\n%cd gym-anytrading\n!pip install -e .", "Cloning into 'gym-anytrading'...\nremote: Enumerating objects: 31, done.\u001b[K\nremote: Counting objects: 100% (31/31), done.\u001b[K\nremote: Compressing objects: 100% (26/26), done.\u001b[K\nremote: Total 31 (delta 4), reused 28 (delta 4), pack-reused 0\u001b[K\nUnpacking objects: 100% (31/31), done.\n/content/gym-anytrading\nObtaining file:///content/gym-anytrading\nRequirement already satisfied: gym>=0.12.5 in /usr/local/lib/python3.6/dist-packages (from gym-anytrading==1.0.0) (0.15.4)\nRequirement already satisfied: numpy>=1.16.4 in /usr/local/lib/python3.6/dist-packages (from gym-anytrading==1.0.0) (1.17.4)\nRequirement already satisfied: pandas>=0.24.2 in /usr/local/lib/python3.6/dist-packages (from gym-anytrading==1.0.0) (0.25.3)\nRequirement already satisfied: matplotlib>=3.1.1 in /usr/local/lib/python3.6/dist-packages (from gym-anytrading==1.0.0) (3.1.1)\nRequirement already satisfied: pyglet<=1.3.2,>=1.2.0 in /usr/local/lib/python3.6/dist-packages (from gym>=0.12.5->gym-anytrading==1.0.0) (1.3.2)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from gym>=0.12.5->gym-anytrading==1.0.0) (1.12.0)\nRequirement already satisfied: opencv-python in /usr/local/lib/python3.6/dist-packages (from gym>=0.12.5->gym-anytrading==1.0.0) (3.4.7.28)\nRequirement already satisfied: cloudpickle~=1.2.0 in /usr/local/lib/python3.6/dist-packages (from gym>=0.12.5->gym-anytrading==1.0.0) (1.2.2)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from gym>=0.12.5->gym-anytrading==1.0.0) (1.3.2)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24.2->gym-anytrading==1.0.0) (2.6.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24.2->gym-anytrading==1.0.0) (2018.9)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=3.1.1->gym-anytrading==1.0.0) (1.1.0)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=3.1.1->gym-anytrading==1.0.0) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=3.1.1->gym-anytrading==1.0.0) (2.4.5)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from pyglet<=1.3.2,>=1.2.0->gym>=0.12.5->gym-anytrading==1.0.0) (0.16.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib>=3.1.1->gym-anytrading==1.0.0) (41.6.0)\nInstalling collected packages: gym-anytrading\n Running setup.py develop for gym-anytrading\nSuccessfully installed gym-anytrading\n" ], [ "import gym\nimport gym_anytrading\nfrom gym_anytrading.envs import TradingEnv, ForexEnv, StocksEnv, Actions, Positions \nfrom gym_anytrading.datasets import FOREX_EURUSD_1H_ASK, STOCKS_GOOGL\nimport matplotlib.pyplot as plt\n\nenv = gym.make('forex-v0', frame_bound=(50, 100), window_size=10)\n\nobservation = env.reset()\nwhile True:\n action = env.action_space.sample()\n observation, reward, done, info = 
env.step(action)\n # env.render()\n if done:\n print(\"info:\", info)\n break\n\nplt.cla()\nenv.render_all()\nplt.show()", "info: {'total_reward': -21.29999999999299, 'total_profit': 0.9877752662444237, 'position': 0}\n" ], [ "!apt-get update && sudo apt-get install cmake libopenmpi-dev python3-dev zlib1g-dev", "\r0% [Working]\r \rIgn:1 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\n\r0% [Connecting to archive.ubuntu.com (91.189.88.173)] [Waiting for headers] [Co\r \rGet:2 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]\n\r0% [Waiting for headers] [2 InRelease 14.2 kB/88.7 kB 16%] [Connecting to cloud\r \rIgn:3 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\n\r0% [Waiting for headers] [2 InRelease 14.2 kB/88.7 kB 16%] [Connecting to cloud\r \rHit:4 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release\nHit:5 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\nHit:6 http://archive.ubuntu.com/ubuntu bionic InRelease\nHit:7 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease\nGet:10 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran35/ InRelease [3,626 B]\nGet:11 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]\nGet:12 http://ppa.launchpad.net/marutter/c2d4u3.5/ubuntu bionic InRelease [15.4 kB]\nGet:13 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran35/ Packages [74.7 kB]\nGet:14 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [786 kB]\nGet:15 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]\nGet:16 http://ppa.launchpad.net/marutter/c2d4u3.5/ubuntu bionic/main Sources [1,728 kB]\nGet:17 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [1,312 kB]\nGet:18 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [735 kB]\nGet:19 http://ppa.launchpad.net/marutter/c2d4u3.5/ubuntu bionic/main amd64 Packages [833 kB]\nGet:20 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [1,031 kB]\nFetched 6,771 kB in 7s (937 kB/s)\nReading package lists... Done\nReading package lists... Done\nBuilding dependency tree \nReading state information... 
Done\nzlib1g-dev is already the newest version (1:1.2.11.dfsg-0ubuntu2).\nzlib1g-dev set to manually installed.\nlibopenmpi-dev is already the newest version (2.1.1-8).\ncmake is already the newest version (3.10.2-1ubuntu2.18.04.1).\npython3-dev is already the newest version (3.6.7-1~18.04).\n0 upgraded, 0 newly installed, 0 to remove and 39 not upgraded.\n" ], [ "pip install stable-baselines", "_____no_output_____" ], [ "from stable_baselines import HER, DQN\nfrom stable_baselines.her import GoalSelectionStrategy, HERGoalEnvWrapper\n\nmodel_class = DQN # works also with SAC, DDPG and TD3\n\n#env = BitFlippingEnv(N_BITS, continuous=model_class in [DDPG, SAC, TD3], max_steps=N_BITS)\nenv = gym.make('forex-v0', frame_bound=(50, 100), window_size=10)\n\n# Available strategies (cf paper): future, final, episode, random\ngoal_selection_strategy = 'future' # equivalent to GoalSelectionStrategy.FUTURE\n\n# Wrap the model\nmodel = HER('MlpPolicy', env, model_class, n_sampled_goal=4, goal_selection_strategy=goal_selection_strategy,\n verbose=1)\n# Train the model\nmodel.learn(1000)\n\nmodel.save(\"./her_bit_env\")\n\n# WARNING: you must pass an env\n# or wrap your environment with HERGoalEnvWrapper to use the predict method\nmodel = HER.load('./her_bit_env', env=env)\n\nobs = env.reset()\nwhile True:\n action = model.predict(obs)\n obs, reward, done, info = env.step(action)\n\n if done:\n print(\"info:\", info)\n break\n\nplt.cla()\nenv.render_all()\nplt.show()", "_____no_output_____" ], [ "import gym\n#from stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.deepq.policies import MlpPolicy\nfrom stable_baselines import DQN\n\nenv = gym.make('forex-v0', frame_bound=(50, 1000), window_size=10, unit_side='left')\n\nmodel = DQN(MlpPolicy, env, verbose=0)\nmodel.learn(total_timesteps=1000)\n#model.save(\"dqn_env\")\n\n#model = DQN.load('dqn_env', env=env)\n\nobservation = env.reset()\nwhile True:\n action = model.predict(observation)\n #action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n #env.render()\n if done:\n print(\"info:\", info)\n break\n\nplt.cla()\nenv.render_all()\nplt.show()", "info: {'total_reward': 0.0, 'total_profit': 0.9829643052645488, 'position': 0}\n" ], [ "env.observation_space", "_____no_output_____" ], [ "import gym\n\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.deepq.policies import MlpPolicy\nfrom stable_baselines import DQN\n\nenv = gym.make('CartPole-v1')\n\nmodel = DQN(MlpPolicy, env, verbose=1)\nmodel.learn(total_timesteps=25000)\nmodel.save(\"deepq_cartpole\")\n\ndel model # remove to demonstrate saving and loading\n\nmodel = DQN.load(\"deepq_cartpole\")\n\nobs = env.reset()\nwhile True:\n action, _states = model.predict(obs)\n obs, rewards, dones, info = env.step(action)\n env.render()", "--------------------------------------\n| % time spent exploring | 29 |\n| episodes | 100 |\n| mean 100 episode reward | 18.1 |\n| steps | 1787 |\n--------------------------------------\n--------------------------------------\n| % time spent exploring | 2 |\n| episodes | 200 |\n| mean 100 episode reward | 109 |\n| steps | 12684 |\n--------------------------------------\nLoading a model without an environment, this model cannot be trained until it has a valid environment.\n" ] ], [ [ 
"https://www.ai-articles.net/author/matoksoz/\nhttps://github.com/LantaoYu/MARL-Papers\nhttps://bair.berkeley.edu/blog/2018/12/12/rllib/\n\nhttps://askubuntu.com/questions/1068819/how-to-upload-a-file-from-google-colab-to-github-or-kaggle\nhttps://cloud.google.com/ai-platform/notebooks/docs/save-to-github\nhttps://datascience.stackexchange.com/questions/37420/how-to-upload-a-saved-file-from-google-colab-to-a-site-such-as-kaggle-or-github\nhttps://dev.to/kriyeng/8-tips-for-google-colab-notebooks-to-take-advantage-of-their-free-of-charge-12gb-ram-gpu-be4\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ec82659886fe0a06c3de55631ba424696a68e1bc
11,083
ipynb
Jupyter Notebook
src/digitClassifier.ipynb
GeoRouv/digit-recognizer
ea215c9393418aa09fa45745bb110acfc8ec38d7
[ "MIT" ]
3
2021-03-07T14:20:47.000Z
2022-02-03T12:04:42.000Z
src/digitClassifier.ipynb
GeoRouv/digit-recognizer
ea215c9393418aa09fa45745bb110acfc8ec38d7
[ "MIT" ]
null
null
null
src/digitClassifier.ipynb
GeoRouv/digit-recognizer
ea215c9393418aa09fa45745bb110acfc8ec38d7
[ "MIT" ]
null
null
null
11,083
11,083
0.65614
[ [ [ "## Import Libraries", "_____no_output_____" ] ], [ [ "import numpy as np\r\nfrom numpy import mean\r\nfrom numpy import std\r\nfrom matplotlib import pyplot\r\nfrom keras.datasets import mnist\r\nfrom keras.utils import to_categorical\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dropout\r\nfrom keras.optimizers import SGD,RMSprop\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau\r\nimport cv2\r\n\r\nfrom google.colab import drive\r\ndrive.mount('/content/drive')\r\n\r\nimport os\r\nos.chdir(\"drive/My Drive/Colab Notebooks/ML/\")", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ] ], [ [ "### Load MNIST dataset\r\n\r\n- Loading data from keras library\r\n- Reshaping dataset to have a single channel\r\n- Converting categorical data to numerical data using one hot encoding", "_____no_output_____" ] ], [ [ "# Load dataset\r\n(trainX, trainY), (testX, testY) = mnist.load_data()\r\n \r\n# Reshape dataset\r\ntrainX = trainX.reshape((trainX.shape[0], 28, 28, 1))\r\ntestX = testX.reshape((testX.shape[0], 28, 28, 1))\r\n\r\n# One hot encode target values\r\ntrainY = to_categorical(trainY)\r\ntestY = to_categorical(testY)", "_____no_output_____" ] ], [ [ "### Pixels Scaling\r\n\r\n**Normalization**: Pixel values are scaled to the range 0-1\r\n\r\nNeural network models often cannot be trained on raw pixel values, such as pixel values in the range of 0 to 255.\r\n\r\nThe reason is that the network uses a weighted sum of inputs, and for the network to both be stable and train effectively, weights should be kept small.\r\n\r\nInstead, the pixel values must be scaled prior to training.\r\n\r\nNormalization is often the default approach as we can assume pixel values are always in the range 0-255, making the procedure very simple and efficient to implement.", "_____no_output_____" ] ], [ [ "# Convert from integers to floats\r\ntrainX = trainX.astype('float32')\r\ntestX = testX.astype('float32')\r\n\r\n# Normalize to range 0-1\r\ntrainX = trainX / 255.0\r\ntestX = testX / 255.0", "_____no_output_____" ] ], [ [ "### CNN model definition\r\n\r\n- #### Configuration\r\n - **1 2D Convolution Layer**: (3,3) is the dimensionality space of output, RELU is the activation function. HE initializer performs better than normal thats why is selected.\r\n - **1 Flatten Layer**: Flatten the data so they can be passed to dense layer (keeping 1 dimension)\r\n - **2 Dense Layers**: Dense layers are used when association can exist among any feature to any other feature in data point. Since between two layers of size n1 and n2, there can n1∗n2 connections and these are referred to as Dense. 
The first one contains a RELU activation function while the second is the softmax layer.\r\n\r\n- #### Compilation\r\n - **Optimizer**: Gradient descent is a good one for general purposes ( Adam can be used as well ) \r\n - **Loss function**: Since we’re using a Softmax output layer, we’ll use the Cross-Entropy loss", "_____no_output_____" ] ], [ [ "def define_model():\r\n model = Sequential()\r\n model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))\r\n model.add(MaxPooling2D((2, 2)))\r\n model.add(Flatten())\r\n model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))\r\n model.add(Dense(10, activation='softmax'))\r\n \r\n # model compilation\r\n opt = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0, centered=False,name=\"RMSprop\")\r\n model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\r\n return model\r\n\r\nlearning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', \r\n patience=3, \r\n verbose=1, \r\n factor=0.5, \r\n min_lr=0.00001)", "_____no_output_____" ] ], [ [ "## Data Augmentation\r\n\r\nIn order to avoid over-fitting problem, we need to expand artificially our handwritten digit dataset.\r\nData augmentation is a strategy that enables to significantly increase the diversity of data available for our training model. ", "_____no_output_____" ] ], [ [ "def adjust_gamma(image):\r\n img = np.power(image/float(np.max(image)), 1.5)\r\n\r\n return img\r\n\r\ndef my_preprocessing_func(img):\r\n img = adjust_gamma(img)\r\n\r\n image = np.array(img)\r\n return image / 255\r\n\r\ndatagen = ImageDataGenerator(\r\n featurewise_center=True, # set input mean to 0 over the dataset\r\n samplewise_center=True, # set each sample mean to 0\r\n featurewise_std_normalization=True, # divide inputs by std of the dataset\r\n samplewise_std_normalization=True, # divide each input by its std\r\n zca_whitening=False, # apply ZCA whitening\r\n rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)\r\n zoom_range = 0.1, # Randomly zoom image \r\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\r\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\r\n horizontal_flip=False, # randomly flip images\r\n vertical_flip=False, # randomly flip images\r\n preprocessing_function=my_preprocessing_func) \r\ndatagen.fit(trainX)", "_____no_output_____" ] ], [ [ "## Dataset Expansion\r\n\r\nExpand the original dataset with the augmented MNIST images.", "_____no_output_____" ] ], [ [ "counter = 0\r\nbatch_size=9\r\noriginal_samples = trainX.shape[0]\r\n\r\nfor X_batch, y_batch in datagen.flow(trainX, trainY, batch_size):\r\n trainX = np.concatenate((trainX, X_batch), axis=0)\r\n trainY = np.concatenate((trainY, y_batch), axis=0)\r\n\r\n if counter == 1000:\r\n break\r\n\r\n counter += 1\r\n\r\n # # create a grid of 3x3 images\r\n # for i in range(0, 9):\r\n \r\n # pyplot.subplot(330 + 1 + i)\r\n # pyplot.imshow(X_batch[i].reshape(28, 28), cmap=pyplot.get_cmap('gray'))\r\n # pyplot.show()\r\n # break\r\n # ###########\r\n", "_____no_output_____" ] ], [ [ "### Model Evaluation\r\n\r\nModel was finally fitted to the original dataset, merged with the augmented.\r\nValidation was performed on the validation data.\r\n\r\nThe steps per epoch was calculated as train-length / batch-size, since this uses all of the data points, one batch size worth at a time.\r\n\r\nWhen the metric had stopped improving the learning rate was 
reduced.", "_____no_output_____" ] ], [ [ "model = define_model()\r\nhistory = model.fit(trainX, trainY, batch_size=32, validation_data=(testX, testY),steps_per_epoch=len(trainX) / 32, epochs=10, callbacks=[learning_rate_reduction], verbose=1)\r\n_, acc = model.evaluate(testX, testY, verbose=0)\r\nprint('> %.3f' % (acc * 100.0))\r\n\r\nmodel.save(\"./mymodel.h5\")", "Epoch 1/10\n2156/2156 [==============================] - 44s 20ms/step - loss: 0.8638 - accuracy: 0.8334 - val_loss: 0.0748 - val_accuracy: 0.9765\nEpoch 2/10\n2156/2156 [==============================] - 44s 20ms/step - loss: 0.2748 - accuracy: 0.9520 - val_loss: 0.0630 - val_accuracy: 0.9813\nEpoch 3/10\n2156/2156 [==============================] - 42s 20ms/step - loss: 0.2471 - accuracy: 0.9647 - val_loss: 0.0534 - val_accuracy: 0.9832\nEpoch 4/10\n2156/2156 [==============================] - 43s 20ms/step - loss: 0.2273 - accuracy: 0.9704 - val_loss: 0.0532 - val_accuracy: 0.9836\nEpoch 5/10\n2156/2156 [==============================] - 43s 20ms/step - loss: 0.2424 - accuracy: 0.9752 - val_loss: 0.0536 - val_accuracy: 0.9827\nEpoch 6/10\n2156/2156 [==============================] - 45s 21ms/step - loss: 0.1968 - accuracy: 0.9780 - val_loss: 0.0530 - val_accuracy: 0.9833\nEpoch 7/10\n2156/2156 [==============================] - 43s 20ms/step - loss: 0.2105 - accuracy: 0.9787 - val_loss: 0.0505 - val_accuracy: 0.9846\nEpoch 8/10\n2156/2156 [==============================] - 43s 20ms/step - loss: 0.2009 - accuracy: 0.9812 - val_loss: 0.0654 - val_accuracy: 0.9824\nEpoch 9/10\n2156/2156 [==============================] - 43s 20ms/step - loss: 0.1764 - accuracy: 0.9832 - val_loss: 0.0578 - val_accuracy: 0.9835\nEpoch 10/10\n2156/2156 [==============================] - 44s 20ms/step - loss: 0.1851 - accuracy: 0.9839 - val_loss: 0.0582 - val_accuracy: 0.9836\n\nEpoch 00010: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.\n> 98.360\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec827837b11c731f394f3362fcc45881c3ed0427
620,397
ipynb
Jupyter Notebook
Data/low_temperature_hyst/low_temp_hyst_plots.ipynb
Swanson-Hysell-Group/2018_Red_Bed_Intraclasts
74779a4d75747f2c9998a05f962dd90c8ab2662c
[ "CC-BY-4.0" ]
null
null
null
Data/low_temperature_hyst/low_temp_hyst_plots.ipynb
Swanson-Hysell-Group/2018_Red_Bed_Intraclasts
74779a4d75747f2c9998a05f962dd90c8ab2662c
[ "CC-BY-4.0" ]
null
null
null
Data/low_temperature_hyst/low_temp_hyst_plots.ipynb
Swanson-Hysell-Group/2018_Red_Bed_Intraclasts
74779a4d75747f2c9998a05f962dd90c8ab2662c
[ "CC-BY-4.0" ]
null
null
null
254.887839
180,636
0.883655
[ [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\nimport matplotlib.colors as colors", "_____no_output_____" ], [ "viridis = cm = plt.get_cmap('viridis') \ncNorm = matplotlib.colors.Normalize(vmin=50, vmax=300)\nscalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap='viridis')\n\ncolor_50 = scalarMap.to_rgba(50)\ncolor_50_hex = colors.rgb2hex(color_50)\ncolor_100 = scalarMap.to_rgba(100)\ncolor_100_hex = colors.rgb2hex(color_100)\ncolor_150 = scalarMap.to_rgba(150)\ncolor_150_hex = colors.rgb2hex(color_150)\ncolor_200 = scalarMap.to_rgba(200)\ncolor_200_hex = colors.rgb2hex(color_200)\ncolor_250 = scalarMap.to_rgba(250)\ncolor_250_hex = colors.rgb2hex(color_250)\ncolor_300 = scalarMap.to_rgba(300)\ncolor_300_hex = colors.rgb2hex(color_300)", "_____no_output_____" ], [ "BRIC_26_loops = pd.read_csv('BRIC_26_lowt_loops.csv')\n\nBRIC_26_loop_300 = BRIC_26_loops[BRIC_26_loops['Temp (K)']==300]\nBRIC_26_loop_250 = BRIC_26_loops[BRIC_26_loops['Temp (K)']==250]\nBRIC_26_loop_200 = BRIC_26_loops[BRIC_26_loops['Temp (K)']==200]\nBRIC_26_loop_150 = BRIC_26_loops[BRIC_26_loops['Temp (K)']==150]\nBRIC_26_loop_100 = BRIC_26_loops[BRIC_26_loops['Temp (K)']==100]\nBRIC_26_loop_50 = BRIC_26_loops[BRIC_26_loops['Temp (K)']==50]\nBRIC_26_loop_10 = BRIC_26_loops[BRIC_26_loops['Temp (K)']==10]\n\nplt.plot(BRIC_26_loop_300['X.2'],BRIC_26_loop_300['Mf'],color=color_300_hex,label='300 K')\nplt.plot(BRIC_26_loop_250['X.2'],BRIC_26_loop_250['Mf'],color=color_250_hex,label='250 K')\nplt.plot(BRIC_26_loop_200['X.2'],BRIC_26_loop_200['Mf'],color=color_200_hex,label='200 K')\nplt.plot(BRIC_26_loop_150['X.2'],BRIC_26_loop_150['Mf'],color=color_150_hex,label='150 K')\nplt.plot(BRIC_26_loop_100['X.2'],BRIC_26_loop_100['Mf'],color=color_100_hex,label='100 K')\nplt.plot(BRIC_26_loop_50['X.2'],BRIC_26_loop_50['Mf'],color=color_50_hex,label='50 K')\nplt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-5,5)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.ylim(ymin, ymax)\nplt.legend()\nplt.xlabel('applied field (T)')\nplt.ylabel('magnetization (Am$^2$/kg)')\nplt.text(4.5,ymin*0.9,'BRIC 26',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\nplt.show()", "_____no_output_____" ], [ "print(len(BRIC_26_loop_250))\nprint(len(BRIC_26_loop_200))\nprint(len(BRIC_26_loop_150))\nprint(len(BRIC_26_loop_100))\nprint(len(BRIC_26_loop_50))", "4020\n4020\n4020\n4020\n4020\n" ], [ "BRIC_20_loops = pd.read_csv('BRIC_20_lowt_loops.csv')\n\nBRIC_20_loop_300 = BRIC_20_loops[BRIC_20_loops['Temp (K)']==300]\nBRIC_20_loop_250 = BRIC_20_loops[BRIC_20_loops['Temp (K)']==250]\nBRIC_20_loop_200 = BRIC_20_loops[BRIC_20_loops['Temp (K)']==200]\nBRIC_20_loop_150 = BRIC_20_loops[BRIC_20_loops['Temp (K)']==150]\nBRIC_20_loop_100 = BRIC_20_loops[BRIC_20_loops['Temp (K)']==100]\nBRIC_20_loop_50 = BRIC_20_loops[BRIC_20_loops['Temp (K)']==50]\nBRIC_20_loop_10 = BRIC_20_loops[BRIC_20_loops['Temp (K)']==10]\n\nplt.plot(BRIC_20_loop_300['X.2'],BRIC_20_loop_300['Mf'],color=color_300_hex,label='300 K')\nplt.plot(BRIC_20_loop_250['X.2'],BRIC_20_loop_250['Mf'],color=color_250_hex,label='250 K')\nplt.plot(BRIC_20_loop_200['X.2'],BRIC_20_loop_200['Mf'],color=color_200_hex,label='200 K')\nplt.plot(BRIC_20_loop_150['X.2'],BRIC_20_loop_150['Mf'],color=color_150_hex,label='150 K')\nplt.plot(BRIC_20_loop_100['X.2'],BRIC_20_loop_100['Mf'],color=color_100_hex,label='100 K')\nplt.plot(BRIC_20_loop_50['X.2'],BRIC_20_loop_50['Mf'],color=color_50_hex,label='50 
K')\nplt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-5,5)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.ylim(ymin, ymax)\nplt.legend()\nplt.xlabel('applied field (T)')\nplt.ylabel('magnetization (Am$^2$/kg)')\nplt.text(4.5,ymin*0.9,'BRIC 20',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\nplt.show()", "_____no_output_____" ], [ "BRIC_31_loops = pd.read_csv('BRIC_31_lowt_loops.csv')\n\nBRIC_31_loop_300 = BRIC_31_loops[BRIC_31_loops['Temp (K)']==300]\nBRIC_31_loop_250 = BRIC_31_loops[BRIC_31_loops['Temp (K)']==250]\nBRIC_31_loop_200 = BRIC_31_loops[BRIC_31_loops['Temp (K)']==200]\nBRIC_31_loop_150 = BRIC_31_loops[BRIC_31_loops['Temp (K)']==150]\nBRIC_31_loop_100 = BRIC_31_loops[BRIC_31_loops['Temp (K)']==100]\nBRIC_31_loop_50 = BRIC_31_loops[BRIC_31_loops['Temp (K)']==50]\nBRIC_31_loop_10 = BRIC_31_loops[BRIC_31_loops['Temp (K)']==10]\n\nplt.plot(BRIC_31_loop_300['X.2'],BRIC_31_loop_300['Mf'],color=color_300_hex,label='300 K')\nplt.plot(BRIC_31_loop_250['X.2'],BRIC_31_loop_250['Mf'],color=color_250_hex,label='250 K')\nplt.plot(BRIC_31_loop_200['X.2'],BRIC_31_loop_200['Mf'],color=color_200_hex,label='200 K')\nplt.plot(BRIC_31_loop_150['X.2'],BRIC_31_loop_150['Mf'],color=color_150_hex,label='150 K')\nplt.plot(BRIC_31_loop_100['X.2'],BRIC_31_loop_100['Mf'],color=color_100_hex,label='100 K')\nplt.plot(BRIC_31_loop_50['X.2'],BRIC_31_loop_50['Mf'],color=color_50_hex,label='50 K')\nplt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-5,5)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.ylim(ymin, ymax)\nplt.legend()\nplt.xlabel('applied field (T)')\nplt.ylabel('magnetization (Am$^2$/kg)')\nplt.text(4.5,ymin*0.9,'BRIC 31',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\nplt.show()", "_____no_output_____" ], [ "BRIC_22_loops = pd.read_csv('BRIC_22_lowt_loops.csv')\n\nBRIC_22_loop_300 = BRIC_22_loops[BRIC_22_loops['Temp (K)']==300]\nBRIC_22_loop_250 = BRIC_22_loops[BRIC_22_loops['Temp (K)']==250]\nBRIC_22_loop_200 = BRIC_22_loops[BRIC_22_loops['Temp (K)']==200]\nBRIC_22_loop_150 = BRIC_22_loops[BRIC_22_loops['Temp (K)']==150]\nBRIC_22_loop_100 = BRIC_22_loops[BRIC_22_loops['Temp (K)']==100]\n#BRIC_22_loop_50 = BRIC_22_loops[BRIC_22_loops['Temp (K)']==50]\nBRIC_22_loop_10 = BRIC_22_loops[BRIC_22_loops['Temp (K)']==10]\n\nplt.plot(BRIC_22_loop_300['X.2'],BRIC_22_loop_300['Mf'],color=color_300_hex,label='300 K')\n#plt.plot(BRIC_22_loop_250['X.2'],BRIC_22_loop_250['Mf'],color=color_250_hex,label='250 K')\nplt.plot(BRIC_22_loop_200['X.2'],BRIC_22_loop_200['Mf'],color=color_200_hex,label='200 K')\nplt.plot(BRIC_22_loop_150['X.2'],BRIC_22_loop_150['Mf'],color=color_150_hex,label='150 K')\nplt.plot(BRIC_22_loop_100['X.2'],BRIC_22_loop_100['Mf'],color=color_100_hex,label='100 K')\n#plt.plot(BRIC_22_loop_50['X.2'],BRIC_22_loop_50['Mf'],color=color_50_hex,label='50 K')\nplt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-5,5)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.ylim(ymin, ymax)\nplt.legend()\nplt.xlabel('applied field (T)')\nplt.ylabel('magnetization (Am$^2$/kg)')\nplt.text(4.5,ymin*0.9,'BRIC 22',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\nplt.show()", "_____no_output_____" ], [ "BRIC_20_lowt_loops_summary = pd.read_csv('BRIC_20_lowt_loops_summary.csv')\nBRIC_22_lowt_loops_summary = pd.read_csv('BRIC_22_lowt_loops_summary.csv')\nBRIC_26_lowt_loops_summary = pd.read_csv('BRIC_26_lowt_loops_summary.csv')\nBRIC_31_lowt_loops_summary = 
pd.read_csv('BRIC_31_lowt_loops_summary.csv')\nBRIC_31_lowt_loops_summary.columns", "_____no_output_____" ], [ "plt.scatter(BRIC_20_lowt_loops_summary['T [K]'],BRIC_20_lowt_loops_summary['Ms [Am2/kg]'])\nplt.scatter(BRIC_22_lowt_loops_summary['T [K]'],BRIC_22_lowt_loops_summary['Ms [Am2/kg]'])\nplt.scatter(BRIC_26_lowt_loops_summary['T [K]'],BRIC_26_lowt_loops_summary['Ms [Am2/kg]'])\nplt.scatter(BRIC_31_lowt_loops_summary['T [K]'],BRIC_31_lowt_loops_summary['Ms [Am2/kg]'])\nplt.ylim(0.01,0.061)", "_____no_output_____" ], [ "plt.scatter(BRIC_20_lowt_loops_summary['T [K]'],BRIC_20_lowt_loops_summary['Mr [Am2/kg]'])\n#plt.scatter(BRIC_22_lowt_loops_summary['T [K]'],BRIC_22_lowt_loops_summary['Mr [Am2/kg]'])\nplt.scatter(BRIC_26_lowt_loops_summary['T [K]'],BRIC_26_lowt_loops_summary['Mr [Am2/kg]'])\n#plt.scatter(BRIC_31_lowt_loops_summary['T [K]'],BRIC_31_lowt_loops_summary['Mr [Am2/kg]'])\nplt.ylim(0.0075,0.014)", "_____no_output_____" ], [ "BRIC_22_lowt_loops_summary", "_____no_output_____" ], [ "BRIC_20_300_50_percent_increase = ((BRIC_20_lowt_loops_summary['Mr [Am2/kg]'][2]-BRIC_20_lowt_loops_summary['Mr [Am2/kg]'][0])/BRIC_20_lowt_loops_summary['Mr [Am2/kg]'][0])*100\nprint(BRIC_20_300_50_percent_increase)\nBRIC_26_300_50_percent_increase = ((BRIC_26_lowt_loops_summary['Mr [Am2/kg]'][2]-BRIC_26_lowt_loops_summary['Mr [Am2/kg]'][0])/BRIC_26_lowt_loops_summary['Mr [Am2/kg]'][0])*100\nprint(BRIC_26_300_50_percent_increase)\nBRIC_31_300_50_percent_increase = ((BRIC_31_lowt_loops_summary['Mr [Am2/kg]'][2]-BRIC_31_lowt_loops_summary['Mr [Am2/kg]'][0])/BRIC_31_lowt_loops_summary['Mr [Am2/kg]'][0])*100\nprint(BRIC_31_300_50_percent_increase)", "12.5975473802\n11.0169491525\n8.99001109878\n" ], [ "plt.figure(figsize=(10,4))\n\nplt.subplot(1,2,1)\nplt.text(4.5,0.035,'$M_s$',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18,color='grey')\nplt.arrow(4.8, np.max(BRIC_20_loop_300['Mf']), 0, (np.max(BRIC_20_loop_50['Mf'])-np.max(BRIC_20_loop_300['Mf']))*0.9,\n head_width=0.2, head_length=0.003,length_includes_head=True,linewidth=3,color='grey')\nplt.plot(BRIC_20_loop_300['X.2'],BRIC_20_loop_300['Mf'],color=color_300_hex,label='300 K')\nplt.plot(BRIC_20_loop_250['X.2'],BRIC_20_loop_250['Mf'],color=color_250_hex,label='250 K')\nplt.plot(BRIC_20_loop_200['X.2'],BRIC_20_loop_200['Mf'],color=color_200_hex,label='200 K')\nplt.plot(BRIC_20_loop_150['X.2'],BRIC_20_loop_150['Mf'],color=color_150_hex,label='150 K')\nplt.plot(BRIC_20_loop_100['X.2'],BRIC_20_loop_100['Mf'],color=color_100_hex,label='100 K')\nplt.plot(BRIC_20_loop_50['X.2'],BRIC_20_loop_50['Mf'],color=color_50_hex,label='50 K')\nplt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-5,5)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\n\n\nplt.ylim(ymin, ymax)\nplt.legend()\nplt.xlabel('applied field (T)',fontsize=12)\nplt.ylabel('magnetization (Am$^2$/kg)',fontsize=12)\nplt.text(4.5,ymin*0.9,'BRIC 20',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\nplt.tick_params(axis='both', which='major', labelsize=12)\n\n\nplt.subplot(1,2,2)\nplt.text(4.5,0.031,'$M_s$',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18,color='grey')\nplt.arrow(4.8, np.max(BRIC_26_loop_300['Mf']), 0, (np.max(BRIC_26_loop_50['Mf'])-np.max(BRIC_26_loop_300['Mf']))*.9,\n head_width=0.2, head_length=0.003,length_includes_head=True,linewidth=3,color='grey')\nplt.plot(BRIC_26_loop_300['X.2'],BRIC_26_loop_300['Mf'],color=color_300_hex,label='300 
K')\nplt.plot(BRIC_26_loop_250['X.2'],BRIC_26_loop_250['Mf'],color=color_250_hex,label='250 K')\nplt.plot(BRIC_26_loop_200['X.2'],BRIC_26_loop_200['Mf'],color=color_200_hex,label='200 K')\nplt.plot(BRIC_26_loop_150['X.2'],BRIC_26_loop_150['Mf'],color=color_150_hex,label='150 K')\nplt.plot(BRIC_26_loop_100['X.2'],BRIC_26_loop_100['Mf'],color=color_100_hex,label='100 K')\nplt.plot(BRIC_26_loop_50['X.2'],BRIC_26_loop_50['Mf'],color=color_50_hex,label='50 K')\nplt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-5,5)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.ylim(ymin, ymax)\nplt.legend()\nplt.xlabel('applied field (T)',fontsize=12)\nplt.ylabel('magnetization (Am$^2$/kg)',fontsize=12)\nplt.text(4.5,ymin*0.9,'BRIC 26',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\nplt.tick_params(axis='both', which='major', labelsize=12)\nplt.tight_layout()\nplt.savefig('low_temp_loops.pdf')\nplt.show()", "_____no_output_____" ], [ "plt.figure(figsize=(10,4))\n\nplt.subplot(1,2,1)\n#plt.text(4.5,0.035,'$M_s$',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18,color='grey')\n#plt.arrow(4.8, np.max(BRIC_20_loop_300['Mf']), 0, (np.max(BRIC_20_loop_50['Mf'])-np.max(BRIC_20_loop_300['Mf']))*0.9,\n# head_width=0.2, head_length=0.003,length_includes_head=True,linewidth=3,color='grey')\nplt.plot(BRIC_20_loop_300['X.2'],BRIC_20_loop_300['Mf'],'o-',color=color_300_hex,label='300 K')\nplt.plot(BRIC_20_loop_250['X.2'],BRIC_20_loop_250['Mf'],'o-',color=color_250_hex,label='250 K')\nplt.plot(BRIC_20_loop_200['X.2'],BRIC_20_loop_200['Mf'],'o-',color=color_200_hex,label='200 K')\nplt.plot(BRIC_20_loop_150['X.2'],BRIC_20_loop_150['Mf'],'o-',color=color_150_hex,label='150 K')\nplt.plot(BRIC_20_loop_100['X.2'],BRIC_20_loop_100['Mf'],'o-',color=color_100_hex,label='100 K')\nplt.plot(BRIC_20_loop_50['X.2'],BRIC_20_loop_50['Mf'],'o-',color=color_50_hex,label='50 K')\n#plt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-.02,.02)\nplt.ylim(.0075,.011)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\n\n\n# plt.ylim(ymin, ymax)\nplt.legend()\n# plt.xlabel('applied field (T)',fontsize=12)\n# plt.ylabel('magnetization (Am$^2$/kg)',fontsize=12)\n# plt.text(4.5,ymin*0.9,'BRIC 20',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\n# plt.tick_params(axis='both', which='major', labelsize=12)\n\n\n# plt.subplot(1,2,2)\n# plt.text(4.5,0.031,'$M_s$',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18,color='grey')\n# plt.arrow(4.8, np.max(BRIC_26_loop_300['Mf']), 0, (np.max(BRIC_26_loop_50['Mf'])-np.max(BRIC_26_loop_300['Mf']))*.9,\n# head_width=0.2, head_length=0.003,length_includes_head=True,linewidth=3,color='grey')\n# plt.plot(BRIC_26_loop_300['X.2'],BRIC_26_loop_300['Mf'],color=color_300_hex,label='300 K')\n# plt.plot(BRIC_26_loop_250['X.2'],BRIC_26_loop_250['Mf'],color=color_250_hex,label='250 K')\n# plt.plot(BRIC_26_loop_200['X.2'],BRIC_26_loop_200['Mf'],color=color_200_hex,label='200 K')\n# plt.plot(BRIC_26_loop_150['X.2'],BRIC_26_loop_150['Mf'],color=color_150_hex,label='150 K')\n# plt.plot(BRIC_26_loop_100['X.2'],BRIC_26_loop_100['Mf'],color=color_100_hex,label='100 K')\n# plt.plot(BRIC_26_loop_50['X.2'],BRIC_26_loop_50['Mf'],color=color_50_hex,label='50 K')\n# plt.hlines(0,-5,5,zorder=-1)\n# plt.xlim(-5,5)\n# ymin, ymax = plt.ylim()\n# plt.vlines(0,ymin,ymax,zorder=-1)\n# plt.ylim(ymin, ymax)\n# plt.legend()\n# plt.xlabel('applied field (T)',fontsize=12)\n# plt.ylabel('magnetization 
(Am$^2$/kg)',fontsize=12)\n# plt.text(4.5,ymin*0.9,'BRIC 26',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\n# plt.tick_params(axis='both', which='major', labelsize=12)\n# plt.tight_layout()\n# plt.savefig('low_temp_loops.pdf')\n# plt.show()", "_____no_output_____" ], [ "plt.figure(figsize=(12,7))\n\nplt.subplot(2,3,1)\nplt.plot(BRIC_20_loop_300['X.1'],BRIC_20_loop_300['M_loop_raw'],color=color_300_hex,label='300 K')\nplt.plot(BRIC_20_loop_250['X.1'],BRIC_20_loop_250['M_loop_raw'],color=color_250_hex,label='250 K')\nplt.plot(BRIC_20_loop_200['X.1'],BRIC_20_loop_200['M_loop_raw'],color=color_200_hex,label='200 K')\nplt.plot(BRIC_20_loop_150['X.1'],BRIC_20_loop_150['M_loop_raw'],color=color_150_hex,label='150 K')\nplt.plot(BRIC_20_loop_100['X.1'],BRIC_20_loop_100['M_loop_raw'],color=color_100_hex,label='100 K')\nplt.plot(BRIC_20_loop_50['X.1'],BRIC_20_loop_50['M_loop_raw'],color=color_50_hex,label='50 K')\nplt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-5,5)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.ylim(ymin, ymax)\nplt.xlabel('applied field (T)',fontsize=12)\nplt.ylabel('magnetization (Am$^2$/kg)',fontsize=12)\nplt.text(4.5,ymin*0.9,'BRIC 20',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\nplt.legend()\n\nplt.subplot(2,3,2)\nplt.text(4.5,0.035,'$M_s$',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18,color='grey')\nplt.arrow(4.8, np.max(BRIC_20_loop_300['Mf']), 0, (np.max(BRIC_20_loop_50['Mf'])-np.max(BRIC_20_loop_300['Mf']))*0.9,\n head_width=0.2, head_length=0.003,length_includes_head=True,linewidth=3,color='grey')\nplt.plot(BRIC_20_loop_300['X.2'],BRIC_20_loop_300['Mf'],color=color_300_hex,label='300 K')\nplt.plot(BRIC_20_loop_250['X.2'],BRIC_20_loop_250['Mf'],color=color_250_hex,label='250 K')\nplt.plot(BRIC_20_loop_200['X.2'],BRIC_20_loop_200['Mf'],color=color_200_hex,label='200 K')\nplt.plot(BRIC_20_loop_150['X.2'],BRIC_20_loop_150['Mf'],color=color_150_hex,label='150 K')\nplt.plot(BRIC_20_loop_100['X.2'],BRIC_20_loop_100['Mf'],color=color_100_hex,label='100 K')\nplt.plot(BRIC_20_loop_50['X.2'],BRIC_20_loop_50['Mf'],color=color_50_hex,label='50 K')\nplt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-5,5)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.ylim(ymin, ymax)\nplt.legend()\nplt.xlabel('applied field (T)',fontsize=12)\nplt.ylabel('magnetization (Am$^2$/kg)',fontsize=12)\nplt.tick_params(axis='both', which='major', labelsize=12)\nplt.text(4.5,ymin*0.9,'BRIC 20',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\n\nplt.subplot(2,3,3)\n#plt.text(4.5,0.035,'$M_s$',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18,color='grey')\n#plt.arrow(4.8, np.max(BRIC_20_loop_300['Mf']), 0, (np.max(BRIC_20_loop_50['Mf'])-np.max(BRIC_20_loop_300['Mf']))*0.9,\n# head_width=0.2, head_length=0.003,length_includes_head=True,linewidth=3,color='grey')\nplt.plot(BRIC_20_loop_300['X.2'],BRIC_20_loop_300['Mf'],'o-',color=color_300_hex,label='300 K')\nplt.plot(BRIC_20_loop_250['X.2'],BRIC_20_loop_250['Mf'],'o-',color=color_250_hex,label='250 K')\nplt.plot(BRIC_20_loop_200['X.2'],BRIC_20_loop_200['Mf'],'o-',color=color_200_hex,label='200 K')\nplt.plot(BRIC_20_loop_150['X.2'],BRIC_20_loop_150['Mf'],'o-',color=color_150_hex,label='150 K')\nplt.plot(BRIC_20_loop_100['X.2'],BRIC_20_loop_100['Mf'],'o-',color=color_100_hex,label='100 
K')\nplt.plot(BRIC_20_loop_50['X.2'],BRIC_20_loop_50['Mf'],'o-',color=color_50_hex,label='50 K')\n#plt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-.024,.024)\nplt.ylim(.0085,.0105)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.legend()\nplt.xlabel('applied field (T)',fontsize=12)\nplt.ylabel('magnetization (Am$^2$/kg)',fontsize=12)\nplt.tick_params(axis='both', which='major', labelsize=12)\nplt.text(-0.0015,0.010,'$M_r$',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18,color='grey')\nplt.arrow(-0.001, 0.00885, 0, 0.00125, width=0.0005,\n head_width=0.002, head_length=0.00016,\n length_includes_head=True,linewidth=0.5,color='grey')\n\nplt.subplot(2,3,4)\nplt.plot(BRIC_26_loop_300['X.1'],BRIC_26_loop_300['M_loop_raw'],color=color_300_hex,label='300 K')\nplt.plot(BRIC_26_loop_250['X.1'],BRIC_26_loop_250['M_loop_raw'],color=color_250_hex,label='250 K')\nplt.plot(BRIC_26_loop_200['X.1'],BRIC_26_loop_200['M_loop_raw'],color=color_200_hex,label='200 K')\nplt.plot(BRIC_26_loop_150['X.1'],BRIC_26_loop_150['M_loop_raw'],color=color_150_hex,label='150 K')\nplt.plot(BRIC_26_loop_100['X.1'],BRIC_26_loop_100['M_loop_raw'],color=color_100_hex,label='100 K')\nplt.plot(BRIC_26_loop_50['X.1'],BRIC_26_loop_50['M_loop_raw'],color=color_50_hex,label='50 K')\nplt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-5,5)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.ylim(ymin, ymax)\nplt.xlabel('applied field (T)',fontsize=12)\nplt.ylabel('magnetization (Am$^2$/kg)',fontsize=12)\nplt.text(4.5,ymin*0.9,'BRIC 26',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\nplt.legend()\n\nplt.subplot(2,3,5)\nplt.text(4.5,0.03,'$M_s$',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18,color='grey')\nplt.arrow(4.8, np.max(BRIC_26_loop_300['Mf']), 0, (np.max(BRIC_26_loop_50['Mf'])-np.max(BRIC_26_loop_300['Mf']))*0.9,\n head_width=0.2, head_length=0.003,length_includes_head=True,linewidth=3,color='grey')\nplt.plot(BRIC_26_loop_300['X.2'],BRIC_26_loop_300['Mf'],color=color_300_hex,label='300 K')\nplt.plot(BRIC_26_loop_250['X.2'],BRIC_26_loop_250['Mf'],color=color_250_hex,label='250 K')\nplt.plot(BRIC_26_loop_200['X.2'],BRIC_26_loop_200['Mf'],color=color_200_hex,label='200 K')\nplt.plot(BRIC_26_loop_150['X.2'],BRIC_26_loop_150['Mf'],color=color_150_hex,label='150 K')\nplt.plot(BRIC_26_loop_100['X.2'],BRIC_26_loop_100['Mf'],color=color_100_hex,label='100 K')\nplt.plot(BRIC_26_loop_50['X.2'],BRIC_26_loop_50['Mf'],color=color_50_hex,label='50 K')\nplt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-5,5)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.ylim(ymin, ymax)\nplt.legend()\nplt.xlabel('applied field (T)',fontsize=12)\nplt.ylabel('magnetization (Am$^2$/kg)',fontsize=12)\nplt.tick_params(axis='both', which='major', labelsize=12)\nplt.text(4.5,ymin*0.9,'BRIC 26',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18)\n\n\n\nplt.subplot(2,3,6)\n#plt.text(4.5,0.035,'$M_s$',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18,color='grey')\n#plt.arrow(4.8, np.max(BRIC_26_loop_300['Mf']), 0, (np.max(BRIC_26_loop_50['Mf'])-np.max(BRIC_26_loop_300['Mf']))*0.9,\n# head_width=0.2, head_length=0.003,length_includes_head=True,linewidth=3,color='grey')\nplt.plot(BRIC_26_loop_300['X.2'],BRIC_26_loop_300['Mf'],'o-',color=color_300_hex,label='300 K')\nplt.plot(BRIC_26_loop_250['X.2'],BRIC_26_loop_250['Mf'],'o-',color=color_250_hex,label='250 
K')\nplt.plot(BRIC_26_loop_200['X.2'],BRIC_26_loop_200['Mf'],'o-',color=color_200_hex,label='200 K')\nplt.plot(BRIC_26_loop_150['X.2'],BRIC_26_loop_150['Mf'],'o-',color=color_150_hex,label='150 K')\nplt.plot(BRIC_26_loop_100['X.2'],BRIC_26_loop_100['Mf'],'o-',color=color_100_hex,label='100 K')\nplt.plot(BRIC_26_loop_50['X.2'],BRIC_26_loop_50['Mf'],'o-',color=color_50_hex,label='50 K')\n#plt.hlines(0,-5,5,zorder=-1)\nplt.xlim(-.024,.024)\nplt.ylim(.0115,.0135)\nymin, ymax = plt.ylim()\nplt.vlines(0,ymin,ymax,zorder=-1)\nplt.legend()\nplt.xlabel('applied field (T)',fontsize=12)\nplt.ylabel('magnetization (Am$^2$/kg)',fontsize=12)\nplt.text(-0.0015,0.013,'$M_r$',horizontalalignment='right',verticalalignment='bottom',fontweight='bold',fontsize=18,color='grey')\nplt.arrow(-0.001, 0.0118, 0, 0.0013, width=0.0005,\n head_width=0.002, head_length=0.00016,\n length_includes_head=True,linewidth=0.5,color='grey')\nplt.tick_params(axis='both', which='major', labelsize=12)\n\n\nplt.tight_layout()\nplt.savefig('low_temp_loops.pdf')", "_____no_output_____" ], [ "BRIC_20_loop_300", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec827cf2fa49644de5571e4f21317343e78b99c0
47,227
ipynb
Jupyter Notebook
convolutional_autoencoder.ipynb
mendelson/autoencoders
647d830d7903c8ab91871558ff39bcfcf0607c26
[ "MIT" ]
null
null
null
convolutional_autoencoder.ipynb
mendelson/autoencoders
647d830d7903c8ab91871558ff39bcfcf0607c26
[ "MIT" ]
null
null
null
convolutional_autoencoder.ipynb
mendelson/autoencoders
647d830d7903c8ab91871558ff39bcfcf0607c26
[ "MIT" ]
null
null
null
123.955381
28,064
0.846677
[ [ [ "# Convolutional Autoencoder\n\nSticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. I'll build a convolutional autoencoder to compress the MNIST dataset. \n\n>The encoder portion will be made of convolutional and pooling layers and the decoder will be made of **transpose convolutional layers** that learn to \"upsample\" a compressed representation.", "_____no_output_____" ] ], [ [ "import torch\nimport numpy as np\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\n\n# convert data to torch.FloatTensor\ntransform = transforms.ToTensor()\n\n# load the training and test datasets\ntrain_data = datasets.MNIST(root='data', train=True,\n download=True, transform=transform)\ntest_data = datasets.MNIST(root='data', train=False,\n download=True, transform=transform)", "_____no_output_____" ], [ "# Create training and test dataloaders\n\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 20\n\n# prepare data loaders\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)", "_____no_output_____" ] ], [ [ "### Visualize the Data", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n \n# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# get one image from the batch\nimg = np.squeeze(images[0])\n\nfig = plt.figure(figsize = (5,5)) \nax = fig.add_subplot(111)\nax.imshow(img, cmap='gray')", "_____no_output_____" ] ], [ [ "---\n## Convolutional Autoencoder\n\n#### Encoder\nThe encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. \n\n#### Decoder\n\nThe decoder needs to convert from a narrow representation to a wide, reconstructed image. For example, the representation could be a 7x7x4 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the compressed representation.\n\nHere, my final encoder layer has size 7x7x4 = 196. The original images have size 28x28 = 784, so the encoded vector is 25% the size of the original image.\n\n### Transpose Convolutions, Decoder\n\nThis decoder uses **transposed convolutional** layers to increase the width and height of the input layers. They work almost exactly the same as convolutional layers, but in reverse. A stride in the input layer results in a larger stride in the transposed convolution layer. For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. 
Comparatively, one unit in the input layer will be expanded to a 3x3 patch in a transposed convolution layer.", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n\n# define the NN architecture\nclass ConvAutoencoder(nn.Module):\n def __init__(self):\n super(ConvAutoencoder, self).__init__()\n ## encoder layers ##\n # conv layer (depth from 1 --> 16), 3x3 kernels\n self.conv1 = nn.Conv2d(1, 16, 3, padding=1) \n # conv layer (depth from 16 --> 4), 3x3 kernels\n self.conv2 = nn.Conv2d(16, 4, 3, padding=1)\n # pooling layer to reduce x-y dims by two; kernel and stride of 2\n self.pool = nn.MaxPool2d(2, 2)\n \n ## decoder layers ##\n ## a kernel of 2 and a stride of 2 will increase the spatial dims by 2\n self.t_conv1 = nn.ConvTranspose2d(4, 16, 2, stride=2)\n self.t_conv2 = nn.ConvTranspose2d(16, 1, 2, stride=2)\n\n\n def forward(self, x):\n ## encode ##\n x = F.relu(self.conv1(x))\n x = self.pool(x)\n # add second hidden layer\n x = F.relu(self.conv2(x))\n x = self.pool(x) # compressed representation\n \n ## decode ##\n ## apply ReLu to all hidden layers *except for the output layer\n ## apply a sigmoid to the output layer\n # add transpose conv layers, with relu activation function\n x = F.relu(self.t_conv1(x))\n # output layer (with sigmoid for scaling from 0 to 1)\n x = F.sigmoid(self.t_conv2(x))\n \n return x\n\n# initialize the NN\nmodel = ConvAutoencoder()\nprint(model)", "ConvAutoencoder(\n (conv1): Conv2d(1, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (conv2): Conv2d(16, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (t_conv1): ConvTranspose2d(4, 16, kernel_size=(2, 2), stride=(2, 2))\n (t_conv2): ConvTranspose2d(16, 1, kernel_size=(2, 2), stride=(2, 2))\n)\n" ] ], [ [ "---\n## Training\n\nHere I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss and the test loss afterwards. \n\nI'm not concerned with labels in this case, just images, which we can get from the `train_loader`. Because I'm comparing pixel values in input and output images, it will be best to use a loss that is meant for a regression task. Regression is all about comparing quantities rather than probabilistic values. So, in this case, I'll use `MSELoss`, and compare output images and input images as follows:\n```\nloss = criterion(outputs, images)\n```\n\nOtherwise, this is pretty straightforward training with PyTorch. 
Since this is a convolutional autoencoder, our images _do not_ need to be flattened before being passed in as an input to our model.", "_____no_output_____" ] ], [ [ "# specify loss function\ncriterion = nn.MSELoss()\n\n# specify optimizer\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)", "_____no_output_____" ], [ "# number of epochs to train the model\nn_epochs = 30\n\nfor epoch in range(1, n_epochs+1):\n # monitor training loss\n train_loss = 0.0\n \n ###################\n # train the model #\n ###################\n for data in train_loader:\n # _ stands in for labels, here\n # no need to flatten images\n images, _ = data\n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n # forward pass: compute predicted outputs by passing inputs to the model\n outputs = model(images)\n # calculate the loss\n loss = criterion(outputs, images)\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n # perform a single optimization step (parameter update)\n optimizer.step()\n # update running training loss\n train_loss += loss.item()*images.size(0)\n \n # print avg training statistics \n train_loss = train_loss/len(train_loader)\n print('Epoch: {} \\tTraining Loss: {:.6f}'.format(\n epoch, \n train_loss\n ))", "/home/mendelson/miniconda3/lib/python3.7/site-packages/torch/nn/functional.py:1351: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n" ] ], [ [ "## Checking out the results\n\nBelow I've plotted some of the test images along with their reconstructions. These look a little rough around the edges, likely due to the checkerboard effect we mentioned above that tends to happen with transpose layers.", "_____no_output_____" ] ], [ [ "# obtain one batch of test images\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\n\n# get sample outputs\noutput = model(images)\n# prep images for display\nimages = images.numpy()\n\n# output is resized into a batch of images\noutput = output.view(batch_size, 1, 28, 28)\n# use detach when it's an output that requires_grad\noutput = output.detach().numpy()\n\n# plot the first ten input images and then reconstructed images\nfig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(25,4))\n\n# input images on top row, reconstructions on bottom\nfor images, row in zip([images, output], axes):\n for img, ax in zip(images, row):\n ax.imshow(np.squeeze(img), cmap='gray')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
ec82861bbc19f026b65e615da0fa3607399aeb9f
13,474
ipynb
Jupyter Notebook
00-Introduction.ipynb
robbieball3/Tutorials
577b63c9668e4c0dd8299b3617e4bae19d017ec8
[ "MIT" ]
1
2021-07-21T15:03:36.000Z
2021-07-21T15:03:36.000Z
00-Introduction.ipynb
BaichuanTang/Tutorials
577b63c9668e4c0dd8299b3617e4bae19d017ec8
[ "MIT" ]
null
null
null
00-Introduction.ipynb
BaichuanTang/Tutorials
577b63c9668e4c0dd8299b3617e4bae19d017ec8
[ "MIT" ]
null
null
null
29.743929
270
0.576889
[ [ [ "# Data Science in Practice\n\nWelcome to the hands on materials for the Data Science in Practice class.\n\nThis notebook will guide through getting the tools you will need for working with these tutorials and assignments.", "_____no_output_____" ], [ "## What do you need for these tutorials?\n\n### Software\n\n- Working install of python3.6, with the anaconda distribution ([datahub](http://datahub.ucsd.edu) satisfies this requirement)\n- Jupyter Notebooks ([datahub](http://datahub.ucsd.edu) satisfies this requirement)\n- git/GitHub", "_____no_output_____" ], [ "### Prerequisites\n\nThis class, and this series of tutorials, presumes that you already have some basic knowledge of programming.\n\nIn particular it assumes some knowledge Python, covering the standard library. \n\nIf you are somewhat unfamiliar with Python, you can follow the links in the Python notebook to catch up. \n\n### Computational Resources\n\nThe examples throughout these tutorials, and in the assignments are not computationally heavy. \n\nYou should be able to run all these materials on any computer you have access to, assuming it will run the aforementioned tools. ", "_____no_output_____" ], [ "## Tools\n\nThe following are a series of tools that you will need for this class", "_____no_output_____" ], [ "<img src=\"img/jupyter.png\" width=\"300px\">", "_____no_output_____" ], [ "<div class=\"alert alert-success\">\nJupyter notebooks are a way to intermix code, outputs and plain text. \nThey run in a web browser, and connect to a kernel to be able to execute code. \n</div>\n\n<div class=\"alert alert-info\">\nThe official Jupyter website is available \n<a href=\"http://jupyter.org\" class=\"alert-link\">here</a>.\n</div>\n\n<hr>", "_____no_output_____" ], [ "Note that you do not need to download Jupyter separately, as it comes packaged with anaconda, described below. \n\n<hr>", "_____no_output_____" ], [ "<img src=\"img/NBViewer.png\" width=\"800px\">", "_____no_output_____" ], [ "<div class=\"alert alert-success\">\nNotebooks can be rendered on webpages, and shared with others. NBViewer is a tool to host and render notebooks.\n</div>\n\n<div class=\"alert alert-info\">\nNBViewer is available \n<a href=\"https://nbviewer.jupyter.org/\" class=\"alert-link\">here</a>.\n</div>", "_____no_output_____" ], [ "Note that NBViewer is not a tool that you need to download, or necessarily use at all, it is simply a useful tool available online to view notebooks.\n\n<hr>", "_____no_output_____" ], [ "<img src=\"img/anaconda.png\" width=\"450px\">", "_____no_output_____" ], [ "<div class=\"alert alert-success\">\nAnaconda is an open-source distribution of Python, designed for scientific computing, data science and machine learning. \n</div>\n\n<div class=\"alert alert-info\">\nThe anaconda website is \n<a href=\"https://www.anaconda.com\" class=\"alert-link\">here</a>,\nwith the download page\n<a href=\"https://www.anaconda.com\" class=\"alert-link\">here</a>.\n</div>", "_____no_output_____" ], [ "Anaconda itself is a distribution, that is, a collection of packages that are curated and maintained together, and serve as powerful. \n\nAnaconda also comes with conda, which is a package manager, allowing you to download, install, and manage other packages. \n\nThe anaconda distribution includes all packages that are required for these tutorials.", "_____no_output_____" ], [ "Notes\n-----\n- If you are on Mac, you have a native installation of python. 
This native installation of Python may be older, will not include the extra packages that you will need for this class, and is best left untouched. \n - Downloading anaconda will install a separate, independent install of Python, leaving your native install untouched. \n- Windows does not require Python natively and so it is not typically pre-installed. \n- If you want a local copy (which is a good idea going forward!), follow this tutorial step-by-step. Alternatively, you can use [datahub](http://datahub.ucsd.edu) for everything in this course.", "_____no_output_____" ] ], [ [ "# You can check which python you are using, and what version it is.\n# Once you have installed anaconda, you should see you are using Python in your anaconda folder\n# Make sure that the version you have is 3.6 (or at least 3.X)\n# Note: these are command-line functions that may not work on windows\n!which python\n!python --version", "/anaconda3/bin/python\nPython 3.6.8 :: Anaconda, Inc.\n" ] ], [ [ "<img src=\"img/git.png\" width=\"400px\">", "_____no_output_____" ], [ "<img src=\"img/github.png\" width=\"400px\">", "_____no_output_____" ], [ "<div class=\"alert alert-success\">\nGit is a tool, a software package, for version control. Github is an online hosting service that can be used with git, and offers online tools to use git. \n</div>\n\n<div class=\"alert alert-info\">\nInstall \n<a href=\"https://git-scm.com/book/en/v2/Getting-Started-Installing-Git\" class=\"alert-link\">git</a>,\nif you don't already have it, and create an account on \n<a href=\"https://github.com/\" class=\"alert-link\">Github</a>.\n</div>\n\nGit & GitHub are not the same thing, though, in practice, they are commonly used together, whereby git is used as a tool to version control code and manage multiple copies stored across your computer, as well as on remote repositories that are stored on Github.\n\nNote that while GitHub is a private company, git is an open-source tool, and can be used independent of GitHub.", "_____no_output_____" ] ], [ [ "# Check that you have git installed (which version doesn't really matter)\n!git --version", "git version 2.14.3 (Apple Git-98)\r\n" ] ], [ [ "<hr>\n<img src=\"img/sourcetree.png\" width=\"500px\">", "_____no_output_____" ], [ "<div class=\"alert alert-success\">\nSource Tree is a free graphical user interface (GUI) for managing repositories with git & Github. \n</div>\n\n<div class=\"alert alert-info\">\nSource Tree is available \n<a href=\"https://www.sourcetreeapp.com\" class=\"alert-link\">here</a>.\nYou will need an account on \n<a href=\"https://www.atlassian.com\" class=\"alert-link\">Atlassian</a>,\nwho make Source Tree, but this is free.\n</div>\n\nYou don't need to use SourceTree (or any other GUI) if you know, or want to learn to use git from the command line.", "_____no_output_____" ], [ "## Environments", "_____no_output_____" ], [ "<div class=\"alert alert-success\">\nEnvironments are isolated, independent installations of a programming language and groups of packages, that don't interfere with each other. \n</div>\n\n<div class=\"alert alert-info\">\nAnaconda has detailed instructions on using environments available \n<a href=\"https://conda.io/docs/using/envs.html\" class=\"alert-link\">here</a>.\n</div>", "_____no_output_____" ], [ "You do not need to use environments, however you may find it useful if you want or need to maintain multiple different versions of Python. 
\n\nIf you want to use an environment, and already have conda, you can run this command from the command line: <br>\n$ conda create --name *envname* python=3.6 anaconda <br>\n^ Replace '*envname*' with a name to call this environment.<br>\nThis will install a new environment, with Python 3.6 and the anaconda distribution.\n\nYou will then need to activate this environment every time you want to use it. \n\nTo activate your environment: <br>\n$ source activate *envname*\n\nTo deactivate your environment: <br>\n$ source deactivate *envname*", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec829afd8455c90ea380a65a2fb0d50ffca45f83
351,565
ipynb
Jupyter Notebook
histogram-filter/histogram_filter_lesson.ipynb
mithi/some-udacity-projects
59020af09bbb3f5db3a64cc2fee09951027c478f
[ "MIT" ]
12
2018-08-03T06:46:52.000Z
2022-02-26T12:45:58.000Z
histogram-filter/histogram_filter_lesson.ipynb
mithi/some-udacity-projects
59020af09bbb3f5db3a64cc2fee09951027c478f
[ "MIT" ]
1
2018-12-12T07:07:53.000Z
2018-12-12T07:07:53.000Z
histogram-filter/histogram_filter_lesson.ipynb
mithi/some-udacity-projects
59020af09bbb3f5db3a64cc2fee09951027c478f
[ "MIT" ]
8
2018-08-03T06:46:26.000Z
2020-07-07T15:41:47.000Z
385.065717
17,488
0.940853
[ [ [ "#### (IMPORT STATEMENTS)", "_____no_output_____" ] ], [ [ "from jupyterthemes import get_themes\nimport jupyterthemes as jt\nfrom jupyterthemes.stylefx import set_nb_theme\nfrom jupyterthemes import jtplot\n\n# jt -t onedork -tf latosans -tfs 15 -lineh 170 -ofs 14\n# onedork | grade3 | oceans16 | chesterish | monokai | solarizedl | solarizedd\nset_nb_theme('onedork')\njtplot.style(theme='onedork')\njtplot.style(ticks=True, grid=False)\nimport matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline", "_____no_output_____" ] ], [ [ "---\n# LOCALIZATION \n---", "_____no_output_____" ], [ "- Give the robot the ability to locate itself in the world ", "_____no_output_____" ], [ "GIVEN: \n - A true (and non-changing) map of the world\n - Sensor measurement at a particular time\n - Motion control at a particular time\n\nFIND: \n - In the world map at a particular time, where am I?", "_____no_output_____" ], [ "---\n# HISTOGRAM FILTERS\n### (A.K.A MONTE-CARLO LOCALIZATION)\n---", "_____no_output_____" ], [ "- Repetition of sensing and moving\n- Updates belief about robot's location over possible locations \n- Mathematically, updates probability distribution over sample space", "_____no_output_____" ], [ "![](./img/hf-summary.png)", "_____no_output_____" ], [ "---\n---\n## INTUITION\n---\n---", "_____no_output_____" ], [ "GIVEN: \n- A one-dimensional known map \n- You are a clueless robot! (Atleast for now!) \n\nTASK:\n- Try to locate yourself within the known map! ", "_____no_output_____" ], [ "![](./img/hf-intuition.png)", "_____no_output_____" ], [ "If your histogram filter is working properly:\n 1. There should be ONE SHARP PEAK \n 2. That SHARP PEAK should be correct\n\nIn other words, we should be very certain about where we are!", "_____no_output_____" ], [ "1. Start with an initial belief: MAXIMUM CONFUSION!\n2. THEN: \n - SENSE -> Update belief (we become more certain)\n - MOVE -> Update belief (we become less certain)\n - REPEAT!", "_____no_output_____" ], [ "----\n----\n## SENSE STEP\n----\n----", "_____no_output_____" ], [ "- Calculate the probability of where we are, given a sensor measurement\n- Given a simplified cyclical world of green and red cells\n- If our sensor measurement is the same as the color of the cell, multiply by a large factor\n- Otherwise, multiply by a small factor\n- Normalize so that the sum of all cells equals one. 
", "_____no_output_____" ], [ "![SENSING](./img/sensing.png)", "_____no_output_____" ], [ "----\n----\n## MOVE STEP\n----\n----", "_____no_output_____" ], [ "### THE CASE OF EXACT MOTION", "_____no_output_____" ], [ "- Given that you have the respective probability for each grid cell, where the sum of all probably is equal to one, IE `P1 + P2 + P3 + P4 + P5 = 1`\n\n```\n....................................\n| P1 | P2 | P3 | P4 | P5 |\n....................................\n```\n\n- If you have exact motion, and you move exactly one grid cell to the right, the probablities will just shift accordingly.\n\n```\n....................................\n| P5 | P1 | P2 | P3 | P4 |\n....................................\n```\n- Similarly, if you move exactly two grid cells to the left, the probablities will just shift accordingly.\n\n```\n....................................\n| P3 | P4 | P5 | P1 | P2 |\n....................................\n```", "_____no_output_____" ], [ "### THE CASE OF INEXACT MOTION", "_____no_output_____" ], [ "![](./img/1-move.png) \n![](./img/2-move.png)\n![](./img/3-move.png)\n![](./img/4-move.png)\n", "_____no_output_____" ], [ "#### (HELPER FUNCTIONS)", "_____no_output_____" ] ], [ [ "# prints probability for each cell in a nice format\ndef formatted_print(s, l):\n s = str(s)\n print('{:15}: '.format(s), end = \"\")\n \n for i in range(len(l)):\n print(\"{:.3f} |\".format(l[i]), \" \", end = \"\")\n\n print()", "_____no_output_____" ], [ "# shows bar chart of probabilities \ndef plot_probability(probability, world):\n indices = np.arange(5)\n plt.figure(figsize=(20,10))\n plt.bar(indices, probability, align='center', color ='#947CB0')\n plt.xticks(indices, world, fontsize = 30)\n plt.ylabel('PROBABILITY', fontsize = 30)\n plt.ylim((0, 1.0))\n plt.show()", "_____no_output_____" ], [ "# Test function if they're working\ndef visualize(probability, world, label):\n plot_probability(probability, world)\n formatted_print(label, probability)\n \n \nprobability1 = np.array([0.1, 0.2, 0.3, 0.0, 0.4])\nprobability2 = np.array([0.4, 0.0, 0.2, 0.4, 0.3])\nworld = ('GREEN', 'RED', 'RED', 'GREEN', 'GREEN')\n\nvisualize(probability1, world, \"Test1\")\nvisualize(probability2, world, \"Test2\")\n", "_____no_output_____" ] ], [ [ "\n# 1D HISTOGRAM FILTER CLASS\n", "_____no_output_____" ] ], [ [ "class HistogramFilter1D:\n def __init__(self, world_map, hit_weight = 0.6, miss_weight = 0.2, \n undershoot_prob = 0.1, overshoot_prob = 0.1, exact_prob = 0.8):\n \n self.hit_weight = hit_weight\n self.miss_weight = miss_weight\n \n self.world_map = world_map\n self.world_size = len(world_map)\n \n # Current is list of probability for each cell\n # Current distribution\n self.current = self.confuse() \n \n self.undershoot_prob = undershoot_prob\n self.overshoot_prob = overshoot_prob\n self.exact_prob = exact_prob\n\n def set_current(self, prob):\n self.current = prob\n\n def get_current(self):\n return self.current \n\n def confuse(self):\n self.current = [1 / self.world_size] * self.world_size\n return self.current\n \n def sense(self, value):\n \n def normalize(x):\n s = sum(x)\n for i in range(len(x)):\n x[i] = x[i] / s\n return x\n \n for i in range(self.world_size):\n if value == self.world_map[i]:\n self.current[i] = self.hit_weight * self.current[i]\n else:\n self.current[i] = self.miss_weight * self.current[i]\n \n self.current = normalize(self.current)\n return self.current\n\n def move(self, steps): # cyclical world\n \n temp = [0] * self.world_size\n \n for position in 
range(self.world_size):\n val = self.current[position] \n loc_exact = (position + steps) % self.world_size\n loc_over = (position + steps + 1) % self.world_size\n loc_under = (position + steps - 1) % self.world_size\n \n temp[loc_exact] += val * self.exact_prob\n temp[loc_over] += val * self.overshoot_prob\n temp[loc_under] += val * self.undershoot_prob\n \n self.current = temp \n return temp", "_____no_output_____" ] ], [ [ "#### (INITIALIZE OUR 1D FILTER)", "_____no_output_____" ] ], [ [ "world = ['green', 'red', 'red', 'green', 'green']\nrobot = HistogramFilter1D(world_map = world, \n hit_weight = 0.6, miss_weight = 0.2,\n exact_prob = 0.8, undershoot_prob = 0.1, overshoot_prob = 0.1)", "_____no_output_____" ] ], [ [ "# (EXAMPLE 1)\n- Given we don't know anything. \n- First we sensed red, then without moving, we sensed green ", "_____no_output_____" ] ], [ [ "robot.confuse()\nvisualize(robot.current, world, \"initial\")\nvisualize(robot.sense('red'), world, \"sensed red:\")\nvisualize(robot.sense('green'), world, \"sensed green:\")", "_____no_output_____" ] ], [ [ "# (EXAMPLE 2)\n- Given we are 100% sure that we are on the second cell\n- Then we moved one step\n- Then after we moved 1000 steps", "_____no_output_____" ] ], [ [ "robot.set_current([0, 1.0, 0, 0, 0])\nvisualize(robot.current, world, \"initial\")\nvisualize(robot.move(1), world, \"moved one step\")\n\nfor i in range(5):\n robot.move(1)\n\nvisualize(robot.move(1), world, \"moved 5 one steps\")\n\nfor i in range(50):\n robot.move(1)\n\nvisualize(robot.move(1), world, \"moved 50 step\")", "_____no_output_____" ] ], [ [ "# (EXAMPLE 3)\n- Given we don't know where we are\n- Then we sensed red\n- Then we moved one step\n- Then we sensed green\n- Then we moved one step", "_____no_output_____" ] ], [ [ "visualize(robot.confuse(), world, \"initial\")\nvisualize(robot.sense('red'), world, \"sensed red\")\nvisualize(robot.move(1), world, \"moved 1 step\")\nvisualize(robot.sense('green'), world, \"sensed green\")\nvisualize(robot.move(1), world, \"moved 1 step\")\n", "_____no_output_____" ] ], [ [ "# (EXAMPLE 4)\n- Given we don't know where we are\n- Then we sensed red \n- Then we moved one step\n- Then we sensed red again \n- Then we moved one more step", "_____no_output_____" ] ], [ [ "visualize(robot.confuse(), world, \"initial\")\nvisualize(robot.sense('red'), world, \"sensed red\")\nvisualize(robot.move(1), world, \"moved 1 step\")\nvisualize(robot.sense('red'), world, \"sensed red\")\nvisualize(robot.move(1), world, \"moved 1 step\")\n", "_____no_output_____" ] ], [ [ "# (OTHER EXAMPLES)\n- You can also try out the other scenarios, and check if it makes sense", "_____no_output_____" ] ], [ [ "# 1\nrobot.set_current([0, 0.5, 0.5, 0, 0])\nformatted_print(\"initial\", robot.get_current())\nformatted_print(\"moved one step\", robot.move(2))", "initial : 0.000 | 0.500 | 0.500 | 0.000 | 0.000 | \nmoved one step : 0.050 | 0.000 | 0.050 | 0.450 | 0.450 | \n" ], [ "robot.set_current([0, 0.5, 0, 0.5, 0])\nformatted_print(\"initial\", robot.get_current())\nformatted_print(\"moved one step\", robot.move(2))", "initial : 0.000 | 0.500 | 0.000 | 0.500 | 0.000 | \nmoved one step : 0.400 | 0.050 | 0.050 | 0.400 | 0.100 | \n" ], [ "robot.set_current([0, 0.25, 0, 0.75, 0])\nformatted_print(\"initial\", robot.get_current())\nformatted_print(\"moved one step\", robot.move(2))", "initial : 0.000 | 0.250 | 0.000 | 0.750 | 0.000 | \nmoved one step : 0.600 | 0.075 | 0.025 | 0.200 | 0.100 | \n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec82b635bb426aa03f3854889c4b07840ac2d587
992
ipynb
Jupyter Notebook
Car_Price_prediction_.ipynb
majithecoder/Python-Portfolio
bed3bb0f6d0ad18a31217fde8f976b224026ee00
[ "Apache-2.0" ]
null
null
null
Car_Price_prediction_.ipynb
majithecoder/Python-Portfolio
bed3bb0f6d0ad18a31217fde8f976b224026ee00
[ "Apache-2.0" ]
null
null
null
Car_Price_prediction_.ipynb
majithecoder/Python-Portfolio
bed3bb0f6d0ad18a31217fde8f976b224026ee00
[ "Apache-2.0" ]
null
null
null
23.619048
247
0.513105
[ [ [ "<a href=\"https://colab.research.google.com/github/majithecoder/Python-Portfolio/blob/main/Car_Price_prediction_.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
ec82d1f1bb96b56af8bb3e1e67007740d00d4c95
8,512
ipynb
Jupyter Notebook
VideoColorizerColab.ipynb
Team-16-B/VintageColorizer
80c502fae32f5d4ca92727f2ffa63a48331bd8a8
[ "MIT" ]
1
2021-06-18T09:16:58.000Z
2021-06-18T09:16:58.000Z
VideoColorizerColab.ipynb
Team-16-B/VintageColorizer
80c502fae32f5d4ca92727f2ffa63a48331bd8a8
[ "MIT" ]
null
null
null
VideoColorizerColab.ipynb
Team-16-B/VintageColorizer
80c502fae32f5d4ca92727f2ffa63a48331bd8a8
[ "MIT" ]
null
null
null
29.762238
592
0.603971
[ [ [ "<a href=\"https://colab.research.google.com/github/Team-16-B/VintageColorizer/blob/master/VideoColorizerColab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "### **<font color='blue'> Video Colorizer </font>**", "_____no_output_____" ], [ "#◢ Vintage Colorizer - Colorize your own videos!\n\n", "_____no_output_____" ], [ "\n\n---\n\n\n#◢ Verify Correct Runtime Settings\n\n**<font color='#FF000'> IMPORTANT </font>**\n\nIn the \"Runtime\" menu for the notebook window, select \"Change runtime type.\" Ensure that the following are selected:\n* Runtime Type = Python 3\n* Hardware Accelerator = GPU \n", "_____no_output_____" ], [ "#◢ Git clone and install ", "_____no_output_____" ] ], [ [ "git clone https://github.com/Team-16-B/VintageColorizer Vintage_Colorizer", "_____no_output_____" ], [ "cd Vintage_Colorizer", "_____no_output_____" ] ], [ [ "#◢ Setup", "_____no_output_____" ] ], [ [ "#NOTE: This must be the first call in order to work properly!\nfrom vintageColorizer import device\nfrom vintageColorizer.device_id import DeviceId\n#choices: CPU, GPU0...GPU7\ndevice.set(device=DeviceId.GPU0)\n\nimport torch\n\nif not torch.cuda.is_available():\n print('GPU not available.')\n\nfrom os import path", "_____no_output_____" ], [ "!pip install -r colab_requirements.txt", "_____no_output_____" ], [ "import fastai\nfrom vintageColorizer.visualize import *\nfrom pathlib import Path\ntorch.backends.cudnn.benchmark=True\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning, message=\".*?Your .*? set is empty.*?\")", "_____no_output_____" ], [ "!mkdir 'models'\n!wget https://data.deepai.org/deoldify/ColorizeVideo_gen.pth -O ./models/ColorizeVideo_gen.pth", "_____no_output_____" ], [ "!wget https://media.githubusercontent.com/media/Team-16-B/VintageColorizer/master/resource_images/watermark.png -O ./resource_images/watermark.png", "_____no_output_____" ], [ "colorizer = get_video_colorizer()", "_____no_output_____" ] ], [ [ "#◢ Instructions", "_____no_output_____" ], [ "### source_url\nType in a url hosting a video from YouTube, Imgur, Twitter, Reddit, Vimeo, etc. Many sources work! GIFs also work. Full list here: https://ytdl-org.github.io/youtube-dl/supportedsites.html NOTE: If you want to use your own video, upload it first to a site like YouTube. \n\n### render_factor\nThe default value of 21 has been carefully chosen and should work -ok- for most scenarios (but probably won't be the -best-). This determines resolution at which the color portion of the video is rendered. Lower resolution will render faster, and colors also tend to look more vibrant. Older and lower quality film in particular will generally benefit by lowering the render factor. Higher render factors are often better for higher quality videos and inconsistencies (flashy render) will generally be reduced, but the colors may get slightly washed out.\n\n### watermarked\nSelected by default, this places a watermark icon of a palette at the bottom left corner of the image. This is intended to be a standard way to convey to others viewing the image that it is colorized by AI. We want to help promote this as a standard, especially as the technology continues to improve and the distinction between real and fake becomes harder to discern.\n\n### How to Download a Copy\nSimply right click on the displayed video and click \"Save video as...\"!\n\n## Pro Tips\n1. 
If a video takes a long time to render and you're wondering how well the frames will actually be colorized, you can preview how well the frames will be rendered at each render_factor by using the code at the bottom. Just stop the video rendering by hitting the stop button on the cell, then run that bottom cell under \"See how well render_factor values perform on a frame here\". It's not perfect and you may still need to experiment a bit, especially when it comes to figuring out how to reduce frame inconsistency. But it'll go a long way in narrowing down what actually works.\n2. If videos are taking way too much time for your liking, running the Jupyter notebook VideoColorizer.ipynb on your own machine will generally be much faster (as long as you have the hardware for it). \n3. Longer videos (running multiple minutes) are going to have a rough time on Colabs.\n\nIf a video you downloaded doesn't play, it's probably because the cell didn't complete processing and the video is in a half-finished state.", "_____no_output_____" ], [ "#◢ Colorize!!", "_____no_output_____" ] ], [ [ "source_url = '' #@param {type:\"string\"}\nrender_factor = 21  #@param {type: \"slider\", min: 5, max: 40}\nwatermarked = True  #@param {type:\"boolean\"}\n\nif source_url is not None and source_url !='':\n    video_path = colorizer.colorize_from_url(source_url, 'video.mp4', render_factor, watermarked=watermarked)\n    show_video_in_notebook(video_path)\nelse:\n    print('Provide a video url and try again.')", "_____no_output_____" ] ], [ [ "## See how well render_factor values perform on a frame here", "_____no_output_____" ] ], [ [ "for i in range(10,40,2):\n    colorizer.vis.plot_transformed_image('video/bwframes/video/00001.jpg', render_factor=i, display_render_factor=True, figsize=(8,8))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec82d9df7bf88f991b5a361d1fa0d736e4170b7e
52,695
ipynb
Jupyter Notebook
Tuto-GUDHI-simplicial-complexes-from-distance-matrix.ipynb
raphaeltinarrage/TDA-tutorial
5cb074d80908b1c9946095793876447a8018395e
[ "MIT" ]
null
null
null
Tuto-GUDHI-simplicial-complexes-from-distance-matrix.ipynb
raphaeltinarrage/TDA-tutorial
5cb074d80908b1c9946095793876447a8018395e
[ "MIT" ]
null
null
null
Tuto-GUDHI-simplicial-complexes-from-distance-matrix.ipynb
raphaeltinarrage/TDA-tutorial
5cb074d80908b1c9946095793876447a8018395e
[ "MIT" ]
null
null
null
45.309544
19,956
0.643401
[ [ [ "# TDA with Python using the Gudhi Library \n\n# Building simplicial complexes from distance matrices", "_____no_output_____" ], [ "**Authors :** F. Chazal and B. Michel", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport pickle as pickle\nimport gudhi as gd \nfrom pylab import *\n%matplotlib inline", "_____no_output_____" ] ], [ [ "TDA typically aims at extracting topological signatures from a point cloud in $\\mathbb R^d$ or in a general metric space. [Simplicial complexes](https://en.wikipedia.org/wiki/Simplicial_complex) are used in computational geometry to infer topological signatures from data.\n\nThis tutorial explains how to build [Vietoris-Rips complexes](https://en.wikipedia.org/wiki/Vietoris%E2%80%93Rips_complex) and [alpha complexes](https://en.wikipedia.org/wiki/Alpha_shape#Alpha_complex) from a matrix of pairwise distances.\n\n\n\n### Vietoris-Rips filtration defined from a matrix of distances \n\nThe [$\\alpha$-Rips complex](https://en.wikipedia.org/wiki/Vietoris%E2%80%93Rips_complex) of a metric space $\\mathbb X$ is an [abstract simplicial complex](https://en.wikipedia.org/wiki/Abstract_simplicial_complex) that can be defined by forming a simplex for every finite subset of $\\mathbb X$ that has diameter at most $\\alpha$. \n\n![title](https://upload.wikimedia.org/wikipedia/commons/thumb/d/d0/VR_complex.svg/600px-VR_complex.svg.png)\n\n \n\n\n### Protein binding dataset\n\nThe data we study in this notebook represent configurations of protein binding. This example is borrowed from the paper a paper of Kovacev-Nikolic et.al [1](https://arxiv.org/pdf/1412.1394.pdf).\n\nThe paper compares closed and open forms of the maltose-binding protein (MBP), a large biomolecule consisting of 370 amino acid residues. The analysis is not based on geometric distances in $\\mathbb R^3$ but on a metric of *dynamical distances* defined by\n$$D_{ij} = 1 - |C_{ij}|,$$\nwhere $C$ is the correlation matrix between residues.\n\nCorrelation matrices between residues can be found at this [link](https://www.researchgate.net/publication/301543862_corr), we are greatful to the authors for sharing data !", "_____no_output_____" ], [ "The next statments load the fourteen correlation matrices with pandas:", "_____no_output_____" ] ], [ [ "path_file = \"./datasets/Corr_ProteinBinding/\"\nfiles_list = [\n'1anf.corr_1.txt',\n'1ez9.corr_1.txt',\n'1fqa.corr_2.txt',\n'1fqb.corr_3.txt',\n'1fqc.corr_2.txt',\n'1fqd.corr_3.txt',\n'1jw4.corr_4.txt',\n'1jw5.corr_5.txt',\n'1lls.corr_6.txt',\n'1mpd.corr_4.txt',\n'1omp.corr_7.txt',\n'3hpi.corr_5.txt',\n'3mbp.corr_6.txt',\n'4mbp.corr_7.txt']\nlen(files_list)", "_____no_output_____" ], [ "corr_list = [pd.read_csv(path_file+u,\n header=None,\n delim_whitespace=True) for u in files_list]", "_____no_output_____" ] ], [ [ "The object <code>corr_list</code> is the list of the 14 correlation matrices. 
\nWe can iterate over the list to compute the matrix of distances associated with each configuration:", "_____no_output_____" ] ], [ [ "dist_list = [1- np.abs(c) for c in corr_list]", "_____no_output_____" ] ], [ [ "Let's print the first lines of the first distance matrix:", "_____no_output_____" ] ], [ [ "mat_dist0 = dist_list[0]\nmat_dist0.head()", "_____no_output_____" ] ], [ [ "### Vietoris-Rips filtration of Protein binding distance matrix", "_____no_output_____" ], [ "The <code>RipsComplex()</code> function creates a [one skeleton](https://en.wikipedia.org/wiki/N-skeleton) graph from the point cloud; see the [Documentation](http://gudhi.gforge.inria.fr/python/latest/rips_complex_user.html) for details on the syntax.", "_____no_output_____" ] ], [ [ "skeleton_protein = gd.RipsComplex(distance_matrix=mat_dist0.values,\n max_edge_length=0.8) ", "_____no_output_____" ] ], [ [ "The `max_edge_length` parameter is the maximal diameter: only the edges of length less than this value are included in the one skeleton graph. \n\nNext, we create the Rips simplicial complex from this one-skeleton graph. This is a filtered Rips complex whose filtration function is exactly the diameter of the simplices. We use the `create_simplex_tree()` function:", "_____no_output_____" ] ], [ [ "Rips_simplex_tree_protein = skeleton_protein.create_simplex_tree(max_dimension=2)", "_____no_output_____" ] ], [ [ "The `max_dimension` parameter is the maximum dimension of the simplices included in the filtration. The object returned by the function is a simplex tree, of dimension 2 in this example:", "_____no_output_____" ] ], [ [ "Rips_simplex_tree_protein.dimension()", "_____no_output_____" ] ], [ [ "We can use the functionalities of the simplex tree object to describe the Rips filtration.\nFor instance we can check that the 370 points of the first distance matrix are all vertices of the Vietoris-Rips filtration:", "_____no_output_____" ] ], [ [ "print(Rips_simplex_tree_protein.num_vertices())", "370\n" ] ], [ [ "The number of simplices in a Rips complex increases very fast with the number of points and the dimension. There are more than one million simplices in the Rips complex:", "_____no_output_____" ] ], [ [ "print(Rips_simplex_tree_protein.num_simplices())", "1626660\n" ] ], [ [ "Note that this is actually the number of simplices in the \"last\" Rips complex of the filtration, namely with parameter $\alpha=$ `max_edge_length=`0.8. 
", "_____no_output_____" ], [ "Let's compute the list of simplices in the Rips complex with the `get_filtration()` function:\n", "_____no_output_____" ] ], [ [ "filt_Rips_protein = list(Rips_simplex_tree_protein.get_filtration())\nprint(len(filt_Rips_protein))", "1626660\n" ], [ "for splx in filt_Rips_protein[0:400] :\n print(splx)", "([0], 0.0)\n([1], 0.0)\n([2], 0.0)\n([3], 0.0)\n([4], 0.0)\n([5], 0.0)\n([6], 0.0)\n([7], 0.0)\n([8], 0.0)\n([9], 0.0)\n([10], 0.0)\n([11], 0.0)\n([12], 0.0)\n([13], 0.0)\n([14], 0.0)\n([15], 0.0)\n([16], 0.0)\n([17], 0.0)\n([18], 0.0)\n([19], 0.0)\n([20], 0.0)\n([21], 0.0)\n([22], 0.0)\n([23], 0.0)\n([24], 0.0)\n([25], 0.0)\n([26], 0.0)\n([27], 0.0)\n([28], 0.0)\n([29], 0.0)\n([30], 0.0)\n([31], 0.0)\n([32], 0.0)\n([33], 0.0)\n([34], 0.0)\n([35], 0.0)\n([36], 0.0)\n([37], 0.0)\n([38], 0.0)\n([39], 0.0)\n([40], 0.0)\n([41], 0.0)\n([42], 0.0)\n([43], 0.0)\n([44], 0.0)\n([45], 0.0)\n([46], 0.0)\n([47], 0.0)\n([48], 0.0)\n([49], 0.0)\n([50], 0.0)\n([51], 0.0)\n([52], 0.0)\n([53], 0.0)\n([54], 0.0)\n([55], 0.0)\n([56], 0.0)\n([57], 0.0)\n([58], 0.0)\n([59], 0.0)\n([60], 0.0)\n([61], 0.0)\n([62], 0.0)\n([63], 0.0)\n([64], 0.0)\n([65], 0.0)\n([66], 0.0)\n([67], 0.0)\n([68], 0.0)\n([69], 0.0)\n([70], 0.0)\n([71], 0.0)\n([72], 0.0)\n([73], 0.0)\n([74], 0.0)\n([75], 0.0)\n([76], 0.0)\n([77], 0.0)\n([78], 0.0)\n([79], 0.0)\n([80], 0.0)\n([81], 0.0)\n([82], 0.0)\n([83], 0.0)\n([84], 0.0)\n([85], 0.0)\n([86], 0.0)\n([87], 0.0)\n([88], 0.0)\n([89], 0.0)\n([90], 0.0)\n([91], 0.0)\n([92], 0.0)\n([93], 0.0)\n([94], 0.0)\n([95], 0.0)\n([96], 0.0)\n([97], 0.0)\n([98], 0.0)\n([99], 0.0)\n([100], 0.0)\n([101], 0.0)\n([102], 0.0)\n([103], 0.0)\n([104], 0.0)\n([105], 0.0)\n([106], 0.0)\n([107], 0.0)\n([108], 0.0)\n([109], 0.0)\n([110], 0.0)\n([111], 0.0)\n([112], 0.0)\n([113], 0.0)\n([114], 0.0)\n([115], 0.0)\n([116], 0.0)\n([117], 0.0)\n([118], 0.0)\n([119], 0.0)\n([120], 0.0)\n([121], 0.0)\n([122], 0.0)\n([123], 0.0)\n([124], 0.0)\n([125], 0.0)\n([126], 0.0)\n([127], 0.0)\n([128], 0.0)\n([129], 0.0)\n([130], 0.0)\n([131], 0.0)\n([132], 0.0)\n([133], 0.0)\n([134], 0.0)\n([135], 0.0)\n([136], 0.0)\n([137], 0.0)\n([138], 0.0)\n([139], 0.0)\n([140], 0.0)\n([141], 0.0)\n([142], 0.0)\n([143], 0.0)\n([144], 0.0)\n([145], 0.0)\n([146], 0.0)\n([147], 0.0)\n([148], 0.0)\n([149], 0.0)\n([150], 0.0)\n([151], 0.0)\n([152], 0.0)\n([153], 0.0)\n([154], 0.0)\n([155], 0.0)\n([156], 0.0)\n([157], 0.0)\n([158], 0.0)\n([159], 0.0)\n([160], 0.0)\n([161], 0.0)\n([162], 0.0)\n([163], 0.0)\n([164], 0.0)\n([165], 0.0)\n([166], 0.0)\n([167], 0.0)\n([168], 0.0)\n([169], 0.0)\n([170], 0.0)\n([171], 0.0)\n([172], 0.0)\n([173], 0.0)\n([174], 0.0)\n([175], 0.0)\n([176], 0.0)\n([177], 0.0)\n([178], 0.0)\n([179], 0.0)\n([180], 0.0)\n([181], 0.0)\n([182], 0.0)\n([183], 0.0)\n([184], 0.0)\n([185], 0.0)\n([186], 0.0)\n([187], 0.0)\n([188], 0.0)\n([189], 0.0)\n([190], 0.0)\n([191], 0.0)\n([192], 0.0)\n([193], 0.0)\n([194], 0.0)\n([195], 0.0)\n([196], 0.0)\n([197], 0.0)\n([198], 0.0)\n([199], 0.0)\n([200], 0.0)\n([201], 0.0)\n([202], 0.0)\n([203], 0.0)\n([204], 0.0)\n([205], 0.0)\n([206], 0.0)\n([207], 0.0)\n([208], 0.0)\n([209], 0.0)\n([210], 0.0)\n([211], 0.0)\n([212], 0.0)\n([213], 0.0)\n([214], 0.0)\n([215], 0.0)\n([216], 0.0)\n([217], 0.0)\n([218], 0.0)\n([219], 0.0)\n([220], 0.0)\n([221], 0.0)\n([222], 0.0)\n([223], 0.0)\n([224], 0.0)\n([225], 0.0)\n([226], 0.0)\n([227], 0.0)\n([228], 0.0)\n([229], 0.0)\n([230], 0.0)\n([231], 0.0)\n([232], 0.0)\n([233], 0.0)\n([234], 0.0)\n([235], 0.0)\n([236], 0.0)\n([237], 
0.0)\n([238], 0.0)\n([239], 0.0)\n([240], 0.0)\n([241], 0.0)\n([242], 0.0)\n([243], 0.0)\n([244], 0.0)\n([245], 0.0)\n([246], 0.0)\n([247], 0.0)\n([248], 0.0)\n([249], 0.0)\n([250], 0.0)\n([251], 0.0)\n([252], 0.0)\n([253], 0.0)\n([254], 0.0)\n([255], 0.0)\n([256], 0.0)\n([257], 0.0)\n([258], 0.0)\n([259], 0.0)\n([260], 0.0)\n([261], 0.0)\n([262], 0.0)\n([263], 0.0)\n([264], 0.0)\n([265], 0.0)\n([266], 0.0)\n([267], 0.0)\n([268], 0.0)\n([269], 0.0)\n([270], 0.0)\n([271], 0.0)\n([272], 0.0)\n([273], 0.0)\n([274], 0.0)\n([275], 0.0)\n([276], 0.0)\n([277], 0.0)\n([278], 0.0)\n([279], 0.0)\n([280], 0.0)\n([281], 0.0)\n([282], 0.0)\n([283], 0.0)\n([284], 0.0)\n([285], 0.0)\n([286], 0.0)\n([287], 0.0)\n([288], 0.0)\n([289], 0.0)\n([290], 0.0)\n([291], 0.0)\n([292], 0.0)\n([293], 0.0)\n([294], 0.0)\n([295], 0.0)\n([296], 0.0)\n([297], 0.0)\n([298], 0.0)\n([299], 0.0)\n([300], 0.0)\n([301], 0.0)\n([302], 0.0)\n([303], 0.0)\n([304], 0.0)\n([305], 0.0)\n([306], 0.0)\n([307], 0.0)\n([308], 0.0)\n([309], 0.0)\n([310], 0.0)\n([311], 0.0)\n([312], 0.0)\n([313], 0.0)\n([314], 0.0)\n([315], 0.0)\n([316], 0.0)\n([317], 0.0)\n([318], 0.0)\n([319], 0.0)\n([320], 0.0)\n([321], 0.0)\n([322], 0.0)\n([323], 0.0)\n([324], 0.0)\n([325], 0.0)\n([326], 0.0)\n([327], 0.0)\n([328], 0.0)\n([329], 0.0)\n([330], 0.0)\n([331], 0.0)\n([332], 0.0)\n([333], 0.0)\n([334], 0.0)\n([335], 0.0)\n([336], 0.0)\n([337], 0.0)\n([338], 0.0)\n([339], 0.0)\n([340], 0.0)\n([341], 0.0)\n([342], 0.0)\n([343], 0.0)\n([344], 0.0)\n([345], 0.0)\n([346], 0.0)\n([347], 0.0)\n([348], 0.0)\n([349], 0.0)\n([350], 0.0)\n([351], 0.0)\n([352], 0.0)\n([353], 0.0)\n([354], 0.0)\n([355], 0.0)\n([356], 0.0)\n([357], 0.0)\n([358], 0.0)\n([359], 0.0)\n([360], 0.0)\n([361], 0.0)\n([362], 0.0)\n([363], 0.0)\n([364], 0.0)\n([365], 0.0)\n([366], 0.0)\n([367], 0.0)\n([368], 0.0)\n([369], 0.0)\n([289, 290], 0.014988169999999967)\n([45, 46], 0.016141790000000045)\n([70, 71], 0.016186290000000048)\n([211, 212], 0.019646410000000003)\n([48, 49], 0.02002598)\n([293, 294], 0.020196210000000048)\n([50, 51], 0.02128167000000003)\n([360, 361], 0.02139837)\n([101, 102], 0.021894140000000006)\n([43, 44], 0.022110460000000054)\n([135, 136], 0.022791349999999988)\n([240, 241], 0.023054660000000005)\n([82, 83], 0.02376383999999998)\n([355, 356], 0.023989579999999955)\n([231, 232], 0.024279770000000034)\n([350, 351], 0.02431161000000004)\n([238, 239], 0.02514183000000003)\n([16, 17], 0.025262099999999954)\n([297, 298], 0.025565999999999978)\n([287, 288], 0.025724140000000006)\n([284, 285], 0.026689060000000042)\n([46, 48], 0.027036240000000045)\n([10, 38], 0.027239420000000014)\n([347, 348], 0.027307149999999947)\n([216, 217], 0.02737215000000004)\n([10, 11], 0.027430060000000034)\n([303, 304], 0.02758413000000004)\n([72, 73], 0.02769299000000003)\n([362, 363], 0.027711200000000047)\n([21, 22], 0.028153890000000015)\n" ] ], [ [ "The integers represent the points in the metric space: the vertex [2] corresponds to the point decribed by the second raw (or the second column) in the distance matrix `mat_dist0`. \n\nThe filtration value is the diameter of the simplex, which is zero for the vertices of course. The first edge in the filtration is [289, 290], these two points are the two closest points according to `mat_dist0`, at distance 0.015 of each other. 
", "_____no_output_____" ], [ "### How to define an Alpha complex from a matrix of distance ?", "_____no_output_____" ], [ "The [alpha complex filtration](https://en.wikipedia.org/wiki/Alpha_shape#Alpha_complex) of a point cloud in $\\mathbb R^p$ is a filtered simplicial complex constructed from the finite cells of a [Delaunay Triangulation](https://en.wikipedia.org/wiki/Delaunay_triangulation).\n\nIn our case, the data does not belong to euclideen space $\\mathbb R^p$ and we are not in position to directly compute a Delaunay Triangulation in the metric space, using the pairwise distances.\n\nThe aim of [Multidimensional Scaling](https://en.wikipedia.org/wiki/Multidimensional_scaling) (MDS) methods is precisely to find a representation of $n$ points in a space $\\mathbb R^p$ that preserves as well as possible the pairwise distances between the $n$ points in the original metric space. The are several versions of MDS algorithms, the most popular ones are available in the [sckit-learn library](https://scikit-learn.org/stable/index.html), see this [documention](https://scikit-learn.org/stable/modules/generated/sklearn.manifold.MDS.html).\n\nLet's compute a (classical) MDS representation of the matrix `mat_dist0` in $\\mathbb R^3$:", "_____no_output_____" ] ], [ [ "from sklearn.manifold import MDS\n \nembedding = MDS(n_components=3,dissimilarity='precomputed')\nX_transformed = embedding.fit_transform(mat_dist0)\nX_transformed.shape", "_____no_output_____" ] ], [ [ "Now we can represent this configuration, for instance on the two first axes:", "_____no_output_____" ] ], [ [ "fig = plt.figure()\nplt.scatter(X_transformed[:, 0], X_transformed[:, 1],label='MDS');", "_____no_output_____" ] ], [ [ "Of course you should keep in mind that MDS provides an embedding of the data in $\\mathbb R^p$ that **approximatively** preserves the distance matrix.", "_____no_output_____" ], [ "The main advantage of Alpha complexes is that they contain less simplices than Rips complexes do and so it can be a better option than Rips complexes. As subcomplexes of the Delaunay Triangulation complex, an alpha complex is a geometric simpicial complex.\n\nThe `AlphaComplex()` function directly computes the simplex tree representing the Alpha complex:", "_____no_output_____" ] ], [ [ "alpha_complex = gd.AlphaComplex(points=X_transformed)", "_____no_output_____" ], [ "st_alpha = alpha_complex.create_simplex_tree()", "_____no_output_____" ] ], [ [ "The point cloud `X_transformed` belongs to $\\mathbb R^3$ and so does the Alpha Complex:", "_____no_output_____" ] ], [ [ "st_alpha.dimension()", "_____no_output_____" ] ], [ [ "As for the Rips complex, the 370 points are all vertices of the Alpha complex :", "_____no_output_____" ] ], [ [ "print(st_alpha.num_vertices())", "370\n" ] ], [ [ "Note that the number of simplexes in the Alpha complex is much smaller then for the Rips complex we computed before:\n", "_____no_output_____" ] ], [ [ "print(st_alpha.num_simplices())", "9253\n" ] ], [ [ "### References", "_____no_output_____" ], [ "[1] Using persistent homology and dynamical distances to analyze protein binding, V. Kovacev-Nikolic, P. Bubenik, D. Nikolic and G. Heo. Stat Appl Genet Mol Biol 2016 [arxiv link](https://arxiv.org/pdf/1412.1394.pdf).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec82e00973583f16e4ec6e64a2f5de603eb47e71
2,739
ipynb
Jupyter Notebook
100days/day 22 - determinant.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
13
2021-03-11T00:25:22.000Z
2022-03-19T00:19:23.000Z
100days/day 22 - determinant.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
160
2021-04-26T19:04:15.000Z
2022-03-26T20:18:37.000Z
100days/day 22 - determinant.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
12
2021-04-26T19:43:01.000Z
2022-01-31T08:36:29.000Z
20.139706
85
0.456371
[ [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "## algorithm", "_____no_output_____" ] ], [ [ "def determinant(x):\n if x.size == 1:\n return x[0, 0]\n \n # pivot\n i = np.abs(x[:, 0]).argmax()\n pivot = x[i, 0]\n if np.abs(pivot) < 1e-15:\n return 0\n \n # gauss elimination\n n = len(x)\n y = x - x[:, 0].reshape(n, 1) @ (x[i, :] / x[i, 0]).reshape(1, n)\n y = y[np.arange(n) != i, 1:]\n\n # recursion\n return pivot * (-1) ** (i % 2) * determinant(y)", "_____no_output_____" ] ], [ [ "## run", "_____no_output_____" ] ], [ [ "X = np.random.rand(5, 5) * 2 - 1\nX", "_____no_output_____" ], [ "determinant(X)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]