hexsha
stringlengths
40
40
size
int64
6
14.9M
ext
stringclasses
1 value
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
6
260
max_stars_repo_name
stringlengths
6
119
max_stars_repo_head_hexsha
stringlengths
40
41
max_stars_repo_licenses
sequence
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
6
260
max_issues_repo_name
stringlengths
6
119
max_issues_repo_head_hexsha
stringlengths
40
41
max_issues_repo_licenses
sequence
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
6
260
max_forks_repo_name
stringlengths
6
119
max_forks_repo_head_hexsha
stringlengths
40
41
max_forks_repo_licenses
sequence
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
avg_line_length
float64
2
1.04M
max_line_length
int64
2
11.2M
alphanum_fraction
float64
0
1
cells
sequence
cell_types
sequence
cell_type_groups
sequence
d064f5e9dc2955067a80d1f04ff4044b82f79ddc
33,648
ipynb
Jupyter Notebook
notebooks/indi_dev.ipynb
tepickering/sbigclient
60066c3f126671ac62470459200668af10c3ab80
[ "BSD-3-Clause" ]
3
2021-03-10T22:07:13.000Z
2021-06-28T03:38:22.000Z
notebooks/indi_dev.ipynb
tepickering/sbigclient
60066c3f126671ac62470459200668af10c3ab80
[ "BSD-3-Clause" ]
1
2021-10-01T16:10:56.000Z
2021-10-01T16:10:56.000Z
notebooks/indi_dev.ipynb
tepickering/sbigclient
60066c3f126671ac62470459200668af10c3ab80
[ "BSD-3-Clause" ]
3
2021-02-15T11:12:55.000Z
2021-09-30T20:12:45.000Z
48.068571
144
0.677811
[ [ [ "import sys\nimport time\nimport numpy as np\nimport matplotlib\nmatplotlib.use('nbagg')\nimport matplotlib.pyplot as plt\nfrom astropy import stats, visualization\n\nfrom indiclient.indicam import MATCam, F9WFSCam, CCDCam\nfrom camsrv.camsrv import CAMsrv\n\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "s = CAMsrv()", "[I 190311 13:12:18 indiclient:1235] INDISwitchVector: CCD Simulator CONNECTION Connection SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: CONNECT Connect Switch On\n[I 190311 13:12:18 indiclient:580] INDIElement: DISCONNECT Disconnect Switch Off\n[I 190311 13:12:18 indiclient:1104] INDIVector: CCD Simulator DRIVER_INFO Driver Info TextVector read only\n[I 190311 13:12:18 indiclient:580] INDIElement: DRIVER_NAME Name Text CCD Simulator\n[I 190311 13:12:18 indiclient:580] INDIElement: DRIVER_EXEC Exec Text indi_simulator_ccd\n[I 190311 13:12:18 indiclient:580] INDIElement: DRIVER_VERSION Version Text 1.0\n[I 190311 13:12:18 indiclient:580] INDIElement: DRIVER_INTERFACE Interface Text 22\n[I 190311 13:12:18 indiclient:1104] INDIVector: CCD Simulator POLLING_PERIOD Polling NumberVector read and write\n[I 190311 13:12:18 indiclient:580] INDIElement: PERIOD_MS Period (ms) Number 1000\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator CONNECTION Connection SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: CONNECT Connect Switch On\n[I 190311 13:12:18 indiclient:580] INDIElement: DISCONNECT Disconnect Switch Off\n[I 190311 13:12:18 indiclient:1104] INDIVector: Telescope Simulator DRIVER_INFO Driver Info TextVector read only\n[I 190311 13:12:18 indiclient:580] INDIElement: DRIVER_NAME Name Text Telescope Simulator\n[I 190311 13:12:18 indiclient:580] INDIElement: DRIVER_EXEC Exec Text indi_simulator_telescope\n[I 190311 13:12:18 indiclient:580] INDIElement: DRIVER_VERSION Version Text 1.0\n[I 190311 13:12:18 indiclient:580] INDIElement: DRIVER_INTERFACE Interface 
Text 5\n[I 190311 13:12:18 indiclient:1104] INDIVector: Telescope Simulator POLLING_PERIOD Polling NumberVector read and write\n[I 190311 13:12:18 indiclient:580] INDIElement: PERIOD_MS Period (ms) Number 250\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator DEBUG Debug SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: ENABLE Enable Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: DISABLE Disable Switch On\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator CONFIG_PROCESS Configuration SwitchVector AtMostOne\n[I 190311 13:12:18 indiclient:580] INDIElement: CONFIG_LOAD Load Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: CONFIG_SAVE Save Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: CONFIG_DEFAULT Default Switch Off\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator CONNECTION_MODE Connection Mode SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: CONNECTION_SERIAL Serial Switch On\n[I 190311 13:12:18 indiclient:580] INDIElement: CONNECTION_TCP Ethernet Switch Off\n[I 190311 13:12:18 indiclient:1104] INDIVector: Telescope Simulator DEVICE_PORT Ports TextVector read and write\n[I 190311 13:12:18 indiclient:580] INDIElement: PORT Port Text /dev/cu.usbserial\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator DEVICE_BAUD_RATE Baud Rate SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: 9600 9600 Switch On\n[I 190311 13:12:18 indiclient:580] INDIElement: 19200 19200 Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: 38400 38400 Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: 57600 57600 Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: 115200 115200 Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: 230400 230400 Switch Off\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator DEVICE_AUTO_SEARCH Auto Search 
SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: ENABLED Enabled Switch On\n[I 190311 13:12:18 indiclient:580] INDIElement: DISABLED Disabled Switch Off\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator DEVICE_PORT_SCAN Refresh SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: Scan Ports Scan Ports Switch Off\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator SYSTEM_PORTS System Ports SwitchVector AtMostOne\n[I 190311 13:12:18 indiclient:580] INDIElement: /dev/cu.Bluetooth-Incoming-Port /dev/cu.Bluetooth-Incoming-Port Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: /dev/cu.MALS /dev/cu.MALS Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: /dev/cu.SOC /dev/cu.SOC Switch Off\n[I 190311 13:12:18 indiclient:1104] INDIVector: Telescope Simulator ACTIVE_DEVICES Snoop devices TextVector read and write\n[I 190311 13:12:18 indiclient:580] INDIElement: ACTIVE_GPS GPS Text GPS Simulator\n[I 190311 13:12:18 indiclient:580] INDIElement: ACTIVE_DOME DOME Text Dome Simulator\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator DOME_POLICY Dome parking policy SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: NO_ACTION Ignore dome Switch On\n[I 190311 13:12:18 indiclient:580] INDIElement: LOCK_PARKING Dome locks Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: FORCE_CLOSE Dome parks Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: LOCK_AND_FORCE Both Switch Off\n[I 190311 13:12:18 indiclient:1104] INDIVector: Telescope Simulator TELESCOPE_INFO Scope Properties NumberVector read and write\n[I 190311 13:12:18 indiclient:580] INDIElement: TELESCOPE_APERTURE Aperture (mm) Number 120\n[I 190311 13:12:18 indiclient:580] INDIElement: TELESCOPE_FOCAL_LENGTH Focal Length (mm) Number 900\n[I 190311 13:12:18 indiclient:580] INDIElement: GUIDER_APERTURE Guider Aperture (mm) Number 120\n[I 190311 13:12:18 
indiclient:580] INDIElement: GUIDER_FOCAL_LENGTH Guider Focal Length (mm) Number 900\n[I 190311 13:12:18 indiclient:1104] INDIVector: Telescope Simulator SCOPE_CONFIG_NAME Scope Name TextVector read and write\n[I 190311 13:12:18 indiclient:580] INDIElement: SCOPE_CONFIG_NAME Config Name Text \n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator ON_COORD_SET On Set SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: TRACK Track Switch On\n[I 190311 13:12:18 indiclient:580] INDIElement: SLEW Slew Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: SYNC Sync Switch Off\n[I 190311 13:12:18 indiclient:1104] INDIVector: Telescope Simulator EQUATORIAL_EOD_COORD Eq. Coordinates NumberVector read and write\n[I 190311 13:12:18 indiclient:580] INDIElement: RA RA (hh:mm:ss) Number 7.4845283712546706312\n[I 190311 13:12:18 indiclient:580] INDIElement: DEC DEC (dd:mm:ss) Number 90\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator TELESCOPE_ABORT_MOTION Abort Motion SwitchVector AtMostOne\n[I 190311 13:12:18 indiclient:580] INDIElement: ABORT Abort Switch Off\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator TELESCOPE_TRACK_MODE Track Mode SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: TRACK_SIDEREAL Sidereal Switch On\n[I 190311 13:12:18 indiclient:580] INDIElement: TRACK_CUSTOM Custom Switch Off\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: Telescope Simulator TELESCOPE_TRACK_STATE Tracking SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: TRACK_ON On Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: TRACK_OFF Off Switch On\n[I 190311 13:12:18 indiclient:1235] INDISwitchVector: CCD Simulator DEBUG Debug SwitchVector OneOfMany\n[I 190311 13:12:18 indiclient:580] INDIElement: ENABLE Enable Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: DISABLE Disable Switch On\n[I 190311 13:12:18 indiclient:1235] 
INDISwitchVector: CCD Simulator CONFIG_PROCESS Configuration SwitchVector AtMostOne\n[I 190311 13:12:18 indiclient:580] INDIElement: CONFIG_LOAD Load Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: CONFIG_SAVE Save Switch Off\n[I 190311 13:12:18 indiclient:580] INDIElement: CONFIG_DEFAULT Default Switch Off\n[I 190311 13:12:18 indiclient:1104] INDIVector: CCD Simulator ACTIVE_DEVICES Snoop devices TextVector read and write\n[I 190311 13:12:18 indiclient:580] INDIElement: ACTIVE_TELESCOPE Telescope Text Telescope Simulator\n[I 190311 13:12:18 indiclient:580] INDIElement: ACTIVE_FOCUSER Focuser Text Focuser Simulator\n[I 190311 13:12:18 indiclient:580] INDIElement: ACTIVE_FILTER Filter Text CCD Simulator\n" ], [ "c.cooler", "_____no_output_____" ], [ "sys.getsizeof(s.camera)", "_____no_output_____" ], [ "# filter 1 -> R\n# filter 2 -> V\n# filter 3 -> B\n# filter 4 -> clear\nc.filter", "_____no_output_____" ], [ "c.currentElement", "_____no_output_____" ], [ "f = c.expose(exptime=60, exptype=\"Light\")", "_____no_output_____" ], [ "norm = visualization.ImageNormalize(\n f[0].data,\n interval=visualization.ZScaleInterval(),\n stretch=visualization.AsinhStretch()\n)\nplt.imshow(f[0].data, norm=norm)\nplt.show()", "_____no_output_____" ], [ "f.writeto(\"globular.fits\", clobber=True)", "_____no_output_____" ], [ "f[0].header", "_____no_output_____" ], [ "int(c.get_float(\"SBIG CCD\", \"FILTER_SLOT\", \"FILTER_SLOT_VALUE\"))", "_____no_output_____" ], [ "c.temperature", "_____no_output_____" ] ], [ [ "# F/9 WFS Camera dev", "_____no_output_____" ] ], [ [ "f9 = F9WFSCam()", "_____no_output_____" ], [ "f9.process_events()", "_____no_output_____" ], [ "v = f9.get_vector(\"SBIG CCD\", \"CCD_BINNING\")\ne = v.elements[0]", "_____no_output_____" ], [ "for e in v.elements:\n print(\"%s %s\" % (e.getName(), e.get_int()))", "_____no_output_____" ], [ "f9.connected", "_____no_output_____" ], [ "f9.process_events()", "_____no_output_____" ], [ "f9.wfs_config()", 
"_____no_output_____" ], [ "f9.default_config()", "_____no_output_____" ], [ "f9.binning", "_____no_output_____" ], [ "f9.process_events()\nf = f9.expose(exptime=1.0, exptype=\"Light\")", "_____no_output_____" ], [ "norm = visualization.ImageNormalize(\n f[0].data,\n interval=visualization.ZScaleInterval(),\n stretch=visualization.AsinhStretch()\n)\nplt.imshow(f[0].data, norm=norm)\nplt.show()", "_____no_output_____" ], [ "f.writeto(\"f9_sbig_ref.fits\")", "_____no_output_____" ], [ "f[0].data.max()", "_____no_output_____" ], [ "f9.temperature = 20", "_____no_output_____" ], [ "f9.get_float(\"SBIG CCD\", \"CCD_TEMPERATURE\", \"CCD_TEMPERATURE_VALUE\")", "_____no_output_____" ], [ "v = f9.get_vector(\"SBIG CCD\", \"CCD_TEMPERATURE\")\nv.get_element(\"CCD_TEMPERATURE_VALUE\").get_float()", "_____no_output_____" ], [ "f9.get_float(\"SBIG CCD\", \"CCD_COOLER_POWER\", \"CCD_COOLER_VALUE\")", "_____no_output_____" ], [ "f9.temperature", "_____no_output_____" ], [ "f9.cooling_power", "_____no_output_____" ], [ "f9.cooler", "_____no_output_____" ], [ "f[0].header", "_____no_output_____" ], [ "f9.observer = \"F/9 WFS\"", "_____no_output_____" ], [ "f9.object = \"reference\"", "_____no_output_____" ], [ "f[0].header", "_____no_output_____" ], [ "f9.tell()", "_____no_output_____" ], [ "f9.process_events()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0650f6b2145bb9d6a89197307d8ab258f205a0b
262,674
ipynb
Jupyter Notebook
temporal-difference/Temporal_Difference_Solution.ipynb
JeroenSweerts/deep-reinforcement-learning
d7b73a6245ff82030203586c8c5db0d10a52623f
[ "MIT" ]
null
null
null
temporal-difference/Temporal_Difference_Solution.ipynb
JeroenSweerts/deep-reinforcement-learning
d7b73a6245ff82030203586c8c5db0d10a52623f
[ "MIT" ]
null
null
null
temporal-difference/Temporal_Difference_Solution.ipynb
JeroenSweerts/deep-reinforcement-learning
d7b73a6245ff82030203586c8c5db0d10a52623f
[ "MIT" ]
null
null
null
359.827397
51,836
0.911563
[ [ [ "# Temporal-Difference Methods\n\nIn this notebook, you will write your own implementations of many Temporal-Difference (TD) methods.\n\nWhile we have provided some starter code, you are welcome to erase these hints and write your code from scratch.\n\n---\n\n### Part 0: Explore CliffWalkingEnv\n\nWe begin by importing the necessary packages.", "_____no_output_____" ] ], [ [ "import sys\nimport gym\nimport numpy as np\nimport random\nimport math\nfrom collections import defaultdict, deque\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport check_test\nfrom plot_utils import plot_values", "_____no_output_____" ] ], [ [ "Use the code cell below to create an instance of the [CliffWalking](https://github.com/openai/gym/blob/master/gym/envs/toy_text/cliffwalking.py) environment.", "_____no_output_____" ] ], [ [ "env = gym.make('CliffWalking-v0')", "_____no_output_____" ] ], [ [ "The agent moves through a $4\\times 12$ gridworld, with states numbered as follows:\n```\n[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],\n [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],\n [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]]\n```\nAt the start of any episode, state `36` is the initial state. State `47` is the only terminal state, and the cliff corresponds to states `37` through `46`.\n\nThe agent has 4 potential actions:\n```\nUP = 0\nRIGHT = 1\nDOWN = 2\nLEFT = 3\n```\n\nThus, $\\mathcal{S}^+=\\{0, 1, \\ldots, 47\\}$, and $\\mathcal{A} =\\{0, 1, 2, 3\\}$. Verify this by running the code cell below.", "_____no_output_____" ] ], [ [ "print(env.action_space)\nprint(env.observation_space)", "Discrete(4)\nDiscrete(48)\n" ] ], [ [ "In this mini-project, we will build towards finding the optimal policy for the CliffWalking environment. The optimal state-value function is visualized below. 
Please take the time now to make sure that you understand _why_ this is the optimal state-value function.\n\n_**Note**: You can safely ignore the values of the cliff \"states\" as these are not true states from which the agent can make decisions. For the cliff \"states\", the state-value function is not well-defined._", "_____no_output_____" ] ], [ [ "# define the optimal state-value function\nV_opt = np.zeros((4,12))\nV_opt[0][0:13] = -np.arange(3, 15)[::-1]\nV_opt[1][0:13] = -np.arange(3, 15)[::-1] + 1\nV_opt[2][0:13] = -np.arange(3, 15)[::-1] + 2\nV_opt[3][0] = -13\n\nplot_values(V_opt)", "_____no_output_____" ] ], [ [ "### Part 1: TD Control: Sarsa\n\nIn this section, you will write your own implementation of the Sarsa control algorithm.\n\nYour algorithm has four arguments:\n- `env`: This is an instance of an OpenAI Gym environment.\n- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.\n- `alpha`: This is the step-size parameter for the update step.\n- `gamma`: This is the discount rate. 
It must be a value between 0 and 1, inclusive (default value: `1`).\n\nThe algorithm returns as output:\n- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.\n\nPlease complete the function in the code cell below.\n\n(_Feel free to define additional functions to help you to organize your code._)", "_____no_output_____" ] ], [ [ "def update_Q_sarsa(alpha, gamma, Q, state, action, reward, next_state=None, next_action=None):\n \"\"\"Returns updated Q-value for the most recent experience.\"\"\"\n current = Q[state][action] # estimate in Q-table (for current state, action pair)\n # get value of state, action pair at next time step\n Qsa_next = Q[next_state][next_action] if next_state is not None else 0 \n target = reward + (gamma * Qsa_next) # construct TD target\n new_value = current + (alpha * (target - current)) # get updated value\n return new_value\n\ndef epsilon_greedy(Q, state, nA, eps):\n \"\"\"Selects epsilon-greedy action for supplied state.\n \n Params\n ======\n Q (dictionary): action-value function\n state (int): current state\n nA (int): number actions in the environment\n eps (float): epsilon\n \"\"\"\n if random.random() > eps: # select greedy action with probability epsilon\n return np.argmax(Q[state])\n else: # otherwise, select an action randomly\n return random.choice(np.arange(env.action_space.n))", "_____no_output_____" ], [ "def sarsa(env, num_episodes, alpha, gamma=1.0, plot_every=100):\n nA = env.action_space.n # number of actions\n Q = defaultdict(lambda: np.zeros(nA)) # initialize empty dictionary of arrays\n \n # monitor performance\n tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores\n avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes\n \n for i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 100 == 0:\n print(\"\\rEpisode {}/{}\".format(i_episode, num_episodes), 
end=\"\")\n sys.stdout.flush() \n score = 0 # initialize score\n state = env.reset() # start episode\n \n eps = 1.0 / i_episode # set value of epsilon\n action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection\n \n while True:\n next_state, reward, done, info = env.step(action) # take action A, observe R, S'\n score += reward # add reward to agent's score\n if not done:\n next_action = epsilon_greedy(Q, next_state, nA, eps) # epsilon-greedy action\n Q[state][action] = update_Q_sarsa(alpha, gamma, Q, \\\n state, action, reward, next_state, next_action)\n \n state = next_state # S <- S'\n action = next_action # A <- A'\n if done:\n Q[state][action] = update_Q_sarsa(alpha, gamma, Q, \\\n state, action, reward)\n tmp_scores.append(score) # append score\n break\n if (i_episode % plot_every == 0):\n avg_scores.append(np.mean(tmp_scores))\n\n # plot performance\n plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))\n plt.xlabel('Episode Number')\n plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)\n plt.show()\n # print best 100-episode performance\n print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores)) \n return Q", "_____no_output_____" ] ], [ [ "Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function. \n\nIf the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. 
However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.", "_____no_output_____" ] ], [ [ "# obtain the estimated optimal policy and corresponding action-value function\nQ_sarsa = sarsa(env, 50000, .01)\n\n# print the estimated optimal policy\npolicy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4,12)\ncheck_test.run_check('td_control_check', policy_sarsa)\nprint(\"\\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):\")\nprint(policy_sarsa)\n\n# plot the estimated optimal state-value function\nV_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)])\nplot_values(V_sarsa)", "Episode 50000/50000" ] ], [ [ "### Part 2: TD Control: Q-learning\n\nIn this section, you will write your own implementation of the Q-learning control algorithm.\n\nYour algorithm has four arguments:\n- `env`: This is an instance of an OpenAI Gym environment.\n- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.\n- `alpha`: This is the step-size parameter for the update step.\n- `gamma`: This is the discount rate. 
It must be a value between 0 and 1, inclusive (default value: `1`).\n\nThe algorithm returns as output:\n- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.\n\nPlease complete the function in the code cell below.\n\n(_Feel free to define additional functions to help you to organize your code._)", "_____no_output_____" ] ], [ [ "def update_Q_sarsamax(alpha, gamma, Q, state, action, reward, next_state=None):\n \"\"\"Returns updated Q-value for the most recent experience.\"\"\"\n current = Q[state][action] # estimate in Q-table (for current state, action pair)\n Qsa_next = np.max(Q[next_state]) if next_state is not None else 0 # value of next state \n target = reward + (gamma * Qsa_next) # construct TD target\n new_value = current + (alpha * (target - current)) # get updated value \n return new_value", "_____no_output_____" ], [ "def q_learning(env, num_episodes, alpha, gamma=1.0, plot_every=100):\n \"\"\"Q-Learning - TD Control\n \n Params\n ======\n num_episodes (int): number of episodes to run the algorithm\n alpha (float): learning rate\n gamma (float): discount factor\n plot_every (int): number of episodes to use when calculating average score\n \"\"\"\n nA = env.action_space.n # number of actions\n Q = defaultdict(lambda: np.zeros(nA)) # initialize empty dictionary of arrays\n \n # monitor performance\n tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores\n avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes\n \n for i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 100 == 0:\n print(\"\\rEpisode {}/{}\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n score = 0 # initialize score\n state = env.reset() # start episode\n eps = 1.0 / i_episode # set value of epsilon\n \n while True:\n action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection\n next_state, 
reward, done, info = env.step(action) # take action A, observe R, S'\n score += reward # add reward to agent's score\n Q[state][action] = update_Q_sarsamax(alpha, gamma, Q, \\\n state, action, reward, next_state) \n state = next_state # S <- S'\n if done:\n tmp_scores.append(score) # append score\n break\n if (i_episode % plot_every == 0):\n avg_scores.append(np.mean(tmp_scores))\n \n # plot performance\n plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))\n plt.xlabel('Episode Number')\n plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)\n plt.show()\n # print best 100-episode performance\n print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))\n return Q", "_____no_output_____" ] ], [ [ "Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function. \n\nIf the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. 
However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.", "_____no_output_____" ] ], [ [ "# obtain the estimated optimal policy and corresponding action-value function\nQ_sarsamax = q_learning(env, 5000, .01)\n\n# print the estimated optimal policy\npolicy_sarsamax = np.array([np.argmax(Q_sarsamax[key]) if key in Q_sarsamax else -1 for key in np.arange(48)]).reshape((4,12))\ncheck_test.run_check('td_control_check', policy_sarsamax)\nprint(\"\\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):\")\nprint(policy_sarsamax)\n\n# plot the estimated optimal state-value function\nplot_values([np.max(Q_sarsamax[key]) if key in Q_sarsamax else 0 for key in np.arange(48)])", "Episode 5000/5000" ] ], [ [ "### Part 3: TD Control: Expected Sarsa\n\nIn this section, you will write your own implementation of the Expected Sarsa control algorithm.\n\nYour algorithm has four arguments:\n- `env`: This is an instance of an OpenAI Gym environment.\n- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.\n- `alpha`: This is the step-size parameter for the update step.\n- `gamma`: This is the discount rate. 
It must be a value between 0 and 1, inclusive (default value: `1`).\n\nThe algorithm returns as output:\n- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.\n\nPlease complete the function in the code cell below.\n\n(_Feel free to define additional functions to help you to organize your code._)", "_____no_output_____" ] ], [ [ "def update_Q_expsarsa(alpha, gamma, nA, eps, Q, state, action, reward, next_state=None):\n \"\"\"Returns updated Q-value for the most recent experience.\"\"\"\n current = Q[state][action] # estimate in Q-table (for current state, action pair)\n policy_s = np.ones(nA) * eps / nA # current policy (for next state S')\n policy_s[np.argmax(Q[next_state])] = 1 - eps + (eps / nA) # greedy action\n Qsa_next = np.dot(Q[next_state], policy_s) # get value of state at next time step\n target = reward + (gamma * Qsa_next) # construct target\n new_value = current + (alpha * (target - current)) # get updated value \n return new_value", "_____no_output_____" ], [ "def expected_sarsa(env, num_episodes, alpha, gamma=1.0, plot_every=100):\n \"\"\"Expected SARSA - TD Control\n \n Params\n ======\n num_episodes (int): number of episodes to run the algorithm\n alpha (float): step-size parameters for the update step\n gamma (float): discount factor\n plot_every (int): number of episodes to use when calculating average score\n \"\"\"\n nA = env.action_space.n # number of actions\n Q = defaultdict(lambda: np.zeros(nA)) # initialize empty dictionary of arrays\n \n # monitor performance\n tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores\n avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes\n \n for i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 100 == 0:\n print(\"\\rEpisode {}/{}\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n \n score = 0 # initialize score\n state 
= env.reset() # start episode\n eps = 0.005 # set value of epsilon\n \n while True:\n action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection\n next_state, reward, done, info = env.step(action) # take action A, observe R, S'\n score += reward # add reward to agent's score\n # update Q\n Q[state][action] = update_Q_expsarsa(alpha, gamma, nA, eps, Q, \\\n state, action, reward, next_state) \n state = next_state # S <- S'\n if done:\n tmp_scores.append(score) # append score\n break\n if (i_episode % plot_every == 0):\n avg_scores.append(np.mean(tmp_scores))\n \n # plot performance\n plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))\n plt.xlabel('Episode Number')\n plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)\n plt.show()\n # print best 100-episode performance\n print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))\n return Q", "_____no_output_____" ] ], [ [ "Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function. \n\nIf the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. 
However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.", "_____no_output_____" ] ], [ [ "# obtain the estimated optimal policy and corresponding action-value function\nQ_expsarsa = expected_sarsa(env, 50000, 1)\n\n# print the estimated optimal policy\npolicy_expsarsa = np.array([np.argmax(Q_expsarsa[key]) if key in Q_expsarsa else -1 for key in np.arange(48)]).reshape(4,12)\ncheck_test.run_check('td_control_check', policy_expsarsa)\nprint(\"\\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):\")\nprint(policy_expsarsa)\n\n# plot the estimated optimal state-value function\nplot_values([np.max(Q_expsarsa[key]) if key in Q_expsarsa else 0 for key in np.arange(48)])", "Episode 50000/50000" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d0651a742ad5e0a900b350e6b5f324e310dcee89
5,154
ipynb
Jupyter Notebook
files/notebooks/09-Lists.ipynb
mforneris/introduction_to_python_course
8075973ee89a921a5e2693f649adbf1fc0e0b2cb
[ "CC-BY-4.0" ]
null
null
null
files/notebooks/09-Lists.ipynb
mforneris/introduction_to_python_course
8075973ee89a921a5e2693f649adbf1fc0e0b2cb
[ "CC-BY-4.0" ]
null
null
null
files/notebooks/09-Lists.ipynb
mforneris/introduction_to_python_course
8075973ee89a921a5e2693f649adbf1fc0e0b2cb
[ "CC-BY-4.0" ]
1
2020-01-09T10:58:56.000Z
2020-01-09T10:58:56.000Z
18.147887
64
0.503492
[ [ [ "# Lists", "_____no_output_____" ], [ "### A list stores many values in a single structure.", "_____no_output_____" ], [ "### Use an item’s index to fetch it from a list.", "_____no_output_____" ], [ "### Lists’ values can be replaced by assigning to them.", "_____no_output_____" ], [ "### Appending items to a list lengthens it.", "_____no_output_____" ], [ "### Use `del` to remove items from a list entirely.", "_____no_output_____" ], [ "### The empty list contains no values.", "_____no_output_____" ], [ "### Lists may contain values of different types.", "_____no_output_____" ], [ "### Character strings can be indexed like lists.", "_____no_output_____" ], [ "but character strings are _immutable_.", "_____no_output_____" ], [ "### Indexing beyond the end of the collection is an error.", "_____no_output_____" ], [ "## Exercises", "_____no_output_____" ] ], [ [ "%load ../exercises/lists-blanks.py", "_____no_output_____" ], [ "%load ../exercises/lists-string-conversion.py", "_____no_output_____" ] ], [ [ "## Key Points", "_____no_output_____" ], [ "- A list stores many values in a single structure.\n- Use an item’s index to fetch it from a list.\n- Lists’ values can be replaced by assigning to them.\n- Appending items to a list lengthens it.\n- Use del to remove items from a list entirely.\n- The empty list contains no values.\n- Lists may contain values of different types.\n- Character strings can be indexed like lists.\n- Character strings are immutable.\n- Indexing beyond the end of the collection is an error.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
d065337b193a5054114b20380b19ceb4416ed862
418,764
ipynb
Jupyter Notebook
deep-learning/2.ipynb
nothingelsematters/University
b1e188cb59e5a436731b92c914494626a99e1ae0
[ "WTFPL" ]
1
2018-06-03T17:48:50.000Z
2018-06-03T17:48:50.000Z
deep-learning/2.ipynb
nothingelsematters/University
b1e188cb59e5a436731b92c914494626a99e1ae0
[ "WTFPL" ]
null
null
null
deep-learning/2.ipynb
nothingelsematters/University
b1e188cb59e5a436731b92c914494626a99e1ae0
[ "WTFPL" ]
null
null
null
54.948694
114,490
0.669332
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0653c43c1f219bdcd2c4304b7560e153e962271
5,420
ipynb
Jupyter Notebook
docs/examples/legacy/Measure without a Loop.ipynb
jakeogh/Qcodes
3042317038e89264d481b212c9640c4d6b356c88
[ "MIT" ]
223
2016-10-29T15:00:24.000Z
2022-03-20T06:53:34.000Z
docs/examples/legacy/Measure without a Loop.ipynb
jakeogh/Qcodes
3042317038e89264d481b212c9640c4d6b356c88
[ "MIT" ]
3,406
2016-10-25T10:44:50.000Z
2022-03-31T09:47:35.000Z
docs/examples/legacy/Measure without a Loop.ipynb
Akshita07/Qcodes
f75e4786e268f415935aa4658d92526279c7a102
[ "MIT" ]
263
2016-10-25T11:35:36.000Z
2022-03-31T08:53:20.000Z
31.695906
280
0.604059
[ [ [ "# Measure without a Loop\n\nIf you have a parameter that returns a whole array at once, often you want to measure it directly into a DataSet.\n\nThis shows how that works in QCoDeS", "_____no_output_____" ] ], [ [ "%matplotlib nbagg\nimport qcodes as qc\nimport numpy as np\n# import dummy driver for the tutorial\nfrom qcodes.tests.instrument_mocks import DummyInstrument, DummyChannelInstrument\nfrom qcodes.measure import Measure\nfrom qcodes.actions import Task\n\ndac1 = DummyInstrument(name=\"dac\")\ndac2 = DummyChannelInstrument(name=\"dac2\")\n\n\n# the default dummy instrument returns always a constant value, in the following line we make it random \n# just for the looks 💅\ndac2.A.dummy_array_parameter.get = lambda: np.random.randint(0, 100, size=5)\n\n# The station is a container for all instruments that makes it easy \n# to log meta-data\nstation = qc.Station(dac1, dac2)", "2020-03-24 18:45:32,769 ¦ qcodes.instrument.base ¦ WARNING ¦ base ¦ snapshot_base ¦ 214 ¦ [dac2_ChanA(DummyChannel)] Snapshot: Could not update parameter: dummy_sp_axis\n2020-03-24 18:45:32,798 ¦ qcodes.instrument.base ¦ WARNING ¦ base ¦ snapshot_base ¦ 214 ¦ [dac2_ChanB(DummyChannel)] Snapshot: Could not update parameter: dummy_sp_axis\n2020-03-24 18:45:32,804 ¦ qcodes.instrument.base ¦ WARNING ¦ base ¦ snapshot_base ¦ 214 ¦ [dac2_ChanC(DummyChannel)] Snapshot: Could not update parameter: dummy_sp_axis\n2020-03-24 18:45:32,807 ¦ qcodes.instrument.base ¦ WARNING ¦ base ¦ snapshot_base ¦ 214 ¦ [dac2_ChanD(DummyChannel)] Snapshot: Could not update parameter: dummy_sp_axis\n2020-03-24 18:45:32,819 ¦ qcodes.instrument.base ¦ WARNING ¦ base ¦ snapshot_base ¦ 214 ¦ [dac2_ChanE(DummyChannel)] Snapshot: Could not update parameter: dummy_sp_axis\n2020-03-24 18:45:32,838 ¦ qcodes.instrument.base ¦ WARNING ¦ base ¦ snapshot_base ¦ 214 ¦ [dac2_ChanF(DummyChannel)] Snapshot: Could not update parameter: dummy_sp_axis\n" ] ], [ [ "## Instantiates all the instruments needed for the demo\n\nFor this 
tutorial we're going to use the regular parameters (c0, c1, c2, vsd) and ArrayGetter, which is just a way to construct a parameter that returns a whole array at once out of simple parameters, as well as AverageAndRaw, which returns a scalar *and* an array together.", "_____no_output_____" ], [ "### Only array output\nThe arguments to Measure are all the same actions you use in a Loop.\nIf they return only arrays, you will see exactly those arrays (with their setpoints) in the output DataSet", "_____no_output_____" ] ], [ [ "data = Measure(\n Task(dac1.dac1.set, 0),\n dac2.A.dummy_array_parameter,\n Task(dac1.dac1.set, 2),\n dac2.A.dummy_array_parameter,\n).run()", "DataSet:\n location = 'data/2020-03-24/#013_{name}_18-45-41'\n <Type> | <array_id> | <array.name> | <array.shape>\n Measured | dac2_ChanA_dummy_array_parameter_1 | dummy_array_parameter | (5,)\n Measured | dac2_ChanA_dummy_array_parameter_3 | dummy_array_parameter | (5,)\nacquired at 2020-03-24 18:45:41\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d065543a601a6ba271f295d16ecf849d2cf066be
29,969
ipynb
Jupyter Notebook
RegExXMLParsing.ipynb
lindsayduca/PatentProcessProject-
62ac9e0caacb83f8d1ce9d5b651e018c8a1ca1dc
[ "MIT" ]
null
null
null
RegExXMLParsing.ipynb
lindsayduca/PatentProcessProject-
62ac9e0caacb83f8d1ce9d5b651e018c8a1ca1dc
[ "MIT" ]
null
null
null
RegExXMLParsing.ipynb
lindsayduca/PatentProcessProject-
62ac9e0caacb83f8d1ce9d5b651e018c8a1ca1dc
[ "MIT" ]
null
null
null
75.488665
1,415
0.633254
[ [ [ "import re \nimport pandas as pd\nimport os \nimport html", "_____no_output_____" ], [ "os.chdir('/Users/lindsayduca/Desktop/Downloads')\n#file=\"US20220000001A1-20220106.XML\" \n#file=\"USD0864516-20191029.XML\" \n\nfile = open(file=\"ipa220106.txt\", mode='r') #opening the file in read mode\nfile_content_raw = file.read()\nfile.close()\ntext1=re.compile(\"<\\?xml version=\\\"1\\.0\\\" encoding\\=\\\"UTF\\-8\\\"\\?>\")\nfile_content = text1.split(file_content_raw)\nwhile '' in file_content:\n file_content.remove('')\nprint(\"No of patents :\", len(file_content)) ", "No of patents : 7882\n" ], [ "# Writing regular expressions for the desired columns\n\n#grant_id = re.compile('file\\=\\\"([U][S]\\w\\w\\d{6})\\-\\d{8}\\.XML\\\"') \ngrant_id = re.compile('file\\=\\\"([U][S]\\w{13})\\-\\w{8}\\.[X][M][L]\\\"') \n\n#patent_title = re.compile(\"<invention-title id=\\\"\\w{5,6}\\\">(.*?)</invention-title>\") \nkind = re.compile(\"<kind>([A-Z]\\d)</kind>\")\n#number_of_claim = re.compile(\"\\<number\\-of\\-claims\\>(\\d{1,4})\\<\\/number\\-of\\-claims\\>\")\nfirst_name=re.compile(\"<first-name>(.*?)</first-name>\")\nlast_name=re.compile(\"<last-name>(.*?)</last-name>\")\ncitation_by_examiner = re.compile(\"\\<category\\>cited by examiner<\\/category\\>\")\ncitation_by_applicant = re.compile(\"\\<category>cited by applicant\\<\\/category\\>\")\nclaim_text=re.compile(\"<claim-text>[\\s\\S<]*</claim-text>\")\nabstract=re.compile(\"\\<abstract id\\=\\\"abstract\\\"\\>\\n\\<p id\\=\\\"p\\-0001\\\" num\\=\\\"0000\\\"\\>(.*?)\\<\\/p\\>\\n\\<\\/abstract\\>\")\n\n# some extra care with claim-text\ncleaner = re.compile('<.*?>') \ncleaner2 = re.compile('\\n')\ncleaner3 = re.compile('\\,\\,\\,')\ncleaner4 = re.compile(\"[\\.][\\,][\\,]\")\ncleaner5 = re.compile(\"[\\,][\\,]\")\ncleaner6 = re.compile(\"[\\;][\\,]\")", "_____no_output_____" ], [ "for line in file_content:\n \n gid=grant_id.findall(line) #to find grant_id\n \n# checking length of gid is not equal to 0 then do append 
to all the lists\n if len(gid)!=0: \n gid_list.append(gid[0])\n \ndata_frame = pd.DataFrame(\n {'grant_id': gid_list})\ndata_frame", "_____no_output_____" ], [ "gid_list, title_list, kind_list, no_of_claim_list, name_list, applicant_list, examiners_list, claim_list, abstract_list, = ([] for i in range(9))\n\nfor line in file_content:\n \n gid=grant_id.findall(line) #to find grant_id\n title=patent_title.findall(line) #to find patent_title\n kinds=kind.findall(line) #to find kind\n # sclaim=number_of_claim.findall(line) #to find no_of_claims\n #to find inventors\n inventors=re.findall(\"<inventor.*?>[\\s\\S]*</inventor>\",line)\n for person in inventors:\n first=first_name.findall(person)\n last=last_name.findall(person)\n name = [firstName +\" \"+ lastName for firstName, lastName in zip(first,last)]\n if len(name)==0:\n names=\"NA\"\n else:\n names=name\n \n # here we count citation_by_applicant\n if len(citation_by_applicant.findall(line))==0:\n citation_by_applicants=0\n else:\n citation_by_applicants=len(citation_by_applicant.findall(line)) \n \n # count for citation_by_examiner\n if len(citation_by_examiner.findall(line))==0:\n citation_by_examiners=0\n else: \n citation_by_examiners=len(citation_by_examiner.findall(line)) \n \n # For claim_text\n if (len(re.findall(\"<claim-text>[\\s\\S<]*</claim-text>\",line))==0):\n claim_text=[\"NA\"]\n else:\n claim_text=re.findall(\"<claim-text>[\\s\\S<]*</claim-text>\",line) \n \n # For abstract\n abst=abstract.findall(line)\n if len(abst)==0:\n abstracts=[\"NA\"]\n else: \n abstracts=abst \n \n # checking length of gid is not equal to 0 then do append to all the lists\n if len(gid)!=0: \n gid_list.append(gid[0])\n # title_list.append(title[0])\n kind_list.append(kinds[0])\n # no_of_claim_list.append(sclaim[0])\n name_list.append(names)\n applicant_list.append(citation_by_applicants)\n examiners_list.append(citation_by_examiners)\n claim_list.append(claim_text[0])\n abstract_list.append(abstracts[0])\n\"\"\" \n# cleaning 
claim text \nelement=0\nfor items in claim_list:\n claim_list[element]=re.sub(cleaner,'',claim_list[element])\n claim_list[element]=re.sub(cleaner2,',',claim_list[element])\n claim_list[element]=re.sub(cleaner3,',',claim_list[element])\n claim_list[element]=re.sub(cleaner4,'.,',claim_list[element])\n claim_list[element]=re.sub(cleaner5,',',claim_list[element])\n claim_list[element]=re.sub(cleaner6,'; ',claim_list[element])\n element=element+1\n\"\"\"\n# For kind \nKind1 = [w.replace('P2', 'Plant Patent Grant(with a published application) issued on or after January 2, 2001') for w in kind_list]\nKind2 = [w.replace('B2', 'Utility Patent Grant (with a published application) issued on or after January 2, 2001.') for w in Kind1]\nKind3 = [w.replace('S1', 'Design Patent') for w in Kind2]\nKind4 = [w.replace('B1', 'Utility Patent Grant (no published application) issued on or after January 2, 2001.') for w in Kind3]", "_____no_output_____" ], [ "# Creating data frame\ndata_frame = pd.DataFrame(\n {'grant_id': gid_list,\n #'patent_title': title_list,\n 'kind': Kind4,\n #'number_of_claims':no_of_claim_list,\n 'inventors':name_list,\n 'citations_applicant_count':applicant_list,\n 'citations_examiner_count':examiners_list,\n 'claims_text':claim_list,\n 'abstract':abstract_list\n })", "_____no_output_____" ], [ "data_frame.isnull().sum()", "_____no_output_____" ], [ "data_frame", "_____no_output_____" ], [ "data_frame.to_csv('ParsedPatentGrant.csv')", "_____no_output_____" ], [ "import requests\nfrom bs4 import BeautifulSoup\nimport json\n\nr = requests.get(\"https://developer.uspto.gov/ibd-api/v1/patent/application?patentNumber=9876543&start=0&rows=100\")\nsoup = BeautifulSoup(r.text, \"html.parser\")\ntext = soup.get_text()\n\nr_dict = json.loads(str(text))\nprint(r_dict['response']['docs'][0]['inventor'][0])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06558fa05f6bd2f2cedf5d405e80dc1df1713a5
206,435
ipynb
Jupyter Notebook
notebooks/get_prediction_from_pre_trained_model.ipynb
raghavgoyal14/smth-smth-v2-baseline-with-models
a2138058354fe119805ba93bdf2dea9677f14569
[ "Apache-2.0", "MIT" ]
2
2022-01-20T04:53:50.000Z
2022-03-22T22:26:44.000Z
notebooks/get_prediction_from_pre_trained_model.ipynb
raghavgoyal14/smth-smth-v2-baseline-with-models
a2138058354fe119805ba93bdf2dea9677f14569
[ "Apache-2.0", "MIT" ]
null
null
null
notebooks/get_prediction_from_pre_trained_model.ipynb
raghavgoyal14/smth-smth-v2-baseline-with-models
a2138058354fe119805ba93bdf2dea9677f14569
[ "Apache-2.0", "MIT" ]
1
2021-11-07T12:27:24.000Z
2021-11-07T12:27:24.000Z
532.048969
196,316
0.946332
[ [ [ "# Loads pre-trained model and get prediction on validation samples", "_____no_output_____" ], [ "### 1. Info\nPlease provide path to the relevant config file ", "_____no_output_____" ] ], [ [ "config_file_path = \"../configs/pretrained/config_model1.json\"", "_____no_output_____" ] ], [ [ "### 2. Importing required modules", "_____no_output_____" ] ], [ [ "import os\nimport cv2\nimport sys\nimport importlib\nimport torch\nimport torchvision\nimport numpy as np\n\nsys.path.insert(0, \"../\")\n\n# imports for displaying a video an IPython cell\nimport io\nimport base64\nfrom IPython.display import HTML", "_____no_output_____" ], [ "from data_parser import WebmDataset\nfrom data_loader_av import VideoFolder\n\nfrom models.multi_column import MultiColumn\nfrom transforms_video import *\n\nfrom utils import load_json_config, remove_module_from_checkpoint_state_dict\nfrom pprint import pprint", "_____no_output_____" ] ], [ [ "### 3. Loading configuration file, model definition and its path", "_____no_output_____" ] ], [ [ "# Load config file\nconfig = load_json_config(config_file_path)", "_____no_output_____" ], [ "# set column model\ncolumn_cnn_def = importlib.import_module(\"{}\".format(config['conv_model']))\nmodel_name = config[\"model_name\"]\n\nprint(\"=> Name of the model -- {}\".format(model_name))\n\n# checkpoint path to a trained model\ncheckpoint_path = os.path.join(\"../\", config[\"output_dir\"], config[\"model_name\"], \"model_best.pth.tar\")\nprint(\"=> Checkpoint path --> {}\".format(checkpoint_path))", "=> Name of the model -- model3D_1\n=> Checkpoint path --> ../trained_models/pretrained/model3D_1/model_best.pth.tar\n" ] ], [ [ "### 3. 
Load model", "_____no_output_____" ], [ "_Note: without cuda() for ease_", "_____no_output_____" ] ], [ [ "model = MultiColumn(config['num_classes'], column_cnn_def.Model, int(config[\"column_units\"]))\nmodel.eval();", "_____no_output_____" ], [ "print(\"=> loading checkpoint\")\ncheckpoint = torch.load(checkpoint_path)\ncheckpoint['state_dict'] = remove_module_from_checkpoint_state_dict(\n checkpoint['state_dict'])\nmodel.load_state_dict(checkpoint['state_dict'])\nprint(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(checkpoint_path, checkpoint['epoch']))", "=> loading checkpoint\n=> loaded checkpoint '../trained_models/pretrained/model3D_1/model_best.pth.tar' (epoch 55)\n" ] ], [ [ "### 4. Load data", "_____no_output_____" ] ], [ [ "# Center crop videos during evaluation\ntransform_eval_pre = ComposeMix([\n [Scale(config['input_spatial_size']), \"img\"],\n [torchvision.transforms.ToPILImage(), \"img\"],\n [torchvision.transforms.CenterCrop(config['input_spatial_size']), \"img\"]\n ])\n\ntransform_post = ComposeMix([\n [torchvision.transforms.ToTensor(), \"img\"],\n [torchvision.transforms.Normalize(\n mean=[0.485, 0.456, 0.406], # default values for imagenet\n std=[0.229, 0.224, 0.225]), \"img\"]\n ])\n\nval_data = VideoFolder(root=config['data_folder'],\n json_file_input=config['json_data_val'],\n json_file_labels=config['json_file_labels'],\n clip_size=config['clip_size'],\n nclips=config['nclips_val'],\n step_size=config['step_size_val'],\n is_val=True,\n transform_pre=transform_eval_pre,\n transform_post=transform_post,\n get_item_id=True,\n )\ndict_two_way = val_data.classes_dict", "_____no_output_____" ] ], [ [ "### 5. Get predictions", "_____no_output_____" ], [ "#### 5.1. 
Select random sample (or specify the index)", "_____no_output_____" ] ], [ [ "selected_indx = np.random.randint(len(val_data))\n# selected_indx = 136", "_____no_output_____" ] ], [ [ "#### 5.2 Get data in required format", "_____no_output_____" ] ], [ [ "input_data, target, item_id = val_data[selected_indx]\ninput_data = input_data.unsqueeze(0)\nprint(\"Id of the video sample = {}\".format(item_id))\nprint(\"True label --> {} ({})\".format(target, dict_two_way[target]))", "Id of the video sample = 166766\nTrue label --> 57 (Poking something so that it falls over)\n" ], [ "if config['nclips_val'] > 1:\n input_var = list(input_data.split(config['clip_size'], 2))\n for idx, inp in enumerate(input_var):\n input_var[idx] = torch.autograd.Variable(inp)\nelse:\n input_var = [torch.autograd.Variable(input_data)]", "_____no_output_____" ] ], [ [ "#### 5.3 Compute output from the model", "_____no_output_____" ] ], [ [ "output = model(input_var).squeeze(0)\noutput = torch.nn.functional.softmax(output, dim=0)", "_____no_output_____" ], [ "# compute top5 predictions\npred_prob, pred_top5 = output.data.topk(5)\npred_prob = pred_prob.numpy()\npred_top5 = pred_top5.numpy()", "_____no_output_____" ] ], [ [ "#### 5.4 Visualize predictions", "_____no_output_____" ] ], [ [ "print(\"Id of the video sample = {}\".format(item_id))\nprint(\"True label --> {} ({})\".format(target, dict_two_way[target]))\nprint(\"\\nTop-5 Predictions:\")\nfor i, pred in enumerate(pred_top5):\n print(\"Top {} :== {}. Prob := {:.2f}%\".format(i + 1, dict_two_way[pred], pred_prob[i] * 100))", "Id of the video sample = 166766\nTrue label --> 57 (Poking something so that it falls over)\n\nTop-5 Predictions:\nTop 1 :== Poking something so that it falls over. Prob := 55.23%\nTop 2 :== Tipping something over. Prob := 40.12%\nTop 3 :== Poking a stack of something so the stack collapses. Prob := 4.04%\nTop 4 :== Tipping something with something in it over, so something in it falls out. 
Prob := 0.26%\nTop 5 :== Poking something so it slightly moves. Prob := 0.12%\n" ], [ "path_to_vid = os.path.join(config[\"data_folder\"], item_id + \".webm\")\nvideo = io.open(path_to_vid, 'r+b').read()\nencoded = base64.b64encode(video)\nHTML(data='''<video alt=\"test\" controls>\n <source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\" />\n </video>'''.format(encoded.decode('ascii')))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0656135646553dededca4c2fd72f1c78f4f7ddf
8,568
ipynb
Jupyter Notebook
docs/source/examples/bars.ipynb
rafique/ipyvolume
0a230ee1923aab245f4abd38c8cf9b9b6c5386cc
[ "MIT" ]
1,784
2017-01-24T08:20:07.000Z
2022-02-10T21:19:43.000Z
docs/source/examples/bars.ipynb
rafique/ipyvolume
0a230ee1923aab245f4abd38c8cf9b9b6c5386cc
[ "MIT" ]
378
2017-02-17T17:14:16.000Z
2022-02-07T21:06:38.000Z
docs/source/examples/bars.ipynb
rafique/ipyvolume
0a230ee1923aab245f4abd38c8cf9b9b6c5386cc
[ "MIT" ]
232
2017-01-24T08:20:35.000Z
2022-01-30T04:08:48.000Z
24.410256
212
0.531279
[ [ [ "# Bar charts\nThis is 'abusing' the scatter object to create a 3d bar chart", "_____no_output_____" ] ], [ [ "import ipyvolume as ipv\nimport numpy as np", "_____no_output_____" ], [ "# set up data similar to animation notebook\n\nu_scale = 10\nNx, Ny = 30, 15\nu = np.linspace(-u_scale, u_scale, Nx)\nv = np.linspace(-u_scale, u_scale, Ny)\nx, y = np.meshgrid(u, v, indexing='ij')\nr = np.sqrt(x**2+y**2)\nx = x.flatten()\ny = y.flatten()\nr = r.flatten()\n\ntime = np.linspace(0, np.pi*2, 15)\nz = np.array([(np.cos(r + t) * np.exp(-r/5)) for t in time])\nzz = z", "_____no_output_____" ], [ "fig = ipv.figure()\ns = ipv.scatter(x, 0, y, aux=zz, marker=\"sphere\")\ndx = u[1] - u[0]\ndy = v[1] - v[0]\n# make the x and z lim half a 'box' larger\nipv.xlim(-u_scale-dx/2, u_scale+dx/2)\nipv.zlim(-u_scale-dx/2, u_scale+dx/2)\nipv.ylim(-1.2, 1.2)\nipv.show()", "_____no_output_____" ] ], [ [ "We now make boxes, that fit exactly in the volume, by giving them a size of 1, in domain coordinates (so 1 unit as read of by the x-axis etc)", "_____no_output_____" ] ], [ [ "# make the size 1, in domain coordinates (so 1 unit as read of by the x-axis etc)\ns.geo = 'box'\ns.size = 1\ns.size_x_scale = fig.scales['x']\ns.size_y_scale = fig.scales['y']\ns.size_z_scale = fig.scales['z']", "_____no_output_____" ], [ "s.shader_snippets = {'size':\n 'size_vector.y = SCALE_SIZE_Y(aux_current); '\n}\n", "_____no_output_____" ] ], [ [ "Using a shader snippet (that runs on the GPU), we set the y size equal to the aux value. 
However, since the box has size 1 around the origin of (0,0,0), we need to translate it up in the y direction by 0.5.", "_____no_output_____" ] ], [ [ "s.shader_snippets = {'size':\n 'size_vector.y = SCALE_SIZE_Y(aux_current) - SCALE_SIZE_Y(0.0) ; '\n}\n\ns.geo_matrix = [dx, 0, 0, 0, 0, 1, 0, 0, 0, 0, dy, 0, 0.0, 0.5, 0, 1]", "_____no_output_____" ] ], [ [ "Since we see the boxes with negative sizes inside out, we made the material double sided", "_____no_output_____" ] ], [ [ "# since we see the boxes with negative sizes inside out, we made the material double sided\ns.material.side = \"DoubleSide\"", "_____no_output_____" ], [ "# Now also include, color, which containts rgb values\ncolor = np.array([[np.cos(r + t), 1-np.abs(z[i]), 0.1+z[i]*0] for i, t in enumerate(time)])\ncolor = np.transpose(color, (0, 2, 1)) # flip the last axes\ns.color = color", "_____no_output_____" ], [ "ipv.animation_control(s, interval=200)", "_____no_output_____" ] ], [ [ "\n# Spherical bar charts", "_____no_output_____" ] ], [ [ "# Create spherical coordinates\nu = np.linspace(0, 1, Nx)\nv = np.linspace(0, 1, Ny)\nu, v = np.meshgrid(u, v, indexing='ij')\nphi = u * 2 * np.pi\ntheta = v * np.pi\nradius = 1\nxs = radius * np.cos(phi) * np.sin(theta)\nys = radius * np.sin(phi) * np.sin(theta)\nzs = radius * np.cos(theta)\nxs = xs.flatten()\nys = ys.flatten()\nzs = zs.flatten()\n", "_____no_output_____" ], [ "fig = ipv.figure()\n# we use the coordinates as the normals, and thus direction\ns = ipv.scatter(xs, ys, zs, vx=xs, vy=ys, vz=zs, aux=zz, color=color, marker=\"cylinder_hr\")\nipv.xyzlim(2)\nipv.show()", "_____no_output_____" ], [ "ipv.animation_control(s, interval=200)", "_____no_output_____" ], [ "import bqplot\n# the aux range is from -1 to 1, but if we put 0 as min, negative values will go inside\n# the max determines the 'height' of the bars\naux_scale = bqplot.LinearScale(min=0, max=5)\ns.aux_scale = aux_scale", "_____no_output_____" ], [ "s.shader_snippets = {'size':\n '''float 
sc = (SCALE_AUX(aux_current) - SCALE_AUX(0.0)); size_vector.y = sc;\n '''}\ns.material.side = \"DoubleSide\"\ns.size = 2\ns.geo_matrix = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0.0, 0.5, 0, 1]", "_____no_output_____" ], [ "ipv.style.box_off()\nipv.style.axes_off()", "_____no_output_____" ] ], [ [ "[screenshot](screenshot/bars.gif)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d0656615ecaa514ddd2d8cc5cd2dcc39ef4a8a98
95,247
ipynb
Jupyter Notebook
Reinforcement_Learning_UCB_ThompsonSampling.ipynb
tourloukisg/ReinforcementLearning_UCB_ThompsonSampling
479d88c9384b567bc75f6f0267fc3e6c432574c2
[ "MIT" ]
null
null
null
Reinforcement_Learning_UCB_ThompsonSampling.ipynb
tourloukisg/ReinforcementLearning_UCB_ThompsonSampling
479d88c9384b567bc75f6f0267fc3e6c432574c2
[ "MIT" ]
null
null
null
Reinforcement_Learning_UCB_ThompsonSampling.ipynb
tourloukisg/ReinforcementLearning_UCB_ThompsonSampling
479d88c9384b567bc75f6f0267fc3e6c432574c2
[ "MIT" ]
null
null
null
176.710575
38,104
0.870012
[ [ [ "# ReinforcementLearning: a)UCB, b)ThompsonSampling\n\n**--------------------------------------------------------------------------------------------------------------------------**\n**--------------------------------------------------------------------------------------------------------------------------**\n**--------------------------------------------------------------------------------------------------------------------------**\n**---------------------------------------------------**\n\n\n**STRUCTURE**\n\n*In this notebook, the use of two models (**Part A**: UCB and **Part B**: Thompson Sampling) for an online advertising (Click-through rate) case study is demonstrated. Both models are part of Reinforcement Learning (RL) which is a machine learning category that is focused on different types of rewards depending on the actions taken at each step of the learning process. RL algorithms are capable of learning based on their interactions with the environment, where a reward is given each time the correct decision has been taken, in contrast to the supervised ML models where the presence of labels is required.*\n\n*For this demonstration, a dataset has been generated to represent 9 web advertisements of a product on its columns(dataset features) and the user selections (dataset rows). This dataset is based on the assumption that every time a user visits this web page, a different advertisement (ADV1 - ADV9) is displayed. The goal is to apply a Reinforcement Learning algorithm that will try to learn as quickly as possible which advertisement is selected the most (click-through rate) so as to be presented when users visit the site. Initially, the models display different advertisements to each user but as the algorithms gain more information with respect to the users selections (clicks), the advertisement that leads to the highest reward is chosen to be diplayed. 
The difference between the 'Upper Confidence Bound' (UCB) and the 'Thompson Sampling' algorithm lies in the selection process of the next advertisement that is to be displayed. UCB is a deterministic model, whereas Thompson Sampling is based on random variation (probabilistic model). In order to evaluate their ability to choose the advertisement with the highest conversion rate for different number of samples (users), the total reward for each model is provided, together with plots presenting the number of times each advertisement has been displayed at the web page.*\n\n\n\n", "_____no_output_____" ] ], [ [ "# Importing the libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nimport random\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "# Creating the dataset by generating random values(0 & 1) with different probabilities for each 'Adv' \n# Len.Dataset=20000\nnp.random.seed(0)\ndataset={'Adv1':np.random.choice(2, 20000,p=[0.6,0.4]),\n'Adv2':np.random.choice(2, 20000,p=[0.65,0.35]),\n'Adv3':np.random.choice(2, 20000,p=[0.44,0.56]),\n'Adv4':np.random.choice(2, 20000,p=[0.6,0.4]),\n'Adv5':np.random.choice(2, 20000,p=[0.50,0.50]),\n'Adv6':np.random.choice(2, 20000,p=[0.49,0.51]),\n'Adv7':np.random.choice(2, 20000,p=[0.4,0.6]),\n'Adv8':np.random.choice(2, 20000,p=[0.52,0.48]),\n'Adv9':np.random.choice(2, 20000,p=[0.47,0.53])}\ndata=pd.DataFrame(data=dataset)", "_____no_output_____" ], [ "# Dataset-First ten records\ndata.head(10)", "_____no_output_____" ] ], [ [ "## UCB", "_____no_output_____" ] ], [ [ "#Upper Confidence Bound Algorithm\ndef ucb_rewards(Users_Num):\n #Total Number of Advertisements\n Ad_Num=9\n #List of advertisements that are selected by the algorithm based on the user clicks at each step (initially empty)\n Ad_to_Display=[]\n # Count how many times each advertisement is selected\n Ad_Cnt_Selection=[0]*Ad_Num\n # For each advertisement compute sum of its rewards 
(initially empty)\n Ad_Rewards=[0]*Ad_Num\n # Total number of rewards (initially zero)\n Ad_Total_Rewards=0\n \n for x in range(1,Users_Num+1): \n Ad=0\n UCB_max=0\n for j in range(0,Ad_Num):\n if Ad_Cnt_Selection[j]>0:\n Ad_Avg_Reward=Ad_Rewards[j]/Ad_Cnt_Selection[j]\n UCB= Ad_Avg_Reward + np.sqrt(3*np.log(x)/(2*Ad_Cnt_Selection[j]))\n else:# The purpose of the else statement is to ensure that all Ads are selected (in order to determine the UCB)\n UCB=1e309 \n if UCB>UCB_max:\n UCB_max=UCB\n Ad=j\n Ad_to_Display.append(Ad)\n Ad_Cnt_Selection[Ad]+=1\n Ad_Rewards[Ad]+=data.values[x-1,Ad]\n Ad_Total_Rewards+=data.values[x-1,Ad]\n return Ad_to_Display,Ad_Total_Rewards\n \n \n\n \n ", "_____no_output_____" ], [ "# The algorithm is to be executed for different samples, whose number progressively increases, so as to observe how many\n# samples were required for the model to be able to identify clearly the Ad with the highest conversion rate\nselected_Ad_2000=ucb_rewards(Users_Num=2000)\nselected_Ad_5000=ucb_rewards(Users_Num=5000)\nselected_Ad_10000=ucb_rewards(Users_Num=10000)\nselected_Ad_20000=ucb_rewards(Users_Num=20000)", "_____no_output_____" ], [ "# Conversion to pandas dataframe\ndf_selected_Ad_2000=pd.DataFrame(data=selected_Ad_2000[0],columns=['Advertisements - Users:2000'])\ndf_selected_Ad_5000=pd.DataFrame(data=selected_Ad_5000[0],columns=['Advertisements - Users:5000'])\ndf_selected_Ad_10000=pd.DataFrame(data=selected_Ad_10000[0],columns=['Advertisements - Users:10000'])\ndf_selected_Ad_20000=pd.DataFrame(data=selected_Ad_20000[0],columns=['Advertisements - Users:20000'])", "_____no_output_____" ], [ "# As it can be observed, the model managed to identify clearly the Ad with the highest conversion rate at the first 10000\n# samples, with good performance at the first 2000 & 5000 samples as well\nfig,axs=plt.subplots(2,2,figsize=(14,8))\nsns.countplot(data=df_selected_Ad_2000, x=\"Advertisements - 
Users:2000\",label='Users:2000',ax=axs[0,0])\nsns.countplot(data=df_selected_Ad_5000, x=\"Advertisements - Users:5000\",label='Users:5000',ax=axs[0,1])\nsns.countplot(data=df_selected_Ad_10000, x=\"Advertisements - Users:10000\",label='Users:10000',ax=axs[1,0])\nsns.countplot(data=df_selected_Ad_20000, x=\"Advertisements - Users:20000\",label='Users:20000',ax=axs[1,1])\nfor ax in axs.flat:\n fig.suptitle(\"Displayed Advertisements - Upper Confidence Bound\", fontweight='bold',fontsize=18)\n ax.set_xlabel('Advertisements',fontsize=12,fontweight='bold')\n ax.set_ylabel('Count',fontsize=12,fontweight='bold')\n ax.legend()\n ax.figure.tight_layout(pad=2);\n ", "_____no_output_____" ] ], [ [ "## Thompson Sampling", "_____no_output_____" ] ], [ [ "#Thompson Sampling Algorithm\ndef TSampling_rewards(Users_Num):\n #Total Number of Advertisements\n Ad_Num=9\n #List of advertisements that are selected by the algorithm based on the user clicks at each step (initially empty)\n Ad_to_Display=[]\n # Count each time an advertisement gets reward=1\n Ad_Count_Reward_1=[0]*Ad_Num\n # Count each time an advertisement gets reward=0\n Ad_Count_Reward_0=[0]*Ad_Num\n # Total number of rewards (initially zero)\n Ad_Total_Rewards=0\n \n for x in range(1,Users_Num+1):\n Ad=0\n draw_max=0\n for j in range(0,Ad_Num):\n draw_rndm=random.betavariate(Ad_Count_Reward_1[j]+1,Ad_Count_Reward_0[j]+1)\n if draw_rndm>draw_max:\n draw_max=draw_rndm\n Ad=j\n \n Ad_to_Display.append(Ad)\n Tsample_reward = data.values[x-1, Ad]\n if Tsample_reward == 1:\n Ad_Count_Reward_1[Ad]+= 1\n else:\n Ad_Count_Reward_0[Ad]+= 1\n Ad_Total_Rewards+= Tsample_reward\n \n \n \n return Ad_to_Display,Ad_Total_Rewards\n ", "_____no_output_____" ], [ "# The algorithm is to be executed for different samples, whose number progressively increases, so as to observe how many\n# samples were required for the model to be able to identify clearly the Ad with the highest conversion 
rate\nselect_Ad_2000=TSampling_rewards(Users_Num=2000)\nselect_Ad_5000=TSampling_rewards(Users_Num=5000)\nselect_Ad_10000=TSampling_rewards(Users_Num=10000)\nselect_Ad_20000=TSampling_rewards(Users_Num=20000)", "_____no_output_____" ], [ "# Conversion to pandas dataframe\ndf_select_Ad_2000=pd.DataFrame(data=select_Ad_2000[0],columns=['Advertisements - Users:2000'])\ndf_select_Ad_5000=pd.DataFrame(data=select_Ad_5000[0],columns=['Advertisements - Users:5000'])\ndf_select_Ad_10000=pd.DataFrame(data=select_Ad_10000[0],columns=['Advertisements - Users:10000'])\ndf_select_Ad_20000=pd.DataFrame(data=select_Ad_20000[0],columns=['Advertisements - Users:20000'])", "_____no_output_____" ], [ "# As it can be observed, the Thompson Sampling algorithm managed to outperform UCB as it has clearly identified the Ad with\n# the highest conversion rate at the first 5000 samples, with almost excellent performance at the first 2000 samples as well\nfig,axs=plt.subplots(2,2,figsize=(14,8))\nsns.countplot(data=df_select_Ad_2000, x=\"Advertisements - Users:2000\",label='Users:2000',ax=axs[0,0])\nsns.countplot(data=df_select_Ad_5000, x=\"Advertisements - Users:5000\",label='Users:5000',ax=axs[0,1])\nsns.countplot(data=df_select_Ad_10000, x=\"Advertisements - Users:10000\",label='Users:10000',ax=axs[1,0])\nsns.countplot(data=df_select_Ad_20000, x=\"Advertisements - Users:20000\",label='Users:20000',ax=axs[1,1])\n\nfor ax in axs.flat:\n fig.suptitle(\"Displayed Advertisements - Thompson Sampling\", fontweight='bold',fontsize=18)\n ax.set_xlabel('Advertisements',fontsize=12,fontweight='bold')\n ax.set_ylabel('Count',fontsize=12,fontweight='bold')\n ax.legend()\n ax.figure.tight_layout(pad=2);", "_____no_output_____" ], [ "# Total rewards for selected data samples\n\nUCB_total_rewards2000=selected_Ad_2000[1]\nUCB_total_rewards5000=selected_Ad_5000[1]\nUCB_total_rewards10000=selected_Ad_10000[1]\nUCB_total_rewards20000=selected_Ad_20000[1]\nprint('UCB Total Rewards 2000 samples: 
{}'.format(UCB_total_rewards2000))\nprint('UCB Total Rewards 5000 samples: {}'.format(UCB_total_rewards5000))\nprint('UCB Total Rewards 10000 samples: {}'.format(UCB_total_rewards10000))\nprint('UCB Total Rewards 20000 samples: {}'.format(UCB_total_rewards20000))\nprint('\\r')\nTSampling_total_rewards2000=select_Ad_2000[1]\nTSampling_total_rewards5000=select_Ad_5000[1]\nTSampling_total_rewards10000=select_Ad_10000[1]\nTSampling_total_rewards20000=select_Ad_20000[1]\nprint('TSampling Total Rewards 2000 samples: {}'.format(TSampling_total_rewards2000))\nprint('TSampling Total Rewards 5000 samples: {}'.format(TSampling_total_rewards5000))\nprint('TSampling Total Rewards 10000 samples: {}'.format(TSampling_total_rewards10000))\nprint('TSampling Total Rewards 20000 samples: {}'.format(TSampling_total_rewards20000))", "UCB Total Rewards 2000 samples: 1082\nUCB Total Rewards 5000 samples: 2738\nUCB Total Rewards 10000 samples: 5588\nUCB Total Rewards 20000 samples: 11402\n\r\nTSampling Total Rewards 2000 samples: 1148\nTSampling Total Rewards 5000 samples: 2937\nTSampling Total Rewards 10000 samples: 5904\nTSampling Total Rewards 20000 samples: 11954\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d0656b08cceedda933f354481009e14208fbf272
4,950
ipynb
Jupyter Notebook
notebooks/index.ipynb
charelstoncrabb/emukit
d8cbb655849e2d235323b15dc61b4d68aacd35a6
[ "Apache-2.0" ]
272
2018-09-18T11:56:37.000Z
2020-10-10T22:21:25.000Z
notebooks/index.ipynb
charelstoncrabb/emukit
d8cbb655849e2d235323b15dc61b4d68aacd35a6
[ "Apache-2.0" ]
278
2018-09-19T15:38:50.000Z
2020-10-14T13:45:24.000Z
notebooks/index.ipynb
charelstoncrabb/emukit
d8cbb655849e2d235323b15dc61b4d68aacd35a6
[ "Apache-2.0" ]
88
2018-09-18T11:56:48.000Z
2020-09-23T13:33:55.000Z
49.009901
470
0.707879
[ [ [ "# Emukit tutorials\n\nEmukit tutorials can be added and used through the links below. The goal of each of these tutorials is to explain a particular functionality of the Emukit project. These tutorials are stand-alone notebooks that don't require any extra files and fully sit on Emukit components (apart from the creation of the model).\n\nSome tutorials have been written with the purpose of explaining some scientific concepts and can be used for learning about different topics in emulation and uncertainty quantification. Other tutorials are a small guide to describe some feature of the library.\n\nAnother great resource to learn Emukit are the [examples](../emukit/examples) which are more elaborated modules focused either on the implementation of a new method with Emukit components or on the analysis and solution of some specific problem.", "_____no_output_____" ], [ "### Getting Started\n\nTutorials in this section will get you up and running with Emukit as quickly as possible.\n\n* [5 minutes introduction to Emukit](Emukit-tutorial-intro.ipynb)\n* [Philosophy and Basic use of the library](Emukit-tutorial-basic-use-of-the-library.ipynb)", "_____no_output_____" ], [ "### Scientific tutorials\n\nTutorials in this section will teach you about the theoretical foundations of surrogate optimization using Emukit.\n* [Introduction to Bayesian optimization](Emukit-tutorial-Bayesian-optimization-introduction.ipynb)\n* [Introduction to multi-fidelity Gaussian processes](Emukit-tutorial-multi-fidelity.ipynb)\n* [Introduction to sensitivity analysis](Emukit-tutorial-sensitivity-montecarlo.ipynb)\n* [Introduction to Bayesian Quadrature](Emukit-tutorial-Bayesian-quadrature-introduction.ipynb)\n* [Introduction to Experimental Design](Emukit-tutorial-experimental-design-introduction.ipynb)", "_____no_output_____" ], [ "### Features tutorials\n\nTutorials in this section will give you code snippets and explanations of various practical features included in the Emukit 
project.\n* [Bayesian optimization with external evaluation of the objective](Emukit-tutorial-bayesian-optimization-external-objective-evaluation.ipynb)\n* [Bayesian optimization with context variables](Emukit-tutorial-bayesian-optimization-context-variables.ipynb)\n* [Learn how to to combine an acquisition function (entropy search) with a multi-source (fidelity) Gaussian process](Emukit-tutorial-multi-fidelity-bayesian-optimization.ipynb)\n* [How to benchmark several Bayesian optimization methods with Emukit](Emukit-tutorial-bayesian-optimization-benchmark.ipynb)\n* [How to perform Bayesian optimization with non-linear constraints](Emukit-tutorial-constrained-optimization.ipynb)\n* [Bayesian optimization integrating the hyper-parameters of the model](Emukit-tutorial-bayesian-optimization-integrating-model-hyperparameters.ipynb)\n* [How to use custom model](Emukit-tutorial-custom-model.ipynb)\n* [How to select neural network hyperparameters: categorical variables in Emukit](Emukit-tutorial-select-neural-net-hyperparameters.ipynb)\n* [How to parallelize external objective function evaluations in Bayesian optimization](Emukit-tutorial-parallel-eval-of-obj-fun.ipynb)", "_____no_output_____" ], [ "## Contribution guide\n\nCommunity contributions are vital to the success of any open source project. [Tutorials](Emukit-tutorial-how-to-write-a-notebook.ipynb) and [examples](https://github.com/emukit/emukit/tree/main/emukit/examples) are a great way to spread what you have learned about Emukit across the community and an excellent way to showcase new features. If you want to contribute with a new tutorial please follow [these steps](Emukit-tutorial-how-to-write-a-notebook.ipynb).\n\nWe also welcome feedback, so if there is any aspect of Emukit that we can improve, please [raise an issue](https://github.com/EmuKit/emukit/issues/new)!", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0656b5a97c09114c4b20b0dcfd37afbb715f263
104,265
ipynb
Jupyter Notebook
SingleIRdetection/notebooks/example_resonance_class.ipynb
biqute/QTLab2122
4d53d4c660bb5931615d8652e698f6d689a4dead
[ "MIT" ]
3
2021-11-30T18:41:11.000Z
2021-12-12T12:27:14.000Z
SingleIRdetection/notebooks/example_resonance_class.ipynb
biqute/QTLab2122
4d53d4c660bb5931615d8652e698f6d689a4dead
[ "MIT" ]
null
null
null
SingleIRdetection/notebooks/example_resonance_class.ipynb
biqute/QTLab2122
4d53d4c660bb5931615d8652e698f6d689a4dead
[ "MIT" ]
null
null
null
256.810345
29,924
0.871126
[ [ [ "import sys\nsys.path.insert(1, '../')\nimport resonance", "_____no_output_____" ], [ "res_obj = resonance.ResonanceKid(filename=\"../data/resonance01.txt\")", "_____no_output_____" ], [ "res_obj.plot_fit()", "No fit found: doing it now\n" ], [ "res_obj.fit()", "_____no_output_____" ], [ "print(\"Last fit Chi2: \", res_obj.chi2)", "Last fit Chi2: 0.3265423634975578\n" ], [ "res_obj.plot_phase()", "_____no_output_____" ], [ "res_obj.plot_amp()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
d06578db06221a89b7dc540bc849161d7d99a2a2
18,939
ipynb
Jupyter Notebook
4-Science-case-studies/1-Computing-orbits-for-Gaia-stars.ipynb
CCADynamicsGroup/SummerSchoolWorkshops
b7f2f2cd049eb21c7b2220e424e67e466c5ba106
[ "MIT" ]
5
2021-07-09T00:18:32.000Z
2022-02-21T16:44:15.000Z
4-Science-case-studies/1-Computing-orbits-for-Gaia-stars.ipynb
CCADynamicsGroup/SummerSchoolWorkshops
b7f2f2cd049eb21c7b2220e424e67e466c5ba106
[ "MIT" ]
7
2021-06-28T14:04:40.000Z
2021-07-08T13:16:09.000Z
4-Science-case-studies/1-Computing-orbits-for-Gaia-stars.ipynb
CCADynamicsGroup/SummerSchoolWorkshops
b7f2f2cd049eb21c7b2220e424e67e466c5ba106
[ "MIT" ]
4
2021-09-24T21:48:58.000Z
2022-02-21T16:44:59.000Z
29.047546
662
0.584667
[ [ [ "%run ../setup/nb_setup\n%matplotlib inline", "_____no_output_____" ] ], [ [ "# Compute a Galactic orbit for a star using Gaia data\n\nAuthor(s): Adrian Price-Whelan\n\n\n## Learning goals\n\nIn this tutorial, we will retrieve the sky coordinates, astrometry, and radial velocity for a star — [Kepler-444](https://en.wikipedia.org/wiki/Kepler-444) — and compute its orbit in the default Milky Way mass model implemented in Gala. We will compare the orbit of Kepler-444 to the orbit of the Sun and a random sample of nearby stars.\n\n\n### Notebook Setup and Package Imports", "_____no_output_____" ] ], [ [ "import astropy.coordinates as coord\nimport astropy.units as u\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom pyia import GaiaData\n\n# Gala\nimport gala.dynamics as gd\nimport gala.potential as gp", "_____no_output_____" ] ], [ [ "## Define a Galactocentric Coordinate Frame\n\nWe will start by defining a Galactocentric coordinate system using `astropy.coordinates`. We will adopt the latest parameter set assumptions for the solar Galactocentric position and velocity as implemented in Astropy, but note that these parameters are customizable by passing parameters into the `Galactocentric` class below (e.g., you could change the sun-galactic center distance by setting `galcen_distance=...`).", "_____no_output_____" ] ], [ [ "with coord.galactocentric_frame_defaults.set(\"v4.0\"):\n galcen_frame = coord.Galactocentric()\ngalcen_frame", "_____no_output_____" ] ], [ [ "## Define the Solar Position and Velocity", "_____no_output_____" ], [ "In this coordinate system, the sun is along the $x$-axis (at a negative $x$ value), and the Galactic rotation at this position is in the $+y$ direction. 
The 3D position of the sun is therefore given by:", "_____no_output_____" ] ], [ [ "sun_xyz = u.Quantity(\n [-galcen_frame.galcen_distance, 0 * u.kpc, galcen_frame.z_sun] # x,y,z\n)", "_____no_output_____" ] ], [ [ "We can combine this with the solar velocity vector (defined in the `astropy.coordinates.Galactocentric` frame) to define the sun's phase-space position, which we will use as initial conditions shortly to compute the orbit of the Sun:", "_____no_output_____" ] ], [ [ "sun_vxyz = galcen_frame.galcen_v_sun\nsun_vxyz", "_____no_output_____" ], [ "sun_w0 = gd.PhaseSpacePosition(pos=sun_xyz, vel=sun_vxyz)", "_____no_output_____" ] ], [ [ "To compute the sun's orbit, we need to specify a mass model for the Galaxy. Here, we will use the default Milky Way mass model implemented in Gala, which is defined in detail in the Gala documentation: [Defining a Milky Way model](define-milky-way-model.html). Here, we will initialize the potential model with default parameters:", "_____no_output_____" ] ], [ [ "mw_potential = gp.MilkyWayPotential()\nmw_potential", "_____no_output_____" ] ], [ [ "This potential is composed of four mass components meant to represent simple models of the different structural components of the Milky Way:", "_____no_output_____" ] ], [ [ "for k, pot in mw_potential.items():\n print(f\"{k}: {pot!r}\")", "_____no_output_____" ] ], [ [ "With a potential model for the Galaxy and initial conditions for the sun, we can now compute the Sun's orbit using the default integrator (Leapfrog integration): We will compute the orbit for 4 Gyr, which is about 16 orbital periods.", "_____no_output_____" ] ], [ [ "sun_orbit = mw_potential.integrate_orbit(sun_w0, dt=0.5 * u.Myr, t1=0, t2=4 * u.Gyr)", "_____no_output_____" ] ], [ [ "Let's plot the Sun's orbit in 3D to get a feel for the geometry of the orbit:", "_____no_output_____" ] ], [ [ "fig, ax = sun_orbit.plot_3d()\n\nlim = (-12, 12)\nax.set(xlim=lim, ylim=lim, zlim=lim)", "_____no_output_____" ] ], [ [ "## 
Retrieve Gaia Data for Kepler-444", "_____no_output_____" ], [ "As a comparison, we will compute the orbit of the exoplanet-hosting star \"Kepler-444.\" To get Gaia data for this star, we first have to retrieve its sky coordinates so that we can do a positional cross-match query on the Gaia catalog. We can retrieve the sky position of Kepler-444 from Simbad using the `SkyCoord.from_name()` classmethod, which queries Simbad under the hood to resolve the name:", "_____no_output_____" ] ], [ [ "star_sky_c = coord.SkyCoord.from_name(\"Kepler-444\")\nstar_sky_c", "_____no_output_____" ] ], [ [ "We happen to know a priori that Kepler-444 has a large proper motion, so the sky position reported by Simbad could be off from the Gaia sky position (epoch=2016) by many arcseconds. To run and retrieve the Gaia data, we will use the [pyia](http://pyia.readthedocs.io/) package: We can pass in an ADQL query, which `pyia` uses to query the Gaia science archive using `astroquery`, and returns the data as a `pyia.GaiaData` object. To run the query, we will do a sky position cross-match with a large positional tolerance by setting the cross-match radius to 15 arcseconds, but we will take the brightest cross-matched source within this region as our match:", "_____no_output_____" ] ], [ [ "star_gaia = GaiaData.from_query(\n f\"\"\"\n SELECT TOP 1 * FROM gaiaedr3.gaia_source\n WHERE 1=CONTAINS(\n POINT('ICRS', {star_sky_c.ra.degree}, {star_sky_c.dec.degree}),\n CIRCLE('ICRS', ra, dec, {(15*u.arcsec).to_value(u.degree)})\n )\n ORDER BY phot_g_mean_mag\n \"\"\"\n)\nstar_gaia", "_____no_output_____" ] ], [ [ "We will assume (and hope!) 
that this source is Kepler-444, but we know that it is fairly bright compared to a typical Gaia source, so we should be safe.\n\nWe can now use the returned `pyia.GaiaData` object to retrieve an astropy `SkyCoord` object with all of the position and velocity measurements taken from the Gaia archive record for this source:", "_____no_output_____" ] ], [ [ "star_gaia_c = star_gaia.get_skycoord()\nstar_gaia_c", "_____no_output_____" ] ], [ [ "To compute this star's Galactic orbit, we need to convert its observed, Heliocentric (actually solar system barycentric) data into the Galactocentric coordinate frame we defined above. To do this, we will use the `astropy.coordinates` transformation framework using the `.transform_to()` method, and we will pass in the `Galactocentric` coordinate frame we defined above:", "_____no_output_____" ] ], [ [ "star_galcen = star_gaia_c.transform_to(galcen_frame)\nstar_galcen", "_____no_output_____" ] ], [ [ "Let's print out the Cartesian position and velocity for Kepler-444:", "_____no_output_____" ] ], [ [ "print(star_galcen.cartesian)\nprint(star_galcen.velocity)", "_____no_output_____" ] ], [ [ "Now with Galactocentric position and velocity components for Kepler-444, we can create Gala initial conditions and compute its orbit on the time grid used to compute the Sun's orbit above:", "_____no_output_____" ] ], [ [ "star_w0 = gd.PhaseSpacePosition(star_galcen.data)\nstar_orbit = mw_potential.integrate_orbit(star_w0, t=sun_orbit.t)", "_____no_output_____" ] ], [ [ "We can now compare the orbit of Kepler-444 to the solar orbit we computed above. 
We will plot the two orbits in two projections: First in the $x$-$y$ plane (Cartesian positions), then in the *meridional plane*, showing the cylindrical $R$ and $z$ position dependence of the orbits:", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)\n\nsun_orbit.plot([\"x\", \"y\"], axes=axes[0])\nstar_orbit.plot([\"x\", \"y\"], axes=axes[0])\naxes[0].set_xlim(-10, 10)\naxes[0].set_ylim(-10, 10)\n\nsun_orbit.cylindrical.plot(\n [\"rho\", \"z\"],\n axes=axes[1],\n auto_aspect=False,\n labels=[\"$R$ [kpc]\", \"$z$ [kpc]\"],\n label=\"Sun\",\n)\nstar_orbit.cylindrical.plot(\n [\"rho\", \"z\"],\n axes=axes[1],\n auto_aspect=False,\n labels=[\"$R$ [kpc]\", \"$z$ [kpc]\"],\n label=\"Kepler-444\",\n)\naxes[1].set_xlim(0, 10)\naxes[1].set_ylim(-5, 5)\naxes[1].set_aspect(\"auto\")\naxes[1].legend(loc=\"best\", fontsize=15)", "_____no_output_____" ] ], [ [ "### Exercise: How does Kepler-444's orbit differ from the Sun's?\n\n- What are the guiding center radii of the two orbits? \n- What is the maximum $z$ height reached by each orbit? \n- What are their eccentricities? \n- Can you guess which star is older based on their kinematics? \n- Which star do you think has a higher metallicity?", "_____no_output_____" ], [ "### Exercise: Compute orbits for Monte Carlo sampled initial conditions using the Gaia error distribution\n\n*Hint: Use the `pyia.GaiaData.get_error_samples()` method to generate samples from the Gaia error distribution*\n\n- Generate 128 samples from the error distribution\n- Construct a `SkyCoord` object with all of these Monte Carlo samples \n- Transform the error sample coordinates to the Galactocentric frame and define Gala initial conditions (a `PhaseSpacePosition` object)\n- Compute orbits for all error samples using the same time grid we used above\n- Compute the eccentricity and $L_z$ for all samples: what is the standard deviation of the eccentricity and $L_z$ values? 
\n- With what fractional precision can we measure this star's eccentricity and $L_z$? (i.e. what is $\\textrm{std}(e) / \\textrm{mean}(e)$ and the same for $L_z$)", "_____no_output_____" ], [ "### Exercise: Comparing these orbits to the orbits of other Gaia stars\n\nRetrieve Gaia data for a set of 100 random Gaia stars within 200 pc of the sun with measured radial velocities and well-measured parallaxes using the query:\n\n SELECT TOP 100 * FROM gaiaedr3.gaia_source \n WHERE dr2_radial_velocity IS NOT NULL AND\n parallax_over_error > 10 AND\n ruwe < 1.2 AND\n parallax > 5\n ORDER BY random_index", "_____no_output_____" ] ], [ [ "# random_stars_g = ..", "_____no_output_____" ] ], [ [ "Compute orbits for these stars for the same time grid used above to compute the sun's orbit:", "_____no_output_____" ] ], [ [ "# random_stars_c = ...", "_____no_output_____" ], [ "# random_stars_galcen = ...\n# random_stars_w0 = ...", "_____no_output_____" ], [ "# random_stars_orbits = ...", "_____no_output_____" ] ], [ [ "Plot the initial (present-day) positions of all of these stars in Galactocentric Cartesian coordinates:", "_____no_output_____" ], [ "Now plot the orbits of these stars in the x-y and R-z planes:", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)\n\nrandom_stars_orbits.plot([\"x\", \"y\"], axes=axes[0])\naxes[0].set_xlim(-15, 15)\naxes[0].set_ylim(-15, 15)\n\nrandom_stars_orbits.cylindrical.plot(\n [\"rho\", \"z\"],\n axes=axes[1],\n auto_aspect=False,\n labels=[\"$R$ [kpc]\", \"$z$ [kpc]\"],\n)\n\naxes[1].set_xlim(0, 15)\naxes[1].set_ylim(-5, 5)\naxes[1].set_aspect(\"auto\")", "_____no_output_____" ] ], [ [ "Compute maximum $z$ heights ($z_\\textrm{max}$) and eccentricities for all of these orbits. Compare the Sun, Kepler-444, and this random sampling of nearby stars. Where do the Sun and Kepler-444 sit relative to the random sample of nearby stars in terms of $z_\\textrm{max}$ and eccentricity? 
(Hint: plot $z_\\textrm{max}$ vs. eccentricity and highlight the Sun and Kepler-444!) Are either of them outliers in any way?", "_____no_output_____" ] ], [ [ "# rand_zmax = ...", "_____no_output_____" ], [ "# rand_ecc = ...", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(8, 6))\nax.scatter(\n rand_ecc, rand_zmax, color=\"k\", alpha=0.4, s=14, lw=0, label=\"random nearby stars\"\n)\nax.scatter(sun_orbit.eccentricity(), sun_orbit.zmax(), color=\"tab:orange\", label=\"Sun\")\nax.scatter(\n star_orbit.eccentricity(), star_orbit.zmax(), color=\"tab:cyan\", label=\"Kepler-444\"\n)\nax.legend(loc=\"best\", fontsize=14)\nax.set_xlabel(\"eccentricity, $e$\")\nax.set_ylabel(r\"max. $z$ height, $z_{\\rm max}$ [kpc]\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d065928755487a52676e0136aa8412b7c6c37911
846,820
ipynb
Jupyter Notebook
Housing.ipynb
ManasSattiIITD/Housing-Price-Prediction
c1d47f990ac2406a06838d077ae04d99418980c0
[ "MIT" ]
null
null
null
Housing.ipynb
ManasSattiIITD/Housing-Price-Prediction
c1d47f990ac2406a06838d077ae04d99418980c0
[ "MIT" ]
null
null
null
Housing.ipynb
ManasSattiIITD/Housing-Price-Prediction
c1d47f990ac2406a06838d077ae04d99418980c0
[ "MIT" ]
null
null
null
445.460284
302,308
0.937838
[ [ [ "import os\nimport tarfile\nimport urllib", "_____no_output_____" ], [ "DOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml2/master/\"\nHOUSING_PATH = os.path.join(\"datasets\",\"housing\")\nHOUSING_URL = DOWNLOAD_ROOT + \"datasets/housing/housing.tgz\"", "_____no_output_____" ], [ "def fetch_housing_data(housing_url = HOUSING_URL,housing_path = HOUSING_PATH):\n os.makedirs(housing_path,exist_ok=True)\n tgz_path = os.path.join(housing_path,\"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()", "_____no_output_____" ], [ "fetch_housing_data()", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "def load_housing_data(housing_path = HOUSING_PATH):\n csv_path = os.path.join(housing_path,\"housing.csv\")\n return pd.read_csv(csv_path)", "_____no_output_____" ], [ "housing = load_housing_data()", "_____no_output_____" ], [ "housing.head()", "_____no_output_____" ], [ "housing.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 20640 entries, 0 to 20639\nData columns (total 10 columns):\nlongitude 20640 non-null float64\nlatitude 20640 non-null float64\nhousing_median_age 20640 non-null float64\ntotal_rooms 20640 non-null float64\ntotal_bedrooms 20433 non-null float64\npopulation 20640 non-null float64\nhouseholds 20640 non-null float64\nmedian_income 20640 non-null float64\nmedian_house_value 20640 non-null float64\nocean_proximity 20640 non-null object\ndtypes: float64(9), object(1)\nmemory usage: 1.6+ MB\n" ], [ "housing[\"ocean_proximity\"].value_counts()", "_____no_output_____" ], [ "housing.describe()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "housing.hist(bins=50, figsize=(20,15))\nplt.show()", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "def split_test_data(data,test_ratio):\n 
shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data)*test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return data.iloc[train_indices], data.iloc[test_indices]", "_____no_output_____" ], [ "train_set, test_set = split_test_data(housing,0.2)", "_____no_output_____" ], [ "len(train_set)", "_____no_output_____" ], [ "len(test_set)", "_____no_output_____" ], [ "from zlib import crc32", "_____no_output_____" ], [ "def test_set_check(identifier, test_ratio):\n return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32", "_____no_output_____" ], [ "def split_train_test_by_id(data, test_ratio, id_column):\n ids = data[id_column]\n in_test_set = ids.apply(lambda id_ : test_set_check(id_ , test_ratio))\n return data.loc[~in_test_set], data.loc[in_test_set]", "_____no_output_____" ], [ "housing_with_id = housing.reset_index()\ntrain_set, test_set = split_train_test_by_id(housing_with_id,0.2,\"index\")", "_____no_output_____" ], [ "housing_with_id[\"id\"] = housing[\"longitude\"] * 1000 + housing[\"latitude\"]\ntrain_set, test_set = split_train_test_by_id(housing_with_id,0.2,\"id\")", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "train_set, test_set = train_test_split(housing,test_size = 0.2, random_state = 42)", "_____no_output_____" ], [ "housing[\"income_cat\"] = pd.cut(housing[\"median_income\"],bins=[0.,1.5,3.0,4.5,6.,np.inf],labels=[1,2,3,4,5])", "_____no_output_____" ], [ "housing[\"income_cat\"].hist()", "_____no_output_____" ], [ "from sklearn.model_selection import StratifiedShuffleSplit", "_____no_output_____" ], [ "split = StratifiedShuffleSplit(n_splits = 1,test_size=0.2,random_state=42)\nfor train_index, test_index in split.split(housing,housing[\"income_cat\"]):\n strat_train_set = housing.loc[train_index]\n strat_test_set = housing.loc[test_index]", "_____no_output_____" ], [ 
"strat_test_set[\"income_cat\"].value_counts()/len(strat_test_set)", "_____no_output_____" ], [ "strat_train_set[\"income_cat\"].value_counts()/len(strat_train_set)", "_____no_output_____" ], [ "for set_ in (strat_train_set,strat_test_set):\n set_.drop(\"income_cat\",axis=1,inplace=True)", "_____no_output_____" ], [ "housing = strat_train_set.copy()", "_____no_output_____" ], [ "housing.plot(kind=\"scatter\",x=\"longitude\",y=\"latitude\")", "_____no_output_____" ], [ "housing.plot(kind=\"scatter\",x=\"longitude\",y=\"latitude\",alpha=0.1)", "_____no_output_____" ], [ "housing.plot(kind=\"scatter\",x=\"longitude\",y=\"latitude\",alpha=0.4,s=housing[\"population\"]/100,label = \"population\",figsize =(10,7),c=\"median_house_value\",cmap=plt.get_cmap(\"jet\"),colorbar=True)\nplt.legend()", "_____no_output_____" ], [ "housing.plot(kind=\"scatter\",x=\"longitude\",y=\"latitude\",alpha=0.4,s=housing[\"population\"]/100,label = \"population\",figsize =(10,7),c=\"median_house_value\",cmap=plt.get_cmap(\"jet\"),colorbar=True)\nplt.legend()", "_____no_output_____" ], [ "corr_matrix = housing.corr()", "_____no_output_____" ], [ "corr_matrix[\"median_house_value\"].sort_values(ascending=False)", "_____no_output_____" ], [ "corr_matrix", "_____no_output_____" ], [ "from pandas.plotting import scatter_matrix", "_____no_output_____" ], [ "attributes = [\"median_house_value\",\"median_income\",\"total_rooms\",\"housing_median_age\"]", "_____no_output_____" ], [ "scatter_matrix(housing[attributes],figsize=(12,8))", "_____no_output_____" ], [ "housing.plot(kind=\"scatter\",x=\"median_income\",y=\"median_house_value\",alpha = 0.1)", "_____no_output_____" ], [ "housing[\"rooms_per_household\"] = housing[\"total_rooms\"]/housing[\"households\"]\nhousing[\"bedrooms_per_room\"] = housing[\"total_bedrooms\"]/housing[\"total_rooms\"]\nhousing[\"population_per_household\"] = housing[\"population\"]/housing[\"households\"]", "_____no_output_____" ], [ "corr_matrix = 
housing.corr()\ncorr_matrix[\"median_house_value\"].sort_values(ascending=False)", "_____no_output_____" ], [ "housing = strat_train_set.drop(\"median_house_value\",axis=1)\nhousing_labels = strat_train_set[\"median_house_value\"].copy()", "_____no_output_____" ], [ "# housing.dropna(subset=[\"total_bedrooms\"]) //option 1\n# housing.drop(\"total_bedrooms\",axis=1) //option 2\nmedian = housing[\"total_bedrooms\"].median() #//option 3\nhousing[\"total_bedrooms\"].fillna(median,inplace=True)", "_____no_output_____" ], [ "from sklearn.impute import SimpleImputer\nimputer = SimpleImputer(strategy=\"median\")", "_____no_output_____" ], [ "housing_num = housing.drop(\"ocean_proximity\",axis=1)", "_____no_output_____" ], [ "imputer.fit(housing_num)", "_____no_output_____" ], [ "imputer.statistics_", "_____no_output_____" ], [ "housing_num.median().values", "_____no_output_____" ], [ "X = imputer.transform(housing_num)", "_____no_output_____" ], [ "housing_tr = pd.DataFrame(X,columns=housing_num.columns,index=housing_num.index)", "_____no_output_____" ], [ "housing_cat = housing[[\"ocean_proximity\"]]\nhousing_cat.head(10)", "_____no_output_____" ], [ "from sklearn.preprocessing import OrdinalEncoder\nordinal_encoder = OrdinalEncoder()\nhousing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)\nhousing_cat_encoded[0:10]", "_____no_output_____" ], [ "ordinal_encoder.categories_", "_____no_output_____" ], [ "from sklearn.preprocessing import OneHotEncoder\ncat_encoder = OneHotEncoder()\nhousing_cat_1hot = cat_encoder.fit_transform(housing_cat)\nhousing_cat_1hot", "_____no_output_____" ], [ "housing_cat_1hot.toarray()", "_____no_output_____" ], [ "cat_encoder.categories_", "_____no_output_____" ], [ "from sklearn.base import BaseEstimator, TransformerMixin", "_____no_output_____" ], [ "rooms_ix, bedrooms_ix, population_ix, households_ix = 3,4,5,6", "_____no_output_____" ], [ "class CombinedAttributesAdder(BaseEstimator,TransformerMixin):\n def 
__init__(self,add_bedrooms_per_room=True):\n self.add_bedrooms_per_room = add_bedrooms_per_room\n \n def fit(self,X,y=None):\n return self\n \n def transform(self,X,y=None):\n rooms_per_households = X[:,rooms_ix]/X[:,households_ix]\n population_per_household = X[:,population_ix]/X[:,households_ix]\n if self.add_bedrooms_per_room:\n bedrooms_per_room = X[:,bedrooms_ix]/X[:,rooms_ix]\n return np.c_[X,rooms_per_households,population_per_household,bedrooms_per_room]\n else:\n return np.c_[X,rooms_per_households,population_per_household]\n \nattr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)\nhousing_extra_attribs = attr_adder.transform(housing.values)", "_____no_output_____" ], [ "from sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nnum_pipeine = Pipeline([('imputer',SimpleImputer(strategy=\"median\")),(\"attribs_adder\",CombinedAttributesAdder()),(\"std_scaler\",StandardScaler())])\nhousing_num_tr = num_pipeine.fit_transform(housing_num)", "_____no_output_____" ], [ "from sklearn.compose import ColumnTransformer\nnum_attribs = list(housing_num)\ncat_attribs = [\"ocean_proximity\"]\nfull_pipeline = ColumnTransformer([(\"num\",num_pipeine,num_attribs),(\"cat\",OneHotEncoder(),cat_attribs)])\nhousing_prepared = full_pipeline.fit_transform(housing)", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression\nlin_reg = LinearRegression()\nlin_reg.fit(housing_prepared, housing_labels)", "_____no_output_____" ], [ "some_data = housing.iloc[:5]\nsome_labels = housing_labels.iloc[:5]\nsome_data_prepared = full_pipeline.transform(some_data)\nprint(\"Predictions:\",lin_reg.predict(some_data_prepared))\nprint(\"Labels:\",list(some_labels))", "Predictions: [210644.60459286 317768.80697211 210956.43331178 59218.98886849\n 189747.55849879]\nLabels: [286600.0, 340600.0, 196900.0, 46300.0, 254500.0]\n" ], [ "from sklearn.metrics import mean_squared_error\nhousing_predictions = 
lin_reg.predict(housing_prepared)\nlin_mse = mean_squared_error(housing_labels,housing_predictions)\nlin_rmse = np.sqrt(lin_mse)\nprint(lin_rmse)", "68628.19819848922\n" ], [ "from sklearn.model_selection import cross_val_score\nfrom sklearn.tree import DecisionTreeRegressor\ntree_reg = DecisionTreeRegressor()\ntree_reg.fit(housing_prepared,housing_labels)\nhousing_predictions = tree_reg.predict(housing_prepared)\ntree_mse = mean_squared_error(housing_labels, housing_predictions)\ntree_rmse = np.sqrt(tree_mse)\ntree_rmse", "_____no_output_____" ], [ "scores = cross_val_score(tree_reg, housing_prepared,housing_labels,scoring=\"neg_mean_squared_error\",cv=10)\ntree_rmse_scores = np.sqrt(-scores)", "_____no_output_____" ], [ "def display_scores(scores):\n print(\"Scores:\",scores)\n print(\"Mean:\",scores.mean())\n print(\"Standard deviation:\",scores.std())\ndisplay_scores(tree_rmse_scores)", "Scores: [68264.58632794 66832.02566438 70984.75467681 69766.2729475\n 71429.50148006 75290.54803507 70553.58788847 72351.24063877\n 76364.90662642 68927.4177411 ]\nMean: 71076.48420265231\nStandard deviation: 2828.6842149131676\n" ], [ "lin_scores = cross_val_score(lin_reg,housing_prepared,housing_labels,scoring=\"neg_mean_squared_error\",cv=10)\nlin_rmse_scores = np.sqrt(-lin_scores)\ndisplay_scores(lin_rmse_scores)", "Scores: [66782.73843989 66960.118071 70347.95244419 74739.57052552\n 68031.13388938 71193.84183426 64969.63056405 68281.61137997\n 71552.91566558 67665.10082067]\nMean: 69052.46136345083\nStandard deviation: 2731.674001798348\n" ], [ "from sklearn.ensemble import RandomForestRegressor\nforest_reg = RandomForestRegressor()\nforest_reg.fit(housing_prepared,housing_labels)\nhousing_predictions = forest_reg.predict(housing_prepared)\nforest_mse = mean_squared_error(housing_labels,housing_predictions)\nforest_rmse = np.sqrt(forest_mse)\nprint(forest_rmse)\nforest_scores = 
cross_val_score(forest_reg,housing_prepared,housing_labels,scoring=\"neg_mean_squared_error\",cv=10)\nforest_rmse_scores = np.sqrt(-forest_scores)\ndisplay_scores(forest_rmse_scores)", "18616.024716061336\nScores: [49101.84709414 47627.20374801 49933.62977321 52464.36121838\n 49878.78526325 53573.7375593 48850.97811029 47606.5883449\n 52885.34168166 50151.58707814]\nMean: 50207.405987128644\nStandard deviation: 2006.6710879860314\n" ], [ "# imp+", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d065a55de9b251e2c671dba450635b2d2827dd46
213,459
ipynb
Jupyter Notebook
Image_Noise_Reduction.ipynb
Hevenicio/Image-Noise-Reduction-with-Auto-encoders-using-TensorFlow
c5bc8d911e61d2b31e0207d01c31bf616b6371c2
[ "MIT" ]
null
null
null
Image_Noise_Reduction.ipynb
Hevenicio/Image-Noise-Reduction-with-Auto-encoders-using-TensorFlow
c5bc8d911e61d2b31e0207d01c31bf616b6371c2
[ "MIT" ]
null
null
null
Image_Noise_Reduction.ipynb
Hevenicio/Image-Noise-Reduction-with-Auto-encoders-using-TensorFlow
c5bc8d911e61d2b31e0207d01c31bf616b6371c2
[ "MIT" ]
null
null
null
337.751582
52,506
0.919146
[ [ [ "# Image Denoising with Autoencoders\n\n## Introduction and Importing Libraries\n___\nNote: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All\n___", "_____no_output_____" ] ], [ [ "import numpy as np\n\nfrom tensorflow.keras.datasets import mnist\nfrom matplotlib import pyplot as plt\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Input\nfrom tensorflow.keras.callbacks import EarlyStopping, LambdaCallback\nfrom tensorflow.keras.utils import to_categorical\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Data Preprocessing\n___\nNote: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All\n___", "_____no_output_____" ] ], [ [ "(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train = x_train.astype('float')/255.\nx_test = x_test.astype('float')/255.\n\nx_train = np.reshape(x_train, (60000, 784))\nx_test = np.reshape(x_test, (10000, 784))", "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n11493376/11490434 [==============================] - 0s 0us/step\n" ] ], [ [ "## Adding Noise\n___\nNote: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All\n___", "_____no_output_____" ] ], [ [ "x_train_noisy = x_train + np.random.rand(60000, 784)*0.9\nx_test_noisy = x_test + np.random.rand(10000, 784)*0.9\nx_train_noisy = np.clip(x_train_noisy, 0., 1.)\nx_test_noisy = np.clip(x_test_noisy, 0., 1.)", "_____no_output_____" ], [ "def Plot(x, p, labels = False):\n plt.figure(figsize = (20, 2))\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.imshow(x[i].reshape(28,28), cmap = 
'viridis')\n plt.xticks([])\n plt.yticks([])\n if labels:\n plt.xlabel(np.argmax(p[i]))\n plt.show()\n \nPlot(x_train, None)", "_____no_output_____" ], [ "Plot(x_train_noisy, None)", "_____no_output_____" ] ], [ [ "## Building and Training a Classifier\n___\nNote: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All\n___", "_____no_output_____" ] ], [ [ "classifier = Sequential([\n Dense(256, activation = 'relu', input_shape = (784,)),\n Dense(256, activation = 'relu'),\n Dense(256, activation = 'softmax')\n])\n\nclassifier.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy',\n metrics = ['accuracy'])\n\nclassifier.fit(x_train, y_train, batch_size = 512, epochs = 3)", "Epoch 1/3\n118/118 [==============================] - 2s 21ms/step - loss: 0.7610 - accuracy: 0.8274\nEpoch 2/3\n118/118 [==============================] - 3s 22ms/step - loss: 0.2073 - accuracy: 0.9405\nEpoch 3/3\n118/118 [==============================] - 3s 22ms/step - loss: 0.1464 - accuracy: 0.9575\n" ], [ "loss, acc = classifier.evaluate(x_test, y_test)\n\nprint(acc)", "313/313 [==============================] - 1s 2ms/step - loss: 0.1415 - accuracy: 0.9594\n0.9593999981880188\n" ], [ "loss, acc = classifier.evaluate(x_test_noisy, y_test)\n\nprint(acc)", "313/313 [==============================] - 0s 1ms/step - loss: 11.9475 - accuracy: 0.1621\n0.16210000216960907\n" ] ], [ [ "## Building the Autoencoder\n___\nNote: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All\n___", "_____no_output_____" ] ], [ [ "input_image = Input(shape = (784,))\nencoded = Dense(64, activation = 'relu')(input_image)\ndecoded = Dense(784, activation = 'sigmoid')(encoded)\n\nautoencoder = Model(input_image, decoded)\nautoencoder.compile(loss = 
'binary_crossentropy', optimizer = 'adam')", "_____no_output_____" ] ], [ [ "## Training the Autoencoder\n___\nNote: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All\n___", "_____no_output_____" ] ], [ [ "autoencoder.fit(\n x_train_noisy, \n x_train,\n epochs = 100,\n batch_size = 512,\n validation_split = 0.2,\n verbose = False,\n callbacks = [\n EarlyStopping(monitor = 'val_loss', patience = 5),\n LambdaCallback(on_epoch_end = lambda e,l: print('{:.3f}'.format(l['val_loss']), end = ' _ '))\n ]\n)\n\nprint(' _ ')\nprint('Training is complete!')", "0.261 _ 0.236 _ 0.204 _ 0.187 _ 0.176 _ 0.166 _ 0.158 _ 0.151 _ 0.146 _ 0.141 _ 0.138 _ 0.134 _ 0.132 _ 0.129 _ 0.127 _ 0.125 _ 0.123 _ 0.122 _ 0.121 _ 0.119 _ 0.118 _ 0.117 _ 0.117 _ 0.116 _ 0.115 _ 0.115 _ 0.114 _ 0.114 _ 0.113 _ 0.113 _ 0.113 _ 0.113 _ 0.112 _ 0.112 _ 0.112 _ 0.112 _ 0.112 _ 0.112 _ 0.112 _ 0.112 _ 0.112 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.111 _ 0.110 _ 0.110 _ 0.110 _ _ \nTraining is complete!\n" ] ], [ [ "## Denoised Images\n___\nNote: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All\n___", "_____no_output_____" ] ], [ [ "preds = autoencoder.predict(x_test_noisy)", "_____no_output_____" ], [ "Plot(x_test_noisy, None)", "_____no_output_____" ], [ "Plot(preds, None)", "_____no_output_____" ], [ "loss, acc = classifier.evaluate(preds, 
y_test)\nprint(acc)", "313/313 [==============================] - 0s 2ms/step - loss: 0.2170 - accuracy: 0.9334\n0.9333999752998352\n" ] ], [ [ "## Composite Model\n___\nNote: If you are starting the notebook from this task, you can run cells from all previous tasks in the kernel by going to the top menu and then selecting Kernel > Restart and Run All\n___", "_____no_output_____" ] ], [ [ "input_image=Input(shape=(784,))\nx=autoencoder(input_image)\ny=classifier(x)\n\ndenoise_and_classfiy = Model(input_image, y)", "_____no_output_____" ], [ "predictions=denoise_and_classfiy.predict(x_test_noisy)", "_____no_output_____" ], [ "Plot(x_test_noisy, predictions, True)", "_____no_output_____" ], [ "Plot(x_test, to_categorical(y_test), True)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d065bb8afacd04cf6279f0c8667f060cb3c1685d
173,172
ipynb
Jupyter Notebook
examples/text_annotator_example.ipynb
iarfmoose/bulgarian-nlp
9860cd99d7f6605486593db736116cec5aebc8c9
[ "MIT" ]
null
null
null
examples/text_annotator_example.ipynb
iarfmoose/bulgarian-nlp
9860cd99d7f6605486593db736116cec5aebc8c9
[ "MIT" ]
null
null
null
examples/text_annotator_example.ipynb
iarfmoose/bulgarian-nlp
9860cd99d7f6605486593db736116cec5aebc8c9
[ "MIT" ]
null
null
null
36.312015
509
0.487279
[ [ [ "# Text Annotation Example", "_____no_output_____" ] ], [ [ "%pip install transformers==4.17.0 -qq\n!git clone https://github.com/AMontgomerie/bulgarian-nlp\n%cd bulgarian-nlp", "Cloning into 'bulgarian-nlp'...\nremote: Enumerating objects: 141, done.\u001b[K\nremote: Counting objects: 100% (141/141), done.\u001b[K\nremote: Compressing objects: 100% (126/126), done.\u001b[K\nremote: Total 141 (delta 62), reused 9 (delta 2), pack-reused 0\u001b[K\nReceiving objects: 100% (141/141), 70.39 KiB | 1.68 MiB/s, done.\nResolving deltas: 100% (62/62), done.\n/content/bulgarian-nlp\n" ] ], [ [ "First we create an instance of the annotator.", "_____no_output_____" ] ], [ [ "from annotation.annotators import TextAnnotator\n\nannotator = TextAnnotator()", "_____no_output_____" ] ], [ [ "Next we create an example input and pass it as an argument to the annotator.", "_____no_output_____" ] ], [ [ "example_input = 'България е член на ЕС.'\nannotations = annotator(example_input)\nannotations", "_____no_output_____" ] ], [ [ "As you can see, the raw output is a dictionary of tokens and corresponding tags. To make it more readable, let's display the tag level output as a dataframe.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ntokens = [t[\"text\"] for t in annotations[\"tokens\"]]\npos_tags = [t[\"pos\"] for t in annotations[\"tokens\"]]\nentity_tags = [t[\"entity\"] for t in annotations[\"tokens\"]]\ndf = pd.DataFrame({\"token\": tokens, \"pos\": pos_tags, \"entity\": entity_tags})\ndf", "_____no_output_____" ] ], [ [ "For more information about the meanings of the POS tags, see https://universaldependencies.org/u/pos/\n\n\nThe sentence level entities are also available in `annotations[\"entities\"]`:", "_____no_output_____" ] ], [ [ "for entity in annotations[\"entities\"]:\n print(f\"{entity['text']}: {entity['type']}\")", "България: LOCATION\nЕС: ORGANISATION\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d065c9a9bead8cd741a00dbf27dd240db3fd39b0
1,124
ipynb
Jupyter Notebook
lectures/experiments/notebook.ipynb
HumanCapitalAnalysis/ose-data-science
d5be68de68f170f8e8f11c9ed635b42f19100f87
[ "MIT" ]
62
2019-04-02T11:51:06.000Z
2020-07-11T05:28:27.000Z
lectures/experiments/notebook.ipynb
HumanCapitalAnalysis/microeconometrics
d5be68de68f170f8e8f11c9ed635b42f19100f87
[ "MIT" ]
49
2019-04-05T10:57:07.000Z
2020-07-07T20:41:19.000Z
lectures/experiments/notebook.ipynb
HumanCapitalAnalysis/ose-data-science
d5be68de68f170f8e8f11c9ed635b42f19100f87
[ "MIT" ]
46
2019-04-03T08:31:02.000Z
2020-07-13T12:43:26.000Z
24.977778
230
0.588078
[ [ [ "# Randomized experiments", "_____no_output_____" ], [ "* **Athey, S., & Imbens, G. (2017)**. [Chapter 3 - The econometrics of randomized experiments](https://www.sciencedirect.com/science/article/abs/pii/S2214658X16300174), in *Handbook of Economic Field Experiments*, 73-140.\n\n\n* **Freedman, D.A. (2008)**. [On regression adjustments to experimental data](https://www.sciencedirect.com/science/article/pii/S019688580700005X), *Advances in Applied Mathematics*, 40(2), 180-193.\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown" ] ]
d065d1aad31e0fdf29a67b061ef3df04d6cf75b4
568
ipynb
Jupyter Notebook
prev_ob_models/validation-ephyz.ipynb
fameshpatel/olfactorybulb
8d7a644b4560309ef177c0590ff73ed4c2432604
[ "MIT" ]
null
null
null
prev_ob_models/validation-ephyz.ipynb
fameshpatel/olfactorybulb
8d7a644b4560309ef177c0590ff73ed4c2432604
[ "MIT" ]
null
null
null
prev_ob_models/validation-ephyz.ipynb
fameshpatel/olfactorybulb
8d7a644b4560309ef177c0590ff73ed4c2432604
[ "MIT" ]
null
null
null
16.705882
34
0.517606
[ [ [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
d065d1ff21249d66311b08847fff699792448b1d
139,736
ipynb
Jupyter Notebook
codes/labs_lecture03/lab04_train_vanilla_nn/train_vanilla_nn_solution.ipynb
alanwuha/CE7454_2019
b5986db2ae890b940de8d37f31e83be58c826f1c
[ "MIT" ]
1
2020-05-31T17:08:21.000Z
2020-05-31T17:08:21.000Z
codes/labs_lecture03/lab04_train_vanilla_nn/train_vanilla_nn_solution.ipynb
alanwuha/CE7454_2019
b5986db2ae890b940de8d37f31e83be58c826f1c
[ "MIT" ]
null
null
null
codes/labs_lecture03/lab04_train_vanilla_nn/train_vanilla_nn_solution.ipynb
alanwuha/CE7454_2019
b5986db2ae890b940de8d37f31e83be58c826f1c
[ "MIT" ]
null
null
null
318.305239
34,216
0.932494
[ [ [ "# Lab 04 : Train vanilla neural network -- solution\n\n\n# Training a one-layer net on FASHION-MNIST", "_____no_output_____" ] ], [ [ "# For Google Colaboratory\nimport sys, os\nif 'google.colab' in sys.modules:\n from google.colab import drive\n drive.mount('/content/gdrive')\n file_name = 'train_vanilla_nn_solution.ipynb'\n import subprocess\n path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode(\"utf-8\")\n print(path_to_file)\n path_to_file = path_to_file.replace(file_name,\"\").replace('\\n',\"\")\n os.chdir(path_to_file)\n !pwd", "_____no_output_____" ], [ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom random import randint\nimport utils", "_____no_output_____" ] ], [ [ "### Download the TRAINING SET (data+labels)", "_____no_output_____" ] ], [ [ "from utils import check_fashion_mnist_dataset_exists\ndata_path=check_fashion_mnist_dataset_exists()\n\ntrain_data=torch.load(data_path+'fashion-mnist/train_data.pt')\ntrain_label=torch.load(data_path+'fashion-mnist/train_label.pt')\nprint(train_data.size())\nprint(train_label.size())", "torch.Size([60000, 28, 28])\ntorch.Size([60000])\n" ] ], [ [ "### Download the TEST SET (data only)", "_____no_output_____" ] ], [ [ "test_data=torch.load(data_path+'fashion-mnist/test_data.pt')\nprint(test_data.size())", "torch.Size([10000, 28, 28])\n" ] ], [ [ "### Make a one layer net class", "_____no_output_____" ] ], [ [ "class one_layer_net(nn.Module):\n\n def __init__(self, input_size, output_size):\n super(one_layer_net , self).__init__()\n self.linear_layer = nn.Linear( input_size, output_size , bias=False)\n \n def forward(self, x):\n y = self.linear_layer(x)\n prob = F.softmax(y, dim=1)\n return prob", "_____no_output_____" ] ], [ [ "### Build the net", "_____no_output_____" ] ], [ [ "net=one_layer_net(784,10)\nprint(net)", "one_layer_net(\n (linear_layer): Linear(in_features=784, out_features=10, 
bias=False)\n)\n" ] ], [ [ "### Take the 4th image of the test set:", "_____no_output_____" ] ], [ [ "im=test_data[4]\nutils.show(im)", "_____no_output_____" ] ], [ [ "### And feed it to the UNTRAINED network:", "_____no_output_____" ] ], [ [ "p = net( im.view(1,784)) \nprint(p)", "tensor([[0.1320, 0.0970, 0.0802, 0.0831, 0.1544, 0.0777, 0.1040, 0.1219, 0.0820,\n 0.0678]], grad_fn=<SoftmaxBackward>)\n" ] ], [ [ "### Display visually the confidence scores", "_____no_output_____" ] ], [ [ "utils.show_prob_fashion_mnist(p)", "_____no_output_____" ] ], [ [ "### Train the network (only 5000 iterations) on the train set", "_____no_output_____" ] ], [ [ "criterion = nn.NLLLoss()\noptimizer=torch.optim.SGD(net.parameters() , lr=0.01 )\n\nfor iter in range(1,5000):\n \n # choose a random integer between 0 and 59,999 \n # extract the corresponding picture and label\n # and reshape them to fit the network\n idx=randint(0, 60000-1)\n input=train_data[idx].view(1,784)\n label=train_label[idx].view(1)\n\n\n # feed the input to the net \n input.requires_grad_()\n prob=net(input) \n \n # update the weights (all the magic happens here -- we will discuss it later)\n log_prob=torch.log(prob)\n loss = criterion(log_prob, label) \n optimizer.zero_grad() \n loss.backward()\n optimizer.step()", "_____no_output_____" ] ], [ [ "### Take the 34th image of the test set:", "_____no_output_____" ] ], [ [ "im=test_data[34]\nutils.show(im)", "_____no_output_____" ] ], [ [ "### Feed it to the TRAINED net:", "_____no_output_____" ] ], [ [ "p = net( im.view(1,784)) \nprint(p)", "tensor([[2.3781e-04, 8.4407e-06, 6.5949e-03, 6.4070e-03, 5.8398e-03, 3.5421e-02,\n 5.3267e-03, 5.8309e-04, 9.3951e-01, 6.6500e-05]],\n grad_fn=<SoftmaxBackward>)\n" ] ], [ [ "### Display visually the confidence scores", "_____no_output_____" ] ], [ [ "utils.show_prob_fashion_mnist(prob)", "_____no_output_____" ] ], [ [ "### Choose image at random from the test set and see how good/bad are the predictions", 
"_____no_output_____" ] ], [ [ "# choose a picture at random\nidx=randint(0, 10000-1)\nim=test_data[idx]\n\n# diplay the picture\nutils.show(im)\n\n# feed it to the net and display the confidence scores\nprob = net( im.view(1,784)) \nutils.show_prob_fashion_mnist(prob)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d065e04dd309c44b81ca8c403d614f3f3af4b3df
38,663
ipynb
Jupyter Notebook
IA_ConvNN_classificacao_MNIST.ipynb
TerradasExatas/Introdu-o-IA-e-Machine-Learning
243a599bde920768df995f9778c78b3ab1ae9e30
[ "MIT" ]
null
null
null
IA_ConvNN_classificacao_MNIST.ipynb
TerradasExatas/Introdu-o-IA-e-Machine-Learning
243a599bde920768df995f9778c78b3ab1ae9e30
[ "MIT" ]
null
null
null
IA_ConvNN_classificacao_MNIST.ipynb
TerradasExatas/Introdu-o-IA-e-Machine-Learning
243a599bde920768df995f9778c78b3ab1ae9e30
[ "MIT" ]
null
null
null
171.835556
18,850
0.837933
[ [ [ "<a href=\"https://colab.research.google.com/github/TerradasExatas/IA_e_Machine_Learning/blob/main/IA_ConvNN_classificacao_MNIST.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "#https://machinelearningmastery.com/how-to-develop-a-convolutional-neural-network-from-scratch-for-mnist-handwritten-digit-classification/\n#https://towardsdatascience.com/convolutional-neural-networks-for-beginners-using-keras-and-tensorflow-2-c578f7b3bf25\n#https://github.com/jorditorresBCN/python-deep-learning/blob/master/08_redes_neuronales_convolucionales.ipynb", "_____no_output_____" ], [ "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#importa o dataset (as imagens da base \"mnist\")\nmnist = tf.keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n#inspeciona o data set\nprint('train imagens original shape:',train_images.shape)\nprint('train labels original shape:',train_labels.shape)\n\nplt.rcParams.update({'font.size':14})\nplt.figure(figsize=(8,4))\nfor i in range(2*4):\n plt.subplot(2,4,i+1)\n plt.xticks([]);plt.yticks([])\n plt.imshow(train_images[i],cmap=plt.cm.binary)\n plt.xlabel(str(train_labels[i]))\nplt.show()\n\n#prepara o data set\ntrain_images = train_images.reshape((60000, 28, 28, 1))\ntrain_images = train_images.astype('float32') / 255\n\ntest_images = test_images.reshape((10000, 28, 28, 1))\ntest_images = test_images.astype('float32') / 255\n\n#inspeciona os dados preparados\nprint ('train images new shape:',train_images.shape)\n\nN_class=10\n#Criando a rede neural\nmodel = tf.keras.Sequential(name='rede_IF_CNN_MNIST')\n#Adicionando as camadas\nmodel.add(tf.keras.layers.Conv2D(12, (5, 5), \n activation='relu', input_shape=(28, 28, 1)))\nmodel.add(tf.keras.layers.MaxPooling2D((2, 2)))\nmodel.add(tf.keras.layers.Conv2D(24, (3, 3), 
activation='relu'))\nmodel.add(tf.keras.layers.MaxPooling2D((2, 2)))\nmodel.add(tf.keras.layers.Flatten())\nmodel.add(tf.keras.layers.Dense(N_class, activation='softmax'))\n\n#compilando a rede\nopt=tf.keras.optimizers.Adam(learning_rate=0.002)\nmodel.compile(optimizer=opt, loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\n\n# treinando a rede\nhistory=model.fit(train_images, train_labels,epochs=8,verbose=1)\n\n#mostra a performace do treinamento da rede\nplt.figure()\nplt.subplot(2,1,1);plt.semilogy(history.history['loss'],'k')\nplt.legend(['loss'])\nplt.subplot(2,1,2);plt.plot(history.history['accuracy'],'k')\nplt.legend(['acuracia'])\nplt.tight_layout()\n\n#testando a rede com os dados de teste\npred = model.predict(test_images)\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\nprint('\\n accuracia dos dados de teste: ', test_acc)\n\n#encontra a classe de maior probabilidade\nlabels_pred=np.argmax(pred,axis=1)\n#mostra 15 resultados esperados e os alcançados lado a lado\nprint('data and pred = \\n',np.concatenate(\n (test_labels[None].T[0:15], labels_pred[None].T[0:15]),axis=1))", "train imagens original shape: (60000, 28, 28)\ntrain labels original shape: (60000,)\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
d066004660fb46385e1cd3ee466784f7a198734f
13,673
ipynb
Jupyter Notebook
test.ipynb
benuri/BlazeFace
7abbe2d0d5e7fb1b1368a8cd34db179e89f0e395
[ "MIT" ]
2
2019-11-17T13:29:31.000Z
2020-02-07T07:58:23.000Z
test.ipynb
benuri/BlazeFace
7abbe2d0d5e7fb1b1368a8cd34db179e89f0e395
[ "MIT" ]
null
null
null
test.ipynb
benuri/BlazeFace
7abbe2d0d5e7fb1b1368a8cd34db179e89f0e395
[ "MIT" ]
1
2020-05-20T21:26:46.000Z
2020-05-20T21:26:46.000Z
27.128968
185
0.431361
[ [ [ "from pandas import read_csv\nimport cv2\nimport glob\nimport os\nimport numpy as np\nimport logging\nimport coloredlogs\nlogger = logging.getLogger(__name__)\ncoloredlogs.install(level='DEBUG')\ncoloredlogs.install(level='DEBUG', logger=logger)", "_____no_output_____" ], [ "IM_EXTENSIONS = ['png', 'jpg', 'jpeg', 'bmp']", "_____no_output_____" ], [ "def read_img(img_path, img_shape=(128, 128)):\n \"\"\"\n load image file and divide by 255.\n \"\"\"\n img = cv2.imread(img_path)\n img = cv2.resize(img, img_shape)\n img = img.astype('float')\n img /= 255.\n\n return img", "_____no_output_____" ], [ "dataset_dir = './data/images/'\nlabel_path = './data/label.csv'\nbatch_size=32,\nimg_shape=(128, 128)", "_____no_output_____" ], [ "label_df = read_csv(label_path)\n# img_files = glob.glob(dataset_dir + '*')\n# img_files = [f for f in img_files if f[-3:] in IM_EXTENSIONS]\n\nlabel_idx = label_df.set_index('filename')\nimg_files = label_idx.index.unique().values", "_____no_output_____" ], [ "label_idx.loc['0_Parade_Parade_0_628.jpg'].head()", "_____no_output_____" ], [ "label_idx.iloc[0:5]", "_____no_output_____" ], [ "len(img_files)", "_____no_output_____" ], [ "def append_zero(arr):\n return np.append([0], arr)", "_____no_output_____" ], [ "# temp = label_idx.loc[img_files[0]].values[:, :4] #[0, 26, 299, 36, 315]\n# np.apply_along_axis(append_zero, 1, temp)", "_____no_output_____" ], [ "\"\"\"\ndata loader\n\nreturn image, [class_label, class_and_location_label]\n\"\"\"\n\nnumofData = len(img_files) # endwiths(png,jpg ...)\ndata_idx = np.arange(numofData)\n\nwhile True:\n batch_idx = np.random.choice(data_idx, size=batch_size, replace=False)\n\n batch_img = []\n batch_label = []\n batch_label_cls = []\n\n for i in batch_idx:\n\n img = read_img(dataset_dir + img_files[i], img_shape=img_shape)\n label_idx = label_df.set_index('filename')\n img_files = label_idx.index.unique().values\n label = label_idx.loc[img_files[i]].values\n label = np.array(label, ndmin=2)\n 
label = label[:, :4]\n cls_loc_label = np.apply_along_axis(append_zero, 1, label)\n batch_img.append(img)\n batch_label.append(label)\n batch_label_cls.append(0) # label[0:1]) ---> face\n# yield ({'input_1': np.array(batch_img, dtype=np.float32)},\n# {'clf_output': np.array(batch_label_cls, dtype=np.float32),\n# 'bb_output': np.array(batch_label, dtype=np.float32)})\n", "[[0 244 104 306 191]\n [0 317 425 381 501]\n [0 490 313 558 406]\n [0 641 90 702 157]]\n" ], [ "import tensorflow as tf", "_____no_output_____" ], [ "def dataloader(dataset_dir, label_path, batch_size=1000, img_shape=(128, 128)):\n \"\"\"\n data loader\n\n return image, [class_label, class_and_location_label]\n \"\"\"\n\n label_df = read_csv(label_path)\n label_idx = label_df.set_index('filename')\n img_files = label_idx.index.unique().values\n\n numofData = len(img_files) # endwiths(png,jpg ...)\n data_idx = np.arange(numofData)\n\n while True:\n batch_idx = np.random.choice(data_idx, size=batch_size, replace=False)\n\n batch_img = []\n batch_label = []\n batch_class = []\n\n for i in batch_idx:\n\n img = read_img(dataset_dir + img_files[i], img_shape=img_shape)\n\n label = label_idx.loc[img_files[i]].values\n label = np.array(label, ndmin=2)\n label = label[:, :4]\n\n cls_loc_label = np.apply_along_axis(append_zero, 1, label)\n\n batch_img.append(img)\n batch_label.append(cls_loc_label) # face + bb\n batch_class.append(cls_loc_label[:, 0:1]) # label[:, 0:1]) ---> face\n\n# yield {'input_1': np.array(batch_img, dtype=np.float32)}, {'clf_output': np.array(batch_class, dtype=np.float32),'bb_output': np.array(batch_label, dtype=np.float32)}\n \n yield np.array(batch_img, dtype=np.float32), [np.array(batch_class, dtype=np.float32), np.array(batch_label, dtype=np.float32)]", "_____no_output_____" ], [ "data_gen = dataloader(dataset_dir, label_path, batch_size=1, img_shape=(128, 128))", "_____no_output_____" ], [ "data = next(data_gen)", "_____no_output_____" ], [ "len(data)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06604cf8820555fcdc7074eddc394d6b5ae4bce
561,902
ipynb
Jupyter Notebook
neural_networks_and_deep_learning/Week 3/Planar data classification with one hidden layer/Planar_data_classification_with_onehidden_layer_v6b.ipynb
shengfeng/coursera_deep_learning
66e93eccb5ba9b0aacf24a3950661bd1dbcadf27
[ "MIT" ]
125
2021-01-02T03:37:27.000Z
2022-03-23T21:58:13.000Z
neural_networks_and_deep_learning/Week 3/Planar data classification with one hidden layer/Planar_data_classification_with_onehidden_layer_v6b.ipynb
shengfeng/coursera_deep_learning
66e93eccb5ba9b0aacf24a3950661bd1dbcadf27
[ "MIT" ]
2
2021-02-08T04:26:14.000Z
2021-12-31T08:41:38.000Z
neural_networks_and_deep_learning/Week 3/Planar data classification with one hidden layer/Planar_data_classification_with_onehidden_layer_v6b.ipynb
shengfeng/coursera_deep_learning
66e93eccb5ba9b0aacf24a3950661bd1dbcadf27
[ "MIT" ]
150
2021-01-02T00:27:46.000Z
2022-03-30T03:42:27.000Z
351.848466
331,108
0.914852
[ [ [ "\n<font color = \"mediumblue\">Note: Notebook was updated July 2, 2019 with bug fixes.</font>\n\n#### If you were working on the older version:\n* Please click on the \"Coursera\" icon in the top right to open up the folder directory. \n* Navigate to the folder: Week 3/ Planar data classification with one hidden layer. You can see your prior work in version 5: Planar data classification with one hidden layer v5.ipynb\n\n#### List of bug fixes and enhancements\n* Clarifies that the classifier will learn to classify regions as either red or blue.\n* compute_cost function fixes np.squeeze by casting it as a float.\n* compute_cost instructions clarify the purpose of np.squeeze.\n* compute_cost clarifies that \"parameters\" parameter is not needed, but is kept in the function definition until the auto-grader is also updated.\n* nn_model removes extraction of parameter values, as the entire parameter dictionary is passed to the invoked functions.", "_____no_output_____" ], [ "# Planar data classification with one hidden layer\n\nWelcome to your week 3 programming assignment. It's time to build your first neural network, which will have a hidden layer. You will see a big difference between this model and the one you implemented using logistic regression. \n\n**You will learn how to:**\n- Implement a 2-class classification neural network with a single hidden layer\n- Use units with a non-linear activation function, such as tanh \n- Compute the cross entropy loss \n- Implement forward and backward propagation\n", "_____no_output_____" ], [ "## 1 - Packages ##\n\nLet's first import all the packages that you will need during this assignment.\n- [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.\n- [sklearn](http://scikit-learn.org/stable/) provides simple and efficient tools for data mining and data analysis. 
\n- [matplotlib](http://matplotlib.org) is a library for plotting graphs in Python.\n- testCases provides some test examples to assess the correctness of your functions\n- planar_utils provide various useful functions used in this assignment", "_____no_output_____" ] ], [ [ "# Package imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom testCases_v2 import *\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nfrom planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets\n\n%matplotlib inline\n\nnp.random.seed(1) # set a seed so that the results are consistent", "_____no_output_____" ] ], [ [ "## 2 - Dataset ##\n\nFirst, let's get the dataset you will work on. The following code will load a \"flower\" 2-class dataset into variables `X` and `Y`.", "_____no_output_____" ] ], [ [ "X, Y = load_planar_dataset()", "_____no_output_____" ] ], [ [ "Visualize the dataset using matplotlib. The data looks like a \"flower\" with some red (label y=0) and some blue (y=1) points. Your goal is to build a model to fit this data. In other words, we want the classifier to define regions as either red or blue.", "_____no_output_____" ] ], [ [ "# Visualize the data:\nplt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);", "_____no_output_____" ] ], [ [ "You have:\n - a numpy-array (matrix) X that contains your features (x1, x2)\n - a numpy-array (vector) Y that contains your labels (red:0, blue:1).\n\nLets first get a better sense of what our data is like. \n\n**Exercise**: How many training examples do you have? In addition, what is the `shape` of the variables `X` and `Y`? \n\n**Hint**: How do you get the shape of a numpy array? 
[(help)](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.shape.html)", "_____no_output_____" ] ], [ [ "### START CODE HERE ### (≈ 3 lines of code)\nshape_X = None\nshape_Y = None\nm = X.shape[1] # training set size\n### END CODE HERE ###\n\nprint ('The shape of X is: ' + str(shape_X))\nprint ('The shape of Y is: ' + str(shape_Y))\nprint ('I have m = %d training examples!' % (m))", "The shape of X is: (2, 400)\nThe shape of Y is: (1, 400)\nI have m = 400 training examples!\n" ] ], [ [ "**Expected Output**:\n \n<table style=\"width:20%\">\n \n <tr>\n <td>**shape of X**</td>\n <td> (2, 400) </td> \n </tr>\n \n <tr>\n <td>**shape of Y**</td>\n <td>(1, 400) </td> \n </tr>\n \n <tr>\n <td>**m**</td>\n <td> 400 </td> \n </tr>\n \n</table>", "_____no_output_____" ], [ "## 3 - Simple Logistic Regression\n\nBefore building a full neural network, lets first see how logistic regression performs on this problem. You can use sklearn's built-in functions to do that. Run the code below to train a logistic regression classifier on the dataset.", "_____no_output_____" ] ], [ [ "# Train the logistic regression classifier\nclf = sklearn.linear_model.LogisticRegressionCV();\nclf.fit(X.T, Y.T);", "_____no_output_____" ] ], [ [ "You can now plot the decision boundary of these models. 
Run the code below.", "_____no_output_____" ] ], [ [ "# Plot the decision boundary for logistic regression\nplot_decision_boundary(lambda x: clf.predict(x), X, Y)\nplt.title(\"Logistic Regression\")\n\n# Print accuracy\nLR_predictions = clf.predict(X.T)\nprint ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +\n '% ' + \"(percentage of correctly labelled datapoints)\")", "Accuracy of logistic regression: 47 % (percentage of correctly labelled datapoints)\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:20%\">\n <tr>\n <td>**Accuracy**</td>\n <td> 47% </td> \n </tr>\n \n</table>\n", "_____no_output_____" ], [ "**Interpretation**: The dataset is not linearly separable, so logistic regression doesn't perform well. Hopefully a neural network will do better. Let's try this now! ", "_____no_output_____" ], [ "## 4 - Neural Network model\n\nLogistic regression did not work well on the \"flower dataset\". You are going to train a Neural Network with a single hidden layer.\n\n**Here is our model**:\n<img src=\"images/classification_kiank.png\" style=\"width:600px;height:300px;\">\n\n**Mathematically**:\n\nFor one example $x^{(i)}$:\n$$z^{[1] (i)} = W^{[1]} x^{(i)} + b^{[1]}\\tag{1}$$ \n$$a^{[1] (i)} = \\tanh(z^{[1] (i)})\\tag{2}$$\n$$z^{[2] (i)} = W^{[2]} a^{[1] (i)} + b^{[2]}\\tag{3}$$\n$$\\hat{y}^{(i)} = a^{[2] (i)} = \\sigma(z^{ [2] (i)})\\tag{4}$$\n$$y^{(i)}_{prediction} = \\begin{cases} 1 & \\mbox{if } a^{[2](i)} > 0.5 \\\\ 0 & \\mbox{otherwise } \\end{cases}\\tag{5}$$\n\nGiven the predictions on all the examples, you can also compute the cost $J$ as follows: \n$$J = - \\frac{1}{m} \\sum\\limits_{i = 0}^{m} \\large\\left(\\small y^{(i)}\\log\\left(a^{[2] (i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[2] (i)}\\right) \\large \\right) \\small \\tag{6}$$\n\n**Reminder**: The general methodology to build a Neural Network is to:\n 1. 
Define the neural network structure ( # of input units, # of hidden units, etc). \n 2. Initialize the model's parameters\n 3. Loop:\n - Implement forward propagation\n - Compute loss\n - Implement backward propagation to get the gradients\n - Update parameters (gradient descent)\n\nYou often build helper functions to compute steps 1-3 and then merge them into one function we call `nn_model()`. Once you've built `nn_model()` and learnt the right parameters, you can make predictions on new data.", "_____no_output_____" ], [ "### 4.1 - Defining the neural network structure ####\n\n**Exercise**: Define three variables:\n - n_x: the size of the input layer\n - n_h: the size of the hidden layer (set this to 4) \n - n_y: the size of the output layer\n\n**Hint**: Use shapes of X and Y to find n_x and n_y. Also, hard code the hidden layer size to be 4.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: layer_sizes\n\ndef layer_sizes(X, Y):\n \"\"\"\n Arguments:\n X -- input dataset of shape (input size, number of examples)\n Y -- labels of shape (output size, number of examples)\n \n Returns:\n n_x -- the size of the input layer\n n_h -- the size of the hidden layer\n n_y -- the size of the output layer\n \"\"\"\n ### START CODE HERE ### (≈ 3 lines of code)\n n_x = None # size of input layer\n n_h = None\n n_y = None # size of output layer\n ### END CODE HERE ###\n return (n_x, n_h, n_y)", "_____no_output_____" ], [ "X_assess, Y_assess = layer_sizes_test_case()\n(n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)\nprint(\"The size of the input layer is: n_x = \" + str(n_x))\nprint(\"The size of the hidden layer is: n_h = \" + str(n_h))\nprint(\"The size of the output layer is: n_y = \" + str(n_y))", "The size of the input layer is: n_x = 5\nThe size of the hidden layer is: n_h = 4\nThe size of the output layer is: n_y = 2\n" ] ], [ [ "**Expected Output** (these are not the sizes you will use for your network, they are just used to assess the function you've just 
coded).\n\n<table style=\"width:20%\">\n <tr>\n <td>**n_x**</td>\n <td> 5 </td> \n </tr>\n \n <tr>\n <td>**n_h**</td>\n <td> 4 </td> \n </tr>\n \n <tr>\n <td>**n_y**</td>\n <td> 2 </td> \n </tr>\n \n</table>", "_____no_output_____" ], [ "### 4.2 - Initialize the model's parameters ####\n\n**Exercise**: Implement the function `initialize_parameters()`.\n\n**Instructions**:\n- Make sure your parameters' sizes are right. Refer to the neural network figure above if needed.\n- You will initialize the weights matrices with random values. \n - Use: `np.random.randn(a,b) * 0.01` to randomly initialize a matrix of shape (a,b).\n- You will initialize the bias vectors as zeros. \n - Use: `np.zeros((a,b))` to initialize a matrix of shape (a,b) with zeros.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: initialize_parameters\n\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n \n Returns:\n params -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n \n np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.\n\n \n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = None\n b1 = None\n W2 = None\n b2 = None\n ### END CODE HERE ###\n \n assert (W1.shape == (n_h, n_x))\n assert (b1.shape == (n_h, 1))\n assert (W2.shape == (n_y, n_h))\n assert (b2.shape == (n_y, 1))\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters", "_____no_output_____" ], [ "n_x, n_h, n_y = initialize_parameters_test_case()\n\nparameters = initialize_parameters(n_x, n_h, n_y)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + 
str(parameters[\"b2\"]))", "W1 = [[-0.00416758 -0.00056267]\n [-0.02136196 0.01640271]\n [-0.01793436 -0.00841747]\n [ 0.00502881 -0.01245288]]\nb1 = [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]\nW2 = [[-0.01057952 -0.00909008 0.00551454 0.02292208]]\nb2 = [[ 0.]]\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:90%\">\n <tr>\n <td>**W1**</td>\n <td> [[-0.00416758 -0.00056267]\n [-0.02136196 0.01640271]\n [-0.01793436 -0.00841747]\n [ 0.00502881 -0.01245288]] </td> \n </tr>\n \n <tr>\n <td>**b1**</td>\n <td> [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]] </td> \n </tr>\n \n <tr>\n <td>**W2**</td>\n <td> [[-0.01057952 -0.00909008 0.00551454 0.02292208]]</td> \n </tr>\n \n\n <tr>\n <td>**b2**</td>\n <td> [[ 0.]] </td> \n </tr>\n \n</table>\n\n", "_____no_output_____" ], [ "### 4.3 - The Loop ####\n\n**Question**: Implement `forward_propagation()`.\n\n**Instructions**:\n- Look above at the mathematical representation of your classifier.\n- You can use the function `sigmoid()`. It is built-in (imported) in the notebook.\n- You can use the function `np.tanh()`. It is part of the numpy library.\n- The steps you have to implement are:\n 1. Retrieve each parameter from the dictionary \"parameters\" (which is the output of `initialize_parameters()`) by using `parameters[\"..\"]`.\n 2. Implement Forward Propagation. Compute $Z^{[1]}, A^{[1]}, Z^{[2]}$ and $A^{[2]}$ (the vector of all your predictions on all the examples in the training set).\n- Values needed in the backpropagation are stored in \"`cache`\". 
The `cache` will be given as an input to the backpropagation function.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: forward_propagation\n\ndef forward_propagation(X, parameters):\n \"\"\"\n Argument:\n X -- input data of size (n_x, m)\n parameters -- python dictionary containing your parameters (output of initialization function)\n \n Returns:\n A2 -- The sigmoid output of the second activation\n cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\"\n \"\"\"\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = None\n b1 = None\n W2 = None\n b2 = None\n ### END CODE HERE ###\n \n # Implement Forward Propagation to calculate A2 (probabilities)\n ### START CODE HERE ### (≈ 4 lines of code)\n Z1 = None\n A1 = None\n Z2 = None\n A2 = None\n ### END CODE HERE ###\n \n assert(A2.shape == (1, X.shape[1]))\n \n cache = {\"Z1\": Z1,\n \"A1\": A1,\n \"Z2\": Z2,\n \"A2\": A2}\n \n return A2, cache", "_____no_output_____" ], [ "X_assess, parameters = forward_propagation_test_case()\nA2, cache = forward_propagation(X_assess, parameters)\n\n# Note: we use the mean here just to make sure that your output matches ours. 
\nprint(np.mean(cache['Z1']) ,np.mean(cache['A1']),np.mean(cache['Z2']),np.mean(cache['A2']))", "0.262818640198 0.091999045227 -1.30766601287 0.212877681719\n" ] ], [ [ "**Expected Output**:\n<table style=\"width:50%\">\n <tr>\n <td> 0.262818640198 0.091999045227 -1.30766601287 0.212877681719 </td> \n </tr>\n</table>", "_____no_output_____" ], [ "Now that you have computed $A^{[2]}$ (in the Python variable \"`A2`\"), which contains $a^{[2](i)}$ for every example, you can compute the cost function as follows:\n\n$$J = - \\frac{1}{m} \\sum\\limits_{i = 1}^{m} \\large{(} \\small y^{(i)}\\log\\left(a^{[2] (i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[2] (i)}\\right) \\large{)} \\small\\tag{13}$$\n\n**Exercise**: Implement `compute_cost()` to compute the value of the cost $J$.\n\n**Instructions**:\n- There are many ways to implement the cross-entropy loss. To help you, we give you how we would have implemented\n$- \\sum\\limits_{i=0}^{m} y^{(i)}\\log(a^{[2](i)})$:\n```python\nlogprobs = np.multiply(np.log(A2),Y)\ncost = - np.sum(logprobs) # no need to use a for loop!\n```\n\n(you can use either `np.multiply()` and then `np.sum()` or directly `np.dot()`). \nNote that if you use `np.multiply` followed by `np.sum` the end result will be a type `float`, whereas if you use `np.dot`, the result will be a 2D numpy array. We can use `np.squeeze()` to remove redundant dimensions (in the case of single float, this will be reduced to a zero-dimension array). 
We can cast the array as a type `float` using `float()`.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: compute_cost\n\ndef compute_cost(A2, Y, parameters):\n \"\"\"\n Computes the cross-entropy cost given in equation (13)\n \n Arguments:\n A2 -- The sigmoid output of the second activation, of shape (1, number of examples)\n Y -- \"true\" labels vector of shape (1, number of examples)\n parameters -- python dictionary containing your parameters W1, b1, W2 and b2\n [Note that the parameters argument is not used in this function, \n but the auto-grader currently expects this parameter.\n Future version of this notebook will fix both the notebook \n and the auto-grader so that `parameters` is not needed.\n For now, please include `parameters` in the function signature,\n and also when invoking this function.]\n \n Returns:\n cost -- cross-entropy cost given equation (13)\n \n \"\"\"\n \n m = Y.shape[1] # number of example\n\n # Compute the cross-entropy cost\n ### START CODE HERE ### (≈ 2 lines of code)\n logprobs = None\n cost = None\n ### END CODE HERE ###\n \n cost = float(np.squeeze(cost)) # makes sure cost is the dimension we expect. \n # E.g., turns [[17]] into 17 \n assert(isinstance(cost, float))\n \n return cost", "_____no_output_____" ], [ "A2, Y_assess, parameters = compute_cost_test_case()\n\nprint(\"cost = \" + str(compute_cost(A2, Y_assess, parameters)))", "cost = 0.6930587610394646\n" ] ], [ [ "**Expected Output**:\n<table style=\"width:20%\">\n <tr>\n <td>**cost**</td>\n <td> 0.693058761... </td> \n </tr>\n \n</table>", "_____no_output_____" ], [ "Using the cache computed during forward propagation, you can now implement backward propagation.\n\n**Question**: Implement the function `backward_propagation()`.\n\n**Instructions**:\nBackpropagation is usually the hardest (most mathematical) part in deep learning. To help you, here again is the slide from the lecture on backpropagation. 
You'll want to use the six equations on the right of this slide, since you are building a vectorized implementation. \n\n<img src=\"images/grad_summary.png\" style=\"width:600px;height:300px;\">\n\n<!--\n$\\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)} } = \\frac{1}{m} (a^{[2](i)} - y^{(i)})$\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial W_2 } = \\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)} } a^{[1] (i) T} $\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial b_2 } = \\sum_i{\\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)}}}$\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial z_{1}^{(i)} } = W_2^T \\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)} } * ( 1 - a^{[1] (i) 2}) $\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial W_1 } = \\frac{\\partial \\mathcal{J} }{ \\partial z_{1}^{(i)} } X^T $\n\n$\\frac{\\partial \\mathcal{J} _i }{ \\partial b_1 } = \\sum_i{\\frac{\\partial \\mathcal{J} }{ \\partial z_{1}^{(i)}}}$\n\n- Note that $*$ denotes elementwise multiplication.\n- The notation you will use is common in deep learning coding:\n - dW1 = $\\frac{\\partial \\mathcal{J} }{ \\partial W_1 }$\n - db1 = $\\frac{\\partial \\mathcal{J} }{ \\partial b_1 }$\n - dW2 = $\\frac{\\partial \\mathcal{J} }{ \\partial W_2 }$\n - db2 = $\\frac{\\partial \\mathcal{J} }{ \\partial b_2 }$\n \n!-->\n\n- Tips:\n - To compute dZ1 you'll need to compute $g^{[1]'}(Z^{[1]})$. Since $g^{[1]}(.)$ is the tanh activation function, if $a = g^{[1]}(z)$ then $g^{[1]'}(z) = 1-a^2$. 
So you can compute \n $g^{[1]'}(Z^{[1]})$ using `(1 - np.power(A1, 2))`.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: backward_propagation\n\ndef backward_propagation(parameters, cache, X, Y):\n \"\"\"\n Implement the backward propagation using the instructions above.\n \n Arguments:\n parameters -- python dictionary containing our parameters \n cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\".\n X -- input data of shape (2, number of examples)\n Y -- \"true\" labels vector of shape (1, number of examples)\n \n Returns:\n grads -- python dictionary containing your gradients with respect to different parameters\n \"\"\"\n m = X.shape[1]\n \n # First, retrieve W1 and W2 from the dictionary \"parameters\".\n ### START CODE HERE ### (≈ 2 lines of code)\n W1 = None\n W2 = None\n ### END CODE HERE ###\n \n # Retrieve also A1 and A2 from dictionary \"cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n A1 = None\n A2 = None\n ### END CODE HERE ###\n \n # Backward propagation: calculate dW1, db1, dW2, db2. 
\n ### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above)\n dZ2 = None\n dW2 = None\n db2 = None\n dZ1 = None\n dW1 = None\n db1 = None\n ### END CODE HERE ###\n \n grads = {\"dW1\": dW1,\n \"db1\": db1,\n \"dW2\": dW2,\n \"db2\": db2}\n \n return grads", "_____no_output_____" ], [ "parameters, cache, X_assess, Y_assess = backward_propagation_test_case()\n\ngrads = backward_propagation(parameters, cache, X_assess, Y_assess)\nprint (\"dW1 = \"+ str(grads[\"dW1\"]))\nprint (\"db1 = \"+ str(grads[\"db1\"]))\nprint (\"dW2 = \"+ str(grads[\"dW2\"]))\nprint (\"db2 = \"+ str(grads[\"db2\"]))", "dW1 = [[ 0.00301023 -0.00747267]\n [ 0.00257968 -0.00641288]\n [-0.00156892 0.003893 ]\n [-0.00652037 0.01618243]]\ndb1 = [[ 0.00176201]\n [ 0.00150995]\n [-0.00091736]\n [-0.00381422]]\ndW2 = [[ 0.00078841 0.01765429 -0.00084166 -0.01022527]]\ndb2 = [[-0.16655712]]\n" ] ], [ [ "**Expected output**:\n\n\n\n<table style=\"width:80%\">\n <tr>\n <td>**dW1**</td>\n <td> [[ 0.00301023 -0.00747267]\n [ 0.00257968 -0.00641288]\n [-0.00156892 0.003893 ]\n [-0.00652037 0.01618243]] </td> \n </tr>\n \n <tr>\n <td>**db1**</td>\n <td> [[ 0.00176201]\n [ 0.00150995]\n [-0.00091736]\n [-0.00381422]] </td> \n </tr>\n \n <tr>\n <td>**dW2**</td>\n <td> [[ 0.00078841 0.01765429 -0.00084166 -0.01022527]] </td> \n </tr>\n \n\n <tr>\n <td>**db2**</td>\n <td> [[-0.16655712]] </td> \n </tr>\n \n</table> ", "_____no_output_____" ], [ "**Question**: Implement the update rule. Use gradient descent. You have to use (dW1, db1, dW2, db2) in order to update (W1, b1, W2, b2).\n\n**General gradient descent rule**: $ \\theta = \\theta - \\alpha \\frac{\\partial J }{ \\partial \\theta }$ where $\\alpha$ is the learning rate and $\\theta$ represents a parameter.\n\n**Illustration**: The gradient descent algorithm with a good learning rate (converging) and a bad learning rate (diverging). 
Images courtesy of Adam Harley.\n\n<img src=\"images/sgd.gif\" style=\"width:400;height:400;\"> <img src=\"images/sgd_bad.gif\" style=\"width:400;height:400;\">\n\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: update_parameters\n\ndef update_parameters(parameters, grads, learning_rate = 1.2):\n \"\"\"\n Updates parameters using the gradient descent update rule given above\n \n Arguments:\n parameters -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients \n \n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = None\n b1 = None\n W2 = None\n b2 = None\n ### END CODE HERE ###\n \n # Retrieve each gradient from the dictionary \"grads\"\n ### START CODE HERE ### (≈ 4 lines of code)\n dW1 = None\n db1 = None\n dW2 = None\n db2 = None\n ## END CODE HERE ###\n \n # Update rule for each parameter\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = None\n b1 = None\n W2 = None\n b2 = None\n ### END CODE HERE ###\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters", "_____no_output_____" ], [ "parameters, grads = update_parameters_test_case()\nparameters = update_parameters(parameters, grads)\n\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))", "W1 = [[-0.00643025 0.01936718]\n [-0.02410458 0.03978052]\n [-0.01653973 -0.02096177]\n [ 0.01046864 -0.05990141]]\nb1 = [[ 1.21732533e-05]\n [ 2.12263977e-05]\n [ 1.36755874e-05]\n [ 1.05251698e-05]]\nW2 = [[-0.01041081 -0.04463285 0.01758031 0.04747113]]\nb2 = [[ 0.00010457]]\n" ] ], [ [ "**Expected Output**:\n\n\n<table style=\"width:80%\">\n <tr>\n <td>**W1**</td>\n <td> [[-0.00643025 0.01936718]\n [-0.02410458 0.03978052]\n [-0.01653973 -0.02096177]\n [ 
0.01046864 -0.05990141]]</td> \n </tr>\n \n <tr>\n <td>**b1**</td>\n <td> [[ -1.02420756e-06]\n [ 1.27373948e-05]\n [ 8.32996807e-07]\n [ -3.20136836e-06]]</td> \n </tr>\n \n <tr>\n <td>**W2**</td>\n <td> [[-0.01041081 -0.04463285 0.01758031 0.04747113]] </td> \n </tr>\n \n\n <tr>\n <td>**b2**</td>\n <td> [[ 0.00010457]] </td> \n </tr>\n \n</table> ", "_____no_output_____" ], [ "### 4.4 - Integrate parts 4.1, 4.2 and 4.3 in nn_model() ####\n\n**Question**: Build your neural network model in `nn_model()`.\n\n**Instructions**: The neural network model has to use the previous functions in the right order.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: nn_model\n\ndef nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):\n \"\"\"\n Arguments:\n X -- dataset of shape (2, number of examples)\n Y -- labels of shape (1, number of examples)\n n_h -- size of the hidden layer\n num_iterations -- Number of iterations in gradient descent loop\n print_cost -- if True, print the cost every 1000 iterations\n \n Returns:\n parameters -- parameters learnt by the model. They can then be used to predict.\n \"\"\"\n \n np.random.seed(3)\n n_x = layer_sizes(X, Y)[0]\n n_y = layer_sizes(X, Y)[2]\n \n # Initialize parameters\n ### START CODE HERE ### (≈ 1 line of code)\n parameters = None\n ### END CODE HERE ###\n \n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n \n ### START CODE HERE ### (≈ 4 lines of code)\n # Forward propagation. Inputs: \"X, parameters\". Outputs: \"A2, cache\".\n A2, cache = None\n \n # Cost function. Inputs: \"A2, Y, parameters\". Outputs: \"cost\".\n cost = None\n \n # Backpropagation. Inputs: \"parameters, cache, X, Y\". Outputs: \"grads\".\n grads = None\n \n # Gradient descent parameter update. Inputs: \"parameters, grads\". 
Outputs: \"parameters\".\n parameters = None\n \n ### END CODE HERE ###\n \n # Print the cost every 1000 iterations\n if print_cost and i % 1000 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n\n return parameters", "_____no_output_____" ], [ "X_assess, Y_assess = nn_model_test_case()\nparameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=True)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))", "Cost after iteration 0: 0.692739\nCost after iteration 1000: 0.000218\nCost after iteration 2000: 0.000107\nCost after iteration 3000: 0.000071\nCost after iteration 4000: 0.000053\nCost after iteration 5000: 0.000042\nCost after iteration 6000: 0.000035\nCost after iteration 7000: 0.000030\nCost after iteration 8000: 0.000026\nCost after iteration 9000: 0.000023\nW1 = [[-0.65848169 1.21866811]\n [-0.76204273 1.39377573]\n [ 0.5792005 -1.10397703]\n [ 0.76773391 -1.41477129]]\nb1 = [[ 0.287592 ]\n [ 0.3511264 ]\n [-0.2431246 ]\n [-0.35772805]]\nW2 = [[-2.45566237 -3.27042274 2.00784958 3.36773273]]\nb2 = [[ 0.20459656]]\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:90%\">\n\n<tr> \n <td> \n **cost after iteration 0**\n </td>\n <td> \n 0.692739\n </td>\n</tr>\n\n<tr> \n <td> \n <center> $\\vdots$ </center>\n </td>\n <td> \n <center> $\\vdots$ </center>\n </td>\n</tr>\n\n <tr>\n <td>**W1**</td>\n <td> [[-0.65848169 1.21866811]\n [-0.76204273 1.39377573]\n [ 0.5792005 -1.10397703]\n [ 0.76773391 -1.41477129]]</td> \n </tr>\n \n <tr>\n <td>**b1**</td>\n <td> [[ 0.287592 ]\n [ 0.3511264 ]\n [-0.2431246 ]\n [-0.35772805]] </td> \n </tr>\n \n <tr>\n <td>**W2**</td>\n <td> [[-2.45566237 -3.27042274 2.00784958 3.36773273]] </td> \n </tr>\n \n\n <tr>\n <td>**b2**</td>\n <td> [[ 0.20459656]] </td> \n </tr>\n \n</table> ", "_____no_output_____" ], [ "### 4.5 Predictions\n\n**Question**: Use your model to predict 
by building predict().\nUse forward propagation to predict results.\n\n**Reminder**: predictions = $y_{prediction} = \\mathbb 1 \\text{{activation > 0.5}} = \\begin{cases}\n 1 & \\text{if}\\ activation > 0.5 \\\\\n 0 & \\text{otherwise}\n \\end{cases}$ \n \nAs an example, if you would like to set the entries of a matrix X to 0 and 1 based on a threshold you would do: ```X_new = (X > threshold)```", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: predict\n\ndef predict(parameters, X):\n \"\"\"\n Using the learned parameters, predicts a class for each example in X\n \n Arguments:\n parameters -- python dictionary containing your parameters \n X -- input data of size (n_x, m)\n \n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n \n # Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.\n ### START CODE HERE ### (≈ 2 lines of code)\n A2, cache = None\n predictions = None\n ### END CODE HERE ###\n \n return predictions", "_____no_output_____" ], [ "parameters, X_assess = predict_test_case()\n\npredictions = predict(parameters, X_assess)\nprint(\"predictions mean = \" + str(np.mean(predictions)))", "predictions mean = 0.666666666667\n" ] ], [ [ "**Expected Output**: \n\n\n<table style=\"width:40%\">\n <tr>\n <td>**predictions mean**</td>\n <td> 0.666666666667 </td> \n </tr>\n \n</table>", "_____no_output_____" ], [ "It is time to run the model and see how it performs on a planar dataset. 
Run the following code to test your model with a single hidden layer of $n_h$ hidden units.", "_____no_output_____" ] ], [ [ "# Build a model with a n_h-dimensional hidden layer\nparameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)\n\n# Plot the decision boundary\nplot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\nplt.title(\"Decision Boundary for hidden layer size \" + str(4))", "Cost after iteration 0: 0.693048\nCost after iteration 1000: 0.288083\nCost after iteration 2000: 0.254385\nCost after iteration 3000: 0.233864\nCost after iteration 4000: 0.226792\nCost after iteration 5000: 0.222644\nCost after iteration 6000: 0.219731\nCost after iteration 7000: 0.217504\nCost after iteration 8000: 0.219471\nCost after iteration 9000: 0.218612\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:40%\">\n <tr>\n <td>**Cost after iteration 9000**</td>\n <td> 0.218607 </td> \n </tr>\n \n</table>\n", "_____no_output_____" ] ], [ [ "# Print accuracy\npredictions = predict(parameters, X)\nprint ('Accuracy: %d' % float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100) + '%')", "Accuracy: 90%\n" ] ], [ [ "**Expected Output**: \n\n<table style=\"width:15%\">\n <tr>\n <td>**Accuracy**</td>\n <td> 90% </td> \n </tr>\n</table>", "_____no_output_____" ], [ "Accuracy is really high compared to Logistic Regression. The model has learnt the leaf patterns of the flower! Neural networks are able to learn even highly non-linear decision boundaries, unlike logistic regression. \n\nNow, let's try out several hidden layer sizes.", "_____no_output_____" ], [ "### 4.6 - Tuning hidden layer size (optional/ungraded exercise) ###\n\nRun the following code. It may take 1-2 minutes. 
You will observe different behaviors of the model for various hidden layer sizes.", "_____no_output_____" ] ], [ [ "# This may take about 2 minutes to run\n\nplt.figure(figsize=(16, 32))\nhidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]\nfor i, n_h in enumerate(hidden_layer_sizes):\n plt.subplot(5, 2, i+1)\n plt.title('Hidden Layer of size %d' % n_h)\n parameters = nn_model(X, Y, n_h, num_iterations = 5000)\n plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\n predictions = predict(parameters, X)\n accuracy = float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100)\n print (\"Accuracy for {} hidden units: {} %\".format(n_h, accuracy))", "Accuracy for 1 hidden units: 67.5 %\nAccuracy for 2 hidden units: 67.25 %\nAccuracy for 3 hidden units: 90.75 %\nAccuracy for 4 hidden units: 90.5 %\nAccuracy for 5 hidden units: 91.25 %\nAccuracy for 20 hidden units: 90.0 %\nAccuracy for 50 hidden units: 90.25 %\n" ] ], [ [ "**Interpretation**:\n- The larger models (with more hidden units) are able to fit the training set better, until eventually the largest models overfit the data. \n- The best hidden layer size seems to be around n_h = 5. Indeed, a value around here seems to fits the data well without also incurring noticeable overfitting.\n- You will also learn later about regularization, which lets you use very large models (such as n_h = 50) without much overfitting. ", "_____no_output_____" ], [ "**Optional questions**:\n\n**Note**: Remember to submit the assignment by clicking the blue \"Submit Assignment\" button at the upper-right. \n\nSome optional/ungraded questions that you can explore if you wish: \n- What happens when you change the tanh activation for a sigmoid activation or a ReLU activation?\n- Play with the learning_rate. What happens?\n- What if we change the dataset? 
(See part 5 below!)", "_____no_output_____" ], [ "<font color='blue'>\n**You've learnt to:**\n- Build a complete neural network with a hidden layer\n- Make a good use of a non-linear unit\n- Implemented forward propagation and backpropagation, and trained a neural network\n- See the impact of varying the hidden layer size, including overfitting.", "_____no_output_____" ], [ "Nice work! ", "_____no_output_____" ], [ "## 5) Performance on other datasets", "_____no_output_____" ], [ "If you want, you can rerun the whole notebook (minus the dataset part) for each of the following datasets.", "_____no_output_____" ] ], [ [ "# Datasets\nnoisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()\n\ndatasets = {\"noisy_circles\": noisy_circles,\n \"noisy_moons\": noisy_moons,\n \"blobs\": blobs,\n \"gaussian_quantiles\": gaussian_quantiles}\n\n### START CODE HERE ### (choose your dataset)\ndataset = \"noisy_moons\"\n### END CODE HERE ###\n\nX, Y = datasets[dataset]\nX, Y = X.T, Y.reshape(1, Y.shape[0])\n\n# make blobs binary\nif dataset == \"blobs\":\n Y = Y%2\n\n# Visualize the data\nplt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);", "_____no_output_____" ] ], [ [ "Congrats on finishing this Programming Assignment!\n\nReference:\n- http://scs.ryerson.ca/~aharley/neural-networks/\n- http://cs231n.github.io/neural-networks-case-study/", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
d0660534c7a12e1b6fb0cd5ba08692404db1c0d4
213,523
ipynb
Jupyter Notebook
notebook/GEFF Access.ipynb
EduardRosert/magics
2236652893cfe036af368eddde3d4e64435d510e
[ "ECL-2.0", "Apache-2.0" ]
41
2018-12-07T23:10:50.000Z
2022-02-19T03:01:49.000Z
notebook/GEFF Access.ipynb
EduardRosert/magics
2236652893cfe036af368eddde3d4e64435d510e
[ "ECL-2.0", "Apache-2.0" ]
59
2019-01-04T15:43:30.000Z
2022-03-31T09:48:15.000Z
notebook/GEFF Access.ipynb
EduardRosert/magics
2236652893cfe036af368eddde3d4e64435d510e
[ "ECL-2.0", "Apache-2.0" ]
13
2019-01-07T14:36:33.000Z
2021-09-06T14:48:36.000Z
1,227.143678
203,542
0.940887
[ [ [ "from ecmwfapi import ECMWFDataServer", "_____no_output_____" ], [ "target = \"fire.nc\"\nprint target", "fire.nc\n" ] ], [ [ "I get the data", "_____no_output_____" ] ], [ [ "server = ECMWFDataServer(url = \"https://api.ecmwf.int/v1\",\n key = \"XXXXXXXXXXXXXXXX\",\n email = \"[email protected]\") \n\nrequest = {\n \"dataset\": \"geff_reanalysis\",\n \"date\": \"2016-12-01/to/2016-12-31\",\n \"origin\": \"fwis\",\n \"param\": \"fwi\",\n \"step\": \"00\",\n \"time\": \"0000\",\n \"type\": \"an\",\n \"target\": target,\n }\n\n \nserver.retrieve(request)\n\nprint \"Data are downloaded\"", "2017-02-27 16:57:07 ECMWF API python library 1.4.1\n2017-02-27 16:57:07 ECMWF API at https://api.ecmwf.int/v1\n2017-02-27 16:57:07 Welcome Sylvie Lamy-Thepaut\n2017-02-27 16:57:07 In case of problems, please check https://software.ecmwf.int/wiki/display/WEBAPI/Troubleshooting or contact [email protected]\n2017-02-27 16:57:07 \n2017-02-27 16:57:07 Request submitted\n2017-02-27 16:57:07 Request id: 58b45a6374a7fbec2ebc983a\n2017-02-27 16:57:07 Request is queued\n2017-02-27 17:34:46 Request is active\nCalling '['tar', '-cf', '/data/data01/scratch/get-files-atls19-95e2cf679cd58ee9b4db4dd119a05a8d-8mhWsc.tar', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161201_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161202_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161203_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161204_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161205_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161206_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161207_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161208_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161209_0000_00.nc.gz', '-C', '/data/data01/scratch', 
'geff_reanalysis_an_fwis_fwi_20161210_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161211_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161212_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161213_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161214_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161215_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161216_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161217_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161218_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161219_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161220_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161221_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161222_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161223_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161224_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161225_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161226_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161227_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161228_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161229_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161230_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161231_0000_00.nc.gz']'\nProcess '['tar', '-cf', '/data/data01/scratch/get-files-atls19-95e2cf679cd58ee9b4db4dd119a05a8d-8mhWsc.tar', '-C', '/data/data01/scratch', 
'geff_reanalysis_an_fwis_fwi_20161201_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161202_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161203_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161204_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161205_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161206_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161207_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161208_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161209_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161210_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161211_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161212_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161213_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161214_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161215_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161216_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161217_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161218_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161219_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161220_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161221_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161222_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161223_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161224_0000_00.nc.gz', '-C', 
'/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161225_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161226_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161227_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161228_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161229_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161230_0000_00.nc.gz', '-C', '/data/data01/scratch', 'geff_reanalysis_an_fwis_fwi_20161231_0000_00.nc.gz']' finished\n2017-02-27 17:34:51 Request is complete\n2017-02-27 17:34:51 Transfering 1.5625 Mbytes into fire.nc\n2017-02-27 17:34:51 From http://stream.ecmwf.int/data/atls19/data/data01/scratch/get-files-atls19-95e2cf679cd58ee9b4db4dd119a05a8d-8mhWsc.tar\n2017-02-27 17:34:51 Transfer rate 4.77369 Mbytes/s\nData are downloaded\n" ], [ "import Magics.macro as magics\n#Setting the coordinates of the geographical area\nprojection = magics.mmap(subpage_map_projection = 'robinson',\n )\n\nnetcdf = magics.mnetcdf(netcdf_filename='geff_reanalysis_an_fwis_fwi_20161214_0000_00.nc', \n netcdf_value_variable = 'fwi')\n\ncontour = magics.mcont(\n contour_shade='on',\n contour_shade_method = 'area_fill',\n contour_shade_colour_direction = \"clockwise\",\n contour_shade_colour_method = \"calculate\",\n contour_shade_max_level_colour= \"red\",\n contour_shade_min_level_colour= \"blue\",\n legend=\"on\",\n contour='off',\n contour_min_level=10.\n)\n\ntitle = magics.mtext(text_lines=[\"<netcdf_info variable='fwi' attribute='title'/>\"])\nlegend = magics.mlegend( legend_display_type = \"continuous\")\n\nmagics.plot(projection, netcdf, contour, title, magics.mcoast(), legend)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0660c2fce104edc7f9595ddee9aa6d02d80c17a
32,302
ipynb
Jupyter Notebook
past-team-code/Fall2018Team1/News Articles Data/1119_article_and_bitcoin.ipynb
shun-lin/project-paradigm-chatbot
8c8a7d68d18ab04c78b5b705180aca26c428181b
[ "MIT" ]
1
2019-03-10T19:38:55.000Z
2019-03-10T19:38:55.000Z
past-team-code/Fall2018Team1/News Articles Data/1119_article_and_bitcoin.ipynb
shun-lin/project-paradigm-chatbot
8c8a7d68d18ab04c78b5b705180aca26c428181b
[ "MIT" ]
null
null
null
past-team-code/Fall2018Team1/News Articles Data/1119_article_and_bitcoin.ipynb
shun-lin/project-paradigm-chatbot
8c8a7d68d18ab04c78b5b705180aca26c428181b
[ "MIT" ]
null
null
null
39.879012
789
0.446505
[ [ [ "# Import Libraries and Packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport re\nimport datetime\nimport nltk\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import adjusted_rand_score\neng_stopwords = nltk.corpus.stopwords.words('english')\narticles=pd.read_csv(\"New_marked_news.csv\").drop('Unnamed: 0', axis = 1)\narticles.head()", "_____no_output_____" ], [ "# df_marker_1=pd.read_csv(\"markerPosi.csv\")\n# df_marker_2=pd.read_csv(\"markerNega.csv\")\n# frames=[df_marker_1,df_marker_2]\n# df_marker= pd.concat(frames, ignore_index=True)\n# df_marker.tail()", "_____no_output_____" ], [ "#df['marker']=np.zeros(len(df))\n#for i in range(len(df)-1):\n #if df['date'][i] in df_marker['Date']:\n# df['marker'][i]=1\n# else:\n# df['marker'][i]=0\n# df", "_____no_output_____" ] ], [ [ "__Standardize timestamps__", "_____no_output_____" ] ], [ [ "#temp = pd.DatetimeIndex(articles['timeStamp']) # Gather all datetime objects\n#articles['date'] = temp.date # Pull out the date from the datetime objects and assign to Date column\n#articles['time'] = temp.time # Pull out the time from the datetime objects and assign to Time column\nprint(len(articles))\narticles.tail(3)", "7101\n" ], [ "articles.contents[10]", "_____no_output_____" ] ], [ [ "__Preprocess text for NLP formulations__", "_____no_output_____" ] ], [ [ "articles.head()", "_____no_output_____" ], [ "#Clean the articles - Remove stopwords, remove punctuation, all lowercase\nimport re\nfor i in articles.index:\n text = articles.loc[i, 'contents']\n if pd.isnull(text):\n pass\n else:\n text = re.sub(r\"[^a-zA-Z]\", \" \", text)\n text = [word for word in text.split() if not word in eng_stopwords]\n text = (' '.join(text))\n text = text.lower()\n articles.loc[i, 'contents'] = text", "_____no_output_____" ] ], [ [ "__Combine cleaned articles with \"Markers\" from Time Series event detection__ ", "_____no_output_____" ] ], [ [ 
"df=articles\ndf.to_csv(\"1119_article_data_and_price_labeled_publisher.csv\")", "_____no_output_____" ] ], [ [ "___", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d06618520969f92ce8a3c89d29b4ba62f58e861e
21,394
ipynb
Jupyter Notebook
Python for Data Science and AI/w5/PY0101EN-5-2-Numpy2D.ipynb
Carlosriosch/IBM-Data-Science
1dc46dd692f5dd8fd4ccd4e6819befdce6a61fc8
[ "MIT" ]
1
2020-08-29T18:40:11.000Z
2020-08-29T18:40:11.000Z
Python for Data Science and AI/w5/PY0101EN-5-2-Numpy2D.ipynb
Carlosriosch/IBM-Data-Science
1dc46dd692f5dd8fd4ccd4e6819befdce6a61fc8
[ "MIT" ]
null
null
null
Python for Data Science and AI/w5/PY0101EN-5-2-Numpy2D.ipynb
Carlosriosch/IBM-Data-Science
1dc46dd692f5dd8fd4ccd4e6819befdce6a61fc8
[ "MIT" ]
null
null
null
22.49632
716
0.52267
[ [ [ "<a href=\"https://cognitiveclass.ai/\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/CCLog.png\" width=\"200\" align=\"center\">\n</a>", "_____no_output_____" ], [ "<h1>2D <code>Numpy</code> in Python</h1>", "_____no_output_____" ], [ "<p><strong>Welcome!</strong> This notebook will teach you about using <code>Numpy</code> in the Python Programming Language. By the end of this lab, you'll know what <code>Numpy</code> is and the <code>Numpy</code> operations.</p>", "_____no_output_____" ], [ "<h2>Table of Contents</h2>\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ul>\n <li><a href=\"create\">Create a 2D Numpy Array</a></li>\n <li><a href=\"access\">Accessing different elements of a Numpy Array</a></li>\n <li><a href=\"op\">Basic Operations</a></li>\n </ul>\n <p>\n Estimated time needed: <strong>20 min</strong>\n </p>\n</div>\n\n<hr>", "_____no_output_____" ], [ "<h2 id=\"create\">Create a 2D Numpy Array</h2>", "_____no_output_____" ] ], [ [ "# Import the libraries\n\nimport numpy as np \nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "Consider the list <code>a</code>, the list contains three nested lists **each of equal size**. ", "_____no_output_____" ] ], [ [ "# Create a list\n\na = [[11, 12, 13], [21, 22, 23], [31, 32, 33]]\na", "_____no_output_____" ] ], [ [ "We can cast the list to a Numpy Array as follow", "_____no_output_____" ] ], [ [ "# Convert list to Numpy Array\n# Every element is the same type\n\nA = np.array(a)\nA", "_____no_output_____" ] ], [ [ "We can use the attribute <code>ndim</code> to obtain the number of axes or dimensions referred to as the rank. 
", "_____no_output_____" ] ], [ [ "# Show the numpy array dimensions\n\nA.ndim", "_____no_output_____" ] ], [ [ "Attribute <code>shape</code> returns a tuple corresponding to the size or number of each dimension.", "_____no_output_____" ] ], [ [ "# Show the numpy array shape\n\nA.shape", "_____no_output_____" ] ], [ [ "The total number of elements in the array is given by the attribute <code>size</code>.", "_____no_output_____" ] ], [ [ "# Show the numpy array size\n\nA.size", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"access\">Accessing different elements of a Numpy Array</h2>", "_____no_output_____" ], [ "We can use rectangular brackets to access the different elements of the array. The correspondence between the rectangular brackets and the list and the rectangular representation is shown in the following figure for a 3x3 array: ", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumTwoEg.png\" width=\"500\" />", "_____no_output_____" ], [ "We can access the 2nd-row 3rd column as shown in the following figure:", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumTwoFT.png\" width=\"400\" />", "_____no_output_____" ], [ " We simply use the square brackets and the indices corresponding to the element we would like:", "_____no_output_____" ] ], [ [ "# Access the element on the second row and third column\n\nA[1, 2]", "_____no_output_____" ] ], [ [ " We can also use the following notation to obtain the elements: ", "_____no_output_____" ] ], [ [ "# Access the element on the second row and third column\n\nA[1][2]", "_____no_output_____" ] ], [ [ " Consider the elements shown in the following figure ", "_____no_output_____" ], [ "<img 
src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumTwoFF.png\" width=\"400\" />", "_____no_output_____" ], [ "We can access the element as follows ", "_____no_output_____" ] ], [ [ "# Access the element on the first row and first column\n\nA[0][0]", "_____no_output_____" ] ], [ [ "We can also use slicing in numpy arrays. Consider the following figure. We would like to obtain the first two columns in the first row", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumTwoFSF.png\" width=\"400\" />", "_____no_output_____" ], [ " This can be done with the following syntax ", "_____no_output_____" ] ], [ [ "# Access the element on the first row and first and second columns\n\nA[0][0:2]", "_____no_output_____" ] ], [ [ "Similarly, we can obtain the first two rows of the 3rd column as follows:", "_____no_output_____" ] ], [ [ "# Access the element on the first and second rows and third column\n\nA[0:2, 2]", "_____no_output_____" ] ], [ [ "Corresponding to the following figure: ", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumTwoTST.png\" width=\"400\" />", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"op\">Basic Operations</h2>", "_____no_output_____" ], [ "We can also add arrays. The process is identical to matrix addition. 
Matrix addition of <code>X</code> and <code>Y</code> is shown in the following figure:", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumTwoAdd.png\" width=\"500\" />", "_____no_output_____" ], [ "The numpy array is given by <code>X</code> and <code>Y</code>", "_____no_output_____" ] ], [ [ "# Create a numpy array X\n\nX = np.array([[1, 0], [0, 1]]) \nX", "_____no_output_____" ], [ "# Create a numpy array Y\n\nY = np.array([[2, 1], [1, 2]]) \nY", "_____no_output_____" ] ], [ [ " We can add the numpy arrays as follows.", "_____no_output_____" ] ], [ [ "# Add X and Y\n\nZ = X + Y\nZ", "_____no_output_____" ] ], [ [ "Multiplying a numpy array by a scaler is identical to multiplying a matrix by a scaler. If we multiply the matrix <code>Y</code> by the scaler 2, we simply multiply every element in the matrix by 2 as shown in the figure.", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumTwoDb.png\" width=\"500\" />", "_____no_output_____" ], [ "We can perform the same operation in numpy as follows ", "_____no_output_____" ] ], [ [ "# Create a numpy array Y\n\nY = np.array([[2, 1], [1, 2]]) \nY", "_____no_output_____" ], [ "# Multiply Y with 2\n\nZ = 2 * Y\nZ", "_____no_output_____" ] ], [ [ "Multiplication of two arrays corresponds to an element-wise product or Hadamard product. Consider matrix <code>X</code> and <code>Y</code>. The Hadamard product corresponds to multiplying each of the elements in the same position, i.e. multiplying elements contained in the same color boxes together. 
The result is a new matrix that is the same size as matrix <code>Y</code> or <code>X</code>, as shown in the following figure.", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumTwoMul.png\" width=\"500\" />", "_____no_output_____" ], [ "We can perform element-wise product of the array <code>X</code> and <code>Y</code> as follows:", "_____no_output_____" ] ], [ [ "# Create a numpy array Y\n\nY = np.array([[2, 1], [1, 2]]) \nY", "_____no_output_____" ], [ "# Create a numpy array X\n\nX = np.array([[1, 0], [0, 1]]) \nX", "_____no_output_____" ], [ "# Multiply X with Y\n\nZ = X * Y\nZ", "_____no_output_____" ] ], [ [ "We can also perform matrix multiplication with the numpy arrays <code>A</code> and <code>B</code> as follows:", "_____no_output_____" ], [ "First, we define matrix <code>A</code> and <code>B</code>:", "_____no_output_____" ] ], [ [ "# Create a matrix A\n\nA = np.array([[0, 1, 1], [1, 0, 1]])\nA", "_____no_output_____" ], [ "# Create a matrix B\n\nB = np.array([[1, 1], [1, 1], [-1, 1]])\nB", "_____no_output_____" ] ], [ [ "We use the numpy function <code>dot</code> to multiply the arrays together.", "_____no_output_____" ] ], [ [ "# Calculate the dot product\n\nZ = np.dot(A,B)\nZ", "_____no_output_____" ], [ "# Calculate the sine of Z\n\nnp.sin(Z)", "_____no_output_____" ] ], [ [ "We use the numpy attribute <code>T</code> to calculate the transposed matrix", "_____no_output_____" ] ], [ [ "# Create a matrix C\n\nC = np.array([[1,1],[2,2],[3,3]])\nC", "_____no_output_____" ], [ "# Get the transposed of C\n\nC.T", "_____no_output_____" ] ], [ [ "<hr>\n<h2>The last exercise!</h2>\n<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. 
By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href=\"https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/\" target=\"_blank\">this article</a> to learn how to share your work.\n<hr>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<h2>Get IBM Watson Studio free of charge!</h2>\n <p><a href=\"https://cocl.us/bottemNotebooksPython101Coursera\"><img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/BottomAd.png\" width=\"750\" align=\"center\"></a></p>\n</div>", "_____no_output_____" ], [ "<h3>About the Authors:</h3> \n<p><a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\" target=\"_blank\">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>", "_____no_output_____" ], [ "Other contributors: <a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "<p>Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href=\"https://cognitiveclass.ai/mit-license/\">MIT License</a>.</p>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d066199a2fb7928ced938461d566dc0c0f872b4a
92,004
ipynb
Jupyter Notebook
Trematinib-Combo-CI/python/Hill-Equation-Bayesian-Regression.ipynb
nathanieljevans/HNSCC_functional_data_pipeline
b55571660f2799db699ae1c7ab97ae58c342fe56
[ "MIT" ]
null
null
null
Trematinib-Combo-CI/python/Hill-Equation-Bayesian-Regression.ipynb
nathanieljevans/HNSCC_functional_data_pipeline
b55571660f2799db699ae1c7ab97ae58c342fe56
[ "MIT" ]
null
null
null
Trematinib-Combo-CI/python/Hill-Equation-Bayesian-Regression.ipynb
nathanieljevans/HNSCC_functional_data_pipeline
b55571660f2799db699ae1c7ab97ae58c342fe56
[ "MIT" ]
null
null
null
106.981395
43,400
0.844192
[ [ [ "import scipy.special as sps\nimport pyro \nimport pyro.distributions as dist\nimport torch\nfrom torch.distributions import constraints\nfrom pyro.infer import MCMC, NUTS\nfrom scipy.stats import norm\nfrom torch import nn\nfrom pyro.infer.autoguide import AutoDiagonalNormal\nfrom pyro.nn import PyroModule\nfrom pyro import optim\nfrom pyro.infer import SVI, Trace_ELBO\nfrom pyro.nn import PyroSample\nfrom pyro.infer import Predictive\n\n\npyro.enable_validation(True)\npyro.set_rng_seed(1)\npyro.enable_validation(True)\n\nimport os\n\nimport pandas as pd \nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport statsmodels.api as sm\nimport statsmodels\n\n#import HNSCC_analysis_pipeline_lib as lib\n\nimport pickle as pkl\nimport seaborn as sbn\n\nprint(pyro.__version__)\nassert pyro.__version__.startswith('1.1.0')\n\nimport time\n\nfrom __future__ import print_function\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\n\nfrom scipy.stats import norm, gamma, poisson, beta \n\n%matplotlib inline", "1.1.0\n" ] ], [ [ "# Hill-Langmuir Bayesian Regression \n\nGoals similar to: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3773943/pdf/nihms187302.pdf \n\nHowever, they use a different paramerization that does not include Emax \n\n# Bayesian Hill Model Regression \n\nThe Hill model is defined as: \n\n$$ F(c, E_{max}, E_0, EC_{50}, H) = E_0 + \\frac{E_{max} - E_0}{1 + (\\frac{EC_{50}}{C})^H} $$\n\nWhere concentration, $c$ is in uM, and is *not* in logspace. \n\nTo quantify uncertainty in downstream modeling, and to allow placement of priors on the relevant variables, we will do this in a bayesian framework. \n\n# Building Intuition with the Hill Equation\n\n![](https://media.springernature.com/lw685/springer-static/image/art%3A10.1038%2Fsrep14701/MediaObjects/41598_2015_Article_BFsrep14701_Fig1_HTML.jpg?as=webp)\n\n1. 
Di Veroli GY, Fornari C, Goldlust I, Mills G, Koh SB, Bramhall JL, et al. An automated fitting procedure and software for dose-response curves with multiphasic features. Scientific Reports. 2015 Oct 1;5(1):1–11. \n", "_____no_output_____" ] ], [ [ "# https://ipywidgets.readthedocs.io/en/latest/examples/Using%20Interact.html\ndef f(E0=2.5, Emax=0, log_EC50=-2, H=1):\n \n EC50 = 10**log_EC50\n plt.figure(2, figsize=(10,5))\n xx = np.logspace(-4, 1, 100)\n yy = E0 + (Emax - E0)/(1+(EC50/xx)**H)\n \n plt.plot(np.log10(xx),yy, 'r-')\n plt.ylim(-0.2, 3)\n plt.xlabel('log10 [Concentration (uM)] ')\n plt.ylabel('cell response')\n plt.show()\n\ninteractive_plot = interactive(f, E0=(0,3,0.5), Emax=(0.,1.,0.05), log_EC50=(-5,2,0.1), H=(1,5,1))\noutput = interactive_plot.children[-1]\noutput.layout.height = '350px'\ninteractive_plot", "_____no_output_____" ] ], [ [ "# Define Model + Guide ", "_____no_output_____" ] ], [ [ "class plotter: \n def __init__(self, params, figsize=(20,10), subplots = (2,7)): \n '''\n '''\n assert len(params) <= subplots[0]*subplots[1], 'wrong number of subplots for given params to report'\n self.fig, self.axes = plt.subplots(*subplots,figsize=figsize, sharex='col', sharey='row')\n self.vals = {p:[] for p in params}\n self.params = params\n \n def record(self):\n '''\n '''\n for p in self.params: \n self.vals[p].append(pyro.param(p).item())\n \n def plot_all(self): \n '''\n '''\n for p, ax in zip(self.params, self.axes.flat): \n ax.plot(self.vals[p], 'b-')\n ax.set_title(p, fontsize=25)\n ax.set_xlabel('step', fontsize=20)\n ax.set_ylabel('param value', fontsize=20)\n \n plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.35)\n plt.show()\n \ndef model(X, Y=None):\n '''\n \n '''\n E0 = pyro.sample('E0', dist.Normal(1., E0_std))\n\n Emax = pyro.sample('Emax', dist.Beta(a_emax, b_emax))\n \n H = pyro.sample('H', dist.Gamma(alpha_H, beta_H))\n \n EC50 = 10**pyro.sample('log_EC50', dist.Normal(mu_ec50, std_ec50))\n\n 
obs_sigma = pyro.sample(\"obs_sigma\", dist.Gamma(a_obs, b_obs))\n \n obs_mean = E0 + (Emax - E0)/(1+(EC50/X)**H)\n\n with pyro.plate(\"data\", X.shape[0]):\n obs = pyro.sample(\"obs\", dist.Normal(obs_mean.squeeze(-1), obs_sigma), obs=Y)\n \n return obs_mean\n\ndef guide(X, Y=None):\n \n _E0_mean = pyro.param('E0_mean', torch.tensor(0.))\n _E0_std = pyro.param('E0_std', torch.tensor(E0_std), constraint=constraints.positive)\n E0 = pyro.sample('E0', dist.Normal(_E0_mean, _E0_std))\n \n _a_emax = pyro.param('_a_emax', torch.tensor(a_emax), constraint=constraints.positive)\n _b_emax = pyro.param('_b_emax', torch.tensor(b_emax), constraint=constraints.positive)\n Emax = pyro.sample('Emax', dist.Beta(_a_emax, _b_emax))\n \n _alpha_H = pyro.param('_alpha_H', torch.tensor(alpha_H), constraint=constraints.positive)\n _beta_H = pyro.param('_beta_H', torch.tensor(beta_H), constraint=constraints.positive)\n H = pyro.sample('H', dist.Gamma(_alpha_H, _beta_H))\n\n _mu_ec50 = pyro.param('_mu_ec50', torch.tensor(mu_ec50))\n _std_ec50 = pyro.param('_std_ec50', torch.tensor(std_ec50), constraint=constraints.positive)\n EC50 = pyro.sample('log_EC50', dist.Normal(_mu_ec50, _std_ec50))\n \n _a_obs = pyro.param('_a_obs', torch.tensor(a_obs), constraint=constraints.positive)\n _b_obs = pyro.param('_b_obs', torch.tensor(b_obs), constraint=constraints.positive)\n obs_sigma = pyro.sample(\"obs_sigma\", dist.Gamma(_a_obs, _b_obs))\n \n obs_mean = E0 + (Emax - E0)/(1+(EC50/X)**H)\n \n return obs_mean", "_____no_output_____" ] ], [ [ "## choosing priors \n\n\n\n### $E_0$\nThe upper bound or maximum value of our function, $E_0$ should be centered at 1, although it's possible to be a little above or below that, we'll model this with a Normal distribution and a fairly tight variance around 1. 
\n\n$$ E_0 \\propto N(1, \\sigma_{E_0}) $$ \n\n### $E_{max}$ \n$E_{max}$ is the lower bound, or minimum value of our function, and is expected to be at 0, however, for some inhibitors it's significantly above this. \n\n$$ E_{max} \\propto Beta(a_{E_{max}}, b_{E_{max}}) $$ \n\n$$ e[E_{max}] = \\frac{a}{a+b} $$\n\n### H \n\nHill coefficient, $H$ should be a positive integer, however, we're going to approximate this as gamma since a poisson is not flexible enough to characterize this properly. \n\n$$ H \\propto gamma(\\alpha_{H}, \\beta_{H}) $$\n\n$$ Mean = E[gamma] = \\frac{ \\alpha_{H} }{\\beta_{H}} $$ \n\n### $EC_{50}$ \n\nEC50 was actually a little tough, we could imagine encoding IC50 as a gamma distribution in concentration space, however, this results in poor behavior when used in logspace. Therefore, it actually works much better to encode this as a Normal distribution in logspace. \n\n$$ log10(EC50) \\propto Normal(\\mu_{EC50}, \\sigma_{EC50}) $$ \n\n### cell viability ($Y$) \n\nWe'll assume this is a normal distribution, centered around the hill function with standard deviation $\\sigma_{obs}$. 
\n\n$$ \\mu_{obs} = E_0 + \\frac{E_{max} - E_0}{1 + (\\frac{EC_{50}}{C})^H} $$\n\n$$ Y \\propto N(\\mu_{obs}, \\sigma_{obs}) $$ ", "_____no_output_____" ], [ "# Building Prior Intuition \n\n## E0 Prior ", "_____no_output_____" ] ], [ [ "def f(E0_std):\n plt.figure(2)\n xx = np.linspace(-2, 4, 50)\n \n rv = norm(1, E0_std)\n yy = rv.pdf(xx)\n \n plt.ylim(0,1)\n plt.title('E0 parameter')\n plt.xlabel('E0')\n plt.ylabel('probability')\n plt.plot(xx, yy, 'r-')\n plt.show()\n\ninteractive_plot = interactive(f, E0_std=(0.1,4,0.1))\noutput = interactive_plot.children[-1]\noutput.layout.height = '350px'\ninteractive_plot", "_____no_output_____" ] ], [ [ "## Expecation, Variance to Alpha,Beta for Gamma ", "_____no_output_____" ] ], [ [ "def gamma_modes_to_params(E, S): \n '''\n '''\n beta = E/S \n alpha = E**2/S \n \n return alpha, beta\n ", "_____no_output_____" ] ], [ [ "## Emax Prior ", "_____no_output_____" ] ], [ [ "# TODO: Have inputs be E[] and Var[] rather than a,b... more useful for setting up priors. 
\ndef f(emax_mean=1, emax_var=3):\n \n a_emax, b_emax = gamma_modes_to_params(emax_mean, emax_var)\n \n plt.figure(2)\n xx = np.linspace(0, 1.2, 100)\n \n rv = gamma(a_emax, scale=1/b_emax, loc=0)\n yy = rv.pdf(xx)\n \n plt.title('Emax Parameter')\n plt.xlabel('Emax')\n plt.ylabel('probability')\n \n plt.ylim(0,5)\n plt.plot(xx, yy, 'r-', label=f'alpha={a_emax:.2f}, beta={b_emax:.2f}')\n plt.legend()\n plt.show()\n\ninteractive_plot = interactive(f, emax_mean=(0.1,1.2,0.05), emax_var=(0.01,1,0.05))\noutput = interactive_plot.children[-1]\noutput.layout.height = '350px'\ninteractive_plot", "_____no_output_____" ], [ "def f(alpha_H=1, beta_H=0.5):\n f, axes = plt.subplots(1,1,figsize=(5,5))\n \n xx = np.linspace(0, 5, 100)\n g = gamma(alpha_H, scale=1/beta_H, loc=0)\n yy = g.pdf(xx)\n \n axes.set_xlabel('H')\n axes.set_ylabel('probability')\n \n plt.xlim(0,5)\n plt.ylim(0,5)\n \n axes.plot(xx,yy, 'r-')\n plt.tight_layout()\n plt.title('Hill Coefficient')\n plt.show()\n\ninteractive_plot = interactive(f, alpha_H=(1,10,1), beta_H=(0.1,5,0.1))\noutput = interactive_plot.children[-1]\noutput.layout.height = '350px'\ninteractive_plot", "_____no_output_____" ], [ "def f(mu_ec50=-1, std_ec50=0.5):\n f, axes = plt.subplots(1,1,figsize=(5,5))\n \n xx = np.log10( np.logspace(-5, 2, 100) )\n g = norm(mu_ec50, std_ec50)\n yy = g.pdf(xx)\n \n axes.plot(xx,yy, 'r-')\n plt.xlabel('log10 EC50')\n plt.ylabel('probability')\n plt.title('EC50 parameter')\n plt.tight_layout()\n plt.show()\n\ninteractive_plot = interactive(f, mu_ec50=(-5,2,0.1), std_ec50=(0.01,5,0.1))\noutput = interactive_plot.children[-1]\noutput.layout.height = '350px'\ninteractive_plot", "_____no_output_____" ], [ "def f(a_obs=1, b_obs=1):\n plt.figure(2)\n xx = np.linspace(0, 3, 50)\n \n rv = gamma(a_obs, scale=1/b_obs, loc=0)\n yy = rv.pdf(xx)\n \n plt.ylim(0,5)\n plt.plot(xx, yy, 'r-')\n plt.xlabel('std_obs')\n plt.ylabel('probability')\n plt.title('Observation (Y) std')\n plt.show()\n\ninteractive_plot = 
interactive(f, a_obs=(1,100,1), b_obs=(1,100,1))\noutput = interactive_plot.children[-1]\noutput.layout.height = '350px'\ninteractive_plot", "_____no_output_____" ] ], [ [ "# Define Priors ", "_____no_output_____" ] ], [ [ "############ PRIORS ###############\nE0_std = 0.05\n\n# uniform\n# 50,100 -> example if we have strong support for Emax around 0.5 \na_emax = 50. #2.\nb_emax = 100. #8.\n\n# H gamma prior \nalpha_H = 1\nbeta_H = 1 \n\n#EC50\n# this is in logspace, so in uM -> 10**mu_ec50\nmu_ec50 = -2.\nstd_ec50 = 3.\n\n# obs error \na_obs = 1\nb_obs = 1\n###################################", "_____no_output_____" ] ], [ [ "# Define Data\n\nWe'll use fake data for now. ", "_____no_output_____" ] ], [ [ "Y = torch.tensor([1., 1., 1., 0.9, 0.7, 0.6, 0.5], dtype=torch.float)\nX = torch.tensor([10./3**i for i in range(7)][::-1], dtype=torch.float).unsqueeze(-1)", "_____no_output_____" ] ], [ [ "# Fit model with MCMC\n\nhttps://forum.pyro.ai/t/need-help-with-very-simple-model/600\nhttps://pyro.ai/examples/bayesian_regression_ii.html", "_____no_output_____" ] ], [ [ "torch.manual_seed(99999)\nnuts_kernel = NUTS(model, adapt_step_size=True)\nmcmc_run = MCMC(nuts_kernel, num_samples=400, warmup_steps=100, num_chains=1)\nmcmc_run.run(X,Y)", "Sample: 100%|██████████| 500/500 [00:32, 15.43it/s, step size=2.56e-01, acc. 
prob=0.949]\n" ] ], [ [ "## visualize results", "_____no_output_____" ] ], [ [ "samples = {k: v.detach().cpu().numpy() for k, v in mcmc_run.get_samples().items()}\n\nf, axes = plt.subplots(3,2, figsize=(10,5))\n\nfor ax, key in zip(axes.flat, samples.keys()): \n \n ax.set_title(key)\n ax.hist(samples[key], bins=np.linspace(min(samples[key]), max(samples[key]), 50), density=True)\n ax.set_xlabel(key)\n ax.set_ylabel('probability')\n \naxes.flat[-1].hist(10**samples['log_EC50'], bins=np.linspace(min(10**(samples['log_EC50'])), max(10**(samples['log_EC50'])), 50))\naxes.flat[-1].set_title('EC50')\naxes.flat[-1].set_xlabel('EC50 [uM]')\n \nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "## plot fitted hill f-n", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(7,7))\n\nxx = np.logspace(-7, 6, 200)\n\nfor i,s in pd.DataFrame(samples).iterrows(): \n yy = s.E0 + (s.Emax - s.E0)/(1+(10**s.log_EC50/xx)**s.H)\n plt.plot(np.log10(xx), yy, 'ro', alpha=0.01)\n \n \nplt.plot(np.log10(X), Y, 'b.', label='data')\nplt.xlabel('log10 Concentration')\nplt.ylabel('cell_viability')\nplt.ylim(0,1.2)\nplt.legend()\nplt.title('MCMC results')\nplt.show()", "_____no_output_____" ] ], [ [ "# Deprecated\n\n## EC50 example - gamma in concentration space ", "_____no_output_____" ] ], [ [ "def f(alpha_ec50=1, beta_ec50=0.5):\n f, axes = plt.subplots(1,2,figsize=(8,4))\n \n xx = np.logspace(-5, 2, 100)\n g = gamma(alpha_ec50, scale=1/beta_ec50, loc=0)\n yy = g.pdf(xx)\n \n g_samples = g.rvs(1000)\n \n axes[0].plot(xx,yy, 'r-')\n \n axes[1].plot(np.log10(xx), yy, 'b-')\n plt.tight_layout()\n plt.show()\n\ninteractive_plot = interactive(f, alpha_ec50=(1,10,1), beta_ec50=(0.01,5,0.1))\noutput = interactive_plot.children[-1]\noutput.layout.height = '350px'\ninteractive_plot", "_____no_output_____" ] ], [ [ "# Fit Model with `stochastic variational inference` ", "_____no_output_____" ] ], [ [ "adam = optim.Adam({\"lr\": 1e-1})\n\nsvi = SVI(model, guide, adam, 
loss=Trace_ELBO())\n\ntic = time.time()\nSTEPS = 2500\npyro.clear_param_store() \nmyplotter = plotter(['_alpha_H', '_beta_H', '_a_emax', '_b_emax', '_a_obs', '_b_obs', '_mu_ec50', '_std_ec50'], figsize=(12, 8), subplots=(2,5))\n_losses = []\nlast=0\nloss = 0\nn = 100\n\ntry: \n for j in range(STEPS):\n loss += svi.step(X, Y) \n myplotter.record()\n if j % n == 0:\n print(f\"[iteration {j}] loss: {(loss / n) :.2f} [change={(loss/n - last/n):.2f}]\", end='\\t\\t\\t\\r')\n _losses.append(np.log10(loss))\n last = loss\n loss = 0\n myplotter.plot_all()\nexcept: \n myplotter.plot_all()\n raise\n \nplt.figure()\nplt.plot(_losses)\nplt.xlabel('steps')\nplt.ylabel('loss')\nplt.show()\n\ntoc = time.time()\nprint(f'time to train {STEPS} iterations: {toc-tic:.2g}s')", "_____no_output_____" ], [ "x_data = torch.tensor(np.logspace(-5, 5, 200)).unsqueeze(-1)\n\ndef summary(samples):\n site_stats = {}\n for k, v in samples.items():\n site_stats[k] = {\n \"mean\": torch.mean(v, 0),\n \"std\": torch.std(v, 0),\n \"5%\": v.kthvalue(int(len(v) * 0.05), dim=0)[0],\n \"95%\": v.kthvalue(int(len(v) * 0.95), dim=0)[0],\n }\n return site_stats\n\n\npredictive = Predictive(model, guide=guide, num_samples=800,\n return_sites=(\"linear.weight\", \"obs\", \"_RETURN\"))\n\nsamples = predictive(x_data)\npred_summary = summary(samples)\n\ny_mean = pred_summary['obs']['mean'].detach().numpy()\ny_5 = pred_summary['obs']['5%'].detach().numpy()\ny_95 = pred_summary['obs']['95%'].detach().numpy()\n\nplt.figure(figsize=(7,7))\nplt.plot(np.log10(X),Y, 'k*', label='data')\nplt.plot(np.log10(x_data), y_mean, 'r-')\nplt.plot(np.log10(x_data), y_5, 'g-', label='95% Posterior Predictive CI')\nplt.plot(np.log10(x_data), y_95, 'g-')\n\nplt.ylim(0,1.2)\n\nplt.legend()\nplt.show()\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0661d80c59abf0f50fafbd9dbbc2cf4be0f3317
26,264
ipynb
Jupyter Notebook
Cesium_Advent_Calendar_3rd.ipynb
tkama/hello_cesiumpy
5c32041cf3744546687a810f101c7678ab2e9f59
[ "Apache-2.0" ]
null
null
null
Cesium_Advent_Calendar_3rd.ipynb
tkama/hello_cesiumpy
5c32041cf3744546687a810f101c7678ab2e9f59
[ "Apache-2.0" ]
null
null
null
Cesium_Advent_Calendar_3rd.ipynb
tkama/hello_cesiumpy
5c32041cf3744546687a810f101c7678ab2e9f59
[ "Apache-2.0" ]
null
null
null
89.333333
205
0.652795
[ [ [ "ライブラリのインポートとバージョン表示", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "import cesiumpy\ncesiumpy.__version__", "_____no_output_____" ] ], [ [ "CSVファイルの読み込み", "_____no_output_____" ] ], [ [ "filename = '07hoikuennyoutien-asakashi_utf8.csv'\ndf = pd.read_csv( filename )", "_____no_output_____" ] ], [ [ "バブルチャートの表示", "_____no_output_____" ] ], [ [ "v = cesiumpy.Viewer()\nfor i, row in df.iterrows():\n l = row['施設_収容人数[総定員]人数']\n p = cesiumpy.Point(position=[row['施設_経度'], row['施設_緯度'], 0] \n , pixelSize=l/5, color='blue')\n v.entities.add(p)\nv", "_____no_output_____" ] ], [ [ "3D棒グラフの表示", "_____no_output_____" ] ], [ [ "v = cesiumpy.Viewer()\nfor i, row in df.iterrows():\n l = row['施設_収容人数[総定員]人数']\n cyl = cesiumpy.Cylinder(position=[row['施設_経度'], row['施設_緯度'] ], \n length=l*10,topRadius=50, bottomRadius=50, material='aqua')\n v.entities.add(cyl)\nv", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d066240a175b942319ea05fb7744c4b851a7fc1a
968
ipynb
Jupyter Notebook
Untitled2.ipynb
Asha-ai/BERT_abstractive_proj
f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9
[ "Apache-2.0" ]
null
null
null
Untitled2.ipynb
Asha-ai/BERT_abstractive_proj
f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9
[ "Apache-2.0" ]
null
null
null
Untitled2.ipynb
Asha-ai/BERT_abstractive_proj
f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9
[ "Apache-2.0" ]
null
null
null
23.609756
237
0.504132
[ [ [ "<a href=\"https://colab.research.google.com/github/Asha-ai/BERT_abstractive_proj/blob/master/Untitled2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
d06630bafc6e40465bf7b97df207adf9b4666f00
10,481
ipynb
Jupyter Notebook
Vallacher_PdfGen.ipynb
AlxndrMlk/PDF_generator
3a508982cf1573cd7694ba8b00b926b9f257253d
[ "MIT" ]
null
null
null
Vallacher_PdfGen.ipynb
AlxndrMlk/PDF_generator
3a508982cf1573cd7694ba8b00b926b9f257253d
[ "MIT" ]
null
null
null
Vallacher_PdfGen.ipynb
AlxndrMlk/PDF_generator
3a508982cf1573cd7694ba8b00b926b9f257253d
[ "MIT" ]
null
null
null
38.112727
153
0.411602
[ [ [ "import pdfkit\nfrom string import Template\nimport numpy as np\n\nfrom PyPDF2 import PdfFileReader, PdfFileMerger\nimport glob", "_____no_output_____" ], [ "temp_address = r'C:\\Users\\Ol\\Documents\\EXPERIMENTS\\ACT_ID\\Materials\\projectsListTemp_pl.html'", "_____no_output_____" ], [ "# Import content\nwith open(r'C:\\Users\\Ol\\Documents\\EXPERIMENTS\\ACT_ID\\Materials\\progs_.txt', 'r', encoding='utf-8') as f:\n content = f.readlines()", "_____no_output_____" ], [ "# Strip newline characters\ncontent = [x.strip('\\n') for x in content]", "_____no_output_____" ], [ "header = \"\"\"&emsp;&emsp;Poniżej znajdziesz 10 krótkich opisów projektów naukowych \ni edukacyjnych, które będą dostępne dla studentów Florida Atlantic University i Uniwersytetu \nWarszawskiego w ramach międzynarodowego projektu \nrealizowanego przy współpracy Instytutu Studiów Społecznych UW (ISS UW) oraz \nDepartament of Psychology at Florida Atlantic University.\n<br><br>\nChcemy Cię prosić o ich ocenę. Uporządkuj je według Twoich \npreferencji.\n<br><br>\n<b>1</b> oznacza, że przyznajesz pierwsze miejsce, <b>10</b>, że ostatnie. \n<br><br>\nUdział w którym projekcie byłby dla Ciebie najbardziej atrakcyjny? 
<br><br>\n\"\"\"", "_____no_output_____" ], [ "def fill_temp(template_location, content):\n \n with open(template_location) as f:\n template = f.read()\n \n template = Template(template[6:])\n\n filled = template.safe_substitute(HEADER = header,\n DESCR = 'Skrócony opis projektu',\n POS = 'Pozycja',\n P1 = content[choice[0]],\n P2 = content[choice[1]],\n P3 = content[choice[2]],\n P4 = content[choice[3]],\n P5 = content[choice[4]],\n P6 = content[choice[5]],\n P7 = content[choice[6]],\n P8 = content[choice[7]],\n P9 = content[choice[8]],\n P10 = content[choice[9]])\n \n return filled\n ", "_____no_output_____" ], [ "# Define options\noptions = {'page-size': 'A4',\n 'encoding': \"utf-8\"}", "_____no_output_____" ], [ "# Generate pdfs\nfor i in range(50):\n choice = np.random.choice(10, 10, replace=False)\n quest_ready = fill_temp(temp_address, content)\n pdfkit.from_string(quest_ready, r'C:\\Users\\Ol\\Documents\\EXPERIMENTS\\ACT_ID\\Materials\\Pdf\\pList{:02}.pdf'.format(50+i), options=options)", "Loading pages (1/6)\nQNetworkAccessFileBackendFactory: URL has no schema set, use file:// for files\nCounting pages (2/6) \nResolving links (4/6) \nLoading headers and footers (5/6) \nPrinting pages (6/6)\nDone \nLoading pages (1/6)\nQNetworkAccessFileBackendFactory: URL has no schema set, use file:// for files\nCounting pages (2/6) \nResolving links (4/6) \nLoading headers and footers (5/6) \nPrinting pages (6/6)\nDone \nLoading pages (1/6)\nQNetworkAccessFileBackendFactory: URL has no schema set, use file:// for files\nCounting pages (2/6) \nResolving links (4/6) \nLoading headers and footers (5/6) \nPrinting pages (6/6)\nDone \nLoading pages (1/6)\nQNetworkAccessFileBackendFactory: URL has no schema set, use file:// for files\nCounting pages (2/6) \nResolving links (4/6) \nLoading headers and footers (5/6) \nPrinting pages (6/6)\nDone \nLoading pages (1/6)\nQNetworkAccessFileBackendFactory: URL has no schema set, use file:// for files\nCounting pages (2/6) \nResolving 
links (4/6) \nLoading headers and footers (5/6) \nPrinting pages (6/6)\nDone \nLoading pages (1/6)\nQNetworkAccessFileBackendFactory: URL has no schema set, use file:// for files\nCounting pages (2/6) \nResolving links (4/6) \nLoading headers and footers (5/6) \nPrinting pages (6/6)\nDone \nLoading pages (1/6)\nQNetworkAccessFileBackendFactory: URL has no schema set, use file:// for files\nCounting pages (2/6) \nResolving links (4/6) \nLoading headers and footers (5/6) \nPrinting pages (6/6)\nDone \nLoading pages (1/6)\nQNetworkAccessFileBackendFactory: URL has no schema set, use file:// for files\nCounting pages (2/6) \nResolving links (4/6) \nLoading headers and footers (5/6) \nPrinting pages (6/6)\nDone \nLoading pages (1/6)\nQNetworkAccessFileBackendFactory: URL has no schema set, use file:// for files\nCounting pages (2/6) \nResolving links (4/6) \nLoading headers and footers (5/6) \nPrinting pages (6/6)\nDone \nLoading pages (1/6)\nQNetworkAccessFileBackendFactory: URL has no schema set, use file:// for files\nCounting pages (2/6) \nResolving links (4/6) \nLoading headers and footers (5/6) \nPrinting pages (6/6)\nDone \n" ] ], [ [ "## Merge files\n\ninto one printable pdf", "_____no_output_____" ] ], [ [ "# Define naming pattern\npattern = r'C:\\Users\\Ol\\Documents\\EXPERIMENTS\\ACT_ID\\Materials\\PDF\\2\\*.pdf'", "_____no_output_____" ], [ "# Generate files list\npdfs_list = glob.glob(pattern)", "_____no_output_____" ], [ "# Merge and write the output file\n\nmerger = PdfFileMerger()\n\nfor f in pdfs_list:\n merger.append(PdfFileReader(f), 'rb')\n\nmerger.write(r'C:\\Users\\Ol\\Documents\\EXPERIMENTS\\ACT_ID\\Materials\\PDF\\FAU_UW_project2.pdf')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0663593358693c0afccd639ba585b864b272aae
2,247
ipynb
Jupyter Notebook
src/analysis/training_result.ipynb
hamling-ling/FacialSentiment
8147cfa2903e98d3ecbaebda7974aa621ac16929
[ "MIT" ]
1
2020-05-11T07:32:07.000Z
2020-05-11T07:32:07.000Z
src/analysis/training_result.ipynb
hamling-ling/FacialSentiment
8147cfa2903e98d3ecbaebda7974aa621ac16929
[ "MIT" ]
null
null
null
src/analysis/training_result.ipynb
hamling-ling/FacialSentiment
8147cfa2903e98d3ecbaebda7974aa621ac16929
[ "MIT" ]
null
null
null
22.247525
156
0.506008
[ [ [ "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom PIL import Image", "_____no_output_____" ], [ "!echo \"loss,accuracy,val_loss,val_accuracy\" > curve.csv\n!cat ../face96_300eps.log | \\\ngrep -P \"\\s*1109/1109\" | \\\nsed -n \"s/^.*loss: \\([0-9\\.]*\\).*accuracy: \\([0-9\\.]*\\).*val_loss: \\([0-9\\.]*\\).*val_accuracy: \\([0-9\\.]*\\)$/\\1, \\2, \\3, \\4/p\" \\\n>> curve.csv", "_____no_output_____" ], [ "df = pd.read_csv('curve.csv')\ndf.head(5)", "_____no_output_____" ], [ "plt.plot(df['loss'], label=\"training loss\")\nplt.plot(df['val_loss'], label=\"validation loss\")\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "fig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\n\n# plot\nax.plot(df['accuracy'], label=\"training accuracy\")\nax.plot(df['val_accuracy'], label=\"validation accuracy\")\nax.set_xlabel('epoch')\nax.set_title('96x96 Grayscale Facial Sentiment Analysis Training Curve')\nax.legend()\n\n# save as png\nplt.savefig('figure.png')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d06635fc6e971a27399ff4253a2f15162a6c38e7
15,037
ipynb
Jupyter Notebook
doc/auto_examples/plot_tutorial.ipynb
alexrockhill/dipole-simulator2
b0ff67ad0baffb07a4b72ae0877b0686c80fbd55
[ "BSD-3-Clause" ]
null
null
null
doc/auto_examples/plot_tutorial.ipynb
alexrockhill/dipole-simulator2
b0ff67ad0baffb07a4b72ae0877b0686c80fbd55
[ "BSD-3-Clause" ]
null
null
null
doc/auto_examples/plot_tutorial.ipynb
alexrockhill/dipole-simulator2
b0ff67ad0baffb07a4b72ae0877b0686c80fbd55
[ "BSD-3-Clause" ]
null
null
null
60.633065
1,110
0.641019
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n\n# About Dipoles in MEG and EEG\n\nFor an explanation of what is going on in the demo and background information\nabout magentoencephalography (MEG) and electroencephalography (EEG) in\ngeneral, let's walk through some code. To execute this code, you'll need\nto have a working version of python with ``mne`` installed, see the\n`quick-start` documentation for instructions. You'll need the development\nversion, so you'll need to do\n``pip install git+https://github.com/mne-tools/mne-python.git`` You'll also\nneed to install the requirements such as with\n``pip install -r requirements.txt``.\n", "_____no_output_____" ] ], [ [ "# Author: Alex Rockhill <[email protected]>\n#\n# License: BSD-3-Clause", "_____no_output_____" ] ], [ [ "Let's start by importing the dependencies we'll need.\n\n", "_____no_output_____" ] ], [ [ "import os.path as op # comes with python and helps naviagte to files\nimport numpy as np # a scientific computing package with arrays\nimport matplotlib.pyplot as plt # a plotting library\nimport mne # our main analysis software package\nfrom nilearn.plotting import plot_anat # this package plots brains", "_____no_output_____" ] ], [ [ "## Background\nMEG and EEG researchers record very small electromagentic potentials\ngenerated by the brain from outside the head. 
When it comes from the\nrecording devices, it looks like this (there are a lot of channels\nso only a subset are shown):\n\n", "_____no_output_____" ] ], [ [ "data_path = mne.datasets.sample.data_path() # get the sample data path\nraw = mne.io.read_raw( # navigate to some raw sample data\n op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif'))\nraw_plot = raw.copy() # make a copy to modify for plotting\nraw_plot.pick_channels(raw.ch_names[::10]) # pick only every tenth channel\nraw_plot.plot(n_channels=len(raw_plot.ch_names),\n duration=1, # only a small, 1 second time window\n start=50, # start part way in\n )", "_____no_output_____" ] ], [ [ "The goal of MEG and EEG researchers is to try and understand how activity\nin the brain changes as we respond to stimuli in our environment and\nperform behaviors. To do that, researchers will often use magnetic resonance\n(MR) to create an image of the research subject's brain. These images\nlook like this:\n\n", "_____no_output_____" ] ], [ [ "# first, get a T1-weighted MR scan file from the MNE example dataset\nT1_fname = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')\nplot_anat(T1_fname) # now we can plot it", "_____no_output_____" ] ], [ [ "The T1 MR image can be used to figure out where the surfaces of the\nbrain skull and scalp are as well as label the parts of the brain\nin the image using Freesurfer. The command below does this (it takes\n8 hours so I wouldn't recommend executing it now but it has already\nbeen done for you in the mne sample data, see `here\n<https://surfer.nmr.mgh.harvard.edu/fswiki/DownloadAndInstall>`_ for\nhow to install Freesurfer):\n\n.. code-block:: bash\n\n recon-all -subjid sample -sd $DATA_PATH/subjects -i $T1_FNAME -all\n\n", "_____no_output_____" ], [ "Now let's put it all together and see the problem that MEG and EEG\nresearchers face in figuring out what's going on inside the brain from\nelectromagnetic potentials on the surface of the scalp. 
As you can see below,\nthere are a lot of MEG and EEG sensors and they cover a large portion of the\nhead but its not readily apparent how much of each brain area each sensor\nrecords from and how to separate the summed activity from all the brain areas\nthat is recorded by each sensor into components for each brain area:\n\n<div class=\"alert alert-info\"><h4>Note</h4><p>The sensor positions don't come aligned to the MR image since they are\n recorded by a different device so we need to a transformation matrix to\n transform them from the coordinate frame they are in to MR coordinates.\n This can be done with :func:`mne.gui.coregistration` to generate the\n ``trans`` file that is loaded below.</p></div>\n\n", "_____no_output_____" ] ], [ [ "# the subjects_dir is where Freesurfer stored all the surface files\nsubjects_dir = op.join(data_path, 'subjects')\ntrans = mne.read_trans(op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_raw-trans.fif'))\n\n# the main plotter for mne, the brain object\nbrain = mne.viz.Brain(subject_id='sample', hemi='both', surf='pial',\n subjects_dir=subjects_dir)\nbrain.add_skull(alpha=0.5) # alpha sets transparency\nbrain.add_head(alpha=0.5)\nbrain.add_sensors(raw.info, trans)\n# set a nice view to show\nbrain.show_view(azimuth=120, elevation=90, distance=500)", "_____no_output_____" ] ], [ [ "## Making a Source Space and Forward Model\nFirst let's setup a space of vertices within the brain that we will consider\nas the sources of signal. In a real brain, there are hundreds of billions\nof cells but we don't have the resolution with only hundreds of sensors to\ndetermine the activity of each cell, so, instead, we'll choose a regularly\nsampled grid of sources that represent the summed activity of tens of\nthousands of cells. 
In most analyses in publications, the source space has\naround 8000 vertices, but, for this example, we'll use a smaller source\nspace for demonstration.\n\n", "_____no_output_____" ], [ "First, we would need to make a boundary element model (BEM) to account for\ndifferences in conductivity of the brain, skull and scalp. This can be\ndone with :func:`mne.make_bem_model` but, in this case, we'll just load\na pre-computed model. We'll also load the solution to the BEM model for how\ndifferent conductivities of issues effect current dipoles as they pass\nthrough each of the layers, but this can be computed with\n:func:`mne.make_bem_solution`.\n\n", "_____no_output_____" ] ], [ [ "bem_fname = op.join(subjects_dir, 'sample', 'bem',\n 'sample-5120-5120-5120-bem.fif')\n\n# load a pre-computed solution the how the sources within the brain will\n# be affected by the different conductivities\nbem_sol = op.join(subjects_dir, 'sample', 'bem',\n 'sample-5120-5120-5120-bem-sol.fif')\n# plot it, it's saved out in a standard location,\n# so we don't have to pass the path\nmne.viz.plot_bem(subject='sample', subjects_dir=op.join(data_path, 'subjects'),\n slices=np.linspace(45, 200, 12).round().astype(int))", "_____no_output_____" ] ], [ [ "## Making a Dipole\nNow, we're ready to make a dipole and see how its current will be recorded\nat the scalp with MEG and EEG.\n\n<div class=\"alert alert-info\"><h4>Note</h4><p>You can use ``print(mne.Dipole.__doc__)`` to print the arguments that\n are required by ``mne.Dipole`` or any other class, method or function.</p></div>\n\n", "_____no_output_____" ] ], [ [ "# make a dipole within the temporal lobe pointing superiorly,\n# fake a goodness-of-fit number\ndip_pos = [-0.0647572, 0.01315963, 0.07091921]\ndip = mne.Dipole(times=[0], pos=[dip_pos], amplitude=[3e-8],\n ori=[[0, 0, 1]], gof=50)\n\n# plot it!\nbrain = mne.viz.Brain(subject_id='sample', hemi='both', surf='pial',\n subjects_dir=subjects_dir, alpha=0.25)\nbrain.add_dipole(dip, trans, 
scales=10)\nbrain.show_view(azimuth=150, elevation=60, distance=500)", "_____no_output_____" ] ], [ [ "## Simulating Sensor Data\nWe're ready to compute a forward operator using the BEM to make the so-called\nleadfield matrix which multiplies activity at the dipole to give the\nmodelled the activity at the sensors. We can then use this to simulate evoked\ndata.\n\n", "_____no_output_____" ] ], [ [ "fwd, stc = mne.make_forward_dipole(\n dipole=dip, bem=bem_sol, info=raw.info, trans=trans)\n# we don't have a few things like the covarience matrix or a number of epochs\n# to average so we use these arguments for a reasonable solution\nevoked = mne.simulation.simulate_evoked(\n fwd, stc, raw.info, cov=None, nave=np.inf)\n\n# Now we can see what it would look like at the sensors\nfig, axes = plt.subplots(1, 3, figsize=(6, 4)) # make a figure with 3 subplots\n# use zip to iterate over axes and channel types at the same time\nfor ax, ch_type in zip(axes, ('grad', 'mag', 'eeg')):\n # we're just looking at the relative pattern so we won't use a colorbar\n evoked.plot_topomap(times=[0], ch_type=ch_type, axes=ax, colorbar=False)\n ax.set_title(ch_type)", "_____no_output_____" ] ], [ [ "## Wrapping Up\nWe covered some good intuition but there's lots more to learn! The main thing\nis that MEG and EEG researchers generally don't have the information about\nwhat's going on inside the brain, that's what they are trying to predict. To\nreverse this process, you need to invert the forward solution (tutorial:\n`tut-viz-stcs`). There is tons more to explore in the MNE `tutorials\n<https://mne.tools/dev/auto_tutorials/index.html>`_ and `examples\n<https://mne.tools/dev/auto_examples/index.html>`_ pages. 
Let's leave off by\nsetting up a source space of many different dipoles and seeing their\ndifferent activities manifest on the scalp as measured by the sensors.\n\n", "_____no_output_____" ] ], [ [ "src = mne.setup_volume_source_space(\n subject='sample', pos=20, # in mm\n bem=bem_fname, subjects_dir=subjects_dir)\n\n# make the leadfield matrix\nfwd = mne.make_forward_solution(\n raw.info, trans=trans, src=src, bem=bem_sol)\n\n# plot our setup\nbrain = mne.viz.Brain(subject_id='sample', hemi='both', surf='pial',\n subjects_dir=subjects_dir, alpha=0.25)\nbrain.add_volume_labels(alpha=0.25, colors='gray')\nbrain.add_forward(fwd, trans, scale=3)\nbrain.show_view(azimuth=30, elevation=90, distance=500)", "_____no_output_____" ] ], [ [ "Plot the same solution using a source space of dipoles\n\n", "_____no_output_____" ] ], [ [ "# take the source space from the forward model because some of the\n# vertices are excluded from the vertices in src\nn_dipoles = fwd['source_rr'].shape[0] # rr is the vertex positions\n# find the closest dipole to the one we used before (it was in this\n# source space) using the euclidean distance (np.linalg.norm)\nidx = np.argmin(np.linalg.norm(fwd['source_rr'] - dip_pos, axis=1))\n# make an empty matrix of zeros\ndata = np.zeros((n_dipoles, 3, 1))\ndata[idx, 2, 0] = 3e-8 # make the same dipole as before\n# this is the format that vertiex numbers are stored in\nvertices = [fwd['src'][0]['vertno']]\nstc = mne.VolVectorSourceEstimate(data, vertices=vertices,\n subject='sample', tmin=0, tstep=1)\n\nevoked = mne.simulation.simulate_evoked(\n fwd, stc, raw.info, cov=None, nave=np.inf)\n\n# confirm our replication\nfig, axes = plt.subplots(1, 3, figsize=(6, 4)) # make a figure with 3 subplots\nfor ax, ch_type in zip(axes, ('grad', 'mag', 'eeg')):\n evoked.plot_topomap(times=[0], ch_type=ch_type, axes=ax, colorbar=False)\n ax.set_title(ch_type)", "_____no_output_____" ] ], [ [ "Now, go crazy and simulate a bunch of random dipoles\n\n", 
"_____no_output_____" ] ], [ [ "np.random.seed(88) # always seed random number generation for reproducibility\nstc.data = np.random.random(stc.data.shape) * 3e-8 - 1.5e-8\nevoked = mne.simulation.simulate_evoked(\n fwd, stc, raw.info, cov=None, nave=np.inf)\n\n# now that's a complicated faked brain pattern, fortunately brain activity\n# is much more correlated (neighboring areas have similar activity) which\n# makes results a bit easier to interpret\nfig, axes = plt.subplots(1, 3, figsize=(6, 4)) # make a figure with 3 subplots\nfor ax, ch_type in zip(axes, ('grad', 'mag', 'eeg')):\n evoked.plot_topomap(times=[0], ch_type=ch_type, axes=ax, colorbar=False)\n ax.set_title(ch_type)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d066405480d86c498a651d428255fbf510fa241a
4,770
ipynb
Jupyter Notebook
backend/nomenclature/.ipynb_checkpoints/dataset_and_training-checkpoint.ipynb
gaarangoa/ARG-inspect
245d577f783c7ce395a730741987910097fc1981
[ "BSD-2-Clause" ]
6
2018-10-11T09:31:05.000Z
2022-01-27T10:22:41.000Z
backend/nomenclature/.ipynb_checkpoints/dataset_and_training-checkpoint.ipynb
gaarangoa/ARG-inspect
245d577f783c7ce395a730741987910097fc1981
[ "BSD-2-Clause" ]
3
2018-05-24T22:40:09.000Z
2021-10-12T06:53:45.000Z
backend/nomenclature/.ipynb_checkpoints/dataset_and_training-checkpoint.ipynb
gaarangoa/ARG-inspect
245d577f783c7ce395a730741987910097fc1981
[ "BSD-2-Clause" ]
1
2021-01-25T05:26:25.000Z
2021-01-25T05:26:25.000Z
28.058824
134
0.501468
[ [ [ "from pymongo import MongoClient\n\ndb = MongoClient().argpedia['master']\n\ncard_hits = [i for i in db.find() if i['entry']['database']=='CARD']", "_____no_output_____" ], [ "# Genes in the card database used for training\nprint(f'Card Hits: {len(card_hits)}')", "Card Hits: 2355\n" ], [ "# subtract the labels from the gene names\nraw_labels = [i['entry']['subtype'] for i in card_hits]\nimport re\ndef get_gene_shape(s):\n replaced = re.sub(r'[A-Z]', 'X', s)\n replaced = re.sub(r'[a-z]', 'x', replaced)\n replaced = re.sub(r'[0-9]', 'N', replaced)\n replaced = re.sub('N+', 'N', replaced)\n return replaced\n\ndef get_info(alignments):\n data = []\n for alignment in alignments:\n if alignment['best_hit_database'] == 'CARD':\n try:\n data += [i['category_aro_name'] for i in alignment['metadata']]\n except:\n pass\n if alignment['best_hit_database'] == 'megares':\n data += [alignment['type'], alignment['subtype']]\n if alignment['best_hit_database'] == 'ARDB':\n try:\n data += [alignment['metadata']['subtype']] + [i['type'] for i in alignment['metadata']['resistance_profile']]\n except:\n data += [alignment['metadata']['subtype']]\n if alignment['best_hit_database'] == 'RESFINDER':\n data += [alignment['type'], alignment['subtype']]\n if alignment['best_hit_database'] == 'SARG':\n data += [alignment['type'], alignment['subtype']]\n if alignment['best_hit_database'] == 'ARG-ANNOT':\n data += [alignment['type'], alignment['subtype']]\n if alignment['best_hit_database'] == 'ncbi-arg':\n data += [alignment['type'], alignment['subtype']]\n \n return data\n\n \ny = [get_gene_shape(i) for i in raw_labels]\nX = [get_info(i['besthit']['alignments']) for i in card_hits]\n\nprint(f'Dataset: {len(X)}, Labels: {len(y)}')\n", "Dataset: 2355, Labels: 2355\n" ], [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nfo = open('train.txt', 'w')\nfor ix,i in enumerate(X_train):\n fo.write(\" 
\".join([str(j) for j in i])+\"\\t__label__\"+y_train[ix]+'\\n')\n\nfo = open('test.txt', 'w')\nfor ix,i in enumerate(X_test):\n fo.write(\" \".join([str(j) for j in i])+\"\\t__label__\"+y_test[ix]+'\\n')", "_____no_output_____" ], [ "!fasttext supervised -input train.txt -output trained_model -epoch 10 -dim 100 -ws 3 -wordNgrams 2", "Read 0M words\nNumber of words: 4158\nNumber of labels: 77\nProgress: 100.0% words/sec/thread: 735058 lr: 0.000000 loss: 2.307008 eta: 0h0m 370295 eta: 0h0m eta: 0h0m \n" ], [ "!fasttext test trained_model.bin test.txt", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d06647507f5d443cb31d693bbf67fbd76e349f7a
2,826
ipynb
Jupyter Notebook
slide_illustration.ipynb
darpanbiswas/chain
50e63878012a545c1868d0e6e68a0bf8ead319bc
[ "MIT" ]
null
null
null
slide_illustration.ipynb
darpanbiswas/chain
50e63878012a545c1868d0e6e68a0bf8ead319bc
[ "MIT" ]
null
null
null
slide_illustration.ipynb
darpanbiswas/chain
50e63878012a545c1868d0e6e68a0bf8ead319bc
[ "MIT" ]
null
null
null
24.362069
74
0.412597
[ [ [ "import svgwrite\nimport cairosvg", "_____no_output_____" ] ], [ [ "# Topics of Block Chain", "_____no_output_____" ] ], [ [ "_topics=[\n 'Philosophical: impact on society', \n 'Scientific: algorithms, computer science, math)', \n 'Commerce: changes to industry', \n 'Capital: risk / reward of allocation strategies',\n 'Mechanical: code, servers, data centers',\n]\n\n_w=300\n_h=50\n_s=10\n\n", "_____no_output_____" ], [ "svg_document = svgwrite.Drawing(filename = \"test-svgwrite.svg\",\n size = (\"800px\", \"600px\"))\n\nfor _index, _topic in enumerate(_topics): \n svg_document.add(\n svg_document.rect(\n insert = (\n 20,\n _s*(_index+1)+_h*_index,\n ),\n size = (\n '{:d}px'.format(_w), \n '{:d}px'.format(_h), \n ),\n stroke_width = '1',\n stroke = 'blue',\n fill = 'rgb(255,255,255)',\n ),\n )\n \n svg_document.add(\n svg_document.text(\n _topic,\n insert = (\n 30,\n _s*(_index+1)+_h*_index+28,\n ),\n style = 'font-size:10px; font-family:monospace'\n )\n )\n\n\nsvg_document.save()", "_____no_output_____" ], [ "with open('test.png', 'wb') as png_file:\n png_file.write(cairosvg.svg2png(url='test-svgwrite.svg'))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d06652a82408722119e376c914837f4bbd3017ea
4,128
ipynb
Jupyter Notebook
Lab1/Neural network test.ipynb
marcciosilva/maa
fbbd739a941c0c21f9d25f2f1110ebdcfec3cfea
[ "MIT" ]
null
null
null
Lab1/Neural network test.ipynb
marcciosilva/maa
fbbd739a941c0c21f9d25f2f1110ebdcfec3cfea
[ "MIT" ]
null
null
null
Lab1/Neural network test.ipynb
marcciosilva/maa
fbbd739a941c0c21f9d25f2f1110ebdcfec3cfea
[ "MIT" ]
null
null
null
25.8
159
0.544816
[ [ [ "import numpy\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.externals import joblib", "_____no_output_____" ] ], [ [ "\n<ul>\n<li>Se inicializa una red neuronal usando función de activación de tangente hiperbólica, tasa de aprendizaje de 0.1 y 44 neuronas en capa oculta.</li>\n<li>Se entrena con dos instancias de entrenamiento (X e y) usando el método fit sobre la red neuronal.</li>\n</ul>\n", "_____no_output_____" ] ], [ [ "# Inicializo red neuronal con scikit\nclf = MLPClassifier(solver='lbfgs', activation='tanh', alpha=1e-4,\n hidden_layer_sizes=(44), random_state=1, learning_rate_init=.1)\n# Genero un array de vectores de input, de tamaño 64 y valores de [0,2]\nX = []\nfor i in range(4):\n X.append(numpy.random.randint(3, size=64))\n# X = numpy.random.randint(3, size=64).tolist()\n# X.append(5)\n# X.append(4)\n# X = X.reshape(1,-1)\ny = [0, 1, -1, 0]\n# Se entrena con X e Y a la red\nclf.fit(X, y)\nX = []\nfor i in range(4):\n X.append(numpy.random.randint(3, size=64))\ny = [-1, 0, -1, 1]\n# Se entrena nuevamente\nclf.fit(X, y)\n# clf.predict([[0., 0.], [1., 1.], [2., 1.], [1., 2.], [0., 1.5]])", "_____no_output_____" ] ], [ [ "Ejemplo de cómo evaluar un vector de entrada nuevo (lo que sería un tablero de Othello) con la red neuronal.", "_____no_output_____" ] ], [ [ "newBoard = numpy.random.randint(3, size=64)\nnewBoard = newBoard.reshape(1,-1)\nclf.predict(newBoard)", "_____no_output_____" ] ], [ [ "Ejemplo de como persistir red neuronal entrenada a un archivo en la ruta del notebook.", "_____no_output_____" ] ], [ [ "neuralNetwork = MLPClassifier(solver='lbfgs', activation='tanh', alpha=1e-4,\n hidden_layer_sizes=(44), random_state=1, learning_rate_init=.1)\njoblib.dump(neuralNetwork, 'red-neuronal-test.pkl')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0666ddf78670d466dc35a4201d0bcac9ffda04f
431,414
ipynb
Jupyter Notebook
stardist_segmentation.ipynb
quantumjot/segment-classify-track
2c9a4209d069575cb9b2081f25d6d836c9222436
[ "BSD-3-Clause" ]
3
2021-11-17T11:48:45.000Z
2022-01-28T13:49:35.000Z
stardist_segmentation.ipynb
quantumjot/segment-classify-track
2c9a4209d069575cb9b2081f25d6d836c9222436
[ "BSD-3-Clause" ]
13
2021-11-24T13:56:11.000Z
2022-03-10T16:05:51.000Z
stardist_segmentation.ipynb
quantumjot/segment-classify-track
2c9a4209d069575cb9b2081f25d6d836c9222436
[ "BSD-3-Clause" ]
null
null
null
1,438.046667
422,508
0.955203
[ [ [ "# Segmentation \n\nThis notebook shows how to use Stardist (Object Detection with Star-convex Shapes) as a part of a segmentation-classification-tracking analysis pipeline. \n\nThe sections of this notebook are as follows:\n\n1. Load images\n2. Load model of choice and segment an initial image to test Stardist parameters\n3. Batch segment a sequence of images\n\nThe data used in this notebook is timelapse microscopy data with h2b-gfp/rfp markers that show the spatial extent of the nucleus and it's mitotic state. \n\nThis notebook uses the dask octopuslite image loader from the CellX/Lowe lab project.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom octopuslite import DaskOctopusLiteLoader\nfrom stardist.models import StarDist2D \nfrom stardist.plot import render_label\nfrom csbdeep.utils import normalize\nfrom tqdm.auto import tqdm\nfrom skimage.io import imsave\nimport json\nfrom scipy import ndimage as nd", "_____no_output_____" ], [ "%matplotlib inline\nplt.rcParams['figure.figsize'] = [18,8]", "_____no_output_____" ] ], [ [ "## 1. Load images", "_____no_output_____" ] ], [ [ "# define experiment ID and select a position\nexpt = 'ND0011'\npos = 'Pos6'\n# point to where the data is\nroot_dir = '/home/nathan/data'\nimage_path = f'{root_dir}/{expt}/{pos}/{pos}_images'\n# lazily load imagesdd\nimages = DaskOctopusLiteLoader(image_path, \n remove_background = True)\nimages.channels", "Using cropping: (1200, 1600)\n" ] ], [ [ "Set segmentation channel and load test image", "_____no_output_____" ] ], [ [ "# segmentation channel\nsegmentation_channel = images.channels[3]\n# set test image index\nframe = 1000\n# load test image \nirfp = images[segmentation_channel.name][frame].compute()\n# create 1-channel XYC image\nimg = np.expand_dims(irfp, axis = -1)\nimg.shape", "_____no_output_____" ] ], [ [ "## 2. 
Load model and test segment single image ", "_____no_output_____" ] ], [ [ "model = StarDist2D.from_pretrained('2D_versatile_fluo')\nmodel", "Found model '2D_versatile_fluo' for 'StarDist2D'.\nLoading network weights from 'weights_best.h5'.\nLoading thresholds from 'thresholds.json'.\nUsing default values: prob_thresh=0.479071, nms_thresh=0.3.\n" ] ], [ [ "### 2.1 Test run and display initial results", "_____no_output_____" ] ], [ [ "# initialise test segmentation\nlabels, details = model.predict_instances(normalize(img))\n\n# plot input image and prediction\nplt.clf()\nplt.subplot(1,2,1)\nplt.imshow(normalize(img[:,:,0]), cmap=\"PiYG\")\nplt.axis(\"off\")\nplt.title(\"input image\")\nplt.subplot(1,2,2)\nplt.imshow(render_label(labels, img = img))\nplt.axis(\"off\")\nplt.title(\"prediction + input overlay\")\nplt.show()", "Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n" ] ], [ [ "## 3. Batch segment a whole stack of images", "_____no_output_____" ], [ "When you segment a whole data set you do not want to apply any image transformation. This is so that when you load images and masks later on you can apply the same transformation. You can apply a crop but note that you need to be consistent with your use of the crop from this point on, otherwise you'll get a shift. 
", "_____no_output_____" ] ], [ [ "for expt in tqdm(['ND0009', 'ND0010', 'ND0011']):\n for pos in tqdm(['Pos0', 'Pos1', 'Pos2', 'Pos3', 'Pos4']):\n print('Starting experiment position:', expt, pos)\n # load images\n image_path = f'{root_dir}/{expt}/{pos}/{pos}_images'\n images = DaskOctopusLiteLoader(image_path, \n remove_background = True)\n # iterate over images filenames \n for fn in tqdm(images.files(segmentation_channel.name)):\n # compile 1-channel into XYC array\n img = np.expand_dims(imread(fn), axis = -1)\n # predict labels\n labels, details = model.predict_instances(normalize(img))\n # set filename as mask format (channel099)\n fn = fn.replace(f'channel00{segmentation_channel.value}', 'channel099')\n # save out labelled image\n imsave(fn, labels.astype(np.uint16), check_contrast=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d0667253b3ce807971bb6255814bbe3814b1cc0f
16,281
ipynb
Jupyter Notebook
language-processing-vocab/language_processing_vocab.ipynb
prakash123mayank/Data-Science-45min-Intros
f9208e43d69f791f8611998b39238444e9a7b7ba
[ "Unlicense" ]
1,406
2015-01-05T19:20:55.000Z
2022-03-17T08:35:09.000Z
language-processing-vocab/language_processing_vocab.ipynb
prakash123mayank/Data-Science-45min-Intros
f9208e43d69f791f8611998b39238444e9a7b7ba
[ "Unlicense" ]
1
2017-10-23T15:31:02.000Z
2017-10-23T15:31:02.000Z
language-processing-vocab/language_processing_vocab.ipynb
prakash123mayank/Data-Science-45min-Intros
f9208e43d69f791f8611998b39238444e9a7b7ba
[ "Unlicense" ]
495
2015-01-06T11:39:21.000Z
2022-03-15T10:21:43.000Z
43.532086
547
0.636877
[ [ [ "# Introduction to Language Processing Concepts\n### Original tutorial by Brain Lehman, with updates by Fiona Pigott\n\nThe goal of this tutorial is to introduce a few basical vocabularies, ideas, and Python libraries for thinking about topic modeling, in order to make sure that we have a good set of vocabulary to talk more in-depth about processing languge with Python later. We'll spend some time on defining vocabulary for topic modeling and using basic topic modeling tools.\n\nA big thank-you to the good people at the Stanford NLP group, for their informative and helpful online book: https://nlp.stanford.edu/IR-book/.\n\n### Definitions.\n1. **Document**: a body of text (eg. tweet)\n2. **Tokenization**: dividing a document into pieces (and maybe throwing away some characters), in English this often (but not necessarily) means words separated by spaces and puctuation.\n3. **Text corpus**: the set of documents that contains the text for the analysis (eg. many tweets)\n4. **Stop words**: words that occur so frequently, or have so little topical meaning, that they are excluded (e.g., \"and\")\n5. **Vectorize**: Turn some documents into vectors\n6. **Vector corpus**: the set of documents transformed such that each token is a tuple (token_id , doc_freq)", "_____no_output_____" ] ], [ [ "# first, get some text:\nimport fileinput\ntry:\n import ujson as json\nexcept ImportError:\n import json\ndocuments = []\nfor line in fileinput.FileInput(\"example_tweets.json\"):\n documents.append(json.loads(line)[\"text\"])", "_____no_output_____" ] ], [ [ "### 1) Document\nIn the case of the text that we just imported, each entry in the list is a \"document\"--a single body of text, hopefully with some coherent meaning.", "_____no_output_____" ] ], [ [ "print(\"One document: \\\"{}\\\"\".format(documents[0]))", "_____no_output_____" ] ], [ [ "### 2) Tokenization\nWe split each document into smaller pieces (\"tokens\") in a process called tokenization. 
Tokens can be counted, and most importantly, compared between documents. There are potentially many different ways to tokenize text--splitting on spaces, removing punctionation, diving the document into n-character pieces--anything that gives us tokens that we can, hopefully, effectively compare across documents and derive meaning from.\n\nRelated to tokenization are processes called *stemming* and *lemmatiztion* which can help when using tokens to model topics based on the meaning of a word. In the phrases \"they run\" and \"he runs\" (space separated tokens: [\"they\", \"run\"] and [\"he\", \"runs\"]) the words \"run\" and \"run*s*\" mean basically the same thing, but are two different tokens. Stemming and/or lemmatization help us compare tokens with the same meaning but different spelling/suffixes.\n\n#### Lemmatization:\nUses a dictionary of words and their possible morphologies to map many different forms of a base word (\"lemma\") to a single lemma, comparable across documents. E.g.: \"run\", \"ran\", \"runs\", and \"running\" might all map to the lemma \"run\"\n\n#### Stemming: \nUses a set of heuristic rules to try to approximate lemmatization, without knowing the words in advance. For the English language, a simple and effective stemming algorithm might simply be to remove an \"s\" from the ends of words, or an \"ing\" from the end of words. E.g.: \"run\", \"runs\", and \"running\" all map to \"run,\" but \"ran\" (an irregularrly conjugated verb) would not. \n\nStemming is particularly interesting and applicable in social data, because while some words are decidely *not* standard English, conventinoal rules of grammar still apply. 
A fan of the popular singer Justin Bieber might call herself a \"belieber,\" while a group of fans call themselves \"beliebers.\" You won't find \"belieber\" in any English lemmatization dictionary, but a good stemming algorithm will still map \"belieber\" and \"beliebers\" to the same token (\"belieber\", or even \"belieb\", if we remover the common suffix \"er\").", "_____no_output_____" ] ], [ [ "from nltk.stem import porter\nfrom nltk.tokenize import TweetTokenizer\n\n# tokenize the documents\n# find good information on tokenization:\n# https://nlp.stanford.edu/IR-book/html/htmledition/tokenization-1.html\n# find documentation on pre-made tokenizers and options here:\n# http://www.nltk.org/api/nltk.tokenize.html\ntknzr = TweetTokenizer(reduce_len = True)\n\n# stem the documents\n# find good information on stemming and lemmatization:\n# https://nlp.stanford.edu/IR-book/html/htmledition/stemming-and-lemmatization-1.html\n# find documentation on available pre-implemented stemmers here:\n# http://www.nltk.org/api/nltk.stem.html\nstemmer = porter.PorterStemmer()\nfor doc in documents[0:10]:\n tokenized = tknzr.tokenize(doc)\n stemmed = [stemmer.stem(x) for x in tokenized]\n print(\"Original document:\\n{}\\nTokenized result:\\n{}\\nStemmed result:\\n{}\\n\".format(\n doc, tokenized, stemmed))", "_____no_output_____" ] ], [ [ "### 3) Text corpus\n\nThe text corpus is a collection of all of the documents (Tweets) that we're interested in modeling. Topic modeling and/or clustering on a corpus tends to work best if that corpus has some similar themes--this will mean that some tokens overlap, and we can get signal out of when documents share (or do not share) tokens. \n\nModeling text tends to get much harder the more different, uncommon and unrelated tokens appear in a text, especially when we are working with social data, where tokens don't necessarily appear in a dictionary. 
This difficultly (of having many, many unrelated tokens as dimension in our model) is one example of the [curse of dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality).", "_____no_output_____" ] ], [ [ "# number of documents in the corpus\nprint(\"There are {} documents in the corpus.\".format(len(documents)))", "_____no_output_____" ] ], [ [ "### 4) Stop words:\nStop words are simply tokens that we've chosen to remove from the corpus, for any reason. In English, removing words like \"and\", \"the\", \"a\", \"at\", and \"it\" are common choices for stop words. Stop words can also be edited per project requirement, in case some words are too common in a particular dataset to be meaningful (another way to do stop word removal is to simply remove any word that appears in more than some fixed percentage of documents).", "_____no_output_____" ] ], [ [ "from nltk.corpus import stopwords\n\nstopset = set(stopwords.words('english'))\nprint(\"The English stop words list provided by NLTK: \")\nprint(stopset)\n\nstopset.update([\"twitter\"]) # add token\nstopset.remove(\"i\") # remove token\nprint(\"\\nAdd or remove stop words form the set: \")\nprint(stopset)", "_____no_output_____" ] ], [ [ "### 5) Vectorize:\n\nTransform each document into a vector. There are several good choices that you can make about how to do this transformation, and I'll talk about each of them in a second.\n\nIn order to vectorize documents in a corpus (without any dimensional reduction around the vocabulary), think of each document as a row in a matrix, and each column as a word in the vocabulary of the entire corpus. 
In order to vectorize a corpus, we must read the entire corpus, assign one word to each column, and then turn each document into a row.\n\n**Example**: \n**Documents**: \"I love cake\", \"I hate chocolate\", \"I love chocolate cake\", \"I love cake, but I hate chocolate cake\" \n**Stopwords**: Say, because the word \"but\" is a conjunction, we want to make it a stop word (not include it in our document vectors)\n**Vocabulary**: \"I\" (column 1), \"love\" (column 2), \"cake\" (column 3), \"hate\" (column 4), \"chocolate\" (column 5)\n\\begin{equation*}\n\\begin{matrix}\n\\text{\"I love cake\" } & =\\\\\n\\text{\"I hate chocolate\" } & =\\\\\n\\text{\"I love chocolate cake\" } & = \\\\\n\\text{\"I love cake, but I hate chocolate cake\"} & =\n\\end{matrix}\n\\qquad\n\\begin{bmatrix}\n1 & 1 & 1 & 0 & 0\\\\\n1 & 0 & 0 & 1 & 1\\\\\n1 & 1 & 1 & 0 & 1\\\\\n2 & 1 & 2 & 1 & 1\n\\end{bmatrix}\n\\end{equation*}\n\n\nVectorization like this don't take into account word order (we call this property \"bag of words\"), and in the above example I am simply counting the frequency of each term in each document.", "_____no_output_____" ] ], [ [ "# we're going to use the vectorizer functions that scikit learn provides\n\n# define the tokenizer that we want to use\n# must be a callable function that takes a document and returns a list of tokens\ntknzr = TweetTokenizer(reduce_len = True)\nstemmer = porter.PorterStemmer()\ndef myTokenizer(doc):\n return [stemmer.stem(x) for x in tknzr.tokenize(doc)]\n\n# choose the stopword set that we want to use\nstopset = set(stopwords.words('english'))\nstopset.update([\"http\",\"https\",\"twitter\",\"amp\"])\n\n# vectorize\n# we're using the scikit learn CountVectorizer function, which is very handy\n# documentation here: \n# http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nvectorizer = CountVectorizer(tokenizer = 
myTokenizer, stop_words = stopset)\nvectorized_documents = vectorizer.fit_transform(documents)", "_____no_output_____" ], [ "vectorized_documents", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline\n_ = plt.hist(vectorized_documents.todense().sum(axis = 1))\n_ = plt.title(\"Number of tokens per document\")\n_ = plt.xlabel(\"Number of tokens\")\n_ = plt.ylabel(\"Number of documents with x tokens\")", "_____no_output_____" ], [ "from numpy import logspace, ceil, histogram, array\n# get the token frequency\ntoken_freq = sorted(vectorized_documents.todense().astype(bool).sum(axis = 0).tolist()[0], reverse = False)\n# make a histogram with log scales\nbins = array([ceil(x) for x in logspace(0, 3, 5)])\nwidths = (bins[1:] - bins[:-1])\nhist = histogram(token_freq, bins=bins)\nhist_norm = hist[0]/widths\n# plot (notice that most tokens only appear in one document)\nplt.bar(bins[:-1], hist_norm, widths)\nplt.xscale('log')\nplt.yscale('log')\n_ = plt.title(\"Number of documents in which each token appears\")\n_ = plt.xlabel(\"Number of documents\")\n_ = plt.ylabel(\"Number of tokens\")", "_____no_output_____" ] ], [ [ "#### Bag of words\nTaking all the words from a document, and sticking them in a bag. Order does not matter, which could cause a problem. \"Alice loves cake\" might have a different meaning than \"Cake loves Alice.\"\n\n#### Frequency\nCounting the number of times a word appears in a document.\n\n#### Tf-Idf (term frequency inverse document frequency):\nA statistic that is intended to reflect how important a word is to a document in a collection or corpus. The Tf-Idf value increases proportionally to the number of times a word appears in the document and is inversely proportional to the frequency of the word in the corpus--this helps control words that are generally more common than others. 
\n\nThere are several different possibilities for computing the tf-idf statistic--choosing whether to normalize the vectors, choosing whether to use counts or the logarithm of counts, etc. I'm going to show how scikit-learn computed the tf-idf statistic by default, with more information available in the documentation of the sckit-learn [TfidfVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfTransformer.html).\n\n$tf(t)$ : Term Frequency, count of the number of times each term appears in the document. \n$idf(d,t)$ : Inverse document frequency. \n$df(d,t)$ : Document frequency, the count of the number of documents in which the term appears. \n\n$$\ntfidf(t) = tf(t) * \\log\\big(\\frac{1 + n}{1 + df(d, t)}\\big) + 1\n$$\n\nWe also then take the Euclidean ($l2$) norm of each document vector, so that long documents (documents with many non-stopword tokens) have the same norm as shorter documents.", "_____no_output_____" ] ], [ [ "# documentation on this sckit-learn function here:\n# http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfTransformer.html\ntfidf_vectorizer = TfidfVectorizer(tokenizer = myTokenizer, stop_words = stopset)\ntfidf_vectorized_documents = tfidf_vectorizer.fit_transform(documents)", "_____no_output_____" ], [ "tfidf_vectorized_documents", "_____no_output_____" ], [ "# you can look at two vectors for the same document, from 2 different vectorizers:\ntfidf_vectorized_documents[0].todense().tolist()[0]", "_____no_output_____" ], [ "vectorized_documents[0].todense().tolist()[0]", "_____no_output_____" ] ], [ [ "## That's all for now!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
d066731448482f634e5073efab07f8d3d92874b5
24,820
ipynb
Jupyter Notebook
MovieReviews.ipynb
kartikeyab/Kaggle-Codes
6d96b8a57c6d94ccdf3ad92e5ff00e43789a52a2
[ "MIT" ]
null
null
null
MovieReviews.ipynb
kartikeyab/Kaggle-Codes
6d96b8a57c6d94ccdf3ad92e5ff00e43789a52a2
[ "MIT" ]
null
null
null
MovieReviews.ipynb
kartikeyab/Kaggle-Codes
6d96b8a57c6d94ccdf3ad92e5ff00e43789a52a2
[ "MIT" ]
null
null
null
45.209472
7,349
0.592305
[ [ [ "[View in Colaboratory](https://colab.research.google.com/github/kartikeyab/Kaggle-Codes/blob/master/MovieReviews.ipynb)", "_____no_output_____" ] ], [ [ "from keras.preprocessing.text import Tokenizer \ntokenizer = Tokenizer(num_words = 10000)\nimport pandas as pd\nimport numpy as np\nfrom keras.utils import to_categorical\n", "Using TensorFlow backend.\n" ], [ "from google.colab import files\nuploaded = files.upload()", "_____no_output_____" ], [ "#loading data\ntrain_df = pd.read_csv('train.tsv', sep='\\t', header=0)\n", "_____no_output_____" ], [ "x_train = train_df['Phrase'].values\ny_train = train_df['Sentiment'].values\n", "_____no_output_____" ], [ "x_train[1000] , y_train[1000]", "_____no_output_____" ], [ "tokenizer.fit_on_texts(x_train)\ntokenizer.word_index\nx_train_tokens = tokenizer.texts_to_sequences(x_train)\ny_train = to_categorical(y_train)\n\n", "_____no_output_____" ], [ "x_train[7] , y_train[7]", "_____no_output_____" ], [ "num_token = [ len(x) for x in x_train]\nnum_token = np.array(num_token)\n", "_____no_output_____" ], [ "max_tokens = np.mean(num_token) + 2*np.std(num_token)\nprint(max_tokens)", "116.52523938435769\n" ], [ "from keras.preprocessing.sequence import pad_sequences", "_____no_output_____" ], [ "x_train_pad = pad_sequences(x_train_tokens, maxlen = 116 , padding = 'pre')\nx_train_pad[1]", "_____no_output_____" ], [ "from keras.models import Sequential\nfrom keras.layers import Embedding , Dense , Conv1D , MaxPooling1D , GRU, LSTM , Flatten , Dropout\nfrom keras.optimizers import adagrad\n", "_____no_output_____" ], [ "num_classes = 5\n", "_____no_output_____" ], [ "model = Sequential()\nmodel.add(Embedding(input_dim = 10000, input_length = 116 ,output_dim = 128))\n\nmodel.add(Conv1D(128 , kernel_size = 3 , activation = 'relu'))\n\nmodel.add(Conv1D(64 , kernel_size = 3 , activation = 'relu'))\n\nmodel.add(MaxPooling1D(pool_size 
=2))\n\nmodel.add(Dropout(0.2))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(250))\n\n\n\nmodel.add(Dense(num_classes, activation = 'sigmoid'))\n\n\nmodel.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])\n\nmodel.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_18 (Embedding) (None, 116, 128) 1280000 \n_________________________________________________________________\nconv1d_28 (Conv1D) (None, 114, 128) 49280 \n_________________________________________________________________\nconv1d_29 (Conv1D) (None, 112, 64) 24640 \n_________________________________________________________________\nmax_pooling1d_18 (MaxPooling (None, 56, 64) 0 \n_________________________________________________________________\ndropout_9 (Dropout) (None, 56, 64) 0 \n_________________________________________________________________\nflatten_11 (Flatten) (None, 3584) 0 \n_________________________________________________________________\ndense_31 (Dense) (None, 250) 896250 \n_________________________________________________________________\ndense_32 (Dense) (None, 5) 1255 \n=================================================================\nTotal params: 2,251,425\nTrainable params: 2,251,425\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.fit(x_train_pad, y_train ,validation_split = 0.2, epochs = 10, batch_size = 256, shuffle = True)", "Train on 124848 samples, validate on 31212 samples\nEpoch 1/10\n124848/124848 [==============================] - 16s 125us/step - loss: 1.0302 - acc: 0.5819 - val_loss: 1.0066 - val_acc: 0.5970\nEpoch 2/10\n112896/124848 [==========================>...] - ETA: 1s - loss: 0.8050 - acc: 0.6636" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06686fdc7cef09fc01bdc8aae2eb00c0f942800
18,307
ipynb
Jupyter Notebook
solution_1.ipynb
saanaz379/user-comment-classifier
1c00b70cc9acc1a19798f61bc430aec38731024e
[ "Apache-2.0" ]
null
null
null
solution_1.ipynb
saanaz379/user-comment-classifier
1c00b70cc9acc1a19798f61bc430aec38731024e
[ "Apache-2.0" ]
null
null
null
solution_1.ipynb
saanaz379/user-comment-classifier
1c00b70cc9acc1a19798f61bc430aec38731024e
[ "Apache-2.0" ]
null
null
null
41.606818
509
0.423827
[ [ [ "<a href=\"https://colab.research.google.com/github/saanaz379/user-comment-classifier/blob/main/solution_1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "This is the first of three solutions to identify offensive language or hate speech in a set of user comments. The code is tested on actual data provided from a day's user comments. This solution utilizes a dictionary that stores all common offensive words and identifies them in any given review, which is then flagged for review. The dictionary is extracted from a Kaggle dataset of offensive tweets.\n\n*Note: This code is incapable of detecting sentence patterns that may predict hate speech.", "_____no_output_____" ] ], [ [ "! pip install nltk", "Requirement already satisfied: nltk in /usr/local/lib/python3.7/dist-packages (3.2.5)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from nltk) (1.15.0)\n" ], [ "! 
python -m textblob.download_corpora", "[nltk_data] Downloading package brown to /root/nltk_data...\n[nltk_data] Unzipping corpora/brown.zip.\n[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\n[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Unzipping corpora/wordnet.zip.\n[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] /root/nltk_data...\n[nltk_data] Unzipping taggers/averaged_perceptron_tagger.zip.\n[nltk_data] Downloading package conll2000 to /root/nltk_data...\n[nltk_data] Unzipping corpora/conll2000.zip.\n[nltk_data] Downloading package movie_reviews to /root/nltk_data...\n[nltk_data] Unzipping corpora/movie_reviews.zip.\nFinished.\n" ], [ "import nltk\nimport csv\nimport collections\nimport pandas as pd\n\nfrom collections import Counter\nfrom nltk.corpus import stopwords", "_____no_output_____" ], [ "# extracting offensive language from twitter kaggle data to final_list\n\nnltk.download('stopwords')\nraw_reviews = []\nreviews_filename = '/content/drive/MyDrive/labeled_data.csv'\nwith open(reviews_filename, 'r') as reviews_csvfile:\n csvreader = csv.reader(reviews_csvfile)\n next(csvreader)\n for i in range(1000): # 10399\n row = next(csvreader)\n if int(row[3]) != 0:\n review = row[-1]\n review_arr = review.split(\":\")\n raw_reviews.append(review_arr[-1])\nreview_words = []\nfor review in raw_reviews:\n review_arr = review.split(\" \")\n for review_word in review_arr:\n if \"\\\"\" not in review_word and review_word != \"\" and not \"&\" in review_word and review_word != \"-\" and review_word != \"love\" and \"I\" not in review_word and \"'\" not in review_word and review_word != \"got\":\n review_words.append(review_word)\nstop_words = set(stopwords.words('english'))\nwith open('/content/drive/MyDrive/common_words.txt','r') as file:\n common_words = file.read()\nwords_list = [word for word in review_words if not word in stop_words and not word in 
common_words]\nfinal_list = []\nfor word in Counter(words_list).most_common(9):\n final_list.append(word[0])", "[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "# adding reviews to formatted Data Frame\n\nraw_reviews = []\nreviews_filename = '/content/drive/MyDrive/reviews.csv.txt'\nwith open(reviews_filename, 'r') as reviews_csvfile:\n csvreader = csv.reader(reviews_csvfile)\n next(csvreader)\n next(csvreader)\n for i in range(10397):\n row = next(csvreader)\n if (len(row) >= 5):\n row.pop(0)\n row.pop(-1)\n row[2] = ''.join(row[2].split())\n if row[2].replace('.', '', 1).isdigit():\n row[2] = float(row[2])\n row[1] = row[1].rstrip()\n if not row[1] == \"\" and isinstance(row[2], float):\n raw_reviews.append(row)\ntable = pd.DataFrame(data = raw_reviews, columns = ['ID', 'Comments', 'Recommend'])\ntable", "_____no_output_____" ], [ "# Return flagged comments for human review. In\n# this case, the comments marked only contain\n# contain words that are made up of a word in the\n# dictionary.\n\nfor col, row in table.iterrows():\n curr_review = row['Comments']\n for word in final_list:\n word = word + \".\"\n if word in curr_review:\n print(\"offensive lang\")\n print(curr_review)", "offensive lang\n She was very quick and informative. No bullshit. You could tell she had a smile even with her mask. Thanks!\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d0668e2ca5da239346dcbdc47c750e10d3e08392
21,528
ipynb
Jupyter Notebook
tutorials/W1D3_ModelFitting/W1D3_Tutorial3.ipynb
DianaMosquera/course-content
cc2e0e2e5d9b476e0fb810ead4ed19dc23745152
[ "CC-BY-4.0", "BSD-3-Clause" ]
2
2021-05-12T02:19:05.000Z
2021-05-12T13:49:29.000Z
tutorials/W1D3_ModelFitting/W1D3_Tutorial3.ipynb
DianaMosquera/course-content
cc2e0e2e5d9b476e0fb810ead4ed19dc23745152
[ "CC-BY-4.0", "BSD-3-Clause" ]
1
2020-08-26T10:44:11.000Z
2020-08-26T10:44:11.000Z
tutorials/W1D3_ModelFitting/W1D3_Tutorial3.ipynb
DianaMosquera/course-content
cc2e0e2e5d9b476e0fb810ead4ed19dc23745152
[ "CC-BY-4.0", "BSD-3-Clause" ]
1
2021-05-02T10:03:07.000Z
2021-05-02T10:03:07.000Z
34.555377
620
0.593692
[ [ [ "<a href=\"https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D3_ModelFitting/W1D3_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "\n# Neuromatch Academy: Week 1, Day 3, Tutorial 3\n# Model Fitting: Confidence intervals and bootstrapping\n\n**Content creators**: Pierre-Étienne Fiquet, Anqi Wu, Alex Hyafil with help from Byron Galbraith\n\n**Content reviewers**: Lina Teichmann, Saeed Salehi, Patrick Mineault, Ella Batty, Michael Waskom ", "_____no_output_____" ], [ "#Tutorial Objectives\n\nThis is Tutorial 3 of a series on fitting models to data. We start with simple linear regression, using least squares optimization (Tutorial 1) and Maximum Likelihood Estimation (Tutorial 2). We will use bootstrapping to build confidence intervals around the inferred linear model parameters (Tutorial 3). We'll finish our exploration of regression models by generalizing to multiple linear regression and polynomial regression (Tutorial 4). We end by learning how to choose between these various models. We discuss the bias-variance trade-off (Tutorial 5) and Cross Validation for model selection (Tutorial 6).\n\nIn this tutorial, we wil discuss how to gauge how good our estimated model parameters are. \n- Learn how to use bootstrapping to generate new sample datasets\n- Estimate our model parameter on these new sample datasets\n- Quantify the variance of our estimate using confidence intervals", "_____no_output_____" ] ], [ [ "#@title Video 1: Confidence Intervals & Bootstrapping\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"hs6bVGQNSIs\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtube.com/watch?v=\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "Up to this point we have been finding ways to estimate model parameters to fit some observed data. 
Our approach has been to optimize some criterion, either minimize the mean squared error or maximize the likelihood while using the entire dataset. How good is our estimate really? How confident are we that it will generalize to describe new data we haven't seen yet?\n\nOne solution to this is to just collect more data and check the MSE on this new dataset with the previously estimated parameters. However this is not always feasible and still leaves open the question of how quantifiably confident we are in the accuracy of our model.\n\nIn Section 1, we will explore how to implement bootstrapping. In Section 2, we will build confidence intervals of our estimates using the bootstrapping method.", "_____no_output_____" ], [ "---\n# Setup", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "#@title Figure Settings\n%config InlineBackend.figure_format = 'retina'\nplt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle\")", "_____no_output_____" ], [ "#@title Helper Functions\ndef solve_normal_eqn(x, y):\n \"\"\"Solve the normal equations to produce the value of theta_hat that minimizes\n MSE.\n\n Args:\n x (ndarray): An array of shape (samples,) that contains the input values.\n y (ndarray): An array of shape (samples,) that contains the corresponding\n measurement values to the inputs.\n thata_hat (float): An estimate of the slope parameter.\n\n Returns:\n float: the value for theta_hat arrived from minimizing MSE\n \"\"\"\n theta_hat = (x.T @ y) / (x.T @ x)\n return theta_hat", "_____no_output_____" ] ], [ [ "---\n# Section 1: Bootstrapping\n\n[Bootstrapping](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)) is a widely applicable method to assess confidence/uncertainty about estimated parameters, it was originally [proposed](https://projecteuclid.org/euclid.aos/1176344552) by [Bradley Efron](https://en.wikipedia.org/wiki/Bradley_Efron). 
The idea is to generate many new synthetic datasets from the initial true dataset by randomly sampling from it, then finding estimators for each one of these new datasets, and finally looking at the distribution of all these estimators to quantify our confidence.\n\nNote that each new resampled datasets will be the same size as our original one, with the new data points sampled with replacement i.e. we can repeat the same data point multiple times. Also note that in practice we need a lot of resampled datasets, here we use 2000.\n\nTo explore this idea, we will start again with our noisy samples along the line $y_n = 1.2x_n + \\epsilon_n$, but this time only use half the data points as last time (15 instead of 30).", "_____no_output_____" ] ], [ [ "#@title\n\n#@markdown Execute this cell to simulate some data\n\n# setting a fixed seed to our random number generator ensures we will always\n# get the same psuedorandom number sequence\nnp.random.seed(121)\n\n# Let's set some parameters\ntheta = 1.2\nn_samples = 15\n\n# Draw x and then calculate y\nx = 10 * np.random.rand(n_samples) # sample from a uniform distribution over [0,10)\nnoise = np.random.randn(n_samples) # sample from a standard normal distribution\ny = theta * x + noise\n\nfig, ax = plt.subplots()\nax.scatter(x, y) # produces a scatter plot\nax.set(xlabel='x', ylabel='y');", "_____no_output_____" ] ], [ [ "### Exercise 1: Resample Dataset with Replacement\n\nIn this exercise you will implement a method to resample a dataset with replacement. The method accepts $x$ and $y$ arrays. 
It should return a new set of $x'$ and $y'$ arrays that are created by randomly sampling from the originals.\n\nWe will then compare the original dataset to a resampled dataset.\n\nTIP: The [numpy.random.choice](https://numpy.org/doc/stable/reference/random/generated/numpy.random.choice.html) method would be useful here.", "_____no_output_____" ] ], [ [ "def resample_with_replacement(x, y):\n \"\"\"Resample data points with replacement from the dataset of `x` inputs and\n `y` measurements.\n\n Args:\n x (ndarray): An array of shape (samples,) that contains the input values.\n y (ndarray): An array of shape (samples,) that contains the corresponding\n measurement values to the inputs.\n\n Returns:\n ndarray, ndarray: The newly resampled `x` and `y` data points.\n \"\"\"\n #######################################################\n ## TODO for students: resample dataset with replacement\n # Fill out function and remove\n raise NotImplementedError(\"Student exercise: resample dataset with replacement\")\n #######################################################\n\n # Get array of indices for resampled points\n sample_idx = ...\n\n # Sample from x and y according to sample_idx\n x_ = ...\n y_ = ...\n return x_, y_\n\n\nfig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))\nax1.scatter(x, y)\nax1.set(title='Original', xlabel='x', ylabel='y')\n\n# Uncomment below to test your function\n#x_, y_ = resample_with_replacement(x, y)\n#ax2.scatter(x_, y_, color='c')\n\nax2.set(title='Resampled', xlabel='x', ylabel='y',\n xlim=ax1.get_xlim(), ylim=ax1.get_ylim());", "_____no_output_____" ], [ "# to_remove solution\ndef resample_with_replacement(x, y):\n \"\"\"Resample data points with replacement from the dataset of `x` inputs and\n `y` measurements.\n\n Args:\n x (ndarray): An array of shape (samples,) that contains the input values.\n y (ndarray): An array of shape (samples,) that contains the corresponding\n measurement values to the inputs.\n\n Returns:\n ndarray, ndarray: 
The newly resampled `x` and `y` data points.\n \"\"\"\n\n # Get array of indices for resampled points\n sample_idx = np.random.choice(len(x), size=len(x), replace=True)\n\n # Sample from x and y according to sample_idx\n x_ = x[sample_idx]\n y_ = y[sample_idx]\n\n return x_, y_\n\n\nwith plt.xkcd():\n fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))\n ax1.scatter(x, y)\n ax1.set(title='Original', xlabel='x', ylabel='y')\n\n x_, y_ = resample_with_replacement(x, y)\n ax2.scatter(x_, y_, color='c')\n\n ax2.set(title='Resampled', xlabel='x', ylabel='y',\n xlim=ax1.get_xlim(), ylim=ax1.get_ylim());", "_____no_output_____" ] ], [ [ "In the resampled plot on the right, the actual number of points is the same, but some have been repeated so they only display once.\n\nNow that we have a way to resample the data, we can use that in the full bootstrapping process.", "_____no_output_____" ], [ "### Exercise 2: Bootstrap Estimates\n\nIn this exercise you will implement a method to run the bootstrap process of generating a set of $\\hat\\theta$ values from a dataset of $x$ inputs and $y$ measurements. 
You should use `resample_with_replacement` here, and you may also invoke helper function `solve_normal_eqn` from Tutorial 1 to produce the MSE-based estimator.\n\nWe will then use this function to look at the theta_hat from different samples.\n", "_____no_output_____" ] ], [ [ "def bootstrap_estimates(x, y, n=2000):\n \"\"\"Generate a set of theta_hat estimates using the bootstrap method.\n\n Args:\n x (ndarray): An array of shape (samples,) that contains the input values.\n y (ndarray): An array of shape (samples,) that contains the corresponding\n measurement values to the inputs.\n n (int): The number of estimates to compute\n\n Returns:\n ndarray: An array of estimated parameters with size (n,)\n \"\"\"\n theta_hats = np.zeros(n)\n\n ##############################################################################\n ## TODO for students: implement bootstrap estimation\n # Fill out function and remove\n raise NotImplementedError(\"Student exercise: implement bootstrap estimation\")\n ##############################################################################\n\n # Loop over number of estimates\n for i in range(n):\n\n # Resample x and y\n x_, y_ = ...\n\n # Compute theta_hat for this sample\n theta_hats[i] = ...\n\n return theta_hats\n\n\nnp.random.seed(123) # set random seed for checking solutions\n\n# Uncomment below to test function\n# theta_hats = bootstrap_estimates(x, y, n=2000)\n# print(theta_hats[0:5])", "_____no_output_____" ], [ "# to_remove solution\ndef bootstrap_estimates(x, y, n=2000):\n \"\"\"Generate a set of theta_hat estimates using the bootstrap method.\n\n Args:\n x (ndarray): An array of shape (samples,) that contains the input values.\n y (ndarray): An array of shape (samples,) that contains the corresponding\n measurement values to the inputs.\n n (int): The number of estimates to compute\n\n Returns:\n ndarray: An array of estimated parameters with size (n,)\n \"\"\"\n theta_hats = np.zeros(n)\n\n # Loop over number of estimates\n for i 
in range(n):\n\n # Resample x and y\n x_, y_ = resample_with_replacement(x, y)\n\n # Compute theta_hat for this sample\n theta_hats[i] = solve_normal_eqn(x_, y_)\n\n return theta_hats\n\n\nnp.random.seed(123) # set random seed for checking solutions\n\ntheta_hats = bootstrap_estimates(x, y, n=2000)\nprint(theta_hats[0:5])", "_____no_output_____" ] ], [ [ "You should see `[1.27550888 1.17317819 1.18198819 1.25329255 1.20714664]` as the first five estimates.", "_____no_output_____" ], [ "Now that we have our bootstrap estimates, we can visualize all the potential models (models computed with different resampling) together to see how distributed they are.", "_____no_output_____" ] ], [ [ "#@title\n#@markdown Execute this cell to visualize all potential models\n\nfig, ax = plt.subplots()\n\n# For each theta_hat, plot model\ntheta_hats = bootstrap_estimates(x, y, n=2000)\nfor i, theta_hat in enumerate(theta_hats):\n y_hat = theta_hat * x\n ax.plot(x, y_hat, c='r', alpha=0.01, label='Resampled Fits' if i==0 else '')\n\n# Plot observed data\nax.scatter(x, y, label='Observed')\n\n# Plot true fit data\ny_true = theta * x\nax.plot(x, y_true, 'g', linewidth=2, label='True Model')\n\nax.set(\n title='Bootstrapped Slope Estimation',\n xlabel='x',\n ylabel='y'\n)\n\n# Change legend line alpha property\nhandles, labels = ax.get_legend_handles_labels()\nhandles[0].set_alpha(1)\n\nax.legend();", "_____no_output_____" ] ], [ [ "This looks pretty good! The bootstrapped estimates spread around the true model, as we would have hoped. Note that here we have the luxury to know the ground truth value for $\\theta$, but in applications we are trying to guess it from data. Therefore, assessing the quality of estimates based on finite data is a task of fundamental importance in data analysis.\n", "_____no_output_____" ], [ "---\n# Section 2: Confidence Intervals\n\nLet us now quantify how uncertain our estimated slope is. 
We do so by computing [confidence intervals](https://en.wikipedia.org/wiki/Confidence_interval) (CIs) from our bootstrapped estimates. The most direct approach is to compute percentiles from the empirical distribution of bootstrapped estimates. Note that this is widely applicable as we are not assuming that this empirical distribution is Gaussian.", "_____no_output_____" ] ], [ [ "#@title\n\n#@markdown Execute this cell to plot bootstrapped CI\n\ntheta_hats = bootstrap_estimates(x, y, n=2000)\nprint(f\"mean = {np.mean(theta_hats):.2f}, std = {np.std(theta_hats):.2f}\")\n\nfig, ax = plt.subplots()\nax.hist(theta_hats, bins=20, facecolor='C1', alpha=0.75)\nax.axvline(theta, c='g', label=r'True $\\theta$')\nax.axvline(np.percentile(theta_hats, 50), color='r', label='Median')\nax.axvline(np.percentile(theta_hats, 2.5), color='b', label='95% CI')\nax.axvline(np.percentile(theta_hats, 97.5), color='b')\nax.legend()\nax.set(\n title='Bootstrapped Confidence Interval',\n xlabel=r'$\\hat{{\\theta}}$',\n ylabel='count',\n xlim=[1.0, 1.5]\n);", "_____no_output_____" ] ], [ [ "Looking at the distribution of bootstrapped $\\hat{\\theta}$ values, we see that the true $\\theta$ falls well within the 95% confidence interval, wich is reinsuring. We also see that the value $\\theta = 1$ does not fall within the confidence interval. From this we would reject the hypothesis that the slope was 1.", "_____no_output_____" ], [ "---\n# Summary\n\n- Bootstrapping is a resampling procedure that allows to build confidence intervals around inferred parameter values\n- it is a widely applicable and very practical method that relies on computational power and pseudo-random number generators (as opposed to more classical approaches than depend on analytical derivations)", "_____no_output_____" ], [ "**Suggested readings** \n\nComputer Age Statistical Inference: Algorithms, Evidence and Data Science, by Bradley Efron and Trevor Hastie\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
d0669e6021f39ddcd4c1a94b8ba3527df013ab09
26,429
ipynb
Jupyter Notebook
Model backlog/Inference/39-commonlit-inf-roberta-base-target-sampling-exp.ipynb
dimitreOliveira/CommonLit-Readability-Prize
e2abad78a3f79119521a480391dc1254b1dd6566
[ "MIT" ]
null
null
null
Model backlog/Inference/39-commonlit-inf-roberta-base-target-sampling-exp.ipynb
dimitreOliveira/CommonLit-Readability-Prize
e2abad78a3f79119521a480391dc1254b1dd6566
[ "MIT" ]
null
null
null
Model backlog/Inference/39-commonlit-inf-roberta-base-target-sampling-exp.ipynb
dimitreOliveira/CommonLit-Readability-Prize
e2abad78a3f79119521a480391dc1254b1dd6566
[ "MIT" ]
null
null
null
33.53934
243
0.52764
[ [ [ "## Dependencies", "_____no_output_____" ] ], [ [ "import warnings, math, json, glob\nimport pandas as pd\nimport tensorflow.keras.layers as L\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras import Model\nfrom transformers import TFAutoModelForSequenceClassification, TFAutoModel, AutoTokenizer\nfrom commonlit_scripts import *\n\n\nseed = 0\nseed_everything(seed)\nwarnings.filterwarnings('ignore')\npd.set_option('display.max_colwidth', 150)", "_____no_output_____" ] ], [ [ "### Hardware configuration", "_____no_output_____" ] ], [ [ "strategy, tpu = get_strategy()\nAUTO = tf.data.AUTOTUNE\nREPLICAS = strategy.num_replicas_in_sync\nprint(f'REPLICAS: {REPLICAS}')", "REPLICAS: 1\n" ] ], [ [ "# Load data", "_____no_output_____" ] ], [ [ "base_path = '/kaggle/input/'\ntest_filepath = base_path + 'commonlitreadabilityprize/test.csv'\ntest = pd.read_csv(test_filepath)\nprint(f'Test samples: {len(test)}')\ndisplay(test.head())", "Test samples: 7\n" ] ], [ [ "# Model parameters", "_____no_output_____" ] ], [ [ "input_noteboks = [x for x in os.listdir(base_path) if '-commonlit-' in x]\ninput_base_path = f'{base_path}{input_noteboks[0]}/'\nwith open(input_base_path + 'config.json') as json_file:\n config = json.load(json_file)\n\nconfig", "_____no_output_____" ] ], [ [ "## Auxiliary functions", "_____no_output_____" ] ], [ [ "# Datasets utility functions\ndef custom_standardization(text, is_lower=True):\n if is_lower:\n text = text.lower() # if encoder is uncased\n text = text.strip()\n return text\n\ndef sample_target(features, target):\n mean, stddev = target\n sampled_target = tf.random.normal([], mean=tf.cast(mean, dtype=tf.float32), \n stddev=tf.cast(stddev, dtype=tf.float32), dtype=tf.float32)\n return (features, sampled_target)\n\ndef get_dataset(pandas_df, tokenizer, labeled=True, ordered=False, repeated=False, \n is_sampled=False, batch_size=32, seq_len=128, is_lower=True):\n \"\"\"\n Return a Tensorflow dataset ready for training or inference.\n 
\"\"\"\n text = [custom_standardization(text, is_lower) for text in pandas_df['excerpt']]\n \n # Tokenize inputs\n tokenized_inputs = tokenizer(text, max_length=seq_len, truncation=True, \n padding='max_length', return_tensors='tf')\n \n if labeled:\n dataset = tf.data.Dataset.from_tensor_slices(({'input_ids': tokenized_inputs['input_ids'], \n 'attention_mask': tokenized_inputs['attention_mask']}, \n (pandas_df['target'], pandas_df['standard_error'])))\n if is_sampled:\n dataset = dataset.map(sample_target, num_parallel_calls=tf.data.AUTOTUNE)\n else:\n dataset = tf.data.Dataset.from_tensor_slices({'input_ids': tokenized_inputs['input_ids'], \n 'attention_mask': tokenized_inputs['attention_mask']})\n \n if repeated:\n dataset = dataset.repeat()\n if not ordered:\n dataset = dataset.shuffle(2048)\n dataset = dataset.batch(batch_size)\n dataset = dataset.cache()\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n return dataset", "_____no_output_____" ], [ "model_path_list = glob.glob(f'{input_base_path}*.h5')\nmodel_path_list.sort()\n\nprint('Models to predict:')\nprint(*model_path_list, sep='\\n')", "Models to predict:\n/kaggle/input/39-commonlit-roberta-base-target-sampling-exp/model_0.h5\n" ] ], [ [ "# Model", "_____no_output_____" ] ], [ [ "def model_fn(encoder, seq_len=256):\n input_ids = L.Input(shape=(seq_len,), dtype=tf.int32, name='input_ids')\n input_attention_mask = L.Input(shape=(seq_len,), dtype=tf.int32, name='attention_mask')\n \n outputs = encoder({'input_ids': input_ids, \n 'attention_mask': input_attention_mask})\n last_hidden_state = outputs['last_hidden_state']\n \n cls_token = last_hidden_state[:, 0, :]\n \n output = L.Dense(1, name='output')(cls_token)\n \n model = Model(inputs=[input_ids, input_attention_mask], \n outputs=[output])\n return model\n\n\nwith strategy.scope():\n encoder = TFAutoModel.from_pretrained(config['BASE_MODEL'])\n # Freeze embeddings\n encoder.layers[0].embeddings.trainable = False\n model = model_fn(encoder, 
config['SEQ_LEN'])\n \nmodel.summary()", "Some layers from the model checkpoint at /kaggle/input/huggingface-roberta/roberta-base/ were not used when initializing TFRobertaModel: ['lm_head']\n- This IS expected if you are initializing TFRobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing TFRobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nAll the layers of TFRobertaModel were initialized from the model checkpoint at /kaggle/input/huggingface-roberta/roberta-base/.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use TFRobertaModel for predictions without further training.\n" ] ], [ [ "# Test set predictions", "_____no_output_____" ] ], [ [ "tokenizer = AutoTokenizer.from_pretrained(config['BASE_MODEL'])\ntest_pred = []\n\nfor model_path in model_path_list:\n print(model_path)\n if tpu: tf.tpu.experimental.initialize_tpu_system(tpu)\n K.clear_session()\n model.load_weights(model_path)\n\n # Test predictions\n test_ds = get_dataset(test, tokenizer, labeled=False, ordered=True, \n batch_size=config['BATCH_SIZE'], seq_len=config['SEQ_LEN'])\n x_test = test_ds.map(lambda sample: sample)\n test_pred.append(model.predict(x_test))", "/kaggle/input/39-commonlit-roberta-base-target-sampling-exp/model_0.h5\n" ] ], [ [ "# Test set predictions", "_____no_output_____" ] ], [ [ "submission = test[['id']]\nsubmission['target'] = np.mean(test_pred, axis=0)\nsubmission.to_csv('submission.csv', index=False)\ndisplay(submission.head(10))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d066a830ff25a91c7835377a35ca98a6679ffb2a
5,043
ipynb
Jupyter Notebook
Missions_to_Mars/mission_to_mars.ipynb
XxTopShottaxX/Web-Scraping-challenge
4216b98f92c02697264ecd127fdd934db547420e
[ "ADSL" ]
null
null
null
Missions_to_Mars/mission_to_mars.ipynb
XxTopShottaxX/Web-Scraping-challenge
4216b98f92c02697264ecd127fdd934db547420e
[ "ADSL" ]
null
null
null
Missions_to_Mars/mission_to_mars.ipynb
XxTopShottaxX/Web-Scraping-challenge
4216b98f92c02697264ecd127fdd934db547420e
[ "ADSL" ]
null
null
null
30.75
1,376
0.578227
[ [ [ "from splinter import Browser\nfrom bs4 import BeautifulSoup\nimport time\nimport pandas as pd", "_____no_output_____" ], [ "# browser = Browser('chrome')", "_____no_output_____" ] ], [ [ "# NASA Mars News", "_____no_output_____" ] ], [ [ "mars={}", "_____no_output_____" ], [ "url = 'https://mars.nasa.gov/news/'\nbrowser.visit(url)\nhtml = browser.html\nsoup = BeautifulSoup(html,'html.parser')\nresultNasaMars = soup.findAll(\"div\",class_=\"content_title\")\nnasaTitle = resultNasaMars[1].a.text", "_____no_output_____" ], [ "result = soup.find(\"div\" ,class_=\"article_teaser_body\")\nnasaPara = result.text", "_____no_output_____" ], [ "mars[\"news_title\"] = nasaTitle \nmars[\"news_p\"] = nasaPara\nmars", "_____no_output_____" ] ], [ [ "## JPL Mars Space Images", "_____no_output_____" ] ], [ [ "url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\nbrowser.visit(url)\nbrowser.find_by_id(\"full_image\").click()\nbrowser.find_link_by_partial_text(\"more info\").click()\nhtml = browser.html\nsoup = BeautifulSoup(html,'html.parser')\nresultJPLimage = soup.find(\"figure\",class_=\"lede\")\nresultJPLimage.a.img[\"src\"]", "_____no_output_____" ], [ "imgJPL = 'https://www.jpl.nasa.gov/' + resultJPLimage.a.img[\"src\"]\nmars['featured_image_url'] = imgJPL\nmars", "_____no_output_____" ] ], [ [ "## Mars Facts", "_____no_output_____" ] ], [ [ "mars_df = pd.read_html('https://space-facts.com/mars/')[0]\nmars_df.columns = [\"Description\",\"Value\"]\nmars_df.set_index(\"Description\", inplace = True)\nmars[\"facts\"] = mars_df.to_html()\nmars", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d066ac5bacbe86c5a0a34d1352580810adc67898
44,278
ipynb
Jupyter Notebook
Python Script/Spikeling Analysis.ipynb
hoijui/Spikeling
5768d49904bc5bae22c3105dd71d898e0459b880
[ "MIT" ]
9
2018-07-10T23:35:41.000Z
2022-03-17T18:49:53.000Z
Python Script/Spikeling Analysis.ipynb
hoijui/Spikeling
5768d49904bc5bae22c3105dd71d898e0459b880
[ "MIT" ]
4
2019-07-29T09:32:15.000Z
2022-02-16T19:14:04.000Z
Python Script/Spikeling Analysis.ipynb
hoijui/Spikeling
5768d49904bc5bae22c3105dd71d898e0459b880
[ "MIT" ]
5
2018-07-16T11:54:07.000Z
2020-10-07T09:59:30.000Z
118.390374
30,804
0.860721
[ [ [ "# Load raw data", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "data = np.loadtxt('SlowSteps1.csv', delimiter = ',') # load the raw data, change the filename as required!", "_____no_output_____" ] ], [ [ "# Find spikes", "_____no_output_____" ] ], [ [ "time_s = (data[:,8]-data[0,8])/1000000 # set the timing array to seconds and subtract 1st entry to zero it\nn_spikes = 0\nspike_times = [] # in seconds\nspike_points = [] # in timepoints\nfor x in range(1, data.shape[0]-1):\n if (data[x,0]>10 and data[x-1,0]<10): # looks for all instances where subsequent Vm points jump from <10 to >10\n spike_times.append(time_s[x])\n spike_points.append(x)\n n_spikes+=1\n \nprint(n_spikes, \"spikes detected\") \n", "168 spikes detected\n" ] ], [ [ "# Compute spike rate", "_____no_output_____" ] ], [ [ "spike_rate = np.zeros(data.shape[0])\n\nfor x in range(0, n_spikes-1):\n current_rate = 1/(spike_times[x+1]-spike_times[x])\n spike_rate[spike_points[x]:spike_points[x+1]]=current_rate\n", "_____no_output_____" ] ], [ [ "# Plot raw data and spike rate", "_____no_output_____" ] ], [ [ "from bokeh.plotting import figure, output_file, show\nfrom bokeh.layouts import column\nfrom bokeh.models import Range1d\n\noutput_file(\"RawDataPlot.html\")\n\nspike_plot = figure(plot_width=1200, plot_height = 100)\nspike_plot.line(time_s[:],spike_rate[:], line_width=1, line_color=\"black\") # Spike rate\nspike_plot.yaxis[0].axis_label = 'Rate (Hz)'\nspike_plot.xgrid.grid_line_color =None\nspike_plot.ygrid.grid_line_color =None\nspike_plot.xaxis.major_label_text_font_size = '0pt' # turn off x-axis tick labels\nspike_plot.yaxis.minor_tick_line_color = None # turn off y-axis minor ticks\n\nvm_plot = figure(plot_width=1200, plot_height = 300, y_range=Range1d(-100, 50),x_range=spike_plot.x_range)\nvm_plot.line(time_s[:],data[:,0], line_width=1, line_color=\"black\") # Vm\nvm_plot.scatter(spike_times[:],45, line_color=\"black\") # Rasterplot over 
spikes\nvm_plot.yaxis[0].axis_label = 'Vm (mV)'\nvm_plot.xgrid.grid_line_color =None\nvm_plot.ygrid.grid_line_color =None\nvm_plot.xaxis.major_label_text_font_size = '0pt' # turn off x-axis tick labels\n\nitotal_plot = figure(plot_width=1200, plot_height = 200, x_range=spike_plot.x_range)\nitotal_plot.line(time_s[:], data[:,1], line_width=1, line_color=\"black\") # Itotal\nitotal_plot.yaxis[0].axis_label = 'I total (a.u.)'\nitotal_plot.xgrid.grid_line_color =None\nitotal_plot.xaxis.major_label_text_font_size = '0pt' # turn off x-axis tick labels\n\nin_spikes_plot = figure(plot_width=1200, plot_height = 80, y_range=Range1d(-0.1,1.1), x_range=spike_plot.x_range)\nin_spikes_plot.line(time_s[:], data[:,3], line_width=1, line_color=\"black\") # Spikes in from Port 1\nin_spikes_plot.line(time_s[:], data[:,4], line_width=1, line_color=\"grey\") # Spikes in from Port 2\nin_spikes_plot.yaxis[0].axis_label = 'Input spikes'\nin_spikes_plot.xgrid.grid_line_color =None\nin_spikes_plot.ygrid.grid_line_color =None\nin_spikes_plot.yaxis.major_tick_line_color = None # turn off y-axis major ticks\nin_spikes_plot.yaxis.minor_tick_line_color = None # turn off y-axis minor ticks\nin_spikes_plot.xaxis.major_label_text_font_size = '0pt' # turn off x-axis tick labels\nin_spikes_plot.yaxis.major_label_text_font_size = '0pt' # turn off y-axis tick labels\n\nstim_plot = figure(plot_width=1200, plot_height = 100,y_range=Range1d(-0.1,1.1), x_range=spike_plot.x_range)\nstim_plot.line(time_s[:], data[:,2], line_width=1, line_color=\"black\") # Stimulus\nstim_plot.yaxis[0].axis_label = 'Stimulus'\nstim_plot.xaxis[0].axis_label = 'Time (s)'\nstim_plot.xgrid.grid_line_color =None\nstim_plot.ygrid.grid_line_color =None\nstim_plot.yaxis.major_tick_line_color = None # turn off y-axis major ticks\nstim_plot.yaxis.minor_tick_line_color = None # turn off y-axis minor ticks\nstim_plot.yaxis.major_label_text_font_size = '0pt' # turn off y-axis tick 
labels\n\nshow(column(spike_plot,vm_plot,itotal_plot,in_spikes_plot,stim_plot))\n", "WARNING:bokeh.core.validation.check:W-1004 (BOTH_CHILD_AND_ROOT): Models should not be a document root if they are in a layout box: Figure(id='25e4d8ca-bc35-44d5-9572-f71aea70c895', ...)\n" ] ], [ [ "# Analysis Option 1: Trigger stimuli and align", "_____no_output_____" ] ], [ [ "stimulus_times = []\nstimulus_times_s = []\nfor x in range(0, data.shape[0]-1): # goes through each timepoint\n if (data[x,2]<data[x+1,2]): # checks if the stimulus went from 0 to 1\n stimulus_times.append(x) ## make a list of times (in points) when stimulus increased \n stimulus_times_s.append(time_s[x]) ## also make a list of times (in seconds)\n \nloop_duration = stimulus_times[1]-stimulus_times[0] # compute arraylength for single stimulus\nloop_duration_s = stimulus_times_s[1]-stimulus_times_s[0] # compute arraylength for single stimulus also in s\n\nprint(loop_duration, \"points per loop;\", loop_duration_s, \"seconds\")\n\nsr_loops = []\nvm_loops = []\nitotal_loops = []\nstim_loops = []\n\nstimulus_times = np.where(data[:,2]>np.roll(data[:,2], axis = 0, shift = 1)) ## make a list of times when stimulus increased (again)\nsr_loops = np.vstack([spike_rate[x:x+loop_duration] for x in stimulus_times[0][:-1]])\nvm_loops = np.vstack([data[x:x+loop_duration, 0] for x in stimulus_times[0][:-1]])\nitotal_loops = np.vstack([data[x:x+loop_duration, 1] for x in stimulus_times[0][:-1]])\nstim_loops = np.vstack([data[x:x+loop_duration, 2] for x in stimulus_times[0][:-1]])\n\nst_loops = []\nfor i, x in enumerate(stimulus_times[0][:-1]):\n st_loops.append([time_s[sp]-time_s[x] for sp in spike_points if sp > x and sp < x+loop_duration])\n\nloops = vm_loops.shape[0]\nprint(loops, \"loops\") \n\n", "4000 points per loop; 7.27066 seconds\n8 loops\n" ] ], [ [ "# Make average arrays\n", "_____no_output_____" ] ], [ [ "sr_mean = np.mean(sr_loops, axis=0)\nvm_mean = np.mean(vm_loops, axis=0)\nitotal_mean = 
np.mean(itotal_loops, axis=0)\nstim_mean = np.mean(stim_loops, axis=0)", "_____no_output_____" ] ], [ [ "# Plot stimulus aligned data", "_____no_output_____" ] ], [ [ "from bokeh.plotting import figure, output_file, show\nfrom bokeh.layouts import column\nfrom bokeh.models import Range1d\n\noutput_file(\"AlignedDataPlot.html\")\n\nspike_plot = figure(plot_width=400, plot_height = 100)\nfor i in range(0,loops-1):\n spike_plot.line(time_s[0:loop_duration],sr_loops[i,:], line_width=1, line_color=\"gray\") # Vm individual repeats\nspike_plot.line(time_s[0:loop_duration],sr_mean[:], line_width=1.5, line_color=\"black\") # Vm mean\nspike_plot.yaxis[0].axis_label = 'Rate (Hz)'\nspike_plot.xgrid.grid_line_color =None\nspike_plot.ygrid.grid_line_color =None\nspike_plot.xaxis.major_label_text_font_size = '0pt' # turn off x-axis tick labels\n\n\ndot_plot = figure(plot_width=400, plot_height = 100, x_range=spike_plot.x_range)\nfor i in range(0,loops-1):\n dot_plot.scatter(st_loops[i],i, line_color=\"black\") # Rasterplot\ndot_plot.yaxis[0].axis_label = 'Repeat'\ndot_plot.xgrid.grid_line_color =None\ndot_plot.ygrid.grid_line_color =None\ndot_plot.xaxis.major_label_text_font_size = '0pt' # turn off x-axis tick labels\n\n\nvm_plot = figure(plot_width=400, plot_height = 300, y_range=Range1d(-100, 40),x_range=spike_plot.x_range)\nfor i in range(0,loops-1):\n vm_plot.line(time_s[0:loop_duration],vm_loops[i,:], line_width=1, line_color=\"gray\") # Vm individual repeats\nvm_plot.line(time_s[0:loop_duration],vm_mean[:], line_width=1.5, line_color=\"black\") # Vm mean\nvm_plot.yaxis[0].axis_label = 'Vm (mV)'\nvm_plot.xgrid.grid_line_color =None\nvm_plot.ygrid.grid_line_color =None\nvm_plot.xaxis.major_label_text_font_size = '0pt' # turn off x-axis tick labels\n\nitotal_plot = figure(plot_width=400, plot_height = 200, x_range=spike_plot.x_range)\nfor i in range(0,loops-1):\n itotal_plot.line(time_s[0:loop_duration], itotal_loops[i,:], line_width=1, line_color=\"gray\") # Itotal 
individual repeats\nitotal_plot.line(time_s[0:loop_duration], itotal_mean[:], line_width=1.5, line_color=\"black\") # Itotal mean\nitotal_plot.yaxis[0].axis_label = 'Itotal (a.u.)'\nitotal_plot.xgrid.grid_line_color =None\nitotal_plot.xaxis.major_label_text_font_size = '0pt' # turn off x-axis tick labels\n\nstim_plot = figure(plot_width=400, plot_height = 100,y_range=Range1d(-0.1,1.1), x_range=spike_plot.x_range)\nfor i in range(0,loops-1):\n stim_plot.line(time_s[0:loop_duration], stim_loops[i,:], line_width=1, line_color=\"gray\") # Stimulus individual repeats\nstim_plot.line(time_s[0:loop_duration], stim_mean[:], line_width=1.5, line_color=\"black\") # Stimulus mean \nstim_plot.yaxis[0].axis_label = 'Stimulus'\nstim_plot.xaxis[0].axis_label = 'Time (s)'\nstim_plot.xgrid.grid_line_color =None\nstim_plot.ygrid.grid_line_color =None\nstim_plot.yaxis.major_tick_line_color = None # turn off y-axis major ticks\nstim_plot.yaxis.minor_tick_line_color = None # turn off y-axis minor ticks\nstim_plot.yaxis.major_label_text_font_size = '0pt' # turn off y-axis tick labels\n\nshow(column(spike_plot,dot_plot,vm_plot,itotal_plot,stim_plot))", "WARNING:bokeh.core.validation.check:W-1004 (BOTH_CHILD_AND_ROOT): Models should not be a document root if they are in a layout box: Figure(id='25e4d8ca-bc35-44d5-9572-f71aea70c895', ...)\n" ] ], [ [ "# Analysis option 2: Spike triggered average (STA)", "_____no_output_____" ] ], [ [ "sta_points = 200 # number of points computed\n\nsta_individual = []\nsta_individual = np.vstack([data[x-sta_points:x,2] for x in spike_points[2:-1]])\nsta = np.mean(sta_individual, axis=0)\n\nimport matplotlib.pyplot as plt\nplt.plot(time_s[0:200],sta[:])\nplt.ylabel('Kernel amplitude')\nplt.xlabel('Time before spike (s)')\nplt.show()\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d066b286d8f87c6f107e951edd89aef5a172553d
73,840
ipynb
Jupyter Notebook
Convolutional Neural Networks/Week 1/Convolution_model_Step_by_Step_v1.ipynb
Bo-Feng-1024/Soursera-Deep-Learning-Specialization
ea2c69a818ae830ee6b62ae5674f08ab4b1cb4ea
[ "Apache-2.0" ]
null
null
null
Convolutional Neural Networks/Week 1/Convolution_model_Step_by_Step_v1.ipynb
Bo-Feng-1024/Soursera-Deep-Learning-Specialization
ea2c69a818ae830ee6b62ae5674f08ab4b1cb4ea
[ "Apache-2.0" ]
null
null
null
Convolutional Neural Networks/Week 1/Convolution_model_Step_by_Step_v1.ipynb
Bo-Feng-1024/Soursera-Deep-Learning-Specialization
ea2c69a818ae830ee6b62ae5674f08ab4b1cb4ea
[ "Apache-2.0" ]
null
null
null
41.067853
5,696
0.562378
[ [ [ "# Convolutional Neural Networks: Step by Step\n\nWelcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation. \n\nBy the end of this notebook, you'll be able to: \n\n* Explain the convolution operation\n* Apply two different types of pooling operation\n* Identify the components used in a convolutional neural network (padding, stride, filter, ...) and their purpose\n* Build a convolutional neural network \n\n**Notation**:\n- Superscript $[l]$ denotes an object of the $l^{th}$ layer. \n - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.\n\n\n- Superscript $(i)$ denotes an object from the $i^{th}$ example. \n - Example: $x^{(i)}$ is the $i^{th}$ training example input.\n \n \n- Subscript $i$ denotes the $i^{th}$ entry of a vector.\n - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.\n \n \n- $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$. \n- $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$. \n\nYou should be familiar with `numpy` and/or have completed the previous courses of the specialization. 
Let's get started!", "_____no_output_____" ], [ "## Table of Contents\n\n- [1 - Packages](#1)\n- [2 - Outline of the Assignment](#2)\n- [3 - Convolutional Neural Networks](#3)\n - [3.1 - Zero-Padding](#3-1)\n - [Exercise 1 - zero_pad](#ex-1)\n - [3.2 - Single Step of Convolution](#3-2)\n - [Exercise 2 - conv_single_step](#ex-2)\n - [3.3 - Convolutional Neural Networks - Forward Pass](#3-3)\n - [Exercise 3 - conv_forward](#ex-3)\n- [4 - Pooling Layer](#4)\n - [4.1 - Forward Pooling](#4-1)\n - [Exercise 4 - pool_forward](#ex-4)\n- [5 - Backpropagation in Convolutional Neural Networks (OPTIONAL / UNGRADED)](#5)\n - [5.1 - Convolutional Layer Backward Pass](#5-1)\n - [5.1.1 - Computing dA](#5-1-1)\n - [5.1.2 - Computing dW](#5-1-2)\n - [5.1.3 - Computing db](#5-1-3)\n - [Exercise 5 - conv_backward](#ex-5)\n - [5.2 Pooling Layer - Backward Pass](#5-2)\n - [5.2.1 Max Pooling - Backward Pass](#5-2-1)\n - [Exercise 6 - create_mask_from_window](#ex-6)\n - [5.2.2 - Average Pooling - Backward Pass](#5-2-2)\n - [Exercise 7 - distribute_value](#ex-7)\n - [5.2.3 Putting it Together: Pooling Backward](#5-2-3)\n - [Exercise 8 - pool_backward](#ex-8)", "_____no_output_____" ], [ "<a name='1'></a>\n## 1 - Packages\n\nLet's first import all the packages that you will need during this assignment. \n- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.\n- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.\n- np.random.seed(1) is used to keep all the random function calls consistent. 
This helps to grade your work.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nfrom public_tests import *\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n%load_ext autoreload\n%autoreload 2\n\nnp.random.seed(1)", "_____no_output_____" ] ], [ [ "<a name='2'></a>\n## 2 - Outline of the Assignment\n\nYou will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions to walk you through the steps:\n\n- Convolution functions, including:\n - Zero Padding\n - Convolve window \n - Convolution forward\n - Convolution backward (optional)\n- Pooling functions, including:\n - Pooling forward\n - Create mask \n - Distribute value\n - Pooling backward (optional)\n \nThis notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:\n\n<img src=\"images/model.png\" style=\"width:800px;height:300px;\">\n\n**Note**: For every forward function, there is a corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation. ", "_____no_output_____" ], [ "<a name='3'></a>\n## 3 - Convolutional Neural Networks\n\nAlthough programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below. \n\n<img src=\"images/conv_nn.png\" style=\"width:350px;height:200px;\">\n\nIn this part, you will build every step of the convolution layer. 
You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself. ", "_____no_output_____" ], [ "<a name='3-1'></a>\n### 3.1 - Zero-Padding\n\nZero-padding adds zeros around the border of an image:\n\n<img src=\"images/PAD.png\" style=\"width:600px;height:400px;\">\n<caption><center> <u> <font color='purple'> <b>Figure 1</b> </u><font color='purple'> : <b>Zero-Padding</b><br> Image (3 channels, RGB) with a padding of 2. </center></caption>\n\nThe main benefits of padding are:\n\n- It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the \"same\" convolution, in which the height/width is exactly preserved after one layer. \n\n- It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels at the edges of an image.\n\n<a name='ex-1'></a>\n### Exercise 1 - zero_pad\nImplement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array \"a\" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:\n```python\na = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), mode='constant', constant_values = (0,0))\n```", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: zero_pad\n\ndef zero_pad(X, pad):\n \"\"\"\n Pad with zeros all images of the dataset X. 
The padding is applied to the height and width of an image, \n as illustrated in Figure 1.\n \n Argument:\n X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images\n pad -- integer, amount of padding around each image on vertical and horizontal dimensions\n \n Returns:\n X_pad -- padded image of shape (m, n_H + 2 * pad, n_W + 2 * pad, n_C)\n \"\"\"\n \n #(≈ 1 line)\n # X_pad = None\n # YOUR CODE STARTS HERE\n X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode='constant', constant_values = 0)\n \n # YOUR CODE ENDS HERE\n \n return X_pad", "_____no_output_____" ], [ "np.random.seed(1)\nx = np.random.randn(4, 3, 3, 2)\nx_pad = zero_pad(x, 3)\nprint (\"x.shape =\\n\", x.shape)\nprint (\"x_pad.shape =\\n\", x_pad.shape)\nprint (\"x[1,1] =\\n\", x[1, 1])\nprint (\"x_pad[1,1] =\\n\", x_pad[1, 1])\n\nfig, axarr = plt.subplots(1, 2)\naxarr[0].set_title('x')\naxarr[0].imshow(x[0, :, :, 0])\naxarr[1].set_title('x_pad')\naxarr[1].imshow(x_pad[0, :, :, 0])\nzero_pad_test(zero_pad)", "x.shape =\n (4, 3, 3, 2)\nx_pad.shape =\n (4, 9, 9, 2)\nx[1,1] =\n [[ 0.90085595 -0.68372786]\n [-0.12289023 -0.93576943]\n [-0.26788808 0.53035547]]\nx_pad[1,1] =\n [[0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]]\nx.shape =\n (4, 3, 3, 2)\nx_pad.shape =\n (4, 9, 9, 2)\nx[1,1] =\n [[ 0.90085595 -0.68372786]\n [-0.12289023 -0.93576943]\n [-0.26788808 0.53035547]]\nx_pad[1,1] =\n [[0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]\n [0. 0.]]\n[[0. 0. 0. 0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0. 0. 0. 0.]]\n\u001b[92mAll tests passed!\n" ] ], [ [ "<a name='3-2'></a>\n### 3.2 - Single Step of Convolution \n\nIn this part, implement a single step of convolution, in which you apply the filter to a single position of the input. 
This will be used to build a convolutional unit, which: \n\n- Takes an input volume \n- Applies a filter at every position of the input\n- Outputs another volume (usually of different size)\n\n<img src=\"images/Convolution_schematic.gif\" style=\"width:500px;height:300px;\">\n<caption><center> <u> <font color='purple'> <b>Figure 2</b> </u><font color='purple'> : <b>Convolution operation</b><br> with a filter of 3x3 and a stride of 1 (stride = amount you move the window each time you slide) </center></caption>\n\nIn a computer vision application, each value in the matrix on the left corresponds to a single pixel value. You convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output. \n\nLater in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation. \n\n<a name='ex-2'></a>\n### Exercise 2 - conv_single_step\nImplement `conv_single_step()`. \n \n[Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html).", "_____no_output_____" ], [ "**Note**: The variable b will be passed in as a numpy array. If you add a scalar (a float or integer) to a numpy array, the result is a numpy array. 
In the special case of a numpy array containing a single value, you can cast it as a float to convert it to a scalar.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: conv_single_step\n\ndef conv_single_step(a_slice_prev, W, b):\n \"\"\"\n Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation \n of the previous layer.\n \n Arguments:\n a_slice_prev -- slice of input data of shape (f, f, n_C_prev)\n W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)\n b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)\n \n Returns:\n Z -- a scalar value, the result of convolving the sliding window (W, b) on a slice x of the input data\n \"\"\"\n\n #(≈ 3 lines of code)\n # Element-wise product between a_slice_prev and W. Do not add the bias yet.\n # s = None\n # Sum over all entries of the volume s.\n # Z = None\n # Add bias b to Z. Cast b to a float() so that Z results in a scalar value.\n # Z = None\n # YOUR CODE STARTS HERE\n s = np.multiply(a_slice_prev, W)\n Z = np.sum(s)\n Z = Z + float(b)\n \n # YOUR CODE ENDS HERE\n\n return Z", "_____no_output_____" ], [ "np.random.seed(1)\na_slice_prev = np.random.randn(4, 4, 3)\nW = np.random.randn(4, 4, 3)\nb = np.random.randn(1, 1, 1)\n\nZ = conv_single_step(a_slice_prev, W, b)\nprint(\"Z =\", Z)\nconv_single_step_test(conv_single_step)\n\nassert (type(Z) == np.float64 or type(Z) == np.float32), \"You must cast the output to float\"\nassert np.isclose(Z, -6.999089450680221), \"Wrong value\"", "Z = -6.999089450680221\n\u001b[92mAll tests passed!\n" ] ], [ [ "<a name='3-3'></a>\n### 3.3 - Convolutional Neural Networks - Forward Pass\n\nIn the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. 
You will then stack these outputs to get a 3D volume: \n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/conv_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n<a name='ex-3'></a>\n### Exercise 3 - conv_forward\nImplement the function below to convolve the filters `W` on an input activation `A_prev`. \nThis function takes the following inputs:\n* `A_prev`, the activations output by the previous layer (for a batch of m inputs); \n* Weights are denoted by `W`. The filter window size is `f` by `f`.\n* The bias vector is `b`, where each filter has its own (single) bias. \n\nYou also have access to the hyperparameters dictionary, which contains the stride and the padding. \n\n**Hint**: \n1. To select a 2x2 slice at the upper left corner of a matrix \"a_prev\" (shape (5,5,3)), you would do:\n```python\na_slice_prev = a_prev[0:2,0:2,:]\n```\nNotice how this gives a 3D slice that has height 2, width 2, and depth 3. Depth is the number of channels. \nThis will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.\n\n2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find out how each of the corners can be defined using h, w, f and s in the code below.\n\n<img src=\"images/vert_horiz_kiank.png\" style=\"width:400px;height:300px;\">\n<caption><center> <u> <font color='purple'> <b>Figure 3</b> </u><font color='purple'> : <b>Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)</b> <br> This figure shows only a single channel. 
</center></caption>\n\n\n**Reminder**:\n \nThe formulas relating the output shape of the convolution to the input shape are:\n \n$$n_H = \\Bigl\\lfloor \\frac{n_{H_{prev}} - f + 2 \\times pad}{stride} \\Bigr\\rfloor +1$$\n$$n_W = \\Bigl\\lfloor \\frac{n_{W_{prev}} - f + 2 \\times pad}{stride} \\Bigr\\rfloor +1$$\n$$n_C = \\text{number of filters used in the convolution}$$\n \n\n\n\nFor this exercise, don't worry about vectorization! Just implement everything with for-loops.", "_____no_output_____" ], [ "#### Additional Hints (if you're stuck):\n\n\n* Use array slicing (e.g.`varname[0:1,:,3:5]`) for the following variables: \n `a_prev_pad` ,`W`, `b` \n - Copy the starter code of the function and run it outside of the defined function, in separate cells. \n - Check that the subset of each array is the size and dimension that you're expecting. \n* To decide how to get the `vert_start`, `vert_end`, `horiz_start`, `horiz_end`, remember that these are indices of the previous layer. \n - Draw an example of a previous padded layer (8 x 8, for instance), and the current (output layer) (2 x 2, for instance). \n - The output layer's indices are denoted by `h` and `w`. \n* Make sure that `a_slice_prev` has a height, width and depth.\n* Remember that `a_prev_pad` is a subset of `A_prev_pad`. 
\n - Think about which one should be used within the for loops.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: conv_forward\n\ndef conv_forward(A_prev, W, b, hparameters):\n \"\"\"\n Implements the forward propagation for a convolution function\n \n Arguments:\n A_prev -- output activations of the previous layer, \n numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)\n b -- Biases, numpy array of shape (1, 1, 1, n_C)\n hparameters -- python dictionary containing \"stride\" and \"pad\"\n \n Returns:\n Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward() function\n \"\"\"\n \n # Retrieve dimensions from A_prev's shape (≈1 line) \n # (m, n_H_prev, n_W_prev, n_C_prev) = None\n \n # Retrieve dimensions from W's shape (≈1 line)\n # (f, f, n_C_prev, n_C) = None\n \n # Retrieve information from \"hparameters\" (≈2 lines)\n # stride = None\n # pad = None\n \n # Compute the dimensions of the CONV output volume using the formula given above. \n # Hint: use int() to apply the 'floor' operation. (≈2 lines)\n # n_H = None\n # n_W = None\n \n # Initialize the output volume Z with zeros. 
(≈1 line)\n # Z = None\n \n # Create A_prev_pad by padding A_prev\n # A_prev_pad = None\n \n # for i in range(None): # loop over the batch of training examples\n # a_prev_pad = None # Select ith training example's padded activation\n # for h in range(None): # loop over vertical axis of the output volume\n # Find the vertical start and end of the current \"slice\" (≈2 lines)\n # vert_start = None\n # vert_end = None\n \n # for w in range(None): # loop over horizontal axis of the output volume\n # Find the horizontal start and end of the current \"slice\" (≈2 lines)\n # horiz_start = None\n # horiz_end = None\n \n # for c in range(None): # loop over channels (= #filters) of the output volume\n \n # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)\n # a_slice_prev = None\n \n # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈3 line)\n # weights = None\n # biases = None\n # Z[i, h, w, c] = None\n # YOUR CODE STARTS HERE\n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n (f, f, n_C_prev, n_C) = W.shape\n stride = hparameters[\"stride\"]\n pad = hparameters[\"pad\"]\n n_H = int((n_H_prev - f + 2 * pad)/stride) + 1\n n_W = int((n_W_prev - f + 2 * pad)/stride) + 1\n Z = np.zeros((m, n_H, n_W, n_C))\n A_prev_pad = zero_pad(A_prev, pad)\n for i in range(m):\n a_prev_pad = A_prev_pad[i,:,:,:]\n for h in range(n_H):\n vert_start = h*stride\n vert_end = vert_start + f\n for w in range(n_W):\n horiz_start = w*stride\n horiz_end = horiz_start + f\n for c in range(n_C):\n a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]\n weights = W[:,:,:,c]\n biases = b[:,:,:,c]\n Z[i, h, w, c] = conv_single_step(a_slice_prev, weights, biases)\n \n # YOUR CODE ENDS HERE\n \n # Save information in \"cache\" for the backprop\n cache = (A_prev, W, b, hparameters)\n \n return Z, cache", "_____no_output_____" ], [ "np.random.seed(1)\nA_prev = np.random.randn(2, 5, 7, 4)\nW = 
np.random.randn(3, 3, 4, 8)\nb = np.random.randn(1, 1, 1, 8)\nhparameters = {\"pad\" : 1,\n \"stride\": 2}\n\nZ, cache_conv = conv_forward(A_prev, W, b, hparameters)\nprint(\"Z's mean =\\n\", np.mean(Z))\nprint(\"Z[0,2,1] =\\n\", Z[0, 2, 1])\nprint(\"cache_conv[0][1][2][3] =\\n\", cache_conv[0][1][2][3])\n\nconv_forward_test(conv_forward)\n", "Z's mean =\n 0.5511276474566768\nZ[0,2,1] =\n [-2.17796037 8.07171329 -0.5772704 3.36286738 4.48113645 -2.89198428\n 10.99288867 3.03171932]\ncache_conv[0][1][2][3] =\n [-1.1191154 1.9560789 -0.3264995 -1.34267579]\n\u001b[92mAll tests passed!\n" ] ], [ [ "Finally, a CONV layer should also contain an activation, in which case you would add the following line of code:\n\n```python\n# Convolve the window to get back one output neuron\nZ[i, h, w, c] = ...\n# Apply activation\nA[i, h, w, c] = activation(Z[i, h, w, c])\n```\n\nYou don't need to do it here, however. \n", "_____no_output_____" ], [ "<a name='4'></a>\n## 4 - Pooling Layer \n\nThe pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are: \n\n- Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.\n\n- Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.\n\n<table>\n<td>\n<img src=\"images/max_pool1.png\" style=\"width:500px;height:300px;\">\n<td>\n\n<td>\n<img src=\"images/a_pool.png\" style=\"width:500px;height:300px;\">\n<td>\n</table>\n\nThese pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the $f \\times f$ window you would compute a *max* or *average* over. 
\n\n<a name='4-1'></a>\n### 4.1 - Forward Pooling\nNow, you are going to implement MAX-POOL and AVG-POOL, in the same function. \n\n<a name='ex-4'></a>\n### Exercise 4 - pool_forward\n\nImplement the forward pass of the pooling layer. Follow the hints in the comments below.\n\n**Reminder**:\nAs there's no padding, the formulas binding the output shape of the pooling to the input shape is:\n\n$$n_H = \\Bigl\\lfloor \\frac{n_{H_{prev}} - f}{stride} \\Bigr\\rfloor +1$$\n\n$$n_W = \\Bigl\\lfloor \\frac{n_{W_{prev}} - f}{stride} \\Bigr\\rfloor +1$$\n\n$$n_C = n_{C_{prev}}$$\n\n\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: pool_forward\n\ndef pool_forward(A_prev, hparameters, mode = \"max\"):\n \"\"\"\n Implements the forward pass of the pooling layer\n \n Arguments:\n A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n hparameters -- python dictionary containing \"f\" and \"stride\"\n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters \n \"\"\"\n \n # Retrieve dimensions from the input shape\n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve hyperparameters from \"hparameters\"\n f = hparameters[\"f\"]\n stride = hparameters[\"stride\"]\n \n # Define the dimensions of the output\n n_H = int(1 + (n_H_prev - f) / stride)\n n_W = int(1 + (n_W_prev - f) / stride)\n n_C = n_C_prev\n \n # Initialize output matrix A\n A = np.zeros((m, n_H, n_W, n_C)) \n \n # for i in range(None): # loop over the training examples\n # for h in range(None): # loop on the vertical axis of the output volume\n # Find the vertical start and end of the current \"slice\" (≈2 lines)\n # vert_start = None\n # vert_end = None\n \n # for w in range(None): # loop on the horizontal axis of the output volume\n # Find the vertical 
start and end of the current \"slice\" (≈2 lines)\n # horiz_start = None\n # horiz_end = None\n \n # for c in range (None): # loop over the channels of the output volume\n \n # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)\n # a_prev_slice = None\n \n # Compute the pooling operation on the slice. \n # Use an if statement to differentiate the modes. \n # Use np.max and np.mean.\n # if mode == \"max\":\n # A[i, h, w, c] = None\n # elif mode == \"average\":\n # A[i, h, w, c] = None\n \n # YOUR CODE STARTS HERE\n for i in range(m):\n for h in range(n_H):\n vert_start = h*stride\n vert_end = vert_start + f\n for w in range(n_W):\n horiz_start = w*stride\n horiz_end = horiz_start + f\n for c in range(n_C):\n a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]\n if mode == \"max\":\n A[i, h, w, c] = np.max(a_prev_slice)\n elif mode == \"average\":\n A[i, h, w, c] = np.mean(a_prev_slice)\n \n # YOUR CODE ENDS HERE\n \n # Store the input and hparameters in \"cache\" for pool_backward()\n cache = (A_prev, hparameters)\n \n # Making sure your output shape is correct\n #assert(A.shape == (m, n_H, n_W, n_C))\n \n return A, cache", "_____no_output_____" ], [ "# Case 1: stride of 1\nnp.random.seed(1)\nA_prev = np.random.randn(2, 5, 5, 3)\nhparameters = {\"stride\" : 1, \"f\": 3}\n\nA, cache = pool_forward(A_prev, hparameters, mode = \"max\")\nprint(\"mode = max\")\nprint(\"A.shape = \" + str(A.shape))\nprint(\"A[1, 1] =\\n\", A[1, 1])\nA, cache = pool_forward(A_prev, hparameters, mode = \"average\")\nprint(\"mode = average\")\nprint(\"A.shape = \" + str(A.shape))\nprint(\"A[1, 1] =\\n\", A[1, 1])\n\npool_forward_test(pool_forward)", "mode = max\nA.shape = (2, 3, 3, 3)\nA[1, 1] =\n [[1.96710175 0.84616065 1.27375593]\n [1.96710175 0.84616065 1.23616403]\n [1.62765075 1.12141771 1.2245077 ]]\nmode = average\nA.shape = (2, 3, 3, 3)\nA[1, 1] =\n [[ 0.44497696 -0.00261695 -0.31040307]\n [ 0.50811474 
-0.23493734 -0.23961183]\n [ 0.11872677 0.17255229 -0.22112197]]\n\u001b[92mAll tests passed!\n" ] ], [ [ "**Expected output**\n\n```\nmode = max\nA.shape = (2, 3, 3, 3)\nA[1, 1] =\n [[1.96710175 0.84616065 1.27375593]\n [1.96710175 0.84616065 1.23616403]\n [1.62765075 1.12141771 1.2245077 ]]\n\nmode = average\nA.shape = (2, 3, 3, 3)\nA[1, 1] =\n [[ 0.44497696 -0.00261695 -0.31040307]\n [ 0.50811474 -0.23493734 -0.23961183]\n [ 0.11872677 0.17255229 -0.22112197]]\n```", "_____no_output_____" ] ], [ [ "# Case 2: stride of 2\nnp.random.seed(1)\nA_prev = np.random.randn(2, 5, 5, 3)\nhparameters = {\"stride\" : 2, \"f\": 3}\n\nA, cache = pool_forward(A_prev, hparameters)\nprint(\"mode = max\")\nprint(\"A.shape = \" + str(A.shape))\nprint(\"A[0] =\\n\", A[0])\nprint()\n\nA, cache = pool_forward(A_prev, hparameters, mode = \"average\")\nprint(\"mode = average\")\nprint(\"A.shape = \" + str(A.shape))\nprint(\"A[1] =\\n\", A[1])", "mode = max\nA.shape = (2, 2, 2, 3)\nA[0] =\n [[[1.74481176 0.90159072 1.65980218]\n [1.74481176 1.6924546 1.65980218]]\n\n [[1.13162939 1.51981682 2.18557541]\n [1.13162939 1.6924546 2.18557541]]]\n\nmode = average\nA.shape = (2, 2, 2, 3)\nA[1] =\n [[[-0.17313416 0.32377198 -0.34317572]\n [ 0.02030094 0.14141479 -0.01231585]]\n\n [[ 0.42944926 0.08446996 -0.27290905]\n [ 0.15077452 0.28911175 0.00123239]]]\n" ] ], [ [ "**Expected Output:**\n \n```\nmode = max\nA.shape = (2, 2, 2, 3)\nA[0] =\n [[[1.74481176 0.90159072 1.65980218]\n [1.74481176 1.6924546 1.65980218]]\n\n [[1.13162939 1.51981682 2.18557541]\n [1.13162939 1.6924546 2.18557541]]]\n\nmode = average\nA.shape = (2, 2, 2, 3)\nA[1] =\n [[[-0.17313416 0.32377198 -0.34317572]\n [ 0.02030094 0.14141479 -0.01231585]]\n\n [[ 0.42944926 0.08446996 -0.27290905]\n [ 0.15077452 0.28911175 0.00123239]]]\n```", "_____no_output_____" ], [ "<font color='blue'>\n \n**What you should remember**:\n\n* A convolution extracts features from an input image by taking the dot product between the input data and 
a 3D array of weights (the filter). \n* The 2D output of the convolution is called the feature map\n* A convolution layer is where the filter slides over the image and computes the dot product \n * This transforms the input volume into an output volume of different size \n* Zero padding helps keep more information at the image borders, and is helpful for building deeper networks, because you can build a CONV layer without shrinking the height and width of the volumes\n* Pooling layers gradually reduce the height and width of the input by sliding a 2D window over each specified region, then summarizing the features in that region", "_____no_output_____" ], [ "**Congratulations**! You have now implemented the forward passes of all the layers of a convolutional network. Great work!\n\nThe remainder of this notebook is optional, and will not be graded. If you carry on, just remember to hit the Submit button to submit your work for grading first. ", "_____no_output_____" ], [ "<a name='5'></a>\n## 5 - Backpropagation in Convolutional Neural Networks (OPTIONAL / UNGRADED)\n\nIn modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like. \n\nWhen in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. 
The backprop equations are not trivial and were not derived in lecture, but are briefly presented below.\n\n<a name='5-1'></a>\n### 5.1 - Convolutional Layer Backward Pass \n\nLet's start by implementing the backward pass for a CONV layer. \n\n<a name='5-1-1'></a>\n#### 5.1.1 - Computing dA:\nThis is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:\n\n$$dA \\mathrel{+}= \\sum _{h=0} ^{n_H} \\sum_{w=0} ^{n_W} W_c \\times dZ_{hw} \\tag{1}$$\n\nWhere $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, you multiply the the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, you are just adding the gradients of all the a_slices. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\nda_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]\n```\n\n<a name='5-1-2'></a>\n#### 5.1.2 - Computing dW:\nThis is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:\n\n$$dW_c \\mathrel{+}= \\sum _{h=0} ^{n_H} \\sum_{w=0} ^ {n_W} a_{slice} \\times dZ_{hw} \\tag{2}$$\n\nWhere $a_{slice}$ corresponds to the slice which was used to generate the activation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$. 
\n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\ndW[:,:,:,c] \\mathrel{+}= a_slice * dZ[i, h, w, c]\n```\n\n<a name='5-1-3'></a>\n#### 5.1.3 - Computing db:\n\nThis is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:\n\n$$db = \\sum_h \\sum_w dZ_{hw} \\tag{3}$$\n\nAs you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost. \n\nIn code, inside the appropriate for-loops, this formula translates into:\n```python\ndb[:,:,:,c] += dZ[i, h, w, c]\n```\n\n<a name='ex-5'></a>\n### Exercise 5 - conv_backward\n\nImplement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above. ", "_____no_output_____" ] ], [ [ "def conv_backward(dZ, cache):\n \"\"\"\n Implement the backward propagation for a convolution function\n \n Arguments:\n dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward(), output of conv_forward()\n \n Returns:\n dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),\n numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n dW -- gradient of the cost with respect to the weights of the conv layer (W)\n numpy array of shape (f, f, n_C_prev, n_C)\n db -- gradient of the cost with respect to the biases of the conv layer (b)\n numpy array of shape (1, 1, 1, n_C)\n \"\"\" \n \n \n # Retrieve information from \"cache\"\n # (A_prev, W, b, hparameters) = None\n # Retrieve dimensions from A_prev's shape\n # (m, n_H_prev, n_W_prev, n_C_prev) = None\n # Retrieve dimensions from W's shape\n # (f, f, n_C_prev, n_C) = None\n \n # Retrieve information from \"hparameters\"\n # stride = None\n # pad = 
None\n \n # Retrieve dimensions from dZ's shape\n # (m, n_H, n_W, n_C) = None\n \n # Initialize dA_prev, dW, db with the correct shapes\n # dA_prev = None \n # dW = None\n # db = None\n \n # Pad A_prev and dA_prev\n # A_prev_pad = zero_pad(A_prev, pad)\n # dA_prev_pad = zero_pad(dA_prev, pad)\n \n #for i in range(m): # loop over the training examples\n \n # select ith training example from A_prev_pad and dA_prev_pad\n # a_prev_pad = None\n # da_prev_pad = None\n \n #for h in range(n_H): # loop over vertical axis of the output volume\n # for w in range(n_W): # loop over horizontal axis of the output volume\n # for c in range(n_C): # loop over the channels of the output volume\n \n # Find the corners of the current \"slice\"\n # vert_start = None\n # vert_end = None\n # horiz_start = None\n # horiz_end = None\n\n # Use the corners to define the slice from a_prev_pad\n # a_slice = None\n\n # Update gradients for the window and the filter's parameters using the code formulas given above\n # da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += None\n # dW[:,:,:,c] += None\n # db[:,:,:,c] += None\n \n # Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])\n # dA_prev[i, :, :, :] = None\n # YOUR CODE STARTS HERE\n \n \n # YOUR CODE ENDS HERE\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))\n \n return dA_prev, dW, db", "_____no_output_____" ], [ "# We'll run conv_forward to initialize the 'Z' and 'cache_conv\",\n# which we'll use to test the conv_backward function\nnp.random.seed(1)\nA_prev = np.random.randn(10, 4, 4, 3)\nW = np.random.randn(2, 2, 3, 8)\nb = np.random.randn(1, 1, 1, 8)\nhparameters = {\"pad\" : 2,\n \"stride\": 2}\nZ, cache_conv = conv_forward(A_prev, W, b, hparameters)\n\n# Test conv_backward\ndA, dW, db = conv_backward(Z, cache_conv)\n\nprint(\"dA_mean =\", np.mean(dA))\nprint(\"dW_mean =\", np.mean(dW))\nprint(\"db_mean =\", 
np.mean(db))\n\nassert type(dA) == np.ndarray, \"Output must be a np.ndarray\"\nassert type(dW) == np.ndarray, \"Output must be a np.ndarray\"\nassert type(db) == np.ndarray, \"Output must be a np.ndarray\"\nassert dA.shape == (10, 4, 4, 3), f\"Wrong shape for dA {dA.shape} != (10, 4, 4, 3)\"\nassert dW.shape == (2, 2, 3, 8), f\"Wrong shape for dW {dW.shape} != (2, 2, 3, 8)\"\nassert db.shape == (1, 1, 1, 8), f\"Wrong shape for db {db.shape} != (1, 1, 1, 8)\"\nassert np.isclose(np.mean(dA), 1.4524377), \"Wrong values for dA\"\nassert np.isclose(np.mean(dW), 1.7269914), \"Wrong values for dW\"\nassert np.isclose(np.mean(db), 7.8392325), \"Wrong values for db\"\n\nprint(\"\\033[92m All tests passed.\")", "_____no_output_____" ] ], [ [ "**Expected Output**:\n<table>\n <tr>\n <td>\n dA_mean\n </td>\n <td>\n 1.45243777754\n </td>\n </tr>\n <tr>\n <td>\n dW_mean\n </td>\n <td>\n 1.72699145831\n </td>\n </tr>\n <tr>\n <td>\n db_mean\n </td>\n <td>\n 7.83923256462\n </td>\n </tr>\n\n</table>\n", "_____no_output_____" ], [ "<a name='5-2'></a>\n## 5.2 Pooling Layer - Backward Pass\n\nNext, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagate the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer. \n\n<a name='5-2-1'></a>\n### 5.2.1 Max Pooling - Backward Pass \n\nBefore jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following: \n\n$$ X = \\begin{bmatrix}\n1 && 3 \\\\\n4 && 2\n\\end{bmatrix} \\quad \\rightarrow \\quad M =\\begin{bmatrix}\n0 && 0 \\\\\n1 && 0\n\\end{bmatrix}\\tag{4}$$\n\nAs you can see, this function creates a \"mask\" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). 
You'll see later that the backward pass for average pooling is similar to this, but uses a different mask. \n\n<a name='ex-6'></a>\n### Exercise 6 - create_mask_from_window\n\nImplement `create_mask_from_window()`. This function will be helpful for pooling backward. \nHints:\n- [np.max()]() may be helpful. It computes the maximum of an array.\n- If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:\n```\nA[i,j] = True if X[i,j] = x\nA[i,j] = False if X[i,j] != x\n```\n- Here, you don't need to consider cases where there are several maxima in a matrix.", "_____no_output_____" ] ], [ [ "def create_mask_from_window(x):\n \"\"\"\n Creates a mask from an input matrix x, to identify the max entry of x.\n \n Arguments:\n x -- Array of shape (f, f)\n \n Returns:\n mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.\n \"\"\" \n # (≈1 line)\n # mask = None\n # YOUR CODE STARTS HERE\n \n \n # YOUR CODE ENDS HERE\n return mask", "_____no_output_____" ], [ "np.random.seed(1)\nx = np.random.randn(2, 3)\nmask = create_mask_from_window(x)\nprint('x = ', x)\nprint(\"mask = \", mask)\n\nx = np.array([[-1, 2, 3],\n [2, -3, 2],\n [1, 5, -2]])\n\ny = np.array([[False, False, False],\n [False, False, False],\n [False, True, False]])\nmask = create_mask_from_window(x)\n\nassert type(mask) == np.ndarray, \"Output must be a np.ndarray\"\nassert mask.shape == x.shape, \"Input and output shapes must match\"\nassert np.allclose(mask, y), \"Wrong output. 
The True value must be at position (2, 1)\"\n\nprint(\"\\033[92m All tests passed.\")", "_____no_output_____" ] ], [ [ "**Expected Output:** \n\n<table> \n<tr> \n<td>\n\n**x =**\n</td>\n\n<td>\n\n[[ 1.62434536 -0.61175641 -0.52817175] <br>\n [-1.07296862 0.86540763 -2.3015387 ]]\n\n </td>\n</tr>\n\n<tr> \n<td>\nmask =\n</td>\n<td>\n[[ True False False] <br>\n [False False False]]\n</td>\n</tr>\n\n\n</table>", "_____no_output_____" ], [ "Why keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will \"propagate\" the gradient back to this particular input value that had influenced the cost. ", "_____no_output_____" ], [ "<a name='5-2-2'></a>\n### 5.2.2 - Average Pooling - Backward Pass \n\nIn max pooling, for each input window, all the \"influence\" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.\n\nFor example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like: \n$$ dZ = 1 \\quad \\rightarrow \\quad dZ =\\begin{bmatrix}\n1/4 && 1/4 \\\\\n1/4 && 1/4\n\\end{bmatrix}\\tag{5}$$\n\nThis implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average. \n\n<a name='ex-7'></a>\n### Exercise 7 - distribute_value\n\nImplement the function below to equally distribute a value dz through a matrix of dimension shape. 
\n\n[Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)", "_____no_output_____" ] ], [ [ "def distribute_value(dz, shape):\n \"\"\"\n Distributes the input value in the matrix of dimension shape\n \n Arguments:\n dz -- input scalar\n shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz\n \n Returns:\n a -- Array of size (n_H, n_W) for which we distributed the value of dz\n \"\"\" \n # Retrieve dimensions from shape (≈1 line)\n # (n_H, n_W) = None\n \n # Compute the value to distribute on the matrix (≈1 line)\n # average = None\n \n # Create a matrix where every entry is the \"average\" value (≈1 line)\n # a = None\n # YOUR CODE STARTS HERE\n \n \n # YOUR CODE ENDS HERE\n return a", "_____no_output_____" ], [ "a = distribute_value(2, (2, 2))\nprint('distributed value =', a)\n\n\nassert type(a) == np.ndarray, \"Output must be a np.ndarray\"\nassert a.shape == (2, 2), f\"Wrong shape {a.shape} != (2, 2)\"\nassert np.sum(a) == 2, \"Values must sum to 2\"\n\na = distribute_value(100, (10, 10))\nassert type(a) == np.ndarray, \"Output must be a np.ndarray\"\nassert a.shape == (10, 10), f\"Wrong shape {a.shape} != (10, 10)\"\nassert np.sum(a) == 100, \"Values must sum to 100\"\n\nprint(\"\\033[92m All tests passed.\")", "_____no_output_____" ] ], [ [ "**Expected Output**: \n\n<table> \n<tr> \n<td>\ndistributed_value =\n</td>\n<td>\n[[ 0.5 0.5]\n<br\\> \n[ 0.5 0.5]]\n</td>\n</tr>\n</table>", "_____no_output_____" ], [ "<a name='5-2-3'></a>\n### 5.2.3 Putting it Together: Pooling Backward \n\nYou now have everything you need to compute backward propagation on a pooling layer.\n\n<a name='ex-8'></a>\n### Exercise 8 - pool_backward\n\nImplement the `pool_backward` function in both modes (`\"max\"` and `\"average\"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). 
You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dA.", "_____no_output_____" ] ], [ [ "def pool_backward(dA, cache, mode = \"max\"):\n \"\"\"\n Implements the backward pass of the pooling layer\n \n Arguments:\n dA -- gradient of cost with respect to the output of the pooling layer, same shape as A\n cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters \n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev\n \"\"\"\n # Retrieve information from cache (≈1 line)\n # (A_prev, hparameters) = None\n \n # Retrieve hyperparameters from \"hparameters\" (≈2 lines)\n # stride = None\n # f = None\n \n # Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)\n # m, n_H_prev, n_W_prev, n_C_prev = None\n # m, n_H, n_W, n_C = None\n \n # Initialize dA_prev with zeros (≈1 line)\n # dA_prev = None\n \n # for i in range(None): # loop over the training examples\n \n # select training example from A_prev (≈1 line)\n # a_prev = None\n \n # for h in range(n_H): # loop on the vertical axis\n # for w in range(n_W): # loop on the horizontal axis\n # for c in range(n_C): # loop over the channels (depth)\n \n # Find the corners of the current \"slice\" (≈4 lines)\n # vert_start = None\n # vert_end = None\n # horiz_start = None\n # horiz_end = None\n \n # Compute the backward propagation in both modes.\n # if mode == \"max\":\n \n # Use the corners and \"c\" to define the current slice from a_prev (≈1 line)\n # a_prev_slice = None\n \n # 
Create the mask from a_prev_slice (≈1 line)\n # mask = None\n\n # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)\n # dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None\n \n # elif mode == \"average\":\n \n # Get the value da from dA (≈1 line)\n # da = None\n \n # Define the shape of the filter as fxf (≈1 line)\n # shape = None\n\n # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)\n # dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None\n # YOUR CODE STARTS HERE\n \n \n # YOUR CODE ENDS HERE\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == A_prev.shape)\n \n return dA_prev", "_____no_output_____" ], [ "np.random.seed(1)\nA_prev = np.random.randn(5, 5, 3, 2)\nhparameters = {\"stride\" : 1, \"f\": 2}\nA, cache = pool_forward(A_prev, hparameters)\nprint(A.shape)\nprint(cache[0].shape)\ndA = np.random.randn(5, 4, 2, 2)\n\ndA_prev1 = pool_backward(dA, cache, mode = \"max\")\nprint(\"mode = max\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev1[1,1] = ', dA_prev1[1, 1]) \nprint()\ndA_prev2 = pool_backward(dA, cache, mode = \"average\")\nprint(\"mode = average\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev2[1,1] = ', dA_prev2[1, 1]) \n\nassert type(dA_prev1) == np.ndarray, \"Wrong type\"\nassert dA_prev1.shape == (5, 5, 3, 2), f\"Wrong shape {dA_prev1.shape} != (5, 5, 3, 2)\"\nassert np.allclose(dA_prev1[1, 1], [[0, 0], \n [ 5.05844394, -1.68282702],\n [ 0, 0]]), \"Wrong values for mode max\"\nassert np.allclose(dA_prev2[1, 1], [[0.08485462, 0.2787552], \n [1.26461098, -0.25749373], \n [1.17975636, -0.53624893]]), \"Wrong values for mode average\"\nprint(\"\\033[92m All tests passed.\")", "_____no_output_____" ] ], [ [ "**Expected Output**: \n\nmode = max:\n<table> \n<tr> \n<td>\n\n**mean of dA =**\n</td>\n\n<td>\n\n0.145713902729\n\n </td>\n</tr>\n\n<tr> \n<td>\ndA_prev[1,1] =\n</td>\n<td>\n[[ 0. 0. 
] <br>\n [ 5.05844394 -1.68282702] <br>\n [ 0. 0. ]]\n</td>\n</tr>\n</table>\n\nmode = average\n<table> \n<tr> \n<td>\n\nmean of dA =\n</td>\n\n<td>\n\n0.145713902729\n\n </td>\n</tr>\n\n<tr> \n<td>\ndA_prev[1,1] =\n</td>\n<td>\n[[ 0.08485462 0.2787552 ] <br>\n [ 1.26461098 -0.25749373] <br>\n [ 1.17975636 -0.53624893]]\n</td>\n</tr>\n</table>", "_____no_output_____" ], [ "**Congratulations**! You've completed the assignment and its optional portion. You now understand how convolutional neural networks work, and have implemented all the building blocks of a neural network. In the next assignment you will implement a ConvNet using TensorFlow. Nicely done! See you there.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
d066b2c1553c646abfb24c2d51fc6bc6ab84a78e
704,667
ipynb
Jupyter Notebook
nbs/dl1/lesson3-head-pose.ipynb
perrychu/course-v3
04214492feb68344ff8f9c4de9f34463d4c62ef5
[ "Apache-2.0" ]
null
null
null
nbs/dl1/lesson3-head-pose.ipynb
perrychu/course-v3
04214492feb68344ff8f9c4de9f34463d4c62ef5
[ "Apache-2.0" ]
null
null
null
nbs/dl1/lesson3-head-pose.ipynb
perrychu/course-v3
04214492feb68344ff8f9c4de9f34463d4c62ef5
[ "Apache-2.0" ]
null
null
null
1,909.666667
325,640
0.961099
[ [ [ "## Regression with BIWI head pose dataset", "_____no_output_____" ], [ "This is a more advanced example to show how to create custom datasets and do regression with images. Our task is to find the center of the head in each image. The data comes from the [BIWI head pose dataset](https://data.vision.ee.ethz.ch/cvl/gfanelli/head_pose/head_forest.html#db), thanks to Gabriele Fanelli et al. We have converted the images to jpeg format, so you should download the converted dataset from [this link](https://s3.amazonaws.com/fast-ai-imagelocal/biwi_head_pose.tgz).", "_____no_output_____" ] ], [ [ "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "from fastai import *\nfrom fastai.vision import *", "_____no_output_____" ] ], [ [ "## Getting and converting the data", "_____no_output_____" ] ], [ [ "path = untar_data(URLs.BIWI_HEAD_POSE)", "_____no_output_____" ], [ "cal = np.genfromtxt(path/'01'/'rgb.cal', skip_footer=6); cal", "_____no_output_____" ], [ "fname = '09/frame_00667_rgb.jpg'", "_____no_output_____" ], [ "def img2txt_name(f): return path/f'{str(f)[:-7]}pose.txt'", "_____no_output_____" ], [ "img = open_image(path/fname)\nimg.show()", "_____no_output_____" ], [ "ctr = np.genfromtxt(img2txt_name(fname), skip_header=3); ctr", "_____no_output_____" ], [ "def convert_biwi(coords):\n c1 = coords[0] * cal[0][0]/coords[2] + cal[0][2]\n c2 = coords[1] * cal[1][1]/coords[2] + cal[1][2]\n return tensor([c2,c1])\n\ndef get_ctr(f):\n ctr = np.genfromtxt(img2txt_name(f), skip_header=3)\n return convert_biwi(ctr)\n\ndef get_ip(img,pts): return ImagePoints(FlowField(img.size, pts), scale=True)", "_____no_output_____" ], [ "get_ctr(fname)", "_____no_output_____" ], [ "ctr = get_ctr(fname)\nimg.show(y=get_ip(img, ctr), figsize=(6, 6))", "_____no_output_____" ] ], [ [ "## Creating a dataset", "_____no_output_____" ] ], [ [ "data = (ImageItemList.from_folder(path)\n .split_by_valid_func(lambda o: o.parent.name=='13')\n 
.label_from_func(get_ctr, label_cls=PointsItemList)\n .transform(get_transforms(), tfm_y=True, size=(120,160))\n .databunch().normalize(imagenet_stats)\n )", "_____no_output_____" ], [ "data.show_batch(3, figsize=(9,6))", "_____no_output_____" ] ], [ [ "## Train model", "_____no_output_____" ] ], [ [ "learn = create_cnn(data, models.resnet34)", "_____no_output_____" ], [ "learn.lr_find()\nlearn.recorder.plot()", "LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.\n" ], [ "lr = 2e-2", "_____no_output_____" ], [ "learn.fit_one_cycle(5, slice(lr))", "Total time: 07:28\nepoch train_loss valid_loss\n1 0.043327 0.010848 (01:34)\n2 0.015479 0.001792 (01:27)\n3 0.006021 0.001171 (01:28)\n4 0.003105 0.000521 (01:27)\n5 0.002425 0.000381 (01:29)\n\n" ], [ "learn.save('stage-1')", "_____no_output_____" ], [ "learn.load('stage-1');", "_____no_output_____" ], [ "learn.show_results()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d066c4f4223476681a361fed608089e4e829af92
33,010
ipynb
Jupyter Notebook
Marketing Analytics in Python/Segmentation/Notebooks/sgmt_numvar_ElbowChart.ipynb
zoutianxin1992/MarketingAnalyticsPython
23930c2843cb2e4527ada5de6311e45c5d20a3a0
[ "MIT" ]
null
null
null
Marketing Analytics in Python/Segmentation/Notebooks/sgmt_numvar_ElbowChart.ipynb
zoutianxin1992/MarketingAnalyticsPython
23930c2843cb2e4527ada5de6311e45c5d20a3a0
[ "MIT" ]
null
null
null
Marketing Analytics in Python/Segmentation/Notebooks/sgmt_numvar_ElbowChart.ipynb
zoutianxin1992/MarketingAnalyticsPython
23930c2843cb2e4527ada5de6311e45c5d20a3a0
[ "MIT" ]
null
null
null
136.404959
25,048
0.879067
[ [ [ "# Choosing the number of segments - Elbow chart method", "_____no_output_____" ], [ "This document illustrates how to decide the number of segments (optimal $k$) using elbow charts.", "_____no_output_____" ], [ "## Introducing elbow chart method", "_____no_output_____" ], [ "**When we should (not) add more clusters**: Ideally, the lower the $SSE$ is, the better is the clustering. Although adding more clusters (a higher $k$) always reduces $SSE$, adding too many clusters can be managerially cumbersome (e.g., when designing individual strategies for each segment) and redundant (e.g., nearby clusters have little differences). Hence, we want to add more clusters if doing so can **significantly** reduce $SSE$, and stop adding clusters if doing so **doesn't reduce $SSE$ by much**.", "_____no_output_____" ], [ "**How elbow chart works**: The elbow chart plots a curve of how SSE changes with the number of clusters. Because adding more clusters will reduce SSE, the curve will be downward sloping, and the curve is steeper if adding one more cluster ($k \\rightarrow k+1$) reduces SSE by a greater amount. We should choose the cluster number $k$ that corresponds to the \"elbow point\" in the plot (the kink where the curve exhibits an \"L\" shape). The elbow point indicates that the curve is steeper on the left ($SSE$ decreases a lot from $k-1$ to $k$), and is flatter on the right ($SSE$ decreases not much from $k$ to $k+1$).", "_____no_output_____" ], [ "**Procedure**: Suppose we want to create no more than $K$ segments. The procedure is as follows:\n1. For $k$ from $1$ to $K$: run k-mean algorithm with $k$ clusters, and calculate and record the $SSE$.\n2. Plot $SSE$ over the number of segments $k$ to get the elbow chart.\n3. Find $k$ that corresponds to the elbow point. This is the optimal number of segments to segment consumers.\n4. 
Use the optimal $k$ to run k-mean algorithm to segment consumers.", "_____no_output_____" ], [ "We will use \"MallCustomersTwoVariables.csv\" for analysis.", "_____no_output_____" ], [ "## Loading data and preprocessing ", "_____no_output_____" ], [ "This section will generate the normalized dataframe, `df_normalized`, for k-mean algorithm.", "_____no_output_____" ] ], [ [ "# importing packages\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cluster import KMeans # Use \"sklearn/cluster/KMeans\" for clustering analysis \n\n# importing data and renaming variables\nurl = \"https://raw.githubusercontent.com/zoutianxin1992/MarketingAnalyticsPython/main/Marketing%20Analytics%20in%20Python/Segmentation/Datasets/MallCustomersTwoVariables.csv\"\ndf = pd.read_csv(url,index_col=0) # use the first column (customer id) as index\ndf = df.rename(columns = {\"Annual Income (k$)\":\"annual_income\",\"Spending Score (1-100)\":\"spending_score\"})\n\n\n# normalizing the data for k-mean algorithm\ndf_normalized = (df-df.min())/(df.max()-df.min()) # By default, pandas calculate maximums and minimums by columns, which serves our purpose.", "_____no_output_____" ] ], [ [ "## Calculate $SSE$ for each $k$", "_____no_output_____" ], [ "For exposition, we will create no more than $K = 10$ clusters, and calculate $SSE$s when $k = 1,2,3,...,K$. This can be achieved with a for loop.\n<br />\n(If you use Windows, you may see a warning of \"KMeans is known to have a memory leak....\" Don't worry in our case because both our data size and the number of clusters are much smaller than when the problem will happen.)", "_____no_output_____" ] ], [ [ "K = 10 # K is the maximum number of clusters we will check\nstore_SSE = np.zeros(K) # create a vector to store SSE's. 
The k-th entry will be the SSE with k clusters.\n\nfor k in range(1, K+1): # try k from 1 to K \n kmeanSpec = KMeans(n_clusters = k, n_init = 100) # set up k-mean model with k clusters\n kmean_result = kmeanSpec.fit(df_normalized) # run k-mean on normalized data\n store_SSE[k-1] = kmeanSpec.inertia_ # store the SSE (.inertia_) in the k-th entry of store_SSE\n", "C:\\Users\\zoutianxin\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\sklearn\\cluster\\_kmeans.py:882: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n f\"KMeans is known to have a memory leak on Windows \"\n" ] ], [ [ "## Generate elbow chart", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\n%matplotlib inline\nplt.rcParams['figure.figsize'] = [12,8] # set figure size to be 12*8 inch\nplt.plot(range(1, K+1), store_SSE) \nplt.xticks(range(1, K+1), fontsize = 18)\nplt.yticks(fontsize = 18)\nplt.ylabel(\"SSE\",fontsize = 18)\nplt.xlabel(\"number of clusters\", fontsize = 18)\n", "_____no_output_____" ] ], [ [ "As we can see, the elbow point (kink of \"L\" shape) appears at $k = 5$, which will be the optimal number of segments to use. ", "_____no_output_____" ], [ "## Potential Problems of the elbow-chart method", "_____no_output_____" ], [ "- There may be no apparent elbow points or multiple elbow points in the chart\n- Choice of elbow points is rather subjective", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
d066db53ed05c94998abbfaaca6fd0ccea6223e6
30,764
ipynb
Jupyter Notebook
notebooks/AE_CelebA_experiment.ipynb
yandex-research/learnable-init
480627217763912e83251833df2d678c8b6ea6fd
[ "Apache-2.0" ]
4
2021-07-14T19:18:47.000Z
2022-03-21T17:50:46.000Z
notebooks/AE_CelebA_experiment.ipynb
yandex-research/learnable-init
480627217763912e83251833df2d678c8b6ea6fd
[ "Apache-2.0" ]
null
null
null
notebooks/AE_CelebA_experiment.ipynb
yandex-research/learnable-init
480627217763912e83251833df2d678c8b6ea6fd
[ "Apache-2.0" ]
null
null
null
39.239796
130
0.569692
[ [ [ "# DIMAML for Autoencoder models\n\nTraining is on Celeba. Evaluation is on Tiny ImageNet", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2\n%env CUDA_VISIBLE_DEVICES=0\nimport os, sys, time\nsys.path.insert(0, '..')\nimport lib\n\nimport math\nimport numpy as np\nfrom copy import deepcopy\nimport torch, torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.style.use('seaborn-darkgrid')\nplt.rcParams['pdf.fonttype'] = 42\nplt.rcParams['ps.fonttype'] = 42\n\n# For reproducibility\nimport random\nseed = random.randint(0, 2 ** 32 - 1)\nrandom.seed(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nprint(seed)", "_____no_output_____" ] ], [ [ "## Setting", "_____no_output_____" ] ], [ [ "model_type = 'AE'\n\n# Dataset \ndata_dir = './data'\ntrain_batch_size = 128\nvalid_batch_size = 256\ntest_batch_size = 128\nnum_workers = 3\npin_memory = True\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# AE\nlatent_dim = 64 \nloss_function = F.mse_loss\n\n# MAML\nmax_steps = 1500\ninner_loop_steps_in_epoch = 200\ninner_loop_epochs = 3\ninner_loop_steps = inner_loop_steps_in_epoch * inner_loop_epochs\nmeta_grad_clip = 10.\n\nloss_kwargs={'reduction':'mean'}\nloss_interval = 50\nfirst_val_step = 200\n\nassert (inner_loop_steps - first_val_step) % loss_interval == 0\nvalidation_steps = int((inner_loop_steps - first_val_step) / loss_interval + 1)\n\n# Inner optimizer\ninner_optimizer_type='momentum'\ninner_optimizer_kwargs = dict(\n lr=0.01, momentum=0.9, \n nesterov=False, weight_decay=0.0\n)\n\n# Meta optimizer\nmeta_learning_rate = 1e-4\nmeta_betas = (0.9, 0.997)\nmeta_decay_interval = max_steps\n\ncheckpoint_steps = 15\nrecovery_step = None\n\nkwargs = dict(\n first_valid_step=first_val_step,\n valid_loss_interval=loss_interval, \n loss_kwargs=loss_kwargs, \n)", "_____no_output_____" ], [ "exp_name = 
f\"{model_type}{latent_dim}_celeba_{inner_optimizer_type}\" + \\\n f\"_steps{inner_loop_steps}_interval{loss_interval}\" + \\\n f\"_tr_bs{train_batch_size}_val_bs{valid_batch_size}_seed_{seed}\"\n \nprint(\"Experiment name: \", exp_name)\n\nlogs_path = \"./logs/{}\".format(exp_name)\nassert recovery_step is not None or not os.path.exists(logs_path)\n# !rm -rf {logs_path}", "_____no_output_____" ] ], [ [ "## Prepare the CelebA dataset", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport shutil\n\nceleba_data_dir = 'data/celeba/'\ndata = pd.read_csv(os.path.join(celeba_data_dir, 'list_eval_partition.csv'))\n\ntry:\n for partition in ['train', 'val', 'test']:\n os.makedirs(os.path.join(celeba_data_dir, partition))\n os.makedirs(os.path.join(celeba_data_dir, partition, 'images'))\n\n for i in data.index:\n partition = data.loc[i].partition\n src_path = os.path.join(celeba_data_dir, 'img_align_celeba/img_align_celeba', data.loc[i].image_id)\n if partition == 0:\n shutil.copyfile(src_path, os.path.join(celeba_data_dir, 'train', 'images', data.loc[i].image_id))\n elif partition == 1:\n shutil.copyfile(src_path, os.path.join(celeba_data_dir, 'val', 'images', data.loc[i].image_id))\n elif partition == 2:\n shutil.copyfile(src_path, os.path.join(celeba_data_dir, 'test', 'images', data.loc[i].image_id))\n \nexcept FileExistsError:\n print('\\'train\\', \\'val\\', \\'test\\' already exist. 
Probably, you do not want to copy data again')", "_____no_output_____" ], [ "from torchvision import transforms, datasets\nfrom torch.utils.data import DataLoader\n\nceleba_transforms = transforms.Compose([\n transforms.Resize((64, 64)),\n transforms.ToTensor(),\n])\n\n# Create the train set\nceleba_train_dataset = datasets.ImageFolder(celeba_data_dir+'train', transform=celeba_transforms)\nceleba_train_images = torch.cat([celeba_train_dataset[i][0][None] for i in range(len(celeba_train_dataset))])\n\nceleba_mean_image = celeba_train_images.mean(0)\nceleba_std_image = celeba_train_images.std(0)\nceleba_train_images = (celeba_train_images - celeba_mean_image) / celeba_std_image\n\n# Create the val set\nceleba_valid_dataset = datasets.ImageFolder(celeba_data_dir+'val', celeba_transforms)\nceleba_valid_images = torch.cat([celeba_valid_dataset[i][0][None] for i in range(len(celeba_valid_dataset))])\nceleba_valid_images = (celeba_valid_images - celeba_mean_image) / celeba_std_image\n\n# Create the test set\nceleba_test_dataset = datasets.ImageFolder(celeba_data_dir+'test', celeba_transforms)\nceleba_test_images = torch.cat([celeba_test_dataset[i][0][None] for i in range(len(celeba_test_dataset))])\nceleba_test_images = (celeba_test_images - celeba_mean_image) / celeba_std_image\n\n# Create data loaders \ntrain_loader = torch.utils.data.DataLoader(celeba_train_images, batch_size=train_batch_size, shuffle=True,\n pin_memory=pin_memory, num_workers=num_workers)\nvalid_loader = torch.utils.data.DataLoader(celeba_valid_images, batch_size=valid_batch_size, shuffle=True,\n pin_memory=pin_memory, num_workers=num_workers)\ntest_loader = torch.utils.data.DataLoader(celeba_test_images, batch_size=test_batch_size, \n pin_memory=pin_memory, num_workers=num_workers)", "_____no_output_____" ] ], [ [ "## Create the model and meta-optimizer", "_____no_output_____" ] ], [ [ "optimizer = lib.make_inner_optimizer(inner_optimizer_type, **inner_optimizer_kwargs)\nmodel = 
lib.models.AE(latent_dim)\nmaml = lib.MAML(model, model_type, optimizer=optimizer, \n checkpoint_steps=checkpoint_steps,\n loss_function=loss_function\n).to(device)", "_____no_output_____" ] ], [ [ "## Trainer", "_____no_output_____" ] ], [ [ "def samples_batches(dataloader, num_batches):\n x_batches = []\n for batch_i, x_batch in enumerate(dataloader):\n if batch_i >= num_batches: break\n x_batches.append(x_batch)\n return x_batches\n\n\nclass TrainerAE(lib.Trainer):\n def train_on_batch(self, train_loader, valid_loader, prefix='train/', **kwargs):\n \"\"\" Performs a single gradient update and reports metrics \"\"\"\n # Sample train and val batches\n x_batches = []\n for _ in range(inner_loop_epochs):\n x_batches.extend(samples_batches(train_loader, inner_loop_steps_in_epoch))\n x_val_batches = samples_batches(valid_loader, validation_steps)\n\n # Perform a meta training step\n self.meta_optimizer.zero_grad()\n with lib.training_mode(self.maml, is_train=True):\n self.maml.resample_parameters()\n _updated_model, train_loss_history, valid_loss_history, *etc = \\\n self.maml.forward(x_batches, x_batches, x_val_batches, x_val_batches, \n device=self.device, **kwargs) \n train_loss = torch.cat(train_loss_history).mean()\n valid_loss = torch.cat(valid_loss_history).mean() if len(valid_loss_history) > 0 else torch.zeros(1)\n valid_loss.backward()\n\n # Check gradients \n grad_norm = lib.utils.total_norm_frobenius(self.maml.initializers.parameters())\n self.writer.add_scalar(prefix + \"grad_norm\", grad_norm, self.total_steps)\n bad_grad = not math.isfinite(grad_norm)\n\n if not bad_grad:\n nn.utils.clip_grad_norm_(list(self.maml.initializers.parameters()), meta_grad_clip)\n else:\n print(\"Fix bad grad. 
Loss {} | Grad {}\".format(train_loss.item(), grad_norm))\n for param in self.maml.initializers.parameters():\n param.grad = torch.where(torch.isfinite(param.grad), \n param.grad, torch.zeros_like(param.grad))\n self.meta_optimizer.step()\n return self.record(train_loss=train_loss.item(),\n valid_loss=valid_loss.item(), prefix=prefix)\n \n def evaluate_metrics(self, train_loader, test_loader, prefix='val/', **kwargs):\n \"\"\" Predicts and evaluates metrics over the entire dataset \"\"\"\n torch.cuda.empty_cache()\n \n print('Baseline')\n self.maml.resample_parameters(initializers=self.maml.untrained_initializers, is_final=True)\n base_model = deepcopy(self.maml.model) \n base_train_loss_history, base_test_loss_history = eval_model(base_model, train_loader, test_loader,\n device=self.device, **kwargs)\n print('DIMAML')\n self.maml.resample_parameters(is_final=True)\n maml_model = deepcopy(self.maml.model)\n maml_train_loss_history, maml_test_loss_history = eval_model(maml_model, train_loader, test_loader, \n device=self.device, **kwargs)\n lib.utils.ae_draw_plots(base_train_loss_history, base_test_loss_history, \n maml_train_loss_history, maml_test_loss_history)\n \n self.writer.add_scalar(prefix + \"train_AUC\", sum(maml_train_loss_history), self.total_steps)\n self.writer.add_scalar(prefix + \"test_AUC\", sum(maml_test_loss_history), self.total_steps)\n self.writer.add_scalar(prefix + \"test_loss\", maml_test_loss_history[-1], self.total_steps)", "_____no_output_____" ], [ "########################\n# Generate Train Batch #\n########################\n \ndef generate_train_batches(train_loader, batches_in_epoch=150):\n x_batches = []\n for batch_i, x_batch in enumerate(train_loader):\n if batch_i >= batches_in_epoch: break\n x_batches.append(x_batch)\n\n assert len(x_batches) == batches_in_epoch\n local_x = torch.cat(x_batches, dim=0)\n return DataLoader(local_x, batch_size=train_batch_size, shuffle=True, \n num_workers=num_workers, 
pin_memory=pin_memory)\n\n##################\n# Eval functions #\n##################\n\[email protected]_grad()\ndef compute_test_loss(model, loss_function, test_loader, device='cuda'):\n model.eval() \n test_loss = 0.\n for batch_test in test_loader:\n if isinstance(batch_test, (list, tuple)):\n x_test = batch_test[0].to(device)\n elif isinstance(batch_test, torch.Tensor):\n x_test = batch_test.to(device)\n else:\n raise Exception(\"Wrong batch\")\n preds = model(x_test)\n test_loss += loss_function(preds, x_test) * x_test.shape[0]\n test_loss /= len(test_loader.dataset)\n model.train()\n return test_loss.item()\n\n\ndef eval_model(model, train_loader, test_loader, batches_in_epoch=150, \n epochs=3, test_loss_interval=50, device='cuda', **kwargs):\n optimizer = lib.optimizers.make_eval_inner_optimizer(\n maml, model, inner_optimizer_type, \n **inner_optimizer_kwargs\n )\n train_loss_history = []\n test_loss_history = []\n \n training_mode = model.training\n total_iters = 0\n for epoch in range(1, epochs + 1):\n model.train()\n for x_batch in train_loader:\n optimizer.zero_grad()\n x_batch = x_batch.to(device)\n preds = model(x_batch)\n loss = loss_function(preds, x_batch)\n loss.backward()\n optimizer.step()\n train_loss_history.append(loss.item())\n \n if (total_iters == 0) or (total_iters + 1) % test_loss_interval == 0: \n model.eval()\n test_loss = compute_test_loss(model, loss_function, test_loader, device=device)\n print(\"Epoch {} | Total Iteration {} | Loss {}\".format(epoch, total_iters+1, test_loss))\n test_loss_history.append(test_loss)\n model.train()\n \n total_iters += 1 \n model.train(training_mode)\n return train_loss_history, test_loss_history", "_____no_output_____" ], [ "train_loss_history = []\nvalid_loss_history = []\n\ntrainer = TrainerAE(maml, meta_lr=meta_learning_rate, \n meta_betas=meta_betas, meta_grad_clip=meta_grad_clip,\n exp_name=exp_name, recovery_step=recovery_step)", "_____no_output_____" ], [ "from IPython.display import 
clear_output\n\nlib.free_memory()\nt0 = time.time()\n\nwhile trainer.total_steps <= max_steps:\n local_train_loader = generate_train_batches(train_loader, inner_loop_steps_in_epoch)\n \n with lib.activate_context_batchnorm(maml.model):\n metrics = trainer.train_on_batch(\n local_train_loader, valid_loader, **kwargs\n )\n train_loss = metrics['train_loss']\n train_loss_history.append(train_loss)\n \n valid_loss = metrics['valid_loss']\n valid_loss_history.append(valid_loss)\n\n if trainer.total_steps % 20 == 0:\n clear_output(True)\n print(\"Step: %d | Time: %f | Train Loss %.5f | Valid loss %.5f\" \n % (trainer.total_steps, time.time()-t0, train_loss, valid_loss))\n plt.figure(figsize=[16, 5])\n plt.subplot(1,2,1)\n plt.title('Train Loss over time')\n plt.plot(lib.utils.moving_average(train_loss_history, span=50))\n plt.scatter(range(len(train_loss_history)), train_loss_history, alpha=0.1)\n plt.subplot(1,2,2)\n plt.title('Valid Loss over time')\n plt.plot(lib.utils.moving_average(valid_loss_history, span=50))\n plt.scatter(range(len(valid_loss_history)), valid_loss_history, alpha=0.1)\n plt.show()\n trainer.evaluate_metrics(local_train_loader, test_loader, epochs=inner_loop_epochs,\n test_loss_interval=loss_interval)\n lib.utils.ae_visualize_pdf(maml)\n t0 = time.time()\n \n if trainer.total_steps % 100 == 0:\n trainer.save_model()\n \n trainer.total_steps += 1", "_____no_output_____" ] ], [ [ "## Probability Functions ", "_____no_output_____" ] ], [ [ "lib.utils.ae_visualize_pdf(maml)", "_____no_output_____" ] ], [ [ "# Evaluation", "_____no_output_____" ] ], [ [ "torch.backends.cudnn.deterministic = False\ntorch.backends.cudnn.benchmark = True", "_____no_output_____" ], [ "def genOrthgonal(dim):\n a = torch.zeros((dim, dim)).normal_(0, 1)\n q, r = torch.qr(a)\n d = torch.diag(r, 0).sign()\n diag_size = d.size(0)\n d_exp = d.view(1, diag_size).expand(diag_size, diag_size)\n q.mul_(d_exp)\n return q\n\ndef makeDeltaOrthogonal(weights, gain):\n rows = 
weights.size(0)\n cols = weights.size(1)\n if rows < cols:\n print(\"In_filters should not be greater than out_filters.\")\n weights.data.fill_(0)\n dim = max(rows, cols)\n q = genOrthgonal(dim)\n mid1 = weights.size(2) // 2\n mid2 = weights.size(3) // 2\n with torch.no_grad():\n weights[:, :, mid1, mid2] = q[:weights.size(0), :weights.size(1)]\n weights.mul_(gain)", "_____no_output_____" ], [ "def gradient_quotient(loss, params, eps=1e-5): \n grad = torch.autograd.grad(loss, params, retain_graph=True, create_graph=True)\n prod = torch.autograd.grad(sum([(g**2).sum() / 2 for g in grad]),\n params, retain_graph=True, create_graph=True)\n out = sum([((g - p) / (g + eps * (2*(g >= 0).float() - 1).detach()) - 1).abs().sum() \n for g, p in zip(grad, prod)])\n return out / sum([p.data.nelement() for p in params])\n\n\ndef metainit(model, criterion, x_size, lr=0.1, momentum=0.9, steps=200, eps=1e-5):\n model.eval()\n params = [p for p in model.parameters() \n if p.requires_grad and len(p.size()) >= 2]\n memory = [0] * len(params)\n for i in range(steps):\n input = torch.Tensor(*x_size).normal_(0, 1).cuda()\n loss = criterion(model(input), input)\n gq = gradient_quotient(loss, list(model.parameters()), eps)\n \n grad = torch.autograd.grad(gq, params)\n for j, (p, g_all) in enumerate(zip(params, grad)):\n norm = p.data.norm().item()\n g = torch.sign((p.data * g_all).sum() / norm) \n memory[j] = momentum * memory[j] - lr * g.item() \n new_norm = norm + memory[j]\n p.data.mul_(new_norm / (norm + eps))\n print(\"%d/GQ = %.2f\" % (i, gq.item()))", "_____no_output_____" ] ], [ [ "## Evalution on Tiny Imagenet", "_____no_output_____" ] ], [ [ "class PixelNormalize(object):\n def __init__(self, mean_image, std_image):\n self.mean_image = mean_image\n self.std_image = std_image\n \n def __call__(self, image):\n normalized_image = (image - self.mean_image) / self.std_image\n return normalized_image\n\n \nclass Flip(object):\n def __call__(self, image):\n if random.random() > 0.5:\n 
return image.flip(-1)\n else:\n return image\n \n \nclass CustomTensorDataset(torch.utils.data.Dataset):\n \"\"\" TensorDataset with support of transforms \"\"\"\n def __init__(self, *tensors, transform=None):\n assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)\n self.tensors = tensors\n self.transform = transform\n \n def __getitem__(self, index):\n x = self.tensors[0][index]\n \n if self.transform:\n x = self.transform(x)\n return x\n \n def __len__(self):\n return self.tensors[0].size(0)", "_____no_output_____" ], [ "# Load train and valid data\nfrom torchvision import transforms, datasets\nfrom torch.utils.data import DataLoader\n\ndata_dir = 'data/tiny-imagenet-200/'\n\ntrain_image_dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'), transforms.ToTensor())\ntrain_images = torch.cat([train_image_dataset[i][0][None] for i in range(len(train_image_dataset))], dim=0)\nmean_image = train_images.mean(0)\nstd_image = train_images.std(0)\n\ntrain_transforms = transforms.Compose([\n Flip(),\n PixelNormalize(mean_image, std_image),\n])\n\neval_transforms = transforms.Compose([\n PixelNormalize(mean_image, std_image),\n])\n\nti_train_dataset = CustomTensorDataset(train_images, transform=train_transforms)\n\n\nvalid_image_dataset = datasets.ImageFolder(os.path.join(data_dir, 'val'), transforms.ToTensor())\nvalid_images = torch.cat([valid_image_dataset[i][0][None] for i in range(len(valid_image_dataset))], dim=0)\nti_valid_dataset = CustomTensorDataset(valid_images, transform=eval_transforms)\n\ntest_image_dataset = datasets.ImageFolder(os.path.join(data_dir, 'test'), transforms.ToTensor())\ntest_images = torch.cat([test_image_dataset[i][0][None] for i in range(len(test_image_dataset))], dim=0)\nti_test_dataset = CustomTensorDataset(test_images, transform=eval_transforms)\n\n\n# Create data loaders\nti_train_loader = DataLoader(\n ti_train_dataset, batch_size=train_batch_size, shuffle=True,\n num_workers=num_workers, 
pin_memory=pin_memory,\n)\n\nti_valid_loader = DataLoader(\n ti_valid_dataset, batch_size=valid_batch_size, shuffle=True,\n num_workers=num_workers, pin_memory=pin_memory,\n)\n\nti_test_loader = DataLoader(\n ti_test_dataset, batch_size=test_batch_size, shuffle=False, \n num_workers=num_workers, pin_memory=pin_memory\n)", "_____no_output_____" ], [ "num_reruns = 10\nti_batches_in_epoch = len(ti_train_loader) #782 - full epoch\nassert ti_batches_in_epoch == 782\n\nti_base_runs_10 = []\nti_base_runs_50 = []\nti_base_runs_100 = []\n\nti_metainit_runs_10 = []\nti_metainit_runs_50 = []\nti_metainit_runs_100 = []\n\nti_deltaorthogonal_runs_10 = []\nti_deltaorthogonal_runs_50 = []\nti_deltaorthogonal_runs_100 = []\n\nti_maml_runs_10 = []\nti_maml_runs_50 = []\nti_maml_runs_100 = []\n\nfor _ in range(num_reruns):\n print(\"Baseline\")\n maml.resample_parameters(initializers=maml.untrained_initializers, is_final=True)\n base_model = deepcopy(maml.model) \n base_train_loss_history, base_test_loss_history = \\\n eval_model(base_model, ti_train_loader, ti_test_loader, epochs=100, \n test_loss_interval=10*ti_batches_in_epoch, device=device)\n \n print(\"MetaInit\")\n batch_x = next(iter(ti_train_loader))\n maml.resample_parameters(initializers=maml.untrained_initializers, is_final=True)\n metainit_model = deepcopy(maml.model)\n metainit(metainit_model, loss_function, batch_x.shape, steps=200)\n \n metainit_train_loss_history, metainit_test_loss_history = \\\n eval_model(metainit_model, ti_train_loader, ti_test_loader, \n batches_in_epoch=ti_batches_in_epoch, epochs=100, \n test_loss_interval=10*ti_batches_in_epoch, device=device)\n \n print(\"Delta Orthogonal\")\n maml.resample_parameters(initializers=maml.untrained_initializers, is_final=True)\n deltaorthogonal_model = deepcopy(maml.model)\n for param in deltaorthogonal_model.parameters():\n if len(param.size()) >= 4:\n makeDeltaOrthogonal(param, nn.init.calculate_gain('relu'))\n \n deltaorthogonal_train_loss_history, 
deltaorthogonal_test_loss_history = \\\n eval_model(deltaorthogonal_model, ti_train_loader, ti_test_loader, \n batches_in_epoch=ti_batches_in_epoch, epochs=100, \n test_loss_interval=10*ti_batches_in_epoch, device=device)\n \n ti_deltaorthogonal_runs_10.append(deltaorthogonal_test_loss_history[1])\n ti_deltaorthogonal_runs_50.append(deltaorthogonal_test_loss_history[5])\n ti_deltaorthogonal_runs_100.append(deltaorthogonal_test_loss_history[10])\n\n print(\"DIMAML\")\n maml.resample_parameters(is_final=True)\n maml_model = deepcopy(maml.model)\n maml_train_loss_history, maml_test_loss_history = \\\n eval_model(maml_model, ti_train_loader, ti_test_loader, epochs=100, \n test_loss_interval=10*ti_batches_in_epoch, device=device)\n \n ti_base_runs_10.append(base_test_loss_history[1])\n ti_base_runs_50.append(base_test_loss_history[5])\n ti_base_runs_100.append(base_test_loss_history[10])\n \n ti_metainit_runs_10.append(metainit_test_loss_history[1])\n ti_metainit_runs_50.append(metainit_test_loss_history[5])\n ti_metainit_runs_100.append(metainit_test_loss_history[10])\n \n ti_maml_runs_10.append(maml_test_loss_history[1])\n ti_maml_runs_50.append(maml_test_loss_history[5])\n ti_maml_runs_100.append(maml_test_loss_history[10])", "_____no_output_____" ], [ "print(\"Baseline 10 epoch: \", np.mean(ti_base_runs_10), np.std(ti_base_runs_10, ddof=1))\nprint(\"Baseline 50 epoch: \", np.mean(ti_base_runs_50), np.std(ti_base_runs_50, ddof=1))\nprint(\"Baseline 100 epoch: \", np.mean(ti_base_runs_100), np.std(ti_base_runs_100, ddof=1))\nprint()\nprint(\"DeltaOrthogonal 10 epoch: \", np.mean(ti_deltaorthogonal_runs_10), np.std(ti_deltaorthogonal_runs_10, ddof=1))\nprint(\"DeltaOrthogonal 50 epoch: \", np.mean(ti_deltaorthogonal_runs_50), np.std(ti_deltaorthogonal_runs_50, ddof=1))\nprint(\"DeltaOrthogonal 100 epoch: \", np.mean(ti_deltaorthogonal_runs_100), np.std(ti_deltaorthogonal_runs_100, ddof=1))\nprint()\nprint(\"MetaInit 10 epoch: \", np.mean(ti_metainit_runs_10), 
np.std(ti_metainit_runs_10, ddof=1))\nprint(\"MetaInit 50 epoch: \", np.mean(ti_metainit_runs_50), np.std(ti_metainit_runs_50, ddof=1))\nprint(\"MetaInit 100 epoch: \", np.mean(ti_metainit_runs_100), np.std(ti_metainit_runs_100, ddof=1))\nprint()\nprint(\"DIMAML 10 epoch: \", np.mean(ti_maml_runs_10), np.std(ti_maml_runs_10, ddof=1))\nprint(\"DIMAML 50 epoch: \", np.mean(ti_maml_runs_50), np.std(ti_maml_runs_50, ddof=1))\nprint(\"DIMAML 100 epoch: \", np.mean(ti_maml_runs_100), np.std(ti_maml_runs_100, ddof=1))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d066dda2eb7de5790d6798c35e63c11cb947a006
62,788
ipynb
Jupyter Notebook
Convolutional Neural Networks/week3/1 Car_detection (Autonomous_driving)/Autonomous_driving_application_Car_detection.ipynb
nirav8403/Deep-Learning-Specialization-Coursera
251809e6977edbfbc33f1f4b0d253e9f6a50cdf2
[ "Apache-2.0" ]
15
2021-11-03T04:33:22.000Z
2022-03-30T18:24:57.000Z
Convolutional Neural Networks/week3/1 Car_detection (Autonomous_driving)/Autonomous_driving_application_Car_detection.ipynb
nirav8403/Deep-Learning-Specialization-Coursera
251809e6977edbfbc33f1f4b0d253e9f6a50cdf2
[ "Apache-2.0" ]
null
null
null
Convolutional Neural Networks/week3/1 Car_detection (Autonomous_driving)/Autonomous_driving_application_Car_detection.ipynb
nirav8403/Deep-Learning-Specialization-Coursera
251809e6977edbfbc33f1f4b0d253e9f6a50cdf2
[ "Apache-2.0" ]
21
2021-11-03T04:34:11.000Z
2022-03-22T10:17:06.000Z
42.829468
562
0.569982
[ [ [ "# Autonomous Driving - Car Detection\n\nWelcome to the Week 3 programming assignment! In this notebook, you'll implement object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: [Redmon et al., 2016](https://arxiv.org/abs/1506.02640) and [Redmon and Farhadi, 2016](https://arxiv.org/abs/1612.08242). \n\n**By the end of this assignment, you'll be able to**:\n\n- Detect objects in a car detection dataset\n- Implement non-max suppression to increase accuracy\n- Implement intersection over union\n- Handle bounding boxes, a type of image annotation popular in deep learning ", "_____no_output_____" ], [ "## Table of Contents\n\n- [Packages](#0)\n- [1 - Problem Statement](#1)\n- [2 - YOLO](#2)\n - [2.1 - Model Details](#2-1)\n - [2.2 - Filtering with a Threshold on Class Scores](#2-2)\n - [Exercise 1 - yolo_filter_boxes](#ex-1)\n - [2.3 - Non-max Suppression](#2-3)\n - [Exercise 2 - iou](#ex-2)\n - [2.4 - YOLO Non-max Suppression](#2-4)\n - [Exercise 3 - yolo_non_max_suppression](#ex-3)\n - [2.5 - Wrapping Up the Filtering](#2-5)\n - [Exercise 4 - yolo_eval](#ex-4)\n- [3 - Test YOLO Pre-trained Model on Images](#3)\n - [3.1 - Defining Classes, Anchors and Image Shape](#3-1)\n - [3.2 - Loading a Pre-trained Model](#3-2)\n - [3.3 - Convert Output of the Model to Usable Bounding Box Tensors](#3-3)\n - [3.4 - Filtering Boxes](#3-4)\n - [3.5 - Run the YOLO on an Image](#3-5)\n- [4 - Summary for YOLO](#4)\n- [5 - References](#5)", "_____no_output_____" ], [ "<a name='0'></a>\n## Packages\n\nRun the following cell to load the packages and dependencies that will come in handy as you build the object detector!", "_____no_output_____" ] ], [ [ "import argparse\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import imshow\nimport scipy.io\nimport scipy.misc\nimport numpy as np\nimport pandas as pd\nimport PIL\nfrom PIL import ImageFont, ImageDraw, Image\nimport tensorflow as tf\nfrom 
tensorflow.python.framework.ops import EagerTensor\n\nfrom tensorflow.keras.models import load_model\nfrom yad2k.models.keras_yolo import yolo_head\nfrom yad2k.utils.utils import draw_boxes, get_colors_for_classes, scale_boxes, read_classes, read_anchors, preprocess_image\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "<a name='1'></a>\n## 1 - Problem Statement\n\nYou are working on a self-driving car. Go you! As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds as you drive around. \n\n<center>\n<video width=\"400\" height=\"200\" src=\"nb_images/road_video_compressed2.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n<caption><center> Pictures taken from a car-mounted camera while driving around Silicon Valley. <br> Dataset provided by <a href=\"https://www.drive.ai/\">drive.ai</a>.\n</center></caption>\n\nYou've gathered all these images into a folder and labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like:\n\n<img src=\"nb_images/box_label.png\" style=\"width:500px;height:250;\">\n<caption><center> <u><b>Figure 1</u></b>: Definition of a box<br> </center></caption>\n\nIf there are 80 classes you want the object detector to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1, and the rest of which are 0. The video lectures used the latter representation; in this notebook, you'll use both representations, depending on which is more convenient for a particular step. \n\nIn this exercise, you'll discover how YOLO (\"You Only Look Once\") performs object detection, and then apply it to car detection. 
Because the YOLO model is very computationally expensive to train, the pre-trained weights are already loaded for you to use. ", "_____no_output_____" ], [ "<a name='2'></a>\n## 2 - YOLO", "_____no_output_____" ], [ "\"You Only Look Once\" (YOLO) is a popular algorithm because it achieves high accuracy while also being able to run in real time. This algorithm \"only looks once\" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes.\n\n<a name='2-1'></a>\n### 2.1 - Model Details\n\n#### Inputs and outputs\n- The **input** is a batch of images, and each image has the shape (m, 608, 608, 3)\n- The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers. \n\n#### Anchor Boxes\n* Anchor boxes are chosen by exploring the training data to choose reasonable height/width ratios that represent the different classes. For this assignment, 5 anchor boxes were chosen for you (to cover the 80 classes), and stored in the file './model_data/yolo_anchors.txt'\n* The dimension for anchor boxes is the second to last dimension in the encoding: $(m, n_H,n_W,anchors,classes)$.\n* The YOLO architecture is: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85). \n\n\n#### Encoding\nLet's look in greater detail at what this encoding represents. 
\n\n<img src=\"nb_images/architecture.png\" style=\"width:700px;height:400;\">\n<caption><center> <u><b> Figure 2 </u></b>: Encoding architecture for YOLO<br> </center></caption>\n\nIf the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object.", "_____no_output_____" ], [ "Since you're using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height.\n\nFor simplicity, you'll flatten the last two dimensions of the shape (19, 19, 5, 85) encoding, so the output of the Deep CNN is (19, 19, 425).\n\n<img src=\"nb_images/flatten.png\" style=\"width:700px;height:400;\">\n<caption><center> <u><b> Figure 3 </u></b>: Flattening the last two last dimensions<br> </center></caption>", "_____no_output_____" ], [ "#### Class score\n\nNow, for each box (of each cell) you'll compute the following element-wise product and extract a probability that the box contains a certain class. \nThe class score is $score_{c,i} = p_{c} \\times c_{i}$: the probability that there is an object $p_{c}$ times the probability that the object is a certain class $c_{i}$.\n\n<img src=\"nb_images/probability_extraction.png\" style=\"width:700px;height:400;\">\n<caption><center> <u><b>Figure 4</u></b>: Find the class detected by each box<br> </center></caption>\n\n##### Example of figure 4\n* In figure 4, let's say for box 1 (cell 1), the probability that an object exists is $p_{1}=0.60$. So there's a 60% chance that an object exists in box 1 (cell 1). \n* The probability that the object is the class \"category 3 (a car)\" is $c_{3}=0.73$. \n* The score for box 1 and for category \"3\" is $score_{1,3}=0.60 \\times 0.73 = 0.44$. \n* Let's say you calculate the score for all 80 classes in box 1, and find that the score for the car class (class 3) is the maximum. 
So you'll assign the score 0.44 and class \"3\" to this box \"1\".\n\n#### Visualizing classes\nHere's one way to visualize what YOLO is predicting on an image:\n\n- For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across the 80 classes, one maximum for each of the 5 anchor boxes).\n- Color that grid cell according to what object that grid cell considers the most likely.\n\nDoing this results in this picture: \n\n<img src=\"nb_images/proba_map.png\" style=\"width:300px;height:300;\">\n<caption><center> <u><b>Figure 5</u></b>: Each one of the 19x19 grid cells is colored according to which class has the largest predicted probability in that cell.<br> </center></caption>\n\nNote that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm. ", "_____no_output_____" ], [ "#### Visualizing bounding boxes\nAnother way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this: \n\n<img src=\"nb_images/anchor_map.png\" style=\"width:200px;height:200;\">\n<caption><center> <u><b>Figure 6</u></b>: Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. <br> </center></caption>\n\n#### Non-Max suppression\nIn the figure above, the only boxes plotted are ones for which the model had assigned a high probability, but this is still too many boxes. You'd like to reduce the algorithm's output to a much smaller number of detected objects. \n\nTo do so, you'll use **non-max suppression**. Specifically, you'll carry out these steps: \n- Get rid of boxes with a low score. 
Meaning, the box is not very confident about detecting a class, either due to the low probability of any object, or low probability of this particular class.\n- Select only one box when several boxes overlap with each other and detect the same object.", "_____no_output_____" ], [ "<a name='2-2'></a>\n### 2.2 - Filtering with a Threshold on Class Scores\n\nYou're going to first apply a filter by thresholding, meaning you'll get rid of any box for which the class \"score\" is less than a chosen threshold. \n\nThe model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It's convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables: \n- `box_confidence`: tensor of shape $(19, 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells.\n- `boxes`: tensor of shape $(19, 19, 5, 4)$ containing the midpoint and dimensions $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes in each cell.\n- `box_class_probs`: tensor of shape $(19, 19, 5, 80)$ containing the \"class probabilities\" $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell.\n\n<a name='ex-1'></a>\n### Exercise 1 - yolo_filter_boxes\n\nImplement `yolo_filter_boxes()`.\n1. Compute box scores by doing the elementwise product as described in Figure 4 ($p \\times c$). \nThe following code may help you choose the right operator: \n```python\na = np.random.randn(19, 19, 5, 1)\nb = np.random.randn(19, 19, 5, 80)\nc = a * b # shape of c will be (19, 19, 5, 80)\n```\nThis is an example of **broadcasting** (multiplying vectors of different sizes).\n\n2. 
For each box, find:\n - the index of the class with the maximum box score\n - the corresponding box score\n \n **Useful References**\n * [tf.math.argmax](https://www.tensorflow.org/api_docs/python/tf/math/argmax)\n * [tf.math.reduce_max](https://www.tensorflow.org/api_docs/python/tf/math/reduce_max)\n\n **Helpful Hints**\n * For the `axis` parameter of `argmax` and `reduce_max`, if you want to select the **last** axis, one way to do so is to set `axis=-1`. This is similar to Python array indexing, where you can select the last position of an array using `arrayname[-1]`.\n * Applying `reduce_max` normally collapses the axis for which the maximum is applied. `keepdims=False` is the default option, and allows that dimension to be removed. You don't need to keep the last dimension after applying the maximum here.\n\n\n3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be `True` for the boxes you want to keep. \n\n4. Use TensorFlow to apply the mask to `box_class_scores`, `boxes` and `box_classes` to filter out the boxes you don't want. You should be left with just the subset of boxes you want to keep. 
\n\n **One more useful reference**:\n * [tf.boolean mask](https://www.tensorflow.org/api_docs/python/tf/boolean_mask) \n\n **And one more helpful hint**: :) \n * For the `tf.boolean_mask`, you can keep the default `axis=None`.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: yolo_filter_boxes\n\ndef yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold = 0.6):\n \"\"\"Filters YOLO boxes by thresholding on object and class confidence.\n \n Arguments:\n boxes -- tensor of shape (19, 19, 5, 4)\n box_confidence -- tensor of shape (19, 19, 5, 1)\n box_class_probs -- tensor of shape (19, 19, 5, 80)\n threshold -- real value, if [ highest class probability score < threshold],\n then get rid of the corresponding box\n\n Returns:\n scores -- tensor of shape (None,), containing the class probability score for selected boxes\n boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes\n classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes\n\n Note: \"None\" is here because you don't know the exact number of selected boxes, as it depends on the threshold. \n For example, the actual output size of scores would be (10,) if there are 10 boxes.\n \"\"\"\n \n x = 10\n y = tf.constant(100)\n \n # YOUR CODE STARTS HERE\n\n # Step 1: Compute box scores\n ##(≈ 1 line)\n box_scores = box_class_probs*box_confidence\n \n # Step 2: Find the box_classes using the max box_scores, keep track of the corresponding score\n ##(≈ 2 lines)\n box_classes = tf.math.argmax(box_scores,axis=-1)\n box_class_scores = tf.math.reduce_max(box_scores,axis=-1)\n \n # Step 3: Create a filtering mask based on \"box_class_scores\" by using \"threshold\". 
The mask should have the\n # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)\n ## (≈ 1 line)\n filtering_mask = (box_class_scores >= threshold)\n \n # Step 4: Apply the mask to box_class_scores, boxes and box_classes\n ## (≈ 3 lines)\n scores = tf.boolean_mask(box_class_scores,filtering_mask)\n boxes = tf.boolean_mask(boxes,filtering_mask)\n classes = tf.boolean_mask(box_classes,filtering_mask)\n \n # YOUR CODE ENDS HERE\n \n return scores, boxes, classes", "_____no_output_____" ], [ "tf.random.set_seed(10)\nbox_confidence = tf.random.normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1)\nboxes = tf.random.normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1)\nbox_class_probs = tf.random.normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)\nscores, boxes, classes = yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold = 0.5)\nprint(\"scores[2] = \" + str(scores[2].numpy()))\nprint(\"boxes[2] = \" + str(boxes[2].numpy()))\nprint(\"classes[2] = \" + str(classes[2].numpy()))\nprint(\"scores.shape = \" + str(scores.shape))\nprint(\"boxes.shape = \" + str(boxes.shape))\nprint(\"classes.shape = \" + str(classes.shape))\n\nassert type(scores) == EagerTensor, \"Use tensorflow functions\"\nassert type(boxes) == EagerTensor, \"Use tensorflow functions\"\nassert type(classes) == EagerTensor, \"Use tensorflow functions\"\n\nassert scores.shape == (1789,), \"Wrong shape in scores\"\nassert boxes.shape == (1789, 4), \"Wrong shape in boxes\"\nassert classes.shape == (1789,), \"Wrong shape in classes\"\n\nassert np.isclose(scores[2].numpy(), 9.270486), \"Values are wrong on scores\"\nassert np.allclose(boxes[2].numpy(), [4.6399336, 3.2303846, 4.431282, -2.202031]), \"Values are wrong on boxes\"\nassert classes[2].numpy() == 8, \"Values are wrong on classes\"\n\nprint(\"\\033[92m All tests passed!\")\n", "_____no_output_____" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n <b>scores[2]</b>\n </td>\n 
<td>\n 9.270486\n </td>\n </tr>\n <tr>\n <td>\n <b>boxes[2]</b>\n </td>\n <td>\n [ 4.6399336 3.2303846 4.431282 -2.202031 ]\n </td>\n </tr>\n <tr>\n <td>\n <b>classes[2]</b>\n </td>\n <td>\n 8\n </td>\n </tr>\n <tr>\n <td>\n <b>scores.shape</b>\n </td>\n <td>\n (1789,)\n </td>\n </tr>\n <tr>\n <td>\n <b>boxes.shape</b>\n </td>\n <td>\n (1789, 4)\n </td>\n </tr>\n <tr>\n <td>\n <b>classes.shape</b>\n </td>\n <td>\n (1789,)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "**Note** In the test for `yolo_filter_boxes`, you're using random numbers to test the function. In real data, the `box_class_probs` would contain non-zero values between 0 and 1 for the probabilities. The box coordinates in `boxes` would also be chosen so that lengths and heights are non-negative.", "_____no_output_____" ], [ "<a name='2-3'></a>\n### 2.3 - Non-max Suppression\n\nEven after filtering by thresholding over the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS). ", "_____no_output_____" ], [ "<img src=\"nb_images/non-max-suppression.png\" style=\"width:500px;height:400;\">\n<caption><center> <u> <b>Figure 7</b> </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probability) of the 3 boxes. <br> </center></caption>\n", "_____no_output_____" ], [ "Non-max suppression uses the very important function called **\"Intersection over Union\"**, or IoU.\n<img src=\"nb_images/iou.png\" style=\"width:500px;height:400;\">\n<caption><center> <u> <b>Figure 8</b> </u>: Definition of \"Intersection over Union\". <br> </center></caption>\n\n<a name='ex-2'></a>\n### Exercise 2 - iou\n\nImplement `iou()` \n\nSome hints:\n- This code uses the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) is the lower-right corner. 
In other words, the (0,0) origin starts at the top left corner of the image. As x increases, you move to the right. As y increases, you move down.\n- For this exercise, a box is defined using its two corners: upper left $(x_1, y_1)$ and lower right $(x_2,y_2)$, instead of using the midpoint, height and width. This makes it a bit easier to calculate the intersection.\n- To calculate the area of a rectangle, multiply its height $(y_2 - y_1)$ by its width $(x_2 - x_1)$. Since $(x_1,y_1)$ is the top left and $x_2,y_2$ are the bottom right, these differences should be non-negative.\n- To find the **intersection** of the two boxes $(xi_{1}, yi_{1}, xi_{2}, yi_{2})$: \n - Feel free to draw some examples on paper to clarify this conceptually.\n - The top left corner of the intersection $(xi_{1}, yi_{1})$ is found by comparing the top left corners $(x_1, y_1)$ of the two boxes and finding a vertex that has an x-coordinate that is closer to the right, and y-coordinate that is closer to the bottom.\n - The bottom right corner of the intersection $(xi_{2}, yi_{2})$ is found by comparing the bottom right corners $(x_2,y_2)$ of the two boxes and finding a vertex whose x-coordinate is closer to the left, and the y-coordinate that is closer to the top.\n - The two boxes **may have no intersection**. You can detect this if the intersection coordinates you calculate end up being the top right and/or bottom left corners of an intersection box. Another way to think of this is if you calculate the height $(y_2 - y_1)$ or width $(x_2 - x_1)$ and find that at least one of these lengths is negative, then there is no intersection (intersection area is zero). \n - The two boxes may intersect at the **edges or vertices**, in which case the intersection area is still zero. 
This happens when either the height or width (or both) of the calculated intersection is zero.\n\n\n**Additional Hints**\n\n- `xi1` = **max**imum of the x1 coordinates of the two boxes\n- `yi1` = **max**imum of the y1 coordinates of the two boxes\n- `xi2` = **min**imum of the x2 coordinates of the two boxes\n- `yi2` = **min**imum of the y2 coordinates of the two boxes\n- `inter_area` = You can use `max(height, 0)` and `max(width, 0)`\n", "_____no_output_____" ] ], [ [ "#########################################################################\n######################## USELESS BELOW ##################################\n#########################################################################", "_____no_output_____" ], [ "# GRADED FUNCTION: iou\n\ndef iou(box1, box2):\n \"\"\"Implement the intersection over union (IoU) between box1 and box2\n    \n Arguments:\n box1 -- first box, list object with coordinates (box1_x1, box1_y1, box1_x2, box_1_y2)\n    box2 -- second box, list object with coordinates (box2_x1, box2_y1, box2_x2, box2_y2)\n    \"\"\"\n\n\n (box1_x1, box1_y1, box1_x2, box1_y2) = box1\n (box2_x1, box2_y1, box2_x2, box2_y2) = box2\n\n # YOUR CODE STARTS HERE\n \n # Calculate the (yi1, xi1, yi2, xi2) coordinates of the intersection of box1 and box2. 
Calculate its Area.\n ##(≈ 7 lines)\n xi1 = max(box1_x1,box2_x1)\n yi1 = max(box1_y1,box2_y1)\n xi2 = min(box1_x2,box2_x2)\n yi2 = min(box1_y2,box2_y2)\n inter_width = max(0,yi2 - yi1)\n inter_height = max(0,xi2 - xi1)\n inter_area = inter_width*inter_height\n\n # Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B)\n ## (≈ 3 lines)\n box1_area = (box1_x2-box1_x1)*((box1_y2-box1_y1))\n box2_area = (box2_x2-box2_x1)*((box2_y2-box2_y1))\n union_area = box1_area + box2_area - inter_area\n \n # compute the IoU\n ## (≈ 1 line)\n iou = inter_area/union_area\n \n # YOUR CODE ENDS HERE\n \n return iou", "_____no_output_____" ], [ "## Test case 1: boxes intersect\nbox1 = (2, 1, 4, 3)\nbox2 = (1, 2, 3, 4)\n\nprint(\"iou for intersecting boxes = \" + str(iou(box1, box2)))\nassert iou(box1, box2) < 1, \"The intersection area must be always smaller or equal than the union area.\"\nassert np.isclose(iou(box1, box2), 0.14285714), \"Wrong value. Check your implementation. Problem with intersecting boxes\"\n\n## Test case 2: boxes do not intersect\nbox1 = (1,2,3,4)\nbox2 = (5,6,7,8)\nprint(\"iou for non-intersecting boxes = \" + str(iou(box1,box2)))\nassert iou(box1, box2) == 0, \"Intersection must be 0\"\n\n## Test case 3: boxes intersect at vertices only\nbox1 = (1,1,2,2)\nbox2 = (2,2,3,3)\nprint(\"iou for boxes that only touch at vertices = \" + str(iou(box1,box2)))\nassert iou(box1, box2) == 0, \"Intersection at vertices must be 0\"\n\n## Test case 4: boxes intersect at edge only\nbox1 = (1,1,3,3)\nbox2 = (2,3,3,4)\nprint(\"iou for boxes that only touch at edges = \" + str(iou(box1,box2)))\nassert iou(box1, box2) == 0, \"Intersection at edges must be 0\"\n\nprint(\"\\033[92m All tests passed!\")\n\n", "_____no_output_____" ] ], [ [ "**Expected Output**:\n\n```\niou for intersecting boxes = 0.14285714285714285\niou for non-intersecting boxes = 0.0\niou for boxes that only touch at vertices = 0.0\niou for boxes that only touch at edges = 0.0\n```", 
"_____no_output_____" ], [ "<a name='2-4'></a>\n### 2.4 - YOLO Non-max Suppression\n\nYou are now ready to implement non-max suppression. The key steps are: \n1. Select the box that has the highest score.\n2. Compute the overlap of this box with all other boxes, and remove boxes that overlap significantly (iou >= `iou_threshold`).\n3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box.\n\nThis will remove all boxes that have a large overlap with the selected boxes. Only the \"best\" boxes remain.\n\n<a name='ex-3'></a>\n### Exercise 3 - yolo_non_max_suppression\n\nImplement `yolo_non_max_suppression()` using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation):\n\n**Reference documentation**: \n\n- [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression)\n```\ntf.image.non_max_suppression(\n boxes,\n scores,\n max_output_size,\n iou_threshold=0.5,\n name=None\n)\n```\nNote that in the version of TensorFlow used here, there is no parameter `score_threshold` (it's shown in the documentation for the latest version) so trying to set this value will result in an error message: *got an unexpected keyword argument `score_threshold`.*\n\n- [tf.gather()](https://www.tensorflow.org/api_docs/python/tf/gather)\n```\nkeras.gather(\n reference,\n indices\n)\n```", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: yolo_non_max_suppression\n\ndef yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):\n \"\"\"\n Applies Non-max suppression (NMS) to set of boxes\n \n Arguments:\n scores -- tensor of shape (None,), output of yolo_filter_boxes()\n boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)\n classes -- tensor of shape (None,), output of yolo_filter_boxes()\n 
max_boxes -- integer, maximum number of predicted boxes you'd like\n iou_threshold -- real value, \"intersection over union\" threshold used for NMS filtering\n \n Returns:\n scores -- tensor of shape (, None), predicted score for each box\n boxes -- tensor of shape (4, None), predicted box coordinates\n classes -- tensor of shape (, None), predicted class for each box\n \n Note: The \"None\" dimension of the output tensors has obviously to be less than max_boxes. Note also that this\n function will transpose the shapes of scores, boxes, classes. This is made for convenience.\n \"\"\"\n \n max_boxes_tensor = tf.Variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression()\n \n # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep\n ##(≈ 1 line)\n nms_indices = tf.image.non_max_suppression(boxes,scores,max_boxes_tensor,iou_threshold)\n \n # Use tf.gather() to select only nms_indices from scores, boxes and classes\n ##(≈ 3 lines)\n scores = tf.gather(scores,nms_indices)\n boxes = tf.gather(boxes,nms_indices)\n classes = tf.gather(classes,nms_indices)\n # YOUR CODE STARTS HERE\n \n \n # YOUR CODE ENDS HERE\n\n \n return scores, boxes, classes", "_____no_output_____" ], [ "tf.random.set_seed(10)\nscores = tf.random.normal([54,], mean=1, stddev=4, seed = 1)\nboxes = tf.random.normal([54, 4], mean=1, stddev=4, seed = 1)\nclasses = tf.random.normal([54,], mean=1, stddev=4, seed = 1)\nscores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)\n\nassert type(scores) == EagerTensor, \"Use tensoflow functions\"\nprint(\"scores[2] = \" + str(scores[2].numpy()))\nprint(\"boxes[2] = \" + str(boxes[2].numpy()))\nprint(\"classes[2] = \" + str(classes[2].numpy()))\nprint(\"scores.shape = \" + str(scores.numpy().shape))\nprint(\"boxes.shape = \" + str(boxes.numpy().shape))\nprint(\"classes.shape = \" + str(classes.numpy().shape))\n\nassert type(scores) == EagerTensor, \"Use tensoflow 
functions\"\nassert type(boxes) == EagerTensor, \"Use tensoflow functions\"\nassert type(classes) == EagerTensor, \"Use tensoflow functions\"\n\nassert scores.shape == (10,), \"Wrong shape\"\nassert boxes.shape == (10, 4), \"Wrong shape\"\nassert classes.shape == (10,), \"Wrong shape\"\n\nassert np.isclose(scores[2].numpy(), 8.147684), \"Wrong value on scores\"\nassert np.allclose(boxes[2].numpy(), [ 6.0797963, 3.743308, 1.3914018, -0.34089637]), \"Wrong value on boxes\"\nassert np.isclose(classes[2].numpy(), 1.7079165), \"Wrong value on classes\"\n\nprint(\"\\033[92m All tests passed!\")\n", "_____no_output_____" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n <b>scores[2]</b>\n </td>\n <td>\n 8.147684\n </td>\n </tr>\n <tr>\n <td>\n <b>boxes[2]</b>\n </td>\n <td>\n [ 6.0797963 3.743308 1.3914018 -0.34089637]\n </td>\n </tr>\n <tr>\n <td>\n <b>classes[2]</b>\n </td>\n <td>\n 1.7079165\n </td>\n </tr>\n <tr>\n <td>\n <b>scores.shape</b>\n </td>\n <td>\n (10,)\n </td>\n </tr>\n <tr>\n <td>\n <b>boxes.shape</b>\n </td>\n <td>\n (10, 4)\n </td>\n </tr>\n <tr>\n <td>\n <b>classes.shape</b>\n </td>\n <td>\n (10,)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "<a name='2-5'></a>\n### 2.5 - Wrapping Up the Filtering\n\nIt's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented. \n\n<a name='ex-4'></a>\n### Exercise 4 - yolo_eval\n\nImplement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. 
YOLO converts between a few such formats at different times, using the following functions (which are provided): \n\n```python\nboxes = yolo_boxes_to_corners(box_xy, box_wh) \n```\nwhich converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes`\n```python\nboxes = scale_boxes(boxes, image_shape)\n```\nYOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image -- for example, the car detection dataset had 720x1280 images -- this step rescales the boxes so that they can be plotted on top of the original 720x1280 image. \n\nDon't worry about these two functions; you'll see where they need to be called below. ", "_____no_output_____" ] ], [ [ "def yolo_boxes_to_corners(box_xy, box_wh):\n \"\"\"Convert YOLO box predictions to bounding box corners.\"\"\"\n box_mins = box_xy - (box_wh / 2.)\n box_maxes = box_xy + (box_wh / 2.)\n\n return tf.keras.backend.concatenate([\n box_mins[..., 1:2], # y_min\n box_mins[..., 0:1], # x_min\n box_maxes[..., 1:2], # y_max\n box_maxes[..., 0:1] # x_max\n ])\n", "_____no_output_____" ], [ "# GRADED FUNCTION: yolo_eval\n\ndef yolo_eval(yolo_outputs, image_shape = (720, 1280), max_boxes=10, score_threshold=.6, iou_threshold=.5):\n \"\"\"\n Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.\n \n Arguments:\n yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:\n box_xy: tensor of shape (None, 19, 19, 5, 2)\n box_wh: tensor of shape (None, 19, 19, 5, 2)\n box_confidence: tensor of shape (None, 19, 19, 5, 1)\n box_class_probs: tensor of shape (None, 19, 19, 5, 80)\n image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) 
(has to be float32 dtype)\n max_boxes -- integer, maximum number of predicted boxes you'd like\n score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box\n iou_threshold -- real value, \"intersection over union\" threshold used for NMS filtering\n \n Returns:\n scores -- tensor of shape (None, ), predicted score for each box\n boxes -- tensor of shape (None, 4), predicted box coordinates\n classes -- tensor of shape (None,), predicted class for each box\n \"\"\"\n \n \n # Retrieve outputs of the YOLO model (≈1 line)\n box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs\n\n # Convert boxes to be ready for filtering functions (convert boxes box_xy and box_wh to corner coordinates)\n boxes = yolo_boxes_to_corners(box_xy, box_wh)\n\n # Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line)\n scores, boxes, classes = yolo_filter_boxes(boxes, box_confidence, box_class_probs, score_threshold)\n \n # Scale boxes back to original image shape (720, 1280 or whatever)\n boxes = scale_boxes(boxes, image_shape) # Network was trained to run on 608x608 images\n\n # Use one of the functions you've implemented to perform Non-max suppression with \n # maximum number of boxes set to max_boxes and a threshold of iou_threshold (≈1 line)\n scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes, iou_threshold)\n \n # YOUR CODE STARTS HERE\n \n \n # YOUR CODE ENDS HERE\n \n return scores, boxes, classes", "_____no_output_____" ], [ "tf.random.set_seed(10)\nyolo_outputs = (tf.random.normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),\n tf.random.normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),\n tf.random.normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1),\n tf.random.normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1))\nscores, boxes, classes = yolo_eval(yolo_outputs)\nprint(\"scores[2] = \" + 
str(scores[2].numpy()))\nprint(\"boxes[2] = \" + str(boxes[2].numpy()))\nprint(\"classes[2] = \" + str(classes[2].numpy()))\nprint(\"scores.shape = \" + str(scores.numpy().shape))\nprint(\"boxes.shape = \" + str(boxes.numpy().shape))\nprint(\"classes.shape = \" + str(classes.numpy().shape))\n\nassert type(scores) == EagerTensor, \"Use tensoflow functions\"\nassert type(boxes) == EagerTensor, \"Use tensoflow functions\"\nassert type(classes) == EagerTensor, \"Use tensoflow functions\"\n\nassert scores.shape == (10,), \"Wrong shape\"\nassert boxes.shape == (10, 4), \"Wrong shape\"\nassert classes.shape == (10,), \"Wrong shape\"\n \nassert np.isclose(scores[2].numpy(), 171.60194), \"Wrong value on scores\"\nassert np.allclose(boxes[2].numpy(), [-1240.3483, -3212.5881, -645.78, 2024.3052]), \"Wrong value on boxes\"\nassert np.isclose(classes[2].numpy(), 16), \"Wrong value on classes\"\n \nprint(\"\\033[92m All tests passed!\")", "_____no_output_____" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n <b>scores[2]</b>\n </td>\n <td>\n 171.60194\n </td>\n </tr>\n <tr>\n <td>\n <b>boxes[2]</b>\n </td>\n <td>\n [-1240.3483 -3212.5881 -645.78 2024.3052]\n </td>\n </tr>\n <tr>\n <td>\n <b>classes[2]</b>\n </td>\n <td>\n 16\n </td>\n </tr> \n <tr>\n <td>\n <b>scores.shape</b>\n </td>\n <td>\n (10,)\n </td>\n </tr>\n <tr>\n <td>\n <b>boxes.shape</b>\n </td>\n <td>\n (10, 4)\n </td>\n </tr>\n <tr>\n <td>\n <b>classes.shape</b>\n </td>\n <td>\n (10,)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "<a name='3'></a>\n## 3 - Test YOLO Pre-trained Model on Images\n\nIn this section, you are going to use a pre-trained model and test it on the car detection dataset. ", "_____no_output_____" ], [ "<a name='3-1'></a>\n### 3.1 - Defining Classes, Anchors and Image Shape\n\nYou're trying to detect 80 classes, and are using 5 anchor boxes. The information on the 80 classes and 5 boxes is gathered in two files: \"coco_classes.txt\" and \"yolo_anchors.txt\". 
You'll read class names and anchors from text files. The car detection dataset has 720x1280 images, which are pre-processed into 608x608 images.", "_____no_output_____" ] ], [ [ "class_names = read_classes(\"model_data/coco_classes.txt\")\nanchors = read_anchors(\"model_data/yolo_anchors.txt\")\nmodel_image_size = (608, 608) # Same as yolo_model input layer size", "_____no_output_____" ] ], [ [ "<a name='3-2'></a>\n### 3.2 - Loading a Pre-trained Model\n\nTraining a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. You are going to load an existing pre-trained Keras YOLO model stored in \"yolo.h5\". These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the \"YOLOv2\" model, but are simply referred to as \"YOLO\" in this notebook.\n\nRun the cell below to load the model from this file.", "_____no_output_____" ] ], [ [ "yolo_model = load_model(\"model_data/\", compile=False)", "_____no_output_____" ] ], [ [ "This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains:", "_____no_output_____" ] ], [ [ "yolo_model.summary()", "_____no_output_____" ] ], [ [ "**Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do -- this is fine!\n\n**Reminder**: This model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2).", "_____no_output_____" ], [ "<a name='3-3'></a>\n### 3.3 - Convert Output of the Model to Usable Bounding Box Tensors\n\nThe output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. 
You will need to call `yolo_head` to format the encoding of the model you got from `yolo_model` into something decipherable:\n\n`yolo_model_outputs = yolo_model(image_data)`\n\n`yolo_outputs = yolo_head(yolo_model_outputs, anchors, len(class_names))`\n\nThe variable `yolo_outputs` will be defined as a set of 4 tensors that you can then use as input by your yolo_eval function. If you are curious about how yolo_head is implemented, you can find the function definition in the file `keras_yolo.py`. The file is also located in your workspace in this path: `yad2k/models/keras_yolo.py`.", "_____no_output_____" ], [ "<a name='3-4'></a>\n### 3.4 - Filtering Boxes\n\n`yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. To perform filtering and select only the best boxes, you will call `yolo_eval`, which you had previously implemented, to do so:\n\n out_scores, out_boxes, out_classes = yolo_eval(yolo_outputs, [image.size[1], image.size[0]], 10, 0.3, 0.5)", "_____no_output_____" ], [ "<a name='3-5'></a>\n### 3.5 - Run the YOLO on an Image\n\nLet the fun begin! You will create a graph that can be summarized as follows:\n\n`yolo_model.input` is given to `yolo_model`. The model is used to compute the output `yolo_model.output`\n`yolo_model.output` is processed by `yolo_head`. It gives you `yolo_outputs`\n`yolo_outputs` goes through a filtering function, `yolo_eval`. It outputs your predictions: `out_scores`, `out_boxes`, `out_classes`.", "_____no_output_____" ], [ "Now, we have implemented for you the `predict(image_file)` function, which runs the graph to test YOLO on an image to compute `out_scores`, `out_boxes`, `out_classes`.\n\nThe code below also uses the following function:\n\n image, image_data = preprocess_image(\"images/\" + image_file, model_image_size = (608, 608))\nwhich opens the image file and scales, reshapes and normalizes the image. 
It returns the outputs:\n\n image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it.\n image_data: a numpy-array representing the image. This will be the input to the CNN.", "_____no_output_____" ] ], [ [ "def predict(image_file):\n \"\"\"\n Runs the graph to predict boxes for \"image_file\". Prints and plots the predictions.\n \n Arguments:\n image_file -- name of an image stored in the \"images\" folder.\n \n Returns:\n out_scores -- tensor of shape (None, ), scores of the predicted boxes\n out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes\n out_classes -- tensor of shape (None, ), class index of the predicted boxes\n \n Note: \"None\" actually represents the number of predicted boxes, it varies between 0 and max_boxes. \n \"\"\"\n\n # Preprocess your image\n image, image_data = preprocess_image(\"images/\" + image_file, model_image_size = (608, 608))\n \n yolo_model_outputs = yolo_model(image_data) # It's output is of shape (m, 19, 19, 5, 85) \n # But yolo_eval takes input a tensor contains 4 tensors: box_xy,box_wh, box_confidence & box_class_probs\n yolo_outputs = yolo_head(yolo_model_outputs, anchors, len(class_names))\n \n out_scores, out_boxes, out_classes = yolo_eval(yolo_outputs, [image.size[1], image.size[0]], 10, 0.3, 0.5)\n\n # Print predictions info\n print('Found {} boxes for {}'.format(len(out_boxes), \"images/\" + image_file))\n # Generate colors for drawing bounding boxes.\n colors = get_colors_for_classes(len(class_names))\n # Draw bounding boxes on the image file\n #draw_boxes2(image, out_scores, out_boxes, out_classes, class_names, colors, image_shape)\n draw_boxes(image, out_boxes, out_classes, class_names, out_scores)\n # Save the predicted bounding box on the image\n image.save(os.path.join(\"out\", str(image_file).split('.')[0]+\"_annotated.\" +str(image_file).split('.')[1] ), quality=100)\n # Display the results in the notebook\n output_image = 
Image.open(os.path.join(\"out\", str(image_file).split('.')[0]+\"_annotated.\" +str(image_file).split('.')[1] ))\n imshow(output_image)\n\n return out_scores, out_boxes, out_classes", "_____no_output_____" ] ], [ [ "Run the following cell on the \"test.jpg\" image to verify that your function is correct.", "_____no_output_____" ] ], [ [ "out_scores, out_boxes, out_classes = predict(\"0001.jpg\")", "_____no_output_____" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n <b>Found 10 boxes for images/test.jpg</b>\n </td>\n </tr>\n <tr>\n <td>\n <b>car</b>\n </td>\n <td>\n 0.89 (367, 300) (745, 648)\n </td>\n </tr>\n <tr>\n <td>\n <b>car</b>\n </td>\n <td>\n 0.80 (761, 282) (942, 412)\n </td>\n </tr>\n <tr>\n <td>\n <b>car</b>\n </td>\n <td>\n 0.74 (159, 303) (346, 440)\n </td>\n </tr>\n <tr>\n <td>\n <b>car</b>\n </td>\n <td>\n 0.70 (947, 324) (1280, 705)\n </td>\n </tr>\n <tr>\n <td>\n <b>bus</b>\n </td>\n <td>\n 0.67 (5, 266) (220, 407)\n </td>\n </tr>\n <tr>\n <td>\n <b>car</b>\n </td>\n <td>\n 0.66 (706, 279) (786, 350)\n </td>\n </tr>\n <tr>\n <td>\n <b>car</b>\n </td>\n <td>\n 0.60 (925, 285) (1045, 374)\n </td>\n </tr>\n <tr>\n <td>\n <b>car</b>\n </td>\n <td>\n 0.44 (336, 296) (378, 335)\n </td>\n </tr>\n <tr>\n <td>\n <b>car</b>\n </td>\n <td>\n 0.37 (965, 273) (1022, 292)\n </td>\n </tr>\n <tr>\n <td>\n <b>traffic light</b>\n </td>\n <td>\n 00.36 (681, 195) (692, 214)\n </td>\n </tr>\n</table>", "_____no_output_____" ], [ "The model you've just run is actually able to detect 80 different classes listed in \"coco_classes.txt\". To test the model on your own images:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Write your image's name in the cell above code\n 4. Run the code and see the output of the algorithm!\n\nIf you were to run your session in a for loop over all your images. 
Here's what you would get:\n\n<center>\n<video width=\"400\" height=\"200\" src=\"nb_images/pred_video_compressed2.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n<caption><center> Predictions of the YOLO model on pictures taken from a camera while driving around the Silicon Valley <br> Thanks to <a href=\"https://www.drive.ai/\">drive.ai</a> for providing this dataset! </center></caption>", "_____no_output_____" ], [ "<a name='4'></a>\n## 4 - Summary for YOLO\n\n- Input image (608, 608, 3)\n- The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output. \n- After flattening the last two dimensions, the output is a volume of shape (19, 19, 425):\n - Each cell in a 19x19 grid over the input image gives 425 numbers. \n - 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture. \n - 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and 80 is the number of classes we'd like to detect\n- You then select only few boxes based on:\n - Score-thresholding: throw away boxes that have detected a class with a score less than the threshold\n - Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes\n- This gives you YOLO's final output. ", "_____no_output_____" ], [ "<font color='blue'>\n \n**What you should remember**:\n \n- YOLO is a state-of-the-art object detection model that is fast and accurate\n- It runs an input image through a CNN, which outputs a 19x19x5x85 dimensional volume. \n- The encoding can be seen as a grid where each of the 19x19 cells contains information about 5 boxes.\n- You filter through all the boxes using non-max suppression. 
Specifically: \n - Score thresholding on the probability of detecting a class to keep only accurate (high probability) boxes\n - Intersection over Union (IoU) thresholding to eliminate overlapping boxes\n- Because training a YOLO model from randomly initialized weights is non-trivial and requires a large dataset as well as lot of computation, previously trained model parameters were used in this exercise. If you wish, you can also try fine-tuning the YOLO model with your own dataset, though this would be a fairly non-trivial exercise. ", "_____no_output_____" ], [ "**Congratulations!** You've come to the end of this assignment. \n\nHere's a quick recap of all you've accomplished.\n\nYou've: \n\n- Detected objects in a car detection dataset\n- Implemented non-max suppression to achieve better accuracy\n- Implemented intersection over union as a function of NMS\n- Created usable bounding box tensors from the model's predictions\n\nAmazing work! If you'd like to know more about the origins of these ideas, spend some time on the papers referenced below. ", "_____no_output_____" ], [ "<a name='5'></a>\n## 5 - References\n\nThe ideas presented in this notebook came primarily from the two YOLO papers. The implementation here also took significant inspiration and used many components from Allan Zelener's GitHub repository. The pre-trained weights used in this exercise came from the official YOLO website. 
\n- Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi - [You Only Look Once: Unified, Real-Time Object Detection](https://arxiv.org/abs/1506.02640) (2015)\n- Joseph Redmon, Ali Farhadi - [YOLO9000: Better, Faster, Stronger](https://arxiv.org/abs/1612.08242) (2016)\n- Allan Zelener - [YAD2K: Yet Another Darknet 2 Keras](https://github.com/allanzelener/YAD2K)\n- The official YOLO website (https://pjreddie.com/darknet/yolo/) \n\n### Car detection dataset\n\n<a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\"><img alt=\"Creative Commons License\" style=\"border-width:0\" src=\"https://i.creativecommons.org/l/by/4.0/88x31.png\" /></a><br /><span xmlns:dct=\"http://purl.org/dc/terms/\" property=\"dct:title\">The Drive.ai Sample Dataset</span> (provided by drive.ai) is licensed under a <a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\">Creative Commons Attribution 4.0 International License</a>. Thanks to Brody Huval, Chih Hu and Rahul Patel for providing this data. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d066e25f81f580745649ad91ad51f44ae4220e1b
132
ipynb
Jupyter Notebook
01-Lesson-Plans/06-PyViz/1/Activities/08-Stu_Picture_Perfect/Solved/Challenge/picture_perfect.ipynb
tatianegercina/FinTech
b40687aa362d78674e223eb15ecf14bc59f90b62
[ "ADSL" ]
1
2021-04-13T07:14:34.000Z
2021-04-13T07:14:34.000Z
01-Lesson-Plans/06-PyViz/1/Activities/08-Stu_Picture_Perfect/Solved/Challenge/picture_perfect.ipynb
tatianegercina/FinTech
b40687aa362d78674e223eb15ecf14bc59f90b62
[ "ADSL" ]
2
2021-06-02T03:14:19.000Z
2022-02-11T23:21:24.000Z
01-Lesson-Plans/06-PyViz/1/Activities/08-Stu_Picture_Perfect/Solved/Challenge/picture_perfect.ipynb
tatianegercina/FinTech
b40687aa362d78674e223eb15ecf14bc59f90b62
[ "ADSL" ]
1
2021-05-07T13:26:50.000Z
2021-05-07T13:26:50.000Z
33
75
0.886364
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d066e3b904b326ce51c6a21dda37ac50f83dd9de
15,344
ipynb
Jupyter Notebook
site/en/r2/tutorials/eager/eager_basics.ipynb
SamuelMarks/tensorflow-docs
70eced9841bedea740aa271ba83b3d5654f3ca47
[ "Apache-2.0" ]
1
2019-01-23T14:44:21.000Z
2019-01-23T14:44:21.000Z
site/en/r2/tutorials/eager/eager_basics.ipynb
simoneparisotto/docs
87edfc365e4e17926d3c9cc752eb30670a73049c
[ "Apache-2.0" ]
null
null
null
site/en/r2/tutorials/eager/eager_basics.ipynb
simoneparisotto/docs
87edfc365e4e17926d3c9cc752eb30670a73049c
[ "Apache-2.0" ]
null
null
null
33.871965
627
0.537604
[ [ [ "##### Copyright 2018 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Eager execution basics", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/2/tutorials/eager/eager_basics\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/eager/eager_basics.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/eager/eager_basics.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "This is an introductory TensorFlow tutorial shows how to:\n\n* Import the required package\n* Create and use tensors\n* Use GPU acceleration\n* Demonstrate `tf.data.Dataset`", "_____no_output_____" ] ], [ [ "!pip install tf-nightly-2.0-preview", "_____no_output_____" ] ], [ [ "## Import TensorFlow\n\nImport the `tensorflow` module to get started. 
[Eager execution](../../guide/eager.ipynb) is enabled by default.", "_____no_output_____" ] ], [ [ "import tensorflow as tf", "_____no_output_____" ] ], [ [ "## Tensors\n\nA Tensor is a multi-dimensional array. Similar to NumPy `ndarray` objects, `tf.Tensor` objects have a data type and a shape. Additionally, `tf.Tensor`s can reside in accelerator memory (like a GPU). TensorFlow offers a rich library of operations ([tf.add](https://www.tensorflow.org/api_docs/python/tf/add), [tf.matmul](https://www.tensorflow.org/api_docs/python/tf/matmul), [tf.linalg.inv](https://www.tensorflow.org/api_docs/python/tf/linalg/inv) etc.) that consume and produce `tf.Tensor`s. These operations automatically convert native Python types, for example:\n", "_____no_output_____" ] ], [ [ "print(tf.add(1, 2))\nprint(tf.add([1, 2], [3, 4]))\nprint(tf.square(5))\nprint(tf.reduce_sum([1, 2, 3]))\nprint(tf.io.encode_base64(\"hello world\"))\n\n# Operator overloading is also supported\nprint(tf.square(2) + tf.square(3))", "_____no_output_____" ] ], [ [ "Each `tf.Tensor` has a shape and a datatype:", "_____no_output_____" ] ], [ [ "x = tf.matmul([[1]], [[2, 3]])\nprint(x.shape)\nprint(x.dtype)", "_____no_output_____" ] ], [ [ "The most obvious differences between NumPy arrays and `tf.Tensor`s are:\n\n1. Tensors can be backed by accelerator memory (like GPU, TPU).\n2. Tensors are immutable.", "_____no_output_____" ], [ "### NumPy Compatibility\n\nConverting between a TensorFlow `tf.Tensor`s and a NumPy `ndarray` is easy:\n\n* TensorFlow operations automatically convert NumPy ndarrays to Tensors.\n* NumPy operations automatically convert Tensors to NumPy ndarrays.\n\nTensors are explicitly converted to NumPy ndarrays using their `.numpy()` method. These conversions are typically cheap since the array and `tf.Tensor` share the underlying memory representation, if possible. 
However, sharing the underlying representation isn't always possible since the `tf.Tensor` may be hosted in GPU memory while NumPy arrays are always backed by host memory, and the conversion involves a copy from GPU to host memory.", "_____no_output_____" ] ], [ [ "import numpy as np\n\nndarray = np.ones([3, 3])\n\nprint(\"TensorFlow operations convert numpy arrays to Tensors automatically\")\ntensor = tf.multiply(ndarray, 42)\nprint(tensor)\n\n\nprint(\"And NumPy operations convert Tensors to numpy arrays automatically\")\nprint(np.add(tensor, 1))\n\nprint(\"The .numpy() method explicitly converts a Tensor to a numpy array\")\nprint(tensor.numpy())", "_____no_output_____" ] ], [ [ "## GPU acceleration\n\nMany TensorFlow operations are accelerated using the GPU for computation. Without any annotations, TensorFlow automatically decides whether to use the GPU or CPU for an operation—copying the tensor between CPU and GPU memory, if necessary. Tensors produced by an operation are typically backed by the memory of the device on which the operation executed, for example:", "_____no_output_____" ] ], [ [ "x = tf.random.uniform([3, 3])\n\nprint(\"Is there a GPU available: \"),\nprint(tf.test.is_gpu_available())\n\nprint(\"Is the Tensor on GPU #0: \"),\nprint(x.device.endswith('GPU:0'))", "_____no_output_____" ] ], [ [ "### Device Names\n\nThe `Tensor.device` property provides a fully qualified string name of the device hosting the contents of the tensor. This name encodes many details, such as an identifier of the network address of the host on which this program is executing and the device within that host. This is required for distributed execution of a TensorFlow program. The string ends with `GPU:<N>` if the tensor is placed on the `N`-th GPU on the host.", "_____no_output_____" ], [ "\n\n### Explicit Device Placement\n\nIn TensorFlow, *placement* refers to how individual operations are assigned (placed on) a device for execution. 
As mentioned, when there is no explicit guidance provided, TensorFlow automatically decides which device to execute an operation and copies tensors to that device, if needed. However, TensorFlow operations can be explicitly placed on specific devices using the `tf.device` context manager, for example:", "_____no_output_____" ] ], [ [ "import time\n\ndef time_matmul(x):\n start = time.time()\n for loop in range(10):\n tf.matmul(x, x)\n\n result = time.time()-start\n \n print(\"10 loops: {:0.2f}ms\".format(1000*result))\n\n# Force execution on CPU\nprint(\"On CPU:\")\nwith tf.device(\"CPU:0\"):\n x = tf.random.uniform([1000, 1000])\n assert x.device.endswith(\"CPU:0\")\n time_matmul(x)\n\n# Force execution on GPU #0 if available\nif tf.test.is_gpu_available():\n with tf.device(\"GPU:0\"): # Or GPU:1 for the 2nd GPU, GPU:2 for the 3rd etc.\n x = tf.random.uniform([1000, 1000])\n assert x.device.endswith(\"GPU:0\")\n time_matmul(x)", "_____no_output_____" ] ], [ [ "## Datasets\n\nThis section uses the [`tf.data.Dataset` API](https://www.tensorflow.org/guide/datasets) to build a pipeline for feeding data to your model. The `tf.data.Dataset` API is used to build performant, complex input pipelines from simple, re-usable pieces that will feed your model's training or evaluation loops.", "_____no_output_____" ], [ "### Create a source `Dataset`\n\nCreate a *source* dataset using one of the factory functions like [`Dataset.from_tensors`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensors), [`Dataset.from_tensor_slices`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices), or using objects that read from files like [`TextLineDataset`](https://www.tensorflow.org/api_docs/python/tf/data/TextLineDataset) or [`TFRecordDataset`](https://www.tensorflow.org/api_docs/python/tf/data/TFRecordDataset). 
See the [TensorFlow Dataset guide](https://www.tensorflow.org/guide/datasets#reading_input_data) for more information.", "_____no_output_____" ] ], [ [ "ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6])\n\n# Create a CSV file\nimport tempfile\n_, filename = tempfile.mkstemp()\n\nwith open(filename, 'w') as f:\n f.write(\"\"\"Line 1\nLine 2\nLine 3\n \"\"\")\n\nds_file = tf.data.TextLineDataset(filename)", "_____no_output_____" ] ], [ [ "### Apply transformations\n\nUse the transformations functions like [`map`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map), [`batch`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch), and [`shuffle`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle) to apply transformations to dataset records.", "_____no_output_____" ] ], [ [ "ds_tensors = ds_tensors.map(tf.square).shuffle(2).batch(2)\n\nds_file = ds_file.batch(2)", "_____no_output_____" ] ], [ [ "### Iterate\n\n`tf.data.Dataset` objects support iteration to loop over records:", "_____no_output_____" ] ], [ [ "print('Elements of ds_tensors:')\nfor x in ds_tensors:\n print(x)\n\nprint('\\nElements in ds_file:')\nfor x in ds_file:\n print(x)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d066ef5f3aede2adddeaeefc4c81496b01931be3
6,008
ipynb
Jupyter Notebook
ipynb/TEM_10/ESA/a1_cc.ipynb
kassbohm/wb-snippets
f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe
[ "MIT" ]
null
null
null
ipynb/TEM_10/ESA/a1_cc.ipynb
kassbohm/wb-snippets
f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe
[ "MIT" ]
null
null
null
ipynb/TEM_10/ESA/a1_cc.ipynb
kassbohm/wb-snippets
f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe
[ "MIT" ]
null
null
null
34.136364
117
0.385486
[ [ [ "# Header starts here.\nfrom sympy.physics.units import *\nfrom sympy import *\n\n# Rounding:\nimport decimal\nfrom decimal import Decimal as DX\nfrom copy import deepcopy\ndef iso_round(obj, pv, rounding=decimal.ROUND_HALF_EVEN):\n import sympy\n \"\"\"\n Rounding acc. to DIN EN ISO 80000-1:2013-08\n place value = Rundestellenwert\n \"\"\"\n assert pv in set([\n # place value # round to:\n 1, # 1\n 0.1, # 1st digit after decimal\n 0.01, # 2nd\n 0.001, # 3rd\n 0.0001, # 4th\n 0.00001, # 5th\n 0.000001, # 6th\n 0.0000001, # 7th\n 0.00000001, # 8th\n 0.000000001, # 9th\n 0.0000000001, # 10th\n ])\n objc = deepcopy(obj)\n try:\n tmp = DX(str(float(objc)))\n objc = tmp.quantize(DX(str(pv)), rounding=rounding)\n except:\n for i in range(len(objc)):\n tmp = DX(str(float(objc[i])))\n objc[i] = tmp.quantize(DX(str(pv)), rounding=rounding)\n return objc\n\n# LateX:\nkwargs = {}\nkwargs[\"mat_str\"] = \"bmatrix\"\nkwargs[\"mat_delim\"] = \"\"\n\n# kwargs[\"symbol_names\"] = {FB: \"F^{\\mathsf B}\", }\n\n# Units:\n(k, M, G ) = ( 10**3, 10**6, 10**9 )\n(mm, cm) = ( m/1000, m/100 )\nNewton = kg*m/s**2\nPa = Newton/m**2\nMPa = M*Pa\nGPa = G*Pa\nkN = k*Newton\ndeg = pi/180\n\nhalf = S(1)/2\n\n# Header ends here.\n#\n# https://colab.research.google.com/github/kassbohm/wb-snippets/blob/master/ipynb/TEM_10/ESA/a1_cc.ipynb\n\nF,l = var(\"F,l\")\nR = 3*F/2\nlu = l/sqrt(3)\n\nAh,Av,Bh,Bv,Ch,Cv = var(\"Ah,Av,Bh,Bv,Ch,Cv\")\n\ne1 = Eq(Ah + Bh + F)\ne2 = Eq(Av + Bv - R)\ne3 = Eq(Bv*l - Bh*l - F*l/2 - R*7/18*l)\n\ne4 = Eq(Ch - Bh)\ne5 = Eq(Cv - F - Bv)\ne6 = Eq(F*lu/2 + Bv*lu + Bh*l)\n\neqs = [e1,e2,e3,e4,e5,e6]\nunknowns = [Ah,Av,Bh,Bv,Ch,Cv]\n\n\npprint(\"\\nEquations:\")\nfor e in eqs:\n pprint(e)\n pprint(\"\\n\")\n\n\n# Alternative Solution (also correct):\n# Ah,Av,Bh,Bv,Gh,Gv = var(\"Ah,Av,Bh,Bv,Gh,Gv\")\n#\n# e1 = Eq(Av + Gv - R)\n# e2 = Eq(Ah + F - Gh)\n# e3 = Eq(F/2 + 7*R/18 - Gv - Gh)\n# e4 = Eq(-Gv -F + Bv)\n# e5 = Eq(Gh - Bh)\n# e6 = Eq(Gh - sqrt(3)*F/6 - 
Gv/sqrt(3))\n#\n# eqs = [e1,e2,e3,e4,e5,e6]\n# unknowns = [Ah,Av,Bh,Bv,Gh,Gv]\n\nsol = solve(eqs,unknowns)\n\npprint(\"\\nReactions:\")\npprint(sol)\n\npprint(\"\\nReactions / F (rounded to 0.01):\")\nfor v in sorted(sol,key=default_sort_key):\n pprint(\"\\n\\n\")\n s = sol[v]\n tmp = (s/F)\n tmp = tmp.simplify()\n # pprint(tmp)\n pprint([v, tmp, iso_round(tmp,0.01)])\n\n# Reactions / F:\n#\n# ⎡ 43 19⋅√3 ⎤\n# ⎢Ah, - ── + ─────, -0.42⎥\n# ⎣ 24 24 ⎦\n#\n#\n# ⎡ 3 19⋅√3 ⎤\n# ⎢Av, - ─ + ─────, 1.0⎥\n# ⎣ 8 24 ⎦\n#\n#\n# ⎡ 19⋅√3 19 ⎤\n# ⎢Bh, - ───── + ──, -0.58⎥\n# ⎣ 24 24 ⎦\n#\n#\n# ⎡ 19⋅√3 15 ⎤\n# ⎢Bv, - ───── + ──, 0.5⎥\n# ⎣ 24 8 ⎦\n#\n#\n# ⎡ 19⋅√3 19 ⎤\n# ⎢Ch, - ───── + ──, -0.58⎥\n# ⎣ 24 24 ⎦\n#\n#\n# ⎡ 19⋅√3 23 ⎤\n# ⎢Cv, - ───── + ──, 1.5⎥\n# ⎣ 24 8 ⎦\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
d06704d20a058349a6505d956cad1f8c35c03a63
4,547
ipynb
Jupyter Notebook
05-writing-effective-tests/03-testing-your-code.ipynb
turnpenney/rcsc18_lessons
4c3d3312dc38e6eb7092ef0a231dacf328cdc26c
[ "CC-BY-4.0" ]
1
2018-09-05T08:13:52.000Z
2018-09-05T08:13:52.000Z
05-writing-effective-tests/03-testing-your-code.ipynb
turnpenney/rcsc18_lessons
4c3d3312dc38e6eb7092ef0a231dacf328cdc26c
[ "CC-BY-4.0" ]
null
null
null
05-writing-effective-tests/03-testing-your-code.ipynb
turnpenney/rcsc18_lessons
4c3d3312dc38e6eb7092ef0a231dacf328cdc26c
[ "CC-BY-4.0" ]
23
2018-09-05T08:13:54.000Z
2018-12-18T16:00:05.000Z
29.525974
307
0.574885
[ [ [ "# Tests and Exceptions\n\nIn this lesson we will look at how to make your code more reliable by writing tests. Tests when used cleverly can give you a lot of confidence in your code and therefore your results!\n\nLets start with our (broken) square function from the last lesson:", "_____no_output_____" ], [ "Tests can take many forms, they can compare your code against known results i.e. ones in a published paper, or they can just test that the result of some function returns the type of object you expect or even just check that your code results always stays the same, so you know if something breaks.\n\nA simple test for our square function we defined above might look like:", "_____no_output_____" ], [ "\n<section class=\"callout panel panel-warning\">\n<div class=\"panel-heading\">\n<h2><span class=\"fa fa-thumb-tack\"></span> The `assert` statement</h2>\n</div>\n\n\n<div class=\"panel-body\">\n\n<p>As we will see later, the way to make a test fail is to raise an error. Therefore the <code>assert</code> statement in Python is a useful shortcut when writing tests.</p>\n<p>The <code>assert</code> statement will raise an error if a condition is not satisfied. The general form of the assert statement is:</p>\n<div class=\"codehilite\"><pre><span></span><span class=\"k\">assert</span> <span class=\"n\">condition</span><span class=\"p\">,</span> <span class=\"s2\">&quot;message&quot;</span>\n</pre></div>\n\n\n<p>i.e.</p>\n<div class=\"codehilite\"><pre><span></span><span class=\"k\">assert</span> <span class=\"mi\">5</span> <span class=\"o\">==</span> <span class=\"mi\">6</span><span class=\"p\">,</span> <span class=\"s2\">&quot;Five is not equal to six&quot;</span>\n</pre></div>\n\n</div>\n\n</section>\n", "_____no_output_____" ], [ "We can run our test function the same way that we run any other function. 
Although this dosent scale very well to thousands of tests!", "_____no_output_____" ], [ "\n<section class=\"challange panel panel-success\">\n<div class=\"panel-heading\">\n<h2><span class=\"fa fa-pencil\"></span> Writing Tests</h2>\n</div>\n\n\n<div class=\"panel-body\">\n\n<p>The following function has bugs in it. Write some tests below the function to find all the bugs.</p>\n\n</div>\n\n</section>\n", "_____no_output_____" ], [ "## Running Tests Automatically with pytest\n\nOnce you have a few test functions written, you will probably start getting bored with typing out their names and running them one-by-one. There are a few different modules to help you write and run tests. The one we will be using is called [`pytest`](https://docs.pytest.org/en/latest/). \n\nFor the next section of this session we will be using the two Python (`.py`) files named `askdl.py` and `lsadkj.py`.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0670d8538cfb233b59938f77a808123cbb54204
137,852
ipynb
Jupyter Notebook
notebook/Lecture-6-Introduction-to-programming-for-geoscientists.ipynb
navjotk/test
f6addd12ad61ee25b6bf45afd535207b7b356b22
[ "CC-BY-3.0" ]
null
null
null
notebook/Lecture-6-Introduction-to-programming-for-geoscientists.ipynb
navjotk/test
f6addd12ad61ee25b6bf45afd535207b7b356b22
[ "CC-BY-3.0" ]
null
null
null
notebook/Lecture-6-Introduction-to-programming-for-geoscientists.ipynb
navjotk/test
f6addd12ad61ee25b6bf45afd535207b7b356b22
[ "CC-BY-3.0" ]
null
null
null
66.918447
26,514
0.766387
[ [ [ "# Introduction to programming for Geoscientists through Python\n### [Gerard Gorman](http://www.imperial.ac.uk/people/g.gorman), [Nicolas Barral](http://www.imperial.ac.uk/people/n.barral)\n# Lecture 6: Files, strings, and dictionaries", "_____no_output_____" ], [ "Learning objectives: You will learn how to:\n\n* Read data in from a file\n* Parse strings to extract specific data of interest.\n* Use dictionaries to index data using any type of key.", "_____no_output_____" ] ], [ [ "from client.api.notebook import Notebook\nfrom client.api import assignment\nfrom client.utils import auth\n\nargs = assignment.Settings(server='okpyic.azurewebsites.net')\nok = Notebook('./lecture6.ok', args)", "=====================================================================\nAssignment: Lecture 6\nOK, version v1.13.11\n=====================================================================\n\n" ], [ "var1 = 4\nvar2 = 3\nvar3 = 3\n\ndef funct1():\n return 0\n\ndef funct2():\n return 0", "_____no_output_____" ], [ "ok.grade('lect6-q0')", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRunning tests\n\n---------------------------------------------------------------------\nTest summary\n Passed: 5\n Failed: 0\n[ooooooooook] 100.0% passed\n\n" ] ], [ [ "## Reading data from a plain text file\nWe can read text from a [text file](http://en.wikipedia.org/wiki/Text_file) into strings in a program. This is a common (and simple) way for a program to get input data. 
The basic recipe is:", "_____no_output_____" ] ], [ [ "# Open text file\ninfile = open(\"myfile.dat\", \"r\")\n\n# Read next line:\nline = infile.readline()\n\n# Read the lines in a loop one by one:\nfor line in infile:\n <process line>\n\n# Load all lines into a list of strings:\nlines = infile.readlines()\nfor line in lines:\n <process line>", "_____no_output_____" ] ], [ [ "Let's look at the file [data1.txt](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/data1.txt) (all of the data files in this lecture are stored in the sub-folder *data/* of this notebook library). The files has a column of numbers:", "_____no_output_____" ] ], [ [ "21.8\n18.1\n19\n23\n26\n17.8", "_____no_output_____" ] ], [ [ "The goal is to read this file and calculate the mean:", "_____no_output_____" ] ], [ [ "# Open data file\ninfile = open(\"data/data1.txt\", \"r\")\n\n# Initialise values\nmean = 0\nn=0\n\n# Loop to perform sum\nfor number in infile:\n number = float(number)\n mean = mean + number\n n += 1\n \n# It is good practice to close a file when you are finished. \ninfile.close()\n\n# Calculate the mean.\nmean = mean/n\nprint(mean)", "20.95\n" ] ], [ [ "Let's make this example more interesting. There is a **lot** of data out there for you to discover all kinds of interesting facts - you just need to be interested in learning a little analysis. For this case I have downloaded tidal gauge data for the port of Avonmouth from the [BODC](http://www.bodc.ac.uk/). 
If you look at the header of file [data/2012AVO.txt](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/2012AVO.txt) you will see the [metadata](http://en.wikipedia.org/wiki/Metadata):", "_____no_output_____" ] ], [ [ "Port: P060\nSite: Avonmouth\nLatitude: 51.51089\nLongitude: -2.71497\nStart Date: 01JAN2012-00.00.00\nEnd Date: 30APR2012-23.45.00\nContributor: National Oceanography Centre, Liverpool\nDatum information: The data refer to Admiralty Chart Datum (ACD)\nParameter code: ASLVTD02 = Surface elevation (unspecified datum) of the water body by fixed in-situ pressure sensor", "_____no_output_____" ] ], [ [ "Let's read the column ASLVTD02 (the surface elevation) and plot it:", "_____no_output_____" ] ], [ [ "from pylab import *\n\ntide_file = open(\"data/2012AVO.txt\", \"r\")\n\n# We know from inspecting the file that the first 11 lines are just\n# header information so lets just skip those lines.\nfor i in range(11):\n line = tide_file.readline()\n\n# Initialise an empty list to store the elevation\nelevation = []\ndays = []\n\n# Now we start reading the interesting data\nn=0\nwhile True: # This will keep looping until we break out.\n # Here we use a try/except block to try to read the data as normal\n # and to break out if unsuccessful - ie when we reach the end of the file. \n try:\n # Read the next line\n line = tide_file.readline()\n \n # Split this line into words. \n words = line.split()\n \n # If we do not have 5 words then it must be blank lines at the end of the file.\n if len(words)!=5:\n break\n except:\n # If we failed to read a line then we must have got to the end.\n break\n \n n+=1 # Count number of data points\n\n try:\n # The elevation data is on the 4th column. 
However, the BODC\n # appends a \"M\" when a value is improbable and an \"N\" when\n # data is missing (maybe a ship dumped into it during rough weather!)\n # Therefore, we put this conversion from a string into a float in a \n # try/except block.\n level = float(words[3])\n elevation.append(level)\n \n # There is a measurement every quarter hour.\n days.append(n*0.25/24) \n except:\n continue\n \n# For plotting lets convert the list to a NumPy array.\nelevation = array(elevation)\ndays = array(days)\n\nplot(days, elevation)\nxlabel(\"Days\")\nylabel(\"Elevation (meters)\")\nshow()", "_____no_output_____" ] ], [ [ "Quiz time:\n\n* What tidal constituents can you identify by looking at this plot?\n* Is this primarily a diurnal or semi-diurnal tidal region? (hint - change the x-axis range on the plot above).\n\nYou will notice in the above example that we used the *split()* string member function. This is a very useful function for grabbing individual words on a line. When called without any arguments it assumes that the [delimiter](http://en.wikipedia.org/wiki/Delimiter) is a blank space. However, you can use this to split a string with any delimiter, *e.g.*, *line.split(';')*, *line.split(':')*.", "_____no_output_____" ], [ "## <span style=\"color:blue\">Exercise 6.1: Read a two-column data file</span>\nThe file [data/xy.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/xy.dat) contains two columns of numbers, corresponding to *x* and *y* coordinates on a curve. The start of the file looks like this:\n\n-1.0000 -0.0000</br>\n-0.9933 -0.0087</br>\n-0.9867 -0.0179</br>\n-0.9800 -0.0274</br>\n-0.9733 -0.0374</br>\n\nMake a program that reads the first column into a list `xlist_61` and the second column into a list `ylist_61`. Then convert the lists to arrays named `xarray_61` and `yarray_61`, and plot the curve. Store the maximum and minimum y coordinates in two variables named `ymin_61` and `ymax_61`. 
(Hint: Read the file line by line, split each line into words, convert to float, and append to `xlist_61` and `ylist_61`.)</br>", "_____no_output_____" ] ], [ [ "# Open data file\ninfile = open(\"data/xy.dat\", \"r\") # \"r\" is for read\n\n# Initialise empty lists\nxlist_61 = []\nylist_61 = []\n\n# Loop through infile and write to x and y lists\nfor line in infile:\n line = line.split() # convert to list by dropping spaces\n xlist_61.append(float(line[0])) # take 0th element and covert to float\n ylist_61.append(float(line[1])) # take 1st element and covert to float\n\n\n# Close the filehandle\ninfile.close()\n\nxarray_61 = np.array(xlist_61)\nyarray_61 = np.array(ylist_61)\n\nymin_61 = yarray_61.min()\nymax_61 = yarray_61.max()", "_____no_output_____" ], [ "grade = ok.grade('lect6-q1')\nprint(\"===\", grade, \"===\")", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRunning tests\n\n---------------------------------------------------------------------\nTest summary\n Passed: 9\n Failed: 0\n[ooooooooook] 100.0% passed\n\n=== {'passed': 9, 'failed': 0, 'locked': 0} ===\n" ] ], [ [ "## <span style=\"color:blue\">Exercise 6.2: Read a data file</span>\nThe files [data/density_water.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/density_water.dat) and [data/density_air.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/density_air.dat) contain data about the density of water and air (respectively) for different temperatures. The data files have some comment lines starting with # and some lines are blank. The rest of the lines contain density data: the temperature in the first column and the corresponding density in the second column. 
The goal of this exercise is to read the data in such a file, discard commented or blank lines, and plot the density versus the temperature as distinct (small) circles for each data point. Write a function `readTempDenFile` that takes a filename as argument and returns two lists containing respectively the temperature and the density. Call this function on both files, and store the temperature and density in lists called `temp_air_list`, `dens_air_list`, `temp_water_list` and `dens_water_list`.", "_____no_output_____" ] ], [ [ "def readTempDenFile(filename):\n infile = open(filename, \"r\")\n temp = []\n dens = []\n for line in infile:\n try:\n t, d = line.split()\n t = float(t)\n d = float(d)\n except:\n continue\n temp.append(t) # N.B. we're now filling out temp and dens lists\n dens.append(d)\n infile.close()\n plot(array(temp), array(dens))\n xlabel(\"Temperature (C)\")\n ylabel(\"Density (kg/m^3)\")\n show()\n return temp,dens\n\n# run function\ntemp_air_list, dens_air_list = readTempDenFile(\"data/density_air.dat\")\ntemp_water_list, dens_water_list = readTempDenFile(\"data/density_water.dat\")", "_____no_output_____" ], [ "ok.grade(\"lect6-q2\")", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRunning tests\n\n" ] ], [ [ "## <span style=\"color:blue\">Exercise 6.3: Read acceleration data and find velocities</span>\nA file [data/acc.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/acc.dat) contains measurements $a_0, a_1, \\ldots, a_{n-1}$ of the acceleration of an object moving along a straight line. The measurement $a_k$ is taken at time point $t_k = k\\Delta t$, where $\\Delta t$ is the time spacing between the measurements. The purpose of the exercise is to load the acceleration data into a program and compute the velocity $v(t)$ of the object at some time $t$.\n\nIn general, the acceleration $a(t)$ is related to the velocity $v(t)$ through $v^\\prime(t) = a(t)$. 
This means that\n\n$$\nv(t) = v(0) + \\int_0^t{a(\\tau)d\\tau}\n$$\n\nIf $a(t)$ is only known at some discrete, equally spaced points in time, $a_0, \\ldots, a_{n-1}$ (which is the case in this exercise), we must compute the integral above numerically, for example by the Trapezoidal rule:\n\n$$\nv(t_k) \\approx v(0) + \\Delta t \\left(\\frac{1}{2}a_0 + \\frac{1}{2}a_k + \\sum_{i=1}^{k-1}a_i \\right), \\ \\ 1 \\leq k \\leq n-1. \n$$\n\nWe assume $v(0) = 0$ so that also $v_0 = 0$.\nRead the values $a_0, \\ldots, a_{n-1}$ from file into an array `acc_array_63` and plot the acceleration versus time for $\\Delta_t = 0.5$. The time should be stored in an array named `time_array_63`.\n\nThen write a function `compute_velocity(dt, k, a)` that takes as arguments a time interval $\\Delta_t$ `dt`, an index `k` and a list of accelerations `a`, uses the Trapezoidal rule to compute one $v(t_k)$ value and return this value. Experiment with different values of $\\Delta t$ and $k$.", "_____no_output_____" ] ], [ [ "dt = 0.5\n\n# read in acceleration\ninfile = open(\"data/acc.dat\", \"r\")\nalist = []\nfor line in infile:\n alist.append(float(line))\ninfile.close()\nacc_array_63 = array(alist)\ntime_array_63 = array([e*dt for e in range(len(alist))]) # time is specified by dt and the number of elements in acc.dat\n\n#print(time_array_63, acc_array_63)\n\n# plot\nplot(time_array_63, acc_array_63)\nxlabel(\"Time\")\nylabel(\"Acceleration\")\nshow()\n\ndef compute_velocity(dt, k, alist):\n if not (1 <= k <= (len(alist) - 1)):\n raise ValueError\n return dt*(.5*alist[0] + .5*alist[k] + sum(alist[:k]))\n\ndt = 2\nk = 4\nprint(compute_velocity(2, 4, alist))\nprint(compute_velocity(3, 5, alist))\nprint(compute_velocity(12, 21, alist))", "_____no_output_____" ], [ "ok.grade('lect6-q3')", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRunning tests\n\n---------------------------------------------------------------------\nTest summary\n Passed: 15\n Failed: 
0\n[ooooooooook] 100.0% passed\n\n" ] ], [ [ "## Python dictionaries\nSuppose we need to store the temperatures in Oslo, London and Paris. The Python list solution might look like:", "_____no_output_____" ] ], [ [ "temps = [13, 15.4, 17.5]\n# temps[0]: Oslo\n# temps[1]: London\n# temps[2]: Paris", "_____no_output_____" ] ], [ [ "In this case we need to remember the mapping between the index and the city name. It would be easier to specify name of city to get the temperature. Containers such as lists and arrays use a continuous series of integers to index elements. However, for many applications such an integer index is not useful.\n\n**Dictionaries** are containers where any Python object can be used\nas an index. Let's rewrite the previous example using a Python dictionary:", "_____no_output_____" ] ], [ [ "temps = {\"Oslo\": 13, \"London\": 15.4, \"Paris\": 17.5}\nprint(\"The temperature in London is\", temps[\"London\"])", "The temperature in London is 15.4\n" ] ], [ [ "Add a new element to a dictionary:", "_____no_output_____" ] ], [ [ "temps[\"Madrid\"] = 26.0\nprint(temps)", "{'Oslo': 13, 'London': 15.4, 'Paris': 17.5, 'Madrid': 26.0}\n" ] ], [ [ "Loop (iterate) over a dictionary:", "_____no_output_____" ] ], [ [ "for city in temps:\n print(\"The temperature in %s is %g\" % (city, temps[city]))", "The temperature in Oslo is 13\nThe temperature in London is 15.4\nThe temperature in Paris is 17.5\nThe temperature in Madrid is 26\n" ] ], [ [ "The index in a dictionary is called the **key**. A dictionary is said to hold key–value pairs. 
So in general:", "_____no_output_____" ] ], [ [ "for key in dictionary:\n value = dictionary[key]\n print(value)", "_____no_output_____" ] ], [ [ "Does the dictionary have a particular key (*i.e.* a particular data entry)?", "_____no_output_____" ] ], [ [ "if \"Berlin\" in temps:\n print(\"We have Berlin and its temperature is \", temps[\"Berlin\"])\nelse:\n print(\"I don't know Berlin' termperature.\")", "I don't know Berlin' termperature.\n" ], [ "print(\"Oslo\" in temps) # i.e. standard boolean expression", "True\n" ] ], [ [ "The keys and values can be reached as lists:", "_____no_output_____" ] ], [ [ "print(\"Keys = \", temps.keys())\nprint(\"Values = \", temps.values())", "Keys = dict_keys(['Oslo', 'London', 'Paris', 'Madrid'])\nValues = dict_values([13, 15.4, 17.5, 26.0])\n" ] ], [ [ "Note that the sequence of keys is **arbitrary**! Never rely on it, if you need a specific order of the keys then you should explicitly sort:", "_____no_output_____" ] ], [ [ "for key in sorted(temps):\n value = temps[key]\n print(key, value)", "London 15.4\nMadrid 26.0\nOslo 13\nParis 17.5\n" ] ], [ [ "Remove Oslo key:value:", "_____no_output_____" ] ], [ [ "del temps[\"Oslo\"] # remove Oslo key w/value\nprint(temps, len(temps))", "{'London': 15.4, 'Paris': 17.5, 'Madrid': 26.0} 3\n" ] ], [ [ "Similarly to what we saw for arrays, two variables can refer to the same dictionary:", "_____no_output_____" ] ], [ [ "t1 = temps\nt1[\"Stockholm\"] = 10.0\nprint(temps)", "{'London': 15.4, 'Paris': 17.5, 'Madrid': 26.0, 'Stockholm': 10.0}\n" ] ], [ [ "So we can see that while we modified *t1*, the *temps* dictionary was also changed.", "_____no_output_____" ], [ "Let's look at a simple example of reading the same data from a file and putting it into a dictionary. 
We will be reading the file *data/deg2.dat*.", "_____no_output_____" ] ], [ [ "infile = open(\"data/deg2.dat\", \"r\")\n# Start with empty dictionary\ntemps = {} \nfor line in infile:\n # If you examine the file you will see a ':' after the city name,\n # so let's use this as the delimiter for splitting the line.\n city, temp = line.split(\":\") \n temps[city] = float(temp)\ninfile.close()\nprint(temps)", "{'Oslo': 21.8, 'London': 18.1, 'Berlin': 19.0, 'Paris': 23.0, 'Rome': 26.0}\n" ] ], [ [ "## <span style=\"color:blue\">Exercise 6.4: Make a dictionary from a table</span>\nThe file [data/constants.txt](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/constants.txt) contains a table of the values and the dimensions of some fundamental constants from physics. We want to load this table into a dictionary *constants*, where the keys are the names of the constants. For example, *constants['gravitational constant']* holds the value of the gravitational constant (6.67259 $\\times$ 10$^{-11}$) in Newton's law of gravitation. Make a function `read_constants(file_path)` that that reads and interprets the text in the file passed as argument, and thereafter returns the dictionary.", "_____no_output_____" ] ], [ [ "def read_constants(file_path):\n infile = open(file_path, \"r\")\n constants = {} # An empty dictionary to store the constants that are read in from the file\n infile.readline(); infile.readline() # Skip the first two lines of the file, since these just contain the column names and the separator.\n for line in infile:\n words = line.split() # Split each line up into individual words\n dimension = words.pop() # pop is a list operation that removes the last element from a list and returns it\n value = float(words.pop()) # Again, use pop to obtain the constant itself.\n name = \" \".join(words) # After the two 'pop' operations above, the words remaining in the 'words' list must be the name of the constant. 
Join the individual words together, with spaces inbetween, using .join.\n constants[name] = value # Create a new key-value pair in the dictionary\n return constants\n\nprint(read_constants('data/constants.txt'))", "{'speed of light': 299792458.0, 'gravitational constant': 6.67259e-11, 'Planck constant': 6.6260755e-34, 'elementary charge': 1.60217733e-19, 'Avogadro number': 6.0221367e+23, 'Boltzmann constant': 1.380658e-23, 'electron mass': 9.1093897e-31, 'proton mass': 1.6726231e-27}\n" ], [ "ok.grade('lect6-q4')", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRunning tests\n\n---------------------------------------------------------------------\nTest summary\n Passed: 1\n Failed: 0\n[ooooooooook] 100.0% passed\n\n" ] ], [ [ "## <span style=\"color:blue\">Exercise 6.5: Explore syntax differences: lists vs. dictionaries</span>\nConsider this code:", "_____no_output_____" ] ], [ [ "t1 = {}\nt1[0] = -5\nt1[1] = 10.5", "_____no_output_____" ] ], [ [ "Explain why the lines above work fine while the ones below do not:", "_____no_output_____" ] ], [ [ "t2 = []\n#t2[0] = -5\n#t2[1] = 10.5", "_____no_output_____" ] ], [ [ "What must be done in the last code snippet to make it work properly?", "_____no_output_____" ], [ "## <span style=\"color:blue\">Exercise 6.6: Compute the area of a triangle</span>\nAn arbitrary triangle can be described by the coordinates of its three vertices: $(x_1, y_1), (x_2, y_2), (x_3, y_3)$, numbered in a counterclockwise direction. The area of the triangle is given by the formula:\n\n$A = \\frac{1}{2}|x_2y_3 - x_3y_2 - x_1y_3 + x_3y_1 + x_1y_2 - x_2y_1|.$\n\nWrite a function `triangle_area(vertices)` that returns the area of a triangle whose vertices are specified by the argument vertices, which is a nested list of the vertex coordinates. 
For example, vertices can be [[0,0], [1,0], [0,2]] if the three corners of the triangle have coordinates (0, 0), (1, 0), and (0, 2).\n\nThen, assume that the vertices of the triangle are stored in a dictionary and not a list. The keys in the dictionary correspond to the vertex number (1, 2, or 3) while the values are 2-tuples with the x and y coordinates of the vertex. For example, in a triangle with vertices (0, 0), (1, 0), and (0, 2) the vertices argument becomes:", "_____no_output_____" ] ], [ [ "def triangle_area(vertices):\n # nb. vertices = {v1: (x,y)}\n x2y3 = vertices[2][0] * vertices[3][1]\n x3y2 = vertices[3][0] * vertices[2][1]\n x1y3 = vertices[1][0] * vertices[3][1]\n x3y1 = vertices[3][0] * vertices[1][1]\n x1y2 = vertices[1][0] * vertices[2][1]\n x2y1 = vertices[2][0] * vertices[1][1]\n return .5*(x2y3 - x3y2 - x1y3 + x3y1 + x1y2 - x2y1)\n\nprint(triangle_area({1: (0,0), 2: (1,0), 3: (0,1)}))", "0.5\n" ], [ "ok.grade('lect6-q6')", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRunning tests\n\n---------------------------------------------------------------------\nTest summary\n Passed: 3\n Failed: 0\n[ooooooooook] 100.0% passed\n\n" ] ], [ [ "## String manipulation\nText in Python is represented as **strings**. Programming with strings is therefore the key to interpret text in files and construct new text (*i.e.* **parsing**). First we show some common string operations and then we apply them to real examples. 
Our sample string used for illustration is:", "_____no_output_____" ] ], [ [ "s = \"Berlin: 18.4 C at 4 pm\"", "_____no_output_____" ] ], [ [ "Strings behave much like lists/tuples - they are simply a sequence of characters:", "_____no_output_____" ] ], [ [ "print(\"s[0] = \", s[0])\nprint(\"s[1] = \", s[1])", "s[0] = B\ns[1] = e\n" ] ], [ [ "Substrings are just slices of lists and arrays:", "_____no_output_____" ] ], [ [ "# from index 8 to the end of the string\nprint(s[8:])", "18.4 C at 4 pm\n" ], [ "# index 8, 9, 10 and 11 (not 12!)\nprint(s[8:12])", "18.4\n" ], [ "# from index 8 to 8 from the end of the string\nprint(s[8:-8])", "18.4 C\n" ] ], [ [ "You can also find the start of a substring:", "_____no_output_____" ] ], [ [ "# where does \"Berlin\" start?\nprint(s.find(\"Berlin\"))", "0\n" ], [ "print(s.find(\"pm\"))", "20\n" ], [ "print (s.find(\"Oslo\"))", "-1\n" ] ], [ [ "In this last example, Oslo does not exist in the list so the return value is -1.", "_____no_output_____" ], [ "We can also check if a substring is contained in a string:", "_____no_output_____" ] ], [ [ "print (\"Berlin\" in s)", "True\n" ], [ "print (\"Oslo\" in s)", "False\n" ], [ "if \"C\" in s:\n print(\"C found\")\nelse:\n print(\"C not found\")", "C found\n" ] ], [ [ "### Search and replace\nStrings also support substituting a substring by another string. In general this looks like *s.replace(s1, s2)*, which replaces string *s1* in *s* by string *s2*, *e.g.*:", "_____no_output_____" ] ], [ [ "s = s.replace(\" \", \"_\")\nprint(s)", "Berlin:_18.4_C_at_4_pm\n" ], [ "s = s.replace(\"Berlin\", \"Bonn\")\nprint(s)", "Bonn:_18.4_C_at_4_pm\n" ], [ "# Replace the text before the first colon by 'London'\ns = s.replace(s[:s.find(\":\")], \"London\")\nprint(s)", "London:_18.4_C_at_4_pm\n" ] ], [ [ "Notice that in all these examples we assign the new result back to *s*. 
One of the reasons we are doing this is strings are actually constant (*i.e* immutable) and therefore cannot be modified *inplace*. We **cannot** write for example:", "_____no_output_____" ] ], [ [ "s[18] = '5'\nTypeError: \"str\" object does not support item assignment", "_____no_output_____" ] ], [ [ "We also encountered examples above where we used the split function to break up a line into separate substrings for a given separator (where a space is the default delimiter). Sometimes we want to split a string into lines - *i.e.* the delimiter is the [carriage return](http://en.wikipedia.org/wiki/Carriage_return). This can be surprisingly tricky because different computing platforms (*e.g.* Windows, Linux, Mac) use different characters to represent a carriage return. For example, Unix uses '\\n'. Luckly Python provides a *cross platform* way of doing this so regardless of what platform created the data file, or what platform you are running Python on, it will do the *right thing*: ", "_____no_output_____" ] ], [ [ "t = \"1st line\\n2nd line\\n3rd line\"\nprint (\"\"\"original t =\n\"\"\", t)", "original t =\n 1st line\n2nd line\n3rd line\n" ], [ "# This works here but will give you problems if you are switching\n# files between Windows and either Mac or Linux.\nprint (t.split(\"\\n\"))", "['1st line', '2nd line', '3rd line']\n" ], [ "# Cross platform (ie better) solution\nprint(t.splitlines())", "['1st line', '2nd line', '3rd line']\n" ] ], [ [ "### Stripping off leading/trailing whitespace\nWhen processing text from a file and composing new strings, we frequently need to trim leading and trailing whitespaces:", "_____no_output_____" ] ], [ [ "s = \" text with leading and trailing spaces \\n\"\nprint(\"-->%s<--\"%s.strip())", "-->text with leading and trailing spaces<--\n" ], [ "# left strip\nprint(\"-->%s<--\"%s.lstrip())", "-->text with leading and trailing spaces \n<--\n" ], [ "# right strip\nprint(\"-->%s<--\"%s.rstrip())", "--> text with leading and trailing 
spaces<--\n" ] ], [ [ "### join() (the opposite of split())\nWe can join a list of substrings to form a new string. Similarly to *split()* we put strings together with a delimiter inbetween:", "_____no_output_____" ] ], [ [ "strings = [\"Newton\", \"Secant\", \"Bisection\"]\nprint(\", \".join(strings))", "Newton, Secant, Bisection\n" ] ], [ [ "You can prove to yourself that these are inverse operations:", "_____no_output_____" ] ], [ [ "t = delimiter.join(stringlist)\nstringlist = t.split(delimiter)", "_____no_output_____" ] ], [ [ "As an example, let's split off the first two words on a line:", "_____no_output_____" ] ], [ [ "line = \"This is a line of words separated by space\"\nwords = line.split()\nprint(\"words = \", words)\nline2 = \" \".join(words[2:])\nprint(\"line2 = \", line2)", "words = ['This', 'is', 'a', 'line', 'of', 'words', 'separated', 'by', 'space']\nline2 = a line of words separated by space\n" ] ], [ [ "## <span style=\"color:blue\">Exercise 6.7: Improve a program</span>\nThe file [data/densities.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/densities.dat) contains a table of densities of various substances measured in g/cm$^3$. 
The following program reads the data in this file and produces a dictionary whose keys are the names of substances, and the values are the corresponding densities.", "_____no_output_____" ] ], [ [ "def read_densities(filename):\n infile = open(filename, 'r')\n densities = {}\n for line in infile:\n words = line.split()\n density = float(words[-1])\n \n if len(words[:-1]) == 2:\n substance = words[0] + ' ' + words[1]\n else:\n substance = words[0]\n \n densities[substance] = density\n \n infile.close()\n return densities\n\ndensities = read_densities('data/densities.dat')\nprint(densities)", "{'air': 0.0012, 'gasoline': 0.67, 'ice': 0.9, 'pure water': 1.0, 'seawater': 1.025, 'human body': 1.03, 'limestone': 2.6, 'granite': 2.7, 'iron': 7.8, 'silver': 10.5, 'mercury': 13.6, 'gold': 18.9, 'platinium': 21.4, 'Earth mean': 5.52, 'Earth core': 13.0, 'Moon': 3.3, 'Sun mean': 1.4, 'Sun core': 160.0, 'proton': 280000000000000.0}\n" ] ], [ [ "One problem we face when implementing the program above is that the name of the substance can contain one or two words, and maybe more words in a more comprehensive table. The purpose of this exercise is to use string operations to shorten the code and make it more general. Implement the following two methods in separate functions `read_densities_join` and `read_densities_substrings`, and control that they give the same result.\n\n1. Let *substance* consist of all the words but the last, using the join method in string objects to combine the words.\n2. Observe that all the densities start in the same column file and use substrings to divide line into two parts. 
(Hint: Remember to strip the first part such that, e.g., the density of ice is obtained as *densities['ice']* and not *densities['ice ']*.)", "_____no_output_____" ] ], [ [ "def read_densities_join(filename):\n infile = open(filename, 'r')\n densities = {}\n for line in infile:\n words = line.split()\n density = float(words.pop()) # pop is a list operation that removes the last element from a list and returns it\n substance = \"_\".join(words) # join the remaining words with _\n densities[substance] = density\n infile.close()\n return densities\n\ndef read_densities_substrings(filename):\n infile = open(filename, 'r')\n densities = {}\n for line in infile:\n density = float(line[12:]) # column 13 onwards\n substance = line[:12] # upto coumn 12\n substance = substance.strip() # remove trailing spaces\n substance = substance.replace(\" \", \"_\") # replace spaces with _\n densities[substance] = density\n infile.close()\n return densities\n\ndensities_join = read_densities_join('data/densities.dat')\ndensities_substrings = read_densities_substrings('data/densities.dat')\nprint(densities_join)\nprint(densities_substrings)", "{'air': 0.0012, 'gasoline': 0.67, 'ice': 0.9, 'pure_water': 1.0, 'seawater': 1.025, 'human_body': 1.03, 'limestone': 2.6, 'granite': 2.7, 'iron': 7.8, 'silver': 10.5, 'mercury': 13.6, 'gold': 18.9, 'platinium': 21.4, 'Earth_mean': 5.52, 'Earth_core': 13.0, 'Moon': 3.3, 'Sun_mean': 1.4, 'Sun_core': 160.0, 'proton': 280000000000000.0}\n{'air': 0.0012, 'gasoline': 0.67, 'ice': 0.9, 'pure_water': 1.0, 'seawater': 1.025, 'human_body': 1.03, 'limestone': 2.6, 'granite': 2.7, 'iron': 7.8, 'silver': 10.5, 'mercury': 13.6, 'gold': 18.9, 'platinium': 21.4, 'Earth_mean': 5.52, 'Earth_core': 13.0, 'Moon': 3.3, 'Sun_mean': 1.4, 'Sun_core': 160.0, 'proton': 280000000000000.0}\n" ], [ "ok.grade('lect6-q7')", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRunning 
tests\n\n---------------------------------------------------------------------\nTest summary\n Passed: 2\n Failed: 0\n[ooooooooook] 100.0% passed\n\n" ] ], [ [ "## File writing\nWriting a file in Python is simple. You just collect the text you want to write in one or more strings and, for each string, use a statement along the lines of", "_____no_output_____" ] ], [ [ "outfile.write(string)", "_____no_output_____" ] ], [ [ "The write function does not add a newline character so you may have to do that explicitly:", "_____no_output_____" ] ], [ [ "outfile.write(string + ’\\n’)", "_____no_output_____" ] ], [ [ "That’s it! Compose the strings and write! Let's do an example. Write a nested list (table) to a file:", "_____no_output_____" ] ], [ [ "# Let's define some table of data\ndata = [[ 0.75, 0.29619813, -0.29619813, -0.75 ],\n [ 0.29619813, 0.11697778, -0.11697778, -0.29619813],\n [-0.29619813, -0.11697778, 0.11697778, 0.29619813],\n [-0.75, -0.29619813, 0.29619813, 0.75 ]]\n\n# Open the file for writing. Notice the \"w\" indicates we are writing!\noutfile = open(\"tmp_table.dat\", \"w\")\nfor row in data:\n for column in row:\n outfile.write(\"%14.8f\" % column)\n outfile.write(\"\\n\") # ensure newline\noutfile.close()", "_____no_output_____" ] ], [ [ "And that's it - run the above cell and take a look at the file that was generated in your Azure library clone.", "_____no_output_____" ], [ "## <span style=\"color:blue\">Exercise 6.8: Write function data to a file</span>\nWe want to dump $x$ and $f(x)$ values to a file named function_data.dat, where the $x$ values appear in the first column and the $f(x)$ values appear in the second. Choose $n$ equally spaced $x$ values in the interval [-4, 4]. 
Here, the function $f(x)$ is given by:\n\n$f(x) = \\frac{1}{\\sqrt{2\\pi}}\\exp(-0.5x^2)$", "_____no_output_____" ] ], [ [ "from math import pi\n\n# define our function\ndef f(x):\n return (1.0/sqrt(2.0*pi))*exp(-.5*x**2.0)\n\n# let's make our x\nxarray = linspace(-4.0, 4.0, 100)\nfxs = f(xarray)\nfxs[-1] += 1\n\n# let's zip them up for a simple for loop when writing out\ndata = zip(xarray, fxs) # this combines each element into a tuple e.g. [(xarray1, fxs1), (xarray2, fxs2) ...]\n\n# write out\noutfile = open(\"ex8_out.dat\", \"w\") # w is for writing!\nfor x,y in data:\n outfile.write(\"X = %.2f Y = %.2f\" % (x, y))\n outfile.write(\"\\n\") # ensure newline\noutfile.close()", "_____no_output_____" ], [ "ok.grade('lect6-q8')", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRunning tests\n\n---------------------------------------------------------------------\nquestion 6.8 > Suite 2 > Case 3\n\nfile_array = a = np.loadtxt(\"ex8_out.dat\", usecols=(2, 5))\n>>> np.array_equal(a[-1], [4., 0.])\nFalse\n\n# Error: expected\n# True\n# but got\n# False\n\nRun only this test case with \"python3 ok -q lect6-q8 --suite 2 --case 3\"\n---------------------------------------------------------------------\nTest summary\n Passed: 3\n Failed: 1\n[oooooook...] 75.0% passed\n\n" ], [ " ok.score()", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nScoring tests\n\n---------------------------------------------------------------------\nquestion 0\n Passed: 2\n Failed: 0\n[ooooooooook] 100.0% passed\n\n---------------------------------------------------------------------\nquestion 6.1\n Passed: 3\n Failed: 0\n[ooooooooook] 100.0% passed\n\n" ] ] ]
[ "markdown", "code", "markdown", "raw", "markdown", "raw", "markdown", "code", "markdown", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw", "markdown", "raw", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
d06717f12ad25814be873172a199341c8454bd2e
24,794
ipynb
Jupyter Notebook
notebooks/old notebooks/demlin02.ipynb
snowdj/CompEcon-python
883ac75750800e2792218a7b13f97e681498a389
[ "MIT" ]
23
2016-12-14T13:21:27.000Z
2020-08-23T21:04:34.000Z
notebooks/old notebooks/demlin02.ipynb
snowdj/CompEcon-python
883ac75750800e2792218a7b13f97e681498a389
[ "MIT" ]
1
2017-09-10T04:48:54.000Z
2018-03-31T01:36:46.000Z
notebooks/old notebooks/demlin02.ipynb
snowdj/CompEcon-python
883ac75750800e2792218a7b13f97e681498a389
[ "MIT" ]
13
2017-02-25T08:10:38.000Z
2020-05-15T09:49:16.000Z
139.292135
20,236
0.88465
[ [ [ "###DemLin02:\n#Ill-conditioning of Vandermonde matrix\n* todo: Review this demo, result not the same as in Miranda's\n\n", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom numpy.linalg import norm, cond, solve\nimport time\nimport matplotlib.pyplot as plt\n%matplotlib notebook\nnp.set_printoptions(precision=4)", "_____no_output_____" ] ], [ [ "Compute approximation error and matrix condition number", "_____no_output_____" ] ], [ [ "n = np.arange(6, 51)\nnn = n.size\n\nerrv = np.zeros(nn)\nconv = np.zeros(nn)\n\nfor i in range(nn):\n v = np.vander(1 + np.arange(n[i]))\n errv[i] = np.log10(norm(np.identity(n[i]) - solve(v, v)))\n conv[i] = np.log10(cond(v))\n\nprint('errv =\\n', errv)", "errv =\n [-11.0688 -14.6779 -12.5801 -6.8825 -5.5384 -5.9532 -7.6494 -5.9833\n -5.6239 -6.3194 -5.651 -5.8029 -4.5616 -5.6639 -4.912 -5.0873\n -4.958 -5.8492 -5.0541 -5.6499 -5.7562 -5.6496 -5.8851 -5.7686\n -5.475 -5.3383 -5.4446 -5.0718 -5.4484 -5.3056 -5.3707 -5.7315\n -5.7709 -6.0165 -5.7509 -5.0538 -5.838 -6.063 -6.0756 -2.9206\n -5.0652 -5.759 -5.8286 -6.3859 -6.0894]\n" ] ], [ [ "Smooth using quadratic function", "_____no_output_____" ] ], [ [ "X = np.vstack([np.ones(nn), n]).T\nb = np.linalg.lstsq(X, errv)[0]\nerrv = np.dot(X, b)\nprint('b = ', b)\n\n\nb = np.linalg.lstsq(X, conv)[0]\nconv = np.dot(X, b)\nprint('b = ', b)", "b = [-8.003 0.0681]\nb = [1.0590e+01 9.1579e-03]\n" ] ], [ [ "Plot matrix condition numbers", "_____no_output_____" ] ], [ [ "plt.figure(figsize=[12, 5])\nplt.subplot(1, 2, 1)\nplt.plot(n, conv)\nplt.xlabel('n')\nplt.ylabel('Log_{10} Condition Number')\nplt.title('Vandermonde Matrix Condition Numbers')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d067314ec8e91e7fa787123e39f20d52a7c25ff0
105,045
ipynb
Jupyter Notebook
Time Series/Time Series Tensorflow Prediction Using LSTM .ipynb
shivangsharma1/Tensorflow-in-Practice-Specialization
f82d7a8c5635cf2d6f3b327e344239a51ea97f28
[ "Apache-2.0" ]
null
null
null
Time Series/Time Series Tensorflow Prediction Using LSTM .ipynb
shivangsharma1/Tensorflow-in-Practice-Specialization
f82d7a8c5635cf2d6f3b327e344239a51ea97f28
[ "Apache-2.0" ]
null
null
null
Time Series/Time Series Tensorflow Prediction Using LSTM .ipynb
shivangsharma1/Tensorflow-in-Practice-Specialization
f82d7a8c5635cf2d6f3b327e344239a51ea97f28
[ "Apache-2.0" ]
null
null
null
105,045
105,045
0.865962
[ [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ], [ "!pip install tf-nightly-2.0-preview\n", "_____no_output_____" ], [ "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nprint(tf.__version__)", "2.0.0-dev20190628\n" ], [ "def plot_series(time, series, format=\"-\", start=0, end=None):\n plt.plot(time[start:end], series[start:end], format)\n plt.xlabel(\"Time\")\n plt.ylabel(\"Value\")\n plt.grid(True)\n\ndef trend(time, slope=0):\n return slope * time\n\ndef seasonal_pattern(season_time):\n \"\"\"Just an arbitrary pattern, you can change it if you wish\"\"\"\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))\n\ndef seasonality(time, period, amplitude=1, phase=0):\n \"\"\"Repeats the same pattern at each period\"\"\"\n season_time = ((time + phase) % period) / period\n return amplitude * seasonal_pattern(season_time)\n\ndef noise(time, noise_level=1, seed=None):\n rnd = np.random.RandomState(seed)\n return rnd.randn(len(time)) * noise_level\n\ntime = np.arange(4 * 365 + 1, dtype=\"float32\")\nbaseline = 10\nseries = trend(time, 0.1) \nbaseline = 10\namplitude = 40\nslope = 0.05\nnoise_level = 5\n\n# Create the series\nseries = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)\n# Update with noise\nseries += noise(time, noise_level, seed=42)\n\nsplit_time = 1000\ntime_train = time[:split_time]\nx_train = 
series[:split_time]\ntime_valid = time[split_time:]\nx_valid = series[split_time:]\n\nwindow_size = 20\nbatch_size = 32\nshuffle_buffer_size = 1000", "_____no_output_____" ], [ "def windowed_dataset(series, window_size, batch_size, shuffle_buffer):\n dataset = tf.data.Dataset.from_tensor_slices(series)\n dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)\n dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))\n dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:-1], window[-1]))\n dataset = dataset.batch(batch_size).prefetch(1)\n return dataset", "_____no_output_____" ], [ "tf.keras.backend.clear_session()\ntf.random.set_seed(51)\nnp.random.seed(51)\n\ntf.keras.backend.clear_session()\ndataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),\n input_shape=[None]),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),\n tf.keras.layers.Dense(1),\n tf.keras.layers.Lambda(lambda x: x * 100.0)\n])\n\nlr_schedule = tf.keras.callbacks.LearningRateScheduler(\n lambda epoch: 1e-8 * 10**(epoch / 20))\noptimizer = tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9)\nmodel.compile(loss=tf.keras.losses.Huber(),\n optimizer=optimizer,\n metrics=[\"mae\"])\nhistory = model.fit(dataset, epochs=100, callbacks=[lr_schedule])", "Epoch 1/100\n" ], [ "plt.semilogx(history.history[\"lr\"], history.history[\"loss\"])\nplt.axis([1e-8, 1e-4, 0, 30])", "_____no_output_____" ], [ "tf.keras.backend.clear_session()\ntf.random.set_seed(51)\nnp.random.seed(51)\n\ntf.keras.backend.clear_session()\ndataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),\n input_shape=[None]),\n 
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),\n tf.keras.layers.Dense(1),\n tf.keras.layers.Lambda(lambda x: x * 100.0)\n])\n\n\nmodel.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(lr=1e-5, momentum=0.9),metrics=[\"mae\"])\nhistory = model.fit(dataset,epochs=500,verbose=0)", "_____no_output_____" ], [ "forecast = []\nresults = []\nfor time in range(len(series) - window_size):\n forecast.append(model.predict(series[time:time + window_size][np.newaxis]))\n\nforecast = forecast[split_time-window_size:]\nresults = np.array(forecast)[:, 0, 0]\n\n\nplt.figure(figsize=(10, 6))\n\nplot_series(time_valid, x_valid)\nplot_series(time_valid, results)", "_____no_output_____" ], [ "tf.keras.metrics.mean_absolute_error(x_valid, results).numpy()", "_____no_output_____" ], [ "import matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\n\n#-----------------------------------------------------------\n# Retrieve a list of list results on training and test data\n# sets for each training epoch\n#-----------------------------------------------------------\nmae=history.history['mae']\nloss=history.history['loss']\n\nepochs=range(len(loss)) # Get number of epochs\n\n#------------------------------------------------\n# Plot MAE and Loss\n#------------------------------------------------\nplt.plot(epochs, mae, 'r')\nplt.plot(epochs, loss, 'b')\nplt.title('MAE and Loss')\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Accuracy\")\nplt.legend([\"MAE\", \"Loss\"])\n\nplt.figure()\n\nepochs_zoom = epochs[200:]\nmae_zoom = mae[200:]\nloss_zoom = loss[200:]\n\n#------------------------------------------------\n# Plot Zoomed MAE and Loss\n#------------------------------------------------\nplt.plot(epochs_zoom, mae_zoom, 'r')\nplt.plot(epochs_zoom, loss_zoom, 'b')\nplt.title('MAE and Loss')\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Accuracy\")\nplt.legend([\"MAE\", \"Loss\"])\n\nplt.figure()", 
"_____no_output_____" ], [ "tf.keras.backend.clear_session()\ndataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),\n input_shape=[None]),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),\n tf.keras.layers.Dense(1),\n tf.keras.layers.Lambda(lambda x: x * 100.0)\n])\n\n\nmodel.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(lr=1e-6, momentum=0.9))\nmodel.fit(dataset,epochs=100, verbose=0)", "_____no_output_____" ], [ "tf.keras.backend.clear_session()\ndataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),\n input_shape=[None]),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),\n tf.keras.layers.Dense(1),\n tf.keras.layers.Lambda(lambda x: x * 100.0)\n])\n\n\nmodel.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(lr=1e-6, momentum=0.9))\nmodel.fit(dataset,epochs=100)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d067332b31dfa5ec43c2ec2737fc59f51e9448b1
59,239
ipynb
Jupyter Notebook
01-Lesson-Plans/19-Supervised-Machine-Learning/1/Activities/07-Ins_Confusion-Matrixes/Solved/Ins_Confusion_Matrix.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
01-Lesson-Plans/19-Supervised-Machine-Learning/1/Activities/07-Ins_Confusion-Matrixes/Solved/Ins_Confusion_Matrix.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
01-Lesson-Plans/19-Supervised-Machine-Learning/1/Activities/07-Ins_Confusion-Matrixes/Solved/Ins_Confusion_Matrix.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
158.817694
51,272
0.903746
[ [ [ "# Confusion Matrix\n\nA confusion matrix shows the predicted values vs. the actual values by counting the true positives, true negatives, false positives, and false negatives.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport pandas as pd", "_____no_output_____" ] ], [ [ "Generate some data", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_blobs\n\nX, y = make_blobs(n_samples=1000, centers=2, cluster_std=3, random_state=42)\n\nprint(f\"Labels: {y[:10]}\")\nprint(f\"Data: {X[:10]}\")", "Labels: [0 1 1 1 1 1 1 0 1 0]\nData: [[-2.12128686e-03 5.62516556e+00]\n [ 9.67298128e+00 3.12404959e-01]\n [ 1.35105358e+00 -2.34698296e+00]\n [ 6.20838530e+00 2.52069672e-01]\n [ 4.09843910e+00 1.15524924e+01]\n [ 7.56547173e+00 3.47645219e+00]\n [ 6.39996024e+00 5.48458714e-01]\n [-1.91963411e+00 1.11412974e+01]\n [ 1.72490863e+00 -2.16568481e+00]\n [-2.60857854e+00 1.43979597e+01]]\n" ], [ "# Visualizing both classes\nplt.scatter(X[:, 0], X[:, 1], c=y)", "_____no_output_____" ] ], [ [ "Split our data into training and testing data", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)", "_____no_output_____" ] ], [ [ "Create a logistic regression model", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression()\nclassifier", "_____no_output_____" ] ], [ [ "Fit (train) our model by using the training data", "_____no_output_____" ] ], [ [ "classifier.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "Validate the model by using the test data", "_____no_output_____" ] ], [ [ "print(f\"Training Data Score: {classifier.score(X_train, y_train)}\")\nprint(f\"Testing Data Score: {classifier.score(X_test, y_test)}\")", "Training Data Score: 0.9533333333333334\nTesting Data Score: 0.956\n" ] ], [ [ "Create a confusion matrix", "_____no_output_____" ] 
], [ [ "from sklearn.metrics import confusion_matrix\n\ny_true = y_test\ny_pred = classifier.predict(X_test)\nconfusion_matrix(y_true, y_pred)", "_____no_output_____" ] ], [ [ "The accuracy of the model on the test data is TP + TN / (TP + FP + TN + FN)", "_____no_output_____" ] ], [ [ "tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\naccuracy = (tp + tn) / (tp + fp + tn + fn) # (111 + 128) / (111 + 5 + 128 + 6)\nprint(f\"Accuracy: {accuracy}\")", "Accuracy: 0.956\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06734178d1f1306477ae598f681e4a949a4c867
71,003
ipynb
Jupyter Notebook
climate_starter.ipynb
solivas89/sqlalchemy-challenge
cb79247db15fa2e3d8f507a87f9b097b80b4769c
[ "ADSL" ]
null
null
null
climate_starter.ipynb
solivas89/sqlalchemy-challenge
cb79247db15fa2e3d8f507a87f9b097b80b4769c
[ "ADSL" ]
null
null
null
climate_starter.ipynb
solivas89/sqlalchemy-challenge
cb79247db15fa2e3d8f507a87f9b097b80b4769c
[ "ADSL" ]
null
null
null
91.972798
22,152
0.843232
[ [ [ "%matplotlib inline\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "import datetime as dt", "_____no_output_____" ] ], [ [ "# Reflect Tables into SQLAlchemy ORM", "_____no_output_____" ] ], [ [ "# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\nfrom sqlalchemy import desc", "_____no_output_____" ], [ "engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\nconn = engine.connect()", "_____no_output_____" ], [ "inspector = inspect(engine)\ninspector.get_table_names()", "_____no_output_____" ], [ "# reflect an existing database into a new model\n# reflect the tables\nBase = automap_base()\nBase.prepare(engine, reflect=True)", "_____no_output_____" ], [ "# We can view all of the classes that automap found\nBase.classes.keys()", "_____no_output_____" ], [ "# Save references to each table\nME = Base.classes.measurement\nST = Base.classes.station", "_____no_output_____" ], [ "# Create our session (link) from Python to the DB\nsession = Session(engine)", "_____no_output_____" ] ], [ [ "# Exploratory Climate Analysis", "_____no_output_____" ] ], [ [ "first_row = session.query(ME).first()\nfirst_row.__dict__", "_____no_output_____" ], [ "first_row = session.query(ST).first()\nfirst_row.__dict__", "_____no_output_____" ], [ "columns = inspector.get_columns('measurement')\nfor column in columns:\n print(column[\"name\"], column[\"type\"])", "id INTEGER\nstation TEXT\ndate TEXT\nprcp FLOAT\ntobs FLOAT\n" ], [ "columns = inspector.get_columns('station')\nfor column in columns:\n print(column[\"name\"], column[\"type\"])", "id INTEGER\nstation TEXT\nname TEXT\nlatitude FLOAT\nlongitude FLOAT\nelevation FLOAT\n" ], [ "session.query(func.min(ME.date)).all()", 
"_____no_output_____" ], [ "session.query(func.max(ME.date)).all()", "_____no_output_____" ], [ "# Design a query to retrieve the last 12 months of precipitation data and plot the results\n# Calculate the date 1 year ago from the last data point in the database\n# Perform a query to retrieve the data and precipitation scores\n# Save the query results as a Pandas DataFrame and set the index to the date column\n# Sort the dataframe by date\n# Use Pandas Plotting with Matplotlib to plot the data\n\nprevious_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n# print(previous_year)\n\nyear_query = session.query(ME.date, ME.prcp).\\\n filter(ME.date >= previous_year).\\\n order_by(ME.date).all()\n# year_query\n\nyear_data = pd.DataFrame(year_query)\nyear_data.set_index('date', inplace = True)\n\nyear_data.plot()\nplt.xticks(rotation = 'vertical')\n\n# plt.title('Last 12 Months of Precipitation')\nplt.xlabel('Date')\nplt.ylabel('Inches')\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ], [ "# Use Pandas to calcualte the summary statistics for the precipitation data\nyear_data.describe()", "_____no_output_____" ], [ "# Design a query to show how many stations are available in this dataset?\nsel = [func.count(ST.station)]\nstations = session.query(*sel).all()\nstations", "_____no_output_____" ], [ "# What are the most active stations? (i.e. 
what stations have the most rows)?\n# List the stations and the counts in descending order.\nsel = [ME.station, func.count(ME.station)]\nmost_active = session.query(*sel).\\\n group_by(ME.station).\\\n order_by(func.count(ME.station).desc()).all()\nmost_active", "_____no_output_____" ], [ "# Using the station id from the previous query, calculate the lowest temperature recorded, \n# highest temperature recorded, and average temperature of the most active station?\n#once i find the station with the most count from the measurment table we can query the min and max for that station\nsel = [func.min(ME.tobs), func.max(ME.tobs), func.avg(ME.tobs)]\nsession.query(*sel).\\\n filter(ME.station == 'USC00519281').all()", "_____no_output_____" ], [ "# Choose the station with the highest number of temperature observations.\n# Query the last 12 months of temperature observation data for this station and plot the results as a histogram\n#can use the same query as above but do a date filter\nsel = [ME.tobs]\ntobs_data = pd.DataFrame(session.query(*sel).\\\n filter(ME.date >= previous_year).\\\n filter(ME.station == 'USC00519281').all())\n# tobs_data\n\ntobs_data.plot.hist(bins=12)\nplt.xlabel('Temperature')\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "## Bonus Challenge Assignment", "_____no_output_____" ] ], [ [ "# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d' \n# and return the minimum, average, and maximum temperatures for that range of dates\ndef calc_temps(start_date, end_date):\n \"\"\"TMIN, TAVG, and TMAX for a list of dates.\n \n Args:\n start_date (string): A date string in the format %Y-%m-%d\n end_date (string): A date string in the format %Y-%m-%d\n \n Returns:\n TMIN, TAVE, and TMAX\n \"\"\"\n \n return session.query(func.min(ME.tobs), func.avg(ME.tobs), func.max(ME.tobs)).\\\n filter(ME.date >= start_date).filter(ME.date <= end_date).all()\n\n# function usage 
example\nprint(calc_temps('2012-02-28', '2012-03-05'))", "[(62.0, 69.57142857142857, 74.0)]\n" ], [ "# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax \n# for your trip using the previous year's data for those same dates.\nstart_date = dt.date(2012, 2, 28) - dt.timedelta(days=365)\nend_date = dt.date(2012, 3, 5) - dt.timedelta(days=365)\n\ntrip_temps = calc_temps(start_date, end_date)\ntrip_temps", "_____no_output_____" ], [ "# Plot the results from your previous query as a bar chart. \n# Use \"Trip Avg Temp\" as your Title\n# Use the average temperature for the y value\n# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)\nfig, ax = plt.subplots(figsize=plt.figaspect(2.))\navg_temp = trip_temps[0][1]\nxpos = 1\nbar = ax.bar(xpos, avg_temp, yerr=error, alpha=0.5, color='red', align='center')\nax.set(xticks=range(xpos), title=\"Trip Avg Temp\", ylabel=\"Temperature (F)\")\nax.margins(.5, .5)\nfig.tight_layout()\nfig.show()", "C:\\Users\\salol\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:12: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.\n if sys.path[0] == '':\n" ], [ "# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.\n# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation\n\n", "_____no_output_____" ], [ "# Create a query that will calculate the daily normals \n# (i.e. 
the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)\n\ndef daily_normals(date):\n \"\"\"Daily Normals.\n \n Args:\n date (str): A date string in the format '%m-%d'\n \n Returns:\n A list of tuples containing the daily normals, tmin, tavg, and tmax\n \n \"\"\"\n \n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n return session.query(*sel).filter(func.strftime(\"%m-%d\", Measurement.date) == date).all()\n \ndaily_normals(\"01-01\")", "_____no_output_____" ], [ "# calculate the daily normals for your trip\n# push each tuple of calculations into a list called `normals`\n\n# Set the start and end date of the trip\n\n# Use the start and end date to create a range of dates\n\n# Stip off the year and save a list of %m-%d strings\n\n# Loop through the list of %m-%d strings and calculate the normals for each date\n", "_____no_output_____" ], [ "# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index\n", "_____no_output_____" ], [ "# Plot the daily normals as an area plot with `stacked=False`\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d067346623fd1d34b296073dd42d33d62a074630
7,295
ipynb
Jupyter Notebook
remote_sensing_lesson/remote_sensing_tgo_py_blank.ipynb
esaSPICEservice/epsc2017-workshop
5c8f2e5211308236d4a7054076e3e61ecf45df06
[ "MIT" ]
4
2018-09-19T11:56:22.000Z
2018-09-28T23:01:31.000Z
remote_sensing_lesson/remote_sensing_tgo_py_blank.ipynb
esaSPICEservice/epsc2017-workshop
5c8f2e5211308236d4a7054076e3e61ecf45df06
[ "MIT" ]
null
null
null
remote_sensing_lesson/remote_sensing_tgo_py_blank.ipynb
esaSPICEservice/epsc2017-workshop
5c8f2e5211308236d4a7054076e3e61ecf45df06
[ "MIT" ]
2
2018-09-19T12:43:38.000Z
2019-09-16T13:59:16.000Z
32.713004
249
0.616587
[ [ [ "# Remote Sensing Hands-On Lesson, using TGO\n\n \n EPSC Conference, Berlin, September 18, 2018\n \n \n## Overview\n\n \n In this lesson you will develop a series of simple programs that\n demonstrate the usage of SpiceyPy to compute a variety of different\n geometric quantities applicable to experiments carried out by a remote\n sensing instrument flown on an interplanetary spacecraft. This\n particular lesson focuses on a spectrometer flying on the ExoMars2016 TGO\n spacecraft, but many of the concepts are easily extended and generalized\n to other scenarios.", "_____no_output_____" ], [ "## Importing SpiceyPy and Loading the Kernels\n", "_____no_output_____" ], [ "## Time Conversion \n\n\nWrite a program that prompts the user for an input UTC time string,\nconverts it to the following time systems and output formats:\n \n* Ephemeris Time (ET) in seconds past J2000\n* Calendar Ephemeris Time\n* Spacecraft Clock Time\n \nand displays the results. Use the program to convert \"2018 JUN 11\n19:32:00\" UTC into these alternate systems.", "_____no_output_____" ], [ "## Obtaining Target States and Positions\n \nWrite a program that prompts the user for an input UTC time string,\ncomputes the following quantities at that epoch:\n \n* The apparent state of Mars as seen from ExoMars2016 TGO in the J2000 frame, in kilometers and kilometers/second. This vector itself is not of any particular interest, but it is a useful intermediate quantity in some geometry calculations.\n \n* The apparent position of the Earth as seen from ExoMars2016 TGO in the J2000 frame, in kilometers.\n \n* The one-way light time between ExoMars2016 TGO and the apparent position of Earth, in seconds.\n \n* The apparent position of the Sun as seen from Mars in the J2000 frame (J2000), in kilometers.\n \n* The actual (geometric) distance between the Sun and Mars, in astronomical units.\n \nand displays the results. 
Use the program to compute these quantities at\n\"2018 JUN 11 19:32:00\" UTC.", "_____no_output_____" ], [ "## Spacecraft Orientation and Reference Frames\n\n \nWrite a program that prompts the user for an input time string, and\ncomputes and displays the following at the epoch of interest:\n \n* The apparent state of Mars as seen from ExoMars2016 TGO in the IAU_MARS body-fixed frame. This vector itself is not of any particular interest, but it is a useful intermediate quantity in some geometry calculations.\n \n* The angular separation between the apparent position of Mars as seen from ExoMars2016 TGO and the nominal instrument view direction.\n \n* The nominal instrument view direction is not provided by any kernel variable, but it is indicated in the ExoMars2016 TGO frame kernel.\n \nUse the program to compute these quantities at the epoch 2018 JUN 11\n 19:32:00 UTC.", "_____no_output_____" ], [ "## Computing Sub-s/c and Sub-solar Points on an Ellipsoid and a DSK \n\n \nWrite a program that prompts the user for an input UTC time string and computes the following quantities at that epoch:\n \n* The apparent sub-observer point of ExoMars2016 TGO on Mars, in the body fixed frame IAU_MARS, in kilometers.\n \n* The apparent sub-solar point on Mars, as seen from ExoMars2016 TGO in the body fixed frame IAU_MARS, in kilometers.\n \nThe program computes each point twice: once using an ellipsoidal shape model and the\n \n near point/ellipsoid\n \ndefinition, and once using a DSK shape model and the\n \n nadir/dsk/unprioritized\n \ndefinition.\n\nThe program displays the results. 
Use the program to compute these\n quantities at 2018 JUN 11 19:32:00 UTC.", "_____no_output_____" ], [ "## Intersecting Vectors with an Ellipsoid and a DSK (fovint)\n \n \n Write a program that prompts the user for an input UTC time string and,\n for that time, computes the intersection of the ExoMars-16 TGO NOMAD LNO\n Nadir aperture boresight and field of view (FOV) boundary vectors with\n the surface of Mars. Compute each intercept twice: once with Mars' shape\n modeled as an ellipsoid, and once with Mars' shape modeled by DSK data.\n The program presents each point of intersection as\n \n* A Cartesian vector in the IAU_MARS frame\n* Planetocentric (latitudinal) coordinates in the IAU_MARS frame.\n \nFor each of the camera FOV boundary and boresight vectors, if an\n intersection is found, the program displays the results of the above\n computations, otherwise it indicates no intersection exists.\n \n At each point of intersection compute the following:\n \n* Phase angle\n* Solar incidence angle\n* Emission angle\n \nThese angles should be computed using both ellipsoidal and DSK shape\n models.\n \n Additionally compute the local solar time at the intercept of the\n spectrometer aperture boresight with the surface of Mars, using both\n ellipsoidal and DSK shape models.\n Use this program to compute values at 2018 JUN 11 19:32:00 UTC", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d067373c87708d6fe6fc319e8bfe446d548fbb2d
20,559
ipynb
Jupyter Notebook
circuit.ipynb
hongyehu/Sim-Clifford
00be6a479361fb9ee9b6e240f631a1030b881b2f
[ "MIT" ]
3
2021-11-18T13:39:15.000Z
2022-03-16T02:37:32.000Z
circuit.ipynb
hongyehu/Sim-Clifford
00be6a479361fb9ee9b6e240f631a1030b881b2f
[ "MIT" ]
null
null
null
circuit.ipynb
hongyehu/Sim-Clifford
00be6a479361fb9ee9b6e240f631a1030b881b2f
[ "MIT" ]
null
null
null
21.91791
391
0.494674
[ [ [ "import numpy\nfrom context import vaeqst", "_____no_output_____" ], [ "import numpy\nfrom context import base", "_____no_output_____" ], [ "base.RandomCliffordGate(0,1)", "_____no_output_____" ] ], [ [ "# Random Clifford Circuit", "_____no_output_____" ], [ "## RandomCliffordGate", "_____no_output_____" ], [ "`RandomClifordGate(*qubits)` represents a random Clifford gate acting on a set of qubits. There is no further parameter to specify, as it is not any particular gate, but a placeholder for a generic random Clifford gate.\n\n**Parameters**\n- `*qubits`: indices of the set of qubits on which the gate acts on.\n\nExample:", "_____no_output_____" ] ], [ [ "gate = vaeqst.RandomCliffordGate(0,1)\ngate", "_____no_output_____" ] ], [ [ "`RandomCliffordGate.random_clifford_map()` evokes a random sampling of the Clifford unitary, return in the form of operator mapping table $M$ and the corresponding sign indicator $h$. Such that under the mapping, any Pauli operator $\\sigma_g$ specified by the binary representation $g$ (and localized within the gate support) gets mapped to\n$$\\sigma_g \\to \\prod_{i=1}^{2n} (-)^{h_i}\\sigma_{M_i}^{g_i}.$$\nThe binary representation is in the $g=(x_0,z_0,x_1,z_1,\\cdots)$ basis.", "_____no_output_____" ] ], [ [ "gate.random_clifford_map()", "_____no_output_____" ] ], [ [ "## RandomCliffordLayer", "_____no_output_____" ], [ "`RandomCliffordLayer(*gates)` represents a layer of random Clifford gates. \n\n**Parameters:**\n* `*gates`: quantum gates contained in the layer.\n\nThe gates in the same layer should not overlap with each other (all gates need to commute). 
To ensure this, we do not manually add gates to the layer, but using the higher level function `.gate()` provided by `RandomCliffordCircuit` (see discussion later).\n\nExample:", "_____no_output_____" ] ], [ [ "layer = vaeqst.RandomCliffordLayer(vaeqst.RandomCliffordGate(0,1),vaeqst.RandomCliffordGate(3,5))\nlayer", "_____no_output_____" ] ], [ [ "It hosts a list of gates:", "_____no_output_____" ] ], [ [ "layer.gates", "_____no_output_____" ] ], [ [ "Given the total number of qubits $N$, the layer can sample the Clifford unitary (as product of each gate) $U=\\prod_{a}U_a$, and represent it as a single operator mapping (because gates do not overlap, so they maps operators in different supports independently).", "_____no_output_____" ] ], [ [ "layer.random_clifford_map(6)", "_____no_output_____" ] ], [ [ "## RandomCliffordCircuit", "_____no_output_____" ], [ "`RandomCliffordCircuit()` represents a quantum circuit of random Clifford gates.", "_____no_output_____" ], [ "### Methods", "_____no_output_____" ], [ "#### Construct the Circuit\n\nExample: create a random Clifford circuit.", "_____no_output_____" ] ], [ [ "circ = vaeqst.RandomCliffordCircuit()", "_____no_output_____" ] ], [ [ "Use `.gate(*qubits)` to add random Clifford gates to the circuit.", "_____no_output_____" ] ], [ [ "circ.gate(0,1)\ncirc.gate(2,4)\ncirc.gate(1,4)\ncirc.gate(0,2)\ncirc.gate(3,5)\ncirc.gate(3,4)\ncirc", "_____no_output_____" ] ], [ [ "Gates will automatically arranged into layers. 
Each new gate added to the circuit will commute through the layers if it is not blocked by the existing gates.", "_____no_output_____" ], [ "If the number of qubits `.N` is not explicitly defined, it will be dynamically infered from the circuit width, as the largest qubit index of all gates + 1.", "_____no_output_____" ] ], [ [ "circ.N", "_____no_output_____" ] ], [ [ "#### Navigate in the Circuit", "_____no_output_____" ], [ "`.layers_forward()` and `.layers_backward()` provides two generators to iterate over layers in forward and backward order resepctively.", "_____no_output_____" ] ], [ [ "list(circ.layers_forward())", "_____no_output_____" ], [ "list(circ.layers_backward())", "_____no_output_____" ] ], [ [ "`.first_layer` and `.last_layer` points to the first and the last layers.", "_____no_output_____" ] ], [ [ "circ.first_layer", "_____no_output_____" ], [ "circ.last_layer", "_____no_output_____" ] ], [ [ "Use `.next_layer` and `.prev_layer` to move forward and backward.", "_____no_output_____" ] ], [ [ "circ.first_layer.next_layer, circ.last_layer.prev_layer", "_____no_output_____" ] ], [ [ "Locate a gate in the circuit.", "_____no_output_____" ] ], [ [ "circ.first_layer.next_layer.next_layer.gates[0]", "_____no_output_____" ] ], [ [ "#### Apply Circuit to State", "_____no_output_____" ], [ "`.forward(state)` and `.backward(state)` applies the circuit to transform the state forward / backward. 
\n* Each call will sample a new random realization of the random Clifford circuit.\n* The transformation will create a new state, the original state remains untouched.", "_____no_output_____" ] ], [ [ "rho = vaeqst.StabilizerState(6, r=0)\nrho", "_____no_output_____" ], [ "circ.forward(rho)", "_____no_output_____" ], [ "circ.backward(rho)", "_____no_output_____" ] ], [ [ "#### POVM", "_____no_output_____" ], [ "`.povm(nsample)` provides a generator to sample $n_\\text{sample}$ from the prior POVM based on the circuit by back evolution.", "_____no_output_____" ] ], [ [ "list(circ.povm(3))", "_____no_output_____" ] ], [ [ "## BrickWallRCC", "_____no_output_____" ], [ "`BrickWallRCC(N, depth)` is a subclass of `RandomCliffordCircuit`. It represents the circuit with 2-qubit gates arranged following a brick wall pattern.", "_____no_output_____" ] ], [ [ "circ = vaeqst.BrickWallRCC(16,2)\ncirc", "_____no_output_____" ] ], [ [ "Create an inital state as a computational basis state.", "_____no_output_____" ] ], [ [ "rho = vaeqst.StabilizerState(16, r=0)\nrho", "_____no_output_____" ] ], [ [ "Backward evolve the state to obtain the measurement operator.", "_____no_output_____" ] ], [ [ "circ.backward(rho)", "_____no_output_____" ] ], [ [ "## OnSiteRCC", "_____no_output_____" ], [ "`OnSiteRCC(N)` is a subclass of `RandomCliffordCircuit`. It represents the circuit of a single layer of on-site Clifford gates. It can be used to generate random Pauli states.", "_____no_output_____" ] ], [ [ "circ = vaeqst.OnSiteRCC(16)\ncirc", "_____no_output_____" ], [ "rho = vaeqst.StabilizerState(16, r=0)\ncirc.backward(rho)", "_____no_output_____" ] ], [ [ "## GlobalRCC", "_____no_output_____" ], [ "`GlobalRCC(N)` is a subclass of `RandomCliffordCircuit`. It represents the circuit consists of a global Clifford gate. 
It can be used to generate Clifford states.", "_____no_output_____" ] ], [ [ "circ = vaeqst.GlobalRCC(16)\ncirc", "_____no_output_____" ], [ "rho = vaeqst.StabilizerState(16, r=0)\ncirc.backward(rho)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
d0675e01ff458d4a28d87c01380cbbfb589a2b40
11,119
ipynb
Jupyter Notebook
TWPA/notebooks/testsim928.ipynb
biqute/QTLab2122
4d53d4c660bb5931615d8652e698f6d689a4dead
[ "MIT" ]
3
2021-11-30T18:41:11.000Z
2021-12-12T12:27:14.000Z
TWPA/notebooks/testsim928.ipynb
biqute/QTLab2122
4d53d4c660bb5931615d8652e698f6d689a4dead
[ "MIT" ]
null
null
null
TWPA/notebooks/testsim928.ipynb
biqute/QTLab2122
4d53d4c660bb5931615d8652e698f6d689a4dead
[ "MIT" ]
null
null
null
56.156566
1,641
0.640345
[ [ [ "#!/usr/bin/env python3 Line 1\n# -*- coding: utf-8 -*- Line 2\n#----------------------------------------------------------------------------\n# Created By : Celotto Andrea, Palumbo Emanuele, Zafferani Alessandro Line 3\n# Created Date: 16/02/2022 12:30\n# version ='Beta 1.0'\n\nimport visa \nimport numpy as np\nimport serial\nimport time\nimport os\nfrom pathlib import Path\npath = os.getcwd()\npath = Path(path)\nprint(Path(str(path.parent) + '\\\\Classes\\\\') )\n\nimport sys\nsys.path.append(str(path.parent) + '\\\\Classes\\\\')\nfrom SIM928 import *", "C:\\Users\\oper\\Desktop\\labparamp\\QTLab2122\\TWPA\\Classes\n" ] ], [ [ "# Connessione", "_____no_output_____" ] ], [ [ "sim = SIM928('COM1','4')", "_____no_output_____" ], [ "sim.query('*IDN')", "_____no_output_____" ] ], [ [ "# Comandi", "_____no_output_____" ] ], [ [ "#accensione\nsim.set_output(1)", "_____no_output_____" ], [ "#set del voltaggio\nsim.set_voltage(4e-3)", "_____no_output_____" ], [ "sim.ask_voltage()", "Voltage = 0.004\r\n V\n" ] ], [ [ "# Disconnessione", "_____no_output_____" ] ], [ [ "sim.close_all()", "Stanford_Research_Systems,SIM928,s/n030465,ver2.2\n\nStanford_Research_Systems,SIM900,s/n152741,ver3.6\n\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d067625bbf97cca411acc00da573060613830395
15,584
ipynb
Jupyter Notebook
docs_src/train.ipynb
hiroaki-shishido/fastai
1e4568fc04d520d10dd71ea2f8aa748f5c99e1ec
[ "Apache-2.0" ]
1
2019-02-22T01:50:35.000Z
2019-02-22T01:50:35.000Z
docs_src/train.ipynb
hiroaki-shishido/fastai
1e4568fc04d520d10dd71ea2f8aa748f5c99e1ec
[ "Apache-2.0" ]
null
null
null
docs_src/train.ipynb
hiroaki-shishido/fastai
1e4568fc04d520d10dd71ea2f8aa748f5c99e1ec
[ "Apache-2.0" ]
null
null
null
28.028777
539
0.534458
[ [ [ "# Additional training functions", "_____no_output_____" ], [ "[`train`](/train.html#train) provides a number of extension methods that are added to [`Learner`](/basic_train.html#Learner) (see below for a list and details), along with three simple callbacks:\n\n- [`ShowGraph`](/train.html#ShowGraph)\n- [`GradientClipping`](/train.html#GradientClipping)\n- [`BnFreeze`](/train.html#BnFreeze)", "_____no_output_____" ] ], [ [ "from fastai.gen_doc.nbdoc import *\nfrom fastai.train import *\nfrom fastai.vision import *\n", "_____no_output_____" ] ], [ [ "## [`Learner`](/basic_train.html#Learner) extension methods", "_____no_output_____" ], [ "These methods are automatically added to all [`Learner`](/basic_train.html#Learner) objects created after importing this module. They provide convenient access to a number of callbacks, without requiring them to be manually created.", "_____no_output_____" ] ], [ [ "show_doc(fit_one_cycle)", "_____no_output_____" ], [ "show_doc(one_cycle_scheduler)", "_____no_output_____" ] ], [ [ "See [`OneCycleScheduler`](/callbacks.one_cycle.html#OneCycleScheduler) for details.", "_____no_output_____" ] ], [ [ "show_doc(lr_find)", "_____no_output_____" ] ], [ [ "See [`LRFinder`](/callbacks.lr_finder.html#LRFinder) for details.", "_____no_output_____" ] ], [ [ "show_doc(to_fp16)", "_____no_output_____" ] ], [ [ "See [`MixedPrecision`](/callbacks.fp16.html#MixedPrecision) for details.", "_____no_output_____" ] ], [ [ "show_doc(to_fp32)", "_____no_output_____" ], [ "show_doc(mixup)", "_____no_output_____" ] ], [ [ "See [`MixUpCallback`](/callbacks.mixup.html#MixUpCallback) for more details.", "_____no_output_____" ], [ "## Additional callbacks", "_____no_output_____" ], [ "We'll show examples below using our MNIST sample. 
As usual the `on_something` methods are directly called by the fastai library, no need to call them yourself.", "_____no_output_____" ] ], [ [ "path = untar_data(URLs.MNIST_SAMPLE)\ndata = ImageDataBunch.from_folder(path)", "_____no_output_____" ], [ "show_doc(ShowGraph, title_level=3)", "_____no_output_____" ] ], [ [ "```python\nlearn = create_cnn(data, models.resnet18, metrics=accuracy, callback_fns=ShowGraph)\nlearn.fit(3)\n```", "_____no_output_____" ], [ "![Training graph](imgs/train_graph.gif)", "_____no_output_____" ] ], [ [ "show_doc(ShowGraph.on_epoch_end)", "_____no_output_____" ], [ "show_doc(GradientClipping)", "_____no_output_____" ], [ "learn = create_cnn(data, models.resnet18, metrics=accuracy,\n callback_fns=partial(GradientClipping, clip=0.1))\nlearn.fit(1)", "_____no_output_____" ], [ "show_doc(GradientClipping.on_backward_end)", "_____no_output_____" ], [ "show_doc(BnFreeze)", "_____no_output_____" ] ], [ [ "For batchnorm layers where `requires_grad==False`, you generally don't want to update their moving average statistics, in order to avoid the model's statistics getting out of sync with its pre-trained weights. You can add this callback to automate this freezing of statistics (internally, it calls `eval` on these layers).", "_____no_output_____" ] ], [ [ "learn = create_cnn(data, models.resnet18, metrics=accuracy, callback_fns=BnFreeze)\nlearn.fit(1)", "_____no_output_____" ], [ "show_doc(BnFreeze.on_epoch_begin)", "_____no_output_____" ] ], [ [ "## Undocumented Methods - Methods moved below this line will intentionally be hidden", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d067640e9df566eae92c4200f86aae68c1dd8f44
15,957
ipynb
Jupyter Notebook
problems/euler_3.ipynb
owenGnet/project-euler-jupyter-templates
5acbb8375af26ceecb811047dd6272e1e8587929
[ "MIT" ]
null
null
null
problems/euler_3.ipynb
owenGnet/project-euler-jupyter-templates
5acbb8375af26ceecb811047dd6272e1e8587929
[ "MIT" ]
null
null
null
problems/euler_3.ipynb
owenGnet/project-euler-jupyter-templates
5acbb8375af26ceecb811047dd6272e1e8587929
[ "MIT" ]
null
null
null
48.354545
492
0.589835
[ [ [ "<div style=\"text-align:center;\">\n<img alt=\"\" <img src=\"./images/logo_default.png\"/><br/>\n</div>", "_____no_output_____" ], [ "<h2 style=\"color: #6b4e3d;\">Amicable numbers</h2>\n<div id=\"problem_info\" style=\"font-family: Consolas;\"><h3>Problem 21</h3></div>\n<div class=\"problem_content\" role=\"problem\" style='background-color: #fff; color: #111; padding: 20px;font-family: \"Segoe UI\", Arial, sans-serif; font-size: 110%;border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;'>\n<p>Let d(<i>n</i>) be defined as the sum of proper divisors of <i>n</i> (numbers less than <i>n</i> which divide evenly into <i>n</i>).<br>\nIf d(<i>a</i>) = <i>b</i> and d(<i>b</i>) = <i>a</i>, where <i>a</i> ≠ <i>b</i>, then <i>a</i> and <i>b</i> are an amicable pair and each of <i>a</i> and <i>b</i> are called amicable numbers.</br></p>\n<p>For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.</p>\n<p>Evaluate the sum of all the amicable numbers under 10000.</p>\n</div>", "_____no_output_____" ], [ "<h2 style=\"color: #6b4e3d;\">Names scores</h2>\n<div id=\"problem_info\" style=\"font-family: Consolas;\"><h3>Problem 22</h3></div>\n<div class=\"problem_content\" role=\"problem\" style='background-color: #fff; color: #111; padding: 20px;font-family: \"Segoe UI\", Arial, sans-serif; font-size: 110%;border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;'>\n<p>Using <a href=\"project/resources/p022_names.txt\">names.txt</a> (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names, begin by sorting it into alphabetical order. 
Then working out the alphabetical value for each name, multiply this value by its alphabetical position in the list to obtain a name score.</p>\n<p>For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would obtain a score of 938 × 53 = 49714.</p>\n<p>What is the total of all the name scores in the file?</p>\n</div>", "_____no_output_____" ], [ "<h2 style=\"color: #6b4e3d;\">Non-abundant sums</h2>\n<div id=\"problem_info\" style=\"font-family: Consolas;\"><h3>Problem 23</h3></div>\n<div class=\"problem_content\" role=\"problem\" style='background-color: #fff; color: #111; padding: 20px;font-family: \"Segoe UI\", Arial, sans-serif; font-size: 110%;border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;'>\n<p>A perfect number is a number for which the sum of its proper divisors is exactly equal to the number. For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.</p>\n<p>A number <var>n</var> is called deficient if the sum of its proper divisors is less than <var>n</var> and it is called abundant if this sum exceeds <var>n</var>.</p>\n<p>As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be written as the sum of two abundant numbers is 24. By mathematical analysis, it can be shown that all integers greater than 28123 can be written as the sum of two abundant numbers. 
However, this upper limit cannot be reduced any further by analysis even though it is known that the greatest number that cannot be expressed as the sum of two abundant numbers is less than this limit.</p>\n<p>Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers.</p>\n</div>", "_____no_output_____" ], [ "<h2 style=\"color: #6b4e3d;\">Lexicographic permutations</h2>\n<div id=\"problem_info\" style=\"font-family: Consolas;\"><h3>Problem 24</h3></div>\n<div class=\"problem_content\" role=\"problem\" style='background-color: #fff; color: #111; padding: 20px;font-family: \"Segoe UI\", Arial, sans-serif; font-size: 110%;border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;'>\n<p>A permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it lexicographic order. The lexicographic permutations of 0, 1 and 2 are:</p>\n<p style=\"text-align:center;\">012   021   102   120   201   210</p>\n<p>What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?</p>\n</div>", "_____no_output_____" ], [ "<h2 style=\"color: #6b4e3d;\">1000-digit Fibonacci number</h2>\n<div id=\"problem_info\" style=\"font-family: Consolas;\"><h3>Problem 25</h3></div>\n<div class=\"problem_content\" role=\"problem\" style='background-color: #fff; color: #111; padding: 20px;font-family: \"Segoe UI\", Arial, sans-serif; font-size: 110%;border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;'>\n<p>The Fibonacci sequence is defined by the recurrence relation:</p>\n<blockquote>F<sub><i>n</i></sub> = F<sub><i>n</i>−1</sub> + F<sub><i>n</i>−2</sub>, where F<sub>1</sub> = 1 and F<sub>2</sub> = 1.</blockquote>\n<p>Hence the first 12 terms will be:</p>\n<blockquote>F<sub>1</sub> = 1<br>\nF<sub>2</sub> = 1<br/>\nF<sub>3</sub> = 2<br/>\nF<sub>4</sub> = 3<br/>\nF<sub>5</sub> = 5<br/>\nF<sub>6</sub> 
= 8<br/>\nF<sub>7</sub> = 13<br/>\nF<sub>8</sub> = 21<br/>\nF<sub>9</sub> = 34<br/>\nF<sub>10</sub> = 55<br/>\nF<sub>11</sub> = 89<br/>\nF<sub>12</sub> = 144</br></blockquote>\n<p>The 12th term, F<sub>12</sub>, is the first term to contain three digits.</p>\n<p>What is the index of the first term in the Fibonacci sequence to contain 1000 digits?</p>\n</div>", "_____no_output_____" ], [ "<h2 style=\"color: #6b4e3d;\">Reciprocal cycles</h2>\n<div id=\"problem_info\" style=\"font-family: Consolas;\"><h3>Problem 26</h3></div>\n<div class=\"problem_content\" role=\"problem\" style='background-color: #fff; color: #111; padding: 20px;font-family: \"Segoe UI\", Arial, sans-serif; font-size: 110%;border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;'>\n<p>A unit fraction contains 1 in the numerator. The decimal representation of the unit fractions with denominators 2 to 10 are given:</p>\n<blockquote>\n<table><tr><td><sup>1</sup>/<sub>2</sub></td><td>= </td><td>0.5</td>\n</tr><tr><td><sup>1</sup>/<sub>3</sub></td><td>= </td><td>0.(3)</td>\n</tr><tr><td><sup>1</sup>/<sub>4</sub></td><td>= </td><td>0.25</td>\n</tr><tr><td><sup>1</sup>/<sub>5</sub></td><td>= </td><td>0.2</td>\n</tr><tr><td><sup>1</sup>/<sub>6</sub></td><td>= </td><td>0.1(6)</td>\n</tr><tr><td><sup>1</sup>/<sub>7</sub></td><td>= </td><td>0.(142857)</td>\n</tr><tr><td><sup>1</sup>/<sub>8</sub></td><td>= </td><td>0.125</td>\n</tr><tr><td><sup>1</sup>/<sub>9</sub></td><td>= </td><td>0.(1)</td>\n</tr><tr><td><sup>1</sup>/<sub>10</sub></td><td>= </td><td>0.1</td>\n</tr></table></blockquote>\n<p>Where 0.1(6) means 0.166666..., and has a 1-digit recurring cycle. 
It can be seen that <sup>1</sup>/<sub>7</sub> has a 6-digit recurring cycle.</p>\n<p>Find the value of <i>d</i> &lt; 1000 for which <sup>1</sup>/<sub><i>d</i></sub> contains the longest recurring cycle in its decimal fraction part.</p>\n</div>", "_____no_output_____" ], [ "<h2 style=\"color: #6b4e3d;\">Quadratic primes</h2>\n<div id=\"problem_info\" style=\"font-family: Consolas;\"><h3>Problem 27</h3></div>\n<div class=\"problem_content\" role=\"problem\" style='background-color: #fff; color: #111; padding: 20px;font-family: \"Segoe UI\", Arial, sans-serif; font-size: 110%;border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;'>\n<p>Euler discovered the remarkable quadratic formula:</p>\n<p style=\"text-align:center;\">$n^2 + n + 41$</p>\n<p>It turns out that the formula will produce 40 primes for the consecutive integer values $0 \\le n \\le 39$. However, when $n = 40, 40^2 + 40 + 41 = 40(40 + 1) + 41$ is divisible by 41, and certainly when $n = 41, 41^2 + 41 + 41$ is clearly divisible by 41.</p>\n<p>The incredible formula $n^2 - 79n + 1601$ was discovered, which produces 80 primes for the consecutive values $0 \\le n \\le 79$. The product of the coefficients, −79 and 1601, is −126479.</p>\n<p>Considering quadratics of the form:</p>\n<blockquote>\n$n^2 + an + b$, where $|a| &lt; 1000$ and $|b| \\le 1000$<br><br/><div>where $|n|$ is the modulus/absolute value of $n$<br/>e.g. 
$|11| = 11$ and $|-4| = 4$</div>\n</br></blockquote>\n<p>Find the product of the coefficients, $a$ and $b$, for the quadratic expression that produces the maximum number of primes for consecutive values of $n$, starting with $n = 0$.</p>\n</div>", "_____no_output_____" ], [ "<h2 style=\"color: #6b4e3d;\">Number spiral diagonals</h2>\n<div id=\"problem_info\" style=\"font-family: Consolas;\"><h3>Problem 28</h3></div>\n<div class=\"problem_content\" role=\"problem\" style='background-color: #fff; color: #111; padding: 20px;font-family: \"Segoe UI\", Arial, sans-serif; font-size: 110%;border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;'>\n<p>Starting with the number 1 and moving to the right in a clockwise direction a 5 by 5 spiral is formed as follows:</p>\n<p style=\"text-align:center;font-family:'courier new';\"><span style=\"color:#ff0000;font-family:'courier new';\"><b>21</b></span> 22 23 24 <span style=\"color:#ff0000;font-family:'courier new';\"><b>25</b></span><br>\n20  <span style=\"color:#ff0000;font-family:'courier new';\"><b>7</b></span>  8  <span style=\"color:#ff0000;font-family:'courier new';\"><b>9</b></span> 10<br/>\n19  6  <span style=\"color:#ff0000;font-family:'courier new';\"><b>1</b></span>  2 11<br/>\n18  <span style=\"color:#ff0000;font-family:'courier new';\"><b>5</b></span>  4  <span style=\"color:#ff0000;font-family:'courier new';\"><b>3</b></span> 12<br/><span style=\"color:#ff0000;font-family:'courier new';\"><b>17</b></span> 16 15 14 <span style=\"color:#ff0000;font-family:'courier new';\"><b>13</b></span></br></p>\n<p>It can be verified that the sum of the numbers on the diagonals is 101.</p>\n<p>What is the sum of the numbers on the diagonals in a 1001 by 1001 spiral formed in the same way?</p>\n</div>", "_____no_output_____" ], [ "<h2 style=\"color: #6b4e3d;\">Distinct powers</h2>\n<div id=\"problem_info\" style=\"font-family: Consolas;\"><h3>Problem 29</h3></div>\n<div class=\"problem_content\" role=\"problem\" 
style='background-color: #fff; color: #111; padding: 20px;font-family: \"Segoe UI\", Arial, sans-serif; font-size: 110%;border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;'>\n<p>Consider all integer combinations of <i>a</i><sup><i>b</i></sup> for 2 ≤ <i>a</i> ≤ 5 and 2 ≤ <i>b</i> ≤ 5:</p>\n<blockquote>2<sup>2</sup>=4, 2<sup>3</sup>=8, 2<sup>4</sup>=16, 2<sup>5</sup>=32<br>\n3<sup>2</sup>=9, 3<sup>3</sup>=27, 3<sup>4</sup>=81, 3<sup>5</sup>=243<br/>\n4<sup>2</sup>=16, 4<sup>3</sup>=64, 4<sup>4</sup>=256, 4<sup>5</sup>=1024<br/>\n5<sup>2</sup>=25, 5<sup>3</sup>=125, 5<sup>4</sup>=625, 5<sup>5</sup>=3125<br/></br></blockquote>\n<p>If they are then placed in numerical order, with any repeats removed, we get the following sequence of 15 distinct terms:</p>\n<p style=\"text-align:center;\">4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125</p>\n<p>How many distinct terms are in the sequence generated by <i>a</i><sup><i>b</i></sup> for 2 ≤ <i>a</i> ≤ 100 and 2 ≤ <i>b</i> ≤ 100?</p>\n</div>", "_____no_output_____" ], [ "<h2 style=\"color: #6b4e3d;\">Digit fifth powers</h2>\n<div id=\"problem_info\" style=\"font-family: Consolas;\"><h3>Problem 30</h3></div>\n<div class=\"problem_content\" role=\"problem\" style='background-color: #fff; color: #111; padding: 20px;font-family: \"Segoe UI\", Arial, sans-serif; font-size: 110%;border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;'>\n<p>Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits:</p>\n<blockquote>1634 = 1<sup>4</sup> + 6<sup>4</sup> + 3<sup>4</sup> + 4<sup>4</sup><br>\n8208 = 8<sup>4</sup> + 2<sup>4</sup> + 0<sup>4</sup> + 8<sup>4</sup><br/>\n9474 = 9<sup>4</sup> + 4<sup>4</sup> + 7<sup>4</sup> + 4<sup>4</sup></br></blockquote>\n<p class=\"info\">As 1 = 1<sup>4</sup> is not a sum it is not included.</p>\n<p>The sum of these numbers is 1634 + 8208 + 9474 = 19316.</p>\n<p>Find the sum of all the numbers that can be written as the sum of fifth powers of 
their digits.</p>\n</div>", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0676becb285732f84b2b4fe2ea4dbee420faaca
15,337
ipynb
Jupyter Notebook
tema_9.ipynb
andelpe/curso-intro-python
6b3521aa887ae26e0f542f2dd105e9bb003db495
[ "Apache-2.0" ]
1
2020-06-08T10:27:24.000Z
2020-06-08T10:27:24.000Z
tema_9.ipynb
andelpe/curso-intro-python
6b3521aa887ae26e0f542f2dd105e9bb003db495
[ "Apache-2.0" ]
null
null
null
tema_9.ipynb
andelpe/curso-intro-python
6b3521aa887ae26e0f542f2dd105e9bb003db495
[ "Apache-2.0" ]
1
2020-06-08T09:49:24.000Z
2020-06-08T09:49:24.000Z
38.151741
259
0.611658
[ [ [ "<font size=6>\n\n<b>Curso de Programación en Python</b>\n</font>\n\n<font size=4>\n \nCurso de formación interna, CIEMAT. <br/>\nMadrid, Octubre de 2021\n\nAntonio Delgado Peris\n</font>\n\nhttps://github.com/andelpe/curso-intro-python/\n\n<br/>", "_____no_output_____" ], [ "# Tema 9 - El ecosistema Python: librería estándar y otros paquetes populares", "_____no_output_____" ], [ "## Objetivos\n\n- Conocer algunos módulos de la librería estándar\n\n - Interacción con el propio intérprete\n - Interacción con el sistema operativo\n - Gestión del sistema de ficheros\n - Gestión de procesos y concurrencia\n - Desarrollo, depuración y perfilado\n - Números y matemáticas\n - Acceso y funcionalidad de red\n - Utilidades para manejo avanzado de funciones e iteradores\n\n\n- Introducir el ecosistema de librerías científicas de Python \n\n - La pila Numpy/SciPY\n - Gráficos\n - Matemáticas y estadística\n - Aprendizaje automático\n - Procesamiento del lenguaje natural\n - Biología\n - Física\n", "_____no_output_____" ], [ "## La librería estándar\n\nUno de los eslóganes de Python es _batteries included_. Se refiere a la cantidad de funcionalidad disponible en la instalación Python básica, sin necesidad de recurrir a paquetes externos.\n\nEn esta sección revisamos brevemente algunos de los módulos disponibles. 
Para muchas más información: https://docs.python.org/3/library/", "_____no_output_____" ], [ "### Interacción con el intérprete de Python: `sys`\n\nOfrece tanto información, como capacidad de manipular diversos aspectos del propio entorno de Python.\n\n- `sys.argv`: Lista con los argumentos pasados al programa en ejecución.\n- `sys.version`: String con la versión actual de Python.\n- `sys.stdin/out/err`: Objetos fichero usados por el intérprete para entrada, salida y error.\n- `sys.exit`: Función para acabar el programa.\n", "_____no_output_____" ], [ "### Interacción con el sistema operativo: `os`\n\nInterfaz _portable_ para funcionalidad que depende del sistema operativo.\n\nContiene funcionalidad muy variada, a veces de muy bajo nivel.\n\n- `os.environ`: diccionario con variables de entorno (modificable)\n- `os.getuid`, `os.getgid`, `os.getpid`...: Obtener UID, GID, process ID, etc. (Unix)\n- `os.uname`: información sobre el sistema operativo \n- `os.getcwd`, `os.chdir`, `os.mkdir`, `os.remove`, `os.stat`...: operaciones sobre el sistema de ficheros\n- `os.exec`, `os.fork`, `os.kill`... : gestión de procesos\n\nPara algunas de estas operaciones es más conveniente utilizar módulos más específicos, o de más alto nivel.\n\n### Operaciones sobre el sistema de ficheros\n\n- Para manipulación de _paths_, borrado, creación de directorios, etc.: `pathlib` (moderno), o `os.path` (clásico)\n- Expansión de _wildcards_ de nombres de fichero (Unix _globs_): `glob`\n- Para operaciones de copia (y otros) de alto nivel: `shutil`\n- Para ficheros y directorios temporales (de usar y tirar): `tempfile`\n\n### Gestión de procesos\n\n- `threading`: interfaz de alto nivel para gestión de _threads_.\n\n - Padece el problema del _Global Interpreter Lock_, de Python: es un _lock_ global, que asegura que solo un thread se está ejecutando en Python en un momento dado (excepto en pausas por I/O). 
Impide mejorar el rendimiento con múltiples CPUs.\n\n - `queue`: implementa colas multi-productor, multi-consumidor para un intercambio seguro de información entre múltiples _threads_.\n\n\n- `multiprocessing`: interfaz que imita al the `threading`, pero utiliza multi-proceso, en lugar de threads (evita el problema del GIL). Soporta Unix y Windows. Ofrece concurrencia local y remota.\n\n - El módulo `multiprocessing.shared_memory`: facilita la asignación y gestión de memoria compartida entre varios procesos.\n\n\n- `subprocess`: Permite lanzar y gestionar subprocesos (comandos externos) desde Python.\n\n - Para Python >= 3.5, se recomienda usar la función `run`, salvo casos complejos.\n", "_____no_output_____" ] ], [ [ "from subprocess import run\n\ndef showRes(res):\n print('\\n------- ret code:', res.returncode, '; err:', res.stderr)\n if res.stdout:\n print('\\n'.join(res.stdout.splitlines()[:3]))\n print()\n\n\nprint('NO SHELL')\nres = run(['ls', '-l'], capture_output=True, text=True)\nshowRes(res)\n\nprint('WITH SHELL')\nres = run('ls -l', shell=True, capture_output=True, text=True)\nshowRes(res)\n\nprint('NO OUTPUT')\nres = run(['ls', '-l'])\nshowRes(res)\n", "_____no_output_____" ], [ "print('ERROR NO-CHECK')\nres = run(['ls', '-l', 'XXXX'], capture_output=True, text=True)\nshowRes(res)\n\nprint('ERROR CHECK')\ntry:\n res = run(['ls', '-l', 'XXXX'], capture_output=True, check=True)\n showRes(res)\nexcept Exception as ex:\n print(f'--- Error of type {type(ex)}:\\n {ex}\\n')\n\nprint('NO OUTPUT')\nres = run(['ls', '-l', 'XXXX'])\nshowRes(res)", "_____no_output_____" ] ], [ [ "### Números y matemáticas\n\n- `math`: operaciones matemáticas definidas por el estándar de C (`cmath`, para números complejos)\n- `random`: generadores de números pseudo-aleatorios para varias distribuciones\n- `statistics`: estadísticas básicas \n\n### Manejo avanzado de funciones e iteradores\n\n- `itertools`: útiles para crear iteradores de forma eficiente.\n- `functools`: funciones 
de alto nivel que manipulan otras funciones\n- `operators`: funciones correspondientes a los operadores intrínsicos de Python", "_____no_output_____" ] ], [ [ "import operator\noperator.add(3, 4)", "_____no_output_____" ] ], [ [ "### Red \n\n- `socket`: operaciones de red de bajo nivel\n- `asyncio`: soporte para entornos de entrada/salida asíncrona\n- Existen varias librerías para interacción HTTP, pero se recomienda la librería externa `requests`.\n\n### Desarrollo, depuración y perfilado\n\n- `pydoc`: generación de documentación (HTML), a partir de los docstrings\n- Depuración\n\n - Muchos IDEs, y Jupyterlab, incluyen facilidades de depuración en sus entornos.\n - `pdb`: _Debugger_ oficial de Python\n \n - Correr scripts como `python3 -m pdb myscript.py`\n - Introducir un _break point_ con `import pdb; pdb.set_trace()`\n\n\n- `cProfile`: _Profiler_\n\n- `timeit`: Medición de tiempos de ejecución de código/scripts\n\n```python\n$ python3 -m timeit '\"-\".join(str(n) for n in range(100))'\n10000 loops, best of 5: 30.2 usec per loop\n\n>>> import timeit\n>>> timeit.timeit('\"-\".join(str(n) for n in range(100))', number=10000)\n0.3018611848820001\n \n%timeit \"-\".join(str(n) for n in range(100)) # Jupyter line mode \n\n%%timeit ... 
# Jupyter cell mode\n```\n\n\n- `unittest`: creación de tests para validación de código (_test-driven programming_)\n\n - La librería externa `pytest` simplifica algunas tareas, y es muy popular", "_____no_output_____" ], [ "### Números y matemáticas\n\n- `math`: operaciones matemáticas definidas por el estándar de C (`cmath`, para números complejos)\n- `random`: generadores de números pseudo-aleatorios para varias distribuciones\n- `statistics`: estadísticas básicas ", "_____no_output_____" ], [ "### Otros\n- `argparse`: procesado de argumentos y opciones por línea de comando\n - Mi recomendación es crearse un _esqueleto_ tipo como base para futuros scripts.\n- `re`: procesado de expresiones regulares\n- `time`, `datetime`: manipulación de fechas y tiempo (medición y representación del tiempo, deltas de tiempo, etc.)", "_____no_output_____" ], [ "## La pila NumPy/Scipy\n\nEste conjunto de librerías de código abierto constituye la base numérica, matemática, y de visualización sobre la que se construye el universo matemático/científico en Python.\n\n- **NumPy**: Paquete de propósito general para procesamiento de objetos _array_ (vectores y matrices), de altas prestaciones.\n\n - Sirve de base para la mayoría de los demás paquetes matemáticos.\n - Permite realizar operaciones matriciales eficientes (sin usar bucles explícitos) \n - Utiliza librerías compiladas (C y Fortran), con un API Python, para conseguir mejor rendimiento.\n\n\n- **SciPy**: Construida sobre NumPy, y como base de muchas de las siguientes, ofrece múltiples utilidades para integración numérica, interpolación, optimización, algebra lineal, procesado de señal y estadística.\n\n - No confundir la _librería SciPy_, con el proyecto o pila SciPy, que se refiere a todas las de esta sección.\n\n\n- **Matplotlib**: Librería de visualización (gráficos 2D) de referencia de Python.\n\n - También sirve de base para otras librerías, como _Seaborn_ o _Pandas_.\n\n\n- **Pandas**: Manipulación de datos de manera 
ágil y eficiente.\n\n - Utiliza un objeto _DataFrame_, que representa la información en columnas etiquetadas e indexadas.\n - Ofrece funcionalidades para buscar, filtrar, ordenar, transformar o extraer información.\n\n\n- **SymPy**: Librería de matemáticas simbólicas (al estilo de _Mathematica_)\n\n\n## Gráficos\n\n- **Seaborn**: Construida sobre Matplotlib ofrece un interfaz de alto nivel, para construir, de forma sencilla, gráficos avanzados para modelos estadísticos.\n\n- **Bokeh**: Librería para visualización interactiva de gráficos en web, o en Jupyter notebooks.\n\n- **Plotly**: Gráficos interactivos para web. Es parte de un proyecto mayor **_Dash_**, un entorno para construir aplicaciones web para análisis de datos en Python (sin escribir _javascript_).\n\n- **Scikit-image**: Algoritmos para _procesado_ de imágenes (diferente propósito que los anteriores).\n\n- Otras: **ggplot2/plotnine** (basadas en la librería _ggplot2_ de R), **Altair** (librería declarativa, basada en _Vega-Lite_), `Geoplotlib` y `Folium` (para construir mapas).\n\n\n## Matemáticas y estadística\n\n- **Statsmodel**: Estimación de modelos estadísticos, realización de tests y exploración de datos estadísticos.\n- **PyStan**: Inferencia Bayesiana.\n- **NetworkX**: Creación, manipulación y análisis de redes y grafos.\n\n## Machine Learning\n\n- **Scikit-learn**: Librería de aprendizaje automático de propósito general, construida sobre NumPy. 
Ofrece múltiples algoritmos de ML, como _support vector machines_, o _random forests_, así como muchas utilidades para pre- y postprocesado de datos.\n\n- **TensorFlow** y **PyTorch**: son dos librerías para programación de redes neuronales, incluyendo optimización para GPUs, muy extendidas.\n\n - **Keras**: Es un interfaz simplificado (de alto nivel) para el uso de TensorFlow.", "_____no_output_____" ], [ "## Otros\n\n### Procesamiento del Lenguaje Natural\n\nLas siguientes librerías ofrecen funcionalidades de análisis sintáctico y semántico de textos libres: \n\n- **GenSim**\n- **SpaCy** \n- **NLTK**\n\n### Biología\n\n- **Scikit-bio**: Estructuras de datos, algoritmos y recursos educativos para bioinformática.\n- **BioPython**: Herramientas para computación biológica.\n- **PyEnsembl**: Interfaz Python a Ensembl, base de datos de genómica.\n\n### Física\n\n- Astronomía: **Astropy**, y **PyFITS**\n- Física de altas energías: \n\n - **PyROOT**: interfaz Python a ROOT, entorno con ambición generalista, que ofrece muchas utilidades para análisis y almacenamiento de datos, estadística y visualización.\n\n - **Scikit-HEP**: colección de librerías que pretenden trabajar con datos ROOT utilizando código exclusivamente Python (integrado con Numpy), sin usar PyROOT. Algunas son **uproot**, **awkward array**, **coffea**. \n \n\n### Datos HDF5\n\n- **h5py**: Interfaz a datos HDF5 que trata de ofrecer toda la funcionalidad del interfaz C de HDF5 en Python, integrado con el los objetos y tipos NumPy, por lo que puede usarse en código Python de manera sencilla.\n\n- **pytables**: Otro interfaz a datos HDF5 con un interfaz a más alto nivel que `h5py`, y que ofrece funcionalidades adicionales al estilo de una base de datos (consultas complejas, indexado avanzado, optimización de computación con datos HDF5, etc.)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0676c8c4ee510880451deccdf108c192f874959
182,470
ipynb
Jupyter Notebook
Day04/1-classification/classificationV2.ipynb
kxu08/Bootcamp2019
fa61d80e3ba3842bc2685b6d6910a5c2711f39df
[ "BSD-3-Clause" ]
null
null
null
Day04/1-classification/classificationV2.ipynb
kxu08/Bootcamp2019
fa61d80e3ba3842bc2685b6d6910a5c2711f39df
[ "BSD-3-Clause" ]
null
null
null
Day04/1-classification/classificationV2.ipynb
kxu08/Bootcamp2019
fa61d80e3ba3842bc2685b6d6910a5c2711f39df
[ "BSD-3-Clause" ]
null
null
null
146.209936
72,108
0.890661
[ [ [ "# Acknowledgement\n\n**Origine:** This notebook is downloaded at https://github.com/justmarkham/scikit-learn-videos. \n\nSome modifications are done.", "_____no_output_____" ], [ "## Agenda\n\n1. K-nearest neighbors (KNN) classification\n2. Logistic Regression\n3. Review of model evaluation\n4. Classification accuracy\n5. Confusion matrix\n6. Adjusting the classification threshold\n\n", "_____no_output_____" ], [ "## 1. K-nearest neighbors (KNN) classification", "_____no_output_____" ], [ "1. Pick a value for K.\n2. Search for the K observations in the training data that are \"nearest\" to the measurements of the unknown point.\n3. Use the most popular response value from the K nearest neighbors as the predicted response value for the unknown point.", "_____no_output_____" ], [ "### Example training data\n\n![Training data](04_knn_dataset.png)", "_____no_output_____" ], [ "### KNN classification map (K=1)\n\n![1NN classification map](04_1nn_map.png)", "_____no_output_____" ], [ "### KNN classification map (K=5)\n\n![5NN classification map](04_5nn_map.png)", "_____no_output_____" ], [ "*Image Credits: [Data3classes](http://commons.wikimedia.org/wiki/File:Data3classes.png#/media/File:Data3classes.png), [Map1NN](http://commons.wikimedia.org/wiki/File:Map1NN.png#/media/File:Map1NN.png), [Map5NN](http://commons.wikimedia.org/wiki/File:Map5NN.png#/media/File:Map5NN.png) by Agor153. Licensed under CC BY-SA 3.0*", "_____no_output_____" ], [ "## 2. 
Logistic Regression\n* Linear Model of classification, assumes linear relationship between feature & target\n* Returns class probabilities\n* Hyperparameter : C - regularization coef\n* Fundamentally suited for bi-class classification", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\nfrom sklearn.datasets import make_blobs", "_____no_output_____" ], [ "X,y = make_blobs(n_features=2, n_samples=1000, cluster_std=2,centers=2)", "_____no_output_____" ], [ "plt.scatter(X[:,0],X[:,1],c=y,s=10)", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\nlr = LogisticRegression(random_state=0, solver='lbfgs')\nlr.fit(X,y)", "_____no_output_____" ], [ "h = .02\nx_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\ny_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))", "_____no_output_____" ], [ "Z = lr.predict(np.c_[xx.ravel(), yy.ravel()])", "_____no_output_____" ], [ "Z = Z.reshape(xx.shape)\nplt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)\nplt.scatter(X[:,0],X[:,1],c=y,s=10)", "_____no_output_____" ] ], [ [ "## 3. Review of model evaluation\n\n- Need a way to choose between models: different model types, tuning parameters, and features\n- Use a **model evaluation procedure** to estimate how well a model will generalize to out-of-sample data\n- Requires a **model evaluation metric** to quantify the model performance", "_____no_output_____" ], [ "## 4. 
Classification accuracy\n\n[Pima Indians Diabetes dataset](https://www.kaggle.com/uciml/pima-indians-diabetes-database) originally from the UCI Machine Learning Repository", "_____no_output_____" ] ], [ [ "# read the data into a pandas DataFrame\npath = 'pima-indians-diabetes.data'\ncol_names = ['pregnant', 'glucose', 'bp', 'skin', 'insulin', 'bmi', 'pedigree', 'age', 'label']\npima = pd.read_csv(path, header=None, names=col_names)", "_____no_output_____" ], [ "# print the first 5 rows of data\npima.head()", "_____no_output_____" ] ], [ [ "**Question:** Can we predict the diabetes status of a patient given their health measurements?", "_____no_output_____" ] ], [ [ "# define X and y\nfeature_cols = ['pregnant', 'insulin', 'bmi', 'age']\nX = pima[feature_cols]\ny = pima.label", "_____no_output_____" ], [ "# split X and y into training and testing sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)", "_____no_output_____" ], [ "# train a logistic regression model on the training set\nfrom sklearn.linear_model import LogisticRegression\nlogreg = LogisticRegression(random_state=0, solver='lbfgs')\nlogreg.fit(X_train, y_train)", "_____no_output_____" ], [ "# make class predictions for the testing set\ny_pred_class = logreg.predict(X_test)", "_____no_output_____" ] ], [ [ "**Classification accuracy:** percentage of correct predictions", "_____no_output_____" ] ], [ [ "# calculate accuracy\nfrom sklearn import metrics\nprint(metrics.accuracy_score(y_test, y_pred_class))", "0.6770833333333334\n" ] ], [ [ "**Null accuracy:** accuracy that could be achieved by always predicting the most frequent class", "_____no_output_____" ] ], [ [ "# examine the class distribution of the testing set (using a Pandas Series method)\ny_test.value_counts()", "_____no_output_____" ], [ "# calculate the percentage of ones\ny_test.mean()", "_____no_output_____" ], [ "# calculate the percentage of zeros\n1 - 
y_test.mean()", "_____no_output_____" ], [ "# calculate null accuracy (for binary classification problems coded as 0/1)\nmax(y_test.mean(), 1 - y_test.mean())", "_____no_output_____" ], [ "# calculate null accuracy (for multi-class classification problems)\ny_test.value_counts().head(1) / len(y_test)", "_____no_output_____" ] ], [ [ "Comparing the **true** and **predicted** response values", "_____no_output_____" ] ], [ [ "# print the first 25 true and predicted responses\nprint('True:', y_test.values[0:25])\nprint('Pred:', y_pred_class[0:25])", "True: [1 0 0 1 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 1 1 0 0 0]\nPred: [0 0 0 0 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0]\n" ] ], [ [ "**Conclusion:**\n\n- Classification accuracy is the **easiest classification metric to understand**\n- But, it does not tell you the **underlying distribution** of response values\n- And, it does not tell you what **\"types\" of errors** your classifier is making", "_____no_output_____" ], [ "## 5. Confusion matrix\n\nTable that describes the performance of a classification model", "_____no_output_____" ] ], [ [ "# IMPORTANT: first argument is true values, second argument is predicted values\nprint(metrics.confusion_matrix(y_test, y_pred_class))", "[[114 16]\n [ 46 16]]\n" ] ], [ [ "![Small confusion matrix](09_confusion_matrix_1.png)", "_____no_output_____" ], [ "- Every observation in the testing set is represented in **exactly one box**\n- It's a 2x2 matrix because there are **2 response classes**\n- The format shown here is **not** universal", "_____no_output_____" ], [ "**Basic terminology**\n\n- **True Positives (TP):** we *correctly* predicted that they *do* have diabetes\n- **True Negatives (TN):** we *correctly* predicted that they *don't* have diabetes\n- **False Positives (FP):** we *incorrectly* predicted that they *do* have diabetes (a \"Type I error\")\n- **False Negatives (FN):** we *incorrectly* predicted that they *don't* have diabetes (a \"Type II error\")", 
"_____no_output_____" ] ], [ [ "# print the first 25 true and predicted responses\nprint('True:', y_test.values[0:25])\nprint('Pred:', y_pred_class[0:25])", "True: [1 0 0 1 0 0 1 1 0 0 1 1 0 0 0 0 1 0 0 0 1 1 0 0 0]\nPred: [0 0 0 0 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0]\n" ], [ "# save confusion matrix and slice into four pieces\nconfusion = metrics.confusion_matrix(y_test, y_pred_class)\nTP = confusion[1, 1]\nTN = confusion[0, 0]\nFP = confusion[0, 1]\nFN = confusion[1, 0]", "_____no_output_____" ] ], [ [ "![Large confusion matrix](09_confusion_matrix_2.png)", "_____no_output_____" ], [ "## Metrics computed from a confusion matrix", "_____no_output_____" ], [ "**Classification Accuracy:** Overall, how often is the classifier correct?", "_____no_output_____" ] ], [ [ "print((TP + TN) / float(TP + TN + FP + FN))\nprint(metrics.accuracy_score(y_test, y_pred_class))", "0.6770833333333334\n0.6770833333333334\n" ] ], [ [ "**Classification Error:** Overall, how often is the classifier incorrect?\n\n- Also known as \"Misclassification Rate\"", "_____no_output_____" ] ], [ [ "print((FP + FN) / float(TP + TN + FP + FN))\nprint(1 - metrics.accuracy_score(y_test, y_pred_class))", "0.3229166666666667\n0.32291666666666663\n" ] ], [ [ "**Sensitivity:** When the actual value is positive, how often is the prediction correct?\n\n- How \"sensitive\" is the classifier to detecting positive instances?\n- Also known as \"True Positive Rate\" or \"Recall\"", "_____no_output_____" ] ], [ [ "print(TP / float(TP + FN))\nprint(metrics.recall_score(y_test, y_pred_class))", "0.25806451612903225\n0.25806451612903225\n" ] ], [ [ "**Specificity:** When the actual value is negative, how often is the prediction correct?\n\n- How \"specific\" (or \"selective\") is the classifier in predicting positive instances?", "_____no_output_____" ] ], [ [ "print(TN / float(TN + FP))", "0.8769230769230769\n" ] ], [ [ "**False Positive Rate:** When the actual value is negative, how often is the prediction 
incorrect?", "_____no_output_____" ] ], [ [ "print(FP / float(TN + FP))", "0.12307692307692308\n" ] ], [ [ "**Precision:** When a positive value is predicted, how often is the prediction correct?\n\n- How \"precise\" is the classifier when predicting positive instances?", "_____no_output_____" ] ], [ [ "print(TP / float(TP + FP))\nprint(metrics.precision_score(y_test, y_pred_class))", "0.5\n0.5\n" ] ], [ [ "Many other metrics can be computed: F1 score, Matthews correlation coefficient, etc.", "_____no_output_____" ], [ "**Conclusion:**\n\n- Confusion matrix gives you a **more complete picture** of how your classifier is performing\n- Also allows you to compute various **classification metrics**, and these metrics can guide your model selection\n\n**Which metrics should you focus on?**\n\n- Choice of metric depends on your **business objective**\n- **Spam filter** (positive class is \"spam\"): Optimize for **precision or specificity** because false negatives (spam goes to the inbox) are more acceptable than false positives (non-spam is caught by the spam filter)\n- **Fraudulent transaction detector** (positive class is \"fraud\"): Optimize for **sensitivity** because false positives (normal transactions that are flagged as possible fraud) are more acceptable than false negatives (fraudulent transactions that are not detected)", "_____no_output_____" ], [ "## 6. 
Adjusting the classification threshold", "_____no_output_____" ] ], [ [ "# print the first 10 predicted responses\nlogreg.predict(X_test)[0:10]", "_____no_output_____" ], [ "# print the first 10 predicted probabilities of class membership\nlogreg.predict_proba(X_test)[0:10, :]", "_____no_output_____" ], [ "# print the first 10 predicted probabilities for class 1\nlogreg.predict_proba(X_test)[0:10, 1]", "_____no_output_____" ], [ "# store the predicted probabilities for class 1\ny_pred_prob = logreg.predict_proba(X_test)[:, 1]", "_____no_output_____" ], [ "# allow plots to appear in the notebook\n%matplotlib inline\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "# histogram of predicted probabilities\nplt.hist(y_pred_prob, bins=8)\nplt.xlim(0, 1)\nplt.title('Histogram of predicted probabilities')\nplt.xlabel('Predicted probability of diabetes')\nplt.ylabel('Frequency')", "_____no_output_____" ] ], [ [ "**Decrease the threshold** for predicting diabetes in order to **increase the sensitivity** of the classifier", "_____no_output_____" ] ], [ [ "# predict diabetes if the predicted probability is greater than 0.3\nfrom sklearn.preprocessing import binarize\ny_pred_class = binarize([y_pred_prob], 0.3)[0]", "_____no_output_____" ], [ "# print the first 10 predicted probabilities\ny_pred_prob[0:10]", "_____no_output_____" ], [ "# print the first 10 predicted classes with the lower threshold\ny_pred_class[0:10]", "_____no_output_____" ], [ "# previous confusion matrix (default threshold of 0.5)\nprint(confusion)", "[[114 16]\n [ 46 16]]\n" ], [ "# new confusion matrix (threshold of 0.3)\nprint(metrics.confusion_matrix(y_test, y_pred_class))", "[[82 48]\n [17 45]]\n" ], [ "# sensitivity has increased (used to be 0.24)\nprint(46 / float(46 + 16))", "0.7419354838709677\n" ], [ "# specificity has decreased (used to be 0.91)\nprint(80 / float(80 + 50))", "0.6153846153846154\n" ] ], [ [ "**Conclusion:**\n\n- **Threshold of 0.5** is used by default (for binary 
problems) to convert predicted probabilities into class predictions\n- Threshold can be **adjusted** to increase sensitivity or specificity\n- Sensitivity and specificity have an **inverse relationship**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d0677754cddfb6416d1c99e3af9aa84b4e95cd38
591,491
ipynb
Jupyter Notebook
examples/notebooks/TutorialTaskImplementation.ipynb
jamesbut/evojax
6aff62370120fc7a0142d3aa94c09c718de4786e
[ "Apache-2.0" ]
365
2022-02-08T07:41:33.000Z
2022-03-31T23:35:35.000Z
examples/notebooks/TutorialTaskImplementation.ipynb
jamesbut/evojax
6aff62370120fc7a0142d3aa94c09c718de4786e
[ "Apache-2.0" ]
16
2022-02-13T11:29:53.000Z
2022-03-31T11:00:34.000Z
examples/notebooks/TutorialTaskImplementation.ipynb
jamesbut/evojax
6aff62370120fc7a0142d3aa94c09c718de4786e
[ "Apache-2.0" ]
24
2022-02-11T04:19:35.000Z
2022-03-15T02:44:35.000Z
565.478967
352,222
0.888784
[ [ [ "<a href=\"https://colab.research.google.com/github/google/evojax/blob/main/examples/notebooks/TutorialTaskImplementation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Tutorial: Creating Tasks", "_____no_output_____" ], [ "## Pre-requisite\n\nBefore we start, we need to install EvoJAX and import some libraries. \n**Note** In our [paper](https://arxiv.org/abs/2202.05008), we ran the experiments on NVIDIA V100 GPU(s). Your results can be different from ours.", "_____no_output_____" ] ], [ [ "from IPython.display import clear_output, Image\n\n!pip install evojax\n!pip install torchvision # We use torchvision.datasets.MNIST in this tutorial.\n\nclear_output()", "_____no_output_____" ], [ "import os\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\n\nfrom evojax.task.cartpole import CartPoleSwingUp\nfrom evojax.policy.mlp import MLPPolicy\nfrom evojax.algo import PGPE\nfrom evojax import Trainer\nfrom evojax.util import create_logger", "_____no_output_____" ], [ "# Let's create a directory to save logs and models.\nlog_dir = './log'\nlogger = create_logger(name='EvoJAX', log_dir=log_dir)\nlogger.info('Welcome to the tutorial on Task creation!')\n\nlogger.info('Jax backend: {}'.format(jax.local_devices()))\n!nvidia-smi --query-gpu=name --format=csv,noheader", "EvoJAX: 2022-02-12 05:53:28,121 [INFO] Welcome to the tutorial on Task creation!\nabsl: 2022-02-12 05:53:28,133 [INFO] Starting the local TPU driver.\nabsl: 2022-02-12 05:53:28,135 [INFO] Unable to initialize backend 'tpu_driver': Not found: Unable to find driver in registry given worker: local://\nabsl: 2022-02-12 05:53:28,519 [INFO] Unable to initialize backend 'tpu': Invalid argument: TpuPlatform is not available.\nEvoJAX: 2022-02-12 05:53:28,520 [INFO] Jax backend: [GpuDevice(id=0, process_index=0)]\n" ] ], [ [ "## Introduction", "_____no_output_____" ], [ "EvoJAX has three major 
components: the *task*, the *policy network* and the *neuroevolution algorithm*. Once these components are implemented and instantiated, we can use a trainer to start the training process. The following code snippet provides an example of how we use EvoJAX.", "_____no_output_____" ] ], [ [ "seed = 42 # Wish me luck!\n\n# We use the classic cart-pole swing up as our tasks, see\n# https://github.com/google/evojax/tree/main/evojax/task for more example tasks.\n# The test flag provides the opportunity for a user to\n# 1. Return different signals as rewards. For example, in our MNIST example,\n# we use negative cross-entropy loss as the reward in training tasks, and the\n# classification accuracy as the reward in test tasks.\n# 2. Perform reward shaping. It is common for RL practitioners to modify the\n# rewards during training so that the agent learns more efficiently. But this\n# modification should not be allowed in tests for fair evaluations.\nhard = False\ntrain_task = CartPoleSwingUp(harder=hard, test=False)\ntest_task = CartPoleSwingUp(harder=hard, test=True)\n\n# We use a feedforward network as our policy.\n# By default, MLPPolicy uses \"tanh\" as its activation function for the output.\npolicy = MLPPolicy(\n input_dim=train_task.obs_shape[0],\n hidden_dims=[64, 64],\n output_dim=train_task.act_shape[0],\n logger=logger,\n)\n\n# We use PGPE as our evolution algorithm.\n# If you want to know more about the algorithm, please take a look at the paper:\n# https://people.idsia.ch/~juergen/nn2010.pdf \nsolver = PGPE(\n pop_size=64,\n param_size=policy.num_params,\n optimizer='adam',\n center_learning_rate=0.05,\n seed=seed,\n)\n\n# Now that we have all the three components instantiated, we can create a\n# trainer and start the training process.\ntrainer = Trainer(\n policy=policy,\n solver=solver,\n train_task=train_task,\n test_task=test_task,\n max_iter=600,\n log_interval=100,\n test_interval=200,\n n_repeats=5,\n n_evaluations=128,\n seed=seed,\n 
log_dir=log_dir,\n logger=logger,\n)\n_ = trainer.run()", "EvoJAX: 2022-02-12 05:53:31,223 [INFO] MLPPolicy.num_params = 4609\nEvoJAX: 2022-02-12 05:53:31,381 [INFO] Start to train for 600 iterations.\nEvoJAX: 2022-02-12 05:53:42,936 [INFO] Iter=100, size=64, max=717.4396, avg=632.6160, min=475.3617, std=51.3240\nEvoJAX: 2022-02-12 05:53:51,773 [INFO] Iter=200, size=64, max=838.2386, avg=751.0416, min=592.3156, std=46.3648\nEvoJAX: 2022-02-12 05:53:53,555 [INFO] [TEST] Iter=200, #tests=128, max=880.2914 avg=834.9127, min=763.1976, std=40.8967\nEvoJAX: 2022-02-12 05:54:02,542 [INFO] Iter=300, size=64, max=917.9876, avg=857.5809, min=48.2173, std=133.0970\nEvoJAX: 2022-02-12 05:54:11,668 [INFO] Iter=400, size=64, max=917.4292, avg=900.6838, min=544.6534, std=53.2497\nEvoJAX: 2022-02-12 05:54:11,770 [INFO] [TEST] Iter=400, #tests=128, max=927.4318 avg=918.8890, min=909.4037, std=3.2266\nEvoJAX: 2022-02-12 05:54:20,773 [INFO] Iter=500, size=64, max=922.2775, avg=868.8109, min=227.3976, std=147.4509\nEvoJAX: 2022-02-12 05:54:29,884 [INFO] [TEST] Iter=600, #tests=128, max=949.3198, avg=928.1906, min=917.7035, std=5.7788\nEvoJAX: 2022-02-12 05:54:29,889 [INFO] Training done, best_score=928.1906\n" ], [ "# Let's visualize the learned policy.\n\ndef render(task, algo, policy):\n \"\"\"Render the learned policy.\"\"\"\n\n task_reset_fn = jax.jit(test_task.reset)\n policy_reset_fn = jax.jit(policy.reset)\n step_fn = jax.jit(test_task.step)\n act_fn = jax.jit(policy.get_actions)\n\n params = algo.best_params[None, :]\n task_s = task_reset_fn(jax.random.PRNGKey(seed=seed)[None, :])\n policy_s = policy_reset_fn(task_s)\n\n images = [CartPoleSwingUp.render(task_s, 0)]\n done = False\n step = 0\n reward = 0\n while not done:\n act, policy_s = act_fn(task_s, params, policy_s)\n task_s, r, d = step_fn(task_s, act)\n step += 1\n reward = reward + r\n done = bool(d[0])\n if step % 3 == 0:\n images.append(CartPoleSwingUp.render(task_s, 0))\n print('reward={}'.format(reward))\n return 
images\n\n\nimgs = render(test_task, solver, policy)\ngif_file = os.path.join(log_dir, 'cartpole.gif')\nimgs[0].save(\n gif_file, save_all=True, append_images=imgs[1:], duration=40, loop=0)\nImage(open(gif_file,'rb').read())", "reward=[934.1182]\n" ] ], [ [ "Including the three major components, EvoJAX implements the entire training pipeline in JAX. In the first release, we have created several [demo tasks](https://github.com/google/evojax/tree/main/evojax/task) to showcase EvoJAX's capacity. And we encourage the users to bring their own tasks. To this end, we will walk you through the process of creating EvoJAX tasks in this tutorial.", "_____no_output_____" ], [ "To contribute a task implementation to EvoJAX, all you need to do is to implement the `VectorizedTask` interface. \nThe interface is defined as the following and you can see the related Python file [here](https://github.com/google/evojax/blob/main/evojax/task/base.py):\n```python\nclass TaskState(ABC):\n \"\"\"A template of the task state.\"\"\"\n obs: jnp.ndarray\n\n\nclass VectorizedTask(ABC):\n \"\"\"Interface for all the EvoJAX tasks.\"\"\"\n\n max_steps: int\n obs_shape: Tuple\n act_shape: Tuple\n test: bool\n multi_agent_training: bool = False\n\n @abstractmethod\n def reset(self, key: jnp.array) -> TaskState:\n \"\"\"This resets the vectorized task.\n Args:\n key - A jax random key.\n Returns:\n TaskState. Initial task state.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def step(self,\n state: TaskState,\n action: jnp.ndarray) -> Tuple[TaskState, jnp.ndarray, jnp.ndarray]:\n \"\"\"This steps once the simulation.\n Args:\n state - System internal states of shape (num_tasks, *).\n action - Vectorized actions of shape (num_tasks, action_size).\n Returns:\n TaskState. Task states.\n jnp.ndarray. Reward.\n jnp.ndarray. 
Task termination flag: 1 for done, 0 otherwise.\n \"\"\"\n raise NotImplementedError()\n```", "_____no_output_____" ], [ "## MNIST classification", "_____no_output_____" ], [ "While one would obviously use gradient descent for MNIST in practice, the point is to show that neuroevolution can also solve them to some degree of accuracy within a short amount of time, which will be useful when these models are adapted within a more complicated task where gradient-based approaches may not work.\n\nThe following code snippet shows how we wrap the dataset and treat it as a one-step `VectorizedTask`.", "_____no_output_____" ] ], [ [ "from torchvision import datasets\nfrom flax.struct import dataclass\nfrom evojax.task.base import TaskState\nfrom evojax.task.base import VectorizedTask\n\n\n# This state contains the information we wish to carry over to the next step.\n# The state will be used in `VectorizedTask.step` method.\n# In supervised learning tasks, we want to store the data and the labels so that\n# we can calculate the loss or the accuracy and use that as the reward signal.\n@dataclass\nclass State(TaskState):\n obs: jnp.ndarray\n labels: jnp.ndarray\n\n\ndef sample_batch(key, data, labels, batch_size):\n ix = jax.random.choice(\n key=key, a=data.shape[0], shape=(batch_size,), replace=False)\n return (jnp.take(data, indices=ix, axis=0),\n jnp.take(labels, indices=ix, axis=0))\n\n\ndef loss(prediction, target):\n target = jax.nn.one_hot(target, 10)\n return -jnp.mean(jnp.sum(prediction * target, axis=1))\n\n\ndef accuracy(prediction, target):\n predicted_class = jnp.argmax(prediction, axis=1)\n return jnp.mean(predicted_class == target)\n\n\nclass MNIST(VectorizedTask):\n \"\"\"MNIST classification task.\n\n We model the classification as an one-step task, i.e.,\n `MNIST.reset` returns a batch of data to the agent, the agent outputs\n predictions, `MNIST.step` returns the reward (loss or accuracy) and\n terminates the rollout.\n \"\"\"\n\n def __init__(self, 
batch_size, test):\n\n self.max_steps = 1\n\n # These are similar to OpenAI Gym environment's\n # observation_space and action_space.\n # They are helpful for initializing the policy networks.\n self.obs_shape = tuple([28, 28, 1])\n self.act_shape = tuple([10, ])\n\n # We download the dataset and normalize the value.\n dataset = datasets.MNIST('./data', train=not test, download=True)\n data = np.expand_dims(dataset.data.numpy() / 255., axis=-1)\n labels = dataset.targets.numpy()\n\n def reset_fn(key):\n if test:\n # In the test mode, we want to test on the entire test set.\n batch_data, batch_labels = data, labels\n else:\n # In the training mode, we only sample a batch of training data.\n batch_data, batch_labels = sample_batch(\n key, data, labels, batch_size)\n return State(obs=batch_data, labels=batch_labels)\n \n # We use jax.vmap for auto-vectorization.\n self._reset_fn = jax.jit(jax.vmap(reset_fn))\n\n def step_fn(state, action):\n if test:\n # In the test mode, we report the classification accuracy.\n reward = accuracy(action, state.labels)\n else:\n # In the training mode, we return the negative loss as the\n # reward signal. 
It is legitimate to return accuracy as the\n # reward signal in training too, but we find the performance is\n # not as good as when we use the negative loss.\n reward = -loss(action, state.labels)\n # This is an one-step task, so that last return value (the `done`\n # flag) is one.\n return state, reward, jnp.ones(())\n\n # We use jax.vmap for auto-vectorization.\n self._step_fn = jax.jit(jax.vmap(step_fn))\n\n def reset(self, key):\n return self._reset_fn(key)\n\n def step(self, state, action):\n return self._step_fn(state, action)", "_____no_output_____" ], [ "# Okay, let's test out the task with a ConvNet policy.\n\nfrom evojax.policy.convnet import ConvNetPolicy\n\n\nbatch_size = 1024\ntrain_task = MNIST(batch_size=batch_size, test=False)\ntest_task = MNIST(batch_size=batch_size, test=True)\n\npolicy = ConvNetPolicy(logger=logger)\n\nsolver = PGPE(\n pop_size=64,\n param_size=policy.num_params,\n optimizer='adam',\n center_learning_rate=0.006,\n stdev_learning_rate=0.09,\n init_stdev=0.04,\n logger=logger,\n seed=seed,\n)\n\ntrainer = Trainer(\n policy=policy,\n solver=solver,\n train_task=train_task,\n test_task=test_task,\n max_iter=5000,\n log_interval=100,\n test_interval=1000,\n n_repeats=1,\n n_evaluations=1,\n seed=seed,\n log_dir=log_dir,\n logger=logger,\n)\n_ = trainer.run()", "EvoJAX: 2022-02-12 05:54:41,285 [INFO] ConvNetPolicy.num_params = 11274\nEvoJAX: 2022-02-12 05:54:41,435 [INFO] Start to train for 5000 iterations.\nEvoJAX: 2022-02-12 05:54:52,635 [INFO] Iter=100, size=64, max=-0.8691, avg=-1.0259, min=-1.4128, std=0.1188\nEvoJAX: 2022-02-12 05:54:56,730 [INFO] Iter=200, size=64, max=-0.5346, avg=-0.6686, min=-1.2417, std=0.1188\nEvoJAX: 2022-02-12 05:55:00,824 [INFO] Iter=300, size=64, max=-0.3925, avg=-0.4791, min=-0.5902, std=0.0456\nEvoJAX: 2022-02-12 05:55:04,917 [INFO] Iter=400, size=64, max=-0.3357, avg=-0.3918, min=-0.5241, std=0.0388\nEvoJAX: 2022-02-12 05:55:09,010 [INFO] Iter=500, size=64, max=-0.2708, avg=-0.3235, min=-0.4797, 
std=0.0317\nEvoJAX: 2022-02-12 05:55:13,104 [INFO] Iter=600, size=64, max=-0.1965, avg=-0.2417, min=-0.3119, std=0.0238\nEvoJAX: 2022-02-12 05:55:17,198 [INFO] Iter=700, size=64, max=-0.1784, avg=-0.2177, min=-0.3148, std=0.0268\nEvoJAX: 2022-02-12 05:55:21,292 [INFO] Iter=800, size=64, max=-0.1797, avg=-0.2105, min=-0.2762, std=0.0222\nEvoJAX: 2022-02-12 05:55:25,386 [INFO] Iter=900, size=64, max=-0.1803, avg=-0.2379, min=-0.3923, std=0.0330\nEvoJAX: 2022-02-12 05:55:29,478 [INFO] Iter=1000, size=64, max=-0.1535, avg=-0.1856, min=-0.2457, std=0.0225\nEvoJAX: 2022-02-12 05:55:31,071 [INFO] [TEST] Iter=1000, #tests=1, max=0.9627 avg=0.9627, min=0.9627, std=0.0000\nEvoJAX: 2022-02-12 05:55:35,170 [INFO] Iter=1100, size=64, max=-0.1150, avg=-0.1438, min=-0.1971, std=0.0153\nEvoJAX: 2022-02-12 05:55:39,263 [INFO] Iter=1200, size=64, max=-0.1278, avg=-0.1571, min=-0.2458, std=0.0193\nEvoJAX: 2022-02-12 05:55:43,358 [INFO] Iter=1300, size=64, max=-0.1323, avg=-0.1641, min=-0.2089, std=0.0164\nEvoJAX: 2022-02-12 05:55:47,453 [INFO] Iter=1400, size=64, max=-0.1331, avg=-0.1573, min=-0.2085, std=0.0163\nEvoJAX: 2022-02-12 05:55:51,547 [INFO] Iter=1500, size=64, max=-0.1709, avg=-0.2142, min=-0.2950, std=0.0197\nEvoJAX: 2022-02-12 05:55:55,640 [INFO] Iter=1600, size=64, max=-0.1052, avg=-0.1410, min=-0.2766, std=0.0279\nEvoJAX: 2022-02-12 05:55:59,735 [INFO] Iter=1700, size=64, max=-0.0897, avg=-0.1184, min=-0.1591, std=0.0144\nEvoJAX: 2022-02-12 05:56:03,828 [INFO] Iter=1800, size=64, max=-0.0777, avg=-0.1029, min=-0.1509, std=0.0165\nEvoJAX: 2022-02-12 05:56:07,922 [INFO] Iter=1900, size=64, max=-0.0935, avg=-0.1285, min=-0.1682, std=0.0151\nEvoJAX: 2022-02-12 05:56:12,015 [INFO] Iter=2000, size=64, max=-0.1158, avg=-0.1439, min=-0.2054, std=0.0155\nEvoJAX: 2022-02-12 05:56:12,026 [INFO] [TEST] Iter=2000, #tests=1, max=0.9740 avg=0.9740, min=0.9740, std=0.0000\nEvoJAX: 2022-02-12 05:56:16,121 [INFO] Iter=2100, size=64, max=-0.1054, avg=-0.1248, min=-0.1524, 
std=0.0101\nEvoJAX: 2022-02-12 05:56:20,213 [INFO] Iter=2200, size=64, max=-0.1092, avg=-0.1363, min=-0.1774, std=0.0146\nEvoJAX: 2022-02-12 05:56:24,306 [INFO] Iter=2300, size=64, max=-0.1079, avg=-0.1298, min=-0.1929, std=0.0158\nEvoJAX: 2022-02-12 05:56:28,398 [INFO] Iter=2400, size=64, max=-0.1129, avg=-0.1352, min=-0.1870, std=0.0145\nEvoJAX: 2022-02-12 05:56:32,491 [INFO] Iter=2500, size=64, max=-0.0790, avg=-0.0955, min=-0.1291, std=0.0113\nEvoJAX: 2022-02-12 05:56:36,584 [INFO] Iter=2600, size=64, max=-0.1299, avg=-0.1537, min=-0.1947, std=0.0128\nEvoJAX: 2022-02-12 05:56:40,675 [INFO] Iter=2700, size=64, max=-0.0801, avg=-0.0983, min=-0.1301, std=0.0094\nEvoJAX: 2022-02-12 05:56:44,767 [INFO] Iter=2800, size=64, max=-0.0849, avg=-0.1014, min=-0.1511, std=0.0116\nEvoJAX: 2022-02-12 05:56:48,859 [INFO] Iter=2900, size=64, max=-0.0669, avg=-0.0796, min=-0.1111, std=0.0090\nEvoJAX: 2022-02-12 05:56:52,950 [INFO] Iter=3000, size=64, max=-0.0782, avg=-0.0975, min=-0.1304, std=0.0123\nEvoJAX: 2022-02-12 05:56:52,960 [INFO] [TEST] Iter=3000, #tests=1, max=0.9768 avg=0.9768, min=0.9768, std=0.0000\nEvoJAX: 2022-02-12 05:56:57,056 [INFO] Iter=3100, size=64, max=-0.0857, avg=-0.1029, min=-0.1421, std=0.0092\nEvoJAX: 2022-02-12 05:57:01,149 [INFO] Iter=3200, size=64, max=-0.0769, avg=-0.0964, min=-0.1279, std=0.0120\nEvoJAX: 2022-02-12 05:57:05,242 [INFO] Iter=3300, size=64, max=-0.0805, avg=-0.1021, min=-0.1200, std=0.0088\nEvoJAX: 2022-02-12 05:57:09,335 [INFO] Iter=3400, size=64, max=-0.0642, avg=-0.0774, min=-0.0972, std=0.0080\nEvoJAX: 2022-02-12 05:57:13,428 [INFO] Iter=3500, size=64, max=-0.0601, avg=-0.0771, min=-0.1074, std=0.0080\nEvoJAX: 2022-02-12 05:57:17,522 [INFO] Iter=3600, size=64, max=-0.0558, avg=-0.0709, min=-0.1082, std=0.0094\nEvoJAX: 2022-02-12 05:57:21,615 [INFO] Iter=3700, size=64, max=-0.0915, avg=-0.1048, min=-0.1519, std=0.0100\nEvoJAX: 2022-02-12 05:57:25,709 [INFO] Iter=3800, size=64, max=-0.0525, avg=-0.0667, min=-0.0823, 
std=0.0069\nEvoJAX: 2022-02-12 05:57:29,801 [INFO] Iter=3900, size=64, max=-0.0983, avg=-0.1150, min=-0.1447, std=0.0105\nEvoJAX: 2022-02-12 05:57:33,895 [INFO] Iter=4000, size=64, max=-0.0759, avg=-0.0954, min=-0.1293, std=0.0114\nEvoJAX: 2022-02-12 05:57:33,909 [INFO] [TEST] Iter=4000, #tests=1, max=0.9800 avg=0.9800, min=0.9800, std=0.0000\nEvoJAX: 2022-02-12 05:57:38,004 [INFO] Iter=4100, size=64, max=-0.0811, avg=-0.0957, min=-0.1184, std=0.0086\nEvoJAX: 2022-02-12 05:57:42,095 [INFO] Iter=4200, size=64, max=-0.0806, avg=-0.0960, min=-0.1313, std=0.0096\nEvoJAX: 2022-02-12 05:57:46,187 [INFO] Iter=4300, size=64, max=-0.0698, avg=-0.0908, min=-0.1158, std=0.0100\nEvoJAX: 2022-02-12 05:57:50,278 [INFO] Iter=4400, size=64, max=-0.0754, avg=-0.0930, min=-0.1202, std=0.0104\nEvoJAX: 2022-02-12 05:57:54,368 [INFO] Iter=4500, size=64, max=-0.0708, avg=-0.0877, min=-0.1107, std=0.0088\nEvoJAX: 2022-02-12 05:57:58,459 [INFO] Iter=4600, size=64, max=-0.0610, avg=-0.0773, min=-0.1032, std=0.0076\nEvoJAX: 2022-02-12 05:58:02,550 [INFO] Iter=4700, size=64, max=-0.0704, avg=-0.0881, min=-0.1299, std=0.0110\nEvoJAX: 2022-02-12 05:58:06,640 [INFO] Iter=4800, size=64, max=-0.0651, avg=-0.0812, min=-0.1042, std=0.0080\nEvoJAX: 2022-02-12 05:58:10,732 [INFO] Iter=4900, size=64, max=-0.0588, avg=-0.0712, min=-0.1096, std=0.0081\nEvoJAX: 2022-02-12 05:58:14,795 [INFO] [TEST] Iter=5000, #tests=1, max=0.9822, avg=0.9822, min=0.9822, std=0.0000\nEvoJAX: 2022-02-12 05:58:14,800 [INFO] Training done, best_score=0.9822\n" ] ], [ [ "Okay! Our implementation of the classification task is successful and EvoJAX achieved $>98\\%$ test accuracy within 5 min on a V100 GPU.\n\nAs mentioned before, MNIST is a simple one-step task, we want to get you familiar with the interfaces. 
\nNext, we will build the classic cart-pole task from scratch.", "_____no_output_____" ], [ "## Cart-pole swing up", "_____no_output_____" ], [ "In our cart-pole swing up task, the agent applies an action $a \\in [-1, 1]$ on the cart, and we maintain 4 states:\n1. cart position $x$\n2. cart velocity $\\dot{x}$\n3. the angle between the cart and the pole $\\theta$\n4. the pole's angular velocity $\\dot{\\theta}$\n\nWe randomly sample the initial states and will use the forward Euler integration to update them: \n$\\mathbf{x}(t + \\Delta t) = \\mathbf{x}(t) + \\Delta t \\mathbf{v}(t)$ and \n$\\mathbf{v}(t + \\Delta t) = \\mathbf{v}(t) + \\Delta t f(a, \\mathbf{x}(t), \\mathbf{v}(t))$ \nwhere $\\mathbf{x}(t) = [x, \\theta]^{\\intercal}$, $\\mathbf{v}(t) = [\\dot{x}, \\dot{\\theta}]^{\\intercal}$ and $f(\\cdot)$ is a function that represents the physical model.\n\nThanks to `jax.vmap`, we are able to write the task as if it is designed to deal with non-batch inputs though in the training process JAX will automatically vectorize the task for us.", "_____no_output_____" ] ], [ [ "from evojax.task.base import TaskState\nfrom evojax.task.base import VectorizedTask\nimport PIL\n\n\n# Define some physics metrics.\nGRAVITY = 9.82\nCART_MASS = 0.5\nPOLE_MASS = 0.5\nPOLE_LEN = 0.6\nFRICTION = 0.1\nFORCE_SCALING = 10.0\nDELTA_T = 0.01\nCART_X_LIMIT = 2.4\n\n# Define some constants for visualization.\nSCREEN_W = 600\nSCREEN_H = 600\nCART_W = 40\nCART_H = 20\nVIZ_SCALE = 100\nWHEEL_RAD = 5\n\n@dataclass\nclass State(TaskState):\n obs: jnp.ndarray # This is the tuple (x, x_dot, theta, theta_dot)\n state: jnp.ndarray # This maintains the system's state.\n steps: jnp.int32 # This tracks the rollout length.\n key: jnp.ndarray # This serves as a random seed.\n\n\nclass CartPole(VectorizedTask):\n \"\"\"A quick implementation of the cart-pole task.\"\"\"\n\n def __init__(self, max_steps=1000, test=False):\n self.max_steps = max_steps\n self.obs_shape = tuple([4, ])\n self.act_shape = 
tuple([1, ])\n\n def sample_init_state(sample_key):\n return (\n jax.random.normal(sample_key, shape=(4,)) * 0.2 +\n jnp.array([0, 0, jnp.pi, 0])\n )\n\n def get_reward(x, x_dot, theta, theta_dot):\n # We encourage\n # the pole to be held upward (i.e., theta is close to 0) and\n # the cart to be at the origin (i.e., x is close to 0).\n reward_theta = (jnp.cos(theta) + 1.0) / 2.0\n reward_x = jnp.cos((x / CART_X_LIMIT) * (jnp.pi / 2.0))\n return reward_theta * reward_x\n\n def update_state(action, x, x_dot, theta, theta_dot):\n action = jnp.clip(action, -1.0, 1.0)[0] * FORCE_SCALING\n s = jnp.sin(theta)\n c = jnp.cos(theta)\n total_m = CART_MASS + POLE_MASS\n m_p_l = POLE_MASS * POLE_LEN\n \n # This is the physical model: f-function.\n x_dot_update = (\n (-2 * m_p_l * (theta_dot ** 2) * s +\n 3 * POLE_MASS * GRAVITY * s * c +\n 4 * action - 4 * FRICTION * x_dot) /\n (4 * total_m - 3 * POLE_MASS * c ** 2)\n )\n theta_dot_update = (\n (-3 * m_p_l * (theta_dot ** 2) * s * c +\n 6 * total_m * GRAVITY * s +\n 6 * (action - FRICTION * x_dot) * c) /\n (4 * POLE_LEN * total_m - 3 * m_p_l * c ** 2)\n )\n\n # This is the forward Euler integration.\n x = x + x_dot * DELTA_T\n theta = theta + theta_dot * DELTA_T\n x_dot = x_dot + x_dot_update * DELTA_T\n theta_dot = theta_dot + theta_dot_update * DELTA_T\n\n return jnp.array([x, x_dot, theta, theta_dot])\n\n def out_of_screen(x):\n \"\"\"We terminate the rollout if the cart is out of the screen.\"\"\"\n beyond_boundary_l = jnp.where(x < -CART_X_LIMIT, 1, 0)\n beyond_boundary_r = jnp.where(x > CART_X_LIMIT, 1, 0)\n return jnp.bitwise_or(beyond_boundary_l, beyond_boundary_r)\n\n def reset_fn(key):\n next_key, key = jax.random.split(key)\n state = sample_init_state(key)\n return State(\n obs=state, # We make the task fully-observable.\n state=state,\n steps=jnp.zeros((), dtype=int),\n key=next_key,\n )\n \n self._reset_fn = jax.jit(jax.vmap(reset_fn))\n\n def step_fn(state, action):\n current_state = update_state(action, 
*state.state)\n reward = get_reward(*current_state)\n steps = state.steps + 1\n done = jnp.bitwise_or(\n out_of_screen(current_state[0]), steps >= max_steps)\n # We reset the step counter to zero if the rollout has ended.\n steps = jnp.where(done, jnp.zeros((), jnp.int32), steps)\n # We automatically reset the states if the rollout has ended.\n next_key, key = jax.random.split(state.key)\n # current_state = jnp.where(\n # done, sample_init_state(key), current_state)\n return State(\n state=current_state,\n obs=current_state,\n steps=steps,\n key=next_key), reward, done\n\n self._step_fn = jax.jit(jax.vmap(step_fn))\n\n def reset(self, key):\n return self._reset_fn(key)\n\n def step(self, state, action):\n return self._step_fn(state, action)\n\n # Optinally, we can implement a render method to visualize the task.\n @staticmethod\n def render(state, task_id):\n \"\"\"Render a specified task.\"\"\"\n img = PIL.Image.new('RGB', (SCREEN_W, SCREEN_H), (255, 255, 255))\n draw = PIL.ImageDraw.Draw(img)\n x, _, theta, _ = np.array(state.state[task_id])\n cart_y = SCREEN_H // 2 + 100\n cart_x = x * VIZ_SCALE + SCREEN_W // 2\n # Draw the horizon.\n draw.line(\n (0, cart_y + CART_H // 2 + WHEEL_RAD,\n SCREEN_W, cart_y + CART_H // 2 + WHEEL_RAD),\n fill=(0, 0, 0), width=1)\n # Draw the cart.\n draw.rectangle(\n (cart_x - CART_W // 2, cart_y - CART_H // 2,\n cart_x + CART_W // 2, cart_y + CART_H // 2),\n fill=(255, 0, 0), outline=(0, 0, 0))\n # Draw the wheels.\n draw.ellipse(\n (cart_x - CART_W // 2 - WHEEL_RAD,\n cart_y + CART_H // 2 - WHEEL_RAD,\n cart_x - CART_W // 2 + WHEEL_RAD,\n cart_y + CART_H // 2 + WHEEL_RAD),\n fill=(220, 220, 220), outline=(0, 0, 0))\n draw.ellipse(\n (cart_x + CART_W // 2 - WHEEL_RAD,\n cart_y + CART_H // 2 - WHEEL_RAD,\n cart_x + CART_W // 2 + WHEEL_RAD,\n cart_y + CART_H // 2 + WHEEL_RAD),\n fill=(220, 220, 220), outline=(0, 0, 0))\n # Draw the pole.\n draw.line(\n (cart_x, cart_y,\n cart_x + POLE_LEN * VIZ_SCALE * np.cos(theta - np.pi / 2),\n 
cart_y + POLE_LEN * VIZ_SCALE * np.sin(theta - np.pi / 2)),\n fill=(0, 0, 255), width=6)\n return img", "_____no_output_____" ], [ "# Okay, let's test this simple cart-pole implementation.\n\nrollout_key = jax.random.PRNGKey(seed=seed)\n\nreset_key, rollout_key = jax.random.split(rollout_key, 2)\nreset_key = reset_key[None, :] # Expand dim, the leading is the batch dim.\n\n# Initialize the task.\ncart_pole_task = CartPole()\nt_state = cart_pole_task.reset(reset_key)\ntask_screens = [CartPole.render(t_state, 0)]\n\n# Rollout with random actions.\ndone = False\nstep_cnt = 0\ntotal_reward = 0\nwhile not done:\n action_key, rollout_key = jax.random.split(rollout_key, 2)\n action = jax.random.uniform(\n action_key, shape=(1, 1), minval=-1., maxval=1.)\n t_state, reward, done = cart_pole_task.step(t_state, action)\n total_reward = total_reward + reward\n step_cnt += 1\n if step_cnt % 4 == 0:\n task_screens.append(CartPole.render(t_state, 0))\nprint('reward={}, steps={}'.format(total_reward, step_cnt))\n\n# Visualze the rollout.\ngif_file = os.path.join(log_dir, 'rand_cartpole.gif')\ntask_screens[0].save(\n gif_file, save_all=True, append_images=task_screens[1:], loop=0)\nImage(open(gif_file,'rb').read())", "reward=[4.687451], steps=221\n" ] ], [ [ "The random policy does not solve the cart-pole task, but our implementation seems to be correct. 
Let's now plug in this task to EvoJAX.", "_____no_output_____" ] ], [ [ "train_task = CartPole(test=False)\ntest_task = CartPole(test=True)\n\n# We use the same policy and solver to solve this \"new\" task.\npolicy = MLPPolicy(\n input_dim=train_task.obs_shape[0],\n hidden_dims=[64, 64],\n output_dim=train_task.act_shape[0],\n logger=logger,\n)\nsolver = PGPE(\n pop_size=64,\n param_size=policy.num_params,\n optimizer='adam',\n center_learning_rate=0.05,\n seed=seed,\n)\ntrainer = Trainer(\n policy=policy,\n solver=solver,\n train_task=train_task,\n test_task=test_task,\n max_iter=600,\n log_interval=100,\n test_interval=200,\n n_repeats=5,\n n_evaluations=128,\n seed=seed,\n log_dir=log_dir,\n logger=logger,\n)\n_ = trainer.run()", "EvoJAX: 2022-02-12 05:58:16,702 [INFO] MLPPolicy.num_params = 4545\nEvoJAX: 2022-02-12 05:58:16,868 [INFO] Start to train for 600 iterations.\nEvoJAX: 2022-02-12 05:58:26,417 [INFO] Iter=100, size=64, max=704.6008, avg=538.1765, min=115.6323, std=110.1506\nEvoJAX: 2022-02-12 05:58:34,678 [INFO] Iter=200, size=64, max=716.4336, avg=595.8668, min=381.3772, std=60.5778\nEvoJAX: 2022-02-12 05:58:35,551 [INFO] [TEST] Iter=200, #tests=128, max=695.8007 avg=685.7385, min=668.2902, std=4.3287\nEvoJAX: 2022-02-12 05:58:44,053 [INFO] Iter=300, size=64, max=759.5718, avg=658.8391, min=296.1095, std=71.2600\nEvoJAX: 2022-02-12 05:58:52,540 [INFO] Iter=400, size=64, max=919.3878, avg=839.7709, min=134.9505, std=136.0545\nEvoJAX: 2022-02-12 05:58:52,624 [INFO] [TEST] Iter=400, #tests=128, max=930.0361 avg=915.0107, min=900.9803, std=5.1936\nEvoJAX: 2022-02-12 05:59:00,732 [INFO] Iter=500, size=64, max=926.3024, avg=812.4763, min=121.6825, std=229.5144\nEvoJAX: 2022-02-12 05:59:09,005 [INFO] [TEST] Iter=600, #tests=128, max=942.0136, avg=922.7744, min=235.6000, std=61.2483\nEvoJAX: 2022-02-12 05:59:09,010 [INFO] Training done, best_score=922.7744\n" ], [ "# Let's visualize the learned policy.\n\ndef render(task, algo, policy):\n \"\"\"Render the 
learned policy.\"\"\"\n\n task_reset_fn = jax.jit(test_task.reset)\n policy_reset_fn = jax.jit(policy.reset)\n step_fn = jax.jit(test_task.step)\n act_fn = jax.jit(policy.get_actions)\n\n params = algo.best_params[None, :]\n task_s = task_reset_fn(jax.random.PRNGKey(seed=seed)[None, :])\n policy_s = policy_reset_fn(task_s)\n\n images = [CartPole.render(task_s, 0)]\n done = False\n step = 0\n reward = 0\n while not done:\n act, policy_s = act_fn(task_s, params, policy_s)\n task_s, r, d = step_fn(task_s, act)\n step += 1\n reward = reward + r\n done = bool(d[0])\n if step % 3 == 0:\n images.append(CartPole.render(task_s, 0))\n print('reward={}'.format(reward))\n return images\n\n\nimgs = render(test_task, solver, policy)\ngif_file = os.path.join(log_dir, 'trained_cartpole.gif')\nimgs[0].save(\n gif_file, save_all=True, append_images=imgs[1:], duration=40, loop=0)\nImage(open(gif_file,'rb').read())", "reward=[923.1105]\n" ] ], [ [ "Nice! EvoJAX is able to solve the new cart-pole task within a minute.\n\nIn this tutorial, we walked you through the process of creating tasks from scratch. The two examples we used are simple and are supposed to help you understand the interfaces. If you are interested in learning more, please check out our GitHub [repo](https://github.com/google/evojax/tree/main/evojax/task).\n\nPlease let us ([email protected]) know if you have any problems or suggestions, thanks!", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d0678f45ae89dc199f6a96ce18f610a069071d14
2,559
ipynb
Jupyter Notebook
notebooks/Provider.ipynb
spe-uob/HealthcareLakeETL
3d5d8005d12039d77bf1ffbb2a34e58bca2ef3dc
[ "MIT" ]
4
2021-03-19T16:52:26.000Z
2021-08-06T17:30:40.000Z
notebooks/Provider.ipynb
spe-uob/2020-HealthcareLakeETL
3d5d8005d12039d77bf1ffbb2a34e58bca2ef3dc
[ "MIT" ]
5
2021-04-14T15:49:31.000Z
2021-05-06T15:35:35.000Z
notebooks/Provider.ipynb
spe-uob/HealthcareLakeETL
3d5d8005d12039d77bf1ffbb2a34e58bca2ef3dc
[ "MIT" ]
1
2021-08-20T13:47:32.000Z
2021-08-20T13:47:32.000Z
20.637097
149
0.548261
[ [ [ "## Provider Table Mapping\n\nThis is an attempt at mapping FHIR to OMOP using the following guide: https://build.fhir.org/ig/HL7/cdmh/profiles.html#omop-to-fhir-mappings\n<br>In this notebook we are mapping FHIR to the OMOP Provider Table", "_____no_output_____" ], [ "### Load Data Frame from Parquet Catalog File", "_____no_output_____" ] ], [ [ "from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import dayofmonth,month,year,to_date,trunc,split,explode,array\n\n# Create a local Spark session\nspark = SparkSession.builder.appName('etl').getOrCreate()", "_____no_output_____" ], [ "# Reads file \ndf = spark.read.parquet('data/catalog.parquet')", "_____no_output_____" ] ], [ [ "Data Frame schema ", "_____no_output_____" ] ], [ [ "#df.printSchema()", "_____no_output_____" ] ], [ [ "### Practitioner Mapping ", "_____no_output_____" ], [ "Filter By Practitioner Resource type ", "_____no_output_____" ] ], [ [ "filtered = df.filter(df['resourceType'] == 'Practitioner')", "_____no_output_____" ], [ "#filtered.printSchema()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
d06793641431a3e642fbdc5d7c6167920050ac3f
113,411
ipynb
Jupyter Notebook
examples/categorical-encoders.ipynb
iahsanujunda/feature_engine
46c6bd5a06626b0789fcc1367069d065010794a1
[ "BSD-3-Clause" ]
1
2020-11-15T13:15:28.000Z
2020-11-15T13:15:28.000Z
examples/categorical-encoders.ipynb
iahsanujunda/feature_engine
46c6bd5a06626b0789fcc1367069d065010794a1
[ "BSD-3-Clause" ]
null
null
null
examples/categorical-encoders.ipynb
iahsanujunda/feature_engine
46c6bd5a06626b0789fcc1367069d065010794a1
[ "BSD-3-Clause" ]
null
null
null
33.996103
11,928
0.471145
[ [ [ "# Categorical encoders\n\nExamples of how to use the different categorical encoders using the Titanic dataset.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\n\nfrom feature_engine import categorical_encoders as ce\nfrom feature_engine.missing_data_imputers import CategoricalVariableImputer\n\npd.set_option('display.max_columns', None)", "_____no_output_____" ], [ "# Load titanic dataset from OpenML\n\ndef load_titanic():\n data = pd.read_csv('https://www.openml.org/data/get_csv/16826755/phpMYEkMl')\n data = data.replace('?', np.nan)\n data['cabin'] = data['cabin'].astype(str).str[0]\n data['pclass'] = data['pclass'].astype('O')\n data['age'] = data['age'].astype('float')\n data['fare'] = data['fare'].astype('float')\n data['embarked'].fillna('C', inplace=True)\n data.drop(labels=['boat', 'body', 'home.dest'], axis=1, inplace=True)\n return data", "_____no_output_____" ], [ "# load data\ndata = load_titanic()\ndata.head()", "_____no_output_____" ], [ "data.isnull().sum()", "_____no_output_____" ], [ "# we will encode the below variables, they have no missing values\ndata[['cabin', 'pclass', 'embarked']].isnull().sum()", "_____no_output_____" ], [ "data[['cabin', 'pclass', 'embarked']].dtypes", "_____no_output_____" ], [ "data[['cabin', 'pclass', 'embarked']].dtypes", "_____no_output_____" ], [ "# let's separate into training and testing set\n\nX_train, X_test, y_train, y_test = train_test_split(\n data.drop(['survived', 'name', 'ticket'], axis=1), data['survived'], test_size=0.3, random_state=0)\n\nX_train.shape, X_test.shape", "_____no_output_____" ] ], [ [ "## CountFrequencyCategoricalEncoder\n\nThe CountFrequencyCategoricalEncoder, replaces the categories by the count or frequency of the observations in the train set for that category. 
\n\nIf we select \"count\" in the encoding_method, then for the variable colour, if there are 10 observations in the train set that show colour blue, blue will be replaced by 10. Alternatively, if we select \"frequency\" in the encoding_method, if 10% of the observations in the train set show blue colour, then blue will be replaced by 0.1.", "_____no_output_____" ], [ "### Frequency\n\nLabels are replaced by the percentage of the observations that show that label in the train set.", "_____no_output_____" ] ], [ [ "count_enc = ce.CountFrequencyCategoricalEncoder(\n encoding_method='frequency', variables=['cabin', 'pclass', 'embarked'])\n\ncount_enc.fit(X_train)", "_____no_output_____" ], [ "# we can explore the encoder_dict_ to find out the category replacements.\n\ncount_enc.encoder_dict_", "_____no_output_____" ], [ "# transform the data: see the change in the head view\n\ntrain_t = count_enc.transform(X_train)\ntest_t = count_enc.transform(X_test)\n\ntest_t.head()", "_____no_output_____" ], [ "test_t['pclass'].value_counts().plot.bar()", "_____no_output_____" ] ], [ [ "### Count\n\nLabels are replaced by the number of the observations that show that label in the train set.", "_____no_output_____" ] ], [ [ "# this time we encode only 1 variable\n\ncount_enc = ce.CountFrequencyCategoricalEncoder(encoding_method='count',\n variables='cabin')\n\ncount_enc.fit(X_train)", "_____no_output_____" ], [ "# we can find the mappings in the encoder_dict_ attribute.\n\ncount_enc.encoder_dict_", "_____no_output_____" ], [ "# transform the data: see the change in the head view for Cabin\n\ntrain_t = count_enc.transform(X_train)\ntest_t = count_enc.transform(X_test)\n\ntest_t.head()", "_____no_output_____" ], [ "test_t['pclass'].value_counts().plot.bar()", "_____no_output_____" ] ], [ [ "### Select categorical variables automatically\n\nIf we don't indicate which variables we want to encode, the encoder will find all categorical variables", "_____no_output_____" ] ], [ [ "# this 
time we ommit the argument for variable\ncount_enc = ce.CountFrequencyCategoricalEncoder(encoding_method = 'count')\n\ncount_enc.fit(X_train)", "_____no_output_____" ], [ "# we can see that the encoder selected automatically all the categorical variables\n\ncount_enc.variables", "_____no_output_____" ], [ "# transform the data: see the change in the head view\n\ntrain_t = count_enc.transform(X_train)\ntest_t = count_enc.transform(X_test)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "Note that if there are labels in the test set that were not present in the train set, the transformer will introduce NaN, and raise a warning.", "_____no_output_____" ], [ "## MeanCategoricalEncoder\n\nThe MeanCategoricalEncoder replaces the labels of the variables by the mean value of the target for that label. For example, in the variable colour, if the mean value of the binary target is 0.5 for the label blue, then blue is replaced by 0.5", "_____no_output_____" ] ], [ [ "# we will transform 3 variables\nmean_enc = ce.MeanCategoricalEncoder(variables=['cabin', 'pclass', 'embarked'])\n\n# Note: the MeanCategoricalEncoder needs the target to fit\nmean_enc.fit(X_train, y_train)", "_____no_output_____" ], [ "# see the dictionary with the mappings per variable\n\nmean_enc.encoder_dict_", "_____no_output_____" ], [ "mean_enc.variables", "_____no_output_____" ], [ "# we can see the transformed variables in the head view\n\ntrain_t = count_enc.transform(X_train)\ntest_t = count_enc.transform(X_test)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "### Automatically select the variables\n\nThis encoder will select all categorical variables to encode, when no variables are specified when calling the encoder", "_____no_output_____" ] ], [ [ "mean_enc = ce.MeanCategoricalEncoder()\n\nmean_enc.fit(X_train, y_train)", "_____no_output_____" ], [ "mean_enc.variables", "_____no_output_____" ], [ "# we can see the transformed variables in the head view\n\ntrain_t = 
count_enc.transform(X_train)\ntest_t = count_enc.transform(X_test)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "## WoERatioCategoricalEncoder\n\nThis encoder replaces the labels by the weight of evidence or the ratio of probabilities. It only works for binary classification.\n\n The weight of evidence is given by: np.log( p(1) / p(0) )\n \n The target probability ratio is given by: p(1) / p(0)\n \n \n### Weight of evidence", "_____no_output_____" ] ], [ [ "## Rare value encoder first to reduce the cardinality\n# see below for more details on this encoder\n\nrare_encoder = ce.RareLabelCategoricalEncoder(\n tol=0.03, n_categories=2, variables=['cabin', 'pclass', 'embarked'])\n\nrare_encoder.fit(X_train)\n\n# transform\ntrain_t = rare_encoder.transform(X_train)\ntest_t = rare_encoder.transform(X_test)", "_____no_output_____" ], [ "woe_enc = ce.WoERatioCategoricalEncoder(\n encoding_method='woe', variables=['cabin', 'pclass', 'embarked'])\n\n# to fit you need to pass the target y\nwoe_enc.fit(train_t, y_train)", "_____no_output_____" ], [ "woe_enc.encoder_dict_", "_____no_output_____" ], [ "# transform and visualise the data\n\ntrain_t = woe_enc.transform(train_t)\ntest_t = woe_enc.transform(test_t)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "### Ratio\n\nSimilarly, it is recommended to remove rare labels and high cardinality before using this encoder.", "_____no_output_____" ] ], [ [ "# rare label encoder first: transform\n\ntrain_t = rare_encoder.transform(X_train)\ntest_t = rare_encoder.transform(X_test)", "_____no_output_____" ], [ "ratio_enc = ce.WoERatioCategoricalEncoder(\n encoding_method='ratio', variables=['cabin', 'pclass', 'embarked'])\n\n# to fit we need to pass the target y\nratio_enc.fit(train_t, y_train)", "_____no_output_____" ], [ "ratio_enc.encoder_dict_", "_____no_output_____" ], [ "# transform and visualise the data\n\ntrain_t = woe_enc.transform(train_t)\ntest_t = woe_enc.transform(test_t)\n\ntest_t.head()", "_____no_output_____" ] ], 
[ [ "## OrdinalCategoricalEncoder\n\nThe OrdinalCategoricalEncoder will replace the variable labels by digits, from 1 to the number of different labels. If we select \"arbitrary\", then the encoder will assign numbers as the labels appear in the variable (first come first served). If we select \"ordered\", the encoder will assign numbers following the mean of the target value for that label. So labels for which the mean of the target is higher will get the number 1, and those where the mean of the target is smallest will get the number n.\n\n### Ordered", "_____no_output_____" ] ], [ [ "# we will encode 3 variables:\n\nordinal_enc = ce.OrdinalCategoricalEncoder(\n encoding_method='ordered', variables=['pclass', 'cabin', 'embarked'])\n\n# for this encoder, we need to pass the target as argument\n# if encoding_method='ordered'\nordinal_enc.fit(X_train, y_train)", "_____no_output_____" ], [ "# here we can see the mappings\nordinal_enc.encoder_dict_", "_____no_output_____" ], [ "# transform and visualise the data\n\ntrain_t = ordinal_enc.transform(X_train)\ntest_t = ordinal_enc.transform(X_test)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "### Arbitrary", "_____no_output_____" ] ], [ [ "ordinal_enc = ce.OrdinalCategoricalEncoder(encoding_method='arbitrary',\n variables='cabin')\n\n# for this encoder we don't need to add the target. 
You can leave it or remove it.\nordinal_enc.fit(X_train, y_train)", "_____no_output_____" ], [ "ordinal_enc.encoder_dict_", "_____no_output_____" ] ], [ [ "Note that the ordering of the different labels is not the same when we select \"arbitrary\" or \"ordered\"", "_____no_output_____" ] ], [ [ "# transform: see the numerical values in the former categorical variables\n\ntrain_t = ordinal_enc.transform(X_train)\ntest_t = ordinal_enc.transform(X_test)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "### Automatically select categorical variables\n\nThese encoder as well selects all the categorical variables, if None is passed to the variable argument when calling the enconder.", "_____no_output_____" ] ], [ [ "ordinal_enc = ce.OrdinalCategoricalEncoder(encoding_method = 'arbitrary')\n\n# for this encoder we don't need to add the target. You can leave it or remove it.\nordinal_enc.fit(X_train)", "_____no_output_____" ], [ "ordinal_enc.variables", "_____no_output_____" ], [ "# transform: see the numerical values in the former categorical variables\n\ntrain_t = ordinal_enc.transform(X_train)\ntest_t = ordinal_enc.transform(X_test)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "## OneHotCategoricalEncoder\n\nPerforms One Hot Encoding. The encoder can select how many different labels per variable to encode into binaries. When top_categories is set to None, all the categories will be transformed in binary variables. 
However, when top_categories is set to an integer, for example 10, then only the 10 most popular categories will be transformed into binary, and the rest will be discarded.\n\nThe encoder has also the possibility to create binary variables from all categories (drop_last = False), or remove the binary for the last category (drop_last = True), for use in linear models.\n\n### All binary, no top_categories", "_____no_output_____" ] ], [ [ "ohe_enc = ce.OneHotCategoricalEncoder(\n top_categories=None,\n variables=['pclass', 'cabin', 'embarked'],\n drop_last=False)\n\nohe_enc.fit(X_train)", "_____no_output_____" ], [ "ohe_enc.drop_last", "_____no_output_____" ], [ "ohe_enc.encoder_dict_", "_____no_output_____" ], [ "train_t = ohe_enc.transform(X_train)\ntest_t = ohe_enc.transform(X_train)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "### Dropping the last category for linear models", "_____no_output_____" ] ], [ [ "ohe_enc = ce.OneHotCategoricalEncoder(\n top_categories=None,\n variables=['pclass', 'cabin', 'embarked'],\n drop_last=True)\n\nohe_enc.fit(X_train)\n\nohe_enc.encoder_dict_", "_____no_output_____" ], [ "train_t = ohe_enc.transform(X_train)\ntest_t = ohe_enc.transform(X_train)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "### Selecting top_categories to encode", "_____no_output_____" ] ], [ [ "ohe_enc = ce.OneHotCategoricalEncoder(\n top_categories=2,\n variables=['pclass', 'cabin', 'embarked'],\n drop_last=False)\n\nohe_enc.fit(X_train)\n\nohe_enc.encoder_dict_", "_____no_output_____" ], [ "train_t = ohe_enc.transform(X_train)\ntest_t = ohe_enc.transform(X_train)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "## RareLabelCategoricalEncoder\n\nThe RareLabelCategoricalEncoder groups labels that show a small number of observations in the dataset into a new category called 'Rare'. 
This helps to avoid overfitting.\n\nThe argument tol indicates the percentage of observations that the label needs to have in order not to be re-grouped into the \"Rare\" label. The argument n_categories indicates the minimum number of distinct categories that a variable needs to have for any of the labels to be re-grouped into rare. If the number of labels is smaller than n_categories, then the encoder will not group the labels for that variable.", "_____no_output_____" ] ], [ [ "## Rare value encoder\n\nrare_encoder = ce.RareLabelCategoricalEncoder(\n tol=0.03, n_categories=5, variables=['cabin', 'pclass', 'embarked'])\n\n\nrare_encoder.fit(X_train)\n\n# the encoder_dict_ contains a dictionary of the {variable: frequent labels} pair\nrare_encoder.encoder_dict_", "c:\\users\\sole\\documents\\repositories\\feature_engine\\feature_engine\\categorical_encoders.py:667: UserWarning: The number of unique categories for variable pclass is less than that indicated in n_categories. Thus, all categories will be considered frequent\n warnings.warn(\"The number of unique categories for variable {} is less than that indicated in \"\nc:\\users\\sole\\documents\\repositories\\feature_engine\\feature_engine\\categorical_encoders.py:667: UserWarning: The number of unique categories for variable embarked is less than that indicated in n_categories. 
Thus, all categories will be considered frequent\n warnings.warn(\"The number of unique categories for variable {} is less than that indicated in \"\n" ], [ "train_t = rare_encoder.transform(X_train)\ntest_t = rare_encoder.transform(X_train)\n\ntest_t.head()", "_____no_output_____" ] ], [ [ "### Automatically select all categorical variables\n\nIf no variable list is passed as argument, it selects all the categorical variables.", "_____no_output_____" ] ], [ [ "## Rare value encoder\n\nrare_encoder = ce.RareLabelCategoricalEncoder(tol = 0.03, n_categories=5)\n\nrare_encoder.fit(X_train)\n\nrare_encoder.encoder_dict_", "c:\\users\\sole\\documents\\repositories\\feature_engine\\feature_engine\\categorical_encoders.py:667: UserWarning: The number of unique categories for variable pclass is less than that indicated in n_categories. Thus, all categories will be considered frequent\n warnings.warn(\"The number of unique categories for variable {} is less than that indicated in \"\nc:\\users\\sole\\documents\\repositories\\feature_engine\\feature_engine\\categorical_encoders.py:667: UserWarning: The number of unique categories for variable sex is less than that indicated in n_categories. Thus, all categories will be considered frequent\n warnings.warn(\"The number of unique categories for variable {} is less than that indicated in \"\nc:\\users\\sole\\documents\\repositories\\feature_engine\\feature_engine\\categorical_encoders.py:667: UserWarning: The number of unique categories for variable embarked is less than that indicated in n_categories. Thus, all categories will be considered frequent\n warnings.warn(\"The number of unique categories for variable {} is less than that indicated in \"\n" ], [ "train_t = rare_encoder.transform(X_train)\ntest_t = rare_encoder.transform(X_train)\n\ntest_t.head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d06796a327f39f84fe0d42082366c1a42b613641
43,868
ipynb
Jupyter Notebook
Task 1-Prediction using Supervised ML/Prediction using Supervised ML.ipynb
Divyakathirvel26/GRIP-Internship-Tasks
ff154ad6047d7ea2f79e5906c6f9c837e271fa0e
[ "MIT" ]
1
2022-03-26T07:15:01.000Z
2022-03-26T07:15:01.000Z
Task 1-Prediction using Supervised ML/Prediction using Supervised ML.ipynb
Divyakathirvel26/GRIP-Internship-Tasks
ff154ad6047d7ea2f79e5906c6f9c837e271fa0e
[ "MIT" ]
null
null
null
Task 1-Prediction using Supervised ML/Prediction using Supervised ML.ipynb
Divyakathirvel26/GRIP-Internship-Tasks
ff154ad6047d7ea2f79e5906c6f9c837e271fa0e
[ "MIT" ]
null
null
null
63.301587
14,888
0.764589
[ [ [ "# Simple Linear Regression", "_____no_output_____" ], [ "### Importing all libraries required", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\n%matplotlib inline", "_____no_output_____" ], [ "# Reading data from remote link\nurl = \"http://bit.ly/w-data\"\ndata = pd.read_csv(url)\nprint(\"Data imported successfully\")\ndata.head(10)", "Data imported successfully\n" ], [ "# The shape of dataset \ndata.shape", "_____no_output_____" ], [ "# check the info of data\ndata.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 25 entries, 0 to 24\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Hours 25 non-null float64\n 1 Scores 25 non-null int64 \ndtypes: float64(1), int64(1)\nmemory usage: 528.0 bytes\n" ], [ "# check the description of student_score data\ndata.describe()", "_____no_output_____" ] ], [ [ "### Data Visualization", "_____no_output_____" ] ], [ [ "# Plotting the distribution of scores\ndata.plot(x='Hours', y='Scores', style='o') \nplt.title('Hours vs Percentage') \nplt.xlabel('Hours Studied') \nplt.ylabel('Percentage Score') \nplt.show()", "_____no_output_____" ] ], [ [ "### Linear Regression Model", "_____no_output_____" ] ], [ [ "X = data.iloc[:, :-1].values \ny = data.iloc[:, 1].values \nX_train, X_test, y_train, y_test = train_test_split(X, y,train_size=0.80,test_size=0.20,random_state=42)", "_____no_output_____" ] ], [ [ "### Training the model", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\nlinearRegressor= LinearRegression()\nlinearRegressor.fit(X_train, y_train)\ny_predict= linearRegressor.predict(X_train)", "_____no_output_____" ] ], [ [ "### Training the Algorithm", "_____no_output_____" ] ], [ [ 
"regressor = LinearRegression() \nregressor.fit(X_train, y_train) \n\nprint(\"Training complete.\")", "Training complete.\n" ], [ "\n# Plotting the regression line\nline = regressor.coef_*X+regressor.intercept_\n# Plotting for the test data\nplt.scatter(X, y)\nplt.plot(X, line);\nplt.title('Hours vs Percentage') \nplt.xlabel('Hours Studied') \nplt.ylabel('Percentage Score')\nplt.show()", "_____no_output_____" ] ], [ [ "### Checking the accuracy scores for training and test set", "_____no_output_____" ] ], [ [ "print('Test Score')\nprint(regressor.score(X_test, y_test))\nprint('Training Score')\nprint(regressor.score(X_train, y_train))", "Test Score\n0.9678055545167994\nTraining Score\n0.9491209376364416\n" ], [ "y_test", "_____no_output_____" ], [ "y_predict", "_____no_output_____" ], [ "\ny_predict[:5]", "_____no_output_____" ], [ "data= pd.DataFrame({'Actual': y_test,'Predicted': y_predict[:5]})\ndata", "_____no_output_____" ], [ "#Let's predict the score for 9.25 hpurs\nprint('Score of student who studied for 9.25 hours a dat', regressor.predict([[9.25]]))", "Score of student who studied for 9.25 hours a dat [92.38611528]\n" ] ], [ [ "### Model Evaluation Metrics", "_____no_output_____" ] ], [ [ "#Checking the efficiency of model\nmean_squ_error = mean_squared_error(y_test, y_predict[:5])\nmean_abs_error = mean_absolute_error(y_test, y_predict[:5])\nprint(\"Mean Squred Error:\",mean_squ_error)\nprint(\"Mean absolute Error:\",mean_abs_error)", "Mean Squred Error: 1404.2200673968694\nMean absolute Error: 33.80918778157651\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d0679d67858aedb147be6456f8c5c3fe0f98b31f
21,512
ipynb
Jupyter Notebook
04_HowTos/StoreClient/How_to_use_the_Data_Lab_StoreClient.ipynb
noaodatalab/notebooks_default
3001f40c0de05445e65e205fdb3806f85e91dbfe
[ "BSD-3-Clause" ]
1
2022-03-19T17:38:59.000Z
2022-03-19T17:38:59.000Z
04_HowTos/StoreClient/How_to_use_the_Data_Lab_StoreClient.ipynb
noaodatalab/notebooks_default
3001f40c0de05445e65e205fdb3806f85e91dbfe
[ "BSD-3-Clause" ]
6
2022-02-21T20:09:10.000Z
2022-03-26T15:41:15.000Z
04_HowTos/StoreClient/How_to_use_the_Data_Lab_StoreClient.ipynb
noaodatalab/notebooks_default
3001f40c0de05445e65e205fdb3806f85e91dbfe
[ "BSD-3-Clause" ]
1
2022-02-21T18:13:37.000Z
2022-02-21T18:13:37.000Z
26.656753
442
0.552901
[ [ [ "__author__ = 'Mike Fitzpatrick <[email protected]>, Robert Nikutta <[email protected]>'\n__version__ = '20211130'\n__datasets__ = []\n__keywords__ = []", "_____no_output_____" ] ], [ [ "## How to use the Data Lab *Store Client* Service\n\nThis notebook documents how to use the Data Lab virtual storage system via the store client service. This can be done either from a Python script (e.g. within this notebook) or from the command line using the <i>datalab</i> command.\n\n### The storage manager service interface\n\nThe store client service simplifies access to the Data Lab virtual storage system. This section describes the store client service interface in case we want to write our own code against that rather than using one of the provided tools. The store client service accepts an HTTP GET call to the appropriate endpoint for the particular operation:\n\n| Endpoint | Description | Req'd Parameters |\n|----------|-------------|------------|\n| /get | Retrieve a file | name |\n| /put | Upload a file | name |\n| /load | Load a file to vospace | name, endpoint |\n| /cp | Copy a file/directory | from, to |\n| /ln | Link a file/directory | from, to |\n| /lock | Lock a node from write updates | name |\n| /ls | Get a file/directory listing | name |\n| /access | Determine file accessability | name |\n| /stat | File status info | name,verbose |\n| /mkdir | Create a directory | name |\n| /mv | Move/rename a file/directory | from, to |\n| /rm | Delete a file | name |\n| /rmdir | Delete a directory | name |\n| /tag | Annotate a file/directory | name, tag |\n\nFor example, a call to <i>http://datalab.noirlab.edu/storage/get?name=vos://mag.csv</i> will retrieve the file '_mag.csv_' from the root directory of the user's virtual storage. 
Likewise, a python call using the _storeClient_ interface such as \"_storeClient.get('vos://mag.csv')_\" would get the same file.\n\n#### Virtual storage identifiers\n\nFiles in the virtual storage are usually identified via the prefix \"_vos://_\". This shorthand identifier is resolved to a user's home directory of the storage space in the service. As a convenience, the prefix may optionally be omitted when the parameter refers to a node in the virtual storage. Navigation above a user's home directory is not supported, however, subdirectories within the space may be created and used as needed.\n\n#### Authentication\nThe storage manager service requires a DataLab security token. This needs to be passed as the value of the header keyword \"X-DL-AuthToken\" in any HTTP GET call to the service. If the token is not supplied anonymous access is assumed but provides access only to public storage spaces.", "_____no_output_____" ], [ "### From Python code\n\nThe store client service can be called from Python code using the <i>datalab</i> module. This provides methods to access the various functions in the <i>storeClient</i> subpackage. \n\n#### Initialization\nThis is the setup that is required to use the store client. The first thing to do is import the relevant Python modules and also retrieve our DataLab security token.", "_____no_output_____" ] ], [ [ "# Standard notebook imports\nfrom getpass import getpass\nfrom dl import authClient, storeClient", "_____no_output_____" ] ], [ [ "Comment out and run the cell below if you need to login to Data Lab:", "_____no_output_____" ] ], [ [ "## Get the authentication token for the user\n#token = authClient.login(input(\"Enter user name: (+ENTER) \"),getpass(\"Enter password: (+ENTER) \"))\n#if not authClient.isValidToken(token):\n# raise Exception('Token is not valid. 
Please check your usename/password and execute this cell again.')", "_____no_output_____" ] ], [ [ "#### Listing a file/directory\n\nWe can see all the files that are in a specific directory or get a full listing for a specific file. In this case, we'll list the default virtual storage directory to use as a basis for changes we'll make below.", "_____no_output_____" ] ], [ [ "listing = storeClient.ls (name = 'vos://')\nprint (listing)", "cutout.fits,public,results,tmp\n" ] ], [ [ "The *public* directory shown here is visible to all Data Lab users and provides a means of sharing data without having to setup special access. Similarly, the *tmp* directory is read-protected and provides a convenient temporary directory to be used in a workflow.", "_____no_output_____" ], [ "#### File Existence and Info\n\nAside from simply listing files, it's possible to test whether a named file already exists or to determine more information about it.", "_____no_output_____" ] ], [ [ "# A simple file existence test:\nif storeClient.access ('vos://public'):\n print ('User \"public\" directory exists')\nif storeClient.access ('vos://public', mode='w'):\n print ('User \"public\" directory is group/world writable')\nelse:\n print ('User \"public\" directory is not group/world writable')\n \nif storeClient.access ('vos://tmp'):\n print ('User \"tmp\" directory exists') \nif storeClient.access ('vos://tmp', mode='w'):\n print ('User \"tmp\" directory is group/world writable')\nelse:\n print ('User \"tmp\" directory is not group/world writable')", "User \"public\" directory exists\nUser \"public\" directory is not group/world writable\nUser \"tmp\" directory exists\nUser \"tmp\" directory is not group/world writable\n" ] ], [ [ "#### Uploading a file\n\nNow we want to upload a new data file from our local disk to the virtual storage:", "_____no_output_____" ] ], [ [ "storeClient.put (to = 'vos://newmags.csv', fr = './newmags.csv')\nprint(storeClient.ls (name='vos://'))", "(1 / 1) 
./newmags.csv -> vos://newmags.csv\ncutout.fits,newmags.csv,public,results,tmp\n" ] ], [ [ "#### Downloading a file\n\nLet's say we want to download a file from our virtual storage space, in this case a query result that we saved to it in the \"How to use the Data Lab query manager service\" notebook:", "_____no_output_____" ] ], [ [ "storeClient.get (fr = 'vos://newmags.csv', to = './mymags.csv')", "(1/1) [====================] [ 142B] newmags.csv\n" ] ], [ [ "It is also possible to get the contents of a remote file directly into your notebook by specifying the location as an empty string:", "_____no_output_____" ] ], [ [ "data = storeClient.get (fr = 'vos://newmags.csv', to = '')\nprint (data)", "id,g,r,i\n001,22.3,12.4,21.5\n002,22.3,12.4,21.5\n003,22.3,12.4,21.5\n004,22.3,12.4,21.5\n005,22.3,12.4,21.5\n006,22.3,12.4,21.5\n007,22.3,12.4,21.5\n\n" ] ], [ [ "#### Loading a file from a remote URL\n\nIt is possible to load a file directly to virtual storage from a remote URL (e.g. an \"accessURL\" for an image cutout, a remote data file, etc) using the \"storeClient.load()\" method:", "_____no_output_____" ] ], [ [ "url = \"http://datalab.noirlab.edu/svc/cutout?col=&siaRef=c4d_161005_022804_ooi_g_v1.fits.fz&extn=31&POS=335.0,0.0&SIZE=0.1\"\nstoreClient.load('vos://cutout.fits',url)", "_____no_output_____" ] ], [ [ "#### Creating a directory\n\nWe can create a directory on the remote storage to be used for saving data later:", "_____no_output_____" ] ], [ [ "storeClient.mkdir ('vos://results')", "_____no_output_____" ] ], [ [ "#### Copying a file/directory\n\nWe want to put a copy of the file in a remote work directory:", "_____no_output_____" ] ], [ [ "storeClient.mkdir ('vos://temp')\nprint (\"Before: \" + storeClient.ls (name='vos://temp/'))\nstoreClient.cp (fr = 'vos://newmags.csv', to = 'vos://temp/newmags.csv',verbose=True)\nprint (\"After: \" + storeClient.ls (name='vos://temp/'))", "Before: \nAfter: newmags.csv\n" ], [ 
"print(storeClient.ls('vos://',format='long'))", "-rw-rw-r-x demo01 2963520 22 Nov 2021 14:22 cutout.fits\n-rw-rw-r-x demo01 142 30 Nov 2021 14:58 newmags.csv\ndrwxrwxr-x demo01 0 14 Jul 2020 10:01 public/\ndrwxrwxr-x demo01 0 22 Nov 2021 14:22 results/\ndrwxrwxr-x demo01 0 30 Nov 2021 14:58 temp/\ndrwxrwx--- demo01 0 14 Jul 2020 10:01 tmp/\n\n" ] ], [ [ "Notice that in the *ls()* call we append the directory name with a trailing '/' to list the contents of the directory rather than the directory itself.", "_____no_output_____" ], [ "#### Linking to a file/directory\n\n**WARNING**: Linking is currently **not** working in the Data Lab storage manager. This notebook will be updated when the problem has been resolved.\n\nSometimes we want to create a link to a file or directory. In this case, the link named by the *'fr'* parameter is created and points to the file/container named by the *'target'* parameter.", "_____no_output_____" ] ], [ [ "storeClient.ln ('vos://mags.csv', 'vos://temp/newmags.csv')\nprint (\"Root dir: \" + storeClient.ls (name='vos://'))\nprint (\"Temp dir: \" + storeClient.ls (name='vos://temp/'))", "Root dir: cutout.fits,newmags.csv,public,results,temp,tmp\nTemp dir: newmags.csv\n" ] ], [ [ "#### Moving/renaming a file/directory\n\nWe can move a file or directory:", "_____no_output_____" ] ], [ [ "storeClient.mv(fr = 'vos://temp/newmags.csv', to = 'vos://results')\nprint (\"Results dir: \" + storeClient.ls (name='vos://results/'))", "Results dir: newmags.csv\n" ] ], [ [ "#### Deleting a file\n\nWe can delete a file:", "_____no_output_____" ] ], [ [ "print (\"Before: \" + storeClient.ls (name='vos://'))\nstoreClient.rm (name = 'vos://mags.csv')\nprint (\"After: \" + storeClient.ls (name='vos://'))", "Before: cutout.fits,newmags.csv,public,results,temp,tmp\nAfter: cutout.fits,newmags.csv,public,results,temp,tmp\n" ] ], [ [ "#### Deleting a directory\n\nWe can also delete a directory, doing so also deletes the contents of that directory:", 
"_____no_output_____" ] ], [ [ "storeClient.rmdir(name = 'vos://temp')", "_____no_output_____" ] ], [ [ "#### Tagging a file/directory\n\n**Warning**: Tagging is currently **not** working in the Data Lab storage manager. This notebook will be updated when the problem has been resolved.\n\nWe can tag any file or directory with arbitrary metadata:", "_____no_output_____" ] ], [ [ "storeClient.tag('vos://results', 'The results from my analysis')", "_____no_output_____" ] ], [ [ "#### Cleanup the demo directory of remaining files", "_____no_output_____" ] ], [ [ "storeClient.rm (name = 'vos://newmags.csv')\nstoreClient.rm (name = 'vos://results')\nstoreClient.ls (name = 'vos://')", "_____no_output_____" ] ], [ [ "### Using the datalab command\n\nThe <i>datalab</i> command provides an alternate command line way to work with the query manager through the <i>query</i> subcommands, which is especially useful if you want to interact with the query manager from your local computer. Please have the `datalab` command line utility installed first (for install instructions see https://github.com/astro-datalab/datalab ).", "_____no_output_____" ], [ "The cells below are commented out. 
Copy and paste any of them (without the comment sign) and run locally.", "_____no_output_____" ], [ "#### Log in once", "_____no_output_____" ] ], [ [ "#!datalab login", "_____no_output_____" ] ], [ [ "and enter the credentials as prompted.", "_____no_output_____" ], [ "#### Downloading a file\n\nLet's say we want to download a file from our virtual storage space:", "_____no_output_____" ] ], [ [ "#!datalab get fr=\"vos://mags.csv\" to=\"./mags.csv\"", "_____no_output_____" ] ], [ [ "#### Uploading a file\n\nNow we want to upload a new data file from our local disk:", "_____no_output_____" ] ], [ [ "#!datalab put fr=\"./newmags.csv\" to=\"vos://newmags.csv\"", "_____no_output_____" ] ], [ [ "#### Copying a file/directory\n\nWe want to put a copy of the file in a remote work directory:", "_____no_output_____" ] ], [ [ "#!datalab cp fr=\"vos://newmags.csv\" to=\"vos://temp/newmags.csv\"", "_____no_output_____" ] ], [ [ "#### Linking to a file/directory\n\nSometimes we want to create a link to a file or directory:", "_____no_output_____" ] ], [ [ "#!datalab ln fr=\"vos://temp/mags.csv\" to=\"vos://mags.csv\"", "_____no_output_____" ] ], [ [ "#### Listing a file/directory\n\nWe can see all the files that are in a specific directory or get a full listing for a specific file:", "_____no_output_____" ] ], [ [ "#!datalab ls name=\"vos://temp\"", "_____no_output_____" ] ], [ [ "#### Creating a directory\n\nWe can create a directory:", "_____no_output_____" ] ], [ [ "#!datalab mkdir name=\"vos://results\"", "_____no_output_____" ] ], [ [ "#### Moving/renaming a file/directory\n\nWe can move a file or directory:", "_____no_output_____" ] ], [ [ "#!datalab mv fr=\"vos://temp/newmags.csv\" to=\"vos://results\"", "_____no_output_____" ] ], [ [ "#### Deleting a file\n\nWe can delete a file:", "_____no_output_____" ] ], [ [ "#!datalab rm name=\"vos://temp/mags.csv\"", "_____no_output_____" ] ], [ [ "#### Deleting a directory\n\nWe can also delete a directory:", 
"_____no_output_____" ] ], [ [ "#!datalab rmdir name=\"vos://temp\"", "_____no_output_____" ] ], [ [ "#### Tagging a file/directory\n\nWe can tag any file or directory with arbitrary metadata:", "_____no_output_____" ] ], [ [ "#!datalab tag name=\"vos://results\" tag=\"The results from my analysis\"", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d067aa001aa9ac6e3cb78f20c822c21739b3aeb1
696,808
ipynb
Jupyter Notebook
8. Bose-Einstein condensation.ipynb
cphysics/simulation
6fc2056c77a021105a6851809e2bacdcc0148ba3
[ "MIT" ]
3
2020-12-02T19:34:02.000Z
2022-03-17T03:12:07.000Z
8. Bose-Einstein condensation.ipynb
cphysics/simulation
6fc2056c77a021105a6851809e2bacdcc0148ba3
[ "MIT" ]
null
null
null
8. Bose-Einstein condensation.ipynb
cphysics/simulation
6fc2056c77a021105a6851809e2bacdcc0148ba3
[ "MIT" ]
17
2020-09-17T12:32:46.000Z
2021-11-06T03:25:15.000Z
460.547257
66,200
0.928247
[ [ [ "# *Bosonic statistics and the Bose-Einstein condensation*", "_____no_output_____" ], [ "`Doruk Efe Gökmen -- 30/08/2018 -- Ankara`", "_____no_output_____" ], [ "## Non-interacting ideal bosons\nNon-interacting bosons is the only system in physics that can undergo a phase transition without mutual interactions between its components.\n\nLet us enumerate the energy eigenstates of a single 3D boson in an harmonic trap by the following program.", "_____no_output_____" ] ], [ [ "Emax = 30\nStates = []\nfor E_x in range(Emax):\n for E_y in range(Emax):\n for E_z in range(Emax):\n States.append(((E_x + E_y + E_z), (E_x, E_y, E_z)))\nStates.sort()\nfor k in range(Emax):\n print '%3d' % k, States[k][0], States[k][1]", " 0 0 (0, 0, 0)\n 1 1 (0, 0, 1)\n 2 1 (0, 1, 0)\n 3 1 (1, 0, 0)\n 4 2 (0, 0, 2)\n 5 2 (0, 1, 1)\n 6 2 (0, 2, 0)\n 7 2 (1, 0, 1)\n 8 2 (1, 1, 0)\n 9 2 (2, 0, 0)\n 10 3 (0, 0, 3)\n 11 3 (0, 1, 2)\n 12 3 (0, 2, 1)\n 13 3 (0, 3, 0)\n 14 3 (1, 0, 2)\n 15 3 (1, 1, 1)\n 16 3 (1, 2, 0)\n 17 3 (2, 0, 1)\n 18 3 (2, 1, 0)\n 19 3 (3, 0, 0)\n 20 4 (0, 0, 4)\n 21 4 (0, 1, 3)\n 22 4 (0, 2, 2)\n 23 4 (0, 3, 1)\n 24 4 (0, 4, 0)\n 25 4 (1, 0, 3)\n 26 4 (1, 1, 2)\n 27 4 (1, 2, 1)\n 28 4 (1, 3, 0)\n 29 4 (2, 0, 2)\n" ] ], [ [ "Here it can be perceived that the degeneracy at an energy level $E_n$, which we denote by $\\mathcal{N}(E_n)$, is $\\frac{(n+1)(n+2)}{2}$. Alternatively, we may use a more systematic approach. We can calculate the number of states at the $n$th energy level as $\\mathcal{N}(E_n)=\\sum_{E_x=0}^{E_n}\\sum_{E_y=0}^{E_n}\\sum_{E_z=0}^{E_n}\\delta_{(E_x+E_y+E_z),E_n}$, where $\\delta_{j,k}$ is the Kronecker delta. In the continuous limit we have the Dirac delta function \n\n$\\delta_{j,k}\\rightarrow\\delta(j-k) =\\int_{-\\pi}^\\pi \\frac{\\text{d}\\lambda}{2\\pi}e^{i(j-k)\\lambda}$. 
(1)\n\nIf we insert this function into the above expression, we get $\\mathcal{N}(E_n)=\\int_{-\\pi}^\\pi \\frac{\\text{d}\\lambda}{2\\pi}e^{-iE_n\\lambda}\\left(\\sum_{E_x=0}^{E_n}e^{iE_x\\lambda}\\right)^3$. The geometric sum can be evaluated, hence we have the integral $\\mathcal{N}(E_n)=\\int_{-\\pi}^\\pi \\frac{\\text{d}\\lambda}{2\\pi}e^{-iE_n\\lambda}\\left[\\frac{1-e^{i\\lambda (n+1)}}{1-e^{i\\lambda}}\\right]^3$. The integration range corresponds to a circular contour $\\mathcal{C}$ of radius 1 centered at 0 in the complex plane. If we define $z=e^{i\\lambda}$, the integral transforms into $\\mathcal{N}(E_n)=\\frac{1}{2\\pi i}\\oint_{\\mathcal{C}}\\frac{\\text{d}z}{z^{n+1}}\\left[\\frac{1-z^{n+1}}{1-z}\\right]^3$. Using the residue theorem, this integral can be evaluated by determining the coefficient of the $z^{-1}$ term in the Laurent series of $\\frac{1}{z^{n+1}}\\left[\\frac{1-z^{n+1}}{1-z}\\right]^3$, which is $(n+1)(n+2)/2$. Hence we recover the previous result.\n\n##### Five boson bounded trap model\nConsider 5 bosons in the harmonic trap, but with a cutoff on the single-particle energies: $E_\\sigma\\leq 4$. There are $35$ possible single-particle energy states. For this model, the above naive enumeration of these energy states still works. We can label the state of each of the 5 particles by $\\sigma_i$, so that $\\{\\text{5-particle state}\\}=\\{\\sigma_1,\\cdots,\\sigma_5\\}$. The partition function for this system is given by $Z(\\beta)=\\sum_{0\\leq\\sigma_1\\leq\\cdots\\leq\\sigma_5\\leq 34}e^{-\\beta E(\\sigma_1,\\cdots,\\sigma_5)}$. In the following program, the average occupation number of the ground state per particle is calculated at different temperatures (corresponds to the condensate). However, due to the nested for loops, this method is very inconvenient for higher numbers of particles. 
", "_____no_output_____" ] ], [ [ "%pylab inline\nimport math, numpy as np, pylab as plt\n\n#calculate the partition function for 5 bosons by stacking the bosons in one of the N_states \n#number of possible states and counting only a specific order of them (they are indistinguishable)\ndef bosons_bounded_harmonic(beta, N):\n Energy = [] #initialise the vector that the energy values are saved with enumeration\n n_states_1p = 0 #initialise the total number of single trapped boson states\n for n in range(N + 1):\n degeneracy = (n + 1) * (n + 2) / 2 #degeneracy in the 3D harmonic oscillator\n Energy += [float(n)] * degeneracy \n n_states_1p += degeneracy\n \n n_states_5p = 0 #initialise the total number states of 5 trapped bosons\n Z = 0.0 #initialise the partition function\n N0_mean = 0.0\n E_mean = 0.0 \n for s_0 in range(n_states_1p):\n for s_1 in range(s_0, n_states_1p): #consider the order s_0<s_1... to avoid overcounting\n for s_2 in range(s_1, n_states_1p):\n for s_3 in range(s_2, n_states_1p):\n for s_4 in range(s_3, n_states_1p):\n n_states_5p += 1\n state = [s_0, s_1, s_2, s_3, s_4] #construct the state of each 5 boson\n E = sum(Energy[s] for s in state) #calculate the total energy by above enumeration\n Z += math.exp(-beta * E) #canonical partition function\n E_mean += E * math.exp(-beta * E) #avg. total energy\n N0_mean += state.count(0) * math.exp(-beta * E) #avg. 
ground level occupation number\n return n_states_5p, Z, E_mean, N0_mean\n \nN = 4 #the energy cutoff for each boson\nbeta = 1.0 #inverse temperature\n\nn_states_5p, Z, E_mean, N0_mean = bosons_bounded_harmonic(beta, N)\n\nprint 'Temperature:', 1 / beta, 'Total number of possible states:', n_states_5p, '| Partition function:', Z,\\\n '| Average energy per particle:', E_mean / Z / 5.0,\\\n '| Condensate fraction (ground state occupation per particle):', N0_mean / Z / 5.0\n\ncond_frac = []\ntemperature = []\nfor T in np.linspace(0.1, 1.0, 10):\n n_states_5p, Z, E_mean, N0_mean = bosons_bounded_harmonic(1.0 / T, N)\n cond_frac.append(N0_mean / Z / 5.0)\n temperature.append(T)\n \nplt.plot(temperature, cond_frac)\nplt.title('Condensate? fraction for the $N=5$ bosons bounded trap model ($N_{bound}=%i$)' % N, fontsize = 14)\nplt.xlabel('$T$', fontsize = 14)\nplt.ylabel('$\\\\langle N_0 \\\\rangle$ / N', fontsize = 14)\nplt.grid()", "Populating the interactive namespace from numpy and matplotlib\nTemperature: 1.0 Total number of possible states: 575757 | Partition function: 17.3732972183 | Average energy per particle: 1.03133265311 | Condensate fraction (ground state occupation per particle): 0.446969501933\n" ] ], [ [ "Here we see that all particles are in the ground states at very low temperatures this is a simple consequence of Boltzmann statistics. At zero temperature all the particles populate the ground state. Bose-Einstein condensation is something else, it means that a finite fraction of the system is in the ground-state for temperatures which are much higher than the gap between the gap between the ground-state and the first excited state, which is one, in our system. Bose-Einstein condensation occurs when all of a sudden a finite fraction of particles populate the single-particle ground state. 
In a trap, this happens at higher and higher temperatures as we increase the particle number.\n\n\n\nAlternatively, we can characterise any single particle state $\\sigma=0,\\cdots,34$ by an occupation number $n_\\sigma$. Using this occupation number representation, the energy is given by $E=n_0E_0+\\cdots + n_{34}E_{34}$, and the partition function is $Z(\\beta)=\\sum^{N=5}_{n_0=0}\\cdots\\sum^{N=5}_{n_{34}=0}e^{-\\beta(n_0E_0+\\cdots + n_{34}E_{34})}\\delta_{(n_0+\\cdots + n_{34}),N=5}$. Using the integral representation of the Kronecker delta given in (1), and evaluating the resulting sums, we have \n\n$Z(\\beta)=\\int_{-\\pi}^\\pi\\frac{\\text{d}\\lambda}{2\\pi}e^{-iN\\lambda}\\Pi_{E=0}^{E_\\text{max}}[f_E(\\beta,\\lambda)]^{\\mathcal{N}(E)}$. (2)", "_____no_output_____" ], [ "### The bosonic density matrix\n\n**Distinguishable particles:** The partition function of $N$ distinguishable particles is given by $Z^D(\\beta)=\\int \\text{d}\\mathbf{x}\\rho(\\mathbf{x},\\mathbf{x},\\beta)$, where $\\mathbf{x}=\\{0,\\cdots,N-1\\}$, i.e. the positions of the $i$th particle; and $\\rho$ is the $N$ distinguishable particle density matrix. If the particles are non-interacting (ideal), then the density matrix can simply be decomposed into $N$ single particle density matrices as \n\n$\\rho^{D,\\text{ideal}}(\\mathbf{x},\\mathbf{x}',\\beta)=\\Pi_{i=0}^{N-1}\\rho(x_i,x_i',\\beta)$, (3)\n\nwith the single particle density matrix $\\rho(x_i,x_i',\\beta)=\\sum_{\\lambda_i=0}^{\\infty}\\psi_{\\lambda_i}(x_i)\\psi_{\\lambda_i}^{*}(x'_i)e^{-\\beta E_{\\lambda_i}}$, where $\\lambda_i$ is the energy eigenstate of the $i$th particle. That means that the quantum statistical paths of the two particles are independent. 
More generally, the interacting many distinguishable particle density matrix is\n\n$\\rho^{D}(\\mathbf{x},\\mathbf{x}',\\beta)=\\sum_{\\sigma}\\Psi_{\\sigma}(\\mathbf{x})\\Psi_{\\sigma}^{*}(\\mathbf{x}')e^{-\\beta E_{\\sigma}}$, (4)\n\nwhere the sum is done over the all possible $N$ particle states $\\sigma=\\{\\lambda_0,\\cdots,\\lambda_{N-1}\\}$. The interacting paths are described by the paths whose weight are modified through Trotter decomposition, which *correlates* those paths. \n\n**Indistinguishable particles:** The particles $\\{0,\\cdots,N-1\\}$ are indistinguishable if and only if \n\n$\\Psi_{\\sigma_\\text{id}}(\\mathbf{x})=\\xi^\\mathcal{P}\\Psi_{\\sigma_\\text{id}}(\\mathcal{P}\\mathbf{x})$ $\\forall \\sigma$, (5)\n\nwhere they are in an indistinguishable state ${\\sigma_\\text{id}}$, $\\mathcal{P}$ is any $N$ particle permutation and the *species factor* $\\xi$ is $-1$ (antisymmetric) for fermions, and $1$ (symmetric) for bosons. Here we focus on the bosonic case. Since there are $N!$ such permutations, if the particles are indistinguishable bosons, using (5) we get $\\frac{1}{N!}\\sum_{\\mathcal{P}}\\Psi_\\sigma(\\mathcal{P}x)=\\Psi_\\sigma(\\mathbf{x})$, i.e. $\\Psi_\\sigma(x)=\\Psi_{\\sigma_\\text{id}}(x)$. Furthermore, from a group theory argument it follows that $\\frac{1}{N!}\\sum_{\\mathcal{P}}\\Psi_\\sigma(\\mathcal{P}x)=0$ otherwise (fermionic or distinguishable). This can be expressed in a more compact form as\n\n$\\frac{1}{N!}\\sum_{\\mathcal{P}}\\Psi_\\sigma(\\mathcal{P}x)=\\delta_{{\\sigma_\\text{id}},\\sigma}\\Psi_\\sigma(x)$. (6)\n\nBy definition, the bosonic density matrix should be $\\rho^\\text{bose}(\\mathbf{x},\\mathbf{x}',\\beta)=\\sum_{\\sigma=\\{\\sigma_\\text{id}\\}}\\Psi_\\sigma(\\mathbf{x})\\Psi^{*}_\\sigma(\\mathbf{x}')e^{-\\beta E_\\sigma}=\\sum_{\\sigma}\\delta_{{\\sigma_\\text{id}},\\sigma}\\Psi_\\sigma(\\mathbf{x})\\Psi^{*}_\\sigma(\\mathbf{x}')e^{-\\beta E_\\sigma}$, i.e. 
a sum over all $N$ particle states which are symmetric. If we insert Eqn. (6) here in the latter equality, we get $\\rho^\\text{bose}(\\mathbf{x},\\mathbf{x}',\\beta)=\\frac{1}{N!}\\sum_\\sigma\\Psi_\\sigma(\\mathbf{x})\\sum_\\mathcal{P}\\Psi^{*}_\\sigma(\\mathcal{P}\\mathbf{x}')e^{-\\beta E_\\sigma}$. Exchanging the sums, we get $\\rho^\\text{bose}(\\mathbf{x},\\mathbf{x}',\\beta)=\\frac{1}{N!}\\sum_\\mathcal{P}\\sum_\\sigma\\Psi_\\sigma(\\mathbf{x})\\Psi^{*}_\\sigma(\\mathcal{P}\\mathbf{x}')e^{-\\beta E_\\sigma}$. In other words, we simply have \n\n$\\boxed{\\rho^\\text{bose}(\\mathbf{x},\\mathbf{x}',\\beta)=\\frac{1}{N!}\\sum_\\mathcal{P}\\rho^D(\\mathbf{x},\\mathcal{P}\\mathbf{x}',\\beta)}$, (7)\n\nthat is the average of the distinguishable density matrices over all permutations of $N$ particles.\n\nFor ideal bosons, we have $\\boxed{\\rho^\\text{bose, ideal}(\\mathbf{x},\\mathbf{x}',\\beta)=\\frac{1}{N!}\\sum_\\mathcal{P}\\rho(x_0,\\mathcal{P}x_0',\\beta)\\rho(x_1,\\mathcal{P}x_1',\\beta)\\cdots\\rho(x_{N-1},\\mathcal{P}x_{N-1}',\\beta)}$. (8)\n\nThe partition function is therefore \n\n$Z^\\text{bose}(\\beta)=\\frac{1}{N!}\\int \\text{d}x_0\\cdots\\text{d}x_{N-1}\\sum_\\mathcal{P}\\rho^D(\\mathbf{x},\\mathcal{P}\\mathbf{x},\\beta)=\\frac{1}{N!}\\sum_\\mathcal{P}Z_\\mathcal{P}$, (9)\n\n\ni.e. an integral over paths and an average over all permutations. We should therefore sample both positions and permutations.", "_____no_output_____" ], [ "For fermions, the sum over permutations $\\mathcal{P}$ involve a weighting with factor $(-1)^{\\mathcal{P}}$: \n\n$\\rho^\\text{fermi}(\\mathbf{x},\\mathbf{x}',\\beta)=\\frac{1}{N!}\\sum_\\mathcal{P}(-1)^\\mathcal{P}\\rho^D(\\mathbf{x},\\mathcal{P}\\mathbf{x}',\\beta)$\n\nTherefore for fermions corresponding path integrals are nontrivial, and they involve Grassmann variables (see e.g. 
Negele, Orland https://www.amazon.com/Quantum-Many-particle-Systems-Advanced-Classics/dp/0738200522 ).", "_____no_output_____" ], [ "#### Sampling permutations\n\nThe following Markov-chain algorithm samples permutations of $N$ elements on a list $L$. The permutation function for the uniformly distributed $\\mathcal{P}$ is $Y_N=\\sum_\\mathcal{P}1=N!$.", "_____no_output_____" ] ], [ [ "import random\n\nN = 3 #length of the list\nstatistics = {}\nL = range(N) #initialise the list\nnsteps = 10\nfor step in range(nsteps):\n i = random.randint(0, N - 1) #pick two random indices i and j from the list L\n j = random.randint(0, N - 1)\n L[i], L[j] = L[j], L[i] #exchange the i'th and j'th elements\n if tuple(L) in statistics: \n statistics[tuple(L)] += 1 #if a certain configuration appears again, add 1 to its count\n else:\n statistics[tuple(L)] = 1 #if a certain configuration for the first time, give it a count of 1\n print L\n print range(N)\n print\n\nfor item in statistics:\n print item, statistics[item]", "_____no_output_____" ] ], [ [ "Let us look at the permutation cycles and their frequency of occurrence:", "_____no_output_____" ] ], [ [ "import random\n\nN = 20 #length of the list\nstats = [0] * (N + 1) #initialise the \"stats\" vector\nL = range(N) #initialise the list\nnsteps = 1000000 #number of steps\nfor step in range(nsteps):\n i = random.randint(0, N - 1) #pick two random indices i and j from the list L\n j = random.randint(0, N - 1)\n L[i], L[j] = L[j], L[i] #exchange the i'th and j'th elements in the list L\n #Calculate the lengths of the permutation cycles in list L\n if step % 100 == 0: #i.e. at each 100 steps\n cycle_dict = {} #initialise the permutation cycle dictionary\n for k in range(N): #loop over the list length,where keys (k) represent the particles\n cycle_dict[k] = L[k] #and the values (L) are for the successors of the particles in the perm. cycle\n while cycle_dict != {}: #i.e. 
when the cycle dictionary is not empty?\n starting_element = cycle_dict.keys()[0] #save the first (0th) element in the cycle as the starting element\n cycle_length = 0 #initialise the cycle length\n old_element = starting_element #ancillary variable\n while True:\n cycle_length += 1 #increase the cycle length while...\n new_element = cycle_dict.pop(old_element) #get the successor of the old element in the perm. cycle\n if new_element == starting_element: break #the new element is the same as the first one (cycle complete)\n else: old_element = new_element #move on to the next successor in the perm. cycle\n stats[cycle_length] += 1 #increase the number of occurrences of a cycle of that length by 1\nfor k in range(1, N + 1): #print the cycle lengths and their number of occurrences\n print k, stats[k] ", "1 10130\n2 5008\n3 3395\n4 2438\n5 1969\n6 1659\n7 1403\n8 1260\n9 1118\n10 949\n11 943\n12 833\n13 778\n14 745\n15 642\n16 618\n17 610\n18 553\n19 530\n20 492\n" ] ], [ [ "The partition function of permutations $\\mathcal{P}$ on a list of lentgth $N$ is $Y_N=\\sum_\\mathcal{P}\\text{weight}(\\mathcal{P})$. Let $z_n$ be the weight of a permutation cycle of length $n$. Then, the permutation $[0,1,2,3]\\rightarrow[0,1,2,3]$, which can be represented as $(0)(1)(2)(3)$, has the weight $z_1^4$; similarly, $(0)(12)(3)$ would have $z_1^2z_2$, etc.\n\nGenerally, the cycle $\\{n_1,\\cdots,n_{k-1},\\text{last element}\\}$, i.e. the cycle containing the last element, has a length $k$, with the weight $z_k$. The remaining $N-k$ elements have the partition function $Y_{(N-k)}$. Hence, the total partition function is given by $Y_N=\\sum_{k=1}^Nz_k\\{\\text{# of choices for} \\{n_1,\\cdots,n_{k-1}\\}\\}\\{\\text{# of cycles with} \\{n_1,\\cdots,n_{k}\\}\\}Y_{N-k}$\n\n$\\implies Y_N=\\sum_{k=1}^N z_k{{N-1}\\choose{k-1}}(k-1)!Y_{N-k}$ which leads to the following recursion formula\n\n$\\boxed{Y_N=\\frac{1}{N}\\sum_{k=1}^N z_k\\frac{N!}{(N-k)!}Y_{N-k}, (\\text{with }Y_0=1)}$. 
(10)", "_____no_output_____" ], [ "***Using the convolution property, we can regard the $l+1$ bosons in a permutation cycle of length $l$ at temperatyre $1/\\beta$ as a single boson at a temperature $1/(l\\beta)$.***\n\n*Example 1:* Consider the permutation $[0,3,1,2]\\rightarrow[0,1,2,3]$ consists of the following permutation cycle $1\\rightarrow 2 \\rightarrow 3 \\rightarrow 1$ of length 3 ($\\mathcal{P}=(132)$). This corresponds to the partition function $Z^\\text{bose}_{(0)(132)}(\\beta)=\\int \\text{d}x_0\\rho(x_0,x_0,\\beta)\\int\\text{d}x_1\\int\\text{d}x_2\\int\\text{d}x_3\\rho(x_1,x_3,\\beta)\\rho(x_3,x_2,\\beta)\\rho(x_2,x_1,\\beta)$. Using the convolution property, we have: $\\int\\text{d}x_3\\rho(x_1,x_3,\\beta)\\rho(x_3,x_2,\\beta)=\\rho(x_1,x_2,2\\beta)\\implies\\int\\text{d}x_2\\rho(x_1,x_2,2\\beta)\\rho(x_2,x_1,\\beta)=\\rho(x_1,x_1,3\\beta)$. The single particle partition function is defined as $z(\\beta)=\\int\\text{d}\\mathbf{x}\\rho(\\mathbf{x},\\mathbf{x},\\beta) =\\left[ \\int\\text{d}x\\rho(x,x,\\beta)\\right]^3$.\n\n$\\implies Z^\\text{bose}_{(0)(132)}(\\beta)=\\int \\text{d}x_0\\rho(x_0,x_0,\\beta)\\int\\text{d}x_1\\rho(x_1,x_1,3\\beta)=z(\\beta)z(3\\beta)$.\n\n*Example 2:* $Z^\\text{bose}_{(0)(1)(2)(3)}(\\beta)=z(\\beta)^4$.\n\nSimulation of bosons in a harmonic trap: (Carefully note that here are no intermediate slices in the sampled paths, since the paths are sampled from the exact distribution.)", "_____no_output_____" ] ], [ [ "import random, math, pylab, mpl_toolkits.mplot3d\n \n#3 dimensional Levy algorithm, used for resampling the positions of entire permutation cycles of bosons\n#to sample positions\ndef levy_harmonic_path(k, beta):\n #direct sample (rejection-free) three coordinate values, use diagonal density matrix\n #k corresponds to the length of the permutation cycle\n xk = tuple([random.gauss(0.0, 1.0 / math.sqrt(2.0 *\n math.tanh(k * beta / 2.0))) for d in range(3)]) \n x = [xk] #save the 3 coordinate values xk into a 3d 
vector x (final point)\n for j in range(1, k): #loop runs through the permutation cycle\n #Levy sampling (sample a point given the latest sample and the final point)\n Upsilon_1 = (1.0 / math.tanh(beta) +\n 1.0 / math.tanh((k - j) * beta)) \n Upsilon_2 = [x[j - 1][d] / math.sinh(beta) + xk[d] /\n math.sinh((k - j) * beta) for d in range(3)]\n x_mean = [Upsilon_2[d] / Upsilon_1 for d in range(3)] \n sigma = 1.0 / math.sqrt(Upsilon_1)\n dummy = [random.gauss(x_mean[d], sigma) for d in range(3)] #direct sample the j'th point \n x.append(tuple(dummy)) #construct the 3d path (permutation cycle) by appending tuples\n return x\n \n#(Non-diagonal) harmonic oscillator density matrix, used for organising the exchange of two elements\n#to sample permutations\ndef rho_harm(x, xp, beta):\n Upsilon_1 = sum((x[d] + xp[d]) ** 2 / 4.0 *\n math.tanh(beta / 2.0) for d in range(3))\n Upsilon_2 = sum((x[d] - xp[d]) ** 2 / 4.0 /\n math.tanh(beta / 2.0) for d in range(3))\n return math.exp(- Upsilon_1 - Upsilon_2)\n\nN = 256 #number of bosons\nT_star = 0.3\nbeta = 1.0 / (T_star * N ** (1.0 / 3.0)) #??\nnsteps = 1000000 \npositions = {} #initial position dictionary\nfor j in range(N): #loop over all particles, initial permutation is identity (k=1)\n a = levy_harmonic_path(1, beta) #initial positions (outputs a single 3d point)\n positions[a[0]] = a[0] #positions of particles are keys for themselves in the initial position dict.\nfor step in range(nsteps):\n boson_a = random.choice(positions.keys()) #randomly pick the position of boson \"a\" from the dict.\n perm_cycle = [] #initialise the permutation cycle\n while True: #compute the permutation cycle of the boson \"a\":\n perm_cycle.append(boson_a) #construct the permutation cycle by appending the updated position of boson \"a\"\n boson_b = positions.pop(boson_a) #remove and return (pop) the position of \"a\", save it as a temp. 
var.\n if boson_b == perm_cycle[0]: break #if the cycle is completed, break the while loop\n else: boson_a = boson_b #move boson \"a\" to position of \"b\" and continue permuting\n k = len(perm_cycle) #length of the permutation cycle\n #SAMPLE POSITIONS:\n perm_cycle = levy_harmonic_path(k, beta) #resample the particle positions in the current permutation cycle\n positions[perm_cycle[-1]] = perm_cycle[0] #assures that the new path is a \"cycle\" (last term maps to the first term)\n for j in range(len(perm_cycle) - 1): #update the positions of bosons\n positions[perm_cycle[j]] = perm_cycle[j + 1] #construct the \"cycle\": j -> j+1\n #SAMPLE PERMUTATION CYCLES by exchanges:\n #Pick two particles and attempt an exchange to sample permutations (with Metropolis acceptance rate):\n a_1 = random.choice(positions.keys()) #pick the first random particle\n b_1 = positions.pop(a_1) #save the random particle to a temporary variable\n a_2 = random.choice(positions.keys()) #pick the second random particle\n b_2 = positions.pop(a_2) #save the random particle to a temporary variable\n weight_new = rho_harm(a_1, b_2, beta) * rho_harm(a_2, b_1, beta) #the new Metropolis acceptance rate\n weight_old = rho_harm(a_1, b_1, beta) * rho_harm(a_2, b_2, beta) #the old Metropolis acceptance rate\n if random.uniform(0.0, 1.0) < weight_new / weight_old:\n positions[a_1] = b_2 #accept\n positions[a_2] = b_1\n else:\n positions[a_1] = b_1 #reject\n positions[a_2] = b_2\n \n#Figure output:\nfig = pylab.figure()\nax = mpl_toolkits.mplot3d.axes3d.Axes3D(fig)\nax.set_aspect('equal')\nlist_colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\nn_colors = len(list_colors)\ndict_colors = {}\ni_color = 0\n# find and plot permutation cycles:\nwhile positions:\n x, y, z = [], [], []\n starting_boson = positions.keys()[0]\n boson_old = starting_boson\n while True:\n x.append(boson_old[0])\n y.append(boson_old[1])\n z.append(boson_old[2])\n boson_new = positions.pop(boson_old)\n if boson_new == starting_boson: 
break\n else: boson_old = boson_new\n len_cycle = len(x)\n if len_cycle > 2:\n x.append(x[0])\n y.append(y[0])\n z.append(z[0])\n if len_cycle in dict_colors:\n color = dict_colors[len_cycle]\n ax.plot(x, y, z, color + '+-', lw=0.75)\n else:\n color = list_colors[i_color]\n i_color = (i_color + 1) % n_colors\n dict_colors[len_cycle] = color\n ax.plot(x, y, z, color + '+-', label='k=%i' % len_cycle, lw=0.75)\n# finalize plot\npylab.title('$N=%i$, $T^*=%s$' % (N, T_star))\npylab.legend()\nax.set_xlabel('$x$', fontsize=16)\nax.set_ylabel('$y$', fontsize=16)\nax.set_zlabel('$z$', fontsize=16)\nax.set_xlim3d([-8, 8])\nax.set_ylim3d([-8, 8])\nax.set_zlim3d([-8, 8])\npylab.savefig('snapshot_bosons_3d_N%04i_Tstar%04.2f.png' % (N, T_star))\npylab.show()", "_____no_output_____" ] ], [ [ "![caption](BEC.gif)", "_____no_output_____" ], [ "But we do know that for the harmonic trap, the single 3-dimensional particle partition function is given by $z(\\beta)=\\left(\\frac{1}{1-e^{-\\beta}}\\right)^3$. The permutation cycle of length $k$ corresponds to $z_k=z(k\\beta)=\\left(\\frac{1}{1-e^{-k\\beta}}\\right)^3$. Hence, using (9) and (10), we have that \n\n$Z^\\text{bose}_N=Y_N/{N!}=\\frac{1}{N}\\sum_{k=1}^N z_k Z^\\text{bose}_{N-k}, (\\text{with }Z^\\text{bose}_0=1)$. (11)\n\n(Due to Landsberg, 1961 http://store.doverpublications.com/0486664937.html)\nThis recursion relation relates the partition function of a system of $N$ ideal bosons to the partition function of a single particle and the partition functions of systems with fewer particles. 
", "_____no_output_____" ] ], [ [ "import math, pylab\n\ndef z(k, beta):\n return 1.0 / (1.0 - math.exp(- k * beta)) ** 3 #partition function of a single particle in a harmonic trap\n\ndef canonic_recursion(N, beta): #Landsberg recursion relations for the partition function of N bosons\n Z = [1.0] #Z_0 = 1\n for M in range(1, N + 1):\n Z.append(sum(Z[k] * z(M - k, beta) \\\n for k in range(M)) / M)\n return Z #list of partition functions for boson numbers up to N\n\nN = 256 #number of bosons\nT_star = 0.5 #temperature\nbeta = 1.0 / N ** (1.0 / 3.0) / T_star\nZ = canonic_recursion(N, beta) #partition function\npi_k = [(z(k, beta) * Z[N - k] / Z[-1]) / float(N) for k in range(1, N + 1)] #probability of a cycle of length k\n# graphics output\npylab.plot(range(1, N + 1), pi_k, 'b-', lw=2.5)\npylab.ylim(0.0, 0.01)\npylab.xlabel('cycle length $k$', fontsize=16)\npylab.ylabel('cycle probability $\\pi_k$', fontsize=16)\npylab.title('Cycle length distribution ($N=%i$, $T^*=%s$)' % (N, T_star), fontsize=16)\npylab.savefig('plot-prob_cycle_length.png')\n\nphase = [pi[k+1] - pi[k] for k in range(1, N+1)]\n\n# graphics output\npylab.plot(range(1, N + 1), pi_k, 'b-', lw=2.5)\npylab.ylim(0.0, 0.01)\npylab.xlabel('cycle length $k$', fontsize=16)\npylab.ylabel('cycle probability $\\pi_k$', fontsize=16)\npylab.title('Cycle length distribution ($N=%i$, $T^*=%s$)' % (N, T_star), fontsize=16)\npylab.savefig('plot-prob_cycle_length.png')", "_____no_output_____" ] ], [ [ "Since we have an analytical solution to the problem, we can now implement a rejection-free direct sampling algorithm for the permutations.", "_____no_output_____" ] ], [ [ "import math, random\n\ndef z(k, beta): #partition function of a single particle in a harmonic trap\n return (1.0 - math.exp(- k * beta)) ** (-3)\n\ndef canonic_recursion(N, beta): #Landsberg recursion relation for the partition function of N bosons in a harmonic trap\n Z = [1.0]\n for M in range(1, N + 1):\n Z.append(sum(Z[k] * z(M - k, beta) for k 
in range(M)) / M)\n return Z\n\ndef make_pi_list(Z, M): #the probability for a boson to be in a permutation length of length up to M?\n pi_list = [0.0] + [z(k, beta) * Z[M - k] / Z[M] / M for k in range(1, M + 1)]\n pi_cumulative = [0.0]\n for k in range(1, M + 1):\n pi_cumulative.append(pi_cumulative[k - 1] + pi_list[k])\n return pi_cumulative\n\ndef naive_tower_sample(pi_cumulative):\n eta = random.uniform(0.0, 1.0)\n for k in range(len(pi_cumulative)):\n if eta < pi_cumulative[k]: break\n return k\n\ndef levy_harmonic_path(dtau, N): #path sampling (to sample permutation positions)\n beta = N * dtau\n x_N = random.gauss(0.0, 1.0 / math.sqrt(2.0 * math.tanh(beta / 2.0)))\n x = [x_N]\n for k in range(1, N):\n dtau_prime = (N - k) * dtau\n Upsilon_1 = 1.0 / math.tanh(dtau) + 1.0 / math.tanh(dtau_prime)\n Upsilon_2 = x[k - 1] / math.sinh(dtau) + x_N / math.sinh(dtau_prime)\n x_mean = Upsilon_2 / Upsilon_1\n sigma = 1.0 / math.sqrt(Upsilon_1)\n x.append(random.gauss(x_mean, sigma))\n return x\n\n### main program starts here ###\nN = 8 #number of bosons\nT_star = 0.1 #temperature\nbeta = 1.0 / N ** (1.0 / 3.0) / T_star\nn_steps = 1000 \nZ = canonic_recursion(N, beta) #{N} boson partition function\nfor step in range(n_steps):\n N_tmp = N #ancillary\n x_config, y_config, z_config = [], [], [] #initialise the configurations in each 3 directions\n while N_tmp > 0: #iterate through all particles\n pi_sum = make_pi_list(Z, N_tmp)\n k = naive_tower_sample(pi_sum)\n x_config += levy_harmonic_path(beta, k)\n y_config += levy_harmonic_path(beta, k)\n z_config += levy_harmonic_path(beta, k)\n N_tmp -= k #reduce the number of particles that are in the permutation cycle of length k", "_____no_output_____" ] ], [ [ "### Physical properties of the 1-dimensional classical and bosonic systems\n\n* Consider 2 non-interacting **distinguishable particles** in a 1-dimensional harmonic trap:", "_____no_output_____" ] ], [ [ "import random, math, pylab\n\n#There are only two possible cases: 
For k=1, we sample a single position (cycle of length 1), \n#for k=2, we sample two positions (a cycle of length two).\ndef levy_harmonic_path(k):\n x = [random.gauss(0.0, 1.0 / math.sqrt(2.0 * math.tanh(k * beta / 2.0)))] #direct-sample the first position\n if k == 2:\n Ups1 = 2.0 / math.tanh(beta)\n Ups2 = 2.0 * x[0] / math.sinh(beta)\n x.append(random.gauss(Ups2 / Ups1, 1.0 / math.sqrt(Ups1)))\n return x[:]\n\ndef pi_x(x, beta): \n sigma = 1.0 / math.sqrt(2.0 * math.tanh(beta / 2.0))\n return math.exp(-x ** 2 / (2.0 * sigma ** 2)) / math.sqrt(2.0 * math.pi) / sigma\n\nbeta = 2.0\nnsteps = 1000000\n#initial sample has identity permutation\nlow = levy_harmonic_path(2) #tau=0\nhigh = low[:] #tau=beta\ndata = []\nfor step in xrange(nsteps):\n k = random.choice([0, 1])\n low[k] = levy_harmonic_path(1)[0]\n high[k] = low[k]\n data.append(high[k])\n \nlist_x = [0.1 * a for a in range (-30, 31)]\ny = [pi_x(a, beta) for a in list_x]\npylab.plot(list_x, y, linewidth=2.0, label='Exact distribution')\npylab.hist(data, normed=True, bins=80, label='QMC', alpha=0.5, color='green')\npylab.legend()\npylab.xlabel('$x$',fontsize=14)\npylab.ylabel('$\\\\pi(x)$',fontsize=14)\npylab.title('2 non-interacting distinguishable 1-d particles',fontsize=14)\npylab.xlim(-3, 3)\npylab.savefig('plot_A1_beta%s.png' % beta)", "_____no_output_____" ] ], [ [ "* Consider two non-interacting **indistinguishable bosonic** quantum particles in a one-dimensional harmonic trap:", "_____no_output_____" ] ], [ [ "import math, random, pylab, numpy as np\n\ndef z(beta):\n return 1.0 / (1.0 - math.exp(- beta))\n\ndef pi_two_bosons(x, beta): #exact two boson position distribution\n pi_x_1 = math.sqrt(math.tanh(beta / 2.0)) / math.sqrt(math.pi) * math.exp(-x ** 2 * math.tanh(beta / 2.0))\n pi_x_2 = math.sqrt(math.tanh(beta)) / math.sqrt(math.pi) * math.exp(-x ** 2 * math.tanh(beta))\n weight_1 = z(beta) ** 2 / (z(beta) ** 2 + z(2.0 * beta))\n weight_2 = z(2.0 * beta) / (z(beta) ** 2 + z(2.0 * beta))\n pi_x = 
pi_x_1 * weight_1 + pi_x_2 * weight_2\n return pi_x\n\ndef levy_harmonic_path(k):\n x = [random.gauss(0.0, 1.0 / math.sqrt(2.0 * math.tanh(k * beta / 2.0)))]\n if k == 2:\n Ups1 = 2.0 / math.tanh(beta)\n Ups2 = 2.0 * x[0] / math.sinh(beta)\n x.append(random.gauss(Ups2 / Ups1, 1.0 / math.sqrt(Ups1)))\n return x[:]\n\ndef rho_harm_1d(x, xp, beta):\n Upsilon_1 = (x + xp) ** 2 / 4.0 * math.tanh(beta / 2.0)\n Upsilon_2 = (x - xp) ** 2 / 4.0 / math.tanh(beta / 2.0)\n return math.exp(- Upsilon_1 - Upsilon_2)\n\nbeta = 2.0\nlist_beta = np.linspace(0.1, 5.0)\nnsteps = 10000\nlow = levy_harmonic_path(2)\nhigh = low[:]\nfract_one_cycle_dat, fract_two_cycles_dat = [], []\n\nfor beta in list_beta:\n one_cycle_dat = 0.0 #initialise the permutation fractions for each temperature\n data = []\n for step in xrange(nsteps):\n # move 1 (direct-sample the positions)\n if low[0] == high[0]: #if the cycle is of length 1\n k = random.choice([0, 1])\n low[k] = levy_harmonic_path(1)[0]\n high[k] = low[k] #assures the cycle\n else: #if the cycle is of length 2s\n low[0], low[1] = levy_harmonic_path(2)\n high[1] = low[0] #assures the cycle\n high[0] = low[1]\n one_cycle_dat += 1.0 / float(nsteps) #calculate the fraction of the single cycle cases\n data += low[:] #save the position histogram data\n # move 2 (Metropolis for sampling the permutations)\n weight_old = (rho_harm_1d(low[0], high[0], beta) * rho_harm_1d(low[1], high[1], beta))\n weight_new = (rho_harm_1d(low[0], high[1], beta) * rho_harm_1d(low[1], high[0], beta))\n if random.uniform(0.0, 1.0) < weight_new / weight_old:\n high[0], high[1] = high[1], high[0]\n\n fract_one_cycle_dat.append(one_cycle_dat)\n fract_two_cycles_dat.append(1.0 - one_cycle_dat) #save the fraction of the two cycles cases\n \n#Exact permutation distributions for all temperatures\nfract_two_cycles = [z(beta) ** 2 / (z(beta) ** 2 + z(2.0 * beta)) for beta in list_beta]\nfract_one_cycle = [z(2.0 * beta) / (z(beta) ** 2 + z(2.0 * beta)) for beta in 
list_beta]\n\n#Graphics output: \nlist_x = [0.1 * a for a in range (-30, 31)]\ny = [pi_two_bosons(a, beta) for a in list_x]\npylab.plot(list_x, y, linewidth=2.0, label='Exact distribution')\npylab.hist(data, normed=True, bins=80, label='QMC', alpha=0.5, color='green')\npylab.legend()\npylab.xlabel('$x$',fontsize=14)\npylab.ylabel('$\\\\pi(x)$',fontsize=14)\npylab.title('2 non-interacting bosonic 1-d particles',fontsize=14)\npylab.xlim(-3, 3)\npylab.savefig('plot_A2_beta%s.png' % beta)\npylab.show()\npylab.clf()\n\nfig = pylab.figure(figsize=(10, 5))\n\nax = fig.add_subplot(1, 2, 1)\nax.plot(list_beta, fract_one_cycle_dat, linewidth=4, label='QMC')\nax.plot(list_beta, fract_one_cycle, linewidth=2, label='exact')\nax.legend()\nax.set_xlabel('$\\\\beta$',fontsize=14)\nax.set_ylabel('$\\\\pi_2(\\\\beta)$',fontsize=14)\nax.set_title('Fraction of cycles of length 2',fontsize=14)\n\nax = fig.add_subplot(1, 2, 2)\nax.plot(list_beta, fract_two_cycles_dat, linewidth=4, label='QMC')\nax.plot(list_beta, fract_two_cycles, linewidth=2,label='exact')\nax.legend()\nax.set_xlabel('$\\\\beta$',fontsize=14)\nax.set_ylabel('$\\\\pi_1(\\\\beta)$',fontsize=14)\nax.set_title('Fraction of cycles of length 1',fontsize=14)\n\npylab.savefig('plot_A2.png')\npylab.show()\npylab.clf()", "_____no_output_____" ] ], [ [ "We can use dictionaries instead of lists. The implementation is in the following program. \n\nHere we also calculate the correlation between the two particles, i.e. sample of the absolute distance $r$ between the two bosons. 
The comparison between the resulting distribution and the distribution for the distinguishable case corresponds to boson bunching (high weight for small distances between the bosons).", "_____no_output_____" ] ], [ [ "import math, random, pylab\n\ndef prob_r_distinguishable(r, beta): #the exact correlation function for two particles\n sigma = math.sqrt(2.0) / math.sqrt(2.0 * math.tanh(beta / 2.0))\n prob = (math.sqrt(2.0 / math.pi) / sigma) * math.exp(- r ** 2 / 2.0 / sigma ** 2)\n return prob\n\ndef levy_harmonic_path(k):\n x = [random.gauss(0.0, 1.0 / math.sqrt(2.0 * math.tanh(k * beta / 2.0)))]\n if k == 2:\n Ups1 = 2.0 / math.tanh(beta)\n Ups2 = 2.0 * x[0] / math.sinh(beta)\n x.append(random.gauss(Ups2 / Ups1, 1.0 / math.sqrt(Ups1)))\n return x[:]\n\ndef rho_harm_1d(x, xp, beta):\n Upsilon_1 = (x + xp) ** 2 / 4.0 * math.tanh(beta / 2.0)\n Upsilon_2 = (x - xp) ** 2 / 4.0 / math.tanh(beta / 2.0)\n return math.exp(- Upsilon_1 - Upsilon_2)\n\nbeta = 0.1\nnsteps = 1000000\nlow_1, low_2 = levy_harmonic_path(2)\nx = {low_1:low_1, low_2:low_2}\ndata_corr = []\nfor step in xrange(nsteps):\n # move 1\n a = random.choice(x.keys())\n if a == x[a]:\n dummy = x.pop(a)\n a_new = levy_harmonic_path(1)[0]\n x[a_new] = a_new\n else:\n a_new, b_new = levy_harmonic_path(2)\n x = {a_new:b_new, b_new:a_new}\n r = abs(x.keys()[1] - x.keys()[0])\n data_corr.append(r)\n # move 2\n (low1, high1), (low2, high2) = x.items()\n weight_old = rho_harm_1d(low1, high1, beta) * rho_harm_1d(low2, high2, beta)\n weight_new = rho_harm_1d(low1, high2, beta) * rho_harm_1d(low2, high1, beta)\n if random.uniform(0.0, 1.0) < weight_new / weight_old:\n x = {low1:high2, low2:high1}\n \n#Graphics output: \nlist_x = [0.1 * a for a in range (0, 100)]\ny = [prob_r_distinguishable(a, beta) for a in list_x]\npylab.plot(list_x, y, linewidth=2.0, label='Exact distinguishable distribution')\npylab.hist(data_corr, normed=True, bins=120, label='Indistinguishable QMC', alpha=0.5, 
color='green')\npylab.legend()\npylab.xlabel('$r$',fontsize=14)\npylab.ylabel('$\\\\pi_{corr}(r)$',fontsize=14)\npylab.title('Correlation function of non-interacting 1-d bosons',fontsize=14)\npylab.xlim(0, 10)\npylab.savefig('plot_A3_beta%s.png' % beta)\npylab.show()\npylab.clf()", "_____no_output_____" ] ], [ [ "### 3-dimensional bosons\n#### Isotropic trap", "_____no_output_____" ] ], [ [ "import random, math, numpy, sys, os\nimport matplotlib.pyplot as plt\n\ndef harmonic_ground_state(x):\n return math.exp(-x ** 2)/math.sqrt(math.pi)\n\ndef levy_harmonic_path_3d(k):\n x0 = tuple([random.gauss(0.0, 1.0 / math.sqrt(2.0 *\n math.tanh(k * beta / 2.0))) for d in range(3)])\n x = [x0]\n for j in range(1, k):\n Upsilon_1 = 1.0 / math.tanh(beta) + 1.0 / \\\n math.tanh((k - j) * beta)\n Upsilon_2 = [x[j - 1][d] / math.sinh(beta) + x[0][d] /\n math.sinh((k - j) * beta) for d in range(3)]\n x_mean = [Upsilon_2[d] / Upsilon_1 for d in range(3)]\n sigma = 1.0 / math.sqrt(Upsilon_1)\n dummy = [random.gauss(x_mean[d], sigma) for d in range(3)]\n x.append(tuple(dummy))\n return x\n\ndef rho_harm_3d(x, xp):\n Upsilon_1 = sum((x[d] + xp[d]) ** 2 / 4.0 *\n math.tanh(beta / 2.0) for d in range(3))\n Upsilon_2 = sum((x[d] - xp[d]) ** 2 / 4.0 /\n math.tanh(beta / 2.0) for d in range(3))\n return math.exp(- Upsilon_1 - Upsilon_2)\n\nN = 512\nT_star = 0.8\nlist_T = numpy.linspace(0.8,0.1,5)\nbeta = 1.0 / (T_star * N ** (1.0 / 3.0))\ncycle_min = 10\nnsteps = 50000\ndata_x, data_y, data_x_l, data_y_l = [], [], [], []\n\nfor T_star in list_T:\n # Initial condition\n filename = 'data_boson_configuration_N%i_T%.1f.txt' % (N,T_star)\n positions = {}\n if os.path.isfile(filename):\n f = open(filename, 'r')\n for line in f:\n a = line.split()\n positions[tuple([float(a[0]), float(a[1]), float(a[2])])] = \\\n tuple([float(a[3]), float(a[4]), float(a[5])])\n f.close()\n if len(positions) != N:\n sys.exit('ERROR in the input file.')\n print 'starting from file', filename\n else:\n for k in 
range(N):\n a = levy_harmonic_path_3d_anisotropic(1)\n positions[a[0]] = a[0]\n print 'Starting from a new configuration'\n\n # Monte Carlo loop \n for step in range(nsteps):\n # move 1: resample one permutation cycle\n boson_a = random.choice(positions.keys())\n perm_cycle = []\n while True:\n perm_cycle.append(boson_a)\n boson_b = positions.pop(boson_a)\n if boson_b == perm_cycle[0]:\n break\n else:\n boson_a = boson_b\n k = len(perm_cycle)\n data_x.append(boson_a[0])\n data_y.append(boson_a[1])\n\n if k > cycle_min:\n data_x_l.append(boson_a[0])\n data_y_l.append(boson_a[1]) \n perm_cycle = levy_harmonic_path_3d(k)\n positions[perm_cycle[-1]] = perm_cycle[0]\n for k in range(len(perm_cycle) - 1):\n positions[perm_cycle[k]] = perm_cycle[k + 1]\n\n # move 2: exchange\n a_1 = random.choice(positions.keys())\n b_1 = positions.pop(a_1)\n a_2 = random.choice(positions.keys())\n b_2 = positions.pop(a_2)\n weight_new = rho_harm_3d(a_1, b_2) * rho_harm_3d(a_2, b_1)\n weight_old = rho_harm_3d(a_1, b_1) * rho_harm_3d(a_2, b_2)\n if random.uniform(0.0, 1.0) < weight_new / weight_old:\n positions[a_1] = b_2\n positions[a_2] = b_1\n else:\n positions[a_1] = b_1\n positions[a_2] = b_2\n\n f = open(filename, 'w')\n for a in positions:\n b = positions[a]\n f.write(str(a[0]) + ' ' + str(a[1]) + ' ' + str(a[2]) + ' ' +\n str(b[0]) + ' ' + str(b[1]) + ' ' + str(b[2]) + '\\n')\n f.close()\n\n # Analyze cycles, do 3d plot\n import pylab, mpl_toolkits.mplot3d\n\n fig = pylab.figure()\n ax = mpl_toolkits.mplot3d.axes3d.Axes3D(fig)\n ax.set_aspect('equal')\n n_colors = 10\n list_colors = pylab.cm.rainbow(numpy.linspace(0, 1, n_colors))[::-1]\n dict_colors = {}\n i_color = 0\n positions_copy = positions.copy()\n while positions_copy:\n x, y, z = [], [], []\n starting_boson = positions_copy.keys()[0]\n boson_old = starting_boson\n while True:\n x.append(boson_old[0])\n y.append(boson_old[1])\n z.append(boson_old[2])\n boson_new = positions_copy.pop(boson_old)\n if boson_new == 
starting_boson: break\n else: boson_old = boson_new\n len_cycle = len(x)\n if len_cycle > 2:\n x.append(x[0])\n y.append(y[0])\n z.append(z[0])\n if len_cycle in dict_colors:\n color = dict_colors[len_cycle]\n ax.plot(x, y, z, '+-', c=color, lw=0.75)\n else:\n color = list_colors[i_color]\n i_color = (i_color + 1) % n_colors\n dict_colors[len_cycle] = color\n ax.plot(x, y, z, '+-', c=color, label='k=%i' % len_cycle, lw=0.75)\n pylab.title(str(N) + ' bosons at T* = ' + str(T_star))\n pylab.legend()\n ax.set_xlabel('$x$', fontsize=16)\n ax.set_ylabel('$y$', fontsize=16)\n ax.set_zlabel('$z$', fontsize=16)\n xmax = 6.0\n ax.set_xlim3d([-xmax, xmax])\n ax.set_ylim3d([-xmax, xmax])\n ax.set_zlim3d([-xmax, xmax])\n pylab.savefig('plot_boson_configuration_N%i_T%.1f.png' %(N,T_star))\n pylab.show()\n pylab.clf()\n\n#Plot the histograms\nlist_x = [0.1 * a for a in range (-50, 51)]\ny = [harmonic_ground_state(a) for a in list_x]\npylab.plot(list_x, y, linewidth=2.0, label='Ground state')\npylab.hist(data_x, normed=True, bins=120, alpha = 0.5, label='All bosons') \npylab.hist(data_x_l, normed=True, bins=120, alpha = 0.5, label='Bosons in longer cycle') \npylab.xlim(-3.0, 3.0) \npylab.xlabel('$x$',fontsize=14)\npylab.ylabel('$\\pi(x)$',fontsize=14)\npylab.title('3-d non-interacting bosons $x$ distribution $N= %i$, $T= %.1f$' %(N,T_star))\npylab.legend()\npylab.savefig('position_distribution_N%i_T%.1f.png' %(N,T_star))\npylab.show()\npylab.clf()\n\nplt.hist2d(data_x_l, data_y_l, bins=40, normed=True)\nplt.xlabel('$x$')\nplt.ylabel('$y$')\nplt.title('The distribution of the $x$ and $y$ positions')\nplt.colorbar()\nplt.xlim(-3.0, 3.0) \nplt.ylim(-3.0, 3.0) \nplt.show()", "starting from file data_boson_configuration_N512_T0.8.txt\n" ] ], [ [ "#### Anisotropic trap\n\nWe can imitate the experiments that imitate 1-d bosons in *cigar shaped* anisotropic harmonic traps, and 2-d bosons in *pancake shaped* anisotropic harmonic traps.", "_____no_output_____" ] ], [ [ "%pylab 
inline\nimport random, math, numpy, os, sys\n\ndef levy_harmonic_path_3d_anisotropic(k, omega):\n sigma = [1.0 / math.sqrt(2.0 * omega[d] *\n math.tanh(0.5 * k * beta * omega[d])) for d in xrange(3)]\n xk = tuple([random.gauss(0.0, sigma[d]) for d in xrange(3)])\n x = [xk]\n for j in range(1, k):\n Upsilon_1 = [1.0 / math.tanh(beta * omega[d]) +\n 1.0 / math.tanh((k - j) * beta * omega[d]) for d in range(3)]\n Upsilon_2 = [x[j - 1][d] / math.sinh(beta * omega[d]) + \\\n xk[d] / math.sinh((k - j) * beta * omega[d]) for d in range(3)]\n x_mean = [Upsilon_2[d] / Upsilon_1[d] for d in range(3)]\n sigma = [1.0 / math.sqrt(Upsilon_1[d] * omega[d]) for d in range(3)]\n dummy = [random.gauss(x_mean[d], sigma[d]) for d in range(3)]\n x.append(tuple(dummy))\n return x\n\ndef rho_harm_3d_anisotropic(x, xp, beta, omega):\n Upsilon_1 = sum(omega[d] * (x[d] + xp[d]) ** 2 / 4.0 *\n math.tanh(beta * omega[d] / 2.0) for d in range(3))\n Upsilon_2 = sum(omega[d] * (x[d] - xp[d]) ** 2 / 4.0 /\n math.tanh(beta * omega[d] / 2.0) for d in range(3))\n return math.exp(- Upsilon_1 - Upsilon_2)\n\nomegas = numpy.array([[4.0, 4.0, 1.0], [1.0, 5.0, 1.0]])\n\nfor i in range(len(omegas[:,1])):\n N = 512\n nsteps = 100000\n omega_harm = 1.0\n omega = omegas[i,:]\n for d in range(3):\n omega_harm *= omega[d] ** (1.0 / 3.0)\n T_star = 0.5\n T = T_star * omega_harm * N ** (1.0 / 3.0)\n beta = 1.0 / T\n print 'omega: ', omega\n # Initial condition\n if i == 0:\n filename = 'data_boson_configuration_anisotropic_N%i_T%.1f_cigar.txt' % (N,T_star)\n elif i == 1:\n filename = 'data_boson_configuration_anisotropic_N%i_T%.1f_pancake.txt' % (N,T_star)\n positions = {}\n if os.path.isfile(filename):\n f = open(filename, 'r')\n for line in f:\n a = line.split()\n positions[tuple([float(a[0]), float(a[1]), float(a[2])])] = \\\n tuple([float(a[3]), float(a[4]), float(a[5])])\n f.close()\n if len(positions) != N:\n sys.exit('ERROR in the input file.')\n print 'starting from file', filename\n else:\n for k in 
range(N):\n a = levy_harmonic_path_3d_anisotropic(1,omega)\n positions[a[0]] = a[0]\n print 'Starting from a new configuration'\n for step in range(nsteps):\n boson_a = random.choice(positions.keys())\n perm_cycle = []\n while True:\n perm_cycle.append(boson_a)\n boson_b = positions.pop(boson_a)\n if boson_b == perm_cycle[0]: break\n else: boson_a = boson_b\n k = len(perm_cycle)\n perm_cycle = levy_harmonic_path_3d_anisotropic(k,omega)\n positions[perm_cycle[-1]] = perm_cycle[0]\n for j in range(len(perm_cycle) - 1):\n positions[perm_cycle[j]] = perm_cycle[j + 1]\n a_1 = random.choice(positions.keys())\n b_1 = positions.pop(a_1)\n a_2 = random.choice(positions.keys())\n b_2 = positions.pop(a_2)\n weight_new = (rho_harm_3d_anisotropic(a_1, b_2, beta, omega) *\n rho_harm_3d_anisotropic(a_2, b_1, beta, omega))\n weight_old = (rho_harm_3d_anisotropic(a_1, b_1, beta, omega) *\n rho_harm_3d_anisotropic(a_2, b_2, beta, omega))\n if random.uniform(0.0, 1.0) < weight_new / weight_old:\n positions[a_1], positions[a_2] = b_2, b_1\n else:\n positions[a_1], positions[a_2] = b_1, b_2\n\n f = open(filename, 'w')\n for a in positions:\n b = positions[a]\n f.write(str(a[0]) + ' ' + str(a[1]) + ' ' + str(a[2]) + ' ' +\n str(b[0]) + ' ' + str(b[1]) + ' ' + str(b[2]) + '\\n')\n f.close()\n\n import pylab, mpl_toolkits.mplot3d\n fig = pylab.figure()\n ax = mpl_toolkits.mplot3d.axes3d.Axes3D(fig)\n ax.set_aspect('equal')\n n_colors = 10\n list_colors = pylab.cm.rainbow(numpy.linspace(0, 1, n_colors))[::-1]\n dict_colors = {}\n i_color = 0\n positions_copy = positions.copy()\n while positions_copy:\n x, y, z = [], [], []\n starting_boson = positions_copy.keys()[0]\n boson_old = starting_boson\n while True:\n x.append(boson_old[0])\n y.append(boson_old[1])\n z.append(boson_old[2])\n boson_new = positions_copy.pop(boson_old)\n if boson_new == starting_boson: break\n else: boson_old = boson_new\n len_cycle = len(x)\n if len_cycle > 2:\n x.append(x[0])\n y.append(y[0])\n z.append(z[0])\n if 
len_cycle in dict_colors:\n color = dict_colors[len_cycle]\n ax.plot(x, y, z, '+-', c=color, lw=0.75)\n else:\n color = list_colors[i_color]\n i_color = (i_color + 1) % n_colors\n dict_colors[len_cycle] = color\n ax.plot(x, y, z, '+-', c=color, label='k=%i' % len_cycle, lw=0.75)\n pylab.legend()\n ax.set_xlabel('$x$', fontsize=16)\n ax.set_ylabel('$y$', fontsize=16)\n ax.set_zlabel('$z$', fontsize=16)\n xmax = 8.0\n ax.set_xlim3d([-xmax, xmax])\n ax.set_ylim3d([-xmax, xmax])\n ax.set_zlim3d([-xmax, xmax])\n if i == 0:\n pylab.title(str(N) + ' bosons at T* = ' + str(T_star) + ' cigar potential')\n pylab.savefig('position_distribution_N%i_T%.1f_cigar.png' %(N,T_star))\n elif i == 1:\n pylab.title(str(N) + ' bosons at T* = ' + str(T_star) + ' pancake potential')\n pylab.savefig('position_distribution_N%i_T%.1f_pancake.png' %(N,T_star))\n pylab.show() ", "Populating the interactive namespace from numpy and matplotlib\nomega: [4. 4. 1.]\nstarting from file data_boson_configuration_anisotropic_N512_T0.5_cigar.txt\n" ] ], [ [ "There it is found that the critical temperature for Bose-Einstein condensation is around $T^*\\sim 0.9$.", "_____no_output_____" ], [ "## To do:\n\n* Calculate the pair correlation function", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d067b47d158cce7b1d727f11561733b096c04c67
351,026
ipynb
Jupyter Notebook
cjio_tutorial.ipynb
balazsdukai/foss4g2019
a2fefc817b15c6b9ef87a523bedd5cf202e2fa4d
[ "CC-BY-4.0" ]
5
2019-09-11T11:21:09.000Z
2022-01-17T10:50:57.000Z
cjio_tutorial.ipynb
balazsdukai/foss4g2019
a2fefc817b15c6b9ef87a523bedd5cf202e2fa4d
[ "CC-BY-4.0" ]
null
null
null
cjio_tutorial.ipynb
balazsdukai/foss4g2019
a2fefc817b15c6b9ef87a523bedd5cf202e2fa4d
[ "CC-BY-4.0" ]
null
null
null
105.986111
193,960
0.828785
[ [ [ "# Working with 3D city models in Python\n\n\n\n**Balázs Dukai** [*@BalazsDukai*](https://twitter.com/balazsdukai), **FOSS4G 2019**\n\nTweet <span style=\"color:blue\">#CityJSON</span>\n\n[3D geoinformation research group, TU Delft, Netherlands](https://3d.bk.tudelft.nl/)\n\n![](figures/logos.png)\n\nRepo of this talk: [https://github.com/balazsdukai/foss4g2019](https://github.com/balazsdukai/foss4g2019)", "_____no_output_____" ], [ "# 3D + city + model ?\n![](figures/google_earth.png)", "_____no_output_____" ], [ "Probably the most well known 3d city model is what we see in Google Earth. And it is a very nice model to look at and it is improving continuously. However, certain applications require more information than what is stored in such a mesh model. They need to know what does an object in the model represent in the real world.", "_____no_output_____" ], [ "# Semantic models\n![](figures/semantic_model.png)", "_____no_output_____" ], [ "That is why we have semantic models, where for each object in the model we store a label of is meaning.\nOnce we have labels on the object and on their parts, data preparation becomes more simple. An important property for analytical applications, such as wind flow simulations.", "_____no_output_____" ], [ "# Useful for urban analysis\n\n![](figures/cfd.gif)\n\nGarcía-Sánchez, C., van Beeck, J., Gorlé, C., Predictive Large Eddy Simulations for Urban Flows: Challenges and Opportunities, Building and Environment, 139, 146-156, 2018.", "_____no_output_____" ], [ "But we can do much more with 3d city models. We can use them to better estimate the energy consumption in buildings, simulate noise in cities or analyse views and shadows. In the Netherlands sunshine is precious commodity, so we like to get as much as we can.", "_____no_output_____" ], [ "# And many more...\n\n![3d city model applications](figures/3d_cm_applications.png)", "_____no_output_____" ], [ "There are many open 3d city models available. 
They come in different formats and quality. However, at our group we are still waiting for the \"year of the 3d city model\" to come. We don't really see mainstream use, apart of visualisation. Which is nice, I belive they can provide much more value than having a nice thing to simply look at.", "_____no_output_____" ], [ "# ...mostly just production of the models\n\nmany available, but who **uses** them? **For more than visualisation?**\n\n![open 3d city models](figures/open_cms.png)", "_____no_output_____" ], [ "# In truth, 3D CMs are a bit difficult to work with", "_____no_output_____" ], [ "### Our built environment is complex, and the objects are complex too\n\n![](figures/assembling_solid.png)", "_____no_output_____" ], [ "### Software are lagging behind\n\n+ not many software supports 3D city models\n\n+ if they do, mostly propietary data model and format\n\n+ large, *\"eterprise\"*-type applications (think Esri, FME, Bentley ... )\n\n+ few tools accessible for the individual developer / hobbyist", "_____no_output_____" ], [ "2. GML doesn't help ( *[GML madness](http://erouault.blogspot.com/2014/04/gml-madness.html) by Even Rouault* )", "_____no_output_____" ], [ "That is why we are developing CityJSON, which is a data format for 3d city models. Essentially, it aims to increase the value of 3d city models by making it more simple to work with them and lower the entry for a wider audience than cadastral organisations.", "_____no_output_____" ], [ "![cityjson logo](figures/cityjson_webpage.png)", "_____no_output_____" ], [ "## Key concepts of CityJSON", "_____no_output_____" ], [ "+ *simple*, as in easy to implement\n+ designed with programmers in mind\n+ fully developed in the open\n+ flattened hierarchy of objects\n+ <span style=\"color:red\">implementation first</span>\n\n![GitHub Issues](figures/github_issues.png)", "_____no_output_____" ], [ "CityJSON implements the data model of CityGML. 
CityGML is an international standard for 3d city models and it is coupled with its GML-based encoding. \n\nWe don't really like GML, because it's verbose, files are deeply nested and large (often several GB). And there are many different ways to do one thing.\n\nAlso, I'm not a web-developer, but I would be surprised if anyone prefers GML over JSON for sending stuff around the web.", "_____no_output_____" ], [ "# JSON-based encoding of the CityGML data model\n![](figures/citygml_encoding.png)", "_____no_output_____" ], [ "<blockquote class=\"twitter-tweet\"><p lang=\"en\" dir=\"ltr\">I just got sent a CityGML file. <a href=\"https://t.co/jnTVoRnVLS\">pic.twitter.com/jnTVoRnVLS</a></p>&mdash; James Fee (@jamesmfee) <a href=\"https://twitter.com/jamesmfee/status/748270105319006208?ref_src=twsrc%5Etfw\">June 29, 2016</a></blockquote> <script async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\"></script> \n\n+ files are deeply nested, and large\n+ many \"points of entry\"\n+ many diff ways to do one thing (GML doesn't help, *[GML madness](http://erouault.blogspot.com/2014/04/gml-madness.html) by Even Rouault* )", "_____no_output_____" ], [ "## The CityGML data model\n\n![](figures/citygml_uml.gif)", "_____no_output_____" ], [ "## Compression ~6x over CityGML\n\n![](figures/zurich_size.png)", "_____no_output_____" ], [ "## Compression\n| file | CityGML size (original) | CityGML size (w/o spaces) | textures | CityJSON | compression |\n| -------- | ----------------------- | ----------------------------- |--------- | ------------ | --------------- | \n| [CityGML demo \"GeoRes\"](https://www.citygml.org/samplefiles/) | 4.3MB | 4.1MB | yes | 524KB | 8.0 |\n| [CityGML v2 demo \"Railway\"](https://www.citygml.org/samplefiles/) | 45MB | 34MB | yes | 4.3MB | 8.1 |\n| [Den Haag \"tile 01\"](https://data.overheid.nl/data/dataset/ngr-3d-model-den-haag) | 23MB | 18MB | no, material | 2.9MB | 6.2 |\n| [Montréal 
VM05](http://donnees.ville.montreal.qc.ca/dataset/maquette-numerique-batiments-citygml-lod2-avec-textures/resource/36047113-aa19-4462-854a-cdcd6281a5af) | 56MB | 42MB | yes | 5.4MB | 7.8 |\n| [New York LoD2 (DA13)](https://www1.nyc.gov/site/doitt/initiatives/3d-building.page) | 590MB | 574MB | no | 105MB | 5.5 |\n| [Rotterdam Delfshaven](http://rotterdamopendata.nl/dataset/rotterdam-3d-bestanden/resource/edacea54-76ce-41c7-a0cc-2ebe5750ac18) | 16MB | 15MB | yes | 2.6MB | 5.8 |\n| [Vienna (the demo file)](https://www.data.gv.at/katalog/dataset/86d88cae-ad97-4476-bae5-73488a12776d) | 37MB | 36MB | no | 5.3MB | 6.8 |\n| [Zürich LoD2](https://www.data.gv.at/katalog/dataset/86d88cae-ad97-4476-bae5-73488a12776d) | 3.03GB | 2.07GB | no | 292MB | 7.1 |", "_____no_output_____" ], [ "If you are interested in a more detailed comparison between CityGML and CityJSON you can read our article, its open access.", "_____no_output_____" ], [ "![cityjson paper](figures/cityjson_paper.png)", "_____no_output_____" ], [ "And yes, we are guilty of charge.", "_____no_output_____" ], [ "![standards](figures/standards.png)\n\n[https://xkcd.com/927/](https://xkcd.com/927/)", "_____no_output_____" ], [ "# Let's have a look-see, shall we?\n![](figures/looksee.gif)", "_____no_output_____" ], [ "Now let's take a peek under the hood, what's going on in a CityJSON file.", "_____no_output_____" ], [ "## An empty CityJSON file\n\n![](figures/cj01.svg)", "_____no_output_____" ], [ "In a city model we represent the real-world objects such as buildings, bridges, trees as different types of CityObjects. Each CityObject has its \n\n+ unique ID, \n+ attributes,\n+ geometry,\n+ and it can have children objects or it can be part of a parent object.\n\nNote however, that CityObject are not nested. Each of them is stored at root and the hierachy represented by linking to object IDs. 
", "_____no_output_____" ], [ "## A CityObject\n\n![](figures/cj02.svg)", "_____no_output_____" ], [ "Each CityObject has a geometry representation. This geometry is composed of *boundaries* and *semantics*.", "_____no_output_____" ], [ "## Geometry\n\n+ **boundaries** definition uses vertex indices (inspired by Wavefront OBJ)\n+ We have a vertex list at the root of the document\n+ Vertices are not repeated (unlike Simple Features)\n+ **semantics** are linked to the boundary surfaces\n![](figures/cj04.svg)", "_____no_output_____" ], [ "This `MulitSurface` has \n\n5 surfaces \n```json\n[[0, 3, 2, 1]], [[4, 5, 6, 7]], [[0, 1, 5, 4]], [[0, 2, 3, 8]], [[10, 12, 23, 48]]\n```\neach surface has only an exterior ring (the first array)\n```json\n[ [0, 3, 2, 1] ]\n```\n\nThe semantic surfaces in the `semantics` json-object are linked to the boundary surfaces. The integers in the `values` property of `surfaces` are the 0-based indices of the surfaces of the boundary.", "_____no_output_____" ] ], [ [ "import json\nimport os\n\npath = os.path.join('data', 'rotterdam_subset.json')\nwith open(path) as fin:\n cm = json.loads(fin.read())\n \nprint(f\"There are {len(cm['CityObjects'])} CityObjects\")\n\n# list all IDs\nfor id in cm['CityObjects']:\n print(id, \"\\t\")", "There are 16 CityObjects\n{C9D4A5CF-094A-47DA-97E4-4A3BFD75D3AE} \t\n{71B60053-BC28-404D-BAB9-8A642AAC0CF4} \t\n{6271F75F-E8D8-4EE4-AC46-9DB02771A031} \t\n{DE77E78F-B110-43D2-A55C-8B61911192DE} \t\n{19935DFC-F7B3-4D6E-92DD-C48EE1D1519A} \t\n{953BC999-2F92-4B38-95CF-218F7E05AFA9} \t\n{8D716FDE-18DD-4FB5-AB06-9D207377240E} \t\n{C6AAF95B-8C09-4130-AB4D-6777A2A18A2E} \t\n{72390BDE-903C-4C8C-8A3F-2DF5647CD9B4} \t\n{8244B286-63E2-436E-9D4E-169B8ACFE9D0} \t\n{87316D28-7574-4763-B9CE-BF6A2DF8092C} \t\n{CD98680D-A8DD-4106-A18E-15EE2A908D75} \t\n{64A9018E-4F56-47CD-941F-43F6F0C4285B} \t\n{459F183A-D0C2-4F8A-8B5F-C498EFDE366D} \t\n{237D41CC-991E-4308-8986-42ABFB4F7431} \t\n{23D8CA22-0C82-4453-A11E-B3F2B3116DB4} \t\n" ] ], [ [ 
"+ Working with a CityJSON file is straightforward. One can open it with the standard library and get going.\n+ But you need to know the schema well.\n+ And you need to write everything from scratch.", "_____no_output_____" ], [ "That is why we are developing **cjio**. \n\n**cjio** is how *we eat what we cook*\n\nAims to help to actually work with and analyse 3D city models, and extract more value from them. Instead of letting them gather dust in some governmental repository.", "_____no_output_____" ], [ "![cjio](figures/cjio_docs.png)", "_____no_output_____" ], [ "## `cjio` has a (quite) stable CLI\n\n```bash\n$ cjio city_model.json reproject 2056 export --format glb /out/model.glb\n```", "_____no_output_____" ], [ "## and an experimental API\n\n```python\nfrom cjio import cityjson\n\ncm = cityjson.load('city_model.json')\n\ncm.get_cityobjects(type='building')\n```", "_____no_output_____" ], [ "**`pip install cjio`**", "_____no_output_____" ], [ "This notebook is based on the develop branch.", "_____no_output_____" ], [ "**`pip install git+https://github.com/tudelft3d/cjio@develop`**", "_____no_output_____" ], [ "# `cjio`'s CLI", "_____no_output_____" ] ], [ [ "! cjio --help", "Usage: cjio [OPTIONS] INPUT COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...\r\n\r\n Process and manipulate a CityJSON file, and allow different outputs. 
The\r\n different operators can be chained to perform several processing in one\r\n step, the CityJSON model goes through the different operators.\r\n\r\n To get help on specific command, eg for 'validate':\r\n\r\n cjio validate --help\r\n\r\n Usage examples:\r\n\r\n cjio example.json info validate\r\n cjio example.json assign_epsg 7145 remove_textures export output.obj\r\n cjio example.json subset --id house12 save out.json\r\n\r\nOptions:\r\n --version Show the version and exit.\r\n --ignore_duplicate_keys Load a CityJSON file even if some City Objects have\r\n the same IDs (technically invalid file)\r\n --help Show this message and exit.\r\n\r\nCommands:\r\n assign_epsg Assign a (new) EPSG.\r\n clean Clean = remove_duplicate_vertices +...\r\n compress Compress a CityJSON file, ie stores its...\r\n decompress Decompress a CityJSON file, ie remove the...\r\n export Export the CityJSON to another format.\r\n extract_lod Extract only one LoD for a dataset.\r\n info Output info in simple JSON.\r\n locate_textures Output the location of the texture files.\r\n merge Merge the current CityJSON with others.\r\n partition Partition the city model into tiles.\r\n remove_duplicate_vertices Remove duplicate vertices a CityJSON file.\r\n remove_materials Remove all materials from a CityJSON file.\r\n remove_orphan_vertices Remove orphan vertices a CityJSON file.\r\n remove_textures Remove all textures from a CityJSON file.\r\n reproject Reproject the CityJSON to a new EPSG.\r\n save Save the city model to a CityJSON file.\r\n subset Create a subset of a CityJSON file.\r\n translate Translate the file by its (-minx, -miny,...\r\n update_bbox Update the bbox of a CityJSON file.\r\n update_textures Update the location of the texture files.\r\n upgrade_version Upgrade the CityJSON to the latest version.\r\n validate Validate the CityJSON file: (1) against its...\r\n" ], [ "! 
cjio data/rotterdam_subset.json info", "\u001b[30m\u001b[46mParsing data/rotterdam_subset.json\u001b[0m\r\n{\r\n \"cityjson_version\": \"1.0\",\r\n \"epsg\": 7415,\r\n \"bbox\": [\r\n 90454.18900000001,\r\n 435614.88,\r\n 0.0,\r\n 91002.41900000001,\r\n 436048.217,\r\n 18.29\r\n ],\r\n \"transform/compressed\": true,\r\n \"cityobjects_total\": 16,\r\n \"cityobjects_present\": [\r\n \"Building\"\r\n ],\r\n \"materials\": false,\r\n \"textures\": true\r\n}\r\n" ], [ "! cjio data/rotterdam_subset.json validate", "\u001b[30m\u001b[46mParsing data/rotterdam_subset.json\u001b[0m\n\u001b[30m\u001b[46m===== Validation (with official CityJSON schemas) =====\u001b[0m\n-- Validating the syntax of the file\n\t(using the schemas 1.0.0)\n-- Validating the internal consistency of the file (see docs for list)\n\t--Vertex indices coherent\n\t--Specific for CityGroups\n\t--Semantic arrays coherent with geometry\n\t--Root properties\n\t--Empty geometries\n\t--Duplicate vertices\n\t--Orphan vertices\n\t--CityGML attributes\n=====\n\u001b[32mFile is valid\u001b[0m\n\u001b[31mFile has warnings\u001b[0m\n--- WARNINGS ---\nWARNING: attributes 'TerrainHeight' not in CityGML schema\n\t(16 CityObjects have this warning)\nWARNING: attributes 'bron_tex' not in CityGML schema\n\t(16 CityObjects have this warning)\nWARNING: attributes 'voll_tex' not in CityGML schema\n\t(16 CityObjects have this warning)\nWARNING: attributes 'bron_geo' not in CityGML schema\n\t(16 CityObjects have this warning)\nWARNING: attributes 'status' not in CityGML schema\n\t(16 CityObjects have this warning)\n=====================================\n" ], [ "! 
cjio data/rotterdam_subset.json \\\n subset --exclude --id \"{CD98680D-A8DD-4106-A18E-15EE2A908D75}\" \\\n merge data/rotterdam_one.json \\\n reproject 2056 \\\n save data/test_rotterdam.json", "\u001b[30m\u001b[46mParsing data/rotterdam_subset.json\u001b[0m\n\u001b[30m\u001b[46mSubset of CityJSON\u001b[0m\n\u001b[30m\u001b[46mMerging files\u001b[0m\n\u001b[30m\u001b[46mReproject to EPSG:2056\u001b[0m\n\u001b[?25l [####################################] 100% \u001b[?25h\n\u001b[30m\u001b[46mSaving CityJSON to a file /home/balazs/Reports/talk_cjio_foss4g_2019/data/test_rotterdam.json\u001b[0m\n" ] ], [ [ "+ The CLI was first, no plans for API\n\n+ **Works with whole city model only**\n\n+ Functions for the CLI work with the JSON directly, passing it along\n\n+ Simple and effective architecture", "_____no_output_____" ], [ "# `cjio`'s API\n\nAllow *read* --> *explore* --> *modify* --> *write* iteration\n\nWork with CityObjects and their parts\n\nFunctions for common operations\n\nInspired by the *tidyverse* from the R ecosystem", "_____no_output_____" ] ], [ [ "import os\nfrom copy import deepcopy\nfrom cjio import cityjson\nfrom shapely.geometry import Polygon\nimport matplotlib.pyplot as plt\nplt.close('all')\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn import cluster\nimport numpy as np", "_____no_output_____" ] ], [ [ "In the following we work with a subset of the 3D city model of Rotterdam\n![](figures/rotterdam_subset.png)", "_____no_output_____" ], [ "## Load a CityJSON", "_____no_output_____" ], [ "The `load()` method loads a CityJSON file into a CityJSON object.", "_____no_output_____" ] ], [ [ "path = os.path.join('data', 'rotterdam_subset.json')\n\ncm = cityjson.load(path)\n\nprint(type(cm))", "<class 'cjio.cityjson.CityJSON'>\n" ] ], [ [ "## Using the CLI commands in the API\nYou can use any of the CLI commands on a CityJSON object \n\n*However,* not all CLI commands are mapped 1-to-1 to `CityJSON` methods\n\nAnd we haven't 
harmonized the CLI and the API yet. ", "_____no_output_____" ] ], [ [ "cm.validate()", "-- Validating the syntax of the file\n\t(using the schemas 1.0.0)\n-- Validating the internal consistency of the file (see docs for list)\n\t--Vertex indices coherent\n\t--Specific for CityGroups\n\t--Semantic arrays coherent with geometry\n\t--Root properties\n\t--Empty geometries\n\t--Duplicate vertices\n\t--Orphan vertices\n\t--CityGML attributes\n" ] ], [ [ "## Explore the city model\n\nPrint the basic information about the city model. Note that `print()` returns the same information as the `info` command in the CLI.", "_____no_output_____" ] ], [ [ "print(cm)", "{\n \"cityjson_version\": \"1.0\",\n \"epsg\": 7415,\n \"bbox\": [\n 90454.18900000001,\n 435614.88,\n 0.0,\n 91002.41900000001,\n 436048.217,\n 18.29\n ],\n \"transform/compressed\": true,\n \"cityobjects_total\": 16,\n \"cityobjects_present\": [\n \"Building\"\n ],\n \"materials\": false,\n \"textures\": true\n}\n" ] ], [ [ "## Getting objects from the model\nGet CityObjects by their *type*, or a list of types. Also by their IDs. \n\nNote that `get_cityobjects()` == `cm.cityobjects`", "_____no_output_____" ] ], [ [ "buildings = cm.get_cityobjects(type='building')\n\n# both Building and BuildingPart objects\nbuildings_parts = cm.get_cityobjects(type=['building', 'buildingpart'])\n\nr_ids = ['{C9D4A5CF-094A-47DA-97E4-4A3BFD75D3AE}',\n '{6271F75F-E8D8-4EE4-AC46-9DB02771A031}']\nbuildings_ids = cm.get_cityobjects(id=r_ids)", "_____no_output_____" ] ], [ [ "## Properties and geometry of objects", "_____no_output_____" ] ], [ [ "b01 = buildings_ids['{C9D4A5CF-094A-47DA-97E4-4A3BFD75D3AE}']\nprint(b01)", "{\n \"id\": \"{C9D4A5CF-094A-47DA-97E4-4A3BFD75D3AE}\",\n \"type\": \"Building\",\n \"attributes\": {\n \"TerrainHeight\": 3.03,\n \"bron_tex\": \"UltraCAM-X 10cm juni 2008\",\n \"voll_tex\": \"complete\",\n \"bron_geo\": \"Lidar 15-30 punten - nov. 
2008\",\n \"status\": \"1\"\n },\n \"children\": null,\n \"parents\": null,\n \"geometry_type\": [\n \"MultiSurface\"\n ],\n \"geometry_lod\": [\n 2\n ],\n \"semantic_surfaces\": [\n \"WallSurface\",\n \"RoofSurface\",\n \"GroundSurface\"\n ]\n}\n" ], [ "b01.attributes", "_____no_output_____" ] ], [ [ "CityObjects can have *children* and *parents*", "_____no_output_____" ] ], [ [ "b01.children is None and b01.parents is None", "_____no_output_____" ] ], [ [ "CityObject geometry is a list of `Geometry` objects. That is because a CityObject can have multiple geometry representations in different levels of detail, eg. a geometry in LoD1 and a second geometry in LoD2.", "_____no_output_____" ] ], [ [ "b01.geometry", "_____no_output_____" ], [ "geom = b01.geometry[0]\nprint(\"{}, lod {}\".format(geom.type, geom.lod))", "MultiSurface, lod 2\n" ] ], [ [ "### Geometry boundaries and Semantic Surfaces\nOn the contrary to a CityJSON file, the geometry boundaries are dereferenced when working with the API. This means that the vertex coordinates are included in the boundary definition, not only the vertex indices.\n\n`cjio` doesn't provide specific geometry classes (yet), eg. MultiSurface or Solid class. If you are working with the geometry boundaries, you need to the geometric operations yourself, or cast the boundary to a geometry-class of some other library. For example `shapely` if 2D is enough.", "_____no_output_____" ], [ "Vertex coordinates are kept 'as is' on loading the geometry. CityJSON files are often compressed and coordinates are shifted and transformed into integers so probably you'll want to transform them back. 
Otherwise geometry operations won't make sense.", "_____no_output_____" ] ], [ [ "transformation_object = cm.transform\n\ngeom_transformed = geom.transform(transformation_object)\n\ngeom_transformed.boundaries[0][0]", "_____no_output_____" ] ], [ [ "But it might be easier to transform (decompress) the whole model on load.", "_____no_output_____" ] ], [ [ "cm_transformed = cityjson.load(path, transform=True)\nprint(cm_transformed)", "{\n \"cityjson_version\": \"1.0\",\n \"epsg\": 7415,\n \"bbox\": [\n 90454.18900000001,\n 435614.88,\n 0.0,\n 91002.41900000001,\n 436048.217,\n 18.29\n ],\n \"transform/compressed\": false,\n \"cityobjects_total\": 16,\n \"cityobjects_present\": [\n \"Building\"\n ],\n \"materials\": false,\n \"textures\": true\n}\n" ] ], [ [ "Semantic Surfaces are stored in a similar fashion as in a CityJSON file, in the `surfaces` attribute of a Geometry object.", "_____no_output_____" ] ], [ [ "geom.surfaces", "_____no_output_____" ] ], [ [ "`surfaces` does not store geometry boundaries, just references (`surface_idx`). Use the `get_surface_boundaries()` method to obtain the boundary-parts connected to the semantic surface.", "_____no_output_____" ] ], [ [ "roofs = geom.get_surfaces(type='roofsurface')\nroofs", "_____no_output_____" ], [ "roof_boundaries = []\nfor r in roofs.values():\n roof_boundaries.append(geom.get_surface_boundaries(r))", "_____no_output_____" ], [ "roof_boundaries", "_____no_output_____" ] ], [ [ "### Assigning attributes to Semantic Surfaces\n1. extract the surfaces,\n2. make the changes on the surface,\n3. 
overwrite the CityObjects with the changes.", "_____no_output_____" ] ], [ [ "cm_copy = deepcopy(cm)\nnew_cos = {}\nfor co_id, co in cm.cityobjects.items():\n new_geoms = []\n for geom in co.geometry:\n # Only LoD >= 2 models have semantic surfaces\n if geom.lod >= 2.0:\n # Extract the surfaces\n roofsurfaces = geom.get_surfaces('roofsurface')\n for i, rsrf in roofsurfaces.items():\n # Change the attributes\n if 'attributes' in rsrf.keys():\n rsrf['attributes']['cladding'] = 'tiles'\n else:\n rsrf['attributes'] = {}\n rsrf['attributes']['cladding'] = 'tiles'\n geom.surfaces[i] = rsrf\n new_geoms.append(geom)\n else:\n # Use the unchanged geometry\n new_geoms.append(geom)\n co.geometry = new_geoms\n new_cos[co_id] = co\ncm_copy.cityobjects = new_cos", "_____no_output_____" ], [ "print(cm_copy.cityobjects['{C9D4A5CF-094A-47DA-97E4-4A3BFD75D3AE}'])", "{\n \"id\": \"{C9D4A5CF-094A-47DA-97E4-4A3BFD75D3AE}\",\n \"type\": \"Building\",\n \"attributes\": {\n \"TerrainHeight\": 3.03,\n \"bron_tex\": \"UltraCAM-X 10cm juni 2008\",\n \"voll_tex\": \"complete\",\n \"bron_geo\": \"Lidar 15-30 punten - nov. 2008\",\n \"status\": \"1\"\n },\n \"children\": null,\n \"parents\": null,\n \"geometry_type\": [\n \"MultiSurface\"\n ],\n \"geometry_lod\": [\n 2\n ],\n \"semantic_surfaces\": [\n \"WallSurface\",\n \"RoofSurface\",\n \"GroundSurface\"\n ]\n}\n" ] ], [ [ "### Create new Semantic Surfaces\nThe process is similar as previously. However, in this example we create new SemanticSurfaces that hold the values which we compute from the geometry. The input city model has a single semantic \"WallSurface\", without attributes, for all the walls of a building. 
The snippet below illustrates how to separate surfaces and assign the semantics to them.", "_____no_output_____" ] ], [ [ "new_cos = {}\n\nfor co_id, co in cm_copy.cityobjects.items():\n new_geoms = []\n \n for geom in co.geometry:\n if geom.lod >= 2.0:\n max_id = max(geom.surfaces.keys())\n old_ids = []\n \n for w_i, wsrf in geom.get_surfaces('wallsurface').items():\n old_ids.append(w_i)\n del geom.surfaces[w_i]\n boundaries = geom.get_surface_boundaries(wsrf)\n \n for j, boundary_geometry in enumerate(boundaries):\n # The original geometry has the same Semantic for all wall, \n # but we want to divide the wall surfaces by their orientation, \n # thus we need to have the correct surface index\n surface_index = wsrf['surface_idx'][j]\n new_srf = {\n 'type': wsrf['type'],\n 'surface_idx': surface_index\n }\n \n for multisurface in boundary_geometry:\n # Do any operation here\n x, y, z = multisurface[0]\n if j % 2 > 0:\n orientation = 'north'\n else:\n orientation = 'south'\n \n # Add the new attribute to the surface \n if 'attributes' in wsrf.keys():\n wsrf['attributes']['orientation'] = orientation\n else:\n wsrf['attributes'] = {}\n wsrf['attributes']['orientation'] = orientation\n \n new_srf['attributes'] = wsrf['attributes']\n \n # if w_i in geom.surfaces.keys():\n # del geom.surfaces[w_i]\n \n max_id = max_id + 1\n geom.surfaces[max_id] = new_srf\n \n new_geoms.append(geom)\n \n else:\n # If LoD1, just add the geometry unchanged\n new_geoms.append(geom)\n \n co.geometry = new_geoms\n new_cos[co_id] = co\n \ncm_copy.cityobjects = new_cos", "_____no_output_____" ] ], [ [ "# Analysing CityModels\n\n![](figures/zurich.png)", "_____no_output_____" ], [ "In the following I show how to compute some attributes from CityObject geometry and use these attributes as input for machine learning. 
For this we use the LoD2 model of Zürich.\n\nDownload the Zürich data set from https://3d.bk.tudelft.nl/opendata/cityjson/1.0/Zurich_Building_LoD2_V10.json", "_____no_output_____" ] ], [ [ "path = os.path.join('data', 'zurich.json')\nzurich = cityjson.load(path, transform=True)", "_____no_output_____" ] ], [ [ "## A simple geometry function", "_____no_output_____" ], [ "Here is a simple geometry function that computes the area of the groundsurface (footprint) of buildings in the model. It also show how to cast surfaces, in this case the ground surface, to Shapely Polygons.", "_____no_output_____" ] ], [ [ "def compute_footprint_area(co):\n \"\"\"Compute the area of the footprint\"\"\"\n footprint_area = 0\n for geom in co.geometry:\n \n # only LoD2 (or higher) objects have semantic surfaces\n if geom.lod >= 2.0:\n footprints = geom.get_surfaces(type='groundsurface')\n \n # there can be many surfaces with label 'groundsurface'\n for i,f in footprints.items():\n for multisurface in geom.get_surface_boundaries(f):\n for surface in multisurface:\n \n # cast to Shapely polygon\n shapely_poly = Polygon(surface)\n footprint_area += shapely_poly.area\n \n return footprint_area", "_____no_output_____" ] ], [ [ "## Compute new attributes", "_____no_output_____" ], [ "Then we need to loop through the CityObjects and update add the new attributes. Note that the `attributes` CityObject attribute is just a dictionary.\n\nThus we compute the number of vertices of the CityObject and the area of is footprint. Then we going to cluster these two variables. 
This is completely arbitrary excercise which is simply meant to illustrate how to transform a city model into machine-learnable features.", "_____no_output_____" ] ], [ [ "for co_id, co in zurich.cityobjects.items():\n co.attributes['nr_vertices'] = len(co.get_vertices())\n co.attributes['fp_area'] = compute_footprint_area(co)\n zurich.cityobjects[co_id] = co", "_____no_output_____" ] ], [ [ "It is possible to export the city model into a pandas DataFrame. Note that only the CityObject attributes are exported into the dataframe, with CityObject IDs as the index of the dataframe. Thus if you want to export the attributes of SemanticSurfaces for example, then you need to add them as CityObject attributes.\n\nThe function below illustrates this operation.", "_____no_output_____" ] ], [ [ "def assign_cityobject_attribute(cm):\n \"\"\"Copy the semantic surface attributes to CityObject attributes.\n Returns a copy of the citymodel.\n \"\"\"\n new_cos = {}\n cm_copy = deepcopy(cm)\n for co_id, co in cm.cityobjects.items():\n for geom in co.geometry:\n for srf in geom.surfaces.values():\n if 'attributes' in srf:\n for attr,a_v in srf['attributes'].items():\n if (attr not in co.attributes) or (co.attributes[attr] is None):\n co.attributes[attr] = [a_v]\n else:\n co.attributes[attr].append(a_v)\n new_cos[co_id] = co\n cm_copy.cityobjects = new_cos\n return cm_copy", "_____no_output_____" ], [ "df = zurich.to_dataframe()\ndf.head()", "_____no_output_____" ] ], [ [ "In order to have a nicer distribution of the data, we remove the missing values and apply a log-transform on the two variables. Note that the `FuntionTransformer.transform` transforms a DataFrame to a numpy array that is ready to be used in `scikit-learn`. 
The details of a machine learning workflow is beyond the scope of this tutorial however.", "_____no_output_____" ] ], [ [ "df_subset = df[df['Geomtype'].notnull() & df['fp_area'] > 0.0].loc[:, ['nr_vertices', 'fp_area']]\ntransformer = FunctionTransformer(np.log, validate=True)\ndf_logtransform = transformer.transform(df_subset)", "_____no_output_____" ], [ "fig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nax.scatter(df_logtransform[:,0], df_logtransform[:,1], alpha=0.3, s=1.0)\nplt.show()", "_____no_output_____" ], [ "def plot_model_results(model, data):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n colormap = np.array(['lightblue', 'red', 'lime', 'blue','black'])\n ax.scatter(data[:,0], data[:,1], c=colormap[model.labels_], s=10, alpha=0.5)\n ax.set_xlabel('Number of vertices [log]')\n ax.set_ylabel('Footprint area [log]')\n plt.title(f\"DBSCAN clustering with estimated {len(set(model.labels_))} clusters\")\n plt.show()", "_____no_output_____" ] ], [ [ "Since we transformed our DataFrame, we can fit any model in `scikit-learn`. 
I use DBSCAN because I wanted to find the data points on the fringes of the central cluster.", "_____no_output_____" ] ], [ [ "%matplotlib notebook\nmodel = cluster.DBSCAN(eps=0.2).fit(df_logtransform)\n\nplot_model_results(model, df_logtransform)", "_____no_output_____" ], [ "# merge the cluster labels back to the data frame\ndf_subset['dbscan'] = model.labels_", "_____no_output_____" ] ], [ [ "## Save the results back to CityJSON", "_____no_output_____" ], [ "And merge the DataFrame with cluster labels back to the city model.", "_____no_output_____" ] ], [ [ "for co_id, co in zurich.cityobjects.items():\n if co_id in df_subset.index:\n ml_results = dict(df_subset.loc[co_id])\n else:\n ml_results = {'nr_vertices': 'nan', 'fp_area': 'nan', 'dbscan': 'nan'}\n new_attrs = {**co.attributes, **ml_results}\n co.attributes = new_attrs\n zurich.cityobjects[co_id] = co", "_____no_output_____" ] ], [ [ "At the end, the `save()` method saves the edited city model into a CityJSON file.", "_____no_output_____" ] ], [ [ "path_out = os.path.join('data', 'zurich_output.json')\ncityjson.save(zurich, path_out)", "_____no_output_____" ] ], [ [ "## And view the results in QGIS again\n\n![](figures/zurich_ml_result.png)", "_____no_output_____" ], [ "However, you'll need to set up the styling based on the cluster labels by hand.", "_____no_output_____" ], [ "# Other software", "_____no_output_____" ], [ "## Online CityJSON viewer\n\n![](figures/viewer.png)", "_____no_output_____" ], [ "## QGIS plugin\n![](figures/qgis_zurich.png)", "_____no_output_____" ], [ "## Azul\n![](figures/azul.png)", "_____no_output_____" ], [ "# Full conversion CityGML <--> CityJSON\n![](figures/citygml4j.png)", "_____no_output_____" ], [ "# Thank you!\n\nBalázs Dukai\n\[email protected]\n\n@BalazsDukai\n\n## A few links\n\nRepo of this talk: 
[https://github.com/balazsdukai/foss4g2019](https://github.com/balazsdukai/foss4g2019)\n\n[cityjson.org](cityjson.org)\n\n[viewer.cityjson.org](viewer.cityjson.org)\n\nQGIS plugin: [github.com/tudelft3d/cityjson-qgis-plugin](github.com/tudelft3d/cityjson-qgis-plugin)\n\nAzul – CityJSON viewer on Mac – check the [AppStore](https://apps.apple.com/nl/app/azul/id1173239678?mt=12)\n\ncjio: [github.com/tudelft3d/cjio](github.com/tudelft3d/cjio) & [tudelft3d.github.io/cjio/](tudelft3d.github.io/cjio/)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d067cd495d88c9bd2962cedffcca0a72aeeb261c
5,878
ipynb
Jupyter Notebook
docs/examples/Working_with_nonWebMercatorTMS.ipynb
Anagraph/titiler
287201a554523a1cb4258ff41ec52ca2bdc0ac13
[ "MIT" ]
null
null
null
docs/examples/Working_with_nonWebMercatorTMS.ipynb
Anagraph/titiler
287201a554523a1cb4258ff41ec52ca2bdc0ac13
[ "MIT" ]
null
null
null
docs/examples/Working_with_nonWebMercatorTMS.ipynb
Anagraph/titiler
287201a554523a1cb4258ff41ec52ca2bdc0ac13
[ "MIT" ]
null
null
null
25.556522
206
0.534706
[ [ [ "# Working With TileMatrixSets (other than WebMercator)\n\n\n[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/developmentseed/titiler/master?filepath=docs%2Fexamples%2FWorking_with_nonWebMercatorTMS.ipynb)\n\nTiTiler has builtin support for serving tiles in multiple Projections by using [rio-tiler](https://github.com/cogeotiff/rio-tiler) and [morecantile](https://github.com/developmentseed/morecantile).\n\nThe default `cog` and `stac` endpoint (`titiler.endpoints.cog`and `titiler.endoints.stac`) are built with Mutli TMS support using the default grids provided by morecantile:\n\n```python\n\nfrom fastapi import FastAPI\n\nfrom titiler.endpoints.factory import TilerFactory\n\n# Create a Multi TMS Tiler using `TilerFactory` Factory\ncog = TilerFactory(router_prefix=\"cog\")\n\napp = FastAPI()\napp.include_router(cog.router, prefix=\"/cog\", tags=[\"Cloud Optimized GeoTIFF\"])\n\n```", "_____no_output_____" ], [ "This Notebook shows how to use and display tiles with non-webmercator TileMatrixSet\n\n#### Requirements\n- ipyleaflet\n- requests", "_____no_output_____" ] ], [ [ "# Uncomment if you need to install those module within the notebook\n# !pip install ipyleaflet requests", "_____no_output_____" ], [ "import json\n\nimport requests\n\nfrom ipyleaflet import (\n Map,\n basemaps,\n basemap_to_tiles,\n TileLayer,\n WMSLayer,\n GeoJSON,\n projections\n)", "_____no_output_____" ], [ "titiler_endpoint = \"https://api.cogeo.xyz\" # Devseed Custom TiTiler endpoint\nurl = \"https://s3.amazonaws.com/opendata.remotepixel.ca/cogs/natural_earth/world.tif\" # Natural Earth WORLD tif", "_____no_output_____" ] ], [ [ "### List Supported TileMatrixSets", "_____no_output_____" ] ], [ [ "r = requests.get(\"https://api.cogeo.xyz/tileMatrixSets\").json()\n\nprint(\"Supported TMS:\")\nfor tms in r[\"tileMatrixSets\"]:\n print(\"-\", tms[\"id\"])", "_____no_output_____" ] ], [ [ "## WGS 84 -- WGS84 - World Geodetic System 1984 - 
EPSG:4326\n\nhttps://epsg.io/4326", "_____no_output_____" ] ], [ [ "r = requests.get(\n \"https://api.cogeo.xyz/cog/WorldCRS84Quad/tilejson.json\", params = {\"url\": url}\n).json()\n\nm = Map(center=(45, 0), zoom=4, basemap={}, crs=projections.EPSG4326)\n\nlayer = TileLayer(url=r[\"tiles\"][0], opacity=1)\nm.add_layer(layer)\nm", "_____no_output_____" ] ], [ [ "## WGS 84 / NSIDC Sea Ice Polar Stereographic North - EPSG:3413\n\nhttps://epsg.io/3413", "_____no_output_____" ] ], [ [ "r = requests.get(\n \"https://api.cogeo.xyz/cog/EPSG3413/tilejson.json\", params = {\"url\": url}\n).json()\n\nm = Map(center=(70, 0), zoom=1, basemap={}, crs=projections.EPSG3413)\n\nlayer = TileLayer(url=r[\"tiles\"][0], opacity=1)\nm.add_layer(layer)\nm", "_____no_output_____" ] ], [ [ "## ETRS89-extended / LAEA Europe - EPSG:3035\n\nhttps://epsg.io/3035", "_____no_output_____" ] ], [ [ "r = requests.get(\n \"https://api.cogeo.xyz/cog/EuropeanETRS89_LAEAQuad/tilejson.json\", params = {\"url\": url}\n).json()\n\nmy_projection = {\n 'name': 'EPSG:3035',\n 'custom': True, #This is important, it tells ipyleaflet that this projection is not on the predefined ones.\n 'proj4def': '+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',\n 'origin': [6500000.0, 5500000.0],\n 'resolutions': [\n 8192.0,\n 4096.0,\n 2048.0,\n 1024.0,\n 512.0,\n 256.0\n ]\n}\n\nm = Map(center=(50, 65), zoom=1, basemap={}, crs=my_projection)\n\nlayer = TileLayer(url=r[\"tiles\"][0], opacity=1)\nm.add_layer(layer)\nm", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d067e54bfae8fef0ac26bc12c23854d8b41cb8a7
42,853
ipynb
Jupyter Notebook
100-pandas-puzzles.ipynb
greenteausa/100-pandas-puzzles
490ceac9b56bf5b828d6fefdbc9f6615ff9416e9
[ "MIT" ]
1
2020-04-25T06:59:19.000Z
2020-04-25T06:59:19.000Z
100-pandas-puzzles.ipynb
greenteausa/100-pandas-puzzles
490ceac9b56bf5b828d6fefdbc9f6615ff9416e9
[ "MIT" ]
null
null
null
100-pandas-puzzles.ipynb
greenteausa/100-pandas-puzzles
490ceac9b56bf5b828d6fefdbc9f6615ff9416e9
[ "MIT" ]
2
2020-11-19T04:01:58.000Z
2020-11-19T06:46:46.000Z
29.052881
400
0.542832
[ [ [ "# 100 pandas puzzles\n\nInspired by [100 Numpy exerises](https://github.com/rougier/numpy-100), here are 100* short puzzles for testing your knowledge of [pandas'](http://pandas.pydata.org/) power.\n\nSince pandas is a large library with many different specialist features and functions, these excercises focus mainly on the fundamentals of manipulating data (indexing, grouping, aggregating, cleaning), making use of the core DataFrame and Series objects. \n\nMany of the excerises here are stright-forward in that the solutions require no more than a few lines of code (in pandas or NumPy... don't go using pure Python or Cython!). Choosing the right methods and following best practices is the underlying goal.\n\nThe exercises are loosely divided in sections. Each section has a difficulty rating; these ratings are subjective, of course, but should be a seen as a rough guide as to how inventive the required solution is.\n\nIf you're just starting out with pandas and you are looking for some other resources, the official documentation is very extensive. In particular, some good places get a broader overview of pandas are...\n\n- [10 minutes to pandas](http://pandas.pydata.org/pandas-docs/stable/10min.html)\n- [pandas basics](http://pandas.pydata.org/pandas-docs/stable/basics.html)\n- [tutorials](http://pandas.pydata.org/pandas-docs/stable/tutorials.html)\n- [cookbook and idioms](http://pandas.pydata.org/pandas-docs/stable/cookbook.html#cookbook)\n\nEnjoy the puzzles!\n\n\\* *the list of exercises is not yet complete! 
Pull requests or suggestions for additional exercises, corrections and improvements are welcomed.*", "_____no_output_____" ], [ "## Importing pandas\n\n### Getting started and checking your pandas setup\n\nDifficulty: *easy* \n\n**1.** Import pandas under the alias `pd`.", "_____no_output_____" ], [ "**2.** Print the version of pandas that has been imported.", "_____no_output_____" ], [ "**3.** Print out all the *version* information of the libraries that are required by the pandas library.", "_____no_output_____" ], [ "## DataFrame basics\n\n### A few of the fundamental routines for selecting, sorting, adding and aggregating data in DataFrames\n\nDifficulty: *easy*\n\nNote: remember to import numpy using:\n```python\nimport numpy as np\n```\n\nConsider the following Python dictionary `data` and Python list `labels`:\n\n``` python\ndata = {'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],\n 'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],\n 'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],\n 'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}\n\nlabels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']\n```\n(This is just some meaningless data I made up with the theme of animals and trips to a vet.)\n\n**4.** Create a DataFrame `df` from this dictionary `data` which has the index `labels`.", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndata = {'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],\n 'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],\n 'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],\n 'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}\n\nlabels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']\n\ndf = # (complete this line of code)", "_____no_output_____" ] ], [ [ "**5.** Display a summary of the basic information about this DataFrame and its data (*hint: there is a single method that can be called on the DataFrame*).", 
"_____no_output_____" ], [ "**6.** Return the first 3 rows of the DataFrame `df`.", "_____no_output_____" ], [ "**7.** Select just the 'animal' and 'age' columns from the DataFrame `df`.", "_____no_output_____" ], [ "**8.** Select the data in rows `[3, 4, 8]` *and* in columns `['animal', 'age']`.", "_____no_output_____" ], [ "**9.** Select only the rows where the number of visits is greater than 3.", "_____no_output_____" ], [ "**10.** Select the rows where the age is missing, i.e. it is `NaN`.", "_____no_output_____" ], [ "**11.** Select the rows where the animal is a cat *and* the age is less than 3.", "_____no_output_____" ], [ "**12.** Select the rows the age is between 2 and 4 (inclusive).", "_____no_output_____" ], [ "**13.** Change the age in row 'f' to 1.5.", "_____no_output_____" ], [ "**14.** Calculate the sum of all visits in `df` (i.e. find the total number of visits).", "_____no_output_____" ], [ "**15.** Calculate the mean age for each different animal in `df`.", "_____no_output_____" ], [ "**16.** Append a new row 'k' to `df` with your choice of values for each column. Then delete that row to return the original DataFrame.", "_____no_output_____" ], [ "**17.** Count the number of each type of animal in `df`.", "_____no_output_____" ], [ "**18.** Sort `df` first by the values in the 'age' in *decending* order, then by the value in the 'visit' column in *ascending* order (so row `i` should be first, and row `d` should be last).", "_____no_output_____" ], [ "**19.** The 'priority' column contains the values 'yes' and 'no'. Replace this column with a column of boolean values: 'yes' should be `True` and 'no' should be `False`.", "_____no_output_____" ], [ "**20.** In the 'animal' column, change the 'snake' entries to 'python'.", "_____no_output_____" ], [ "**21.** For each animal type and each number of visits, find the mean age. 
In other words, each row is an animal, each column is a number of visits and the values are the mean ages (*hint: use a pivot table*).", "_____no_output_____" ], [ "## DataFrames: beyond the basics\n\n### Slightly trickier: you may need to combine two or more methods to get the right answer\n\nDifficulty: *medium*\n\nThe previous section was tour through some basic but essential DataFrame operations. Below are some ways that you might need to cut your data, but for which there is no single \"out of the box\" method.", "_____no_output_____" ], [ "**22.** You have a DataFrame `df` with a column 'A' of integers. For example:\n```python\ndf = pd.DataFrame({'A': [1, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7]})\n```\n\nHow do you filter out rows which contain the same integer as the row immediately above?\n\nYou should be left with a column containing the following values:\n\n```python\n1, 2, 3, 4, 5, 6, 7\n```", "_____no_output_____" ], [ "**23.** Given a DataFrame of numeric values, say\n```python\ndf = pd.DataFrame(np.random.random(size=(5, 3))) # a 5x3 frame of float values\n```\n\nhow do you subtract the row mean from each element in the row?", "_____no_output_____" ], [ "**24.** Suppose you have DataFrame with 10 columns of real numbers, for example:\n\n```python\ndf = pd.DataFrame(np.random.random(size=(5, 10)), columns=list('abcdefghij'))\n```\nWhich column of numbers has the smallest sum? Return that column's label.", "_____no_output_____" ], [ "**25.** How do you count how many unique rows a DataFrame has (i.e. ignore all rows that are duplicates)? As input, use a DataFrame of zeros and ones with 10 rows and 3 columns.\n\n```python\ndf = pd.DataFrame(np.random.randint(0, 2, size=(10, 3)))\n```", "_____no_output_____" ], [ "The next three puzzles are slightly harder.\n\n\n**26.** In the cell below, you have a DataFrame `df` that consists of 10 columns of floating-point numbers. Exactly 5 entries in each row are NaN values. 
\n\nFor each row of the DataFrame, find the *column* which contains the *third* NaN value.\n\nYou should return a Series of column labels: `e, c, d, h, d`", "_____no_output_____" ] ], [ [ "nan = np.nan\n\ndata = [[0.04, nan, nan, 0.25, nan, 0.43, 0.71, 0.51, nan, nan],\n [ nan, nan, nan, 0.04, 0.76, nan, nan, 0.67, 0.76, 0.16],\n [ nan, nan, 0.5 , nan, 0.31, 0.4 , nan, nan, 0.24, 0.01],\n [0.49, nan, nan, 0.62, 0.73, 0.26, 0.85, nan, nan, nan],\n [ nan, nan, 0.41, nan, 0.05, nan, 0.61, nan, 0.48, 0.68]]\n\ncolumns = list('abcdefghij')\n\ndf = pd.DataFrame(data, columns=columns)\n\n# write a solution to the question here", "_____no_output_____" ] ], [ [ "**27.** A DataFrame has a column of groups 'grps' and and column of integer values 'vals': \n\n```python\ndf = pd.DataFrame({'grps': list('aaabbcaabcccbbc'), \n 'vals': [12,345,3,1,45,14,4,52,54,23,235,21,57,3,87]})\n```\nFor each *group*, find the sum of the three greatest values. You should end up with the answer as follows:\n```\ngrps\na 409\nb 156\nc 345\n```", "_____no_output_____" ] ], [ [ "df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'), \n 'vals': [12,345,3,1,45,14,4,52,54,23,235,21,57,3,87]})\n\n# write a solution to the question here", "_____no_output_____" ] ], [ [ "**28.** The DataFrame `df` constructed below has two integer columns 'A' and 'B'. The values in 'A' are between 1 and 100 (inclusive). \n\nFor each group of 10 consecutive integers in 'A' (i.e. 
`(0, 10]`, `(10, 20]`, ...), calculate the sum of the corresponding values in column 'B'.\n\nThe answer should be a Series as follows:\n\n```\nA\n(0, 10] 635\n(10, 20] 360\n(20, 30] 315\n(30, 40] 306\n(40, 50] 750\n(50, 60] 284\n(60, 70] 424\n(70, 80] 526\n(80, 90] 835\n(90, 100] 852\n```", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(np.random.RandomState(8765).randint(1, 101, size=(100, 2)), columns = [\"A\", \"B\"])\n\n# write a solution to the question here", "_____no_output_____" ] ], [ [ "## DataFrames: harder problems \n\n### These might require a bit of thinking outside the box...\n\n...but all are solvable using just the usual pandas/NumPy methods (and so avoid using explicit `for` loops).\n\nDifficulty: *hard*", "_____no_output_____" ], [ "**29.** Consider a DataFrame `df` where there is an integer column 'X':\n```python\ndf = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})\n```\nFor each value, count the difference back to the previous zero (or the start of the Series, whichever is closer). These values should therefore be \n\n```\n[1, 2, 0, 1, 2, 3, 4, 0, 1, 2]\n```\n\nMake this a new column 'Y'.", "_____no_output_____" ], [ "**30.** Consider the DataFrame constructed below which contains rows and columns of numerical data. \n\nCreate a list of the column-row index locations of the 3 largest values in this DataFrame. 
In this case, the answer should be:\n```\n[(5, 7), (6, 4), (2, 5)]\n```", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(np.random.RandomState(30).randint(1, 101, size=(8, 8)))", "_____no_output_____" ] ], [ [ "**31.** You are given the DataFrame below with a column of group IDs, 'grps', and a column of corresponding integer values, 'vals'.\n\n```python\ndf = pd.DataFrame({\"vals\": np.random.RandomState(31).randint(-30, 30, size=15), \n \"grps\": np.random.RandomState(31).choice([\"A\", \"B\"], 15)})\n```\n\nCreate a new column 'patched_values' which contains the same values as the 'vals' any negative values in 'vals' with the group mean:\n\n```\n vals grps patched_vals\n0 -12 A 13.6\n1 -7 B 28.0\n2 -14 A 13.6\n3 4 A 4.0\n4 -7 A 13.6\n5 28 B 28.0\n6 -2 A 13.6\n7 -1 A 13.6\n8 8 A 8.0\n9 -2 B 28.0\n10 28 A 28.0\n11 12 A 12.0\n12 16 A 16.0\n13 -24 A 13.6\n14 -12 A 13.6\n```", "_____no_output_____" ], [ "**32.** Implement a rolling mean over groups with window size 3, which ignores NaN value. For example consider the following DataFrame:\n\n```python\n>>> df = pd.DataFrame({'group': list('aabbabbbabab'),\n 'value': [1, 2, 3, np.nan, 2, 3, np.nan, 1, 7, 3, np.nan, 8]})\n>>> df\n group value\n0 a 1.0\n1 a 2.0\n2 b 3.0\n3 b NaN\n4 a 2.0\n5 b 3.0\n6 b NaN\n7 b 1.0\n8 a 7.0\n9 b 3.0\n10 a NaN\n11 b 8.0\n```\nThe goal is to compute the Series:\n\n```\n0 1.000000\n1 1.500000\n2 3.000000\n3 3.000000\n4 1.666667\n5 3.000000\n6 3.000000\n7 2.000000\n8 3.666667\n9 2.000000\n10 4.500000\n11 4.000000\n```\nE.g. the first window of size three for group 'b' has values 3.0, NaN and 3.0 and occurs at row index 5. Instead of being NaN the value in the new column at this row index should be 3.0 (just the two non-NaN values are used to compute the mean (3+3)/2)", "_____no_output_____" ], [ "## Series and DatetimeIndex\n\n### Exercises for creating and manipulating Series with datetime data\n\nDifficulty: *easy/medium*\n\npandas is fantastic for working with dates and times. 
These puzzles explore some of this functionality.\n", "_____no_output_____" ], [ "**33.** Create a DatetimeIndex that contains each business day of 2015 and use it to index a Series of random numbers. Let's call this Series `s`.", "_____no_output_____" ], [ "**34.** Find the sum of the values in `s` for every Wednesday.", "_____no_output_____" ], [ "**35.** For each calendar month in `s`, find the mean of values.", "_____no_output_____" ], [ "**36.** For each group of four consecutive calendar months in `s`, find the date on which the highest value occurred.", "_____no_output_____" ], [ "**37.** Create a DateTimeIndex consisting of the third Thursday in each month for the years 2015 and 2016.", "_____no_output_____" ], [ "## Cleaning Data\n\n### Making a DataFrame easier to work with\n\nDifficulty: *easy/medium*\n\nIt happens all the time: someone gives you data containing malformed strings, Python, lists and missing data. How do you tidy it up so you can get on with the analysis?\n\nTake this monstrosity as the DataFrame to use in the following puzzles:\n\n```python\ndf = pd.DataFrame({'From_To': ['LoNDon_paris', 'MAdrid_miLAN', 'londON_StockhOlm', \n 'Budapest_PaRis', 'Brussels_londOn'],\n 'FlightNumber': [10045, np.nan, 10065, np.nan, 10085],\n 'RecentDelays': [[23, 47], [], [24, 43, 87], [13], [67, 32]],\n 'Airline': ['KLM(!)', '<Air France> (12)', '(British Airways. )', \n '12. Air France', '\"Swiss Air\"']})\n```\nFormatted, it looks like this:\n\n```\n From_To FlightNumber RecentDelays Airline\n0 LoNDon_paris 10045.0 [23, 47] KLM(!)\n1 MAdrid_miLAN NaN [] <Air France> (12)\n2 londON_StockhOlm 10065.0 [24, 43, 87] (British Airways. )\n3 Budapest_PaRis NaN [13] 12. Air France\n4 Brussels_londOn 10085.0 [67, 32] \"Swiss Air\"\n```\n\n\n(It's some flight data I made up; it's not meant to be accurate in any way.)\n", "_____no_output_____" ], [ "**38.** Some values in the the **FlightNumber** column are missing (they are `NaN`). 
These numbers are meant to increase by 10 with each row so 10055 and 10075 need to be put in place. Modify `df` to fill in these missing numbers and make the column an integer column (instead of a float column).", "_____no_output_____" ], [ "**39.** The **From\\_To** column would be better as two separate columns! Split each string on the underscore delimiter `_` to give a new temporary DataFrame called 'temp' with the correct values. Assign the correct column names 'From' and 'To' to this temporary DataFrame. ", "_____no_output_____" ], [ "**40.** Notice how the capitalisation of the city names is all mixed up in this temporary DataFrame 'temp'. Standardise the strings so that only the first letter is uppercase (e.g. \"londON\" should become \"London\".)", "_____no_output_____" ], [ "**41.** Delete the **From_To** column from `df` and attach the temporary DataFrame 'temp' from the previous questions.", "_____no_output_____" ], [ "**42**. In the **Airline** column, you can see some extra puctuation and symbols have appeared around the airline names. Pull out just the airline name. E.g. `'(British Airways. )'` should become `'British Airways'`.", "_____no_output_____" ], [ "**43**. In the RecentDelays column, the values have been entered into the DataFrame as a list. We would like each first value in its own column, each second value in its own column, and so on. If there isn't an Nth value, the value should be NaN.\n\nExpand the Series of lists into a DataFrame named `delays`, rename the columns `delay_1`, `delay_2`, etc. 
and replace the unwanted RecentDelays column in `df` with `delays`.", "_____no_output_____" ], [ "The DataFrame should look much better now.\n```\n FlightNumber Airline From To delay_1 delay_2 delay_3\n0 10045 KLM London Paris 23.0 47.0 NaN\n1 10055 Air France Madrid Milan NaN NaN NaN\n2 10065 British Airways London Stockholm 24.0 43.0 87.0\n3 10075 Air France Budapest Paris 13.0 NaN NaN\n4 10085 Swiss Air Brussels London 67.0 32.0 NaN\n```", "_____no_output_____" ], [ "## Using MultiIndexes\n\n### Go beyond flat DataFrames with additional index levels\n\nDifficulty: *medium*\n\nPrevious exercises have seen us analysing data from DataFrames equipped with a single index level. However, pandas also gives you the possibilty of indexing your data using *multiple* levels. This is very much like adding new dimensions to a Series or a DataFrame. For example, a Series is 1D, but by using a MultiIndex with 2 levels we gain of much the same functionality as a 2D DataFrame.\n\nThe set of puzzles below explores how you might use multiple index levels to enhance data analysis.\n\nTo warm up, we'll look make a Series with two index levels. ", "_____no_output_____" ], [ "**44**. Given the lists `letters = ['A', 'B', 'C']` and `numbers = list(range(10))`, construct a MultiIndex object from the product of the two lists. Use it to index a Series of random numbers. Call this Series `s`.", "_____no_output_____" ], [ "**45.** Check the index of `s` is lexicographically sorted (this is a necessary proprty for indexing to work correctly with a MultiIndex).", "_____no_output_____" ], [ "**46**. Select the labels `1`, `3` and `6` from the second level of the MultiIndexed Series.", "_____no_output_____" ], [ "**47**. Slice the Series `s`; slice up to label 'B' for the first level and from label 5 onwards for the second level.", "_____no_output_____" ], [ "**48**. 
Sum the values in `s` for each label in the first level (you should have Series giving you a total for labels A, B and C).", "_____no_output_____" ], [ "**49**. Suppose that `sum()` (and other methods) did not accept a `level` keyword argument. How else could you perform the equivalent of `s.sum(level=1)`?", "_____no_output_____" ], [ "**50**. Exchange the levels of the MultiIndex so we have an index of the form (letters, numbers). Is this new Series properly lexsorted? If not, sort it.", "_____no_output_____" ], [ "## Minesweeper\n\n### Generate the numbers for safe squares in a Minesweeper grid\n\nDifficulty: *medium* to *hard*\n\nIf you've ever used an older version of Windows, there's a good chance you've played with Minesweeper:\n- https://en.wikipedia.org/wiki/Minesweeper_(video_game)\n\n\nIf you're not familiar with the game, imagine a grid of squares: some of these squares conceal a mine. If you click on a mine, you lose instantly. If you click on a safe square, you reveal a number telling you how many mines are found in the squares that are immediately adjacent. The aim of the game is to uncover all squares in the grid that do not contain a mine.\n\nIn this section, we'll make a DataFrame that contains the necessary data for a game of Minesweeper: coordinates of the squares, whether the square contains a mine and the number of mines found on adjacent squares.", "_____no_output_____" ], [ "**51**. Let's suppose we're playing Minesweeper on a 5 by 4 grid, i.e.\n```\nX = 5\nY = 4\n```\nTo begin, generate a DataFrame `df` with two columns, `'x'` and `'y'` containing every coordinate for this grid. That is, the DataFrame should start:\n```\n x y\n0 0 0\n1 0 1\n2 0 2\n```", "_____no_output_____" ], [ "**52**. For this DataFrame `df`, create a new column of zeros (safe) and ones (mine). The probability of a mine occuring at each location should be 0.4.", "_____no_output_____" ], [ "**53**. Now create a new column for this DataFrame called `'adjacent'`. 
This column should contain the number of mines found on adjacent squares in the grid. \n\n(E.g. for the first row, which is the entry for the coordinate `(0, 0)`, count how many mines are found on the coordinates `(0, 1)`, `(1, 0)` and `(1, 1)`.)", "_____no_output_____" ], [ "**54**. For rows of the DataFrame that contain a mine, set the value in the `'adjacent'` column to NaN.", "_____no_output_____" ], [ "**55**. Finally, convert the DataFrame to grid of the adjacent mine counts: columns are the `x` coordinate, rows are the `y` coordinate.", "_____no_output_____" ], [ "## Plotting\n\n### Visualize trends and patterns in data\n\nDifficulty: *medium*\n\nTo really get a good understanding of the data contained in your DataFrame, it is often essential to create plots: if you're lucky, trends and anomalies will jump right out at you. This functionality is baked into pandas and the puzzles below explore some of what's possible with the library.\n\n**56.** Pandas is highly integrated with the plotting library matplotlib, and makes plotting DataFrames very user-friendly! Plotting in a notebook environment usually makes use of the following boilerplate:\n\n```python\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.style.use('ggplot')\n```\n\nmatplotlib is the plotting library which pandas' plotting functionality is built upon, and it is usually aliased to ```plt```.\n\n```%matplotlib inline``` tells the notebook to show plots inline, instead of creating them in a separate window. \n\n```plt.style.use('ggplot')``` is a style theme that most people find agreeable, based upon the styling of R's ggplot package.\n\nFor starters, make a scatter plot of this random data, but use black X's instead of the default markers. 
\n\n```df = pd.DataFrame({\"xs\":[1,5,2,8,1], \"ys\":[4,2,1,9,6]})```\n\nConsult the [documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html) if you get stuck!", "_____no_output_____" ], [ "**57.** Columns in your DataFrame can also be used to modify colors and sizes. Bill has been keeping track of his performance at work over time, as well as how good he was feeling that day, and whether he had a cup of coffee in the morning. Make a plot which incorporates all four features of this DataFrame.\n\n(Hint: If you're having trouble seeing the plot, try multiplying the Series which you choose to represent size by 10 or more)\n\n*The chart doesn't have to be pretty: this isn't a course in data viz!*\n\n```\ndf = pd.DataFrame({\"productivity\":[5,2,3,1,4,5,6,7,8,3,4,8,9],\n \"hours_in\" :[1,9,6,5,3,9,2,9,1,7,4,2,2],\n \"happiness\" :[2,1,3,2,3,1,2,3,1,2,2,1,3],\n \"caffienated\" :[0,0,1,1,0,0,0,0,1,1,0,1,0]})\n```", "_____no_output_____" ], [ "**58.** What if we want to plot multiple things? Pandas allows you to pass in a matplotlib *Axis* object for plots, and plots will also return an Axis object.\n\nMake a bar plot of monthly revenue with a line plot of monthly advertising spending (numbers in millions)\n\n```\ndf = pd.DataFrame({\"revenue\":[57,68,63,71,72,90,80,62,59,51,47,52],\n \"advertising\":[2.1,1.9,2.7,3.0,3.6,3.2,2.7,2.4,1.8,1.6,1.3,1.9],\n \"month\":range(12)\n })\n```", "_____no_output_____" ], [ "Now we're finally ready to create a candlestick chart, which is a very common tool used to analyze stock price data. A candlestick chart shows the opening, closing, highest, and lowest price for a stock during a time window. 
The color of the \"candle\" (the thick part of the bar) is green if the stock closed above its opening price, or red if below.\n\n![Candlestick Example](img/candle.jpg)\n\nThis was initially designed to be a pandas plotting challenge, but it just so happens that this type of plot is just not feasible using pandas' methods. If you are unfamiliar with matplotlib, we have provided a function that will plot the chart for you so long as you can use pandas to get the data into the correct format.\n\nYour first step should be to get the data in the correct format using pandas' time-series grouping function. We would like each candle to represent an hour's worth of data. You can write your own aggregation function which returns the open/high/low/close, but pandas has a built-in which also does this.", "_____no_output_____" ], [ "The below cell contains helper functions. Call ```day_stock_data()``` to generate a DataFrame containing the prices a hypothetical stock sold for, and the time the sale occurred. 
Call ```plot_candlestick(df)``` on your properly aggregated and formatted stock data to print the candlestick chart.", "_____no_output_____" ] ], [ [ "import numpy as np\ndef float_to_time(x):\n return str(int(x)) + \":\" + str(int(x%1 * 60)).zfill(2) + \":\" + str(int(x*60 % 1 * 60)).zfill(2)\n\ndef day_stock_data():\n #NYSE is open from 9:30 to 4:00\n time = 9.5\n price = 100\n results = [(float_to_time(time), price)]\n while time < 16:\n elapsed = np.random.exponential(.001)\n time += elapsed\n if time > 16:\n break\n price_diff = np.random.uniform(.999, 1.001)\n price *= price_diff\n results.append((float_to_time(time), price))\n \n \n df = pd.DataFrame(results, columns = ['time','price'])\n df.time = pd.to_datetime(df.time)\n return df\n\n#Don't read me unless you get stuck!\ndef plot_candlestick(agg):\n \"\"\"\n agg is a DataFrame which has a DatetimeIndex and five columns: [\"open\",\"high\",\"low\",\"close\",\"color\"]\n \"\"\"\n fig, ax = plt.subplots()\n for time in agg.index:\n ax.plot([time.hour] * 2, agg.loc[time, [\"high\",\"low\"]].values, color = \"black\")\n ax.plot([time.hour] * 2, agg.loc[time, [\"open\",\"close\"]].values, color = agg.loc[time, \"color\"], linewidth = 10)\n\n ax.set_xlim((8,16))\n ax.set_ylabel(\"Price\")\n ax.set_xlabel(\"Hour\")\n ax.set_title(\"OHLC of Stock Value During Trading Day\")\n plt.show()", "_____no_output_____" ] ], [ [ "**59.** Generate a day's worth of random stock data, and aggregate / reformat it so that it has hourly summaries of the opening, highest, lowest, and closing prices", "_____no_output_____" ], [ "**60.** Now that you have your properly-formatted data, try to plot it yourself as a candlestick chart. Use the ```plot_candlestick(df)``` function above, or matplotlib's [```plot``` documentation](https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.plot.html) if you get stuck.", "_____no_output_____" ], [ "*More exercises to follow soon...*", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
d067fb140799de6f0debdde8f26fbe9a79ee9c7c
13,430
ipynb
Jupyter Notebook
01. Getting Started with Python/Python_Revision_and_Statistical_Methods.ipynb
Jamess-ai/ai-with-python-series
4346a836aefd652d9fdc7f0ad3bb856e5c129b22
[ "MIT" ]
23
2021-07-16T11:32:17.000Z
2022-03-14T02:28:32.000Z
01. Getting Started with Python/Python_Revision_and_Statistical_Methods.ipynb
Jamess-ai/ai-with-python-series
4346a836aefd652d9fdc7f0ad3bb856e5c129b22
[ "MIT" ]
1
2021-09-28T15:17:42.000Z
2021-09-28T15:48:09.000Z
01. Getting Started with Python/Python_Revision_and_Statistical_Methods.ipynb
Jamess-ai/ai-with-python-series
4346a836aefd652d9fdc7f0ad3bb856e5c129b22
[ "MIT" ]
28
2021-08-01T09:10:18.000Z
2022-03-24T12:47:49.000Z
36.594005
424
0.539762
[ [ [ "<a href=\"https://colab.research.google.com/github/rjrahul24/ai-with-python-series/blob/main/01.%20Getting%20Started%20with%20Python/Python_Revision_and_Statistical_Methods.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "**Inheritence in Python**\n\nObject Oriented Programming is a coding paradigm that revolves around creating modular code and stopping mulitple uses of the same structure. It is aimed at increasing stability and usability of code. It consists of some well-known concepts stated below:\n\n\n1. Classes: These often show a collection of functions and attributes that are fastened to a precise name and represent an abstract container.\n2. Attributes: Generally, the data that is associated with each class. Examples are variables declared during creation of the class.\n3. Objects: An instance generated from the class. There can be multiple objects of a class and every individual object takes on the properties of the class. ", "_____no_output_____" ] ], [ [ "# Implementation of Classes in Python\n# Creating a Class Math with 2 functions\nclass Math:\n def subtract (self, i, j):\n return i-j\n def add (self, x, y):\n return x+y\n\n# Creating an object of the class Math\nmath_child = Math()\ntest_int_A = 10\ntest_int_B = 20\nprint(math_child.subtract(test_int_B, test_int_A))", "10\n" ], [ "# Creating a Class Person with an attribute and an initialization function\nclass Person:\n name = 'George'\n def __init__ (self):\n self.age = 34\n\n# Creating an object of the class and printing its attributes\np1 = Person()\nprint (p1.name)\nprint (p1.age)", "George\n34\n" ] ], [ [ "**Constructors and Inheritance**\n\nThe constructor is an initialization function that is always called when a class’s instance is created. The constructor is named __init__() in Python and defines the specifics of instantiating a class and its attributes. 
\nClass inheritance is a concept of taking values of a class from its origin and giving the same properties to a child class. It creates relationship models like “Class A is a Class B”, like a triangle (child class) is a shape (parent class). All the functions and attributes of a superclass are inherited by the subclass. \n1.\tOverriding: During the inheritance, the behavior of the child class or the subclass can be modified. Doing this modification on functions is class “overriding” and is achieved by declaring functions in the subclass with the same name. Functions created in the subclass will take precedence over those in the parent class.\n2.\tComposition: Classes can also be built from other smaller classes that support relationship models like “Class A has a Class B”, like a Department has Students.\n3.\tPolymorphism: The functionality of similar looking functions can be changed in run-time, during their implementation. This is achieved using Polymorphism, that includes two objects of different parent class but having the same set of functions. 
The outward look of these functions is the same, but implementations differ.\n", "_____no_output_____" ] ], [ [ "# Creating a class and instantiating variables\nclass Animal_Dog:\n species = \"Canis\"\n def __init__(self, name, age):\n self.name = name\n self.age = age\n # Instance method\n def description(self):\n return f\"{self.name} is {self.age} years old\"\n # Another instance method\n def animal_sound(self, sound):\n return f\"{self.name} says {sound}\"\n\n\n# Check the object’s type\nAnimal_Dog(\"Bunny\", 7)\n\n\n# Even though a and b are both instances of the Dog class, they represent two distinct objects in memory.\na = Animal_Dog(\"Fog\", 6)\nb = Animal_Dog(\"Bunny\", 7)\na == b\n\n# Instantiating objects with the class’s constructor arguments\nfog = Animal_Dog(\"Fog\", 6)\nbunny = Animal_Dog(\"Bunny\", 7)\nprint (bunny.name)\n\nprint (bunny.age)", "Bunny\n7\n" ], [ "# Accessing attributes directly\nprint (bunny.species)", "Canis\n" ], [ "# Creating a new Object to access through instance functions\nfog = Animal_Dog(\"Fog\", 6)\nfog.description()\n\n\nfog.animal_sound(\"Whoof Whoof\")\n\n\nfog.animal_sound(\"Bhoof Whoof\")\n\n\n# Inheriting the Class\nclass GoldRet(Animal_Dog):\n def speak(self, sound=\"Warf\"):\n return f\"{self.name} says {sound}\"\n\n\nbunny = GoldRet(\"Bunny\", 5)\nbunny.speak()\n\n\nbunny.speak(\"Grrr Grrr\")\n\n\n# Code Snippet 3: Variables and data types\n\n\nint_var = 100 # Integer variable\nfloat_var = 1000.0 # Float value\nstring_var = \"John\" # String variable\nprint (int_var)\n\n\nprint (float_var)\n\nprint (string_var)", "100\n1000.0\nJohn\n" ] ], [ [ "Variables and Data Types in Python\n\nVariables are reserved locations in the computer’s memory that store values defined within them. Whenever a variable is created, a piece of the computer’s memory is allocated to it. Based on the data type of this declared variable, the interpreter allocates varied chunks of memory. 
Therefore, basis the assignment of variables as integer, float, strings, etc. different sizes of memory allocations are invoked.\n•\tDeclaration: Variables in Python do not need explicit declaration to reserve memory space. This happens automatically when a value is assigned. The (=) sign is used to assign values to variables. \n•\tMultiple Assignment: Python allows for multiple variables to hold a single value and this declaration can be done together for all variables. \n•\tDeleting References: Memory reference once created can also be deleted. The 'del' statement is used to delete the reference to a number object. Multiple object deletion is also supported by the 'del' statement.\n•\tStrings: Strings are a set of characters, that Python allows representation through single or double quotes. String subsets can be formed using the slice operator ([ ] and [:] ) where indexing starts from 0 on the left and -1 on the right. The (+) sign is the string concatenation operator and the (*) sign is the repetition operator.\nDatatype Conversion\nFunction\tDescription\nint(x [,base])\tConverts given input to integer. Base is used for string conversions.\nlong(x [,base] )\tConverts given input to a long integer\nfloat(x)\tFollows conversion to floating-point number.\ncomplex(real [,imag])\tUsed for creating a complex number.\nstr(x)\tConverts any given object to a string\neval(str)\tEvaluates given string and returns an object.\ntuple(s)\tConversion to tuple\nlist(s)\tList conversion of given input\nset(s)\tConverts the given value to a set\nunichr(x)\tConversion from an integer to Unicode character.\n\nLooking at Variables and Datatypes\nData stored as Python’s variables is abstracted as objects. Data is represented by objects or through relations between individual objects. 
Therefore, every variable and its corresponding values are an object of a class, depending on the stored data.\n", "_____no_output_____" ] ], [ [ "# Multiple Assignment: All are assigned to the same memory location\na = b = c = 1\n# Assigning multiple variables with multiple values\na,b,c = 1,2,\"jacob\"\n\n\n# Assigning and deleting variable references\nvar1 = 1\nvar2 = 10\n\n\ndel var1 # Removes the reference of var1\ndel var2\n\n\n# Basic String Operations in Python\nstr = 'Hello World!'\nprint (str) \n\n\n# Print the first character of string variable\nprint (str[0])\n\n\n# Prints characters from 3rd to 5th positions\nprint (str[2:5]) \n\n\n# Print the string twice\nprint (str * 2)\n\n\n# Concatenate the string and print\nprint (str + \"TEST\")", "Hello World!\nH\nllo\nHello World!Hello World!\nHello World!TEST\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d067fc2cee0e886e4db8b0def421daab6c548b76
21,434
ipynb
Jupyter Notebook
source/dask/dask.compute in distributed.ipynb
supriyascode/MODIS-Aggregation
cc6ce992b81c358e6b7cb2635a5b8ff72d6e99f9
[ "Apache-2.0" ]
null
null
null
source/dask/dask.compute in distributed.ipynb
supriyascode/MODIS-Aggregation
cc6ce992b81c358e6b7cb2635a5b8ff72d6e99f9
[ "Apache-2.0" ]
null
null
null
source/dask/dask.compute in distributed.ipynb
supriyascode/MODIS-Aggregation
cc6ce992b81c358e6b7cb2635a5b8ff72d6e99f9
[ "Apache-2.0" ]
null
null
null
37.211806
494
0.55118
[ [ [ "import pandas as pd\nimport numpy as np\nimport dask.array as da\nimport dask.dataframe as dd\nimport time\nimport math\n\nfrom netCDF4 import Dataset\nimport os,datetime,sys,fnmatch\nimport h5py", "_____no_output_____" ], [ "from dask.distributed import Client, LocalCluster\ncluster = LocalCluster()\nclient = Client(cluster)", "/Users/dprakas1/anaconda3/lib/python3.7/site-packages/distributed/dashboard/core.py:74: UserWarning: \nPort 8787 is already in use. \nPerhaps you already have a cluster running?\nHosting the diagnostics dashboard on a random port instead.\n warnings.warn(\"\\n\" + msg)\n" ], [ "client", "_____no_output_____" ], [ "%%time\ndef read_filelist(loc_dir,prefix,unie,fileformat):\n # Read the filelist in the specific directory\n str = os.popen(\"ls \"+ loc_dir + prefix + unie + \"*.\"+fileformat).read()\n fname = np.array(str.split(\"\\n\"))\n fname = np.delete(fname,len(fname)-1)\n \n return fname\n\n\ndef read_MODIS(fname1,fname2,verbose=False): # READ THE HDF FILE\n # Read the cloud mask from MYD06_L2 product')\n ncfile=Dataset(fname1,'r')\n CM1km = np.array(ncfile.variables['Cloud_Mask_1km'])\n CM = (np.array(CM1km[:,:,0],dtype='byte') & 0b00000110) >>1\n #ncfile = Dataset(fname1, \"r\")\n #CM = myd06.variables[\"Cloud_Mask_1km\"][:,:,:] # Reading Specific Variable 'Cloud_Mask_1km'.\n #CM = (np.array(CM[:,:,0],dtype='byte') & 0b00000110) >>1\n ncfile.close()\n \n ncfile=Dataset(fname2,'r')\n lat = np.array(ncfile.variables['Latitude'])\n lon = np.array(ncfile.variables['Longitude'])\n #ncfile = Dataset(MOD03_files, \"r\")\n #latitude = myd03.variables[\"Latitude\"][:,:] # Reading Specific Variable 'Latitude'.\n #latitude = np.array(latitude).byteswap().newbyteorder() # Addressing Byteswap For Big Endian Error.\n #longitude = myd03.variables[\"Longitude\"][:,:] # Reading Specific Variable 'Longitude'.\n attr_lat = ncfile.variables['Latitude']._FillValue\n attr_lon = ncfile.variables['Longitude']._FillValue\n return lat,lon,CM\n\n\ndef 
countzero(x, axis=1):\n #print(x)\n count0 = 0\n count1 = 0\n for i in x:\n if i <= 1:\n count0 +=1\n #print(count0/len(x))\n return count0/len(x)\n\nsatellite = 'Aqua'\n\nMYD06_dir= '/Users/dprakas1/Desktop/modis_files/'\nMYD06_prefix = 'MYD06_L2.A2008'\nMYD03_dir= '/Users/dprakas1/Desktop/modis_files/'\nMYD03_prefix = 'MYD03.A2008'\nfileformat = 'hdf'\n\nfname1,fname2 = [],[]\n\n\ndays = np.arange(1,31,dtype=np.int)\nfor day in days:\n dc ='%03i' % day\n fname_tmp1 = read_filelist(MYD06_dir,MYD06_prefix,dc,fileformat)\n fname_tmp2 = read_filelist(MYD03_dir,MYD03_prefix,dc,fileformat)\n fname1 = np.append(fname1,fname_tmp1)\n fname2 = np.append(fname2,fname_tmp2)\n\n# Initiate the number of day and total cloud fraction\nfiles = np.arange(len(fname1))\n\n\n\nfor j in range(0,1):#hdfs:\n print('steps: ',j+1,'/ ',(fname1)) \n\n # Read Level-2 MODIS data\n lat,lon,CM = read_MODIS(fname1[j],fname2[j])\nprint((fname1))\nprint((fname2))\n#rint(CM)\n#lat = lat.ravel()\n#lon = lon.ravel()\n#CM = CM.ravel()\nCM.shape \n\n\n\ncm = np.zeros((2030,1354), dtype=np.float32)\n\nfor MOD06_file in fname1:\n #print(MOD06_file)\n myd06 = Dataset(MOD06_file, \"r\")\n CM = myd06.variables[\"Cloud_Mask_1km\"][:,:,0]# Reading Specific Variable 'Cloud_Mask_1km'.\n CM = (np.array(CM,dtype='byte') & 0b00000110) >>1\n CM = np.array(CM).byteswap().newbyteorder()\n #cm = da.from_array(CM, chunks =(2030,1354))\n #print(CM.shape)\n #cm = np.concatenate((cm,CM))\n cm = da.concatenate((cm,CM),axis=0)\n #bit0 = np.dstack((bit0,bit0r))\n #bit12 = np.dstack((bit12,bit12r))\n \nprint('The Cloud Mask Array Shape Is: ',cm.shape)\n\n\nlat = np.zeros((2030,1354), dtype=np.float32)\nlon = np.zeros((2030,1354), dtype=np.float32)\nfor MOD03_file in fname2:\n #print(MOD03_file)\n myd03 = Dataset(MOD03_file, \"r\")\n latitude = myd03.variables[\"Latitude\"][:,:]# Reading Specific Variable 'Latitude'.\n #lat = da.from_array(latitude, chunks =(2030,1354))\n lat = da.concatenate((lat,latitude),axis=0)\n\n\n 
longitude = myd03.variables[\"Longitude\"][:,:] # Reading Specific Variable 'Longitude'.\n #lon = da.from_array(longitude, chunks =(2030,1354))\n lon = da.concatenate((lon,longitude),axis=0)\n \nprint('Longitude Shape Is: ',lon.shape)\nprint('Latitude Shape Is: ',lat.shape)\n\ncm=da.ravel(cm)\nlat=da.ravel(lat)\nlon=da.ravel(lon)\n\nlon=lon.astype(int)\nlat=lat.astype(int)\ncm=cm.astype(int)\n\n\nLat=lat.to_dask_dataframe()\nLon=lon.to_dask_dataframe()\nCM=cm.to_dask_dataframe()\n\ndf=dd.concat([Lat,Lon,CM],axis=1,interleave_partitions=False)\n\ncols = {0:'Latitude',1:'Longitude',2:'CM'}\ndf = df.rename(columns=cols)\n#df.compute()\n\ndf2=df.groupby(['Longitude','Latitude']).CM.apply(countzero).reset_index()\n\ndf3=df2.compute(num_workers=4)\n\ncombs=[]\nfor x in range(-89,91):\n for y in range(-179,181):\n combs.append((x, y))\n \ndf_1=pd.DataFrame(combs)\ndf_1.columns=['Latitude','Longitude']\ndf_2=dd.from_pandas(df_1,npartitions=1)\n\ndf4=pd.merge(df_1, df3,on=('Longitude','Latitude'), how='left')\n\ndf5=df4['CM'].values\n\nb=df5.reshape(180,360)\n\nprint(b)\n\n\n\n\n\n", "steps: 1 / ['/Users/dprakas1/Desktop/modis_files/MYD06_L2.A2008001.0000.006.2013341193524.hdf'\n '/Users/dprakas1/Desktop/modis_files/MYD06_L2.A2008001.0005.006.2013341193207.hdf'\n '/Users/dprakas1/Desktop/modis_files/MYD06_L2.A2008001.0010.006.2013341192125.hdf']\n['/Users/dprakas1/Desktop/modis_files/MYD06_L2.A2008001.0000.006.2013341193524.hdf'\n '/Users/dprakas1/Desktop/modis_files/MYD06_L2.A2008001.0005.006.2013341193207.hdf'\n '/Users/dprakas1/Desktop/modis_files/MYD06_L2.A2008001.0010.006.2013341192125.hdf']\n['/Users/dprakas1/Desktop/modis_files/MYD03.A2008001.0000.006.2012066122450.hdf'\n '/Users/dprakas1/Desktop/modis_files/MYD03.A2008001.0005.006.2012066122516.hdf'\n '/Users/dprakas1/Desktop/modis_files/MYD03.A2008001.0010.006.2012066122416.hdf']\nThe Cloud Mask Array Shape Is: (8120, 1354)\nLongitude Shape Is: (8120, 1354)\nLatitude Shape Is: (8120, 1354)\n" ], [ "%%time\nimport 
pandas as pd\nimport numpy as np\nimport dask.array as da\nimport dask.dataframe as dd\nimport dask.delayed as delayed\nimport time\nimport math\n#import graphviz\nfrom netCDF4 import Dataset\nimport os,datetime,sys,fnmatch\nimport h5py\nimport dask\n\ndef read_filelist(loc_dir,prefix,unie,fileformat):\n # Read the filelist in the specific directory\n str = os.popen(\"ls \"+ loc_dir + prefix + unie + \"*.\"+fileformat).read()\n fname = np.array(str.split(\"\\n\"))\n fname = np.delete(fname,len(fname)-1)\n \n return fname\n\ndef read_MODIS(fname1,fname2,verbose=False): # READ THE HDF FILE\n # Read the cloud mask from MYD06_L2 product')\n ncfile=Dataset(fname1,'r')\n CM1km = np.array(ncfile.variables['Cloud_Mask_1km'])\n CM = (np.array(CM1km[:,:,0],dtype='byte') & 0b00000110) >>1\n #ncfile = Dataset(fname1, \"r\")\n #CM = myd06.variables[\"Cloud_Mask_1km\"][:,:,:] # Reading Specific Variable 'Cloud_Mask_1km'.\n #CM = (np.array(CM[:,:,0],dtype='byte') & 0b00000110) >>1\n CM=delayed(CM)\n ncfile.close()\n \n ncfile=Dataset(fname2,'r')\n lat = np.array(ncfile.variables['Latitude'])\n lon = np.array(ncfile.variables['Longitude'])\n #ncfile = Dataset(MOD03_files, \"r\")\n #latitude = myd03.variables[\"Latitude\"][:,:] # Reading Specific Variable 'Latitude'.\n #latitude = np.array(latitude).byteswap().newbyteorder() # Addressing Byteswap For Big Endian Error.\n #longitude = myd03.variables[\"Longitude\"][:,:] # Reading Specific Variable 'Longitude'.\n attr_lat = ncfile.variables['Latitude']._FillValue\n attr_lon = ncfile.variables['Longitude']._FillValue\n lat=delayed(lat)\n lon=delayed(lon)\n return lat,lon,CM\n\n\ndef countzero(x, axis=1):\n #print(x)\n count0 = 0\n count1 = 0\n for i in x:\n if i <= 1:\n count0 +=1\n #print(count0/len(x))\n return (count0/len(x))\n\n\n\nMYD06_dir= '/Users/dprakas1/Desktop/modis_files/'\nMYD06_prefix = 'MYD06_L2.A2008'\nMYD03_dir= '/Users/dprakas1/Desktop/modis_files/'\nMYD03_prefix = 'MYD03.A2008'\nfileformat = 'hdf'\n\nfname1,fname2 = 
[],[]\n\n\ndays = np.arange(1,31,dtype=np.int)\nfor day in days:\n dc ='%03i' % day\n fname_tmp1 = read_filelist(MYD06_dir,MYD06_prefix,dc,fileformat)\n fname_tmp2 = read_filelist(MYD03_dir,MYD03_prefix,dc,fileformat)\n fname1 = np.append(fname1,fname_tmp1)\n fname2 = np.append(fname2,fname_tmp2)\n\n# Initiate the number of day and total cloud fraction\nfiles = np.arange(len(fname1))\n\n\n\nfor j in range(0,1):#hdfs:\n ('steps: ',j+1,'/ ',(fname1)) \n\n # Read Level-2 MODIS data\n lat,lon,CM = read_MODIS(fname1[j],fname2[j])\n\n#rint(CM)\nlat = lat.compute()\nlon = lon.compute()\nCM = CM.compute()\n\n\n\n\ncloud_pix = np.zeros((180, 360))\ndelayed_b1=[]\ndef aggregateOneFileData(M06_file, M03_file):\n cm = np.zeros((2030,1354), dtype=np.float32)\n lat = np.zeros((2030,1354), dtype=np.float32)\n lon = np.zeros((2030,1354), dtype=np.float32)\n \n print(x,y)\n myd06 = Dataset(M06_file, \"r\")\n CM = myd06.variables[\"Cloud_Mask_1km\"][:,:,0]# Reading Specific Variable 'Cloud_Mask_1km'.\n CM = (np.array(CM,dtype='byte') & 0b00000110) >>1\n CM = np.array(CM).byteswap().newbyteorder()\n \n print(\"CM intial shape:\",CM.shape)\n cm = da.concatenate((cm,CM),axis=0)\n #print(\"CM shape after con:\",cm.shape)\n cm=da.ravel(cm)\n print(\"cm shape after ravel:\",cm.shape)\n myd03 = Dataset(M03_file, \"r\")\n latitude = myd03.variables[\"Latitude\"][:,:]\n longitude = myd03.variables[\"Longitude\"][:,:]\n print(\"Lat intial shape:\",latitude.shape)\n print(\"lon intial shape:\",longitude.shape)\n \n lat = da.concatenate((lat,latitude),axis=0)\n lon = da.concatenate((lon,longitude),axis=0)\n print(\"lat shape after con:\",lat.shape)\n print(\"lon shape after con:\",lon.shape)\n \n lat=da.ravel(lat)\n lon=da.ravel(lon)\n \n print(\"lat shape after ravel:\",lat.shape)\n print(\"lon shape after ravel:\",lon.shape)\n cm=cm.astype(int)\n lon=lon.astype(int)\n lat=lat.astype(int)\n \n Lat=(lat.to_dask_dataframe())\n Lon=(lon.to_dask_dataframe())\n CM=(cm.to_dask_dataframe())\n 
df=(dd.concat([Lat,Lon,CM],axis=1,interleave_partitions=False))\n print(type(df))\n \n cols = {0:'Latitude',1:'Longitude',2:'CM'}\n df = df.rename(columns=cols)\n \n df2=delayed(df.groupby(['Longitude','Latitude']).CM.apply(countzero).reset_index())\n print(type(df2))\n df3=df2.compute()\n print(type(df3))\n \n df4=[df2['Longitude'].values,df2['Latitude'].values,df2['CM'].values]\n print(type(df4))\n \n delayed_b1.append(df4)\n \n \n \n return delayed_b1\n \n \nfor x,y in zip(fname1,fname2):\n results = aggregateOneFileData(x,y)\n print(results)\n \ncf = np.zeros((180,360))\ncf[:]=np.nan\nfor i in range(len(delayed_b1)):\n cf[(delayed_b1[i][1].compute()-90),(180+delayed_b1[i][0].compute())] = delayed_b1[i][2].compute()\nprint(cf)\nclient.close()", "/Users/dprakas1/Desktop/modis_files/MYD06_L2.A2008001.0000.006.2013341193524.hdf /Users/dprakas1/Desktop/modis_files/MYD03.A2008001.0000.006.2012066122450.hdf\nCM intial shape: (2030, 1354)\ncm shape after ravel: (5497240,)\nLat intial shape: (2030, 1354)\nlon intial shape: (2030, 1354)\nlat shape after con: (4060, 1354)\nlon shape after con: (4060, 1354)\nlat shape after ravel: (5497240,)\nlon shape after ravel: (5497240,)\n<class 'dask.dataframe.core.DataFrame'>\n<class 'dask.delayed.Delayed'>\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0681cfcac05a45a800a0776c8c4afe287f6b73d
15,050
ipynb
Jupyter Notebook
test_trained_network.ipynb
Vela-Yang/vela_SR
0d483f517dbeacc114712342ec4fbe577b4eba2c
[ "Apache-2.0" ]
1
2021-08-18T08:35:20.000Z
2021-08-18T08:35:20.000Z
test_trained_network.ipynb
VelaQ/Deep-Learning-based-Image-Reconstruction
0d483f517dbeacc114712342ec4fbe577b4eba2c
[ "Apache-2.0" ]
null
null
null
test_trained_network.ipynb
VelaQ/Deep-Learning-based-Image-Reconstruction
0d483f517dbeacc114712342ec4fbe577b4eba2c
[ "Apache-2.0" ]
null
null
null
35.748219
105
0.447043
[ [ [ "gpu_info = !nvidia-smi\r\ngpu_info = '\\n'.join(gpu_info)\r\nif gpu_info.find('failed') >= 0:\r\n print('Select the Runtime > \"Change runtime type\" menu to enable a GPU accelerator, ')\r\n print('and then re-execute this cell.')\r\nelse:\r\n print(gpu_info)", "Thu Jul 29 09:17:58 2021 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 470.42.01 Driver Version: 460.32.03 CUDA Version: 11.2 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 63C P0 31W / 70W | 0MiB / 15109MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ], [ "!git clone https://github.com/Vela-Yang/vela_SR.git SR", "fatal: destination path 'SR' already exists and is not an empty directory.\n" ], [ "import torch\r\nfrom SR.models import AVS3Filter\r\nfrom SR.dataset import VelaDataset\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport IPython.display as ipd\r\nfrom tqdm.notebook import tqdm\r\n\r\n\r\n# 是否有GPU加速\r\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\nif device == \"cuda\":\r\n num_workers = 16\r\n pin_memory = True\r\nelse:\r\n num_workers = 0\r\n pin_memory = False\r\n\r\n", "_____no_output_____" ], [ "# 加载网络模型\r\n# load the network\r\nnet_filename = 
'./SR/Trained_Networks/256.pkl'\r\nmodel = torch.load(net_filename, map_location=device)", "_____no_output_____" ], [ "def clear_overflow(tensor):\r\n zero_value = torch.tensor([0]).to(device, torch.float)\r\n neg_clear = torch.heaviside(tensor, zero_value)\r\n result_tensor = tensor * neg_clear + 0.01\r\n overflow = result_tensor - 255\r\n pos_clear = torch.heaviside(overflow, zero_value) * overflow\r\n result_tensor = result_tensor - pos_clear\r\n result_tensor = result_tensor.floor()\r\n result_tensor = result_tensor.squeeze(0)\r\n result_tensor = result_tensor.to(\"cpu\", torch.uint8)\r\n return result_tensor", "_____no_output_____" ], [ "\r\nfrom torchvision.io import read_image, ImageReadMode, write_jpeg\r\ntorch.cuda.empty_cache()\r\n# 输入图像\r\nim = read_image('1.jpg', mode=ImageReadMode.GRAY)\r\nim = im.to(device, torch.float) / 255\r\n# 网络输出\r\nout = model(im.unsqueeze(0)) * 255\r\n\r\nout = clear_overflow(out)\r\nwrite_jpeg(out.cpu(), '2.jpg')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d068315804dd11e57da1b8958200dc3f0fa1a12c
569,220
ipynb
Jupyter Notebook
analyse_regression_results.ipynb
SusTra/TraCo
98921de5337e15be0b74cb8d8297d33f47cd133f
[ "MIT" ]
null
null
null
analyse_regression_results.ipynb
SusTra/TraCo
98921de5337e15be0b74cb8d8297d33f47cd133f
[ "MIT" ]
null
null
null
analyse_regression_results.ipynb
SusTra/TraCo
98921de5337e15be0b74cb8d8297d33f47cd133f
[ "MIT" ]
null
null
null
844.540059
80,952
0.949062
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "sns.set_style(\"white\")\n#x_size, y_size = 12,8\nplt.rcParams.update({'font.size': 12})", "_____no_output_____" ], [ "df = pd.read_csv(\"regression_results.csv\")", "_____no_output_____" ], [ "f = open(\"data\\\\counters_per_route.txt\", encoding=\"utf8\")\nroutes = []\n\nfor l in f:\n if l.startswith(\"#\") or (l == \"\\n\"):\n continue\n ss = l.strip().split(\";\")\n route_id = ss[0] \n routes.append(route_id)\n #route_id = int(route_id)\n\nroutes", "_____no_output_____" ], [ "# feature_labels\ndef set_feature_labels(features, sep=\"\\n\"):\n features=features.replace(\"'\",\"\").replace(\"[\",\"\").replace(\"]\",\"\").replace('\"','')\n features=features.replace(\"workday, weather, time_x, time_y\", \"basic\")\n features=features.replace(\"900000\",\"\")\n features=features.replace(\" \",\"\")\n features=features.replace(\",\",sep)\n \n return features", "_____no_output_____" ], [ "df['feature_labels'] = df['features'].map(set_feature_labels)", "_____no_output_____" ] ], [ [ "### Models and features", "_____no_output_____" ] ], [ [ "#for route in df.route.unique():\nfor route in routes:\n try:\n df2 = df[df['route'] == route]\n sns.barplot(data=df2, x=\"model\", y=\"R2_test\", hue=\"feature_labels\")\n y1,y2 = plt.ylim()\n plt.ylim((max(0,y1),y2))\n plt.title(route)\n plt.ylabel(\"$R^2$(test)\")\n\n f = plt.gcf()\n f.set_size_inches(15, 10)\n\n plt.savefig(f\"figs\\\\models\\\\models_{route}.pdf\", bbox_inches=\"tight\")\n plt.savefig(f\"figs\\\\models\\\\models_{route}.png\", bbox_inches=\"tight\")\n\n\n plt.show()\n except:\n pass", "_____no_output_____" ], [ "#df[(df['route']=='Dunajska1') & (df['feature_labels']==\"0655-1\")]", "_____no_output_____" ] ], [ [ "### Best features", "_____no_output_____" ] ], [ [ "fig, axs = plt.subplots(4, 2, sharey=False)\n\n#for i, (route,ax) in enumerate(zip(df.route.unique(), 
axs.flatten())):\nfor i, (route,ax) in enumerate(zip(routes, axs.flatten())):\n try:\n df2 = df[df['route'] == route]\n df3 = pd.DataFrame()\n\n for features in df2.feature_labels.unique():\n df_features = df2[df2['feature_labels'] == features]\n df_best_model = df_features[df_features['R2_test'] == df_features['R2_test'].max()]\n df3 = df3.append(df_best_model, ignore_index=True)\n #print(df_best_model.model)\n\n sns.barplot(data=df3, x=\"feature_labels\", y=\"R2_test\", ax=ax)\n\n #fig = plt.gcf()\n #fig.setsiz\n y1,y2 = ax.get_ylim()\n ax.set_ylim((max(0,y1),y2))\n\n\n ax.set_title(route)\n ax.set_ylabel(\"$R^2$(test)\")\n if i < 6:\n ax.set_xlabel(\"\")\n else:\n ax.set_xlabel(\"features\")\n except:\n pass\n\n\nfig.set_size_inches(15, 20) \n\nplt.savefig(\"figs\\\\models\\\\features.pdf\", bbox_inches=\"tight\")\nplt.savefig(\"figs\\\\models\\\\features.png\", bbox_inches=\"tight\")\n\nplt.show()\n \n \n\n ", "_____no_output_____" ], [ "fig, axs = plt.subplots(4, 2, sharey=False)\n\n#for i, (route,ax) in enumerate(zip(df.route.unique(), axs.flatten())):\nfor i, (route,ax) in enumerate(zip(routes, axs.flatten())):\n try:\n df2 = df[df['route'] == route]\n df3 = pd.DataFrame()\n\n for features in df2.feature_labels.unique():\n df_features = df2[df2['feature_labels'] == features]\n df_best_model = df_features[df_features['R2_train'] == df_features['R2_train'].max()]\n df3 = df3.append(df_best_model, ignore_index=True)\n #print(df_best_model.model)\n\n sns.barplot(data=df3, x=\"feature_labels\", y=\"R2_train\", ax=ax)\n\n #fig = plt.gcf()\n #fig.setsiz\n y1,y2 = ax.get_ylim()\n ax.set_ylim((max(0,y1),y2))\n\n\n ax.set_title(route)\n ax.set_ylabel(\"$R^2$(train)\")\n if i < 6:\n ax.set_xlabel(\"\")\n else:\n ax.set_xlabel(\"features\")\n except:\n pass\n\n\nfig.set_size_inches(15, 20) \n\nplt.savefig(\"figs\\\\models\\\\features_train.pdf\", bbox_inches=\"tight\")\nplt.savefig(\"figs\\\\models\\\\features_train.png\", bbox_inches=\"tight\")\n\nplt.show()\n ", 
"_____no_output_____" ] ], [ [ "### Best models", "_____no_output_____" ] ], [ [ "#for features in df.features.unique():\nfig, axs = plt.subplots(4, 2, sharey=False)\n\n#for i, (route,ax) in enumerate(zip(df.route.unique(), axs.flatten())):\nfor i, (route,ax) in enumerate(zip(routes, axs.flatten())):\n try:\n df2 = df[df['route'] == route]\n df3 = pd.DataFrame()\n\n #features = df2.features.unique()\n #max_feature = sorted(features, key=len, reverse=True)[0]\n #df2 = df2[df2['features']==max_feature]\n for model in df2.model.unique():\n df_model = df2[df2['model'] == model]\n df_best_model = df_model[df_model['R2_test'] == df_model['R2_test'].max()]\n df3 = df3.append(df_best_model, ignore_index=True)\n #print(df_best_model.feature_labels)\n\n sns.barplot(data=df3, x=\"model\", y=\"R2_test\", ax=ax)\n ax.set_title(route)\n ax.set_ylabel(\"$R^2$(test)\")\n if i < 6:\n ax.set_xlabel(\"\")\n else:\n ax.set_xlabel(\"models\")\n except:\n pass\n \nfig.set_size_inches(15, 20)\n\nplt.savefig(\"figs\\\\models\\\\models.pdf\", bbox_inches=\"tight\")\nplt.savefig(\"figs\\\\models\\\\models.png\", bbox_inches=\"tight\")\n\nplt.show()\n ", "_____no_output_____" ], [ "#for features in df.features.unique():\nfig, axs = plt.subplots(4, 2, sharey=False)\n\n#for i, (route,ax) in enumerate(zip(df.route.unique(), axs.flatten())):\nfor i, (route,ax) in enumerate(zip(routes, axs.flatten())):\n try:\n df2 = df[df['route'] == route]\n df3 = pd.DataFrame()\n\n #features = df2.features.unique()\n #max_feature = sorted(features, key=len, reverse=True)[0]\n #df2 = df2[df2['features']==max_feature]\n for model in df2.model.unique():\n df_model = df2[df2['model'] == model]\n df_best_model = df_model[df_model['R2_train'] == df_model['R2_train'].max()]\n df3 = df3.append(df_best_model, ignore_index=True)\n #print(df_best_model.feature_labels)\n\n sns.barplot(data=df3, x=\"model\", y=\"R2_train\", ax=ax)\n ax.set_title(route)\n ax.set_ylabel(\"$R^2$(train)\")\n if i < 6:\n ax.set_xlabel(\"\")\n 
else:\n ax.set_xlabel(\"models\")\n except:\n pass\n \nfig.set_size_inches(15, 20)\n\nplt.savefig(\"figs\\\\models\\\\models_train.pdf\", bbox_inches=\"tight\")\nplt.savefig(\"figs\\\\models\\\\models_train.png\", bbox_inches=\"tight\")\n\nplt.show()", "_____no_output_____" ] ], [ [ "### Best results", "_____no_output_____" ] ], [ [ "df_best = pd.read_csv(\"regression_results_best.csv\")\ndf_best['feature_labels'] = df_best['features'].map(lambda x: set_feature_labels(x, sep=\", \"))\ndf_best['R2_test'] = round(df_best['R2_test'],3)\ndf_best['R2_train'] = round(df_best['R2_train'],3)\ndf_best = df_best[['route', 'feature_labels','model', 'R2_train','R2_test']]\ndf_best.columns = ['segment', 'features', 'best model', 'R2(train)', 'R2(test)']\n", "_____no_output_____" ], [ "f = open(\"best_results.txt\", \"w\")\n\nprint(df_best.to_latex(index=False), file=f)\n\nf.close()", "_____no_output_____" ], [ "df_best", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d06839430da01fa89fccbd77f751c2b8cf687c71
124,377
ipynb
Jupyter Notebook
Mini-Projects/IMDB Sentiment Analysis - XGBoost (Hyperparameter Tuning).ipynb
itirkaa/sagemaker-deployment
52d96fc3e507c30f6498b4c89c2be41dba0dc1b2
[ "MIT" ]
null
null
null
Mini-Projects/IMDB Sentiment Analysis - XGBoost (Hyperparameter Tuning).ipynb
itirkaa/sagemaker-deployment
52d96fc3e507c30f6498b4c89c2be41dba0dc1b2
[ "MIT" ]
null
null
null
Mini-Projects/IMDB Sentiment Analysis - XGBoost (Hyperparameter Tuning).ipynb
itirkaa/sagemaker-deployment
52d96fc3e507c30f6498b4c89c2be41dba0dc1b2
[ "MIT" ]
null
null
null
73.552336
2,951
0.66846
[ [ [ "# Sentiment Analysis\n\n## Using XGBoost in SageMaker\n\n_Deep Learning Nanodegree Program | Deployment_\n\n---\n\nIn this example of using Amazon's SageMaker service we will construct a random tree model to predict the sentiment of a movie review. You may have seen a version of this example in a pervious lesson although it would have been done using the sklearn package. Instead, we will be using the XGBoost package as it is provided to us by Amazon.\n\n## Instructions\n\nSome template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully!\n\nIn addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell.\n\n> **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted.", "_____no_output_____" ], [ "## Step 1: Downloading the data\n\nThe dataset we are going to use is very popular among researchers in Natural Language Processing, usually referred to as the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/). 
It consists of movie reviews from the website [imdb.com](http://www.imdb.com/), each labeled as either '**pos**itive', if the reviewer enjoyed the film, or '**neg**ative' otherwise.\n\n> Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011.\n\nWe begin by using some Jupyter Notebook magic to download and extract the dataset.", "_____no_output_____" ] ], [ [ "%mkdir ../data\n!wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\n!tar -zxf ../data/aclImdb_v1.tar.gz -C ../data", "mkdir: cannot create directory ‘../data’: File exists\n--2020-08-26 07:30:30-- http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\nResolving ai.stanford.edu (ai.stanford.edu)... 171.64.68.10\nConnecting to ai.stanford.edu (ai.stanford.edu)|171.64.68.10|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 84125825 (80M) [application/x-gzip]\nSaving to: ‘../data/aclImdb_v1.tar.gz’\n\n../data/aclImdb_v1. 100%[===================>] 80.23M 6.67MB/s in 16s \n\n2020-08-26 07:30:47 (5.05 MB/s) - ‘../data/aclImdb_v1.tar.gz’ saved [84125825/84125825]\n\n" ] ], [ [ "## Step 2: Preparing the data\n\nThe data we have downloaded is split into various files, each of which contains a single review. 
It will be much easier going forward if we combine these individual files into two large files, one for training and one for testing.", "_____no_output_____" ] ], [ [ "import os\nimport glob\n\ndef read_imdb_data(data_dir='../data/aclImdb'):\n data = {}\n labels = {}\n \n for data_type in ['train', 'test']:\n data[data_type] = {}\n labels[data_type] = {}\n \n for sentiment in ['pos', 'neg']:\n data[data_type][sentiment] = []\n labels[data_type][sentiment] = []\n \n path = os.path.join(data_dir, data_type, sentiment, '*.txt')\n files = glob.glob(path)\n \n for f in files:\n with open(f) as review:\n data[data_type][sentiment].append(review.read())\n # Here we represent a positive review by '1' and a negative review by '0'\n labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0)\n \n assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \\\n \"{}/{} data size does not match labels size\".format(data_type, sentiment)\n \n return data, labels", "_____no_output_____" ], [ "data, labels = read_imdb_data()\nprint(\"IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg\".format(\n len(data['train']['pos']), len(data['train']['neg']),\n len(data['test']['pos']), len(data['test']['neg'])))", "IMDB reviews: train = 12500 pos / 12500 neg, test = 12500 pos / 12500 neg\n" ], [ "from sklearn.utils import shuffle\n\ndef prepare_imdb_data(data, labels):\n \"\"\"Prepare training and test sets from IMDb movie reviews.\"\"\"\n \n #Combine positive and negative reviews and labels\n data_train = data['train']['pos'] + data['train']['neg']\n data_test = data['test']['pos'] + data['test']['neg']\n labels_train = labels['train']['pos'] + labels['train']['neg']\n labels_test = labels['test']['pos'] + labels['test']['neg']\n \n #Shuffle reviews and corresponding labels within training and test sets\n data_train, labels_train = shuffle(data_train, labels_train)\n data_test, labels_test = shuffle(data_test, labels_test)\n \n # Return a unified 
training data, test data, training labels, test labets\n return data_train, data_test, labels_train, labels_test", "_____no_output_____" ], [ "train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)\nprint(\"IMDb reviews (combined): train = {}, test = {}\".format(len(train_X), len(test_X)))", "IMDb reviews (combined): train = 25000, test = 25000\n" ], [ "train_X[100]", "_____no_output_____" ] ], [ [ "## Step 3: Processing the data\n\nNow that we have our training and testing datasets merged and ready to use, we need to start processing the raw data into something that will be useable by our machine learning algorithm. To begin with, we remove any html formatting that may appear in the reviews and perform some standard natural language processing in order to homogenize the data.", "_____no_output_____" ] ], [ [ "import nltk\nnltk.download(\"stopwords\")\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import *\nstemmer = PorterStemmer()", "[nltk_data] Downloading package stopwords to\n[nltk_data] /home/ec2-user/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "import re\nfrom bs4 import BeautifulSoup\n\ndef review_to_words(review):\n text = BeautifulSoup(review, \"html.parser\").get_text() # Remove HTML tags\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower()) # Convert to lower case\n words = text.split() # Split string into words\n words = [w for w in words if w not in stopwords.words(\"english\")] # Remove stopwords\n words = [PorterStemmer().stem(w) for w in words] # stem\n \n return words", "_____no_output_____" ], [ "import pickle\n\ncache_dir = os.path.join(\"../cache\", \"sentiment_analysis\") # where to store cache files\nos.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists\n\ndef preprocess_data(data_train, data_test, labels_train, labels_test,\n cache_dir=cache_dir, cache_file=\"preprocessed_data.pkl\"):\n \"\"\"Convert each review to words; read from cache if available.\"\"\"\n\n # If 
cache_file is not None, try to read from it first\n cache_data = None\n if cache_file is not None:\n try:\n with open(os.path.join(cache_dir, cache_file), \"rb\") as f:\n cache_data = pickle.load(f)\n print(\"Read preprocessed data from cache file:\", cache_file)\n except:\n pass # unable to read from cache, but that's okay\n \n # If cache is missing, then do the heavy lifting\n if cache_data is None:\n # Preprocess training and test data to obtain words for each review\n #words_train = list(map(review_to_words, data_train))\n #words_test = list(map(review_to_words, data_test))\n words_train = [review_to_words(review) for review in data_train]\n words_test = [review_to_words(review) for review in data_test]\n \n # Write to cache file for future runs\n if cache_file is not None:\n cache_data = dict(words_train=words_train, words_test=words_test,\n labels_train=labels_train, labels_test=labels_test)\n with open(os.path.join(cache_dir, cache_file), \"wb\") as f:\n pickle.dump(cache_data, f)\n print(\"Wrote preprocessed data to cache file:\", cache_file)\n else:\n # Unpack data loaded from cache file\n words_train, words_test, labels_train, labels_test = (cache_data['words_train'],\n cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])\n \n return words_train, words_test, labels_train, labels_test", "_____no_output_____" ], [ "# Preprocess data\ntrain_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)", "Read preprocessed data from cache file: preprocessed_data.pkl\n" ] ], [ [ "### Extract Bag-of-Words features\n\nFor the model we will be implementing, rather than using the reviews directly, we are going to transform each review into a Bag-of-Words feature representation. 
Keep in mind that 'in the wild' we will only have access to the training set so our transformer can only use the training set to construct a representation.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport joblib\n# joblib is an enhanced version of pickle that is more efficient for storing NumPy arrays\n\ndef extract_BoW_features(words_train, words_test, vocabulary_size=5000,\n cache_dir=cache_dir, cache_file=\"bow_features.pkl\"):\n \"\"\"Extract Bag-of-Words for a given set of documents, already preprocessed into words.\"\"\"\n \n # If cache_file is not None, try to read from it first\n cache_data = None\n if cache_file is not None:\n try:\n with open(os.path.join(cache_dir, cache_file), \"rb\") as f:\n cache_data = joblib.load(f)\n print(\"Read features from cache file:\", cache_file)\n except:\n pass # unable to read from cache, but that's okay\n \n # If cache is missing, then do the heavy lifting\n if cache_data is None:\n # Fit a vectorizer to training documents and use it to transform them\n # NOTE: Training documents have already been preprocessed and tokenized into words;\n # pass in dummy functions to skip those steps, e.g. 
preprocessor=lambda x: x\n vectorizer = CountVectorizer(max_features=vocabulary_size,\n preprocessor=lambda x: x, tokenizer=lambda x: x) # already preprocessed\n features_train = vectorizer.fit_transform(words_train).toarray()\n\n # Apply the same vectorizer to transform the test documents (ignore unknown words)\n features_test = vectorizer.transform(words_test).toarray()\n \n # NOTE: Remember to convert the features using .toarray() for a compact representation\n \n # Write to cache file for future runs (store vocabulary as well)\n if cache_file is not None:\n vocabulary = vectorizer.vocabulary_\n cache_data = dict(features_train=features_train, features_test=features_test,\n vocabulary=vocabulary)\n with open(os.path.join(cache_dir, cache_file), \"wb\") as f:\n joblib.dump(cache_data, f)\n print(\"Wrote features to cache file:\", cache_file)\n else:\n # Unpack data loaded from cache file\n features_train, features_test, vocabulary = (cache_data['features_train'],\n cache_data['features_test'], cache_data['vocabulary'])\n \n # Return both the extracted features as well as the vocabulary\n return features_train, features_test, vocabulary", "_____no_output_____" ], [ "# Extract Bag of Words features for both training and test datasets\ntrain_X, test_X, vocabulary = extract_BoW_features(train_X, test_X)", "Read features from cache file: bow_features.pkl\n" ] ], [ [ "## Step 4: Classification using XGBoost\n\nNow that we have created the feature representation of our training (and testing) data, it is time to start setting up and using the XGBoost classifier provided by SageMaker.\n\n### Writing the dataset\n\nThe XGBoost classifier that we will be using requires the dataset to be written to a file and stored using Amazon S3. To do this, we will start by splitting the training dataset into two parts, the data we will train the model with and a validation set. Then, we will write those datasets to a file and upload the files to S3. 
In addition, we will write the test set input to a file and upload the file to S3. This is so that we can use SageMakers Batch Transform functionality to test our model once we've fit it.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nval_X = pd.DataFrame(train_X[:10000])\ntrain_X = pd.DataFrame(train_X[10000:])\n\nval_y = pd.DataFrame(train_y[:10000])\ntrain_y = pd.DataFrame(train_y[10000:])\n\ntest_y = pd.DataFrame(test_y)\ntest_X = pd.DataFrame(test_X)", "_____no_output_____" ] ], [ [ "The documentation for the XGBoost algorithm in SageMaker requires that the saved datasets should contain no headers or index and that for the training and validation data, the label should occur first for each sample.\n\nFor more information about this and other algorithms, the SageMaker developer documentation can be found on __[Amazon's website.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__", "_____no_output_____" ] ], [ [ "# First we make sure that the local directory in which we'd like to store the training and validation csv files exists.\ndata_dir = '../data/xgboost'\nif not os.path.exists(data_dir):\n os.makedirs(data_dir)", "_____no_output_____" ], [ "# First, save the test data to test.csv in the data_dir directory. 
Note that we do not save the associated ground truth\n# labels, instead we will use them later to compare with our model output.\n\npd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)\n\npd.concat([val_y, val_X], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False)\npd.concat([train_y, train_X], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)", "_____no_output_____" ], [ "# To save a bit of memory we can set text_X, train_X, val_X, train_y and val_y to None.\n\ntrain_X = val_X = train_y = val_y = None", "_____no_output_____" ] ], [ [ "### Uploading Training / Validation files to S3\n\nAmazon's S3 service allows us to store files that can be access by both the built-in training models such as the XGBoost model we will be using as well as custom models such as the one we will see a little later.\n\nFor this, and most other tasks we will be doing using SageMaker, there are two methods we could use. The first is to use the low level functionality of SageMaker which requires knowing each of the objects involved in the SageMaker environment. The second is to use the high level functionality in which certain choices have been made on the user's behalf. The low level approach benefits from allowing the user a great deal of flexibility while the high level approach makes development much quicker. For our purposes we will opt to use the high level approach although using the low-level approach is certainly an option.\n\nRecall the method `upload_data()` which is a member of object representing our current SageMaker session. What this method does is upload the data to the default bucket (which is created if it does not exist) into the path described by the key_prefix variable. 
To see this for yourself, once you have uploaded the data files, go to the S3 console and look to see where the files have been uploaded.\n\nFor additional resources, see the __[SageMaker API documentation](http://sagemaker.readthedocs.io/en/latest/)__ and in addition the __[SageMaker Developer Guide.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__", "_____no_output_____" ] ], [ [ "import sagemaker\n\nsession = sagemaker.Session() # Store the current SageMaker session\n\n# S3 prefix (which folder will we use)\nprefix = 'sentiment-xgboost'\n\ntest_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)\nval_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix)\ntrain_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)", "_____no_output_____" ] ], [ [ "### (TODO) Creating a hypertuned XGBoost model\n\nNow that the data has been uploaded it is time to create the XGBoost model. As in the Boston Housing notebook, the first step is to create an estimator object which will be used as the *base* of your hyperparameter tuning job.", "_____no_output_____" ] ], [ [ "from sagemaker import get_execution_role\n\n# Our current execution role is require when creating the model as the training\n# and inference code will need to access the model artifacts.\nrole = get_execution_role()", "_____no_output_____" ], [ "# We need to retrieve the location of the container which is provided by Amazon for using XGBoost.\n# As a matter of convenience, the training and inference code both use the same container.\nfrom sagemaker.amazon.amazon_estimator import get_image_uri\n\ncontainer = get_image_uri(session.boto_region_name, 'xgboost')", "'get_image_uri' method will be deprecated in favor of 'ImageURIProvider' class in SageMaker Python SDK v2.\nThere is a more up to date SageMaker XGBoost image. To use the newer image, please set 'repo_version'='1.0-1'. 
For example:\n\tget_image_uri(region, 'xgboost', '1.0-1').\n" ], [ "# TODO: Create a SageMaker estimator using the container location determined in the previous cell.\n# It is recommended that you use a single training instance of type ml.m4.xlarge. It is also\n# recommended that you use 's3://{}/{}/output'.format(session.default_bucket(), prefix) as the\n# output path.\n\nxgb = sagemaker.estimator.Estimator(container,\n role,\n train_instance_count=1,\n train_instance_type='ml.m4.xlarge',\n output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),\n sagemaker_session=session)\n\n# TODO: Set the XGBoost hyperparameters in the xgb object. Don't forget that in this case we have a binary\n# label so we should be using the 'binary:logistic' objective.\nxgb.set_hyperparameters(max_depth=5,\n eta=0.2, \n gamma=5,\n min_child_weight=6,\n subsample=0.8,\n objective='binary:logistic',\n early_stopping=10,\n num_round=300)\n", "Parameter image_name will be renamed to image_uri in SageMaker Python SDK v2.\n" ] ], [ [ "### (TODO) Create the hyperparameter tuner\n\nNow that the base estimator has been set up we need to construct a hyperparameter tuner object which we will use to request SageMaker construct a hyperparameter tuning job.\n\n**Note:** Training a single sentiment analysis XGBoost model takes longer than training a Boston Housing XGBoost model so if you don't want the hyperparameter tuning job to take too long, make sure to not set the total number of models (jobs) too high.", "_____no_output_____" ] ], [ [ "# First, make sure to import the relevant objects used to construct the tuner\nfrom sagemaker.tuner import IntegerParameter, ContinuousParameter, HyperparameterTuner\n\n# TODO: Create the hyperparameter tuner object\n\nxgb_hyperparameter_tuner = HyperparameterTuner(estimator=xgb,\n objective_metric_name='validation:rmse',\n objective_type='Minimize',\n max_jobs=6,\n max_parallel_jobs=3,\n hyperparameter_ranges={\n 'max_depth': 
IntegerParameter(3,6),\n 'eta': ContinuousParameter(0.05, 0.5),\n 'gamma': IntegerParameter(2,8),\n 'min_child_weight': IntegerParameter(3,8),\n 'subsample': ContinuousParameter(0.5, 0.9)\n })", "_____no_output_____" ] ], [ [ "### Fit the hyperparameter tuner\n\nNow that the hyperparameter tuner object has been constructed, it is time to fit the various models and find the best performing model.", "_____no_output_____" ] ], [ [ "s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')\ns3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')", "'s3_input' class will be renamed to 'TrainingInput' in SageMaker Python SDK v2.\n's3_input' class will be renamed to 'TrainingInput' in SageMaker Python SDK v2.\n" ], [ "xgb_hyperparameter_tuner.fit({'train': s3_input_train, 'validation': s3_input_validation})", "_____no_output_____" ] ], [ [ "Remember that the tuning job is constructed and run in the background so if we want to see the progress of our training job we need to call the `wait()` method.", "_____no_output_____" ] ], [ [ "xgb_hyperparameter_tuner.wait()", "..................................................................................................................................................................................................................................................................................................................!\n" ] ], [ [ "### (TODO) Testing the model\n\nNow that we've run our hyperparameter tuning job, it's time to see how well the best performing model actually performs. To do this we will use SageMaker's Batch Transform functionality. Batch Transform is a convenient way to perform inference on a large dataset in a way that is not realtime. That is, we don't necessarily need to use our model's results immediately and instead we can peform inference on a large number of samples. An example of this in industry might be peforming an end of month report. 
This method of inference can also be useful to us as it means to can perform inference on our entire test set. \n\nRemember that in order to create a transformer object to perform the batch transform job, we need a trained estimator object. We can do that using the `attach()` method, creating an estimator object which is attached to the best trained job.", "_____no_output_____" ] ], [ [ "# TODO: Create a new estimator object attached to the best training job found during hyperparameter tuning\n\nxgb_attached = sagemaker.estimator.Estimator.attach(xgb_hyperparameter_tuner.best_training_job())\n", "Parameter image_name will be renamed to image_uri in SageMaker Python SDK v2.\n" ] ], [ [ "Now that we have an estimator object attached to the correct training job, we can proceed as we normally would and create a transformer object.", "_____no_output_____" ] ], [ [ "# TODO: Create a transformer object from the attached estimator. Using an instance count of 1 and an instance type of ml.m4.xlarge\n# should be more than enough.\n\nxgb_transformer = xgb_attached.transformer(instance_count=1, instance_type='ml.m4.xlarge')\n", "Parameter image will be renamed to image_uri in SageMaker Python SDK v2.\n" ] ], [ [ "Next we actually perform the transform job. When doing so we need to make sure to specify the type of data we are sending so that it is serialized correctly in the background. In our case we are providing our model with csv data so we specify `text/csv`. Also, if the test data that we have provided is too large to process all at once then we need to specify how the data file should be split up. Since each line is a single entry in our data set we tell SageMaker that it can split the input on each line.", "_____no_output_____" ] ], [ [ "# TODO: Start the transform job. 
Make sure to specify the content type and the split type of the test data.\nxgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')", "_____no_output_____" ] ], [ [ "Currently the transform job is running but it is doing so in the background. Since we wish to wait until the transform job is done and we would like a bit of feedback we can run the `wait()` method.", "_____no_output_____" ] ], [ [ "xgb_transformer.wait()", "............................\u001b[34mArguments: serve\u001b[0m\n\u001b[35mArguments: serve\u001b[0m\n\u001b[34m[2020-08-26 08:02:52 +0000] [1] [INFO] Starting gunicorn 19.7.1\u001b[0m\n\u001b[35m[2020-08-26 08:02:52 +0000] [1] [INFO] Starting gunicorn 19.7.1\u001b[0m\n\u001b[34m[2020-08-26 08:02:52 +0000] [1] [INFO] Listening at: http://0.0.0.0:8080 (1)\u001b[0m\n\u001b[34m[2020-08-26 08:02:52 +0000] [1] [INFO] Using worker: gevent\u001b[0m\n\u001b[34m[2020-08-26 08:02:52 +0000] [36] [INFO] Booting worker with pid: 36\u001b[0m\n\u001b[34m[2020-08-26 08:02:52 +0000] [37] [INFO] Booting worker with pid: 37\u001b[0m\n\u001b[34m[2020-08-26 08:02:52 +0000] [38] [INFO] Booting worker with pid: 38\u001b[0m\n\u001b[34m[2020-08-26 08:02:52 +0000] [39] [INFO] Booting worker with pid: 39\u001b[0m\n\u001b[34m[2020-08-26:08:02:52:INFO] Model loaded successfully for worker : 36\u001b[0m\n\u001b[34m[2020-08-26:08:02:52:INFO] Model loaded successfully for worker : 37\u001b[0m\n\u001b[34m[2020-08-26:08:02:52:INFO] Model loaded successfully for worker : 38\u001b[0m\n\u001b[34m[2020-08-26:08:02:52:INFO] Model loaded successfully for worker : 39\u001b[0m\n\u001b[35m[2020-08-26 08:02:52 +0000] [1] [INFO] Listening at: http://0.0.0.0:8080 (1)\u001b[0m\n\u001b[35m[2020-08-26 08:02:52 +0000] [1] [INFO] Using worker: gevent\u001b[0m\n\u001b[35m[2020-08-26 08:02:52 +0000] [36] [INFO] Booting worker with pid: 36\u001b[0m\n\u001b[35m[2020-08-26 08:02:52 +0000] [37] [INFO] Booting worker with pid: 37\u001b[0m\n\u001b[35m[2020-08-26 08:02:52 +0000] 
[38] [INFO] Booting worker with pid: 38\u001b[0m\n\u001b[35m[2020-08-26 08:02:52 +0000] [39] [INFO] Booting worker with pid: 39\u001b[0m\n\u001b[35m[2020-08-26:08:02:52:INFO] Model loaded successfully for worker : 36\u001b[0m\n\u001b[35m[2020-08-26:08:02:52:INFO] Model loaded successfully for worker : 37\u001b[0m\n\u001b[35m[2020-08-26:08:02:52:INFO] Model loaded successfully for worker : 38\u001b[0m\n\u001b[35m[2020-08-26:08:02:52:INFO] Model loaded successfully for worker : 39\u001b[0m\n\u001b[34m[2020-08-26:08:02:52:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:52:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:53:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:53:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:53:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:53:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:53:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:53:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:52:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:52:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:53:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:53:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:53:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:53:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:53:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:53:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:55:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:55:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:55:INFO] Sniff delimiter as 
','\u001b[0m\n\u001b[35m[2020-08-26:08:02:55:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:55:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:55:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:55:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:55:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:56:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:56:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:55:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:55:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:55:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:55:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:56:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:56:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[32m2020-08-26T08:02:52.548:[sagemaker logs]: MaxConcurrentTransforms=4, MaxPayloadInMB=6, BatchStrategy=MULTI_RECORD\u001b[0m\n\u001b[34m[2020-08-26:08:02:57:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:57:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:58:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:58:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:58:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:58:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:58:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:02:58:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:57:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:57:INFO] Determined delimiter of CSV input is 
','\u001b[0m\n\u001b[35m[2020-08-26:08:02:58:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:58:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:58:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:58:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:58:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:02:58:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:00:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:00:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:00:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:00:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:00:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:00:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:00:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:00:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:02:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:02:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:03:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:03:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:03:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:03:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:03:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:03:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:02:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:02:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:03:INFO] Sniff delimiter as 
','\u001b[0m\n\u001b[35m[2020-08-26:08:03:03:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:03:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:03:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:03:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:03:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:05:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:05:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:05:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:05:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:05:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:05:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:05:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:05:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:05:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:05:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:05:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:05:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:05:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:05:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:05:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:05:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:07:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:07:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:07:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:07:INFO] Determined delimiter of CSV input is 
','\u001b[0m\n\u001b[34m[2020-08-26:08:03:08:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:07:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:07:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:07:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:07:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:08:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:08:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:08:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:08:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:08:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:08:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:08:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:10:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:10:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:10:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:10:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:10:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:10:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:10:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:10:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:10:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:10:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:10:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:10:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\n\u001b[34m[2020-08-26:08:03:12:INFO] Sniff delimiter as 
','\u001b[0m\n\u001b[34m[2020-08-26:08:03:12:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:13:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:13:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:12:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:12:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:13:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:13:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:15:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:15:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:15:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:15:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:15:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:15:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:15:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[34m[2020-08-26:08:03:15:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:15:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:15:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:15:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:15:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:15:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:15:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:15:INFO] Sniff delimiter as ','\u001b[0m\n\u001b[35m[2020-08-26:08:03:15:INFO] Determined delimiter of CSV input is ','\u001b[0m\n" ] ], [ [ "Now the transform job has executed and the result, the estimated sentiment of each review, has been saved on S3. 
Since we would rather work on this file locally we can perform a bit of notebook magic to copy the file to the `data_dir`.", "_____no_output_____" ] ], [ [ "!aws s3 cp --recursive $xgb_transformer.output_path $data_dir", "download: s3://sagemaker-ap-south-1-714138043953/xgboost-200826-0732-001-281d2aa4-2020-08-26-07-58-22-744/test.csv.out to ../data/xgboost/test.csv.out\n" ] ], [ [ "The last step is now to read in the output from our model, convert the output to something a little more usable, in this case we want the sentiment to be either `1` (positive) or `0` (negative), and then compare to the ground truth labels.", "_____no_output_____" ] ], [ [ "predictions = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)\npredictions = [round(num) for num in predictions.squeeze().values]", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\naccuracy_score(test_y, predictions)", "_____no_output_____" ] ], [ [ "## Optional: Clean up\n\nThe default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook.", "_____no_output_____" ] ], [ [ "# First we will remove all of the files contained in the data_dir directory\n!rm $data_dir/*\n\n# And then we delete the directory itself\n!rmdir $data_dir\n\n# Similarly we will remove the files in the cache_dir directory and the directory itself\n!rm $cache_dir/*\n!rmdir $cache_dir", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d0684e0817cd8664821b82267e64369682887d85
39,818
ipynb
Jupyter Notebook
code-testing.ipynb
NaserNikandish/test03
aed264939ef70b017f7d8e9412d90d4e98a6c1e7
[ "MIT" ]
null
null
null
code-testing.ipynb
NaserNikandish/test03
aed264939ef70b017f7d8e9412d90d4e98a6c1e7
[ "MIT" ]
null
null
null
code-testing.ipynb
NaserNikandish/test03
aed264939ef70b017f7d8e9412d90d4e98a6c1e7
[ "MIT" ]
null
null
null
18.494194
708
0.433397
[ [ [ "import numpy as np\nnumbers=np.array([4,6,9.5])\nnumbers", "_____no_output_____" ], [ "conda install -c plotly plotly", "_____no_output_____" ], [ "numbers2=np.array([[1,2,3],[4,5,6]])\nnumbers2", "_____no_output_____" ], [ "numbers3=np.array([[2,'d',4],[4,'g',6]])\nnumbers3", "_____no_output_____" ], [ "evens=np.array([2*i for i in range(1,11)])\nevens", "_____no_output_____" ], [ "evens = np.array([x for x in range(2,21,2)])\nevens", "_____no_output_____" ], [ "three = np.array([[x for x in range(2,11,2)],[y for y in range(1,10,2)]])\nthree", "_____no_output_____" ], [ "three.ndim", "_____no_output_____" ], [ "three.shape", "_____no_output_____" ], [ "three.size", "_____no_output_____" ], [ "three.itemsize", "_____no_output_____" ], [ "for rows in three:\n for number in rows:\n print(number, end=' ')\n print()", "2 4 6 8 10 \n1 3 5 7 9 \n" ], [ "for item in three.flat:\n print(item, end=' ')", "2 4 6 8 10 1 3 5 7 9 " ], [ "np.zeros(5, dtype=int)", "_____no_output_____" ], [ "np.ones(3)", "_____no_output_____" ], [ "np.full((2,3), 6, dtype=float)", "_____no_output_____" ], [ "test1=np.array([1,2,3.])\ntest1", "_____no_output_____" ], [ "test1.dtype", "_____no_output_____" ], [ "test2=np.arange(5,10,2)\ntest2", "_____no_output_____" ], [ "test3=np.linspace(0,1)\ntest3", "_____no_output_____" ], [ "test4=np.linspace(0,1,num=3)\ntest4", "_____no_output_____" ], [ "test4=np.arange(0,20000).reshape(20,1000)\ntest4", "_____no_output_____" ], [ "test5=np.arange(2,41,2).reshape(4,5)\ntest5", "_____no_output_____" ], [ "import random\n%timeit die1=[random.randrange(1,7) for i in range(0,6_000_000)]\n", "62 µs ± 541 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n" ], [ "%timeit die2=np.random.randint(1,7,6_000_000)", "44.3 ms ± 428 µs per loop (mean ± std. dev. 
of 7 runs, 10 loops each)\n" ], [ "sum([i for i in range (0,10_000_000)])\n", "_____no_output_____" ], [ "import numpy as np\nw= np.arange(10_000_000)\nw\nw.sum()", "_____no_output_____" ], [ "w=2147483647+1\nw", "_____no_output_____" ], [ "w=2147483647^2\nw", "_____no_output_____" ], [ "w=2147483647**2\nw", "_____no_output_____" ], [ "w=2147483647\nw", "_____no_output_____" ], [ "s=0\nfor i in range(10_000_000):\n s+=i\ns", "_____no_output_____" ], [ "a=np.arange(5)\na", "_____no_output_____" ], [ "a**2", "_____no_output_____" ], [ "a*2", "_____no_output_____" ], [ "b=np.random.randint(1,7,5)\nb", "_____no_output_____" ], [ "c=np.full((2,3), 4)\n\nd=np.full((1,2),5)\n\nc-d", "_____no_output_____" ], [ "numbers=np.arange(5)\nnumbers", "_____no_output_____" ], [ "numbers2=np.linspace(1.1,5.5, 5)\nnumbers2", "_____no_output_____" ], [ "numbers+numbers2", "_____no_output_____" ], [ "numbers >=numbers2", "_____no_output_____" ], [ "grades=np.random.randint(60,100, 20)\ngrades", "_____no_output_____" ], [ "type(grades)", "_____no_output_____" ], [ "grades.dtype\n", "_____no_output_____" ], [ "grades=np.random.randint(60,100, 20)\ngrades.sum()", "_____no_output_____" ], [ "import numpy as np\narray=np.random.randint(60,101, 12).reshape((3,4))\nprint(array)\nprint(array.mean())\nprint('row mean is ', np.mean(array,axis=1))\nprint('column mean is ', np.mean(array,axis=0))\n\n\n", "[[ 93 61 84 88]\n [ 61 72 64 100]\n [ 79 85 79 78]]\n78.66666666666667\nrow mean is [81.5 74.25 80.25]\ncolumn mean is [77.66666667 72.66666667 75.66666667 88.66666667]\n" ], [ "a=np.arange(1,6,1)\na", "_____no_output_____" ], [ "b=np.power(a,3)\nb", "_____no_output_____" ], [ "if 1:\nprint('hi')", "_____no_output_____" ], [ "import numpy as np\na=np.arange(1,16).reshape(3,5)\na", "_____no_output_____" ], [ "a[[0,2]]", "_____no_output_____" ], [ "a[:,0]", "_____no_output_____" ], [ "a[:,1:4]", "_____no_output_____" ], [ "a=np.array([1,2,3])\na", "_____no_output_____" ], [ "b=a\nb\n", 
"_____no_output_____" ], [ "a=np.array([4,5])\nb, a", "_____no_output_____" ], [ "b=a.view()\nb", "_____no_output_____" ], [ "a=np.array([1,2,3])\na\n", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "a=np.arange(1,6)\na", "_____no_output_____" ], [ "b=a.view()\nb", "_____no_output_____" ], [ "a[1] *= 20\na", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "a[3] +=4\na, b", "_____no_output_____" ], [ "a=np.arange(1,7)\na", "_____no_output_____" ], [ "a.reshape(2,3)", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "a.resize(2,3)\na", "_____no_output_____" ], [ "a= np.arange(1,7)\nb=a.reshape(2,3)\na,b", "_____no_output_____" ], [ "b[1,1]=500\nb", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "a = np.array([[1,2,3],[4,5,6]])\na.reshape (1,6)\nb = a.copy()\na, b", "_____no_output_____" ], [ "a[0,1]=30000\nb,a", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "c=np.array([2,3,4,5])\nc", "_____no_output_____" ], [ "d=c\nc,d", "_____no_output_____" ], [ "c=np.array([1,20])\nd,c", "_____no_output_____" ], [ "jasdhg", "_____no_output_____" ], [ "pip install autopep8", "Requirement already satisfied: autopep8 in c:\\users\\nnikand1\\anaconda3\\lib\\site-packages (1.5.3)\nRequirement already satisfied: pycodestyle>=2.6.0 in c:\\users\\nnikand1\\anaconda3\\lib\\site-packages (from autopep8) (2.6.0)\nRequirement already satisfied: toml in c:\\users\\nnikand1\\anaconda3\\lib\\site-packages (from autopep8) (0.10.1)\nNote: you may need to restart the kernel to use updated packages.\n" ], [ "import numpy as np\na=np.arange(60_000_000)\na", "_____no_output_____" ], [ "import numpy as np\ngrade1=np.arange(1,7).reshape(2,3)\ngrade1", "_____no_output_____" ], [ "grade2=np.hstack((grade1,grade1))\ngrade3=np.vstack((grade2,grade2))\ngrade3", "_____no_output_____" ], [ "import pandas as pd\ntest=pd.Series([2,3,4.])\ntest[2]", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\ntest=pd.Series(10.1, 
[2,3,4])\ntest", "_____no_output_____" ], [ "import pandas as pd\ntest=pd.Series((5,7, 10))\ntest\n", "_____no_output_____" ], [ "import pandas as pd\ntest=pd.Series([5,7, 10])\ntest", "_____no_output_____" ], [ "import pandas as pd\ntest=pd.Series(2.5, index = range(4))", "_____no_output_____" ], [ "test", "_____no_output_____" ], [ "import pandas as pd\nstudent_grades = pd.Series([85, 95, 90, 100], index = ['Anna', 'John', 'Milo', 'Yasmin'])\nstudent_grades", "_____no_output_____" ], [ "import pandas as pd\nstudentGrades = pd.Series([85, 95, 90, 100], index = ['Anna', 'John', 'Milo', 'Yasmin'])\nstudentGrades.values\n", "_____no_output_____" ], [ "import pandas as pd\nstudentGrades = pd.Series([85, 95, 90, 100], index = ['Anna', 'John', 'Milo', 'Yasmin'])\nstudentGrades.index\n", "_____no_output_____" ], [ "import pandas as pd\nstudentGrades = pd.Series([85, 95, 90, 100], index = ['Anna', 'John', 'Milo', 'Yasmin'])\nstudentGrades.index\n", "_____no_output_____" ], [ "import pandas as pd\nstudentGrades = pd.Series([85, 95, 90, 100], index = ['Anna', 'John', 'Milo', 'Yasmin'])\nstudentGrades.values\n", "_____no_output_____" ], [ "import pandas as pd\nstudentGrades = pd.Series([85, 95, 90, 100], index = ['Anna', 'John', 'Milo', 'Yasmin'])\nstudentGrades['Milo']", "_____no_output_____" ], [ "d = {'Anna':85, 'John':95, 'Milo':90, 'Yasmin':100}\nstudentGrades = pd.Series(d)\nstudentGrades\n", "_____no_output_____" ], [ "x=10\ny=20\nsum = x + y\naverage = sum / 2\nprint(sum)", "30\n" ], [ "grade = float(input('Enter a grade'))\nprint('Pass') if grade >= 70 else print('fail')", "Enter a grade 55\n" ], [ "import numpy as np\nimport pandas as pd\n\n%load_ext lab_black", "_____no_output_____" ], [ "2 + 3", "_____no_output_____" ], [ "2+3", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d068510cf18d7c355fd4855b50ed2354738ddd3f
48,460
ipynb
Jupyter Notebook
03 Stats/14 Regression/HomesVCrime - Solution.ipynb
Alashmony/DA_Udacity
12615a1d50be6b8260f021f62b3d4ec34ecc06a3
[ "Unlicense" ]
null
null
null
03 Stats/14 Regression/HomesVCrime - Solution.ipynb
Alashmony/DA_Udacity
12615a1d50be6b8260f021f62b3d4ec34ecc06a3
[ "Unlicense" ]
null
null
null
03 Stats/14 Regression/HomesVCrime - Solution.ipynb
Alashmony/DA_Udacity
12615a1d50be6b8260f021f62b3d4ec34ecc06a3
[ "Unlicense" ]
null
null
null
150.03096
20,746
0.829777
[ [ [ "import numpy as np\nimport pandas as pd\nimport statsmodels.api as sms;\nfrom sklearn.datasets import load_boston\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nboston_data = load_boston()\ndf = pd.DataFrame()\ndf['MedianHomePrice'] = boston_data.target\ndf2 = pd.DataFrame(boston_data.data)\ndf['CrimePerCapita'] = df2.iloc[:,0];\ndf.head()", "_____no_output_____" ] ], [ [ "The Boston housing data is a built in dataset in the sklearn library of python. You will be using two of the variables from this dataset, which are stored in **df**. The median home price in thousands of dollars and the crime per capita in the area of the home are shown above.\n\n`1.` Use this dataframe to fit a linear model to predict the home price based on the crime rate. Use your output to answer the first quiz below. Don't forget an intercept.", "_____no_output_____" ] ], [ [ "df['intercept'] = 1\n\nlm = sms.OLS(df['MedianHomePrice'], df[['intercept', 'CrimePerCapita']])\nresults = lm.fit()\nresults.summary()", "_____no_output_____" ] ], [ [ "`2.`Plot the relationship between the crime rate and median home price below. Use your plot and the results from the first question as necessary to answer the remaining quiz questions below.", "_____no_output_____" ] ], [ [ "plt.scatter(df['CrimePerCapita'], df['MedianHomePrice']);\nplt.xlabel('Crime/Capita');\nplt.ylabel('Median Home Price');\nplt.title('Median Home Price vs. CrimePerCapita');", "_____no_output_____" ], [ "## To show the line that was fit I used the following code from \n## https://plot.ly/matplotlib/linear-fits/\n## It isn't the greatest fit... 
but it isn't awful either\n\n\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n\n# MatPlotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import pylab\n\n# Scientific libraries\nfrom numpy import arange,array,ones\nfrom scipy import stats\n\n\nxi = arange(0,100)\nA = array([ xi, ones(100)])\n\n# (Almost) linear sequence\ny = df['MedianHomePrice']\nx = df['CrimePerCapita']\n\n# Generated linear fit\nslope, intercept, r_value, p_value, std_err = stats.linregress(x,y)\nline = slope*xi+intercept\n\nplt.plot(x,y,'o', xi, line);\nplt.xlabel('Crime/Capita');\nplt.ylabel('Median Home Price');\npylab.title('Median Home Price vs. CrimePerCapita');", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d06856045616b774e7b30dc4204a22b534cb76d5
151,660
ipynb
Jupyter Notebook
Continuous_Control.ipynb
bobiblazeski/reacher
d6b2d7197ae889fed1bc624bbaa178550c9c6387
[ "MIT" ]
1
2019-05-05T21:03:54.000Z
2019-05-05T21:03:54.000Z
Continuous_Control.ipynb
bobiblazeski/reacher
d6b2d7197ae889fed1bc624bbaa178550c9c6387
[ "MIT" ]
null
null
null
Continuous_Control.ipynb
bobiblazeski/reacher
d6b2d7197ae889fed1bc624bbaa178550c9c6387
[ "MIT" ]
null
null
null
107.256011
51,488
0.779045
[ [ [ "# Continuous Control\n\n---\n\nIn this notebook, you will learn how to use the Unity ML-Agents environment for the second project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program.\n\n### 1. Start the Environment\n\nWe begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/).", "_____no_output_____" ] ], [ [ "import numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nimport time\n\nfrom unityagents import UnityEnvironment\nfrom collections import deque\nfrom itertools import count\nimport datetime\n\nfrom ddpg import DDPG, ReplayBuffer\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded.\n\n- **Mac**: `\"path/to/Reacher.app\"`\n- **Windows** (x86): `\"path/to/Reacher_Windows_x86/Reacher.exe\"`\n- **Windows** (x86_64): `\"path/to/Reacher_Windows_x86_64/Reacher.exe\"`\n- **Linux** (x86): `\"path/to/Reacher_Linux/Reacher.x86\"`\n- **Linux** (x86_64): `\"path/to/Reacher_Linux/Reacher.x86_64\"`\n- **Linux** (x86, headless): `\"path/to/Reacher_Linux_NoVis/Reacher.x86\"`\n- **Linux** (x86_64, headless): `\"path/to/Reacher_Linux_NoVis/Reacher.x86_64\"`\n\nFor instance, if you are using a Mac, then you downloaded `Reacher.app`. 
If this file is in the same folder as the notebook, then the line below should appear as follows:\n```\nenv = UnityEnvironment(file_name=\"Reacher.app\")\n```", "_____no_output_____" ] ], [ [ "#env = UnityEnvironment(file_name='envs/Reacher_Linux_NoVis_20/Reacher.x86_64') # Headless\nenv = UnityEnvironment(file_name='envs/Reacher_Linux_20/Reacher.x86_64') # Visual", "INFO:unityagents:\n'Academy' started successfully!\nUnity Academy name: Academy\n Number of Brains: 1\n Number of External Brains : 1\n Lesson number : 0\n Reset Parameters :\n\t\tgoal_speed -> 1.0\n\t\tgoal_size -> 5.0\nUnity brain name: ReacherBrain\n Number of Visual Observations (per agent): 0\n Vector Observation space type: continuous\n Vector Observation space size (per agent): 33\n Number of stacked Vector Observation: 1\n Vector Action space type: continuous\n Vector Action space size (per agent): 4\n Vector Action descriptions: , , , \n" ] ], [ [ "Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.", "_____no_output_____" ] ], [ [ "# get the default brain\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]", "_____no_output_____" ] ], [ [ "### 2. Examine the State and Action Spaces\n\nIn this environment, a double-jointed arm can move to target locations. A reward of `+0.1` is provided for each step that the agent's hand is in the goal location. Thus, the goal of your agent is to maintain its position at the target location for as many time steps as possible.\n\nThe observation space consists of `33` variables corresponding to position, rotation, velocity, and angular velocities of the arm. Each action is a vector with four numbers, corresponding to torque applicable to two joints. 
Every entry in the action vector must be a number between `-1` and `1`.\n\nRun the code cell below to print some information about the environment.", "_____no_output_____" ] ], [ [ "# reset the environment\nenv_info = env.reset(train_mode=True)[brain_name]\n\n# number of agents\nnum_agents = len(env_info.agents)\nprint('Number of agents:', num_agents)\n\n# size of each action\naction_size = brain.vector_action_space_size\nprint('Size of each action:', action_size)\n\n# examine the state space \nstates = env_info.vector_observations\nstate_size = states.shape[1]\nprint('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))\nprint('The state for the first agent looks like:', states[0])", "Number of agents: 20\nSize of each action: 4\nThere are 20 agents. Each observes a state with length: 33\nThe state for the first agent looks like: [ 0.00000000e+00 -4.00000000e+00 0.00000000e+00 1.00000000e+00\n -0.00000000e+00 -0.00000000e+00 -4.37113883e-08 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 -1.00000000e+01 0.00000000e+00\n 1.00000000e+00 -0.00000000e+00 -0.00000000e+00 -4.37113883e-08\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 5.75471878e+00 -1.00000000e+00\n 5.55726624e+00 0.00000000e+00 1.00000000e+00 0.00000000e+00\n -1.68164849e-01]\n" ] ], [ [ "### 3. Take Random Actions in the Environment\n\nIn the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment.\n\nOnce this cell is executed, you will watch the agent's performance, if it selects an action at random with each time step. A window should pop up that allows you to observe the agent, as it moves through the environment. 
\n\nOf course, as part of the project, you'll have to change the code so that the agent is able to use its experience to gradually choose better actions when interacting with the environment!", "_____no_output_____" ] ], [ [ "env_info = env.reset(train_mode=False)[brain_name] # reset the environment \nstates = env_info.vector_observations # get the current state (for each agent)\nscores = np.zeros(num_agents) # initialize the score (for each agent)\nwhile True:\n actions = np.random.randn(num_agents, action_size) # select an action (for each agent)\n actions = np.clip(actions, -1, 1) # all actions between -1 and 1\n env_info = env.step(actions)[brain_name] # send all actions to tne environment\n next_states = env_info.vector_observations # get next state (for each agent)\n rewards = env_info.rewards # get reward (for each agent)\n dones = env_info.local_done # see if episode finished\n scores += env_info.rewards # update the score (for each agent)\n states = next_states # roll over states to next time step\n if np.any(dones): # exit loop if episode finished\n break\n break\nprint('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))", "Total score (averaged over agents) this episode: 0.0\n" ] ], [ [ "When finished, you can close the environment.", "_____no_output_____" ], [ "### 4. It's Your Turn!\n\nNow it's your turn to train your own agent to solve the environment! 
When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:\n```python\nenv_info = env.reset(train_mode=True)[brain_name]\n```", "_____no_output_____" ] ], [ [ "BUFFER_SIZE = int(5e5) # replay buffer size\nCACHE_SIZE = int(6e4)\nBATCH_SIZE = 256 # minibatch size\nGAMMA = 0.99 # discount factor\nTAU = 1e-3 # for soft update of target parameters\nLR_ACTOR = 1e-3 # learning rate of the actor\nLR_CRITIC = 1e-3 # learning rate of the critic\nWEIGHT_DECAY = 0 # L2 weight decay\nUPDATE_EVERY = 20 # timesteps between updates\nNUM_UPDATES = 15 # num of update passes when updating\nEPSILON = 1.0 # epsilon for the noise process added to the actions\nEPSILON_DECAY = 1e-6 # decay for epsilon above\nNOISE_SIGMA = 0.05\n# 96 Neurons solves the environment consistently and usually fastest\nfc1_units=96\nfc2_units=96\n\nrandom_seed=23", "_____no_output_____" ], [ "def store(buffers, states, actions, rewards, next_states, dones, timestep):\n memory, cache = buffers\n for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):\n memory.add(state, action, reward, next_state, done)\n cache.add(state, action, reward, next_state, done)\nstore ", "_____no_output_____" ], [ "def learn(agent, buffers, timestep):\n memory, cache = buffers\n if len(memory) > BATCH_SIZE and timestep % UPDATE_EVERY == 0: \n for _ in range(NUM_UPDATES):\n experiences = memory.sample()\n agent.learn(experiences, GAMMA)\n for _ in range(3):\n experiences = cache.sample()\n agent.learn(experiences, GAMMA)\nlearn ", "_____no_output_____" ], [ "avg_over = 100\nprint_every = 10\n\ndef ddpg(agent, buffers, n_episodes=200, stopOnSolved=True):\n print('Start: ',datetime.datetime.now())\n scores_deque = deque(maxlen=avg_over)\n scores_global = []\n average_global = []\n min_global = []\n max_global = []\n best_avg = -np.inf\n\n tic = time.time()\n print('\\rEpis,EpAvg,GlAvg, Max, Min, Time')\n for i_episode in range(1, 
n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment \n states = env_info.vector_observations # get the current state (for each agent)\n scores = np.zeros(num_agents) # initialize the score (for each agent)\n agent.reset()\n \n score_average = 0\n timestep = time.time()\n for t in count():\n actions = agent.act(states, add_noise=True)\n env_info = env.step(actions)[brain_name] # send all actions to tne environment\n next_states = env_info.vector_observations # get next state (for each agent)\n rewards = env_info.rewards # get reward (for each agent)\n dones = env_info.local_done # see if episode finished \n store(buffers, states, actions, rewards, next_states, dones, t)\n learn(agent, buffers, t)\n states = next_states # roll over states to next time step\n scores += rewards # update the score (for each agent) \n if np.any(dones): # exit loop if episode finished\n break\n \n score = np.mean(scores) \n scores_deque.append(score)\n score_average = np.mean(scores_deque)\n scores_global.append(score)\n average_global.append(score_average) \n min_global.append(np.min(scores)) \n max_global.append(np.max(scores)) \n print('\\r {}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}'\\\n .format(str(i_episode).zfill(3), score, score_average, np.max(scores), \n np.min(scores), time.time() - timestep), end=\"\\n\") \n if i_episode % print_every == 0:\n agent.save('./') \n if stopOnSolved and score_average >= 30.0: \n toc = time.time()\n print('\\nSolved in {:d} episodes!\\tAvg Score: {:.2f}, time: {}'.format(i_episode, score_average, toc-tic))\n agent.save('./'+str(i_episode)+'_')\n break\n \n print('End: ',datetime.datetime.now())\n return scores_global, average_global, max_global, min_global\n\nddpg", "_____no_output_____" ], [ "# Create new empty buffers to start training from scratch\nbuffers = [ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed),\n ReplayBuffer(action_size, CACHE_SIZE, BATCH_SIZE, random_seed)]\nagent = 
DDPG(state_size=state_size, action_size=action_size, random_seed=23, \n fc1_units=96, fc2_units=96)\nscores, averages, maxima, minima = ddpg(agent, buffers, n_episodes=130)", "Start: 2018-10-22 18:07:36.058249\nEpis,EpAvg,GlAvg, Max, Min, Time\n 001, 0.51, 0.51, 1.09, 0.11, 12.53\n 002, 0.73, 0.62, 1.97, 0.00, 12.28\n 003, 0.78, 0.67, 1.83, 0.17, 12.09\n 004, 0.88, 0.73, 2.59, 0.14, 12.22\n 005, 1.05, 0.79, 2.22, 0.45, 12.23\n 006, 1.05, 0.83, 1.73, 0.44, 12.40\n 007, 1.12, 0.87, 2.61, 0.33, 12.71\n 008, 1.33, 0.93, 2.32, 0.04, 12.76\n 009, 1.92, 1.04, 3.50, 0.10, 12.94\n 010, 2.39, 1.18, 3.79, 0.73, 12.87\n 011, 2.10, 1.26, 4.37, 0.60, 13.00\n 012, 2.18, 1.34, 3.77, 0.73, 13.22\n 013, 3.41, 1.50, 5.76, 1.64, 13.54\n 014, 4.08, 1.68, 7.74, 2.53, 13.51\n 015, 4.05, 1.84, 6.53, 1.22, 14.05\n 016, 5.10, 2.04, 9.38, 2.01, 14.14\n 017, 4.28, 2.17, 8.01, 1.57, 14.20\n 018, 4.95, 2.33, 8.30, 1.62, 14.78\n 019, 6.12, 2.53, 13.01, 2.59, 14.61\n 020, 6.99, 2.75, 11.67, 3.59, 14.49\n 021, 5.71, 2.89, 12.32, 2.96, 14.71\n 022, 6.78, 3.07, 13.16, 3.37, 15.27\n 023, 8.15, 3.29, 13.13, 4.53, 15.15\n 024, 9.46, 3.55, 14.86, 4.44, 15.94\n 025, 9.51, 3.79, 14.28, 4.59, 16.04\n 026, 10.83, 4.06, 15.42, 6.10, 17.47\n 027, 10.95, 4.31, 17.01, 4.85, 17.01\n 028, 11.31, 4.56, 18.33, 5.65, 16.17\n 029, 12.17, 4.82, 18.43, 2.67, 16.53\n 030, 12.57, 5.08, 17.44, 6.91, 16.99\n 031, 12.54, 5.32, 19.20, 7.18, 17.67\n 032, 15.06, 5.63, 21.38, 8.77, 18.03\n 033, 13.89, 5.88, 21.54, 9.18, 18.16\n 034, 14.67, 6.14, 20.91, 5.97, 18.54\n 035, 17.80, 6.47, 23.40, 11.44, 18.54\n 036, 20.49, 6.86, 38.39, 6.55, 18.43\n 037, 23.27, 7.30, 28.21, 16.53, 19.03\n 038, 24.36, 7.75, 35.24, 15.74, 18.99\n 039, 24.60, 8.18, 35.81, 8.72, 20.19\n 040, 25.19, 8.61, 30.01, 19.37, 19.06\n 041, 26.88, 9.05, 32.64, 21.47, 20.03\n 042, 26.37, 9.47, 35.07, 18.69, 20.36\n 043, 28.64, 9.91, 35.34, 22.21, 21.35\n 044, 29.17, 10.35, 33.88, 24.41, 20.51\n 045, 28.24, 10.75, 34.76, 21.07, 20.16\n 046, 30.40, 11.18, 37.57, 
22.16, 20.50\n 047, 30.05, 11.58, 36.40, 24.65, 20.19\n 048, 31.68, 12.00, 38.45, 24.59, 20.73\n 049, 31.15, 12.39, 35.83, 24.57, 21.11\n 050, 32.46, 12.79, 39.34, 18.16, 22.00\n 051, 32.44, 13.17, 39.34, 18.74, 22.07\n 052, 35.50, 13.60, 39.60, 30.02, 21.62\n 053, 35.22, 14.01, 39.54, 29.48, 22.12\n 054, 35.34, 14.41, 39.32, 26.27, 21.98\n 055, 37.20, 14.82, 39.55, 30.60, 21.44\n 056, 36.98, 15.22, 39.42, 33.16, 20.86\n 057, 36.70, 15.59, 39.25, 32.87, 21.15\n 058, 36.82, 15.96, 39.52, 31.55, 21.54\n 059, 36.33, 16.30, 39.51, 27.50, 23.25\n 060, 36.19, 16.64, 39.56, 32.40, 23.51\n 061, 37.29, 16.97, 39.49, 32.23, 23.20\n 062, 36.02, 17.28, 39.52, 29.72, 27.99\n 063, 36.36, 17.58, 39.20, 29.44, 23.26\n 064, 35.23, 17.86, 39.50, 28.77, 21.87\n 065, 37.89, 18.17, 39.54, 27.34, 21.11\n 066, 35.91, 18.44, 39.57, 26.63, 21.33\n 067, 37.73, 18.72, 39.51, 32.23, 23.02\n 068, 38.51, 19.02, 39.63, 36.12, 20.81\n 069, 37.93, 19.29, 39.54, 35.12, 20.68\n 070, 38.49, 19.56, 39.51, 34.50, 20.54\n 071, 37.70, 19.82, 39.60, 29.43, 20.62\n 072, 38.44, 20.08, 39.60, 36.08, 20.53\n 073, 38.09, 20.33, 39.57, 35.33, 20.74\n 074, 38.64, 20.57, 39.56, 36.23, 21.02\n 075, 38.62, 20.81, 39.66, 36.25, 21.00\n 076, 38.64, 21.05, 39.56, 35.62, 20.71\n 077, 39.17, 21.28, 39.51, 38.15, 21.09\n 078, 38.90, 21.51, 39.49, 38.20, 20.88\n 079, 39.05, 21.73, 39.60, 35.87, 20.60\n 080, 39.18, 21.95, 39.57, 37.41, 20.67\n 081, 37.93, 22.15, 39.54, 33.52, 20.70\n 082, 37.66, 22.34, 39.30, 34.44, 20.80\n 083, 38.45, 22.53, 39.54, 33.71, 20.71\n 084, 38.81, 22.72, 39.59, 36.97, 20.77\n 085, 38.75, 22.91, 39.58, 36.35, 20.79\n 086, 38.67, 23.10, 39.62, 33.24, 21.00\n 087, 38.05, 23.27, 39.61, 35.26, 20.99\n 088, 38.85, 23.44, 39.59, 37.19, 20.72\n 089, 39.26, 23.62, 39.55, 38.54, 20.78\n 090, 38.85, 23.79, 39.66, 34.69, 20.56\n 091, 38.69, 23.96, 39.58, 36.42, 20.47\n 092, 39.00, 24.12, 39.57, 37.05, 20.75\n 093, 38.60, 24.27, 39.55, 36.43, 20.57\n 094, 38.76, 24.43, 39.59, 36.81, 20.85\n 095, 38.67, 
24.58, 39.61, 32.09, 20.86\n 096, 38.30, 24.72, 39.51, 33.45, 20.66\n 097, 36.94, 24.85, 39.04, 32.80, 20.70\n 098, 36.45, 24.97, 39.59, 32.08, 20.38\n 099, 36.91, 25.09, 39.35, 30.84, 20.36\n 100, 38.79, 25.22, 39.66, 36.67, 20.88\n 101, 35.07, 25.57, 39.53, 26.10, 20.87\n 102, 36.42, 25.93, 38.18, 33.47, 21.15\n 103, 35.36, 26.27, 39.52, 26.30, 20.68\n 104, 34.30, 26.61, 38.48, 27.45, 20.76\n 105, 37.15, 26.97, 39.67, 19.72, 21.08\n 106, 35.29, 27.31, 39.62, 29.61, 20.73\n 107, 36.21, 27.66, 39.51, 23.82, 20.85\n 108, 34.45, 27.99, 39.68, 25.07, 20.60\n 109, 33.81, 28.31, 38.84, 26.93, 20.65\n 110, 33.49, 28.62, 39.51, 24.37, 20.99\n 111, 32.92, 28.93, 39.05, 27.07, 21.00\n 112, 36.27, 29.27, 39.20, 20.57, 20.89\n 113, 31.63, 29.55, 35.82, 20.15, 20.91\n 114, 34.66, 29.86, 39.31, 22.26, 20.95\n 115, 32.98, 30.15, 39.66, 22.31, 21.01\n\nSolved in 115 episodes!\tAvg Score: 30.15, time: 2198.465326309204\nEnd: 2018-10-22 18:44:14.525753\n" ], [ "plt.plot(np.arange(1, len(scores)+1), scores)\nplt.plot(np.arange(1, len(averages)+1), averages)\nplt.plot(np.arange(1, len(maxima)+1), maxima)\nplt.plot(np.arange(1, len(minima)+1), minima)\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.legend(['EpAvg', 'GlAvg', 'Max', 'Min'], loc='upper left')\nplt.show()", "_____no_output_____" ], [ "# Smaller agent learning this task from larger agent experiences\nagent = DDPG(state_size=state_size, action_size=action_size, random_seed=23, \n fc1_units=48, fc2_units=48)\nscores, averages, maxima, minima = ddpg(agent, buffers, n_episodes=200)", "Start: 2018-10-22 20:29:34.054026\nEpis,EpAvg,GlAvg, Max, Min, Time\n 001, 0.63, 0.63, 1.79, 0.00, 21.90\n 002, 0.90, 0.76, 2.23, 0.07, 22.22\n 003, 0.89, 0.81, 1.99, 0.00, 24.03\n 004, 1.17, 0.90, 2.59, 0.14, 22.20\n 005, 1.51, 1.02, 2.57, 0.50, 24.96\n 006, 1.19, 1.05, 1.99, 0.23, 23.43\n 007, 1.09, 1.05, 1.90, 0.17, 24.12\n 008, 1.11, 1.06, 2.73, 0.18, 23.34\n 009, 2.98, 1.27, 5.04, 1.60, 23.90\n 010, 3.74, 1.52, 7.35, 0.96, 23.29\n 011, 
4.95, 1.83, 10.34, 1.99, 23.06\n 012, 11.34, 2.63, 18.17, 4.67, 22.91\n 013, 21.51, 4.08, 28.17, 14.17, 23.59\n 014, 31.32, 6.02, 36.47, 22.24, 23.39\n 015, 34.92, 7.95, 39.59, 25.19, 22.69\n 016, 37.14, 9.78, 38.96, 34.27, 26.16\n 017, 35.03, 11.26, 39.42, 21.28, 23.17\n 018, 34.21, 12.54, 39.49, 15.96, 23.29\n 019, 37.11, 13.83, 39.28, 33.14, 22.98\n 020, 36.58, 14.97, 39.41, 30.97, 23.08\n 021, 36.56, 16.00, 39.59, 17.56, 24.16\n 022, 37.45, 16.97, 39.54, 34.50, 23.07\n 023, 37.94, 17.88, 39.38, 36.54, 22.74\n 024, 35.29, 18.61, 39.27, 7.56, 23.34\n 025, 37.80, 19.38, 39.65, 35.15, 23.15\n 026, 37.67, 20.08, 39.49, 32.14, 22.25\n 027, 37.85, 20.74, 39.66, 31.38, 22.22\n 028, 34.27, 21.22, 37.76, 30.16, 22.70\n 029, 34.96, 21.69, 39.16, 21.20, 23.54\n 030, 38.76, 22.26, 39.66, 36.52, 22.84\n 031, 38.03, 22.77, 39.56, 34.17, 23.26\n 032, 37.96, 23.25, 39.62, 33.99, 23.13\n 033, 33.52, 23.56, 39.20, 19.16, 23.15\n 034, 37.37, 23.96, 39.55, 33.80, 22.96\n 035, 37.87, 24.36, 39.59, 33.72, 22.81\n 036, 37.59, 24.73, 39.42, 31.33, 22.25\n 037, 36.08, 25.04, 39.63, 20.37, 22.68\n 038, 34.42, 25.28, 38.69, 24.79, 22.27\n 039, 35.84, 25.55, 39.01, 27.73, 22.83\n 040, 36.20, 25.82, 39.01, 31.21, 22.09\n 041, 34.97, 26.04, 38.93, 25.67, 22.46\n 042, 37.29, 26.31, 39.51, 25.98, 21.99\n 043, 33.40, 26.47, 39.36, 19.35, 22.72\n 044, 35.01, 26.67, 39.46, 20.89, 22.18\n 045, 37.66, 26.91, 39.02, 35.71, 22.95\n 046, 36.21, 27.12, 39.19, 34.00, 23.37\n 047, 35.21, 27.29, 39.01, 27.84, 23.27\n 048, 37.36, 27.50, 39.56, 33.30, 23.14\n 049, 38.45, 27.72, 39.57, 36.27, 23.04\n 050, 37.02, 27.91, 39.36, 33.33, 23.05\n 051, 35.69, 28.06, 38.29, 26.76, 23.06\n 052, 35.78, 28.21, 39.16, 23.28, 22.73\n 053, 36.31, 28.36, 38.86, 31.58, 22.83\n 054, 30.98, 28.41, 38.84, 22.66, 23.31\n 055, 34.44, 28.52, 39.07, 13.21, 22.64\n 056, 34.64, 28.63, 39.42, 21.66, 23.05\n 057, 34.14, 28.72, 38.72, 23.39, 22.84\n 058, 34.42, 28.82, 39.31, 23.66, 22.45\n 059, 34.60, 28.92, 37.94, 31.20, 22.94\n 060, 
36.07, 29.04, 39.35, 32.60, 23.65\n 061, 35.45, 29.15, 38.02, 30.65, 23.33\n 062, 36.85, 29.27, 39.45, 32.68, 26.06\n 063, 37.64, 29.40, 39.49, 27.42, 25.28\n 064, 34.84, 29.49, 39.53, 6.43, 25.96\n 065, 36.42, 29.59, 39.51, 32.75, 25.03\n 066, 34.80, 29.67, 39.66, 17.90, 24.94\n 067, 36.07, 29.77, 39.27, 31.23, 25.69\n 068, 33.33, 29.82, 37.99, 9.40, 24.59\n 069, 34.48, 29.89, 39.38, 28.72, 24.86\n 070, 34.62, 29.96, 38.92, 29.30, 24.17\n 071, 36.70, 30.05, 39.55, 29.94, 24.15\n\nSolved in 71 episodes!\tAvg Score: 30.05, time: 1657.2164599895477\nEnd: 2018-10-22 20:57:11.275171\n" ], [ "plt.plot(np.arange(1, len(scores)+1), scores)\nplt.plot(np.arange(1, len(averages)+1), averages)\nplt.plot(np.arange(1, len(maxima)+1), maxima)\nplt.plot(np.arange(1, len(minima)+1), minima)\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.legend(['EpAvg', 'GlAvg', 'Max', 'Min'], loc='lower center')\nplt.show()", "_____no_output_____" ] ], [ [ "Saves experiences for training future agents. Warning file is quite large.", "_____no_output_____" ] ], [ [ "memory, cache = buffers\nmemory.save('experiences.pkl')", "_____no_output_____" ], [ "#env.close() ", "_____no_output_____" ] ], [ [ "### 5. 
See the pre-trained agent in action", "_____no_output_____" ] ], [ [ "agent = DDPG(state_size=state_size, action_size=action_size, random_seed=23, \n fc1_units=96, fc2_units=96)", "_____no_output_____" ], [ "agent.load('./saves/96_96_108_actor.pth', './saves/96_96_108_critic.pth')", "_____no_output_____" ], [ "def play(agent, episodes=3):\n for i_episode in range(episodes):\n env_info = env.reset(train_mode=False)[brain_name] # reset the environment \n states = env_info.vector_observations # get the current state (for each agent)\n scores = np.zeros(num_agents) # initialize the score (for each agent)\n while True:\n actions = np.random.randn(num_agents, action_size) # select an action (for each agent)\n actions = agent.act(states, add_noise=False) # all actions between -1 and 1\n env_info = env.step(actions)[brain_name] # send all actions to tne environment\n next_states = env_info.vector_observations # get next state (for each agent)\n rewards = env_info.rewards # get reward (for each agent)\n dones = env_info.local_done # see if episode finished\n scores += env_info.rewards # update the score (for each agent)\n states = next_states # roll over states to next time step\n if np.any(dones): # exit loop if episode finished\n break\n #break\n print('Ep No: {} Total score (averaged over agents): {}'.format(i_episode, np.mean(scores)))", "_____no_output_____" ], [ "play(agent, 10)", "Ep No: 0 Total score (averaged over agents): 37.69499915745109\nEp No: 1 Total score (averaged over agents): 36.70099917966873\nEp No: 2 Total score (averaged over agents): 36.49249918432906\nEp No: 3 Total score (averaged over agents): 37.94049915196374\nEp No: 4 Total score (averaged over agents): 37.35449916506186\nEp No: 5 Total score (averaged over agents): 37.17449916908517\nEp No: 6 Total score (averaged over agents): 36.74749917862937\nEp No: 7 Total score (averaged over agents): 37.175499169062824\nEp No: 8 Total score (averaged over agents): 37.99799915067852\nEp No: 9 Total score 
(averaged over agents): 36.05999919399619\n" ] ], [ [ "### 6. Experiences\n\nExperiences from the Replay Buffer could be saved and loaded for training different agents.\nAs an example I've provided `experiences.pkl.7z` which you should unpack with your favorite archiver.", "_____no_output_____" ], [ "Create new ReplayBuffer and load saved experiences", "_____no_output_____" ] ], [ [ "savedBuffer = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)\nsavedBuffer.load('experiences.pkl')", "_____no_output_____" ] ], [ [ "Afterward you can use it to train your agent", "_____no_output_____" ] ], [ [ "savedBuffer.sample()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0687118830a43da7e3920e91ff6ef46deaf0bf0
987,705
ipynb
Jupyter Notebook
cnn-rnn-image-caption.ipynb
ashwanikumar04/cnn-rnn-image-caption
e935c012e756d3254ea431c0ce4454b922831ac0
[ "MIT" ]
1
2022-03-23T17:24:46.000Z
2022-03-23T17:24:46.000Z
cnn-rnn-image-caption.ipynb
ashwanikumar04/cnn-rnn-image-caption
e935c012e756d3254ea431c0ce4454b922831ac0
[ "MIT" ]
null
null
null
cnn-rnn-image-caption.ipynb
ashwanikumar04/cnn-rnn-image-caption
e935c012e756d3254ea431c0ce4454b922831ac0
[ "MIT" ]
null
null
null
2,532.576923
389,004
0.957453
[ [ [ "import numpy as np\nimport os\nimport datetime\nimport time\nimport pandas as pd\nimport random\nfrom matplotlib import pyplot as plt\nimport seaborn as sn\n\n%config IPCompleter.greedy = True\n%config InlineBackend.figure_format = 'retina'\n%matplotlib inline\npd.set_option('mode.chained_assignment', None)\nsn.set(font_scale=1.5)\n\n\nfig_size=(15,8)\nsn.set(rc={'figure.figsize':fig_size})", "_____no_output_____" ], [ "files = os.listdir('./results/')\nmodel_stats = None\nfor file in files:\n df = pd.read_csv(\"./results/\"+file)\n if model_stats is not None:\n model_stats = pd.concat([model_stats, df], axis=0)\n else:\n model_stats = df\n \nmodel_stats = model_stats.drop('Unnamed: 0', 1)\nmodel_stats\n", "_____no_output_____" ], [ "def plot_loss(column, plot_name, fig_name):\n loss_data = {}\n plt.rcParams['figure.figsize'] = fig_size\n\n for file in files:\n loss_data[file.replace('.csv','')]= model_stats[model_stats['name']==file.replace('.csv','')][column].values.tolist()\n\n for key, data in loss_data.items():\n plt.plot(data)\n\n leg = plt.legend(loss_data.keys(), loc='upper right')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.title(plot_name)\n for line in leg.get_lines():\n line.set_linewidth(10)\n plt.savefig('images/'+fig_name+\".svg\", format='svg', dpi=1200)\n plt.show()\n", "_____no_output_____" ], [ "plot_loss('loss_plot','Traing loss','training_loss')\nplot_loss('test_loss_plot','Testing loss', 'test_loss')", "_____no_output_____" ], [ "plt.rcParams['figure.figsize'] = (20,10)\n\nbleu_scores={}\nfor file in files:\n bleu_scores[file.replace('.csv','')]= model_stats[model_stats['name']==file.replace('.csv','')][['bleu1','bleu2','bleu3','bleu4']].values.tolist()[0]\nwidth =0.08\nindex=0\nind = np.arange(4)\nfor key, data in bleu_scores.items():\n plt.bar(ind+(index*width), data, width=width,align='edge')\n index+=1\nleg = plt.legend(bleu_scores.keys(), loc='upper left',bbox_to_anchor=(1.0, 1))\nplt.xlabel('Score 
Type')\nplt.ylabel('Score')\nplt.xticks(ind + (len(bleu_scores)*width) / 2, ['bleu1','bleu2','bleu3','bleu4'])\nplt.savefig('images/'+ 'bleu_scores.svg',format='svg', dpi=1200)\nplt.show()", "_____no_output_____" ] ], [ [ "![Bleu score](https://raw.githubusercontent.com/ashwanikumar04/cnn-rnn-image-caption/main/images/bleu_scores.svg)", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d06879f514d629cbdd55ccdda63ca7b8f946309b
968
ipynb
Jupyter Notebook
test_check_ceamra_caribrate_matrix.ipynb
sunnyCUD/one_camera_knee_angle
4feb3282eff46025b7375ade37f323bf7954b67d
[ "MIT" ]
null
null
null
test_check_ceamra_caribrate_matrix.ipynb
sunnyCUD/one_camera_knee_angle
4feb3282eff46025b7375ade37f323bf7954b67d
[ "MIT" ]
null
null
null
test_check_ceamra_caribrate_matrix.ipynb
sunnyCUD/one_camera_knee_angle
4feb3282eff46025b7375ade37f323bf7954b67d
[ "MIT" ]
null
null
null
18.980392
62
0.458678
[ [ [ "#B.npz checker\ndata = np.load(\"B.npz\")\n\na1 = data['mtx']\nprint(a1)\nb1 = data['dist']\nprint(b1)\na2 = data['rvecs']\nprint(a2)\nb2 = data['tvecs']\nprint(b2)\nprint(\"==========================================\")\n\nprint(mtx)\nprint(dist)\nprint(rvecs)\nprint(tvecs)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
d0687d209f2e989aa471a6a6e9ddc6550e060f1a
13,334
ipynb
Jupyter Notebook
2-Data-Analysis/1-Numpy/4-Numpy Uebung - Aufgabe.ipynb
Klaynie/Jupyter-Test
8d4d165568527bdd789a1ca9a8f1a56efade9bf8
[ "BSD-3-Clause" ]
null
null
null
2-Data-Analysis/1-Numpy/4-Numpy Uebung - Aufgabe.ipynb
Klaynie/Jupyter-Test
8d4d165568527bdd789a1ca9a8f1a56efade9bf8
[ "BSD-3-Clause" ]
null
null
null
2-Data-Analysis/1-Numpy/4-Numpy Uebung - Aufgabe.ipynb
Klaynie/Jupyter-Test
8d4d165568527bdd789a1ca9a8f1a56efade9bf8
[ "BSD-3-Clause" ]
null
null
null
21.36859
194
0.464002
[ [ [ "# NumPy Übung - Aufgabe\n\nJetzt wo wir schon einiges über NumPy gelernt haben können wir unser Wissen überprüfen. Wir starten dazu mit einigen einfachen Aufgaben und steigern uns zu komplizierteren Fragestellungen.", "_____no_output_____" ], [ "**Importiere NumPy als np**", "_____no_output_____" ], [ "**Erstelle ein Array von 10 Nullen**", "_____no_output_____" ], [ "**Erstelle ein Array von 10 Einsen**", "_____no_output_____" ], [ "**Erstelle ein Array von 10 Fünfen**", "_____no_output_____" ], [ "**Erstelle ein Array der Zahlen von 10 bis 50**", "_____no_output_____" ], [ "**Erstelle ein Array aller gerader Zahlen zwischen 10 und 50**", "_____no_output_____" ], [ "**Erstelle eine 3x3 Matrix, die die Zahlen von 0 bis 8 beinhaltet**", "_____no_output_____" ], [ "**Erstelle eine 3x3 Einheitsmatrix**", "_____no_output_____" ], [ "**Nutze NumPy, um eine Zufallszahl zwischen 0 und 1 zu erstellen**", "_____no_output_____" ], [ "**Nutze NumPy, um ein Array von 25 Zufallszahlen, die einer Standardnormalverteilung folgen, zu erstellen**", "_____no_output_____" ], [ "**Erstelle die folgende Matrix:**", "_____no_output_____" ], [ "**Erstelle ein Array aus 20 gleichmäßig verteilten Punkten zwischen 0 und 1**", "_____no_output_____" ], [ "## NumPy Indexing und Selection\n\nDu wirst nun einige Matrizen sehen und deine Aufgabe ist es, die angezeigten Ergebnis-Outputs zu reproduzieren:", "_____no_output_____" ] ], [ [ "# Schreibe deinen Code hier, der das Ergebnis reproduzieren soll\n# Achte darauf, die untere Zelle nicht auszuführen, sonst\n# wirst du das Ergebnis nicht mehr sehen können", "_____no_output_____" ], [ "# Schreibe deinen Code hier, der das Ergebnis reproduzieren soll\n# Achte darauf, die untere Zelle nicht auszuführen, sonst\n# wirst du das Ergebnis nicht mehr sehen können", "_____no_output_____" ], [ "# Schreibe deinen Code hier, der das Ergebnis reproduzieren soll\n# Achte darauf, die untere Zelle nicht auszuführen, sonst\n# wirst du das Ergebnis nicht 
mehr sehen können", "_____no_output_____" ], [ "# Schreibe deinen Code hier, der das Ergebnis reproduzieren soll\n# Achte darauf, die untere Zelle nicht auszuführen, sonst\n# wirst du das Ergebnis nicht mehr sehen können", "_____no_output_____" ], [ "# Schreibe deinen Code hier, der das Ergebnis reproduzieren soll\n# Achte darauf, die untere Zelle nicht auszuführen, sonst\n# wirst du das Ergebnis nicht mehr sehen können", "_____no_output_____" ] ], [ [ "## Mache jetzt folgendes\n\n**Erzeuge die Summe aller Werte in der Matrix**", "_____no_output_____" ], [ "**Erhalte die Standardabweichung der Werte in der Matrix**", "_____no_output_____" ], [ "**Erhalte die Summe aller Spalten in der Matrix**", "_____no_output_____" ], [ "# Gut gemacht!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
d06882896e21f73380e86a6fc110f6849d415d6e
34,231
ipynb
Jupyter Notebook
AI 이노베이션 스퀘어 언어지능 과정/20190510/Extracting_data(news_etc).ipynb
donddog/AI_Innovation_Square_Codes
a04d50db011d25e00d8486146c24124c50242aa7
[ "MIT" ]
1
2021-02-11T16:45:21.000Z
2021-02-11T16:45:21.000Z
AI 이노베이션 스퀘어 언어지능 과정/20190510/Extracting_data(news_etc).ipynb
donddog/AI_Innovation_Square_Codes
a04d50db011d25e00d8486146c24124c50242aa7
[ "MIT" ]
null
null
null
AI 이노베이션 스퀘어 언어지능 과정/20190510/Extracting_data(news_etc).ipynb
donddog/AI_Innovation_Square_Codes
a04d50db011d25e00d8486146c24124c50242aa7
[ "MIT" ]
null
null
null
35.956933
2,157
0.473168
[ [ [ "def download(url, params={}, retries=3):\n resp = None\n \n header = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36\"}\n \n try:\n resp = requests.get(url, params=params, headers = header)\n resp.raise_for_status()\n except requests.exceptions.HTTPError as e:\n if 500 <= e.response.status_code < 600 and retries > 0:\n print(retries)\n resp = download(url, params, retries - 1)\n else:\n print(e.response.status_code)\n print(e.response.reason)\n print(e.request.headers)\n\n return resp", "_____no_output_____" ], [ "from bs4 import BeautifulSoup\nimport requests\n\nhtml = download(\"https://media.daum.net/breakingnews/society\")\ndaumnews = BeautifulSoup(html.text, \"lxml\")", "_____no_output_____" ], [ "daumnewstitellists = daumnews.select(\"strong > a\")\n\noutput_file_name = \"DaumNews_Urls.txt\"\noutput_file = open(output_file_name, \"w\", encoding=\"utf-8\")\n\nfor links in daumnewstitellists:\n #print(links.text)\n links.get('href')\n print()\n\noutput_file_name = \"DaumNews_Urls.txt\"\noutput_file = open(output_file_name, \"w\", encoding=\"utf-8\")\npage_num = 1\nmax_page_num = 2\n\nuser_agent = \"'Mozilla/5.0\"\nheaders ={\"User-Agent\" : user_agent}\n\nwhile page_num<=max_page_num:\n\n page_url = \"https://media.daum.net/breakingnews/society\"\n response = requests.get(page_url, headers=headers)\n html = response.text\n\n \"\"\"\n 주어진 HTML에서 기사 URL을 추출한다.\n \"\"\"\n url_frags = re.findall('<a href=\"(.*?)\"',html)\n\n urls = []\n \n for url_frag in url_frags:\n urls.append(url_frag)\n\n for url in urls:\n print(url, file=output_file)\n time.sleep(2)\n\n page_num+=1\n\noutput_file.close()", "<a class=\"link_kakao\" href=\"http://www.kakaocorp.com/\">Kakao Corp.</a>\n" ], [ "html = download('http://v.media.daum.net/v/20190512030900250')\n\ndaumnews = BeautifulSoup(html.text, \"lxml\")", "_____no_output_____" ], [ "import json\n\ndaumnewstitellists = 
daumnews.select(\"p\")\n\nprint(daumnewstitellists)\n\nfor links in daumnewstitellists:\n a = links.text\n\nprint(a) \nwith open('사회-2019051101.txt', 'w+', encoding='utf-8') as json_file:\n json.dump(a, json_file, ensure_ascii=False, indent='\\n', sort_keys=True)", "[<p class=\"desc_translate txt_newsview\">Translated by <a class=\"link_kakaoi #util #translate #kakaoi_link\" href=\"https://kakao.ai/\" target=\"_blank\">kakao i</a></p>, <p dmcf-pid=\"aoYV6Gu4M3\" dmcf-ptype=\"general\">어제저녁 8시 반쯤 울산 남구에 있는 2층짜리 건물의 2층 술집에서 불이 났습니다.</p>, <p dmcf-pid=\"aZU9zLBDDj\" dmcf-ptype=\"general\">불은 소방서 추산 470여만 원의 피해를 낸 뒤 30분 만에 꺼졌으며 종업원 2명이 불을 끄려다 연기를 마셔 병원에서 치료를 받았습니다.</p>, <p dmcf-pid=\"age5qpehgX\" dmcf-ptype=\"general\">소방당국은 주방에서 튀김 요리를 하다 불이 난 것으로 보고 정확한 원인을 조사하고 있습니다.</p>, <p dmcf-pid=\"aj3J8COfk5\" dmcf-ptype=\"general\">김대근 [[email protected]]</p>]\n김대근 [[email protected]]\n" ], [ "import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport ast\n\nbase_url = 'https://media.daum.net/society/'\nreq = requests.get(base_url)\nhtml = req.content\nsoup = BeautifulSoup(html, 'lxml')\nnewslist = soup.find(name=\"div\", attrs={\"class\":\"section_cate section_headline\"})\nnewslist_atag = newslist.find_all('a')\n#print(newslist_atag)\nurl_list = []\nfor a in newslist_atag:\n url_list.append(a.get('href'))\nprint(url_list)\n#print(url_list)\n\n# 각 기사에서 텍스트만 정제하여 추출\nreq = requests.get(url_list[0])\n#print(req)\nhtml = req.content\n#print(html)\nsoup = BeautifulSoup(html, 'lxml')\ntext = ''\ndoc = None\nfor item in soup.find_all('div', id='mArticle'):\n text = text + str(item.find_all(text=True))\n text = ast.literal_eval(text)\n\nprint(text)", "['http://v.media.daum.net/v/20190510192814791', 'http://v.media.daum.net/v/20190510192814791', 'http://v.media.daum.net/v/20190510230851842', 'http://v.media.daum.net/v/20190510120023692', 'http://v.media.daum.net/v/20190510060104105', 'http://v.media.daum.net/v/20190510223306488', '/issue/1310235', 
'http://v.media.daum.net/v/20190510223306488', 'http://v.media.daum.net/v/20190510204216049', 'http://v.media.daum.net/v/20190510203904991', 'http://v.media.daum.net/v/20190510221354336', 'http://v.media.daum.net/v/20190510221354336', 'http://v.media.daum.net/v/20190510221424338', 'http://v.media.daum.net/v/20190510214006005', 'http://v.media.daum.net/v/20190510203905992', 'http://v.media.daum.net/v/20190510203905992', 'http://v.media.daum.net/v/20190510214554075', 'http://v.media.daum.net/v/20190510134823729', 'http://v.media.daum.net/v/20190510214337048', 'http://v.media.daum.net/v/20190510214337048', 'http://v.media.daum.net/v/20190510213454914', 'http://v.media.daum.net/v/20190510203022796', 'http://v.media.daum.net/v/20190510221352333', 'http://v.media.daum.net/v/20190510220507245', 'http://v.media.daum.net/v/20190510214934117', 'http://v.media.daum.net/v/20190510212521741', 'http://v.media.daum.net/v/20190510211623617']\n['\\n', ' 바꿔치기 ', '\\n', ' 언론중재법 ', '\\n', ' 번역 안내 ', '\\n', '\\n', 'The copyright belongs to the original writer of the content, and there may be errors in machine translation results.', '\\n', '版权归内容原作者所有。机器翻译结果可能存在错误。', '\\n', '原文の著作権は原著著作者にあり、機械翻訳の結果にエラーが含まれることがあります。', '\\n', 'Hak cipta milik penulis asli dari konten, dan mungkin ditemukan kesalahan dalam hasil terjemahan mesin.', '\\n', 'Bản quyền thuộc về tác giả gốc của nội dung và có thể có lỗi trong kết quả dịch bằng máy.', '\\n', '\\n', '\\n', '\\n', '\\n', '(수원=연합뉴스) 강영훈 기자 = 자유한국당 경기도당 사무실에 침입해 당 해체 구호를 외치며 농성한 대학생 추정 남녀 5명이 경찰에 체포됐다.', '\\n', '경기 수원중부경찰서는 10일 건조물 침입 혐의로 A 씨 등 5명을 붙잡아 조사하고 있다고 밝혔다.', '\\n', '\\n', '\\n', '\\n 자유한국당 경기도당 로고 [자유한국당 경기도당 제공]\\n ', '\\n', '\\n', 'A 씨 등은 이날 오후 4시 30분께 수원시 장안구 영화동 소재 자유한국당 경기도당 사무실에 들어가 \"자유한국당 해체하라\"는 등의 문구가 적힌 피켓을 들고 누워 구호를 외치는 등 농성한 혐의를 받고 있다.', '\\n', '경찰은 \"당 사무실에 대학생들이 들어와 소리 지르고 있다\"는 112 신고를 받고 출동해 A 씨 등을 현행범으로 체포했다.', '\\n', '이 과정에서 다친 사람이나 파손된 기물은 없었다.', '\\n', '붙잡힌 이들은 남성 4명과 여성 1명으로, 묵비권을 행사하며 이름과 나이 등 신원을 밝히지 않고 있다.', 
'\\n', '경찰은 이들을 상대로 자세한 사건 경위를 조사하고 있다.', '\\n', '[email protected]', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '관련 태그', '\\n', '\\n', '\\n', '연재', '\\n', ' MCCP-856 연재 영역 수정 ', '\\n', '\\n', '\\n', ' //MCCP-856 연재 영역 수정 ', '\\n', '더보기', '\\n', '\\n', '\\n', '저작권자(c)연합뉴스. 무단전재-재배포금지', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', ' 언론사 관련기사(주요뉴스) ', '\\n', '\\n', '연합뉴스 주요 뉴스', '해당 언론사로 연결됩니다.', '\\n', '\\n', '고속道 여배우 사망…2차선에 차 세운 이유 남편 \"몰라\"', '\\n', '배우 조수현, 극단 선택 시도…병원 응급 이송', '\\n', '강다니엘 독자활동 길 열렸다…LM 전속계약 효력정지', '\\n', \"'닥꼬티 얼마예요? 기안84, 장애인 희화화 웹툰 사과\", '\\n', '연기자 보라·가수 필독, 공개 연애 2년만에 결별', '\\n', '길가던 여고생 차로 치고 납치·성폭행 남성, 징역10년', '\\n', '文대통령 \"대담서 더 공격적 공방 오갔어도 괜찮았겠다\"', '\\n', '정준영, 법정서 혐의 인정…\"합의 원해\" 전략 바꾼듯', '\\n', '日문화청장관 \"한국은 일본에 형 누나 같은 존재\"', '\\n', '기술 고도화하랬더니…中업체에 몽땅 빼돌린 中企직원', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '공지', '\\n', \"똑똑한 뉴스 챗봇 '뉴스봇'을 소개합니다.\", '\\n', '\\n', '\\n', '\\n', '\\n', '\\n']\n" ], [ "print(url_list[3])\nreq = requests.get(url_list[3])\n#print(req)\nhtml = req.content\n#print(html)\nsoup = BeautifulSoup(html, 'lxml')\ntext = ''\ndoc = None\nfor item in soup.find_all('div', id='mArticle'):\n text = text + str(item.find_all(text=True))\n text = ast.literal_eval(text)\nprint(text)", "http://v.media.daum.net/v/20190510120023692\n['\\n', ' 바꿔치기 ', '\\n', ' 언론중재법 ', '\\n', ' 번역 안내 ', '\\n', '\\n', 'The copyright belongs to the original writer of the content, and there may be errors in machine translation results.', '\\n', '版权归内容原作者所有。机器翻译结果可能存在错误。', '\\n', '原文の著作権は原著著作者にあり、機械翻訳の結果にエラーが含まれることがあります。', '\\n', 'Hak cipta milik penulis asli dari konten, dan mungkin ditemukan kesalahan dalam hasil terjemahan mesin.', '\\n', 'Bản quyền thuộc về tác giả gốc của nội dung và có thể có lỗi trong kết quả dịch bằng máy.', '\\n', '\\n', '\\n', '9세 어린이 치어 다리 골절상 입혀', '집에 데려다주고 아닌 척 가 버려', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '【서울=뉴시스】김온유 기자 = 전동휠을 타고 가다 어린이를 치고선 본인이 낸 사고가 아닌 것처럼 행세하며 
집까지 데려다 준 20대가 검찰에 넘겨졌다.', '\\n', '서울 수서경찰서는 A씨(29)를 특정범죄가중처벌등에관한법률 위반 혐의로 검거해 지난 1일 검찰에 송치했다고 10일 밝혔다.', '\\n', '경찰에 따르면 지난 3월27일 오후 2시30분께 서울 강남구 대치동 인근에서 전동휠을 타고 가던 A씨는 한 아파트 후문에서 나오던 9세 어린이를 쳐 다리에 골절상 등을 입게 했다.', '\\n', 'A씨는 사고를 당한 어린이를 안아서 집에 데려다 준 뒤 본인이 낸 사고가 아닌 척 행동한 것으로 알려졌다.', '\\n', '사건은 해당 어린이가 보호자와 병원으로 가던 중 \"전동휠에 부딪혔다\"고 말하면서 경찰에 신고됐다. 경찰은 사고 현장 주변 폐쇄회로(CC)TV 60여대를 분석, A씨를 특정해 검거했다.', '\\n', '경찰은 \"최근 전동휠이나 퀵보드 관련 교통사고 발생 시 운전자로서 제대로 조치하지 않아 형사처벌 대상이 되거나 면허 취소가 되는 경우가 일어나고 있다\"면서 \"안전 속도와 안전 장구 등 도로교통법상 법규를 준수해야 한다\"고 당부했다.', '\\n', '[email protected] ', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '관련 태그', '\\n', '\\n', '\\n', '연재', '\\n', ' MCCP-856 연재 영역 수정 ', '\\n', '\\n', '\\n', ' //MCCP-856 연재 영역 수정 ', '\\n', '더보기', '\\n', '\\n', '\\n', '<저작권자ⓒ 공감언론 뉴시스통신사. 무단전재-재배포 금지.>', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', ' 언론사 관련기사(주요뉴스) ', '\\n', '\\n', '뉴시스 주요 뉴스', '해당 언론사로 연결됩니다.', '\\n', '\\n', \"'술 마시기 게임' 빙자해 집단성폭행 저지른 4명 실형\", '\\n', '기안84, 웹툰서 장애인 희화화…관련단체 \"공개 사과하라\"', '\\n', '여학생 일부러 차로 친 후 납치해 성폭행한 30대', '\\n', '한지성 음주여부 확인 2주 걸려… 술마신 남편 책임은', '\\n', '고등학교서 동급생 성추행 논란…수사의뢰', '\\n', '\"부적 주겠다\" 점 봤던 여성 성폭행한 무속인 징역 6년', '\\n', '강제추행 40대, 꽁초 DNA로 11년 전 성범죄도 들통', '\\n', '30대 여경 숨진채 발견…\"상관 때문에 힘들어 했다\"', '\\n', '환자 성폭행 혐의 정신과 의사 수사…\"그루밍 성폭력\" 주장', '\\n', \"'감히 이별을 통보해?'…전 여친 흉기로 마구 찌른 男 체포\", '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '\\n', '공지', '\\n', \"똑똑한 뉴스 챗봇 '뉴스봇'을 소개합니다.\", '\\n', '\\n', '\\n', '\\n', '\\n', '\\n']\n" ], [ "from selenium import webdriver\nimport json\n\ndriver = webdriver.Chrome()\ndriver.get('https://media.daum.net/society/')\ndriver.find_element_by_xpath('//*[@id=\"cSub\"]/div/div[1]/div[1]/div/strong/a').click()\ndriver.implicitly_wait(5)\nhtml = driver.page_source\ndaumnews = BeautifulSoup(html, \"lxml\")\nlists = daumnews.select(\"p\")\n\ndata = {}\n\nfor contents in lists:\n a = contents.text\nprint(a)\n \nwith open('daumnews-society.json', 'w+') as json_file:\n json.dump(data, 
json_file)\n\n#ensure_ascii=False, indent='\\t'\n# encoding='utf-8'\n#driver.close()", "그럼 국회에서 드러눕고 깽판친 한국당 시키들은 왜 구속안시키는건데\n" ], [ "driver.close()", "_____no_output_____" ], [ "from bs4 import BeautifulSoup\nimport requests\n\nhtml = download(\"https://media.daum.net/society/\")\ndaumnews = BeautifulSoup(html.text, \"lxml\")", "_____no_output_____" ], [ "req = requests.get(daumnews)\nhtml = req.content\nsoup = BeautifulSoup(html, 'lxml')", "_____no_output_____" ], [ "#!/usr/bin/env python3\n#-*- coding: utf-8 -*\n\n\"\"\"\n네이버 경제 뉴스 중 증권관련 뉴스의 기사 URL을 수집합니다. 최근 10개의 페이지만 가져오겠습니다.\n\"\"\"\n\nimport time\nimport re\nimport requests\n\neval_d = \"20190511\"\noutput_file_name = \"DaumNews_Urls.txt\"\noutput_file = open(output_file_name, \"w\", encoding=\"utf-8\")\npage_num = 1\nmax_page_num = 2\n\nuser_agent = \"'Mozilla/5.0\"\nheaders ={\"User-Agent\" : user_agent}\n\nwhile page_num<=max_page_num:\n\n page_url = \"https://media.daum.net/breakingnews/society\"\n response = requests.get(page_url, headers=headers)\n html = response.text\n\n \"\"\"\n 주어진 HTML에서 기사 URL을 추출한다.\n \"\"\"\n url_frags = re.findall('<a href=\"(.*?)\"',html)\n\n urls = []\n \n for url_frag in url_frags:\n urls.append(url_frag)\n\n for url in urls:\n print(url, file=output_file)\n time.sleep(2)\n\n page_num+=1\n\noutput_file.close()\n#[출처] 증권뉴스 데이터 수집(1/3)|작성자 엉드루", "_____no_output_____" ], [ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n네이버 뉴스 기사를 수집한다.\n\"\"\"\n\nimport time\nimport requests\nimport os\n\ndef get_url_file_name() :\n \"\"\"\n url 파일 이름을 받아 돌려준다.\n :return:\n \"\"\"\n\n url_file_name = input(\"Enter url file name : \")\n\n return url_file_name\n\ndef get_output_file_name():\n \"\"\"\n 철력 파일의 이름을 입력받아 돌려준다.\n :return:\n \"\"\"\n\n output_file_name = input(\"Enter output file name : \")\n\n return output_file_name\n\ndef open_url_file(url_file_name):\n \"\"\"\n URL 파일을 연다.\n :param url_file_name:\n :return:\n \"\"\"\n\n url_file = open(url_file_name, \"r\", encoding 
=\"utf-8\")\n\n return url_file\n\ndef create_output_file(output_file_name):\n \"\"\"\n 출력 파일을 생성한다.\n :param output_file_name:\n :return:\n \"\"\"\n\n output_file = open(output_file_name, \"w\", encoding='utf-8')\n\n return output_file\n\ndef gen_print_url(url_line):\n \"\"\"\n 주어진 기사 링크 URL로 부터 인쇄용 URL을 만들어 돌려준다.\n :param url_line:\n :return:\n \"\"\"\n article_id = url_line[(len(url_line)-24):len(url_line)]\n print_url = \"https://media.daum.net/breakingnews/society\" + article_id\n\n return print_url\n\ndef get_html(print_url) :\n \"\"\"\n 주어진 인쇄용 URL에 접근하여 HTML을 읽어서 돌려준다.\n :param print_url:\n :return:\n \"\"\"\n user_agent = \"'Mozilla/5.0\"\n headers ={\"User-Agent\" : user_agent}\n\n response = requests.get(print_url, headers=headers)\n html = response.text\n\n return html\n\ndef write_html(output_file, html):\n \"\"\"\n 주어진 HTML 텍스트를 출력 파일에 쓴다.\n :param output_file:\n :param html:\n :return:\n \"\"\"\n\n output_file.write(\"{}\\n\".format(html))\n output_file.write(\"@@@@@ ARTICLE DELMITER @@@@\\n\")\n\ndef pause():\n \"\"\"\n 3초동안 쉰다.\n :return:\n \"\"\"\n time.sleep(3)\n\ndef close_output_file(output_file):\n \"\"\"\n 출력 파일을 닫는다.\n :param output_file:\n :return:\n \"\"\"\n\n output_file.close()\n\ndef close_url_file(url_file):\n \"\"\"\n URL 파일을 닫는다.\n :param url_file:\n :return:\n \"\"\"\n\n url_file.close()\n\ndef main():\n \"\"\"\n 네이버 뉴스기사를 수집한다.\n :return:\n \"\"\"\n\n url_file_name = get_url_file_name()\n output_file_name = get_output_file_name()\n\n url_file = open_url_file(url_file_name)\n output_file = create_output_file(output_file_name)\n\n for line in url_file:\n print_url = gen_print_url(line)\n html = get_html(print_url)\n write_html(output_file,html)\n\n close_output_file(output_file)\n close_url_file(url_file)\n\nmain()\n#[출처] 증권뉴스데이터 수집(2/3편)|작성자 엉드루", "Enter url file name : DaumNews_Urls.txt\nEnter output file name : society-20190511.txt\n" ], [ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n네이버 뉴스 기사 HTML에서 순수 텍스트 기사를 
추출한다.\n\"\"\"\n\nimport bs4\nimport time\nimport requests\nimport os\n\n\nARTICLE_DELIMITER = \"@@@@@ ARTICLE DELMITER @@@@\\n\"\nTITLE_START_PAT = '<h3 class=\"tit_view\" data-translation=\"\">'\nTITLE_END_PAT = '</h3>'\nDATE_TIME_START_PAT = '<span class=\"txt_info\">입력 </span>'\nBODY_START_PAT = '<p dmcf-pid=\"\" dmcf-ptype=\"\">'\nBODY_END_PAT = '</p>'\nTIDYUP_START_PAT = '<div class=\"foot_view\">'\n\ndef get_html_file_name():\n \"\"\"\n 사용자로 부터 HTML 파일 이름을 입력받아 돌려준다.\n :return:\n \"\"\"\n\n html_file_name = input(\"Enter HTML File name : \")\n\n return html_file_name\n\ndef get_text_file_name():\n \"\"\"\n 사용자로부터 텍스트 파일 이름을 입력받아 돌려준다.\n :return:\n \"\"\"\n\n text_file_name = input(\"Enter text file name : \")\n\n return text_file_name\n\ndef open_html_file(html_file_name):\n \"\"\"\n HTML 기사 파일을 열어서 파일 객체를 돌려준다.\n :param html_file_name:\n :return:\n \"\"\"\n\n html_file = open(html_file_name, \"r\", encoding=\"utf-8\")\n\n return html_file\n\ndef create_text_file(text_file_name):\n \"\"\"\n 텍스트 기사 파일을 만들어 파일 객체를 돌려준다.\n :param text_file_name:\n :return:\n \"\"\"\n\n text_file = open(text_file_name, \"w\", encoding=\"utf-8\")\n\n return text_file\n\ndef read_html_article(html_file):\n \"\"\"\n HTML 파일에서 기사 하나를 읽어서 돌려준다.\n :param html_file:\n :return:\n \"\"\"\n\n lines = []\n for line in html_file:\n if line.startswith(ARTICLE_DELIMITER):\n html_text = \"\".join(lines).strip()\n return html_text\n lines.append(line)\n\n return None\n\ndef ext_title(html_text):\n \"\"\"\n HTML 기사에서 제목을 추출하여 돌려준다.\n :param html_text:\n :return:\n \"\"\"\n p = html_text.find(TITLE_START_PAT)\n q = html_text.find(TITLE_END_PAT)\n title = html_text[p + len(TITLE_START_PAT):q]\n title = title.strip()\n\n return title\n\n\ndef ext_date_time(html_text):\n \"\"\"\n HTML 기사에서 날짜와 시간을 추출하여 돌려준다.\n :param html_text:\n :return:\n \"\"\"\n start_p = html_text.find(DATE_TIME_START_PAT)+len(DATE_TIME_START_PAT)\n end_p = start_p + 10\n date_time = html_text[start_p:end_p]\n date_time = 
date_time.strip()\n\n return date_time\n\ndef strip_html(html_body):\n \"\"\"\n HTML 본문에서 HTML 태그를 제거하고 돌려준다.\n :param html_body:\n :return:\n \"\"\"\n page = bs4.BeautifulSoup(html_body, \"html.parser\")\n body = page.text\n\n return body\n\ndef tidyup(body):\n \"\"\"\n 본문에서 필요없는 부분을 자르고 돌려준다.\n :param body:\n :return:\n \"\"\"\n\n p = body.find(TIDYUP_START_PAT)\n body = body[:p]\n body = body.strip()\n\n return body\n\ndef ext_body(html_text):\n \"\"\"\n HTML 기사에서 본문을 추출하여 돌려준다.\n :param html_text:\n :return:\n \"\"\"\n\n p = html_text.find(BODY_START_PAT)\n q = html_text.find(BODY_END_PAT)\n html_body = html_text[p + len(BODY_START_PAT):q]\n html_body = html_body.replace(\"<br />\",\"\\n\")\n html_body = html_body.strip()\n body = strip_html(html_body)\n body = tidyup(body)\n\n return body\n\ndef write_article(text_file, title, date_time, body):\n \"\"\"\n 텍스트 파일에 항목이 구분된 기사를 출력한다.\n :param text_file:\n :param title:\n :param date_time:\n :param body:\n :return:\n \"\"\"\n\n text_file.write(\"{}\\n\".format(title))\n text_file.write(\"{}\\n\".format(date_time))\n text_file.write(\"{}\\n\".format(body))\n text_file.write(\"{}\\n\".format(ARTICLE_DELIMITER))\n\ndef main():{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 6,\n \"metadata\": {},\n \"outputs\": [\n {\n \"name\": \"stdout\",\n \"output_type\": \"stream\",\n \"text\": [\n \"^C\\n\"\n ]\n }\n ],\n \"source\": [\n \"!pip install newspaper3k\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": 112,\n \"metadata\": {},\n \"outputs\": [\n {\n \"name\": \"stdout\",\n \"output_type\": \"stream\",\n \"text\": [\n \"[포토] G2 '밀키' 미하엘, \\\"이제 IG만 남았어요\\\"\\n\",\n \"13일 오후 베트남 하노이 국립 컨벤션센터에서 열린 MSI 그룹 스테이지 4일차 G2 e스포츠와 플래시 울브즈의 경기서 승리한 G2 '밀키' 미하엘 메흘레가 베트남어 방송 인터뷰를 하고 있다.\\n\",\n \"\\n\",\n \"하노이(베트남) ㅣ 김용우 기자 [email protected]\\n\",\n \"\\n\",\n \"포모스와 함께 즐기는 e스포츠, 게임 그 이상을 향해!\\n\",\n \"\\n\",\n \"Copyrights ⓒ FOMOS(http://www.fomos.kr) 무단 전재 및 재배포 금지\\n\"\n ]\n }\n ],\n 
\"source\": [\n \"from newspaper import Article\\n\",\n \"\\n\",\n \"'''\\n\",\n \"http://v.media.daum.net/v/20190513202543774\\n\",\n \"http://v.media.daum.net/v/20190513202526771\\n\",\n \"http://v.media.daum.net/v/20190513202442768\\n\",\n \"http://v.media.daum.net/v/20190513202100733\\n\",\n \"http://v.media.daum.net/v/20190513201951713\\n\",\n \"http://v.media.daum.net/v/20190513201912711\\n\",\n \"http://v.media.daum.net/v/20190513201708688\\n\",\n \"http://v.media.daum.net/v/20190513201646686\\n\",\n \"http://v.media.daum.net/v/20190513201515670\\n\",\n \"http://v.media.daum.net/v/20190513201343654\\n\",\n \"http://v.media.daum.net/v/20190513201042627\\n\",\n \"http://v.media.daum.net/v/20190513200900613\\n\",\n \"http://v.media.daum.net/v/20190513200731602\\n\",\n \"http://v.media.daum.net/v/20190513200601595\\n\",\n \"http://v.media.daum.net/v/20190513200601594\\n\",\n \"http://v.media.daum.net/v/20190513201012624\\n\",\n \"http://v.media.daum.net/v/20190513200300564\\n\",\n \"'''\\n\",\n \"\\n\",\n \"url = 'http://v.media.daum.net/v/20190513202526771'\\n\",\n \"a = Article(url, language='ko')\\n\",\n \"a.download()\\n\",\n \"a.parse()\\n\",\n \"print(a.title)\\n\",\n \"print(a.text)\\n\",\n \"\\n\",\n \"with open(\\\"F:/daumnews/sports/02.txt\\\", \\\"w\\\") as f:\\n\",\n \" f.write(a.text)\\n\",\n \"f.close()\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": 6,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"from newspaper import Article\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": 29,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"from bs4 import BeautifulSoup\\n\",\n \"import requests\\n\",\n \"\\n\",\n \"html = download(\\\"https://media.daum.net/breakingnews/culture\\\")\\n\",\n \"daumnews = BeautifulSoup(html.text, \\\"lxml\\\")\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": 30,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"daumnewstitellists = 
daumnews.select(\\\"div > strong > a\\\")\\n\",\n \"k = []\\n\",\n \"\\n\",\n \"t = 18\\n\",\n \"\\n\",\n \"for links in daumnewstitellists:\\n\",\n \" l = links.get('href')\\n\",\n \" k.append(l)\\n\",\n \"\\n\",\n \"for i in range(0,17):\\n\",\n \" url = k[i]\\n\",\n \" a = Article(url, language='ko')\\n\",\n \" a.download()\\n\",\n \" a.parse()\\n\",\n \" with open(\\\"F:/daumnews/culture/%d.txt\\\" % int(i+t), \\\"w\\\", encoding=\\\"utf-8\\\") as f:\\n\",\n \" f.write(a.title)\\n\",\n \" f.write(a.text)\\n\",\n \" f.close()\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"from bs4 import BeautifulSoup\\n\",\n \"import requests\\n\",\n \"\\n\",\n \"html = download(\\\"https://media.daum.net/breakingnews/sports\\\")\\n\",\n \"daumnews = BeautifulSoup(html.text, \\\"lxml\\\")\\n\",\n \"\\n\",\n \"daumnewstitellists = daumnews.select(\\\"div > strong > a\\\")\\n\",\n \"\\n\",\n \"for links in daumnewstitellists:\\n\",\n \" #print(links.text)\\n\",\n \" print(links.get('href'))\\n\",\n \" #print()\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": 3,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"def download(url, params={}, retries=3):\\n\",\n \" resp = None\\n\",\n \" \\n\",\n \" header = {\\\"user-agent\\\": \\\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36\\\"}\\n\",\n \" \\n\",\n \" try:\\n\",\n \" resp = requests.get(url, params=params, headers = header)\\n\",\n \" resp.raise_for_status()\\n\",\n \" except requests.exceptions.HTTPError as e:\\n\",\n \" if 500 <= e.response.status_code < 600 and retries > 0:\\n\",\n \" print(retries)\\n\",\n \" resp = download(url, params, retries - 1)\\n\",\n \" else:\\n\",\n \" print(e.response.status_code)\\n\",\n \" print(e.response.reason)\\n\",\n \" print(e.request.headers)\\n\",\n \"\\n\",\n \" return resp\"\n ]\n },\n {\n \"cell_type\": 
\"code\",\n \"execution_count\": 117,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"from newspaper import Article\\n\",\n \"from bs4 import BeautifulSoup\\n\",\n \"import requests\\n\",\n \"\\n\",\n \"html = download(\\\"https://media.daum.net/breakingnews/sports\\\")\\n\",\n \"daumnews = BeautifulSoup(html.text, \\\"lxml\\\")\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": 139,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"daumnewstitellists = daumnews.select(\\\"div > strong > a\\\")\\n\",\n \"\\n\",\n \"for links in daumnewstitellists:\\n\",\n \" b = links.get('href')\\n\",\n \" a = Article(b, language='ko')\\n\",\n \" a.download()\\n\",\n \" a.parse() \\n\",\n \" with open(\\\"F:/daumnews/sports/01.txt\\\", \\\"w\\\") as f:\\n\",\n \" f.write(a.text)\\n\",\n \" f.close()\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": []\n }\n ],\n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"Python 3\",\n \"language\": \"python\",\n \"name\": \"python3\"\n },\n \"language_info\": {\n \"codemirror_mode\": {\n \"name\": \"ipython\",\n \"version\": 3\n },\n \"file_extension\": \".py\",\n \"mimetype\": \"text/x-python\",\n \"name\": \"python\",\n \"nbconvert_exporter\": \"python\",\n \"pygments_lexer\": \"ipython3\",\n \"version\": \"3.7.3\"\n }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n\n \"\"\"\n 네이트 뉴스 기사 HTML에서 순수 텍스트 기사를 추출한다.\n :return:\n \"\"\"\n\n html_file_name = get_html_file_name()\n text_file_name = get_text_file_name()\n html_file = open_html_file(html_file_name)\n text_file = create_text_file(text_file_name)\n\n while True:\n\n html_text = read_html_article(html_file)\n\n if not html_text:\n break\n\n title = ext_title(html_text)\n date_time = ext_date_time(html_text)\n body = ext_body(html_text)\n write_article(text_file, title, date_time, body)\n\n html_file.close()\n text_file.close()\n\nmain()", "Enter HTML File name : 
society-20190511.txt\nEnter text file name : society-20190511contentsall.txt\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06899ca1424d963c9296da32df2653028d6a5cc
9,180
ipynb
Jupyter Notebook
sklearn/sklearn learning/demonstration/auto_examples_jupyter/applications/plot_species_distribution_modeling.ipynb
wangyendt/deeplearning_models
47883b6c65b8d05a0d1c5737f1552df6476ded34
[ "MIT" ]
1
2020-06-04T11:10:27.000Z
2020-06-04T11:10:27.000Z
sklearn/sklearn learning/demonstration/auto_examples_jupyter/applications/plot_species_distribution_modeling.ipynb
wangyendt/deeplearning_models
47883b6c65b8d05a0d1c5737f1552df6476ded34
[ "MIT" ]
null
null
null
sklearn/sklearn learning/demonstration/auto_examples_jupyter/applications/plot_species_distribution_modeling.ipynb
wangyendt/deeplearning_models
47883b6c65b8d05a0d1c5737f1552df6476ded34
[ "MIT" ]
null
null
null
170
6,943
0.603704
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Species distribution modeling\n\n\nModeling species' geographic distributions is an important\nproblem in conservation biology. In this example we\nmodel the geographic distribution of two south american\nmammals given past observations and 14 environmental\nvariables. Since we have only positive examples (there are\nno unsuccessful observations), we cast this problem as a\ndensity estimation problem and use the :class:`sklearn.svm.OneClassSVM`\nas our modeling tool. The dataset is provided by Phillips et. al. (2006).\nIf available, the example uses\n`basemap <https://matplotlib.org/basemap/>`_\nto plot the coast lines and national boundaries of South America.\n\nThe two species are:\n\n - `\"Bradypus variegatus\"\n <http://www.iucnredlist.org/details/3038/0>`_ ,\n the Brown-throated Sloth.\n\n - `\"Microryzomys minutus\"\n <http://www.iucnredlist.org/details/13408/0>`_ ,\n also known as the Forest Small Rice Rat, a rodent that lives in Peru,\n Colombia, Ecuador, Peru, and Venezuela.\n\nReferences\n----------\n\n * `\"Maximum entropy modeling of species geographic distributions\"\n <http://rob.schapire.net/papers/ecolmod.pdf>`_\n S. J. Phillips, R. P. Anderson, R. E. 
Schapire - Ecological Modelling,\n 190:231-259, 2006.\n", "_____no_output_____" ] ], [ [ "# Authors: Peter Prettenhofer <[email protected]>\n# Jake Vanderplas <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom time import time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.utils import Bunch\nfrom sklearn.datasets import fetch_species_distributions\nfrom sklearn import svm, metrics\n\n# if basemap is available, we'll use it.\n# otherwise, we'll improvise later...\ntry:\n from mpl_toolkits.basemap import Basemap\n basemap = True\nexcept ImportError:\n basemap = False\n\nprint(__doc__)\n\n\ndef construct_grids(batch):\n \"\"\"Construct the map grid from the batch object\n\n Parameters\n ----------\n batch : Batch object\n The object returned by :func:`fetch_species_distributions`\n\n Returns\n -------\n (xgrid, ygrid) : 1-D arrays\n The grid corresponding to the values in batch.coverages\n \"\"\"\n # x,y coordinates for corner cells\n xmin = batch.x_left_lower_corner + batch.grid_size\n xmax = xmin + (batch.Nx * batch.grid_size)\n ymin = batch.y_left_lower_corner + batch.grid_size\n ymax = ymin + (batch.Ny * batch.grid_size)\n\n # x coordinates of the grid cells\n xgrid = np.arange(xmin, xmax, batch.grid_size)\n # y coordinates of the grid cells\n ygrid = np.arange(ymin, ymax, batch.grid_size)\n\n return (xgrid, ygrid)\n\n\ndef create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):\n \"\"\"Create a bunch with information about a particular organism\n\n This will use the test/train record arrays to extract the\n data specific to the given species name.\n \"\"\"\n bunch = Bunch(name=' '.join(species_name.split(\"_\")[:2]))\n species_name = species_name.encode('ascii')\n points = dict(test=test, train=train)\n\n for label, pts in points.items():\n # choose points associated with the desired species\n pts = pts[pts['species'] == species_name]\n bunch['pts_%s' % label] = pts\n\n # determine coverage values for each of the 
training & testing points\n ix = np.searchsorted(xgrid, pts['dd long'])\n iy = np.searchsorted(ygrid, pts['dd lat'])\n bunch['cov_%s' % label] = coverages[:, -iy, ix].T\n\n return bunch\n\n\ndef plot_species_distribution(species=(\"bradypus_variegatus_0\",\n \"microryzomys_minutus_0\")):\n \"\"\"\n Plot the species distribution.\n \"\"\"\n if len(species) > 2:\n print(\"Note: when more than two species are provided,\"\n \" only the first two will be used\")\n\n t0 = time()\n\n # Load the compressed data\n data = fetch_species_distributions()\n\n # Set up the data grid\n xgrid, ygrid = construct_grids(data)\n\n # The grid in x,y coordinates\n X, Y = np.meshgrid(xgrid, ygrid[::-1])\n\n # create a bunch for each species\n BV_bunch = create_species_bunch(species[0],\n data.train, data.test,\n data.coverages, xgrid, ygrid)\n MM_bunch = create_species_bunch(species[1],\n data.train, data.test,\n data.coverages, xgrid, ygrid)\n\n # background points (grid coordinates) for evaluation\n np.random.seed(13)\n background_points = np.c_[np.random.randint(low=0, high=data.Ny,\n size=10000),\n np.random.randint(low=0, high=data.Nx,\n size=10000)].T\n\n # We'll make use of the fact that coverages[6] has measurements at all\n # land points. This will help us decide between land and water.\n land_reference = data.coverages[6]\n\n # Fit, predict, and plot for each species.\n for i, species in enumerate([BV_bunch, MM_bunch]):\n print(\"_\" * 80)\n print(\"Modeling distribution of species '%s'\" % species.name)\n\n # Standardize features\n mean = species.cov_train.mean(axis=0)\n std = species.cov_train.std(axis=0)\n train_cover_std = (species.cov_train - mean) / std\n\n # Fit OneClassSVM\n print(\" - fit OneClassSVM ... 
\", end='')\n clf = svm.OneClassSVM(nu=0.1, kernel=\"rbf\", gamma=0.5)\n clf.fit(train_cover_std)\n print(\"done.\")\n\n # Plot map of South America\n plt.subplot(1, 2, i + 1)\n if basemap:\n print(\" - plot coastlines using basemap\")\n m = Basemap(projection='cyl', llcrnrlat=Y.min(),\n urcrnrlat=Y.max(), llcrnrlon=X.min(),\n urcrnrlon=X.max(), resolution='c')\n m.drawcoastlines()\n m.drawcountries()\n else:\n print(\" - plot coastlines from coverage\")\n plt.contour(X, Y, land_reference,\n levels=[-9998], colors=\"k\",\n linestyles=\"solid\")\n plt.xticks([])\n plt.yticks([])\n\n print(\" - predict species distribution\")\n\n # Predict species distribution using the training data\n Z = np.ones((data.Ny, data.Nx), dtype=np.float64)\n\n # We'll predict only for the land points.\n idx = np.where(land_reference > -9999)\n coverages_land = data.coverages[:, idx[0], idx[1]].T\n\n pred = clf.decision_function((coverages_land - mean) / std)\n Z *= pred.min()\n Z[idx[0], idx[1]] = pred\n\n levels = np.linspace(Z.min(), Z.max(), 25)\n Z[land_reference == -9999] = -9999\n\n # plot contours of the prediction\n plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)\n plt.colorbar(format='%.2f')\n\n # scatter training/testing points\n plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],\n s=2 ** 2, c='black',\n marker='^', label='train')\n plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],\n s=2 ** 2, c='black',\n marker='x', label='test')\n plt.legend()\n plt.title(species.name)\n plt.axis('equal')\n\n # Compute AUC with regards to background points\n pred_background = Z[background_points[0], background_points[1]]\n pred_test = clf.decision_function((species.cov_test - mean) / std)\n scores = np.r_[pred_test, pred_background]\n y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]\n fpr, tpr, thresholds = metrics.roc_curve(y, scores)\n roc_auc = metrics.auc(fpr, tpr)\n plt.text(-35, -70, \"AUC: %.3f\" % roc_auc, 
ha=\"right\")\n print(\"\\n Area under the ROC curve : %f\" % roc_auc)\n\n print(\"\\ntime elapsed: %.2fs\" % (time() - t0))\n\n\nplot_species_distribution()\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
d0689c06870a1babe3ddb4e6fd934eaf9880b904
241,132
ipynb
Jupyter Notebook
NaiveCoverage.ipynb
dk1010101/astroplay
88dd0d9a804876ff0948a5617dcb10671773bcb6
[ "MIT" ]
null
null
null
NaiveCoverage.ipynb
dk1010101/astroplay
88dd0d9a804876ff0948a5617dcb10671773bcb6
[ "MIT" ]
null
null
null
NaiveCoverage.ipynb
dk1010101/astroplay
88dd0d9a804876ff0948a5617dcb10671773bcb6
[ "MIT" ]
null
null
null
889.785978
145,568
0.955854
[ [ [ "from ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\nimport matplotlib.cm as cm\nimport matplotlib\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\n%matplotlib inline\nimport numpy as np\nfrom sklearn.neighbors import KDTree", "_____no_output_____" ] ], [ [ "First create some random 3d data points", "_____no_output_____" ] ], [ [ "N = 10 # The number of points\npoints = np.random.rand(N, 3)", "_____no_output_____" ] ], [ [ "Now create KDTree from these so that we can look for the neighbours. TBH we don't really need KDTree. We can do this probably better and easier with a distance matrix but this will do for now.", "_____no_output_____" ] ], [ [ "kdt = KDTree(points)", "_____no_output_____" ] ], [ [ "Test by looking for the two neighbours of the first point", "_____no_output_____" ] ], [ [ "kdt.query([points[0]], 3, False)", "_____no_output_____" ] ], [ [ "So the neighbous of 0 are point 2 and 4. 
ok.", "_____no_output_____" ], [ "Let's plot the 3d points and see them", "_____no_output_____" ] ], [ [ "x = [p[0] for p in points]\ny = [p[1] for p in points]\nz = [p[2] for p in points]\nfig = plt.figure(figsize=(8, 8), constrained_layout=True)\nax = fig.add_subplot(projection='3d')\n\nax.scatter(points[0][0],points[0][1],points[0][2], c='yellow',s=75)\nax.scatter(x[1:],y[1:],z[1:],c='blue',s=45)\nfor i, p in enumerate(points):\n ax.text(p[0], p[1], p[2], str(i), fontsize=14)\nplt.show()", "_____no_output_____" ] ], [ [ "Now we will look at the the algo and if it works...", "_____no_output_____" ] ], [ [ "def gen_tris(points):\n processed_points = set()\n points_to_do = set(range(len(points)))\n tris = []\n # pick the first three points\n start = 0\n nns = kdt.query([points[start]], N, False)\n work_pts = nns[0][:3]\n tris.append(Poly3DCollection([[points[i] for i in work_pts]], edgecolors='black', facecolors='w', linewidths=1, alpha=0.8))\n for p in work_pts:\n processed_points.add(p)\n print(f'added tri [{work_pts[0]}, {work_pts[1]}, {work_pts[2]}]')\n start = work_pts[1]\n while True:\n nns = kdt.query([points[start]], N, False)\n for p in nns[0]:\n if p in processed_points:\n continue\n nns2 = kdt.query([points[p]], N, False)\n for p2 in nns2[0]:\n if p2 in processed_points and p2 != start:\n break\n print(f'added tri [{start}, {p}, {p2}]')\n tris.append(Poly3DCollection([[points[start], points[p], points[p2]]],edgecolors='black',facecolors='w', linewidths=1, alpha=0.8))\n processed_points.add(p)\n start = p\n break\n if len(processed_points) == len(points):\n break\n return tris\n\ntris = gen_tris(points)\n# and show the points and the triangles\nfig = plt.figure(figsize=(10, 10), constrained_layout=True)\n# ax = Axes3D(fig, auto_add_to_figure=False)\nax = fig.add_subplot(111, projection='3d')\n\nfig.add_axes(ax)\nax.scatter(points[0][0],points[0][1],points[0][2], c='yellow',s=75)\nax.scatter(x[1:],y[1:],z[1:],c='blue',s=45)\n\nfor p in tris:\n 
ax.add_collection3d(p)\n\nfor i, p in enumerate(points):\n ax.text(p[0], p[1], p[2], str(i), fontsize=16)\n \nplt.show()\n", "added tri [0, 1, 6]\nadded tri [1, 2, 0]\nadded tri [2, 4, 6]\nadded tri [4, 3, 6]\nadded tri [3, 8, 6]\nadded tri [8, 7, 3]\nadded tri [7, 9, 3]\nadded tri [9, 5, 3]\n" ] ], [ [ "It does. Sort of...", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d068b373338b4b78bf4997a4882b3b59c19a2ece
15,198
ipynb
Jupyter Notebook
notebooks/stats/Generic_Segmentation-stats-Copy4.ipynb
utkarshojha/rewriting
347495da9f4a2c9802553e1ac79bb70929c60bca
[ "MIT" ]
526
2020-07-29T01:25:29.000Z
2022-03-22T02:52:29.000Z
notebooks/stats/Generic_Segmentation-stats-Copy4.ipynb
utkarshojha/rewriting
347495da9f4a2c9802553e1ac79bb70929c60bca
[ "MIT" ]
8
2020-08-05T11:44:14.000Z
2021-06-22T06:48:37.000Z
notebooks/stats/Generic_Segmentation-stats-Copy4.ipynb
utkarshojha/rewriting
347495da9f4a2c9802553e1ac79bb70929c60bca
[ "MIT" ]
74
2020-07-30T22:17:42.000Z
2022-03-02T06:06:10.000Z
27.187835
169
0.530004
[ [ [ "%pushd ../../", "_____no_output_____" ], [ "%env CUDA_VISIBLE_DEVICES=3", "_____no_output_____" ], [ "import json\n\nimport os\nimport sys\nimport tempfile\nfrom tqdm.auto import tqdm\n\nimport torch\nimport torchvision\nfrom torchvision import transforms\nfrom PIL import Image\nimport numpy as np\n\ntorch.cuda.set_device(0)", "_____no_output_____" ], [ "from netdissect import setting", "_____no_output_____" ], [ "segopts = 'netpqc'", "_____no_output_____" ], [ "segmodel, seglabels, _ = setting.load_segmenter(segopts)", "_____no_output_____" ], [ "segmodel.get_label_and_category_names()", "_____no_output_____" ], [ "!ls notebooks/stats/churches", "_____no_output_____" ], [ "import glob", "_____no_output_____" ], [ "ns = []\nfor f in glob.glob('/data/vision/torralba/ganprojects/placesgan/tracer/utils/samples/domes/*.png'):\n ns.append(int(os.path.split(f)[1][6:][:-4]))\n\nns = sorted(ns)", "_____no_output_____" ], [ "label2idx = {l: i for i, l in enumerate(seglabels)}", "_____no_output_____" ], [ "label2idx['dome']", "_____no_output_____" ], [ "label2idx['building']", "_____no_output_____" ], [ "label2idx['tree']", "_____no_output_____" ], [ "class Dataset():\n def __init__(self, before, before_prefix, after, after_prefix, device='cpu'):\n self.before = before\n self.before_prefix = before_prefix\n self.after = after\n self.after_prefix = after_prefix\n self.device = device\n \n def __getitem__(self, key):\n before_seg = torch.load(os.path.join(self.before, f'{self.before_prefix}{key}.pth'), map_location=self.device)\n after_seg = torch.load(os.path.join(self.after, f'{self.after_prefix}{key}.pth'), map_location=self.device)\n mapped = after_seg.permute(1, 2, 0)[(before_seg == 1708).sum(0).nonzero(as_tuple=True)]\n assert mapped.shape[1] == 6\n return (mapped == 5).sum(), mapped.shape[0]\n\nclass Sampler(torch.utils.data.Sampler):\n def __init__(self, indices):\n self.indices = indices\n \n def __len__(self):\n return len(self.indices)\n \n def 
__iter__(self):\n yield from self.indices", "_____no_output_____" ], [ "def compute(before, before_pref, after, after_pref, tgt=5, tgtc=0, src=1708, srcc=2, ns=ns):\n total = 0\n count = 0\n\n import time\n\n for subn in tqdm(torch.as_tensor(ns).split(100)):\n t0 = time.time()\n before_segs = [\n torch.load(os.path.join(before, f'{before_pref}{n}.pth'), map_location='cpu') for n in subn]\n after_segs = [\n torch.load(os.path.join(after, f'{after_pref}{n}.pth'), map_location='cpu') for n in subn]\n t1 = time.time()\n before_segs = torch.stack(before_segs).cuda()\n after_segs = torch.stack(after_segs).cuda()\n mapped = after_segs[:, tgtc][before_segs[:, srcc] == src]\n t2 = time.time()\n total += (mapped == tgt).sum()\n count += mapped.shape[0]\n print(total, count, t1-t0,t2-t1)\n\n return total.item(), count", "_____no_output_____" ], [ "before = 'notebooks/stats/churches/domes'\nbefore_pref = 'domes_'\nafter = 'notebooks/stats/churches/dome2tree/ours'\nafter_pref = 'dome2tree_'\n\ndome2tree_ours = compute(before, before_pref, after, after_pref, tgt=4)", "_____no_output_____" ], [ "before = 'notebooks/stats/churches/domes'\nbefore_pref = 'domes_'\nafter = 'notebooks/stats/churches/dome2tree/overfit'\nafter_pref = 'image_'\n\ndome2tree_overfit = compute(before, before_pref, after, after_pref, tgt=4)", "_____no_output_____" ], [ "before = 'notebooks/stats/churches/church'\nbefore_pref = 'church_'\nafter = 'notebooks/stats/churches/dome2tree_all/ours'\nafter_pref = 'dome2tree_all_'\n\ndome2tree_all_ours = compute(before, before_pref, after, after_pref, ns=torch.arange(10000))", "_____no_output_____" ], [ "dome2tree_all_overfit[0] / dome2tree_all_overfit[1]", "_____no_output_____" ], [ "!ls /data/vision/torralba/ganprojects/placesgan/tracer/results/ablations/stylegan-church-dome2tree-8-1-2001-0.0001-overfit", "_____no_output_____" ], [ "Image.open('/data/vision/torralba/ganprojects/placesgan/tracer/utils/samples/church/church_1.png')", "_____no_output_____" ], [ 
"Image.open('/data/vision/torralba/ganprojects/placesgan/tracer/utils/samples/dome2spire_all/dome2spire_all_1.png')", "_____no_output_____" ], [ "Image.open('/data/vision/torralba/distillation/gan_rewriting/results/ablations/stylegan-church-dome2spire-8-10-2001-0.05-ours-10-stdcovariance/images/image_0.png')", "_____no_output_____" ], [ "before = 'notebooks/stats/churches/church'\nbefore_pref = 'church_'\nafter = 'notebooks/stats/churches/dome2tree_all/overfit'\nafter_pref = 'image_'\n\ndome2tree_all_overfit = compute(before, before_pref, after, after_pref, ns=torch.arange(10000), tgt=4)", "_____no_output_____" ], [ "before = 'notebooks/stats/churches/domes'\nbefore_pref = 'domes_'\nafter = 'notebooks/stats/churches/dome2spire/ours'\nafter_pref = 'dome2spire_'\n\nall_mapped = []\n\ntotal = 0\ncount = 0\n\nimport time\n\nfor subn in tqdm(torch.as_tensor(ns).split(100)):\n t0 = time.time()\n before_segs = [\n torch.load(os.path.join(before, f'{before_pref}{n}.pth'), map_location='cpu') for n in subn]\n after_segs = [\n torch.load(os.path.join(after, f'{after_pref}{n}.pth'), map_location='cpu') for n in subn]\n t1 = time.time()\n before_segs = torch.stack(before_segs).cuda()\n after_segs = torch.stack(after_segs).cuda()\n# mapped = after_segs.permute(0, 2, 3, 1)[before_segs[:, 2] == 1708]\n mapped = after_segs[:, 0][before_segs[:, 2] == 1708]\n# all_mapped.append()\n t2 = time.time()\n total += (mapped == 5).sum()\n count += mapped.shape[0]\n print(total, count, t1-t0,t2-t1)\n", "_____no_output_____" ], [ "before = 'notebooks/stats/churches/domes'\nbefore_pref = 'domes_'\nafter = 'notebooks/stats/churches/dome2spire/ours'\nafter_pref = 'dome2spire_'\n\ndataset = Dataset(before, before_pref, after, after_pref)\n\ndef wif(*args):\n torch.set_num_threads(8)\n \ndef cfn(l):\n return torch.stack([p[0] for p in l]).sum(), sum(p[1] for p in l)\n \n\nloader = torch.utils.data.DataLoader(dataset, num_workers=10, batch_size=50, sampler=Sampler(ns), collate_fn=cfn, 
worker_init_fn=wif)\n\nall_mapped = []\n\nfor mapped in tqdm(loader):\n all_mapped.append(mapped)\n", "_____no_output_____" ], [ "after_seg.permute(1, 2, 0)[(before_seg == 1708).to(torch.int64).sum(0).nonzero(as_tuple=True)].shape", "_____no_output_____" ], [ "!ls notebooks/stats/churches/dome2spire/ours", "_____no_output_____" ], [ "class UnsupervisedImageFolder(torchvision.datasets.ImageFolder):\n def __init__(self, root, transform=None, max_size=None, get_path=False):\n self.temp_dir = tempfile.TemporaryDirectory()\n os.symlink(root, os.path.join(self.temp_dir.name, 'dummy'))\n root = self.temp_dir.name\n super().__init__(root, transform=transform)\n self.get_path = get_path\n self.perm = None\n if max_size is not None:\n actual_size = super().__len__()\n if actual_size > max_size:\n self.perm = torch.randperm(actual_size)[:max_size].clone()\n logging.info(f\"{root} has {actual_size} images, downsample to {max_size}\")\n else:\n logging.info(f\"{root} has {actual_size} images <= max_size={max_size}\")\n\n def _find_classes(self, dir):\n return ['./dummy'], {'./dummy': 0}\n\n def __getitem__(self, key):\n if self.perm is not None:\n key = self.perm[key].item()\n sample = super().__getitem__(key)[0]\n if self.get_path:\n path, _ = self.samples[key]\n return sample, path\n else:\n return sample\n \n\n def __len__(self):\n if self.perm is not None:\n return self.perm.size(0)\n else:\n return super().__len__()", "_____no_output_____" ], [ "len(seglabels)", "_____no_output_____" ], [ "transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n", "_____no_output_____" ], [ "def process(img_path, seg_path, device='cuda', batch_size=128, **kwargs):\n os.makedirs(seg_path, exist_ok=True)\n\n dataset = UnsupervisedImageFolder(img_path, transform=transform, get_path=True)\n loader = torch.utils.data.DataLoader(dataset, num_workers=24, batch_size=batch_size, pin_memory=True) \n \n with torch.no_grad():\n for x, 
paths in tqdm(loader):\n segs = segmodel.segment_batch(x.to(device), **kwargs).detach().cpu()\n for path, seg in zip(paths, segs):\n k = os.path.splitext(os.path.basename(path))[0]\n torch.save(seg, os.path.join(seg_path, k + '.pth'))\n del segs", "_____no_output_____" ], [ "import glob", "_____no_output_____" ], [ "torch.backends.cudnn.benchmark=True", "_____no_output_____" ], [ "process(\n '/data/vision/torralba/ganprojects/placesgan/tracer/utils/samples/domes',\n 'churches/domes',\n batch_size=12)", "_____no_output_____" ], [ "process(\n '/data/vision/torralba/ganprojects/placesgan/tracer/utils/samples/dome2tree',\n 'churches/dome2tree/ours',\n batch_size=8)", "_____no_output_____" ], [ "process(\n '/data/vision/torralba/ganprojects/placesgan/tracer/utils/samples/dome2spire',\n 'churches/dome2spire/ours',\n batch_size=8)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d068b4d0beb2bb227ae2ba723fc062f36039bd3c
84,537
ipynb
Jupyter Notebook
notebooks/lectures_potsdam_201802/python_intro.ipynb
benbovy/python_short_course
f53d90b708ad70787877bd26d2da08d1043fec1f
[ "CC-BY-4.0" ]
null
null
null
notebooks/lectures_potsdam_201802/python_intro.ipynb
benbovy/python_short_course
f53d90b708ad70787877bd26d2da08d1043fec1f
[ "CC-BY-4.0" ]
null
null
null
notebooks/lectures_potsdam_201802/python_intro.ipynb
benbovy/python_short_course
f53d90b708ad70787877bd26d2da08d1043fec1f
[ "CC-BY-4.0" ]
null
null
null
18.317876
962
0.459609
[ [ [ "# Introduction to the Python language\n\n**Note**: This notebooks is not really a ready-to-use tutorial but rather serves as a table of contents that we will fill during the short course. It might later be useful as a memo, but it clearly lacks important notes and explanations.\n\nThere are lots of tutorials that you can find online, though. A useful ressource is for example the [The Python Tutorial](https://docs.python.org/3/tutorial/).\n\nTopics covered:\n\n- Primitives (use Python as a calculator)\n- Control flows (for, while, if...)\n- Containers (tuple, list, dict)\n- Some Python specifics!\n - Immutable vs. mutable\n - Variables: names bound to objects\n - Typing\n - List comprehensions\n- Functions\n- Modules\n- Basic (text) File IO", "_____no_output_____" ], [ "## Comments", "_____no_output_____" ] ], [ [ "# this is a comment ", "_____no_output_____" ] ], [ [ "## Using Python as a calculator", "_____no_output_____" ] ], [ [ "2 / 2", "_____no_output_____" ] ], [ [ "Automatic type casting for int and float (more on that later)", "_____no_output_____" ] ], [ [ "2 + 2.", "_____no_output_____" ] ], [ [ "Automatic float conversion for division (only in Python 3 !!!) ", "_____no_output_____" ] ], [ [ "2 / 3", "_____no_output_____" ] ], [ [ "**Tip**: if you don't want integer division, use float explicitly (works with both Python 2 and 3)", "_____no_output_____" ] ], [ [ "2. 
/ 3", "_____no_output_____" ] ], [ [ "Integer division (in Python: returns floor)", "_____no_output_____" ] ], [ [ "2 // 3", "_____no_output_____" ] ], [ [ "Import math module for built-in math functions (more on how to import modules later)", "_____no_output_____" ] ], [ [ "import math\n\nmath.sin(math.pi / 2)\n\nmath.log(2.)", "_____no_output_____" ] ], [ [ "**Tip**: to get help interactively for a function, press shift-tab when the cursor is on the function, or alternatively use `?` or `help()`", "_____no_output_____" ] ], [ [ "math.log?", "_____no_output_____" ], [ "help(math.log)", "Help on built-in function log in module math:\n\nlog(...)\n log(x[, base])\n \n Return the logarithm of x to the given base.\n If the base not specified, returns the natural logarithm (base e) of x.\n\n" ] ], [ [ "Complex numbers built in the language", "_____no_output_____" ] ], [ [ "0+1j**2", "_____no_output_____" ], [ "(3+4j).real", "_____no_output_____" ], [ "(3+4j).imag", "_____no_output_____" ] ], [ [ "Create variables, or rather bound values (objects) to identifiers (more on that later)", "_____no_output_____" ] ], [ [ "earth_radius = 6.371e6", "_____no_output_____" ], [ "earth_radius * 2", "_____no_output_____" ] ], [ [ "*Note*: Python instructions are usually separated by new line characters", "_____no_output_____" ] ], [ [ "a = 1\na + 2", "_____no_output_____" ] ], [ [ "It is possible to write several instructions on a single line using semi-colons, but it is strongly discouraged", "_____no_output_____" ] ], [ [ "a = 1; a + 1", "_____no_output_____" ], [ "A", "_____no_output_____" ] ], [ [ "In a notebook, only the output of the last line executed in the cell is shown", "_____no_output_____" ] ], [ [ "a = 10\n2 + 2", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "2 + 2\n2 + 1", "_____no_output_____" ] ], [ [ "To show intermediate results, you need to use the `print()` built-in function, or write code in separate notebook cells", "_____no_output_____" ] ], [ [ 
"print(2 + 2)\nprint(2 + 1)", "4\n3\n" ] ], [ [ "### Strings", "_____no_output_____" ], [ "String are created using single or double quotes", "_____no_output_____" ] ], [ [ "food = \"bradwurst\"\n\ndessert = 'cake'", "_____no_output_____" ] ], [ [ "You may need to include a single (double) quote in a string", "_____no_output_____" ] ], [ [ "s = 'you\\'ll need the \\\\ character'\n\ns", "_____no_output_____" ] ], [ [ "We still see two \"\\\". Why??? This is actually what you want when printing the string", "_____no_output_____" ] ], [ [ "print(s)", "you'll need the \\ character\n" ] ], [ [ "Other special characters (e.g., line return)", "_____no_output_____" ] ], [ [ "two_lines = \"frist_line\\n\\tsecond_line\"\n\ntwo_lines", "_____no_output_____" ], [ "print(two_lines)", "frist_line\n\tsecond_line\n" ] ], [ [ "Long strings", "_____no_output_____" ] ], [ [ "lunch = \"\"\"\nMenu \n\nMain courses\n\n\"\"\"\n\nlunch", "_____no_output_____" ], [ "print(lunch)", "\nMenu \n\nMain courses\n\n\n" ] ], [ [ "Concatenate strings using the `+` operator", "_____no_output_____" ] ], [ [ "food + ' and ' + dessert", "_____no_output_____" ] ], [ [ "Concatenate strings using `join()`", "_____no_output_____" ] ], [ [ "s = ' '.join([food, 'and', dessert, 'coffee'])\n\ns", "_____no_output_____" ], [ "s = '\\n'.join([food, 'and', dessert, 'coffee'])\n\nprint(s)", "bradwurst\nand\ncake\ncoffee\n" ] ], [ [ "Some useful string manipulation (see https://docs.python.org/3/library/stdtypes.html#string-methods)", "_____no_output_____" ] ], [ [ "food = ' bradwurst '\n\nfood.strip()", "_____no_output_____" ] ], [ [ "Format strings\n\nFor more info, see this very nice user guide: https://pyformat.info/", "_____no_output_____" ] ], [ [ "nb = 2\n\n\"{} bradwursts bitte!\".format(nb)", "_____no_output_____" ], [ "\"{number} bradwursts bitte!\".format(number=nb)", "_____no_output_____" ] ], [ [ "## Control flow", "_____no_output_____" ], [ "Example of an if/else statement", "_____no_output_____" ] ], 
[ [ "x = -1\n\nif x < 0:\n print(\"negative\")", "negative\n" ] ], [ [ "Indentation is important!", "_____no_output_____" ] ], [ [ "x = 1\n\nif x < 0:\n print(\"negative\")\n print(x)\n\nprint(x)", "1\n" ] ], [ [ "**Warning**: don't mix tabs and space!!! visually it may look as properly indented but for Python tab and space are different.", "_____no_output_____" ], [ "A more complete example:\n \nif elif else example + comparison operators (==, !=, <, >, ) + logical operators (and, or, not)", "_____no_output_____" ] ], [ [ "x = -1", "_____no_output_____" ], [ "if x < 0:\n x = 0\n print(\"negative and changed to zero\")\nelif x == 0:\n print(\"zero\")\nelif x == 1:\n print(\"Single\")\nelse:\n print(\"More\")\n\n", "negative and changed to zero\n" ], [ "True and False", "_____no_output_____" ] ], [ [ "The `range()` function, used in a `for` loop", "_____no_output_____" ] ], [ [ "for i in range(10):\n print(i)", "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n" ] ], [ [ "*Note*: by default, range starts from 0 (this is consistent with other behavior that we'll see later). Also, its stops just before the given value.\n\nRange can be used with more parameters (see help). 
For example: start, stop, step:", "_____no_output_____" ] ], [ [ "for i in range(1, 11, 2):\n print(i)", "1\n3\n5\n7\n9\n" ] ], [ [ "A loop can also be used to iterate through values other than incrementing numbers (more on how to create iterables later).", "_____no_output_____" ] ], [ [ "words = ['cat', 'open', 'window', 'floor 20', 'be careful']\n\nfor w in words:\n print(w)", "cat\nopen\nwindow\nfloor 20\nbe careful\n" ] ], [ [ "Control the loop: the continue statement", "_____no_output_____" ] ], [ [ "for w in words:\n if w == 'open':\n continue\n print(w)", "cat\nwindow\nfloor 20\nbe careful\n" ] ], [ [ "More possibilities, e.g., a `while` loop and the `break` statement", "_____no_output_____" ] ], [ [ "i = 0\n\nwhile True:\n i = i + 1\n print(i)\n if i > 9:\n break", "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n" ] ], [ [ "## Containers", "_____no_output_____" ], [ "### Lists", "_____no_output_____" ] ], [ [ "a = [1, 2, 3, 4]\n\na", "_____no_output_____" ] ], [ [ "Lists may contain different types of values", "_____no_output_____" ] ], [ [ "a = [1, \"2\", 3., 4+0j]\n\na", "_____no_output_____" ] ], [ [ "Lists may contain lists (nested)", "_____no_output_____" ] ], [ [ "c = [1, [2, 3], 4]\n\nc", "_____no_output_____" ] ], [ [ "\"Indexing\": retrieve elements of a list by location\n\n**Warning**: Unlike Fortran and Matlab, position start at zero!!", "_____no_output_____" ] ], [ [ "c[0]", "_____no_output_____" ] ], [ [ "Negative position is for starting the search at the end of the list", "_____no_output_____" ] ], [ [ "a = [1, 2, 3, 4]\n\na[-1]", "_____no_output_____" ] ], [ [ "\"Slicing\": extract a sublist", "_____no_output_____" ] ], [ [ "a", "_____no_output_____" ], [ "list(range(4))", "_____no_output_____" ] ], [ [ "$$[0, 4[$$", "_____no_output_____" ], [ "Iterate through a list", "_____no_output_____" ] ], [ [ "for i in a:\n print(i)", "1\n2\n3\n4\n" ] ], [ [ "# Tuples\n\nvery similar to lists", "_____no_output_____" ] ], [ [ "t = (1, 2, 3, 4)\n\nt", 
"_____no_output_____" ] ], [ [ "*Note*: the brackets are optional", "_____no_output_____" ] ], [ [ "t = 1, 2, 3, 4\n\nt", "_____no_output_____" ] ], [ [ "\"Unpacking\": as with lists (or any iterable), it is possible to extract values in a tuple and assign them to new variables", "_____no_output_____" ] ], [ [ "t[1:3]", "_____no_output_____" ], [ "second_item, third_item = t[1], t[2]", "_____no_output_____" ], [ "print(second_item)\nprint(third_item)", "2\n3\n" ] ], [ [ "**Tip**: unpack undefined number of items", "_____no_output_____" ] ], [ [ "second_item, *greater_items = t[1:]", "_____no_output_____" ], [ "second_item", "_____no_output_____" ], [ "greater_items", "_____no_output_____" ] ], [ [ "### Dictionnaries\n\nMap keys to values", "_____no_output_____" ] ], [ [ "d = {'key1': 0, 'key2': 1}\n\nd", "_____no_output_____" ] ], [ [ "Keys must be unique.\n\nBut be careful: no error is raised if you provide multiple, identical keys!", "_____no_output_____" ] ], [ [ "d = {'key1': 0, 'key2': 1, 'key1': 3}\n\nd", "_____no_output_____" ] ], [ [ "Indexing dictionnaries by key", "_____no_output_____" ] ], [ [ "d['key1']", "_____no_output_____" ] ], [ [ "Keys are not limited to strings, they can be many things (but not anything, we'll see later)", "_____no_output_____" ] ], [ [ "d = {'key1': 0, 2: 1, 3.: 3}\n\nd[2]", "_____no_output_____" ] ], [ [ "Get keys or values", "_____no_output_____" ] ], [ [ "d.keys()", "_____no_output_____" ], [ "d.values()", "_____no_output_____" ], [ "a[d['key1']]", "_____no_output_____" ], [ "d = {\n 'benoit': {\n 'age': 33,\n 'section':'5.5'\n }\n}", "_____no_output_____" ], [ "d['benoit']['age']", "_____no_output_____" ] ], [ [ "## Mutable vs. 
immutable", "_____no_output_____" ], [ "We can change the value of a variable in place (after we create the variable) or we can't.", "_____no_output_____" ], [ "For example, lists are mutable.", "_____no_output_____" ] ], [ [ "a = [1, 2, 3, 4]\n\na", "_____no_output_____" ] ], [ [ "Change the value of one item in place", "_____no_output_____" ] ], [ [ "a[0] = 'one'\n\na", "_____no_output_____" ] ], [ [ "Append one item at the end of the list", "_____no_output_____" ] ], [ [ "a.append(5)\n\na", "_____no_output_____" ] ], [ [ "Insert one item at a given position", "_____no_output_____" ] ], [ [ "a.insert(0, 'zero')\n\na", "_____no_output_____" ] ], [ [ "Extract and remove the last item", "_____no_output_____" ] ], [ [ "a.pop()", "_____no_output_____" ], [ "a", "_____no_output_____" ] ], [ [ "Dictionnaries are mutable \n\n(note the order of the keys in the printed dict)", "_____no_output_____" ] ], [ [ "d = {'key1': 0, 'key2': 1, 'key3': 2}\n\nd['key4'] = 4\n\nd", "_____no_output_____" ] ], [ [ "Pop an item of given key", "_____no_output_____" ] ], [ [ "d.pop('key1')", "_____no_output_____" ], [ "d", "_____no_output_____" ] ], [ [ "Tuples are immutable!", "_____no_output_____" ] ], [ [ "t = (1, 2, 3, 4)\n\nt.append(5)", "_____no_output_____" ] ], [ [ "Strings are immutable!", "_____no_output_____" ] ], [ [ "food = \"bradwurst\"\n\nfood[0:4] = \"cury\"", "_____no_output_____" ] ], [ [ "But is easy and efficient to create new strings", "_____no_output_____" ] ], [ [ "food = \"curry\" + food[-5:]\n\nfood", "_____no_output_____" ] ], [ [ "A reason why strings are immutable?\n\nThe keys of a dictionnary cannot be mutable, e.g., we cannot not use a list", "_____no_output_____" ] ], [ [ "d = {[1, 3]: 0}", "_____no_output_____" ] ], [ [ "The keys of a dictionnary cannot be mutable, for a quite obvious reason that it is used as indexes, like in a database. 
If we allow changing the indexes, it can be a real mess!\n\nIf strings were mutable, then we could'nt use it as keys in dictionnaries.\n\n*Note*: more precisely, keys of a dictionnary must be \"hashable\".", "_____no_output_____" ], [ "## Variables or identifiers?\n\n", "_____no_output_____" ], [ "What's happening here?", "_____no_output_____" ] ], [ [ "a = [1, 2, 3]\n\nb = a\n\nb[0] = 'one'\n\na", "_____no_output_____" ] ], [ [ "Explanation: the concept of variable is different in Python than in, e.g., C or Fortran\n\n`a = [1, 2, 3]` means we create a list object and we bind this object to a name (label or identifier) \"a\"\n`b = a` means we bind the same object to a new name \"b\"\n\nYou can find more details and good illustrations here: https://nedbatchelder.com/text/names1.html", "_____no_output_____" ], [ "`id()` returns the (unique) identifiant of the value (object) bound to a given identifier", "_____no_output_____" ] ], [ [ "id(a)", "_____no_output_____" ], [ "id(b)", "_____no_output_____" ] ], [ [ "`is` : check whether two identifiers are bound to the same value (object)", "_____no_output_____" ] ], [ [ "a is b", "_____no_output_____" ] ], [ [ "OK, but how do you explain this?", "_____no_output_____" ] ], [ [ "a = 1\nb = a\n\nb = 2\n\na", "_____no_output_____" ], [ "a is b", "_____no_output_____" ], [ "id(a)", "_____no_output_____" ], [ "id(b)", "_____no_output_____" ] ], [ [ "Can you explain what's going on here? ", "_____no_output_____" ] ], [ [ "a = 1\nb = 2\n\nb = a + b\n\nb", "_____no_output_____" ] ], [ [ "Where does go the value \"2\" that was initially bounded to \"b\"?", "_____no_output_____" ], [ "OK, now what about this? Very confusing!", "_____no_output_____" ] ], [ [ "a = 1\nb = 1\n\na is b", "_____no_output_____" ], [ "a = 1.\nb = 1.\n\na is b", "_____no_output_____" ] ], [ [ "## Dynamic, strong, duck typing", "_____no_output_____" ], [ "Dynamic typing: no need to explicitly declare a type of an object/variable before using it. 
This is done automatically depending on the given object/value.", "_____no_output_____" ] ], [ [ "a = 1", "_____no_output_____" ], [ "type(a)", "_____no_output_____" ] ], [ [ "Strong typing: Converting from one type to another must be explicit, i.e., a value of a given type cannot be magically converted into another type", "_____no_output_____" ] ], [ [ "a + '1'", "_____no_output_____" ], [ "a + int('1')", "_____no_output_____" ], [ "eval('1 + 2 * 3')", "_____no_output_____" ] ], [ [ "An exception: integer to float casting", "_____no_output_____" ] ], [ [ "a + 1.", "_____no_output_____" ] ], [ [ "Duck typing: The type of an object doesn't really matter. What an object can or cannot do is more important.\n\n> \"If it walks like a duck and it quacks like a duck, then it must be a duck\"\n", "_____no_output_____" ], [ "For example, we can show that iterating trough list, string or dict can be done using the exact same loop", "_____no_output_____" ] ], [ [ "var = [1, 2, 3, 4]\n\nfor i in var:\n print(i)", "1\n2\n3\n4\n" ], [ "var = 'abcd'\n\nfor i in var:\n print(i)", "a\nb\nc\nd\n" ], [ "var = {'key1': 1, 'key2': 2}\n\nfor i in var:\n print(i)", "key1\nkey2\n" ] ], [ [ "In the last case, iterating a dictionnary uses the keys.\n\nIt is possible to iterate the values:", "_____no_output_____" ] ], [ [ "for v in var.values():\n print(v)", "1\n2\n" ] ], [ [ "Or more useful, iterate trough both keys and values", "_____no_output_____" ] ], [ [ "for k, v in var.items():\n print(k, v)", "key1 1\nkey2 2\n" ], [ "t = ('key1', 1)", "_____no_output_____" ], [ "k, v = t", "_____no_output_____" ], [ "var.items()", "_____no_output_____" ] ], [ [ "Arithmetic operators can be obviously applied on integer, float...", "_____no_output_____" ] ], [ [ "1 + 1", "_____no_output_____" ], [ "1 + 2.", "_____no_output_____" ] ], [ [ "...but also on strings and lists (in this case it does concatenation)", "_____no_output_____" ] ], [ [ "[1, 2, 3] + ['a', 'b', 'c']", "_____no_output_____" ], [ 
"'other' + 'one'", "_____no_output_____" ] ], [ [ "... and also mixing the types, e.g., repeat sequence x times", "_____no_output_____" ] ], [ [ "[1, 2, 3] * 3", "_____no_output_____" ], [ "'one' * 3", "_____no_output_____" ] ], [ [ "...although, everything is not possible", "_____no_output_____" ] ], [ [ "[1, 2, 3] * 3.5", "_____no_output_____" ] ], [ [ "Boolean: what is True and what is False", "_____no_output_____" ] ], [ [ "print(True)\nprint(False)", "True\nFalse\n" ], [ "print(bool(0))\nprint(bool(-1))", "False\nTrue\n" ], [ "a = 1.7\n\nif a:\n print('non zero')", "non zero\n" ], [ "print(bool(''))\nprint(bool('no empty'))", "False\nTrue\n" ], [ "print(bool([]))\nprint(bool([1, 2]))", "False\nTrue\n" ], [ "print(bool({}))\nprint(bool({'key1': 1}))", "False\nTrue\n" ], [ "d = {}\n\nif not d:\n print('there is no item')\n", "there is no item\n" ] ], [ [ "## list comprehension", "_____no_output_____" ], [ "Example: we create a list from another one using a `for` loop", "_____no_output_____" ] ], [ [ "ints = [1, 3, 5, 0, 2, 0]\n\ntrue_or_false = []\n\nfor i in ints:\n true_or_false.append(bool(i))\n\ntrue_or_false", "_____no_output_____" ] ], [ [ "But there is a much more succint way to do it. 
It is still (and maybe even more) readable", "_____no_output_____" ] ], [ [ "true_or_false = [bool(i) for i in ints]\n\ntrue_or_false", "_____no_output_____" ] ], [ [ "More complex example, with conditions", "_____no_output_____" ] ], [ [ "float_no3 = [float(i) for i in ints if i != 3]\n\nfloat_no3", "_____no_output_____" ] ], [ [ "Other kinds of conditions\n\n(It starts to be less readable -> don't abuse list comprehension)", "_____no_output_____" ] ], [ [ "float_str3 = [float(i) if i != 3 else str(i) for i in ints]\n\nfloat_str3", "_____no_output_____" ] ], [ [ "Dict comprehensions", "_____no_output_____" ] ], [ [ "int2float_map = {i: float(i) for i in ints}\n\nint2float_map", "_____no_output_____" ] ], [ [ "## Functions\n\nA function take value(s) as input and (optionally) return value(s) as output\n\ninputs = arguments", "_____no_output_____" ] ], [ [ "def add(a, b):\n \"\"\"Add two things.\"\"\"\n return a + b", "_____no_output_____" ], [ "def print_the_argument(arg):\n print(arg)", "_____no_output_____" ], [ "print_the_argument('a string')", "a string\n" ] ], [ [ "We can call it several times with different values", "_____no_output_____" ] ], [ [ "add(1, 3)", "_____no_output_____" ], [ "help(add)", "Help on function add in module __main__:\n\nadd(a, b)\n Add two things.\n\n" ] ], [ [ "Nested calls", "_____no_output_____" ] ], [ [ "add(add(1, 2), 3)", "_____no_output_____" ] ], [ [ "Duck typing is really useful! 
A single function for doing many things (write less code)", "_____no_output_____" ] ], [ [ "add(1., 2.)", "_____no_output_____" ], [ "add('one', 'two')", "_____no_output_____" ], [ "add([1, 2, 3], [1, 2, 3])", "_____no_output_____" ] ], [ [ "Functions have a scope that is local ", "_____no_output_____" ] ], [ [ "a = 1\n\ndef func():\n a = 2\n\na", "_____no_output_____" ], [ "func()", "_____no_output_____" ], [ "a", "_____no_output_____" ] ], [ [ "Call by value?", "_____no_output_____" ] ], [ [ "def func(j):\n j = j + 1\n print('inside: ', j)\n return j\n\ni = 1\nprint('before:', i)\ni = func(i)\nprint('after:', i)", "before: 1\ninside: 2\nafter: 2\n" ] ], [ [ "Not really...", "_____no_output_____" ] ], [ [ "def func(li):\n li[0] = 1000\n print('inside: ', li[0])\n\nli = [1]\nprint('before:', li[0])\nfunc(li)\nprint('after:', li[0])", "before: 1\ninside: 1000\nafter: 1000\n" ] ], [ [ "Composing functions (start to look like functional programming)", "_____no_output_____" ] ], [ [ "C2K_OFFSET = 273.15\n\ndef fahr_to_kelvin(temp):\n \"\"\"convert temp from fahrenheit to kelvin\"\"\"\n return ((temp - 32) * (5/9)) + C2K_OFFSET\n\ndef kelvin_to_celsius(temp_k):\n # convert temperature from kevin to celsius\n return temp_k - C2K_OFFSET\n\ndef fahr_to_celsius(temp_f):\n temp_k = fahr_to_kelvin(temp_f)\n temp_c = kelvin_to_celsius(temp_k)\n return temp_c", "_____no_output_____" ], [ "fahr_to_kelvin(50)", "_____no_output_____" ], [ "fahr_to_celsius(50)", "_____no_output_____" ] ], [ [ "Function docstring (help)", "_____no_output_____" ], [ "Default argument values (keyword arguments)", "_____no_output_____" ] ], [ [ "def display(a=1, b=2, c=3):\n print(a, b, c)", "_____no_output_____" ], [ "display(b=4)", "1 4 3\n" ] ], [ [ "When calling a function, the order of the keyword arguments doesn't matter\n\nBut the order matters for positional arguments!!", "_____no_output_____" ] ], [ [ "display(c=5, a=1)", "1 2 5\n" ], [ "display(3)", "3 2 3\n" ] ], [ [ "Mix positional and 
keyword arguments: positional arguments must be added before keyword arguments", "_____no_output_____" ] ], [ [ "def display(c, a=1, b=2):\n print(a, b, c)", "_____no_output_____" ], [ "display(1000)", "1 2 1000\n" ] ], [ [ "What's going on here?", "_____no_output_____" ] ], [ [ "def add_to_list(li=[], value=1):\n li.append(value)\n return li\n", "_____no_output_____" ], [ "add_to_list()", "_____no_output_____" ], [ "add_to_list()", "_____no_output_____" ], [ "add_to_list()", "_____no_output_____" ] ], [ [ "Try running again the cell that defines the function, and then the cells that call the function\n\nThis is sooo confusing!", "_____no_output_____" ], [ "So you shouldn't use mutable objects as default values\n\nWorkaround:", "_____no_output_____" ] ], [ [ "def add_to_list(li=None, value=1):\n if li is None:\n li = []\n li.append(value)\n return li", "_____no_output_____" ], [ "add_to_list()", "_____no_output_____" ], [ "add_to_list()", "_____no_output_____" ] ], [ [ "Arbitrary number of arguments", "_____no_output_____" ] ], [ [ "def display_args(*args):\n print(args)\n nb_args = len(args)\n print(nb_args)\n print(*args)", "_____no_output_____" ], [ "display_args('one')", "('one',)\n1\none\n" ], [ "display_args(1, '2', 'bradwurst')", "(1, '2', 'bradwurst')\n3\n1 2 bradwurst\n" ] ], [ [ "Arbitrary number of keyword arguments", "_____no_output_____" ] ], [ [ "def display_args_kwargs(*args, **kwargs):\n print(*args)\n print(kwargs)", "_____no_output_____" ], [ "display_args_kwargs('one', 2, three=3.)", "one 2\n{'three': 3.0}\n" ] ], [ [ "Return more than one value (tuple)", "_____no_output_____" ] ], [ [ "def spherical_coords(x, y, z):\n # convert\n return r, theta, phi", "_____no_output_____" ] ], [ [ "## Modules\n\nModules are Python code in (`.py`) files that can be imported from within Python.\n\nLike functions, it allows to reusing the code in different contexts. 
", "_____no_output_____" ], [ "Write a module with the temperature conversion functions above\n\n(note: the `%%writefile` is a magic cell command in the notebook that writes the content of the cell in a file)", "_____no_output_____" ] ], [ [ "%%writefile temp_converter.py\n\nC2K_OFFSET = 273.15\n\ndef fahr_to_kelvin(temp):\n \"\"\"convert temp from fahrenheit to kelvin\"\"\"\n return ((temp - 32) * (5/9)) + C2K_OFFSET\n\ndef kelvin_to_celsius(temp_k):\n # convert temperature from kevin to celsius\n return temp_k - C2K_OFFSET\n\ndef fahr_to_celsius(temp_f):\n temp_k = fahr_to_kelvin(temp_f)\n temp_c = kelvin_to_celsius(temp_k)\n return temp_c", "Overwriting temp_converter.py\n" ] ], [ [ "Import a module", "_____no_output_____" ] ], [ [ "import temp_converter", "_____no_output_____" ] ], [ [ "Access the functions imported with the module using the module name as a \"namespace\"\n\n**Tip**: imported module + dot + <tab> for autocompletion", "_____no_output_____" ] ], [ [ "temp_converter.fahr_to_celsius(100.)", "_____no_output_____" ] ], [ [ "Import the module with a (short) alias for the namespace", "_____no_output_____" ] ], [ [ "import temp_converter as tc", "_____no_output_____" ], [ "tc.fahr_to_celsius(100.)", "_____no_output_____" ] ], [ [ "Import just a function from the module", "_____no_output_____" ] ], [ [ "from temp_converter import fahr_to_celsius", "_____no_output_____" ], [ "fahr_to_celsius(100.)", "_____no_output_____" ] ], [ [ "Import everything in the module (without using a namespace)\n\nStrongly discouraged!! 
Name conflicts!", "_____no_output_____" ] ], [ [ "from temp_converter import *", "_____no_output_____" ], [ "kelvin_to_celsius(270)", "_____no_output_____" ] ], [ [ "## (Text) file IO\n\nLet's create a small file with some data", "_____no_output_____" ] ], [ [ "%%writefile data.csv\n\"depth\", \"some_variable\"\n200, 2.4e2\n400, 5.6e2\n600, 2.6e8", "Writing data.csv\n" ] ], [ [ "Open the file using Python:", "_____no_output_____" ] ], [ [ "f = open(\"data.csv\", \"r\")", "_____no_output_____" ], [ "f", "_____no_output_____" ] ], [ [ "Read the content", "_____no_output_____" ] ], [ [ "raw_data = f.readlines()\n\nraw_data", "_____no_output_____" ] ], [ [ "What happens here?", "_____no_output_____" ] ], [ [ "f.readlines()", "_____no_output_____" ], [ "f.seek(0)\nf.readlines()", "_____no_output_____" ] ], [ [ "Close the file", "_____no_output_____" ] ], [ [ "f.close()", "_____no_output_____" ] ], [ [ "It is safer to use the `with` statement (contexts)", "_____no_output_____" ] ], [ [ "with open(\"data.csv\") as f:\n raw_data = f.readlines()\n\nraw_data", "_____no_output_____" ], [ "f.closed", "_____no_output_____" ] ], [ [ "We don't need to close the file, it is done automatically after executing the block of instructions under the `with` statement", "_____no_output_____" ], [ "It is safer because if an error happens within the block of instructions, the file is closed anyway.\n\nNote here how we can explicitly raise an Error. There are many kinds of exceptions, see: https://docs.python.org/3/library/exceptions.html#bltin-exceptions", "_____no_output_____" ] ], [ [ "with open(\"data.csv\") as f:\n raw_data = f.readlines()\n raise ValueError(\"something wrong happened\")\n\nraw_data", "_____no_output_____" ], [ "f.closed", "_____no_output_____" ] ], [ [ "*Note*: there are much more efficient ways to import data from a csv file!!! We'll see that later using scientific libraries.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", 
"code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ 
"code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d068b5e82b3cf85082a852a79004337babd792d4
50,024
ipynb
Jupyter Notebook
Classification/Logistic Regression/Logistic Regression.ipynb
jv640/Learning-ML
455415735c61433cab225dd5978ce9aea12c3f2c
[ "MIT" ]
null
null
null
Classification/Logistic Regression/Logistic Regression.ipynb
jv640/Learning-ML
455415735c61433cab225dd5978ce9aea12c3f2c
[ "MIT" ]
null
null
null
Classification/Logistic Regression/Logistic Regression.ipynb
jv640/Learning-ML
455415735c61433cab225dd5978ce9aea12c3f2c
[ "MIT" ]
null
null
null
148.439169
22,964
0.867384
[ [ [ "import pandas as pd, numpy as np, matplotlib.pyplot as plt", "_____no_output_____" ], [ "dataset = pd.read_csv('Social_Network_Ads.csv')\ndataset.head()", "_____no_output_____" ], [ "X = dataset.iloc[:,2:4].values\nY = dataset.iloc[:,-1].values", "_____no_output_____" ], [ "# dividing data in train and test \nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.25, random_state =0)", "_____no_output_____" ], [ "# scaling data for right prediction\nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression()\nclassifier.fit(X_train, Y_train)", "_____no_output_____" ], [ "y_pred = classifier.predict(X_test)\ny_pred", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix\ncm = confusion_matrix(Y_test, y_pred)\ncm", "_____no_output_____" ], [ "from matplotlib.colors import ListedColormap\nX_set, y_set = X_train, Y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('Logistic Regression (Training set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()\n", "'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. 
Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.\n'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.\n" ], [ "from matplotlib.colors import ListedColormap\nX_set, y_set = X_test, Y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('Logistic Regression (Test set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()\n", "'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.\n'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d068dc6b66ae9a05ac18ff396426a495a5608ffd
51,715
ipynb
Jupyter Notebook
notebooks/pagerank-simple.ipynb
quang-ha/IA-maths-Ipython
8ff8533d64a3d8db8e4813a7b6dfee39339fd846
[ "BSD-3-Clause" ]
null
null
null
notebooks/pagerank-simple.ipynb
quang-ha/IA-maths-Ipython
8ff8533d64a3d8db8e4813a7b6dfee39339fd846
[ "BSD-3-Clause" ]
null
null
null
notebooks/pagerank-simple.ipynb
quang-ha/IA-maths-Ipython
8ff8533d64a3d8db8e4813a7b6dfee39339fd846
[ "BSD-3-Clause" ]
null
null
null
211.081633
42,753
0.88456
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d068eeda17e3a565da722f4fe6ca955c63da3255
243,151
ipynb
Jupyter Notebook
notebooks/14_Intro_DeepLearning.ipynb
Torroledo/ML_RiskManagement
9efad6ba1d26e002ff77c9f44869ddcd06231232
[ "MIT" ]
null
null
null
notebooks/14_Intro_DeepLearning.ipynb
Torroledo/ML_RiskManagement
9efad6ba1d26e002ff77c9f44869ddcd06231232
[ "MIT" ]
null
null
null
notebooks/14_Intro_DeepLearning.ipynb
Torroledo/ML_RiskManagement
9efad6ba1d26e002ff77c9f44869ddcd06231232
[ "MIT" ]
1
2018-07-16T12:22:33.000Z
2018-07-16T12:22:33.000Z
123.11443
19,486
0.847593
[ [ [ "# 14 - Introduction to Deep Learning\n\nby [Alejandro Correa Bahnsen](albahnsen.com/)\n\nversion 0.1, May 2016\n\n## Part of the class [Machine Learning Applied to Risk Management](https://github.com/albahnsen/ML_RiskManagement)\n\n\n\nThis notebook is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US)\n\nBased on the slides and presentation by [Alec Radford](https://www.youtube.com/watch?v=S75EdAcXHKk) [github](https://github.com/Newmu/Theano-Tutorials/)", "_____no_output_____" ], [ "For this class you must install theno\n\n```pip instal theano```", "_____no_output_____" ], [ "# Motivation\n\nHow do we program a computer to recognize a picture of a\nhandwritten digit as a 0-9?\n\n![1](images/d1.png)", "_____no_output_____" ], [ "### What if we have 60,000 of these images and their label?", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "from load import mnist\nX_train, X_test, y_train2, y_test2 = mnist(onehot=True)", "_____no_output_____" ], [ "y_train = np.argmax(y_train2, axis=1)\ny_test = np.argmax(y_test2, axis=1)", "_____no_output_____" ], [ "X_train[1].reshape((28, 28)).round(0).astype(int)[:, 4:26].tolist()", "_____no_output_____" ], [ "from pylab import imshow, show, cm\nimport matplotlib.pylab as plt\n%matplotlib inline\n\ndef view_image(image, label=\"\", predicted='', size=4):\n \"\"\"View a single image.\"\"\"\n plt.figure(figsize = (size, size))\n plt.imshow(image.reshape((28, 28)), cmap=cm.gray, )\n plt.tick_params(axis='x',which='both', bottom='off',top='off', labelbottom='off')\n plt.tick_params(axis='y',which='both', left='off',top='off', labelleft='off')\n show()\n if predicted == '':\n print(\"Label: %s\" % label)\n else:\n print('Label: ', str(label), 'Predicted: ', str(predicted))\n ", "_____no_output_____" ], [ "view_image(X_train[1], y_train[1])", "_____no_output_____" ], [ "view_image(X_train[40000], 
y_train[40000])", "_____no_output_____" ] ], [ [ "# Naive model\n\nFor each image, find the “most similar” image and guess\nthat as the label", "_____no_output_____" ] ], [ [ "def similarity(image, images):\n similarities = []\n image = image.reshape((28, 28))\n images = images.reshape((-1, 28, 28))\n for i in range(images.shape[0]):\n distance = np.sqrt(np.sum(image - images[i]) ** 2)\n sim = 1 / distance\n similarities.append(sim)\n return similarities", "_____no_output_____" ], [ "np.random.seed(52)\nsmall_train = np.random.choice(X_train.shape[0], 100)", "_____no_output_____" ], [ "view_image(X_test[0])", "_____no_output_____" ], [ "similarities = similarity(X_test[0], X_train[small_train])", "_____no_output_____" ], [ "view_image(X_train[small_train[np.argmax(similarities)]])", "_____no_output_____" ] ], [ [ "Lets try an other example", "_____no_output_____" ] ], [ [ "view_image(X_test[200])", "_____no_output_____" ], [ "similarities = similarity(X_test[200], X_train[small_train])\nview_image(X_train[small_train[np.argmax(similarities)]])", "_____no_output_____" ] ], [ [ "# Logistic Regression\n\nLogistic regression is a probabilistic, linear classifier. 
It is parametrized\nby a weight matrix $W$ and a bias vector $b$ Classification is\ndone by projecting data points onto a set of hyperplanes, the distance to\nwhich is used to determine a class membership probability.\n\nMathematically, this can be written as:\n\n$$\n P(Y=i\\vert x, W,b) = softmax_i(W x + b) \n$$\n$$\n P(Y=i|x, W,b) = \\frac {e^{W_i x + b_i}} {\\sum_j e^{W_j x + b_j}}\n$$\n\nThe output of the model or prediction is then done by taking the argmax of\nthe vector whose i'th element is $P(Y=i|x)$.\n\n$$\n y_{pred} = argmax_i P(Y=i|x,W,b)\n$$\n\n![a](images/d2.png)", "_____no_output_____" ] ], [ [ "import theano\nfrom theano import tensor as T\nimport numpy as np\nimport datetime as dt", "_____no_output_____" ], [ "theano.config.floatX = 'float32'", "_____no_output_____" ] ], [ [ "```\nTheano is a Python library that lets you to define, optimize, and evaluate mathematical expressions, especially ones with multi-dimensional arrays (numpy.ndarray). Using Theano it is possible to attain speeds rivaling hand-crafted C implementations for problems involving large amounts of data. It can also surpass C on a CPU by many orders of magnitude by taking advantage of recent GPUs.\n\nTheano combines aspects of a computer algebra system (CAS) with aspects of an optimizing compiler. It can also generate customized C code for many mathematical operations. This combination of CAS with optimizing compilation is particularly useful for tasks in which complicated mathematical expressions are evaluated repeatedly and evaluation speed is critical. 
For situations where many different expressions are each evaluated once Theano can minimize the amount of compilation/analysis overhead, but still provide symbolic features such as automatic differentiation.\n```", "_____no_output_____" ] ], [ [ "def floatX(X):\n# return np.asarray(X, dtype='float32')\n return np.asarray(X, dtype=theano.config.floatX)\n\ndef init_weights(shape):\n return theano.shared(floatX(np.random.randn(*shape) * 0.01))\n\ndef model(X, w):\n return T.nnet.softmax(T.dot(X, w))", "_____no_output_____" ], [ "X = T.fmatrix()\nY = T.fmatrix()\n\nw = init_weights((784, 10))", "_____no_output_____" ], [ "w.get_value()", "_____no_output_____" ] ], [ [ "initialize model", "_____no_output_____" ] ], [ [ "py_x = model(X, w)\ny_pred = T.argmax(py_x, axis=1)", "_____no_output_____" ], [ "cost = T.mean(T.nnet.categorical_crossentropy(py_x, Y))\ngradient = T.grad(cost=cost, wrt=w)\nupdate = [[w, w - gradient * 0.05]]", "_____no_output_____" ], [ "train = theano.function(inputs=[X, Y], outputs=cost, updates=update, allow_input_downcast=True)\npredict = theano.function(inputs=[X], outputs=y_pred, allow_input_downcast=True)", "_____no_output_____" ] ], [ [ "One iteration", "_____no_output_____" ] ], [ [ "for start, end in zip(range(0, X_train.shape[0], 128), range(128, X_train.shape[0], 128)):\n cost = train(X_train[start:end], y_train2[start:end]) ", "_____no_output_____" ], [ "errors = [(np.mean(y_train != predict(X_train)), \n np.mean(y_test != predict(X_test)))]\nerrors", "_____no_output_____" ] ], [ [ "Now for 100 epochs", "_____no_output_____" ] ], [ [ "t0 = dt.datetime.now()\n\nfor i in range(100):\n \n for start, end in zip(range(0, X_train.shape[0], 128), \n range(128, X_train.shape[0], 128)):\n cost = train(X_train[start:end], y_train2[start:end])\n \n errors.append((np.mean(y_train != predict(X_train)), \n np.mean(y_test != predict(X_test))))\n print(i, errors[-1])\n\nprint('Total time: ', (dt.datetime.now()-t0).seconds / 60.)", "0 
(0.10956666666666667, 0.10150000000000001)\n1 (0.10246666666666666, 0.095399999999999999)\n2 (0.098183333333333331, 0.091899999999999996)\n3 (0.095533333333333331, 0.089999999999999997)\n4 (0.093450000000000005, 0.088900000000000007)\n5 (0.091366666666666665, 0.087900000000000006)\n6 (0.089766666666666661, 0.086599999999999996)\n7 (0.088883333333333328, 0.084900000000000003)\n8 (0.087650000000000006, 0.0843)\n9 (0.086683333333333334, 0.084400000000000003)\n10 (0.085800000000000001, 0.083500000000000005)\n11 (0.085166666666666668, 0.083299999999999999)\n12 (0.084449999999999997, 0.083099999999999993)\n13 (0.083883333333333338, 0.082199999999999995)\n14 (0.083366666666666672, 0.081600000000000006)\n15 (0.083016666666666669, 0.081600000000000006)\n16 (0.082483333333333339, 0.081100000000000005)\n17 (0.082116666666666671, 0.080799999999999997)\n18 (0.081833333333333327, 0.080500000000000002)\n19 (0.081549999999999997, 0.080699999999999994)\n20 (0.08118333333333333, 0.080399999999999999)\n21 (0.080816666666666662, 0.080100000000000005)\n22 (0.080483333333333337, 0.079799999999999996)\n23 (0.080183333333333329, 0.079699999999999993)\n24 (0.079750000000000001, 0.079699999999999993)\n25 (0.079483333333333336, 0.079100000000000004)\n26 (0.079216666666666671, 0.078899999999999998)\n27 (0.07906666666666666, 0.079000000000000001)\n28 (0.078799999999999995, 0.079000000000000001)\n29 (0.078483333333333336, 0.079000000000000001)\n30 (0.078100000000000003, 0.078899999999999998)\n31 (0.077833333333333338, 0.079000000000000001)\n32 (0.077666666666666662, 0.078600000000000003)\n33 (0.077566666666666673, 0.078100000000000003)\n34 (0.077366666666666667, 0.078)\n35 (0.077166666666666661, 0.078200000000000006)\n36 (0.077066666666666672, 0.078200000000000006)\n37 (0.076916666666666661, 0.078200000000000006)\n38 (0.076700000000000004, 0.078)\n39 (0.076550000000000007, 0.077700000000000005)\n40 (0.076466666666666669, 0.077700000000000005)\n41 (0.076316666666666672, 0.077499999999999999)\n42 
(0.076200000000000004, 0.077399999999999997)\n43 (0.076050000000000006, 0.077499999999999999)\n44 (0.075933333333333339, 0.077700000000000005)\n45 (0.075783333333333328, 0.077700000000000005)\n46 (0.075566666666666671, 0.077600000000000002)\n47 (0.07538333333333333, 0.077299999999999994)\n48 (0.075283333333333327, 0.077100000000000002)\n49 (0.075083333333333335, 0.076999999999999999)\n50 (0.075033333333333327, 0.077100000000000002)\n51 (0.074800000000000005, 0.076799999999999993)\n52 (0.074766666666666662, 0.076799999999999993)\n53 (0.074483333333333332, 0.076899999999999996)\n54 (0.074416666666666673, 0.076499999999999999)\n55 (0.074300000000000005, 0.076499999999999999)\n56 (0.074083333333333334, 0.076200000000000004)\n57 (0.073950000000000002, 0.075899999999999995)\n58 (0.07378333333333334, 0.075800000000000006)\n59 (0.073649999999999993, 0.075800000000000006)\n60 (0.073566666666666669, 0.075700000000000003)\n61 (0.073516666666666661, 0.075700000000000003)\n62 (0.073450000000000001, 0.075700000000000003)\n63 (0.073266666666666661, 0.075700000000000003)\n64 (0.073266666666666661, 0.075700000000000003)\n65 (0.073133333333333328, 0.075700000000000003)\n66 (0.073133333333333328, 0.075800000000000006)\n67 (0.073083333333333333, 0.075899999999999995)\n68 (0.072916666666666671, 0.075899999999999995)\n69 (0.072833333333333333, 0.075800000000000006)\n70 (0.072849999999999998, 0.075800000000000006)\n71 (0.07276666666666666, 0.075800000000000006)\n72 (0.07273333333333333, 0.076100000000000001)\n73 (0.072583333333333333, 0.075899999999999995)\n74 (0.072433333333333336, 0.075800000000000006)\n75 (0.072366666666666662, 0.075800000000000006)\n76 (0.072366666666666662, 0.075800000000000006)\n77 (0.072249999999999995, 0.075899999999999995)\n78 (0.072266666666666673, 0.075899999999999995)\n79 (0.072249999999999995, 0.075899999999999995)\n80 (0.07223333333333333, 0.075700000000000003)\n81 (0.072133333333333327, 0.075800000000000006)\n82 (0.072016666666666673, 
0.075700000000000003)\n83 (0.071966666666666665, 0.075700000000000003)\n84 (0.071900000000000006, 0.075600000000000001)\n85 (0.071883333333333327, 0.075499999999999998)\n86 (0.071866666666666662, 0.075499999999999998)\n87 (0.071766666666666673, 0.075499999999999998)\n88 (0.071766666666666673, 0.075499999999999998)\n89 (0.071800000000000003, 0.075399999999999995)\n90 (0.071749999999999994, 0.075300000000000006)\n91 (0.071633333333333327, 0.075200000000000003)\n92 (0.071583333333333332, 0.074999999999999997)\n93 (0.071566666666666667, 0.075200000000000003)\n94 (0.071516666666666673, 0.075300000000000006)\n95 (0.071483333333333329, 0.075300000000000006)\n96 (0.07145, 0.075300000000000006)\n97 (0.071433333333333335, 0.075200000000000003)\n98 (0.071400000000000005, 0.075300000000000006)\n99 (0.071300000000000002, 0.075300000000000006)\nTotal time: 0.4\n" ], [ "res = np.array(errors)\nplt.plot(np.arange(res.shape[0]), res[:, 0], label='train error')\nplt.plot(np.arange(res.shape[0]), res[:, 1], label='test error')\nplt.legend()", "_____no_output_____" ] ], [ [ "### Checking the results", "_____no_output_____" ] ], [ [ "y_pred = predict(X_test)", "_____no_output_____" ], [ "np.random.seed(2)\nsmall_test = np.random.choice(X_test.shape[0], 10)\n\nfor i in small_test:\n view_image(X_test[i], label=y_test[i], predicted=y_pred[i], size=1)", "_____no_output_____" ] ], [ [ "# Simple Neural Net\n\nAdd a hidden layer with a sigmoid activation function\n\n![a](images/d3.png)", "_____no_output_____" ] ], [ [ "def sgd(cost, params, lr=0.05):\n grads = T.grad(cost=cost, wrt=params)\n updates = []\n for p, g in zip(params, grads):\n updates.append([p, p - g * lr])\n return updates\n\ndef model(X, w_h, w_o):\n h = T.nnet.sigmoid(T.dot(X, w_h))\n pyx = T.nnet.softmax(T.dot(h, w_o))\n return pyx\n\nw_h = init_weights((784, 625))\nw_o = init_weights((625, 10))\n\npy_x = model(X, w_h, w_o)\ny_x = T.argmax(py_x, axis=1)\n\ncost = T.mean(T.nnet.categorical_crossentropy(py_x, Y))\nparams = 
[w_h, w_o]\nupdates = sgd(cost, params)\n\ntrain = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\npredict = theano.function(inputs=[X], outputs=y_x, allow_input_downcast=True)", "_____no_output_____" ], [ "t0 = dt.datetime.now()\n\nerrors = []\nfor i in range(100):\n \n for start, end in zip(range(0, X_train.shape[0], 128), \n range(128, X_train.shape[0], 128)):\n cost = train(X_train[start:end], y_train2[start:end])\n \n errors.append((np.mean(y_train != predict(X_train)), \n np.mean(y_test != predict(X_test))))\n print(i, errors[-1])\n\nprint('Total time: ', (dt.datetime.now()-t0).seconds / 60.)", "0 (0.30408333333333332, 0.29780000000000001)\n1 (0.17413333333333333, 0.1714)\n2 (0.13850000000000001, 0.1328)\n3 (0.12273333333333333, 0.1168)\n4 (0.11371666666666666, 0.1101)\n5 (0.1079, 0.1051)\n6 (0.10331666666666667, 0.10150000000000001)\n7 (0.10036666666666667, 0.098000000000000004)\n8 (0.097699999999999995, 0.095100000000000004)\n9 (0.095750000000000002, 0.092799999999999994)\n10 (0.093700000000000006, 0.0906)\n11 (0.092133333333333331, 0.088300000000000003)\n12 (0.090683333333333338, 0.0872)\n13 (0.089316666666666669, 0.085699999999999998)\n14 (0.088016666666666674, 0.085000000000000006)\n15 (0.086966666666666664, 0.084099999999999994)\n16 (0.086099999999999996, 0.083500000000000005)\n17 (0.085316666666666666, 0.082900000000000001)\n18 (0.084400000000000003, 0.082000000000000003)\n19 (0.083783333333333335, 0.081699999999999995)\n20 (0.082799999999999999, 0.081199999999999994)\n21 (0.082166666666666666, 0.080600000000000005)\n22 (0.081416666666666665, 0.080000000000000002)\n23 (0.080766666666666667, 0.079200000000000007)\n24 (0.079916666666666664, 0.078700000000000006)\n25 (0.079200000000000007, 0.078600000000000003)\n26 (0.078750000000000001, 0.078200000000000006)\n27 (0.07825, 0.077899999999999997)\n28 (0.077799999999999994, 0.077299999999999994)\n29 (0.07721666666666667, 0.076499999999999999)\n30 
(0.076666666666666661, 0.075899999999999995)\n31 (0.076050000000000006, 0.075999999999999998)\n32 (0.075266666666666662, 0.0751)\n33 (0.07456666666666667, 0.074399999999999994)\n34 (0.073666666666666672, 0.073899999999999993)\n35 (0.072999999999999995, 0.073300000000000004)\n36 (0.072383333333333327, 0.073200000000000001)\n37 (0.071633333333333327, 0.072700000000000001)\n38 (0.070866666666666661, 0.072599999999999998)\n39 (0.070316666666666666, 0.072099999999999997)\n40 (0.069449999999999998, 0.071099999999999997)\n41 (0.06876666666666667, 0.070499999999999993)\n42 (0.068166666666666667, 0.069699999999999998)\n43 (0.067549999999999999, 0.069199999999999998)\n44 (0.066733333333333339, 0.068900000000000003)\n45 (0.066133333333333336, 0.0688)\n46 (0.065299999999999997, 0.068199999999999997)\n47 (0.064466666666666672, 0.066799999999999998)\n48 (0.063783333333333331, 0.066199999999999995)\n49 (0.063233333333333336, 0.066100000000000006)\n50 (0.062449999999999999, 0.0654)\n51 (0.061866666666666667, 0.064500000000000002)\n52 (0.061366666666666667, 0.063799999999999996)\n53 (0.060633333333333331, 0.062700000000000006)\n54 (0.06001666666666667, 0.061899999999999997)\n55 (0.059366666666666665, 0.061100000000000002)\n56 (0.058716666666666667, 0.0608)\n57 (0.058083333333333334, 0.060299999999999999)\n58 (0.057283333333333332, 0.059900000000000002)\n59 (0.056583333333333333, 0.059299999999999999)\n60 (0.055933333333333335, 0.058799999999999998)\n61 (0.055316666666666667, 0.058099999999999999)\n62 (0.054649999999999997, 0.057700000000000001)\n63 (0.054100000000000002, 0.056899999999999999)\n64 (0.05358333333333333, 0.056300000000000003)\n65 (0.053066666666666665, 0.055800000000000002)\n66 (0.052650000000000002, 0.055199999999999999)\n67 (0.052033333333333334, 0.054899999999999997)\n68 (0.051650000000000001, 0.054600000000000003)\n69 (0.051283333333333334, 0.054100000000000002)\n70 (0.050766666666666668, 0.053699999999999998)\n71 (0.050316666666666669, 0.053100000000000001)\n72 
(0.049799999999999997, 0.052699999999999997)\n73 (0.049299999999999997, 0.052600000000000001)\n74 (0.048916666666666664, 0.051499999999999997)\n75 (0.048550000000000003, 0.051499999999999997)\n76 (0.047966666666666664, 0.051299999999999998)\n77 (0.047600000000000003, 0.050700000000000002)\n78 (0.047133333333333333, 0.0504)\n79 (0.046716666666666663, 0.050200000000000002)\n80 (0.046183333333333333, 0.049399999999999999)\n81 (0.0458, 0.049200000000000001)\n82 (0.045350000000000001, 0.048899999999999999)\n83 (0.044883333333333331, 0.048599999999999997)\n84 (0.044416666666666667, 0.048000000000000001)\n85 (0.043900000000000002, 0.046800000000000001)\n86 (0.043483333333333332, 0.046800000000000001)\n87 (0.042900000000000001, 0.046600000000000003)\n88 (0.042516666666666668, 0.046199999999999998)\n89 (0.042133333333333335, 0.0458)\n90 (0.041633333333333335, 0.045499999999999999)\n91 (0.04123333333333333, 0.0453)\n92 (0.040733333333333337, 0.044999999999999998)\n93 (0.040349999999999997, 0.044400000000000002)\n94 (0.040083333333333332, 0.044200000000000003)\n95 (0.039866666666666668, 0.0441)\n96 (0.039449999999999999, 0.043799999999999999)\n97 (0.039166666666666669, 0.043499999999999997)\n98 (0.038883333333333332, 0.043299999999999998)\n99 (0.038516666666666664, 0.042799999999999998)\nTotal time: 7.066666666666666\n" ], [ "res = np.array(errors)\nplt.plot(np.arange(res.shape[0]), res[:, 0], label='train error')\nplt.plot(np.arange(res.shape[0]), res[:, 1], label='test error')\nplt.legend()", "_____no_output_____" ] ], [ [ "# Complex Neural Net\n\nTwo hidden layers with dropout\n\n![a](images/d4.png)", "_____no_output_____" ] ], [ [ "from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\nsrng = RandomStreams()\n\ndef rectify(X):\n return T.maximum(X, 0.)", "_____no_output_____" ] ], [ [ "### Understanding rectifier units\n![A](images/d5.png)", "_____no_output_____" ] ], [ [ "def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6):\n grads = 
T.grad(cost=cost, wrt=params)\n updates = []\n for p, g in zip(params, grads):\n acc = theano.shared(p.get_value() * 0.)\n acc_new = rho * acc + (1 - rho) * g ** 2\n gradient_scaling = T.sqrt(acc_new + epsilon)\n g = g / gradient_scaling\n updates.append((acc, acc_new))\n updates.append((p, p - lr * g))\n return updates", "_____no_output_____" ] ], [ [ "### RMSprop\n\n\nRMSprop is an unpublished, adaptive learning rate method proposed by Geoff Hinton in \n[Lecture 6e of his Coursera Class](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)\n\nRMSprop and Adadelta have both been developed independently around the same time stemming from the need to resolve Adagrad's radically diminishing learning rates. RMSprop in fact is identical to the first update vector of Adadelta that we derived above:\n\n$$ E[g^2]_t = 0.9 E[g^2]_{t-1} + 0.1 g^2_t. $$\n\n$$\\theta_{t+1} = \\theta_{t} - \\frac{\\eta}{\\sqrt{E[g^2]_t + \\epsilon}} g_{t}.$$\n\nRMSprop as well divides the learning rate by an exponentially decaying average of squared gradients. 
Hinton suggests $\\gamma$ to be set to 0.9, while a good default value for the learning rate $\\eta$ is 0.001.", "_____no_output_____" ] ], [ [ "def dropout(X, p=0.):\n if p > 0:\n retain_prob = 1 - p\n X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)\n X /= retain_prob\n return X\n\ndef model(X, w_h, w_h2, w_o, p_drop_input, p_drop_hidden):\n X = dropout(X, p_drop_input)\n h = rectify(T.dot(X, w_h))\n\n h = dropout(h, p_drop_hidden)\n h2 = rectify(T.dot(h, w_h2))\n\n h2 = dropout(h2, p_drop_hidden)\n py_x = softmax(T.dot(h2, w_o))\n return h, h2, py_x\n\ndef softmax(X):\n e_x = T.exp(X - X.max(axis=1).dimshuffle(0, 'x'))\n return e_x / e_x.sum(axis=1).dimshuffle(0, 'x')", "_____no_output_____" ], [ "w_h = init_weights((784, 625))\nw_h2 = init_weights((625, 625))\nw_o = init_weights((625, 10))\n\nnoise_h, noise_h2, noise_py_x = model(X, w_h, w_h2, w_o, 0.2, 0.5)\nh, h2, py_x = model(X, w_h, w_h2, w_o, 0., 0.)\ny_x = T.argmax(py_x, axis=1)\n\ncost = T.mean(T.nnet.categorical_crossentropy(noise_py_x, Y))\nparams = [w_h, w_h2, w_o]\nupdates = RMSprop(cost, params, lr=0.001)\n\ntrain = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\npredict = theano.function(inputs=[X], outputs=y_x, allow_input_downcast=True)", "_____no_output_____" ], [ "t0 = dt.datetime.now()\n\nerrors = []\nfor i in range(100):\n \n for start, end in zip(range(0, X_train.shape[0], 128), \n range(128, X_train.shape[0], 128)):\n cost = train(X_train[start:end], y_train2[start:end])\n \n errors.append((np.mean(y_train != predict(X_train)), \n np.mean(y_test != predict(X_test))))\n print(i, errors[-1])\n\nprint('Total time: ', (dt.datetime.now()-t0).seconds / 60.)", "0 (0.063916666666666663, 0.062399999999999997)\n1 (0.034116666666666663, 0.035400000000000001)\n2 (0.025133333333333334, 0.027799999999999998)\n3 (0.020866666666666665, 0.025600000000000001)\n4 (0.017466666666666665, 0.023900000000000001)\n5 (0.015233333333333333, 0.0206)\n6 
(0.015366666666666667, 0.021700000000000001)\n7 (0.012366666666666666, 0.020199999999999999)\n8 (0.011950000000000001, 0.019800000000000002)\n9 (0.011983333333333334, 0.020199999999999999)\n10 (0.0091000000000000004, 0.0178)\n11 (0.0084666666666666675, 0.017000000000000001)\n12 (0.0086999999999999994, 0.017500000000000002)\n13 (0.0074333333333333335, 0.016500000000000001)\n14 (0.0067999999999999996, 0.017100000000000001)\n15 (0.0066166666666666665, 0.016299999999999999)\n16 (0.005783333333333333, 0.015699999999999999)\n17 (0.0061833333333333332, 0.0161)\n18 (0.0058666666666666667, 0.015100000000000001)\n19 (0.0061500000000000001, 0.017100000000000001)\n20 (0.0050499999999999998, 0.016199999999999999)\n21 (0.004816666666666667, 0.016500000000000001)\n22 (0.0045833333333333334, 0.0161)\n23 (0.0041000000000000003, 0.015699999999999999)\n24 (0.0040666666666666663, 0.016)\n25 (0.0035666666666666668, 0.0149)\n26 (0.0043333333333333331, 0.0161)\n27 (0.0032666666666666669, 0.013899999999999999)\n28 (0.0033333333333333335, 0.0143)\n29 (0.0029166666666666668, 0.013100000000000001)\n30 (0.0032166666666666667, 0.014500000000000001)\n31 (0.0033333333333333335, 0.014999999999999999)\n32 (0.0028333333333333335, 0.015100000000000001)\n33 (0.0029666666666666665, 0.0146)\n34 (0.0030500000000000002, 0.0137)\n35 (0.0029166666666666668, 0.0147)\n36 (0.0023666666666666667, 0.014200000000000001)\n37 (0.0030000000000000001, 0.0149)\n38 (0.0028333333333333335, 0.0137)\n39 (0.0020666666666666667, 0.012699999999999999)\n40 (0.0021666666666666666, 0.0134)\n41 (0.0023666666666666667, 0.0129)\n42 (0.0023500000000000001, 0.0141)\n43 (0.0021666666666666666, 0.013899999999999999)\n44 (0.0021833333333333331, 0.0154)\n45 (0.0020500000000000002, 0.0146)\n46 (0.0021833333333333331, 0.014800000000000001)\n47 (0.0016166666666666666, 0.014)\n48 (0.00175, 0.012800000000000001)\n49 (0.0017333333333333333, 0.012999999999999999)\n50 (0.0016833333333333333, 0.012999999999999999)\n51 (0.0013833333333333334, 
0.0118)\n52 (0.0019333333333333333, 0.014200000000000001)\n53 (0.0013166666666666667, 0.012500000000000001)\n54 (0.0015, 0.013299999999999999)\n55 (0.0012166666666666667, 0.0132)\n56 (0.0012999999999999999, 0.012)\n57 (0.00115, 0.011900000000000001)\n58 (0.0014333333333333333, 0.012999999999999999)\n59 (0.0014666666666666667, 0.012500000000000001)\n60 (0.0010833333333333333, 0.0124)\n61 (0.00125, 0.0126)\n62 (0.0013333333333333333, 0.013299999999999999)\n63 (0.0012166666666666667, 0.0126)\n64 (0.0011000000000000001, 0.0129)\n65 (0.0010833333333333333, 0.012800000000000001)\n66 (0.0010166666666666666, 0.012999999999999999)\n67 (0.0010666666666666667, 0.013299999999999999)\n68 (0.001, 0.013100000000000001)\n69 (0.0010499999999999999, 0.013100000000000001)\n70 (0.00089999999999999998, 0.0121)\n71 (0.00075000000000000002, 0.012699999999999999)\n72 (0.00080000000000000004, 0.012999999999999999)\n73 (0.00084999999999999995, 0.0129)\n74 (0.00083333333333333339, 0.012200000000000001)\n75 (0.00083333333333333339, 0.012200000000000001)\n76 (0.00081666666666666671, 0.0121)\n77 (0.00075000000000000002, 0.013299999999999999)\n78 (0.00061666666666666662, 0.0124)\n79 (0.00066666666666666664, 0.0134)\n80 (0.0006333333333333333, 0.012800000000000001)\n81 (0.00061666666666666662, 0.013100000000000001)\n82 (0.0006333333333333333, 0.0134)\n83 (0.00080000000000000004, 0.012200000000000001)\n84 (0.00061666666666666662, 0.0124)\n85 (0.00069999999999999999, 0.0129)\n86 (0.00075000000000000002, 0.0126)\n87 (0.00046666666666666666, 0.011900000000000001)\n88 (0.00056666666666666671, 0.0118)\n89 (0.00051666666666666668, 0.0121)\n90 (0.00078333333333333336, 0.012)\n91 (0.00051666666666666668, 0.012)\n92 (0.00050000000000000001, 0.0118)\n93 (0.00056666666666666671, 0.012200000000000001)\n94 (0.00055000000000000003, 0.0124)\n95 (0.00069999999999999999, 0.012800000000000001)\n96 (0.00058333333333333338, 0.0135)\n97 (0.00055000000000000003, 0.012699999999999999)\n98 (0.00050000000000000001, 
0.012999999999999999)\n99 (0.00048333333333333334, 0.0117)\nTotal time: 17.85\n" ], [ "res = np.array(errors)\nplt.plot(np.arange(res.shape[0]), res[:, 0], label='train error')\nplt.plot(np.arange(res.shape[0]), res[:, 1], label='test error')\nplt.legend()", "_____no_output_____" ] ], [ [ "# Convolutional Neural Network\n\n\nIn machine learning, a convolutional neural network (CNN, or ConvNet) is a type of feed-forward artificial neural network in which the connectivity pattern between its neurons is inspired by the organization of the animal visual cortex, whose individual neurons are arranged in such a way that they respond to overlapping regions tiling the visual field. Convolutional networks were inspired by biological processes and are variations of multilayer perceptrons designed to use minimal amounts of preprocessing. (Wikipedia)\n\n\n![a](images/d7.png)", "_____no_output_____" ], [ "### Motivation\n\nConvolutional Neural Networks (CNN) are biologically-inspired variants of MLPs.\nFrom Hubel and Wiesel's early work on the cat's visual cortex, we\nknow the visual cortex contains a complex arrangement of cells. These cells are\nsensitive to small sub-regions of the visual field, called a *receptive\nfield*. The sub-regions are tiled to cover the entire visual field. These\ncells act as local filters over the input space and are well-suited to exploit\nthe strong spatially local correlation present in natural images.\n\nAdditionally, two basic cell types have been identified: Simple cells respond\nmaximally to specific edge-like patterns within their receptive field. Complex\ncells have larger receptive fields and are locally invariant to the exact\nposition of the pattern.\n\nThe animal visual cortex being the most powerful visual processing system in\nexistence, it seems natural to emulate its behavior. Hence, many\nneurally-inspired models can be found in the literature. 
\n\n### Sparse Connectivity\n\n\nCNNs exploit spatially-local correlation by enforcing a local connectivity\npattern between neurons of adjacent layers. In other words, the inputs of\nhidden units in layer **m** are from a subset of units in layer **m-1**, units\nthat have spatially contiguous receptive fields. We can illustrate this\ngraphically as follows:\n\n![A](images/sparse_1D_nn.png)\n\nImagine that layer **m-1** is the input retina. In the above figure, units in\nlayer **m** have receptive fields of width 3 in the input retina and are thus\nonly connected to 3 adjacent neurons in the retina layer. Units in layer\n**m+1** have a similar connectivity with the layer below. We say that their\nreceptive field with respect to the layer below is also 3, but their receptive\nfield with respect to the input is larger (5). Each unit is unresponsive to\nvariations outside of its receptive field with respect to the retina. The\narchitecture thus ensures that the learnt \"filters\" produce the strongest\nresponse to a spatially local input pattern.\n\nHowever, as shown above, stacking many such layers leads to (non-linear)\n\"filters\" that become increasingly \"global\" (i.e. responsive to a larger region\nof pixel space). For example, the unit in hidden layer **m+1** can encode a\nnon-linear feature of width 5 (in terms of pixel space).", "_____no_output_____" ], [ "### Shared Weights\n\n\nIn addition, in CNNs, each filter $h_i$ is replicated across the entire\nvisual field. These replicated units share the same parameterization (weight\nvector and bias) and form a *feature map*.\n\n![](images/conv_1D_nn.png)\n\nIn the above figure, we show 3 hidden units belonging to the same feature map.\nWeights of the same color are shared---constrained to be identical. Gradient\ndescent can still be used to learn such shared parameters, with only a small\nchange to the original algorithm. 
The gradient of a shared weight is simply the\nsum of the gradients of the parameters being shared.\n\nReplicating units in this way allows for features to be detected *regardless\nof their position in the visual field.* Additionally, weight sharing increases\nlearning efficiency by greatly reducing the number of free parameters being\nlearnt. The constraints on the model enable CNNs to achieve better\ngeneralization on vision problems.\n\n\n### Details and Notation\n\n\nA feature map is obtained by repeated application of a function across\nsub-regions of the entire image, in other words, by *convolution* of the\ninput image with a linear filter, adding a bias term and then applying a\nnon-linear function. If we denote the k-th feature map at a given layer as\n$h^k$, whose filters are determined by the weights $W^k$ and bias\n$b_k$, then the feature map $h^k$ is obtained as follows (for\n$tanh$ non-linearities):", "_____no_output_____" ], [ "$$\n h^k_{ij} = \\tanh ( (W^k * x)_{ij} + b_k ).\n$$\n\nNote\n\n* Recall the following definition of convolution for a 1D signal.\n$$ o[n] = f[n]*g[n] = \\sum_{u=-\\infty}^{\\infty} f[u] g[n-u] = \\sum_{u=-\\infty}^{\\infty} f[n-u] g[u]`.\n$$\n\n* This can be extended to 2D as follows:\n\n$$o[m,n] = f[m,n]*g[m,n] = \\sum_{u=-\\infty}^{\\infty} \\sum_{v=-\\infty}^{\\infty} f[u,v] g[m-u,n-v]`.\n$$", "_____no_output_____" ], [ "To form a richer representation of the data, each hidden layer is composed of\n*multiple* feature maps, $\\{h^{(k)}, k=0..K\\}$. The weights $W$ of\na hidden layer can be represented in a 4D tensor containing elements for every\ncombination of destination feature map, source feature map, source vertical\nposition, and source horizontal position. The biases $b$ can be\nrepresented as a vector containing one element for every destination feature\nmap. 
We illustrate this graphically as follows:\n\n**Figure 1**: example of a convolutional layer\n![](images/cnn_explained.png)\n\n\nThe figure shows two layers of a CNN. **Layer m-1** contains four feature maps.\n**Hidden layer m** contains two feature maps ($h^0$ and $h^1$).\nPixels (neuron outputs) in $h^0$ and $h^1$ (outlined as blue and\nred squares) are computed from pixels of layer (m-1) which fall within their\n2x2 receptive field in the layer below (shown as colored rectangles). Notice\nhow the receptive field spans all four input feature maps. The weights\n$W^0$ and $W^1$ of $h^0$ and $h^1$ are thus 3D weight\ntensors. The leading dimension indexes the input feature maps, while the other\ntwo refer to the pixel coordinates.\n\nPutting it all together, $W^{kl}_{ij}$ denotes the weight connecting\neach pixel of the k-th feature map at layer m, with the pixel at coordinates\n(i,j) of the l-th feature map of layer (m-1).", "_____no_output_____" ], [ "### The Convolution Operator\n\n\nConvOp is the main workhorse for implementing a convolutional layer in Theano.\nConvOp is used by ``theano.tensor.signal.conv2d``, which takes two symbolic inputs:\n\n\n* a 4D tensor corresponding to a mini-batch of input images. The shape of the\n tensor is as follows: [mini-batch size, number of input feature maps, image\n height, image width].\n\n* a 4D tensor corresponding to the weight matrix $W$. The shape of the\n tensor is: [number of feature maps at layer m, number of feature maps at\n layer m-1, filter height, filter width]", "_____no_output_____" ], [ "### MaxPooling\n\n\nAnother important concept of CNNs is *max-pooling,* which is a form of\nnon-linear down-sampling. 
Max-pooling partitions the input image into\na set of non-overlapping rectangles and, for each such sub-region, outputs the\nmaximum value.\n\nMax-pooling is useful in vision for two reasons: \n* By eliminating non-maximal values, it reduces computation for upper layers.\n\n* It provides a form of translation invariance. Imagine\n cascading a max-pooling layer with a convolutional layer. There are 8\n directions in which one can translate the input image by a single pixel.\n If max-pooling is done over a 2x2 region, 3 out of these 8 possible\n configurations will produce exactly the same output at the convolutional\n layer. For max-pooling over a 3x3 window, this jumps to 5/8.\n\n Since it provides additional robustness to position, max-pooling is a\n \"smart\" way of reducing the dimensionality of intermediate representations.\n\nMax-pooling is done in Theano by way of\n``theano.tensor.signal.downsample.max_pool_2d``. This function takes as input\nan N dimensional tensor (where N >= 2) and a downscaling factor and performs\nmax-pooling over the 2 trailing dimensions of the tensor.\n\n\n### The Full Model: CovNet\n\n\nSparse, convolutional layers and max-pooling are at the heart of the LeNet\nfamily of models. While the exact details of the model will vary greatly,\nthe figure below shows a graphical depiction of a LeNet model.\n\n![](images/mylenet.png)\n\n\nThe lower-layers are composed to alternating convolution and max-pooling\nlayers. The upper-layers however are fully-connected and correspond to a\ntraditional MLP (hidden layer + logistic regression). The input to the\nfirst fully-connected layer is the set of all features maps at the layer\nbelow.\n\nFrom an implementation point of view, this means lower-layers operate on 4D\ntensors. 
These are then flattened to a 2D matrix of rasterized feature maps,\nto be compatible with our previous MLP implementation.\n", "_____no_output_____" ] ], [ [ "# from theano.tensor.nnet.conv import conv2d\nfrom theano.tensor.nnet import conv2d\nfrom theano.tensor.signal.downsample import max_pool_2d", "/home/al/anaconda3/lib/python3.5/site-packages/theano/tensor/signal/downsample.py:6: UserWarning: downsample module has been moved to the theano.tensor.signal.pool module.\n \"downsample module has been moved to the theano.tensor.signal.pool module.\")\n" ] ], [ [ "Modify dropout function", "_____no_output_____" ] ], [ [ "def model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):\n l1a = rectify(conv2d(X, w, border_mode='full'))\n l1 = max_pool_2d(l1a, (2, 2))\n l1 = dropout(l1, p_drop_conv)\n\n l2a = rectify(conv2d(l1, w2))\n l2 = max_pool_2d(l2a, (2, 2))\n l2 = dropout(l2, p_drop_conv)\n\n l3a = rectify(conv2d(l2, w3))\n l3b = max_pool_2d(l3a, (2, 2))\n # convert from 4tensor to normal matrix\n l3 = T.flatten(l3b, outdim=2)\n l3 = dropout(l3, p_drop_conv)\n\n l4 = rectify(T.dot(l3, w4))\n l4 = dropout(l4, p_drop_hidden)\n\n pyx = softmax(T.dot(l4, w_o))\n return l1, l2, l3, l4, pyx", "_____no_output_____" ] ], [ [ "reshape into conv 4tensor (b, c, 0, 1) format", "_____no_output_____" ] ], [ [ "X_train2 = X_train.reshape(-1, 1, 28, 28)\nX_test2 = X_test.reshape(-1, 1, 28, 28)", "_____no_output_____" ], [ "# now 4tensor for conv instead of matrix\nX = T.ftensor4()\nY = T.fmatrix()", "_____no_output_____" ], [ "w = init_weights((32, 1, 3, 3))\nw2 = init_weights((64, 32, 3, 3))\nw3 = init_weights((128, 64, 3, 3))\nw4 = init_weights((128 * 3 * 3, 625))\nw_o = init_weights((625, 10))", "_____no_output_____" ], [ "noise_l1, noise_l2, noise_l3, noise_l4, noise_py_x = model(X, w, w2, w3, w4, w_o, 0.2, 0.5)\nl1, l2, l3, l4, py_x = model(X, w, w2, w3, w4, w_o, 0., 0.)\ny_x = T.argmax(py_x, axis=1)\n\ncost = T.mean(T.nnet.categorical_crossentropy(noise_py_x, Y))\nparams = 
[w, w2, w3, w4, w_o]\nupdates = RMSprop(cost, params, lr=0.001)\n\ntrain = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)\npredict = theano.function(inputs=[X], outputs=y_x, allow_input_downcast=True)", "/home/al/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:3: UserWarning: pool_2d() will have the parameter ignore_border default value changed to True (currently False). To have consistent behavior with all Theano version, explicitly add the parameter ignore_border=True. On the GPU, using ignore_border=True is needed to use cuDNN. When using ignore_border=False and not using cuDNN, the only GPU combination supported is when `ds == st and padding == (0, 0) and mode == 'max'`. Otherwise, the convolution will be executed on CPU.\n app.launch_new_instance()\n/home/al/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:7: UserWarning: pool_2d() will have the parameter ignore_border default value changed to True (currently False). To have consistent behavior with all Theano version, explicitly add the parameter ignore_border=True. On the GPU, using ignore_border=True is needed to use cuDNN. When using ignore_border=False and not using cuDNN, the only GPU combination supported is when `ds == st and padding == (0, 0) and mode == 'max'`. Otherwise, the convolution will be executed on CPU.\n/home/al/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:11: UserWarning: pool_2d() will have the parameter ignore_border default value changed to True (currently False). To have consistent behavior with all Theano version, explicitly add the parameter ignore_border=True. On the GPU, using ignore_border=True is needed to use cuDNN. When using ignore_border=False and not using cuDNN, the only GPU combination supported is when `ds == st and padding == (0, 0) and mode == 'max'`. 
Otherwise, the convolution will be executed on CPU.\n" ], [ "t0 = dt.datetime.now()\n\nerrors = []\nfor i in range(100):\n t1 = dt.datetime.now()\n \n for start, end in zip(range(0, X_train.shape[0], 128), \n range(128, X_train.shape[0], 128)):\n cost = train(X_train2[start:end], y_train2[start:end])\n \n errors.append((np.mean(y_train != predict(X_train2)), \n np.mean(y_test != predict(X_test2))))\n print(i, errors[-1])\n print('Current iter time: ', (dt.datetime.now()-t1).seconds / 60.)\n\nprint('Total time: ', (dt.datetime.now()-t0).seconds / 60.)", "0 (0.066083333333333327, 0.058799999999999998)\nCurrent iter time: 3.966666666666667\n1 (0.029049999999999999, 0.025100000000000001)\nCurrent iter time: 2.3666666666666667\n2 (0.021183333333333332, 0.019400000000000001)\nCurrent iter time: 2.466666666666667\n3 (0.012483333333333334, 0.0132)\nCurrent iter time: 2.466666666666667\n4 (0.010183333333333334, 0.0117)\nCurrent iter time: 2.45\n5 (0.0080333333333333333, 0.010500000000000001)\nCurrent iter time: 2.4833333333333334\n6 (0.0078499999999999993, 0.010500000000000001)\nCurrent iter time: 2.3666666666666667\n7 (0.0064999999999999997, 0.0085000000000000006)\nCurrent iter time: 2.4833333333333334\n8 (0.0053166666666666666, 0.0088000000000000005)\nCurrent iter time: 2.466666666666667\n9 (0.0067166666666666668, 0.0092999999999999992)\nCurrent iter time: 2.5166666666666666\n10 (0.003966666666666667, 0.0080999999999999996)\nCurrent iter time: 2.5166666666666666\n11 (0.0043666666666666663, 0.0077999999999999996)\nCurrent iter time: 2.55\n12 (0.0029499999999999999, 0.0080000000000000002)\nCurrent iter time: 2.5833333333333335\n13 (0.0028166666666666665, 0.0077000000000000002)\nCurrent iter time: 2.45\n14 (0.0028500000000000001, 0.0071999999999999998)\nCurrent iter time: 2.466666666666667\n15 (0.0021666666666666666, 0.0067000000000000002)\nCurrent iter time: 2.4833333333333334\n16 (0.0020833333333333333, 0.0073000000000000001)\nCurrent iter time: 2.4\n17 
(0.0017333333333333333, 0.0071999999999999998)\nCurrent iter time: 2.433333333333333\n18 (0.0020333333333333332, 0.0067999999999999996)\nCurrent iter time: 2.4166666666666665\n19 (0.0014833333333333332, 0.0068999999999999999)\nCurrent iter time: 2.35\n20 (0.0015, 0.0070000000000000001)\nCurrent iter time: 2.45\n" ], [ "print('Total time: ', (dt.datetime.now()-t0).seconds / 60.)", "Total time: 53.483333333333334\n" ], [ "res = np.array(errors)\nplt.plot(np.arange(res.shape[0]), res[:, 0], label='train error')\nplt.plot(np.arange(res.shape[0]), res[:, 1], label='test error')\nplt.legend()", "_____no_output_____" ] ], [ [ "# Even more complex networks\n\n## GoogLeNet\n![a](images/googlenet2.png)\n\n[examples](http://www.csc.kth.se/~roelof/deepdream/bvlc_googlenet.html)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d068f9d297c023d84d06f6d9ac1ed01698b7952c
7,712
ipynb
Jupyter Notebook
predict.ipynb
trancongthinh6304/trafficsignclassification
74bb7b6ed82a6efffc8f14f3bb8201dadb386c70
[ "MIT" ]
null
null
null
predict.ipynb
trancongthinh6304/trafficsignclassification
74bb7b6ed82a6efffc8f14f3bb8201dadb386c70
[ "MIT" ]
null
null
null
predict.ipynb
trancongthinh6304/trafficsignclassification
74bb7b6ed82a6efffc8f14f3bb8201dadb386c70
[ "MIT" ]
1
2021-09-20T14:26:14.000Z
2021-09-20T14:26:14.000Z
36.206573
1,616
0.526063
[ [ [ "import numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.preprocessing import image\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\nimport warnings;\r\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "# Predict batches of images", "_____no_output_____" ] ], [ [ "tf.compat.v1.enable_v2_behavior()", "_____no_output_____" ], [ "label = ['3_24+10', '3_24+30', '3_24+5', '3_24+60', '3_24+70', '3_24+90', '3_24+110', '3_24+20', '3_24+40', '3_24+50', '3_24+80', '1_12_1', '1_12_2', '1_13', '1_14', '1_19', '1_24', '1_26', '1_27', '3_21', '3_31', '3_33', '4_4_1', '4_4_2', '4_5_2', '4_5_4', '4_5_5', '4_8_5', '4_8_6', '5_17', '6_2+50', '6_2+70', '6_2+30', '6_2+40', '6_2+60', '6_2+80', '6_7', '7_1', '7_11', '7_13', '7_14', '7_2', '7_4', '7_7', '7_9', 'smoke', 'unknown', '1_11_1', '1_11_2', '1_15', '1_16', '1_18', '1_20_1', '1_22', '1_25', '1_28', '1_29', '1_30', '1_8', '2_3_1', '2_3_L', '2_3_R', '2_6', '2_7', '3_15', '3_17', '3_20', '3_25+70', '3_25+20', '3_25+30', '3_25+40', '3_25+50', '3_25+5', '3_25+60', '3_6', '4_1_6', '4_2_1', '4_2_2', '5_15_5', '6_3_1', '7_3', '7_6', '1_17', '3_16', '5_15_3', '5_20', '7_12', '1_31', '3_10', '3_19', '3_2', '3_5', '3_7', '3_9', '4_1_2_1', '4_1_3_1', '4_5_1', '4_5_6', '4_8_1', '4_8_2', '4_8_3', '5_1', '5_11_1', '5_12_1', '5_13_1', '5_13_2', '5_14_1', '5_14_2', '5_14_3', '5_2', '5_23_2', '5_24_2', '5_3', '5_4', '5_8', '7_5', '3_32', '7_18', '1_2', '1_33', '1_7', '2_4', '3_18_1', '3_18_2', '3_8', '4_1_2', '4_1_3', '5_14', '6_15_2', '6_15_3', '6_6', '6_8_1', '1_1', '1_20_2', '1_20_3', '1_21', '1_23', '1_5', '2_1', '2_2', '2_5', '3_1', '3_26', '3_27', '3_28', '3_29', '3_30', '4_1_1', '4_1_4', '4_1_5', '4_2_3', '4_3', '4_8_4', '5_16', '5_18', '5_19', '5_21', '5_22', '5_5', '5_6', '5_7_1', '5_7_2', '5_9', '6_15_1', '6_16', '6_4', '6_8_2', '6_8_3', '5_29', '5_31+10', '5_31+20', '5_31+30', '5_31+40', '5_31+5', '5_31+50', '5_32', '5_33', '1_6', 
'5_15_2+2', '5_15_2+1', '5_15_2+3', '5_15_2+5']", "_____no_output_____" ], [ "autoencoder = keras.models.load_model(\"../input/aaaaaaaaaa/autoencoder.h5\") # load pre_trained auto encoder model", "_____no_output_____" ], [ "model_1= keras.models.load_model(\"../input/aaaaaaaaaa/VGG19_2.h5\")\r\nmodel_2= keras.models.load_model(\"../input/aaaaaaaaaa/InceptionResNetV2_2.h5\")\r\nmodel_3 = keras.models.load_model('../input/aaaaaaaaaa/denset201_2.h5')", "_____no_output_____" ], [ "root_dir = '../input/aiijc-final-dcm/AIJ_2gis_data/'\r\ndef load_and_change_img(img):\r\n img = image.img_to_array(img)\r\n img = img/255.\r\n result= autoencoder.predict(img[None])\r\n new_arr = ((result - result.min()) * (1/(result.max() - result.min()) * 255)).astype('uint8')\r\n img_new = np.zeros(shape=(80,80,3), dtype= np.int16)\r\n img_new[..., 0] = new_arr[...,2]\r\n img_new[...,1]=new_arr[...,1]\r\n img_new[..., 2] = new_arr[...,0]\r\n return img_new/255.", "_____no_output_____" ], [ "df = pd.read_csv(\"../input/aiijc-final-dcm/AIJ_2gis_data/sample_submission.csv\")", "_____no_output_____" ], [ "df_a=df[0:100000]", "_____no_output_____" ], [ "train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=load_and_change_img)\r\ntest_set =train_datagen.flow_from_dataframe(directory = root_dir,\r\n dataframe=df_a,\r\n x_col = 'filename',\r\n y_col='label',\r\n classes=None,\r\n class_model=None,\r\n shuffle=False,\r\n batch_size=256,\r\n target_size=(80,80))", "_____no_output_____" ], [ "outputs=[]\r\ny_pred_1=model_1.predict(test_set, batch_size=256,verbose=1)\r\ny_pred_2=model_2.predict(test_set, batch_size=256,verbose=1)\r\ny_pred_3=model_3.predict(test_set, batch_size=256, verbose=1)\r\ny_pred=y_pred_1*0.2 + y_pred_2*0.4 + y_pred_3*0.4\r\ndel y_pred_1\r\ndel y_pred_2\r\ndel y_pred_3\r\nfor i in range(len(np.argmax(y_pred, axis=1))):\r\n outputs.append(label[np.argmax(y_pred[i], axis=0)])", "_____no_output_____" ], [ "df_new=pd.DataFrame({'filename': 
df_a['filename'], 'label': outputs})\r\ndf_new.to_csv('predict.csv')", "_____no_output_____" ] ], [ [ "# Predict single image", "_____no_output_____" ] ], [ [ "model1= keras.models.load_model(\"../input/aaaaaaaaaa/VGG19_2.h5\")\r\nmodel2= keras.models.load_model(\"../input/aaaaaaaaaa/InceptionResNetV2_2.h5\")\r\nmodel3 = keras.models.load_model('../input/aaaaaaaaaa/denset201_2.h5')", "_____no_output_____" ], [ "def auto_encoder(img_path):\r\n img = image.load_img(img_path, target_size=(80,80,3))\r\n img = image.img_to_array(img)\r\n img = img/255.\r\n result= autoencoder.predict(img[None])\r\n new_arr = ((result - result.min()) * (1/(result.max() - result.min()) * 255)).astype('uint8')\r\n img_new = np.zeros(shape=(80,80,3), dtype=np.int16)\r\n img_new[..., 0] = new_arr[...,2]\r\n img_new[...,1]=new_arr[...,1]\r\n img_new[..., 2] = new_arr[...,0]\r\n return img_new/255.", "_____no_output_____" ], [ "labels=[]\r\nimg_path=\"\"\r\ndef predict(img_path):\r\n img = auto_encoder(img_path)\r\n y_pred1=model1.predict(np.expand_dims(img, axis=0)*1/255.0)\r\n y_pred2=model2.predict(np.expand_dims(img, axis=0)*1/255.0)\r\n y_pred3=model3.predict(np.expand_dims(img, axis=0)*1/255.0)\r\n y_pred=y_pred1*0.2 + y_pred2*0.4 + y_pred3*0.4\r\n print(label[np.argmax(y_pred)])", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d068fb3f178c42fc524d64ed4a80346a8a0a7d5a
64,304
ipynb
Jupyter Notebook
ML Project Feedforward Neural Network 6033657523.ipynb
bellmcp/machine-learning-price-prediction
d4cdcb873ace5c0427bafd26936ec4776d5aae57
[ "MIT" ]
6
2020-07-03T09:51:00.000Z
2021-07-08T17:04:06.000Z
ML Project Feedforward Neural Network 6033657523.ipynb
bellmcp/machine-learning-price-prediction
d4cdcb873ace5c0427bafd26936ec4776d5aae57
[ "MIT" ]
null
null
null
ML Project Feedforward Neural Network 6033657523.ipynb
bellmcp/machine-learning-price-prediction
d4cdcb873ace5c0427bafd26936ec4776d5aae57
[ "MIT" ]
2
2021-02-13T03:31:36.000Z
2021-02-28T08:55:06.000Z
64,304
64,304
0.691528
[ [ [ "# ML Project 6033657523 - Feedforward neural network", "_____no_output_____" ], [ "## Importing the libraries", "_____no_output_____" ] ], [ [ "from sklearn.metrics import mean_absolute_error\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import KFold, train_test_split\nfrom math import sqrt\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Importing the cleaned dataset", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv('cleanData_Final.csv')\nX = dataset[['PrevAVGCost', 'PrevAssignedCost', 'AVGCost', 'LatestDateCost', 'A', 'B', 'C', 'D', 'E', 'F', 'G']]\ny = dataset['GenPrice']", "_____no_output_____" ], [ "X", "_____no_output_____" ] ], [ [ "## Splitting the dataset into the Training set and Test set", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)", "_____no_output_____" ] ], [ [ "## Feedforward neural network", "_____no_output_____" ], [ "### Fitting Feedforward neural network to the Training Set", "_____no_output_____" ] ], [ [ "from sklearn.neural_network import MLPRegressor\nregressor = MLPRegressor(hidden_layer_sizes = (200, 200, 200, 200, 200), activation = 'relu', solver = 'adam', max_iter = 500, learning_rate = 'adaptive')\nregressor.fit(X_train, y_train)", "_____no_output_____" ], [ "trainSet = pd.concat([X_train, y_train], axis = 1)\ntrainSet.head()", "_____no_output_____" ] ], [ [ "## Evaluate model accuracy", "_____no_output_____" ] ], [ [ "y_pred = regressor.predict(X_test)\ny_pred", "_____no_output_____" ], [ "testSet = pd.concat([X_test, y_test], axis = 1)\ntestSet.head()", "_____no_output_____" ] ], [ [ "Compare GenPrice with PredictedGenPrice", "_____no_output_____" ] ], [ [ "datasetPredict = pd.concat([testSet.reset_index(), pd.Series(y_pred, name = 'PredictedGenPrice')], axis = 1).round(2)\ndatasetPredict.head(10)", 
"_____no_output_____" ], [ "datasetPredict.corr()", "_____no_output_____" ], [ "print(\"Training set accuracy = \" + str(regressor.score(X_train, y_train)))\nprint(\"Test set accuracy = \" + str(regressor.score(X_test, y_test)))", "Training set accuracy = 0.9898465392908009\nTest set accuracy = 0.9841771850834575\n" ] ], [ [ "Training set accuracy = 0.9885445650077587<br>\nTest set accuracy = 0.9829187423043221", "_____no_output_____" ], [ "### MSE", "_____no_output_____" ] ], [ [ "from sklearn import metrics\nprint('MSE:', metrics.mean_squared_error(y_test, y_pred))", "MSE: 160.2404730229541\n" ] ], [ [ "MSE v1: 177.15763887557458<br>\nMSE v2: 165.73161615532584<br>\nMSE v3: 172.98494783761967", "_____no_output_____" ], [ "### MAPE", "_____no_output_____" ] ], [ [ "def mean_absolute_percentage_error(y_test, y_pred):\n y_test, y_pred = np.array(y_test), np.array(y_pred)\n return np.mean(np.abs((y_test - y_pred)/y_test)) * 100\n\nprint('MAPE:', mean_absolute_percentage_error(y_test, y_pred))", "MAPE: 6.159884199380194\n" ] ], [ [ "MAPE v1: 6.706572320387714<br>\nMAPE v2: 6.926678067146115<br>\nMAPE v3: 7.34081953098462\n", "_____no_output_____" ], [ "### Visualize", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nplt.plot([i for i in range(len(y_pred))], y_pred, color = 'r')\nplt.scatter([i for i in range(len(y_pred))], y_test, color = 'b')\nplt.ylabel('Price')\nplt.xlabel('Index')\nplt.legend(['Predict', 'True'], loc = 'best')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d068feff717a085b2f41440b03a21fdc7601446d
567,909
ipynb
Jupyter Notebook
transfer-learning/Transfer_Learning.ipynb
skagrawal/Deep-Learning-Udacity-ND
0de312e4efa08ae12025b0c2ad0b5a97066e6d0c
[ "MIT" ]
null
null
null
transfer-learning/Transfer_Learning.ipynb
skagrawal/Deep-Learning-Udacity-ND
0de312e4efa08ae12025b0c2ad0b5a97066e6d0c
[ "MIT" ]
null
null
null
transfer-learning/Transfer_Learning.ipynb
skagrawal/Deep-Learning-Udacity-ND
0de312e4efa08ae12025b0c2ad0b5a97066e6d0c
[ "MIT" ]
null
null
null
511.169217
260,732
0.929496
[ [ [ "# Transfer Learning\n\nMost of the time you won't want to train a whole convolutional network yourself. Modern ConvNets training on huge datasets like ImageNet take weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using [VGGNet](https://arxiv.org/pdf/1409.1556.pdf) trained on the [ImageNet dataset](http://www.image-net.org/) as a feature extractor. Below is a diagram of the VGGNet architecture.\n\n<img src=\"assets/cnnarchitecture.jpg\" width=700px>\n\nVGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes.\n\nYou can read more about transfer learning from [the CS231n course notes](http://cs231n.github.io/transfer-learning/#tf).\n\n## Pretrained VGGNet\n\nWe'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg. This code is already included in 'tensorflow_vgg' directory, sdo you don't have to clone it.\n\nThis is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained and the parameters are available from this link. 
**You'll need to clone the repo into the folder containing this notebook.** Then download the parameter file using the next cell.", "_____no_output_____" ] ], [ [ "from urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\n\nvgg_dir = 'tensorflow_vgg/'\n# Make sure vgg exists\nif not isdir(vgg_dir):\n raise Exception(\"VGG directory doesn't exist!\")\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile(vgg_dir + \"vgg16.npy\"):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:\n urlretrieve(\n 'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',\n vgg_dir + 'vgg16.npy',\n pbar.hook)\nelse:\n print(\"Parameter file already exists!\")", "Parameter file already exists!\n" ] ], [ [ "## Flower power\n\nHere we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. 
This dataset comes from the [TensorFlow inception tutorial](https://www.tensorflow.org/tutorials/image_retraining).", "_____no_output_____" ] ], [ [ "import tarfile\n\ndataset_folder_path = 'flower_photos'\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile('flower_photos.tar.gz'):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:\n urlretrieve(\n 'http://download.tensorflow.org/example_images/flower_photos.tgz',\n 'flower_photos.tar.gz',\n pbar.hook)\n\nif not isdir(dataset_folder_path):\n with tarfile.open('flower_photos.tar.gz') as tar:\n tar.extractall()\n tar.close()", "_____no_output_____" ] ], [ [ "## ConvNet Codes\n\nBelow, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier.\n\nHere we're using the `vgg16` module from `tensorflow_vgg`. The network takes images of size $224 \\times 224 \\times 3$ as input. Then it has 5 sets of convolutional layers. 
The network implemented here has this structure (copied from [the source code](https://github.com/machrisaa/tensorflow-vgg/blob/master/vgg16.py)):\n\n```\nself.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\nself.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\nself.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\nself.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\nself.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\nself.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\nself.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\nself.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\nself.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\nself.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\nself.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\nself.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\nself.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\nself.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\nself.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\nself.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\nself.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\nself.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\nself.fc6 = self.fc_layer(self.pool5, \"fc6\")\nself.relu6 = tf.nn.relu(self.fc6)\n```\n\nSo what we want are the values of the first fully connected layer, after being ReLUd (`self.relu6`). To build the network, we use\n\n```\nwith tf.Session() as sess:\n vgg = vgg16.Vgg16()\n input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])\n with tf.name_scope(\"content_vgg\"):\n vgg.build(input_)\n```\n\nThis creates the `vgg` object, then builds the graph with `vgg.build(input_)`. 
Then to get the values from the layer,\n\n```\nfeed_dict = {input_: images}\ncodes = sess.run(vgg.relu6, feed_dict=feed_dict)\n```", "_____no_output_____" ] ], [ [ "import os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_vgg import vgg16\nfrom tensorflow_vgg import utils", "_____no_output_____" ], [ "data_dir = 'flower_photos/'\ncontents = os.listdir(data_dir)\nclasses = [each for each in contents if os.path.isdir(data_dir + each)]", "_____no_output_____" ] ], [ [ "Below I'm running images through the VGG network in batches.\n\n> **Exercise:** Below, build the VGG network. Also get the codes from the first fully connected layer (make sure you get the ReLUd values).", "_____no_output_____" ] ], [ [ "# Set the batch size higher if you can fit in in your GPU memory\nbatch_size = 10\ncodes_list = []\nlabels = []\nbatch = []\n\ncodes = None\n\nwith tf.Session() as sess:\n \n # TODO: Build the vgg network here\n\n vgg = vgg16.Vgg16()\n input_ = tf.placeholder(tf.float32, [None,224,224,3])\n with tf.name_scope(\"content_vgg\"):\n vgg.build(input_)\n for each in classes:\n print(\"Starting {} images\".format(each))\n class_path = data_dir + each\n files = os.listdir(class_path)\n for ii, file in enumerate(files, 1):\n # Add images to the current batch\n # utils.load_image crops the input images for us, from the center\n img = utils.load_image(os.path.join(class_path, file))\n batch.append(img.reshape((1, 224, 224, 3)))\n labels.append(each)\n \n # Running the batch through the network to get the codes\n if ii % batch_size == 0 or ii == len(files):\n \n # Image batch to pass to VGG network\n images = np.concatenate(batch)\n \n # TODO: Get the values from the relu6 layer of the VGG network\n feed_dict = {input_: images}\n codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)\n \n # Here I'm building an array of the codes\n if codes is None:\n codes = codes_batch\n else:\n codes = np.concatenate((codes, codes_batch))\n \n # Reset to start building the 
next batch\n batch = []\n print('{} images processed'.format(ii))", "/Users/shubhama/Documents/Deeplearning-github-backup/DL-ND-Repo/transfer-learning/tensorflow_vgg/vgg16.npy\nnpy file loaded\nbuild model started\nbuild model finished: 4s\nStarting daisy images\n10 images processed\n20 images processed\n30 images processed\n40 images processed\n50 images processed\n60 images processed\n70 images processed\n80 images processed\n90 images processed\n100 images processed\n110 images processed\n120 images processed\n130 images processed\n140 images processed\n150 images processed\n160 images processed\n170 images processed\n180 images processed\n190 images processed\n200 images processed\n210 images processed\n220 images processed\n230 images processed\n240 images processed\n250 images processed\n260 images processed\n270 images processed\n280 images processed\n290 images processed\n300 images processed\n310 images processed\n320 images processed\n330 images processed\n340 images processed\n350 images processed\n360 images processed\n370 images processed\n380 images processed\n390 images processed\n400 images processed\n410 images processed\n420 images processed\n430 images processed\n440 images processed\n450 images processed\n460 images processed\n470 images processed\n480 images processed\n490 images processed\n500 images processed\n510 images processed\n520 images processed\n530 images processed\n540 images processed\n550 images processed\n560 images processed\n570 images processed\n580 images processed\n590 images processed\n600 images processed\n610 images processed\n620 images processed\n630 images processed\n633 images processed\nStarting dandelion images\n10 images processed\n20 images processed\n30 images processed\n40 images processed\n50 images processed\n60 images processed\n70 images processed\n80 images processed\n90 images processed\n100 images processed\n110 images processed\n120 images processed\n130 images processed\n140 images processed\n150 images 
processed\n160 images processed\n170 images processed\n180 images processed\n190 images processed\n200 images processed\n210 images processed\n220 images processed\n230 images processed\n240 images processed\n250 images processed\n260 images processed\n270 images processed\n280 images processed\n290 images processed\n300 images processed\n310 images processed\n320 images processed\n330 images processed\n340 images processed\n350 images processed\n360 images processed\n370 images processed\n380 images processed\n390 images processed\n400 images processed\n410 images processed\n420 images processed\n430 images processed\n440 images processed\n450 images processed\n460 images processed\n470 images processed\n480 images processed\n490 images processed\n500 images processed\n510 images processed\n520 images processed\n530 images processed\n540 images processed\n550 images processed\n560 images processed\n570 images processed\n580 images processed\n590 images processed\n600 images processed\n610 images processed\n620 images processed\n630 images processed\n640 images processed\n650 images processed\n660 images processed\n670 images processed\n680 images processed\n690 images processed\n700 images processed\n710 images processed\n720 images processed\n730 images processed\n740 images processed\n750 images processed\n760 images processed\n770 images processed\n780 images processed\n790 images processed\n800 images processed\n810 images processed\n820 images processed\n830 images processed\n840 images processed\n850 images processed\n860 images processed\n870 images processed\n880 images processed\n890 images processed\n898 images processed\nStarting roses images\n10 images processed\n20 images processed\n30 images processed\n40 images processed\n50 images processed\n60 images processed\n70 images processed\n80 images processed\n90 images processed\n100 images processed\n110 images processed\n120 images processed\n130 images processed\n140 images processed\n150 images 
processed\n160 images processed\n170 images processed\n180 images processed\n190 images processed\n200 images processed\n210 images processed\n220 images processed\n230 images processed\n240 images processed\n250 images processed\n260 images processed\n270 images processed\n280 images processed\n290 images processed\n300 images processed\n310 images processed\n320 images processed\n330 images processed\n340 images processed\n350 images processed\n360 images processed\n370 images processed\n380 images processed\n390 images processed\n400 images processed\n410 images processed\n420 images processed\n430 images processed\n440 images processed\n450 images processed\n" ], [ "# write codes to file\nwith open('codes', 'w') as f:\n codes.tofile(f)\n \n# write labels to file\nimport csv\nwith open('labels', 'w') as f:\n writer = csv.writer(f, delimiter='\\n')\n writer.writerow(labels)", "_____no_output_____" ] ], [ [ "## Building the Classifier\n\nNow that we have codes for all the images, we can build a simple classifier on top of them. The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work.", "_____no_output_____" ] ], [ [ "# read codes and labels from file\nimport csv\n\nwith open('labels') as f:\n reader = csv.reader(f, delimiter='\\n')\n labels = np.array([each for each in reader if len(each) > 0]).squeeze()\nwith open('codes') as f:\n codes = np.fromfile(f, dtype=np.float32)\n codes = codes.reshape((len(labels), -1))", "_____no_output_____" ] ], [ [ "### Data prep\n\nAs usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels!\n\n> **Exercise:** From scikit-learn, use [LabelBinarizer](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelBinarizer.html) to create one-hot encoded vectors from the labels. 
", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import LabelBinarizer\n\nlb = LabelBinarizer()\nlb.fit(labels)\n\nlabels_vecs = lb.transform(labels)", "_____no_output_____" ] ], [ [ "Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same the distribution of classes as it is for the whole data set. The easiest way to accomplish both these goals is to use [`StratifiedShuffleSplit`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) from scikit-learn.\n\nYou can create the splitter like so:\n```\nss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)\n```\nThen split the data with \n```\nsplitter = ss.split(x, y)\n```\n\n`ss.split` returns a generator of indices. You can pass the indices into the arrays to get the split sets. The fact that it's a generator means you either need to iterate over it, or use `next(splitter)` to get the indices. 
Be sure to read the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) and the [user guide](http://scikit-learn.org/stable/modules/cross_validation.html#random-permutations-cross-validation-a-k-a-shuffle-split).\n\n> **Exercise:** Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import StratifiedShuffleSplit\n\nss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)\n\nX = codes\ny = labels_vecs\nfor train_index, test_index in ss.split(X, y):\n train_x, train_y = X[train_index], y[train_index]\n test_x, test_y = X[test_index], y[test_index]\n\nss = StratifiedShuffleSplit(n_splits=1, test_size=0.5)\n\nX = test_x\ny = test_y\n\nfor train_index, test_index in ss.split(X, y):\n test_x, test_y = X[train_index], y[train_index]\n val_x, val_y = X[test_index], y[test_index]\n", "_____no_output_____" ], [ "print(\"Train shapes (x, y):\", train_x.shape, train_y.shape)\nprint(\"Validation shapes (x, y):\", val_x.shape, val_y.shape)\nprint(\"Test shapes (x, y):\", test_x.shape, test_y.shape)", "Train shapes (x, y): (2936, 4096) (2936, 5)\nValidation shapes (x, y): (367, 4096) (367, 5)\nTest shapes (x, y): (367, 4096) (367, 5)\n" ] ], [ [ "If you did it right, you should see these sizes for the training sets:\n\n```\nTrain shapes (x, y): (2936, 4096) (2936, 5)\nValidation shapes (x, y): (367, 4096) (367, 5)\nTest shapes (x, y): (367, 4096) (367, 5)\n```", "_____no_output_____" ], [ "### Classifier layers\n\nOnce you have the convolutional codes, you just need to build a classfier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network.\n\n> **Exercise:** With the codes and labels loaded, build the classifier. Consider the codes as your inputs, each of them are 4096D vectors. 
You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost.", "_____no_output_____" ] ], [ [ "inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])\nlabels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])\nprint(labels_vecs.shape)\n# TODO: Classifier layers and operations\nfc = tf.contrib.layers.fully_connected(inputs_, 256)\n\nlogits = tf.contrib.layers.fully_connected(fc, labels_vecs.shape[1], activation_fn=None)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits)\ncost = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer().minimize(cost)\n\n# Operations for validation/test accuracy\npredicted = tf.nn.softmax(logits)\ncorrect_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))", "(3670, 5)\n" ] ], [ [ "### Batches!\n\nHere is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data.", "_____no_output_____" ] ], [ [ "def get_batches(x, y, n_batches=10):\n \"\"\" Return a generator that yields batches from arrays x and y. \"\"\"\n batch_size = len(x)//n_batches\n \n for ii in range(0, n_batches*batch_size, batch_size):\n # If we're not on the last batch, grab data with size batch_size\n if ii != (n_batches-1)*batch_size:\n X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size] \n # On the last batch, grab the rest of the data\n else:\n X, Y = x[ii:], y[ii:]\n # I love generators\n yield X, Y", "_____no_output_____" ] ], [ [ "### Training\n\nHere, we'll train the network.\n\n> **Exercise:** So far we've been providing the training code for you. 
Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. Use the `get_batches` function I wrote before to get your batches like `for x, y in get_batches(train_x, train_y)`. Or write your own!", "_____no_output_____" ] ], [ [ "saver = tf.train.Saver()\nepochs = 10\niteration = 0\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(epochs):\n \n for x, y in get_batches(train_x, train_y):\n feed = {inputs_: x,\n labels_: y}\n loss, _ = sess.run([cost, optimizer], feed_dict=feed)\n print(\"Epoch: {}/{}\".format(e+1, epochs),\n \"Iteration: {}\".format(iteration),\n \"Training loss: {:.5f}\".format(loss))\n iteration += 1\n \n if iteration % 5 == 0:\n feed = {inputs_: val_x,\n labels_: val_y}\n val_acc = sess.run(accuracy, feed_dict=feed)\n print(\"Epoch: {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Validation Acc: {:.4f}\".format(val_acc))\n\n \n saver.save(sess, \"checkpoints/flowers.ckpt\")", "Epoch: 1/10 Iteration: 0 Training loss: 6.09479\nEpoch: 1/10 Iteration: 1 Training loss: 19.38938\nEpoch: 1/10 Iteration: 2 Training loss: 14.50047\nEpoch: 1/10 Iteration: 3 Training loss: 13.24159\nEpoch: 1/10 Iteration: 4 Training loss: 7.22328\nEpoch: 0/10 Iteration: 5 Validation Acc: 0.6866\nEpoch: 1/10 Iteration: 5 Training loss: 4.34417\nEpoch: 1/10 Iteration: 6 Training loss: 2.55807\nEpoch: 1/10 Iteration: 7 Training loss: 3.47730\nEpoch: 1/10 Iteration: 8 Training loss: 2.65929\nEpoch: 1/10 Iteration: 9 Training loss: 2.90010\nEpoch: 0/10 Iteration: 10 Validation Acc: 0.7030\nEpoch: 2/10 Iteration: 10 Training loss: 2.14254\nEpoch: 2/10 Iteration: 11 Training loss: 2.04713\nEpoch: 2/10 Iteration: 12 Training loss: 2.08653\nEpoch: 2/10 Iteration: 13 Training loss: 1.52148\nEpoch: 2/10 Iteration: 14 Training loss: 1.14696\nEpoch: 1/10 Iteration: 15 Validation Acc: 0.7493\nEpoch: 2/10 Iteration: 15 
Training loss: 0.90808\nEpoch: 2/10 Iteration: 16 Training loss: 0.89916\nEpoch: 2/10 Iteration: 17 Training loss: 0.77664\nEpoch: 2/10 Iteration: 18 Training loss: 0.71632\nEpoch: 2/10 Iteration: 19 Training loss: 0.80467\nEpoch: 1/10 Iteration: 20 Validation Acc: 0.7548\nEpoch: 3/10 Iteration: 20 Training loss: 0.69502\nEpoch: 3/10 Iteration: 21 Training loss: 0.60635\nEpoch: 3/10 Iteration: 22 Training loss: 0.86740\nEpoch: 3/10 Iteration: 23 Training loss: 0.55340\nEpoch: 3/10 Iteration: 24 Training loss: 0.50799\nEpoch: 2/10 Iteration: 25 Validation Acc: 0.7875\nEpoch: 3/10 Iteration: 25 Training loss: 0.46593\nEpoch: 3/10 Iteration: 26 Training loss: 0.41811\nEpoch: 3/10 Iteration: 27 Training loss: 0.39395\nEpoch: 3/10 Iteration: 28 Training loss: 0.44384\nEpoch: 3/10 Iteration: 29 Training loss: 0.43772\nEpoch: 2/10 Iteration: 30 Validation Acc: 0.8065\nEpoch: 4/10 Iteration: 30 Training loss: 0.37363\nEpoch: 4/10 Iteration: 31 Training loss: 0.17821\nEpoch: 4/10 Iteration: 32 Training loss: 0.30352\nEpoch: 4/10 Iteration: 33 Training loss: 0.27625\nEpoch: 4/10 Iteration: 34 Training loss: 0.34593\nEpoch: 3/10 Iteration: 35 Validation Acc: 0.8229\nEpoch: 4/10 Iteration: 35 Training loss: 0.30728\nEpoch: 4/10 Iteration: 36 Training loss: 0.38150\nEpoch: 4/10 Iteration: 37 Training loss: 0.35443\nEpoch: 4/10 Iteration: 38 Training loss: 0.26565\nEpoch: 4/10 Iteration: 39 Training loss: 0.27981\nEpoch: 3/10 Iteration: 40 Validation Acc: 0.8365\nEpoch: 5/10 Iteration: 40 Training loss: 0.22080\nEpoch: 5/10 Iteration: 41 Training loss: 0.13720\nEpoch: 5/10 Iteration: 42 Training loss: 0.26349\nEpoch: 5/10 Iteration: 43 Training loss: 0.20846\nEpoch: 5/10 Iteration: 44 Training loss: 0.21817\nEpoch: 4/10 Iteration: 45 Validation Acc: 0.8392\nEpoch: 5/10 Iteration: 45 Training loss: 0.21050\nEpoch: 5/10 Iteration: 46 Training loss: 0.24346\nEpoch: 5/10 Iteration: 47 Training loss: 0.23473\nEpoch: 5/10 Iteration: 48 Training loss: 0.19866\nEpoch: 5/10 Iteration: 49 
Training loss: 0.23902\nEpoch: 4/10 Iteration: 50 Validation Acc: 0.8501\nEpoch: 6/10 Iteration: 50 Training loss: 0.16664\nEpoch: 6/10 Iteration: 51 Training loss: 0.10767\nEpoch: 6/10 Iteration: 52 Training loss: 0.17396\nEpoch: 6/10 Iteration: 53 Training loss: 0.14594\nEpoch: 6/10 Iteration: 54 Training loss: 0.18902\nEpoch: 5/10 Iteration: 55 Validation Acc: 0.8692\nEpoch: 6/10 Iteration: 55 Training loss: 0.18315\nEpoch: 6/10 Iteration: 56 Training loss: 0.19464\nEpoch: 6/10 Iteration: 57 Training loss: 0.18242\nEpoch: 6/10 Iteration: 58 Training loss: 0.13424\nEpoch: 6/10 Iteration: 59 Training loss: 0.18221\nEpoch: 5/10 Iteration: 60 Validation Acc: 0.8583\nEpoch: 7/10 Iteration: 60 Training loss: 0.12580\nEpoch: 7/10 Iteration: 61 Training loss: 0.07224\nEpoch: 7/10 Iteration: 62 Training loss: 0.12352\nEpoch: 7/10 Iteration: 63 Training loss: 0.11218\nEpoch: 7/10 Iteration: 64 Training loss: 0.13097\nEpoch: 6/10 Iteration: 65 Validation Acc: 0.8665\nEpoch: 7/10 Iteration: 65 Training loss: 0.13078\nEpoch: 7/10 Iteration: 66 Training loss: 0.15979\nEpoch: 7/10 Iteration: 67 Training loss: 0.13183\nEpoch: 7/10 Iteration: 68 Training loss: 0.10843\nEpoch: 7/10 Iteration: 69 Training loss: 0.14170\nEpoch: 6/10 Iteration: 70 Validation Acc: 0.8719\nEpoch: 8/10 Iteration: 70 Training loss: 0.08602\nEpoch: 8/10 Iteration: 71 Training loss: 0.05326\nEpoch: 8/10 Iteration: 72 Training loss: 0.09561\nEpoch: 8/10 Iteration: 73 Training loss: 0.08072\nEpoch: 8/10 Iteration: 74 Training loss: 0.10511\nEpoch: 7/10 Iteration: 75 Validation Acc: 0.8665\nEpoch: 8/10 Iteration: 75 Training loss: 0.10438\nEpoch: 8/10 Iteration: 76 Training loss: 0.13204\nEpoch: 8/10 Iteration: 77 Training loss: 0.09238\nEpoch: 8/10 Iteration: 78 Training loss: 0.08482\nEpoch: 8/10 Iteration: 79 Training loss: 0.11027\nEpoch: 7/10 Iteration: 80 Validation Acc: 0.8665\nEpoch: 9/10 Iteration: 80 Training loss: 0.06720\nEpoch: 9/10 Iteration: 81 Training loss: 0.04523\nEpoch: 9/10 Iteration: 82 
Training loss: 0.07565\nEpoch: 9/10 Iteration: 83 Training loss: 0.06168\nEpoch: 9/10 Iteration: 84 Training loss: 0.07394\nEpoch: 8/10 Iteration: 85 Validation Acc: 0.8747\nEpoch: 9/10 Iteration: 85 Training loss: 0.08303\nEpoch: 9/10 Iteration: 86 Training loss: 0.10531\nEpoch: 9/10 Iteration: 87 Training loss: 0.07123\nEpoch: 9/10 Iteration: 88 Training loss: 0.06659\nEpoch: 9/10 Iteration: 89 Training loss: 0.08400\nEpoch: 8/10 Iteration: 90 Validation Acc: 0.8692\nEpoch: 10/10 Iteration: 90 Training loss: 0.05142\nEpoch: 10/10 Iteration: 91 Training loss: 0.03468\nEpoch: 10/10 Iteration: 92 Training loss: 0.05952\nEpoch: 10/10 Iteration: 93 Training loss: 0.04842\nEpoch: 10/10 Iteration: 94 Training loss: 0.05592\nEpoch: 9/10 Iteration: 95 Validation Acc: 0.8719\nEpoch: 10/10 Iteration: 95 Training loss: 0.06398\nEpoch: 10/10 Iteration: 96 Training loss: 0.08367\nEpoch: 10/10 Iteration: 97 Training loss: 0.05550\nEpoch: 10/10 Iteration: 98 Training loss: 0.05315\nEpoch: 10/10 Iteration: 99 Training loss: 0.06566\nEpoch: 9/10 Iteration: 100 Validation Acc: 0.8719\n" ] ], [ [ "### Testing\n\nBelow you see the test accuracy. 
You can also see the predictions returned for images.", "_____no_output_____" ] ], [ [ "with tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n \n feed = {inputs_: test_x,\n labels_: test_y}\n test_acc = sess.run(accuracy, feed_dict=feed)\n print(\"Test accuracy: {:.4f}\".format(test_acc))", "INFO:tensorflow:Restoring parameters from checkpoints/flowers.ckpt\nTest accuracy: 0.9019\n" ], [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import imread", "_____no_output_____" ] ], [ [ "Below, feel free to choose images and see how the trained classifier predicts the flowers in them.", "_____no_output_____" ] ], [ [ "test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'\ntest_img = imread(test_img_path)\nplt.imshow(test_img)", "_____no_output_____" ], [ "# Run this cell if you don't have a vgg graph built\nif 'vgg' in globals():\n print('\"vgg\" object already exists. Will not create again.')\nelse:\n #create vgg\n with tf.Session() as sess:\n input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])\n vgg = vgg16.Vgg16()\n vgg.build(input_)", "\"vgg\" object already exists. Will not create again.\n" ], [ "with tf.Session() as sess:\n img = utils.load_image(test_img_path)\n img = img.reshape((1, 224, 224, 3))\n\n feed_dict = {input_: img}\n code = sess.run(vgg.relu6, feed_dict=feed_dict)\n \nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n \n feed = {inputs_: code}\n prediction = sess.run(predicted, feed_dict=feed).squeeze()", "INFO:tensorflow:Restoring parameters from checkpoints/flowers.ckpt\n" ], [ "plt.imshow(test_img)", "_____no_output_____" ], [ "plt.barh(np.arange(5), prediction)\n_ = plt.yticks(np.arange(5), lb.classes_)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d0690368552c4d8b43c1e652897b74572a143aa3
11,641
ipynb
Jupyter Notebook
exercise4/lab_task.ipynb
kosticmarin/nnets
a3e514d9798e709548f6e905f857d160c5176393
[ "MIT" ]
null
null
null
exercise4/lab_task.ipynb
kosticmarin/nnets
a3e514d9798e709548f6e905f857d160c5176393
[ "MIT" ]
null
null
null
exercise4/lab_task.ipynb
kosticmarin/nnets
a3e514d9798e709548f6e905f857d160c5176393
[ "MIT" ]
null
null
null
26.884527
111
0.489649
[ [ [ "import tensorflow as tf", "_____no_output_____" ], [ "from keras.applications.resnet50 import ResNet50\nfrom keras.preprocessing import image\nfrom keras.applications.resnet50 import preprocess_input\nfrom keras.models import Model\nimport numpy as np\n\nbase_model=ResNet50(weights=\"imagenet\")\nmodel=Model(inputs=base_model.input, outputs=base_model.get_layer(base_model.layers[-2].name).output)\nimg_path=\"cnn_img/rabbit.jpg\"\nimg=image.load_img(img_path, target_size=(224, 224))\nx=image.img_to_array(img)\nx=np.expand_dims(x, axis=0)\nx=preprocess_input(x)\n\nfeatures=model.predict(x)\nprint(features.shape)\nfeature_layer_size=features.shape[1];", "Using TensorFlow backend.\n" ], [ "f16_dir=\"plane/f16/\"\nspitfire_dir=\"plane/spitfire/\"\n\ndef create_numbered_paths(home_dir, n):\n return [home_dir+str(i)+\".jpg\" for i in range(n)]\n\ndef create_paired_numbered_paths(first_home_dir, second_home_dir, n):\n image_paths=[]\n for p in zip(create_numbered_paths(first_home_dir, n), create_numbered_paths(second_home_dir, n)):\n image_paths.extend(p)\n return image_paths\n \ndef create_features(paths, verbose=True):\n n=len(paths)\n features=np.zeros((n, feature_layer_size))\n for i in range(n):\n if (verbose==True):\n print(\"\\t%2d / %2d\"%(i+1, n))\n img=image.load_img(paths[i], target_size=(224, 224))\n img=image.img_to_array(img)\n img=np.expand_dims(img, axis=0)\n features[i, :]=preprocess_input(model.predict(img))\n \n return features\nindividual_n=30\nimage_paths=create_paired_numbered_paths(f16_dir, spitfire_dir, individual_n)\n\nimage_classes=[]\nfor i in range(individual_n):\n #0 stands for the pincer image and 0 stands for the scissors image\n image_classes.extend((0, 1))\n\n#number of all images\nn=60\n#number of training images\nn_train=30\n#number of test images\nn_test=n-n_train\nprint(\"Creating training features...\")\n#here we will store the features of training images\nx_train=create_features(image_paths[:n_train])\n#train 
classes\ny_train=np.array(image_classes[:n_train])\n\nprint(\"Creating test features...\")\n#here we will store the features of test images\nx_test=create_features(image_paths[n_train:])\n#train classes\ny_test=np.array(image_classes[n_train:])", "Creating training features...\n\t 1 / 30\n\t 2 / 30\n\t 3 / 30\n\t 4 / 30\n\t 5 / 30\n\t 6 / 30\n\t 7 / 30\n\t 8 / 30\n\t 9 / 30\n\t10 / 30\n\t11 / 30\n\t12 / 30\n\t13 / 30\n\t14 / 30\n\t15 / 30\n\t16 / 30\n\t17 / 30\n\t18 / 30\n\t19 / 30\n\t20 / 30\n\t21 / 30\n\t22 / 30\n\t23 / 30\n\t24 / 30\n\t25 / 30\n\t26 / 30\n\t27 / 30\n\t28 / 30\n\t29 / 30\n\t30 / 30\nCreating test features...\n\t 1 / 30\n\t 2 / 30\n\t 3 / 30\n\t 4 / 30\n\t 5 / 30\n\t 6 / 30\n\t 7 / 30\n\t 8 / 30\n\t 9 / 30\n\t10 / 30\n\t11 / 30\n\t12 / 30\n\t13 / 30\n\t14 / 30\n\t15 / 30\n\t16 / 30\n\t17 / 30\n\t18 / 30\n\t19 / 30\n\t20 / 30\n\t21 / 30\n\t22 / 30\n\t23 / 30\n\t24 / 30\n\t25 / 30\n\t26 / 30\n\t27 / 30\n\t28 / 30\n\t29 / 30\n\t30 / 30\n" ], [ "print (np.shape(x_train))\nprint (y_train)\nprint (np.shape(x_test))\nprint (y_test)", "(30, 2048)\n[0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]\n(30, 2048)\n[0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]\n" ], [ "from sklearn import svm\n\ndef create_svm_classifier(x, y):\n #we will use linear SVM\n C=1.0\n classifier=svm.SVC(kernel=\"rbf\", C=C);\n classifier.fit(x, y)\n return classifier\n\ndef calculate_accuracy(classifier, x, y):\n predicted=classifier.predict(x)\n return np.sum(y==predicted)/y.size\n\n#training the model\nclassifier=create_svm_classifier(x_train, y_train)\n\n#checking the model's accuracy\nprint(\"Accuracy: %.2lf%%\"%(100*calculate_accuracy(classifier, x_test, y_test)))", "Accuracy: 96.67%\n" ], [ "for i in range(60):\n test_example_1 = np.zeros((1, feature_layer_size))\n img=image.load_img(\"plane/f16/\"+str(i)+\".jpg\", target_size=(224, 224))\n img=image.img_to_array(img)\n img=np.expand_dims(img, axis=0)\n 
test_example_1[0,:]=preprocess_input(model.predict(img))\n print(\"example num \"+str(i)+\" \" + str(classifier.predict(test_example_1)))", "example num 0 [0]\nexample num 1 [0]\nexample num 2 [0]\nexample num 3 [0]\nexample num 4 [0]\nexample num 5 [0]\nexample num 6 [0]\nexample num 7 [0]\nexample num 8 [0]\nexample num 9 [0]\nexample num 10 [0]\nexample num 11 [0]\nexample num 12 [0]\nexample num 13 [0]\nexample num 14 [0]\nexample num 15 [0]\nexample num 16 [0]\nexample num 17 [0]\nexample num 18 [0]\nexample num 19 [0]\nexample num 20 [1]\nexample num 21 [0]\nexample num 22 [0]\nexample num 23 [0]\nexample num 24 [0]\nexample num 25 [0]\nexample num 26 [0]\nexample num 27 [0]\nexample num 28 [0]\nexample num 29 [0]\nexample num 30 [0]\nexample num 31 [0]\nexample num 32 [0]\nexample num 33 [0]\nexample num 34 [0]\nexample num 35 [0]\nexample num 36 [0]\nexample num 37 [0]\nexample num 38 [0]\nexample num 39 [0]\nexample num 40 [0]\nexample num 41 [0]\nexample num 42 [0]\nexample num 43 [0]\nexample num 44 [0]\nexample num 45 [0]\nexample num 46 [0]\nexample num 47 [0]\nexample num 48 [0]\nexample num 49 [0]\nexample num 50 [0]\nexample num 51 [0]\nexample num 52 [0]\nexample num 53 [0]\nexample num 54 [0]\nexample num 55 [0]\nexample num 56 [0]\nexample num 57 [0]\nexample num 58 [0]\nexample num 59 [0]\n" ], [ "for i in range(60):\n test_example_2 = np.zeros((1,feature_layer_size))\n img = image.load_img(\"plane/spitfire/\"+str(i)+\".jpg\",target_size=(224,224))\n img = image.img_to_array(img)\n img = np.expand_dims(img,axis=0)\n test_example_2[0,:] = preprocess_input(model.predict(img))\n print(\"example num \"+str(i)+\" \" + str(classifier.predict(test_example_2)))", "example num 0 [1]\nexample num 1 [1]\nexample num 2 [1]\nexample num 3 [1]\nexample num 4 [1]\nexample num 5 [1]\nexample num 6 [1]\nexample num 7 [1]\nexample num 8 [1]\nexample num 9 [1]\nexample num 10 [1]\nexample num 11 [1]\nexample num 12 [1]\nexample num 13 [1]\nexample num 14 
[1]\nexample num 15 [1]\nexample num 16 [1]\nexample num 17 [1]\nexample num 18 [1]\nexample num 19 [1]\nexample num 20 [1]\nexample num 21 [1]\nexample num 22 [1]\nexample num 23 [1]\nexample num 24 [1]\nexample num 25 [1]\nexample num 26 [1]\nexample num 27 [1]\nexample num 28 [1]\nexample num 29 [1]\nexample num 30 [1]\nexample num 31 [1]\nexample num 32 [1]\nexample num 33 [0]\nexample num 34 [1]\nexample num 35 [1]\nexample num 36 [1]\nexample num 37 [1]\nexample num 38 [1]\nexample num 39 [1]\nexample num 40 [1]\nexample num 41 [1]\nexample num 42 [1]\nexample num 43 [1]\nexample num 44 [1]\nexample num 45 [1]\nexample num 46 [1]\nexample num 47 [1]\nexample num 48 [1]\nexample num 49 [1]\nexample num 50 [1]\nexample num 51 [1]\nexample num 52 [1]\nexample num 53 [1]\nexample num 54 [0]\nexample num 55 [1]\nexample num 56 [1]\nexample num 57 [1]\nexample num 58 [1]\nexample num 59 [1]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
d06904b0921a888fb67dd29f0eb21768bc04c6d3
17,393
ipynb
Jupyter Notebook
JupyterNotebooks/Lessons/Lesson 4.ipynb
emilekhoury/CMPT-120L-910-20F
c4522da4fede94119e13928eb2724b260899c505
[ "MIT" ]
null
null
null
JupyterNotebooks/Lessons/Lesson 4.ipynb
emilekhoury/CMPT-120L-910-20F
c4522da4fede94119e13928eb2724b260899c505
[ "MIT" ]
null
null
null
JupyterNotebooks/Lessons/Lesson 4.ipynb
emilekhoury/CMPT-120L-910-20F
c4522da4fede94119e13928eb2724b260899c505
[ "MIT" ]
null
null
null
32.149723
516
0.592077
[ [ [ "# Introduction to Programming\n\nTopics for today will include:\n- Mozilla Developer Network [(MDN)](https://developer.mozilla.org/en-US/)\n- Python Documentation [(Official Documentation)](https://docs.python.org/3/)\n- Importance of Design\n- Functions\n- Built in Functions", "_____no_output_____" ], [ "## Mozilla Developer Network [(MDN)](https://developer.mozilla.org/en-US/)\n---\nThe Mozilla Developer Network is a great resource for all things web dev. This site is good for trying to learn about standards as well as finding out quick information about something that you're trying to do Web Dev Wise\n\nThis will be a major resource going forward when it comes to doing things with HTML and CSS\n\nYou'll often find that you're not the first to try and do something. That being said you need to start to get comfortable looking for information on your own when things go wrong.", "_____no_output_____" ], [ "## Python Documentation [(Official Documentation)](https://docs.python.org/3/)\n---\nThis section is similar to the one above. Python has a lot of resources out there that we can utilize when we're stuck or need some help with something that we may not have encountered before.\n\nSince this is the official documentation page for the language you may often be given too much information or something that you wanted but in the wrong form or for the wrong version of the language. It is up to you to learn how to utilize these things and use them to your advantage. ", "_____no_output_____" ], [ "## Importance of Design\n---\nSo this is a topic that i didn't learn the importance of until I was in the work force. Design is a major influence in the way that code is build and in a major capacity and significant effect on the industry. \n\nLet's pretend we have a client that wants us to do the following:\n- Write a function which will count the number of times any one character appears in a string of characters. 
\n- Write a main function which takes the character to be counted from the user and calls the function, outputting the result to the user.\n\nFor example, are you like Android and take the latest and greatest and put them into phones in an unregulated hardware market. Thus leaving great variability in the market for your brand? Are you an Apple where you control the full stack. Your hardware and software may not be bleeding edge but it's seamless and uniform.\n\nWhat does the market want? What are you good at? Do you have people around you that can fill your gaps?\n\nHere's a blurb from a friend about the matter:\n>Design, often paired with the phrase \"design thinking\", is an approach and method of problem solving that builds empathy for user(s) of a product, resulting in the creation of a seamless and delightful user experience tailored to the user's needs.\n\n>Design thinks holistically about the experience that a user would go through when encountering and interacting with a product or technology. Design understands the user and their needs in great detail so that the product team can build the product and experience that fits what the user is looking for. We don't want to create products for the sake of creating them, we want to ensure that there is a need for it by a user.\n\n>Design not only focuses on the actual interface design of a product, but can also ensure the actual technology has a seamless experience as well. Anything that blocks potential users from wanting to buy a product or prohibits current users from utilizing the product successfully, design wants to investigate. 
We ensure all pieces fit together from the user's standpoint, and we work to build a bridge between the technology and the user, who doesn't need to understand the technical depths of the product.\n\n### Sorting Example [(Toptal Sorting Algorithms)](https://www.toptal.com/developers/sorting-algorithms)\n---\nHypothetical, a client comes to you and they want you sort a list of numbers how do you optimally sort a list? `[2, 5, 6, 1, 4, 3]`\n\n### Design Thinking [(IBM Design Thinking)](https://www.ibm.com/design/thinking/)\n---\nAs this idea starts to grow you come to realize that different companies have different design methodologies. IBM has it's own version of Design Thinking. You can find more information about that at the site linked in the title. IBM is very focused on being exactly like its customers in most aspects. \n\nWhat we're mostly going to take from this is that there are entire careers birthed from thinking before you act. That being said we're going to harp on a couple parts of this.\n\n### Knowing what your requirements are\n---\nOne of the most common scenarios to come across is a product that is annouced that's going to change everything. In the planning phase everyone agrees that the idea is amazing and going to solve all of our problems. \n\nWe get down the line and things start to fall apart, we run out of time. Things ran late, or didn't come in in time pushing everything out. \n\nScope creep ensued.\n\nThis is typically the result of not agreeing on what our requirements are. Something as basic as agreeing on what needs to be done needs to be discussed and checked on thouroughly. We do this because often two people rarely are thinking exactly the same thing.\n\nYou need to be on the same page as your client and your fellow developers as well. If you don't know ask. \n\n### Planning Things Out\n---\nWe have an idea on what we want to do. So now we just write it? No, not quite. We need to have a rough plan on how we're going to do things. 
Do we want to use functions, do we need a quick solution, is this going to be verbose and complex? \n\nIt's important to look at what we can set up for ourselves. We don't need to make things difficult by planning things out poorly. This means allotting time for things like getting stuck and brainstorming.\n\n### Breaking things down\n---\nPersonally I like to take my problem and scale it down into an easy example so in the case of our problem. The client may want to process a text like Moby Dick. We can start with a sentence and work our way up!\n\nTaking the time to break things in to multiple pieces and figure out what goes where is an art in itself.\n", "_____no_output_____" ] ], [ [ "def char_finder(character, string):\n total = 0\n for char in string:\n if char == character:\n total += 1\n\n return total\n \nif __name__ == \"__main__\":\n output = char_finder('z', 'Quick brown fox jumped over the lazy dog')\n print(output)\n", "1\n" ] ], [ [ "## Functions\n---\nThis is a intergral piece of how we do things in any programming language. This allows us to repeat instances of code that we've seen and use them at our preferance.\n\nWe'll often be using functions similar to how we use variables and our data types.\n\n### Making Our Own Functions\n---\nSo to make a functions we'll be using the `def` keyword followed by a name and then parameters. We've seen this a couple times now in code examples.\n```\ndef exampleName(exampleParameter1, exampleParameter2):\n print(exampleParameter1, exampleParameter2)\n```\n\nThere are many ways to write functions, we can say that we're going return a specific type of data type.\n```\ndef exampleName(exampleParameter1, exampleParameter2) -> any:\n print(exampleParameter1, exampleParameter2)\n```\n\nWe can also specify the types that the parameters are going to be. 
\n```\ndef exampleName(exampleParameter1: any, exampleParameter2: any) -> any:\n print(exampleParameter1, exampleParameter2)\n```\n\nWriting functions is only one part of the fun. We still have to be able to use them. \n\n", "_____no_output_____" ] ], [ [ "def exampleName(exampleParameter1: any, exampleParameter2: any) -> any:\n print(exampleParameter1, exampleParameter2)\n\nexampleName(\"Hello\", 5)", "Hello 5\n" ] ], [ [ "### Using functions\n---\nUsing functions is fairly simple. To use a function all we have to do is give the function name followed by parenthesis. This should seem familiar.", "_____no_output_____" ], [ "### Functions In Classes\n---\nNow we've mentioned classes before, classes can have functions but they're used a little differently. Functions that stem from classes are used often with a dot notation. ", "_____no_output_____" ] ], [ [ "class Person:\n def __init__(self, weight: int, height: int, name: str):\n self.weight = weight\n self.height = height\n self.name = name\n\n def who_is_this(self):\n print(\"This person's name is \" + self.name)\n print(\"This person's weight is \" + str(self.weight) + \" pounds\")\n print(\"This person's height is \" + str(self.height) + \" inches\")\n \nif __name__ == \"__main__\":\n Kipp = Person(225, 70, \"Aaron Kippins\")\n Kipp.who_is_this()", "This person's name is Aaron Kippins\nThis person's weight is 225 pounds\nThis person's height is 70 inches\n" ] ], [ [ "## Built in Functions and Modules\n---\nWith the talk of dot notation those are often used with built in functions. Built in function are functions that come along with the language. These tend to be very useful because as we start to visit more complex issues they allow us to do complexs thing with ease in some cases.\n\nWe have functions that belong to particular classes or special things that can be done with things of a certain class type. \n\nAlong side those we can also have Modules. 
Modules are classes or functions that other people wrote that we can import into our code to use.\n\n", "_____no_output_____" ], [ "### Substrings\n---\n", "_____no_output_____" ] ], [ [ "string = \"I want to go home!\"\nprint(string[0:12], \"to Cancun!\")\n# print(string[0:1])", "I want to go to Cancun!\n" ] ], [ [ "### toUpper toLower\n---\n\n", "_____no_output_____" ] ], [ [ "alpha_sentence = 'Quick brown fox jumped over the lazy dog'\nprint(alpha_sentence.title())\nprint(alpha_sentence.upper())\nprint(alpha_sentence.lower())\nif alpha_sentence.lower().islower():\n print(\"sentence is all lowercase\")\n\n", "Quick Brown Fox Jumped Over The Lazy Dog\nQUICK BROWN FOX JUMPED OVER THE LAZY DOG\nquick brown fox jumped over the lazy dog\nsentence is all lowercase\n" ] ], [ [ "### Exponents\n---\n", "_____no_output_____" ] ], [ [ "print(2 ** 3)", "8\n" ] ], [ [ "### math.sqrt()\n---", "_____no_output_____" ] ], [ [ "import math\n\nmath.sqrt(4)", "_____no_output_____" ] ], [ [ "### Integer Division vs Float Division\n---\n", "_____no_output_____" ] ], [ [ "print(4//2)\nprint(4/2)", "2\n2.0\n" ] ], [ [ "### Abs()\n---", "_____no_output_____" ] ], [ [ "abs(-10)", "_____no_output_____" ] ], [ [ "### String Manipulation\n---", "_____no_output_____" ] ], [ [ "dummy_string = \"Hey there I'm just a string for the example about to happen.\"\n\nprint(dummy_string.center(70, \"-\"))\nprint(dummy_string.partition(\" \"))\nprint(dummy_string.swapcase())\nprint(dummy_string.split(\" \"))", "-----Hey there I&#39;m just a string for the example about to happen.-----\n(&#39;Hey&#39;, &#39; &#39;, &quot;there I&#39;m just a string for the example about to happen.&quot;)\nhEY THERE i&#39;M JUST A STRING FOR THE EXAMPLE ABOUT TO HAPPEN.\n[&#39;Hey&#39;, &#39;there&#39;, &quot;I&#39;m&quot;, &#39;just&#39;, &#39;a&#39;, &#39;string&#39;, &#39;for&#39;, &#39;the&#39;, &#39;example&#39;, &#39;about&#39;, &#39;to&#39;, &#39;happen.&#39;]\n" ] ], [ [ "### Array Manipulation\n---", 
"_____no_output_____" ] ], [ [ "arr = [2, 5, 6, 1, 4, 3]\narr.sort()\nprint(arr)\nprint(arr[3])\n# sorted(arr)\nprint(arr[1:3])\n\n", "[1, 2, 3, 4, 5, 6]\n4\n" ] ], [ [ "### Insert and Pop, Append and Remove\n---\n", "_____no_output_____" ] ], [ [ "arr.append(7)\nprint(arr)\n\narr.pop()\nprint(arr)", "[1, 2, 3, 4, 5, 6, 7, 7]\n[1, 2, 3, 4, 5, 6, 7]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06914e2527ff11c63fd4c012d075622a37436b6
351,725
ipynb
Jupyter Notebook
issues/closed/issue2_add_mollweide_plotting_to_gwylm.ipynb
llondon6/nrutils_dev
ddd046d3290856ffff1a6c74a5fc08f2c6249e83
[ "MIT" ]
5
2016-04-04T05:37:49.000Z
2021-11-21T16:06:15.000Z
issues/closed/issue2_add_mollweide_plotting_to_gwylm.ipynb
llondon6/nrutils_dev
ddd046d3290856ffff1a6c74a5fc08f2c6249e83
[ "MIT" ]
36
2016-04-10T08:28:41.000Z
2021-12-13T15:17:47.000Z
issues/closed/issue2_add_mollweide_plotting_to_gwylm.ipynb
llondon6/nrutils_dev
ddd046d3290856ffff1a6c74a5fc08f2c6249e83
[ "MIT" ]
6
2016-04-04T05:27:33.000Z
2021-12-13T14:29:27.000Z
1,109.542587
171,250
0.945406
[ [ [ "# Add MollWeide Plotting to gwylm class\n<center>(L. London 2017) Related: positive_dev/examples/plotting_spherical_harmonics.ipynb</center>", "_____no_output_____" ], [ "### Setup Environment", "_____no_output_____" ] ], [ [ "# Setup ipython environment\n%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n# Import usefuls\nfrom nrutils import scsearch,gwylm\nfrom matplotlib.pyplot import *\nfrom numpy import array", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ] ], [ [ "### Find a simulation and load data", "_____no_output_____" ] ], [ [ "# Find sim\nA = scsearch(q=[10,20],keyword='hr',verbose=True,institute='gt')", "(\u001b[0;36mscsearch\u001b[0m)>> Found \u001b[4minstitute\u001b[0m (='gt') keyword.\n(\u001b[0;36mscsearch\u001b[0m)>> Found \u001b[4mkeyword\u001b[0m (='hr') keyword.\n(\u001b[0;36mscsearch\u001b[0m)>> Found \u001b[4mq\u001b[0m (=[10, 20]) keyword.\n(\u001b[0;36mscsearch\u001b[0m)>> Found \u001b[4mverbose\u001b[0m (=True) keyword.\n(\u001b[0;36mscsearch\u001b[0m)>> List of keywords or string keyword found: \u001b[0;36mALL scentry objects matching will be passed.\u001b[0m To pass ANY entries matching the keywords, input the keywords using an iterable of not of type list.\n## Found \u001b[1m1\u001b[0m\u001b[0;35m possibly degenerate\u001b[0m simulations:\n[0001][\u001b[92mbradwr\u001b[0m] \u001b[0;36mHR-series\u001b[0m: ns-q15.04\t(\u001b[0;36mD7.5_q15.00_a0.0_CHgEEB_m800\u001b[0m)\n\n" ], [ "# Load data\ny = gwylm(A[0],lmax=4,verbose=False,clean=True)", "\u001b[1m(\u001b[0;33mvalidate!\u001b[0m)>> \u001b[0mMultiple catalog directories found. 
We will scan through the related list, and then store first the catalog_dir that the OS can find.\n\u001b[1m(\u001b[0;33mvalidate!\u001b[0m)>> \u001b[0mSelecting \"\u001b[0;36m/Volumes/athena/bradwr/\u001b[0m\"\n" ] ], [ [ "### Plot Mollweide", "_____no_output_____" ] ], [ [ "\n#\nkind = 'strain'\n\n# Make mollweide plot -- NOTE that the time input is relative to the peak in h22\nax0,real_time = y.mollweide_plot(time=0,form='abs',kind=kind)\nax0.set_title('$l_{max} = %i$'%max([l for l,m in y.lm]),size=20)\n\n# Make time domain plot for reference\naxarr,fig = y.lm[2,2][kind].plot()\nfor ax in axarr:\n sca( ax )\n axvline( real_time, linestyle='--', color='k' )\n", "_____no_output_____" ] ], [ [ "### Try to put everything in the same figure", "_____no_output_____" ] ], [ [ "\n#\nR,C = 6,3\n\n#\nfig = figure( figsize=3*array([C,1.0*R]) )\n\n# \nax4 = subplot2grid( (R,C), (0, 0), colspan=C, rowspan=3, projection='mollweide' )\nax1 = subplot2grid( (R,C), (3, 0), colspan=C)\nax2 = subplot2grid( (R,C), (4, 0), colspan=C, sharex=ax1)\nax3 = subplot2grid( (R,C), (5, 0), colspan=C, sharex=ax1)\n\n#\nkind = 'strain'\n\n# Make mollweide plot -- NOTE that the time input is relative to the peak in h22\n_,real_time = y.mollweide_plot(time=0,ax=ax4,form='abs',kind=kind,colorbar_shrink=0.8)\nax4.set_title('$l_{max} = %i$'%max([l for l,m in y.lm]),size=20)\n\n\n#\nwfax,_ = y.lm[2,2][kind].plot(ax=[ax1,ax2,ax3],tlim=[100,800])\nfor a in wfax:\n sca( a ); axvline( real_time, linestyle='-', color='k' )\n\n#\nsubplots_adjust(hspace = 0.1)\n", "_____no_output_____" ] ], [ [ "### Now perhaps write an external script that animates frames for select time samples?", "_____no_output_____" ] ], [ [ "from os.path import join", "_____no_output_____" ], [ "range(0,100,10)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0691887d6ea707991b21d03335f2630e3df38d5
151,748
ipynb
Jupyter Notebook
Numeric and scientific python.ipynb
Pytoddler/Data-analysis-and-visualization
833b1ae7ae36ee8168f655a1497f081438f9e0aa
[ "MIT" ]
null
null
null
Numeric and scientific python.ipynb
Pytoddler/Data-analysis-and-visualization
833b1ae7ae36ee8168f655a1497f081438f9e0aa
[ "MIT" ]
null
null
null
Numeric and scientific python.ipynb
Pytoddler/Data-analysis-and-visualization
833b1ae7ae36ee8168f655a1497f081438f9e0aa
[ "MIT" ]
null
null
null
141.688142
25,980
0.877534
[ [ [ "!conda info", "Current conda install:\n\n platform : win-64\n conda version : 4.3.22\n conda is private : False\n conda-env version : 4.3.22\n conda-build version : not installed\n python version : 3.6.1.final.0\n requests version : 2.14.2\n root environment : C:\\Users\\USER\\Anaconda3 (writable)\n default environment : C:\\Users\\USER\\Anaconda3\n envs directories : C:\\Users\\USER\\Anaconda3\\envs\n C:\\Users\\USER\\AppData\\Local\\conda\\conda\\envs\n C:\\Users\\USER\\.conda\\envs\n package cache : C:\\Users\\USER\\Anaconda3\\pkgs\n C:\\Users\\USER\\AppData\\Local\\conda\\conda\\pkgs\n channel URLs : https://repo.continuum.io/pkgs/free/win-64\n https://repo.continuum.io/pkgs/free/noarch\n https://repo.continuum.io/pkgs/r/win-64\n https://repo.continuum.io/pkgs/r/noarch\n https://repo.continuum.io/pkgs/pro/win-64\n https://repo.continuum.io/pkgs/pro/noarch\n https://repo.continuum.io/pkgs/msys2/win-64\n https://repo.continuum.io/pkgs/msys2/noarch\n config file : C:\\Users\\USER\\.condarc\n netrc file : None\n offline mode : False\n user-agent : conda/4.3.22 requests/2.14.2 CPython/3.6.1 Windows/10 Windows/10.0.14393 \n administrator : False\n" ] ], [ [ "# Variables", "_____no_output_____" ] ], [ [ "x = 2\ny = '3'\nprint(x+int(y))\n\nz = [1, 2, 3] #List\nw = (2, 3, 4) #Tuple\n\nimport numpy as np\nq = np.array([1, 2, 3]) #numpy.ndarray\ntype(q)", "5\n" ] ], [ [ "# Console input and output", "_____no_output_____" ] ], [ [ "MyName = input('My name is: ')\nprint('Hello, '+MyName)", "My name is: david\nHello, david\n" ] ], [ [ "# File input and output", "_____no_output_____" ] ], [ [ "fid = open('msg.txt','w')\nfid.write('demo of writing.\\n')\nfid.write('Second line')\nfid.close()\n\nfid = open('msg.txt','r')\nmsg = fid.readline()\nprint(msg)\nmsg = fid.readline()\nprint(msg)\n\nfid.close()", "demo of writing.\n\nSecond line\n" ], [ "fid = open('msg.txt','r')\nmsg = fid.readlines()\nprint(msg)", "['demo of writing.\\n', 'Second line']\n" ], [ "fid = 
open('msg.txt','r')\nmsg = fid.read()\nprint(msg)", "demo of writing.\nSecond line\n" ], [ "import numpy as np\nx = np.linspace(0, 2*np.pi,4)\ny = np.cos(x)\n\n#Stack arrays in sequence vertically (row wise).\ndata = np.vstack((x,y)) #上下對隊齊好\ndataT = data.T #Transpose\n\nnp.savetxt('data.txt', data, delimiter=',')\nz = np.loadtxt('data.txt', delimiter=',')\n\nprint(x)\nprint(y)\nprint(data)\nprint(dataT)\nprint(z)", "[ 0. 2.0943951 4.1887902 6.28318531]\n[ 1. -0.5 -0.5 1. ]\n[[ 0. 2.0943951 4.1887902 6.28318531]\n [ 1. -0.5 -0.5 1. ]]\n[[ 0. 1. ]\n [ 2.0943951 -0.5 ]\n [ 4.1887902 -0.5 ]\n [ 6.28318531 1. ]]\n[[ 0. 2.0943951 4.1887902 6.28318531]\n [ 1. -0.5 -0.5 1. ]]\n" ], [ "import numpy as np\nx = np.linspace(0, 2*np.pi,20)\ny = np.cos(x)\nz = np.sin(x)\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\n#使用 help(plt.plot) 可以看到所有畫圖玩法\nplt.plot(x,y,'b')\nplt.plot(x,y,'go', label = 'cos(x)')\nplt.plot(x,z,'r')\nplt.plot(x,z,'go', label = 'sin(x)')\nplt.legend(loc='best') # 放到最好的位置\nplt.xlim([0, 2*np.pi])", "_____no_output_____" ], [ "import numpy as np\nx = np.linspace(0, 2*np.pi,20)\ny = np.cos(x)\nz = np.sin(x)\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\n#使用 help(plt.plot) 可以看到所有畫圖玩法\nplt.subplot(2,1,1) #分成兩張圖 形式是(row, column, order)\nplt.plot(x,y,'b')\nplt.plot(x,y,'go', label = 'cos(x)')\nplt.legend(loc='best') #放到最好的位置\n\nplt.subplot(2,1,2) #分成兩張圖\nplt.plot(x,z,'r')\nplt.plot(x,z,'go', label = 'sin(x)')\nplt.legend(loc='best') #放到最好的位置\n\nplt.xlim([0, 2*np.pi])", "_____no_output_____" ] ], [ [ "# Functions, Conditions, Loop", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef f(x):\n return x**2\n\nx = np.linspace(0,5,10)\ny = f(x)\n\nprint(y)", "[ 0. 0.30864198 1.2345679 2.77777778 4.9382716\n 7.71604938 11.11111111 15.12345679 19.75308642 25. 
]\n" ], [ "import numpy as np\n\ndef f(x): #這是個奇怪的練習用函數\n res = x\n if res < 3:\n res = np.nan #<3就傳 Not a Number \n elif res < 15:\n res = x**3\n else:\n res = x**4\n return res\n\nx = np.linspace(0,10,20)\ny = np.empty_like(x) \n#Return a new array with the same shape and type as a given array.\n#傳一個跟x一樣的array回來\n\ni = 0\nfor xi in x:\n y[i] = f(xi)\n i = i + 1\nprint(y)\n \n%matplotlib inline\nimport matplotlib.pyplot as plt\n\nplt.plot(x,y,'bp')\nplt.xlim([0,11])", "[ nan nan nan nan nan\n nan 31.49147106 50.00728969 74.64644992 106.28371483\n 145.7938475 194.05161102 251.93176848 320.30908296 400.05831754\n 492.05423531 597.17159936 716.28517277 850.26971862 1000. ]\n" ] ], [ [ "# Matrices, linear equations", "_____no_output_____" ] ], [ [ "A = np.array([[1,2],[3,2]])\nB = np.array([1,0])\n\n# x = A^-1 * b\nsol1 = np.dot(np.linalg.inv(A),B)\nprint(sol1)\nsol2 = np.linalg.solve(A,B)\nprint(sol2)\n\n\nimport sympy as sym\nsym.init_printing() \n#This will automatically enable the best printer available in your environment.\n\nx,y = sym.symbols('x y')\nz = sym.linsolve([3*x+2*y-1,x+2*y],(x,y))\nz\n#sym.pprint(z) The ASCII pretty printer", "[-0.5 0.75]\n[-0.5 0.75]\n" ] ], [ [ "# Non-linear equation", "_____no_output_____" ] ], [ [ "from scipy.optimize import fsolve\n\ndef f(z): #用z參數來表示x和y,做函數運算 \n x = z[0]\n y = z[1]\n return [x+2*y, x**2+y**2-1]\n\nz0 = [0,1]\nz = fsolve(f,z0)\nprint(z)\nprint(f(z))", "[-0.89442719 0.4472136 ]\n[0.0, -1.1102230246251565e-16]\n" ] ], [ [ "# Integration", "_____no_output_____" ] ], [ [ "from scipy.integrate import quad\n\ndef f(x):\n return x**2\n\nquad(f,0,2) #計算積分值\n\nimport sympy as sym\nsym.init_printing()\nx = sym.Symbol('x')\nf = sym.integrate(x**2,x)\nf.subs(x,2) #將值帶入函數中\nf", "_____no_output_____" ] ], [ [ "# Derivative", "_____no_output_____" ] ], [ [ "from scipy.misc import derivative\n\ndef f(x):\n return x**2\n\nprint(derivative(f,2,dx=0.01)) #dx表示精確程度\n\nimport sympy as sym\nsym.init_printing()\nx = sym.Symbol('x')\nf 
= sym.diff(x**3,x)\nf.subs(x,2) #將值帶入函數中,得解\nf", "4.0\n" ] ], [ [ "# Interpolation", "_____no_output_____" ] ], [ [ "from scipy.interpolate import interp1d #中間的字是1不是L喔!!!\n\nx = np.arange(0,6,1)\ny = np.array([0.2,0.3,0.5,1.0,0.9,1.1])\n\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.plot(x,y,'bo')\n\nxp = np.linspace(0,5,100) #為了顯示差別把點增加\n\ny1 = interp1d(x,y,kind='linear') #一階\nplt.plot(xp,y1(xp),'r-')\n\ny2 = interp1d(x,y,kind='quadratic') #二階\nplt.plot(xp,y2(xp),'k--')\n\ny3 = interp1d(x,y,kind='cubic') #三階\nplt.plot(xp,y3(xp),'g--')\n", "_____no_output_____" ] ], [ [ "# Linear regression", "_____no_output_____" ] ], [ [ "import numpy as np\nx = np.array([0,1,2,3,4,5])\ny = np.array([0.1,0.2,0.3,0.5,0.8,2.0 ])\n\n#多項式逼近法,選擇階層\np1 = np.polyfit(x,y,1)\nprint(p1)\np2 = np.polyfit(x,y,2)\nprint(p2)\np3 = np.polyfit(x,y,3)\nprint(p3)\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.plot(x,y,'ro')\n\n# np.polyval表示多項式的值,把係數p_帶入多項式x求出來的值\nxp = np.linspace(0,5,100)\nplt.plot(xp, np.polyval(p1,xp), 'b-', label='linear') #這個字是polyvaL喔!!\nplt.plot(xp, np.polyval(p2,xp), 'g--', label='quadratic')\nplt.plot(xp, np.polyval(p3,xp), 'k:', label='cubic')\nplt.legend(loc='best')", "[ 0.32857143 -0.17142857]\n[ 0.1125 -0.23392857 0.20357143]\n[ 0.04166667 -0.2 0.33690476 0.07857143]\n" ] ], [ [ "# Nonlinear regression", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom scipy.optimize import curve_fit\n\nx = np.array([0,1,2,3,4,5])\ny = np.array([0.1,0.2,0.3,0.5,0.8,2.0 ])\n\n#多項式逼近法,選擇階層\np1 = np.polyfit(x,y,1)\nprint(p1)\np2 = np.polyfit(x,y,2)\nprint(p2)\np3 = np.polyfit(x,y,3)\nprint(p3)\n\n#使用指數對數\ndef f(x,a):\n return 0.1 * np.exp(a*x)\na = curve_fit(f,x,y)[0] #非線性回歸,Use non-linear least squares to fit a function,取第0項\nprint('a='+str(a))\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.plot(x,y,'ro')\n\n# np.polyval表示多項式的值,把係數p_帶入多項式x求出來的值\nxp = np.linspace(0,5,100)\nplt.plot(xp, np.polyval(p1,xp), 'b-', label='linear') 
#這個字是polyvaL喔!!\nplt.plot(xp, np.polyval(p2,xp), 'g--', label='quadratic')\nplt.plot(xp, np.polyval(p3,xp), 'k:', label='cubic')\nplt.plot(xp, f(xp,a), 'c', label='nonlinear')\nplt.legend(loc='best')", "[ 0.32857143 -0.17142857]\n[ 0.1125 -0.23392857 0.20357143]\n[ 0.04166667 -0.2 0.33690476 0.07857143]\na=[ 0.58628748]\n" ] ], [ [ "# Differential equation", "_____no_output_____" ] ], [ [ "from scipy.integrate import odeint\n\ndef dydt(y,t,a):\n return -a * y\n\na = 0.5\nt = np.linspace(0,20)\ny0 = 5.0\ny = odeint(dydt,y0,t,args=(a,))\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.plot(t,y)\nplt.xlabel('time')\nplt.ylabel('y')", "_____no_output_____" ] ], [ [ "# Nonlinear optimization", "_____no_output_____" ] ], [ [ "#概念:要有Objective、Constraint,然後初始猜想值\nimport numpy as np\nfrom scipy.optimize import minimize", "_____no_output_____" ], [ "def objective(x): #此函數求最小值\n x1 = x[0]\n x2 = x[1]\n x3 = x[2]\n x4 = x[3]\n return x1*x4*(x1+x2+x3)+x3\n\n#用減法做比較\ndef constraint1(x):\n return x[0]*x[1]*x[2]*x[3] - 25.0\n\n#用減法做比較\ndef constraint2(x):\n sum_sq = 40.0\n for i in range(0,4):\n sum_sq = sum_sq - x[i]**2\n return sum_sq \n\n#初始猜想值\nx0 = [1,5,5,1]\nprint(objective(x0))\n\n#設定值域\nb = (1.0,5.0) #x的值域\nbnds = (b,b,b,b) #四個值域都一樣b\ncon1 = {'type':'ineq','fun': constraint1} #第一個是不等式\ncon2 = {'type':'eq','fun': constraint2} #第二個需要等式\ncons = [con1,con2] #cons合成一個list\n\nsol = minimize(objective,x0,method='SLSQP',\\\n bounds = bnds, constraints = cons)\n", "16\n" ], [ "print(sol)", " fun: 17.01401724563517\n jac: array([ 14.57227015, 1.37940764, 2.37940764, 9.56415057])\n message: 'Optimization terminated successfully.'\n nfev: 30\n nit: 5\n njev: 5\n status: 0\n success: True\n x: array([ 1. , 4.7429961 , 3.82115462, 1.37940765])\n" ], [ "print(sol.fun)", "17.01401724563517\n" ], [ "print(sol.x)", "[ 1. 4.7429961 3.82115462 1.37940765]\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d0691f21bb6f3258e9530db51cf85e22ae369ad7
22,849
ipynb
Jupyter Notebook
examples/transformer/custom-transformer/PyFunc-Transformer.ipynb
Omrisnyk/merlin
cc2dbeabe52ac6e413db7f7647ed54c7edb7695f
[ "Apache-2.0" ]
97
2020-10-15T08:03:56.000Z
2022-03-31T22:30:59.000Z
examples/transformer/custom-transformer/PyFunc-Transformer.ipynb
Omrisnyk/merlin
cc2dbeabe52ac6e413db7f7647ed54c7edb7695f
[ "Apache-2.0" ]
91
2020-10-26T03:15:27.000Z
2022-03-31T10:19:55.000Z
examples/transformer/custom-transformer/PyFunc-Transformer.ipynb
Omrisnyk/merlin
cc2dbeabe52ac6e413db7f7647ed54c7edb7695f
[ "Apache-2.0" ]
26
2020-10-21T03:53:36.000Z
2022-03-16T06:43:15.000Z
31.559392
518
0.566546
[ [ [ "# PyFunc Model + Transformer Example\n\nThis notebook demonstrates how to deploy a Python function based model and a custom transformer. This type of model is useful as user would be able to define their own logic inside the model as long as it satisfy contract given in `merlin.PyFuncModel`. If the pre/post-processing steps could be implemented in Python, it's encouraged to write them in the PyFunc model code instead of separating them into another transformer.\n\nThe model we are going to develop and deploy is a cifar10 model accepts a tensor input. The transformer has preprocessing step that allows the user to send a raw image data and convert it to a tensor input.", "_____no_output_____" ], [ "## Requirements\n\n- Authenticated to gcloud (```gcloud auth application-default login```)", "_____no_output_____" ] ], [ [ "!pip install --upgrade -r requirements.txt > /dev/null", "\u001b[33mYou are using pip version 19.0.3, however version 21.1.2 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" ], [ "import warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "## 1. Initialize Merlin", "_____no_output_____" ], [ "### 1.1 Set Merlin Server", "_____no_output_____" ] ], [ [ "import merlin\n\nMERLIN_URL = \"<MERLIN_HOST>/api/merlin\"\n\nmerlin.set_url(MERLIN_URL)", "_____no_output_____" ] ], [ [ "### 1.2 Set Active Project\n\n`project` represent a project in real life. You may have multiple model within a project.\n\n`merlin.set_project(<project-name>)` will set the active project into the name matched by argument. You can only set it to an existing project. 
If you would like to create a new project, please do so from the MLP UI.", "_____no_output_____" ] ], [ [ "PROJECT_NAME = \"sample\"\n\nmerlin.set_project(PROJECT_NAME)", "/Users/ariefrahmansyah/.pyenv/versions/3.7.3/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n and should_run_async(code)\n" ] ], [ [ "### 1.3 Set Active Model\n\n`model` represents an abstract ML model. Conceptually, `model` in Merlin is similar to a class in programming language. To instantiate a `model` you'll have to create a `model_version`.\n\nEach `model` has a type, currently model type supported by Merlin are: sklearn, xgboost, tensorflow, pytorch, and user defined model (i.e. pyfunc model).\n\n`model_version` represents a snapshot of particular `model` iteration. You'll be able to attach information such as metrics and tag to a given `model_version` as well as deploy it as a model service.\n\n`merlin.set_model(<model_name>, <model_type>)` will set the active model to the name given by parameter, if the model with given name is not found, a new model will be created.", "_____no_output_____" ] ], [ [ "from merlin.model import ModelType\n\nMODEL_NAME = \"transformer-pyfunc\"\n\nmerlin.set_model(MODEL_NAME, ModelType.PYFUNC)", "_____no_output_____" ] ], [ [ "## 2. 
Train Model\n\nIn this step, we are going to train a cifar10 model using PyToch and create PyFunc model class that does the prediction using trained PyTorch model.", "_____no_output_____" ], [ "### 2.1 Prepare Training Data", "_____no_output_____" ] ], [ [ "import torch\nimport torchvision\nimport torchvision.transforms as transforms\n\ntransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\n\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n shuffle=True, num_workers=2)", "/Users/ariefrahmansyah/.pyenv/versions/3.7.3/lib/python3.7/site-packages/torchvision/datasets/lsun.py:8: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n from collections import Iterable\n0it [00:00, ?it/s]" ] ], [ [ "### 2.2 Create PyTorch Model", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n\nclass PyTorchModel(nn.Module):\n def __init__(self):\n super(PyTorchModel, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16 * 5 * 5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "/Users/ariefrahmansyah/.pyenv/versions/3.7.3/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n and should_run_async(code)\n" ] ], [ [ "### 2.3 Train Model", "_____no_output_____" ] ], [ [ "import torch.optim as optim\n\nnet = PyTorchModel()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n\nfor epoch in range(2): # loop over the dataset multiple times\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0", "170500096it [03:10, 1240089.84it/s] " ] ], [ [ "### 2.4 Check Prediction", "_____no_output_____" ] ], [ [ "dataiter = iter(trainloader)\ninputs, labels = dataiter.next()\n\npredict_out = net(inputs[0:1])\npredict_out", "_____no_output_____" ] ], [ [ "### 2.5 Serialize Model", "_____no_output_____" ] ], [ [ "import os\n\nmodel_dir = \"pytorch-model\"\nmodel_path = os.path.join(model_dir, \"model.pt\")\nmodel_class_path = os.path.join(model_dir, \"model.py\")\n\ntorch.save(net.state_dict(), model_path)", "_____no_output_____" ] ], [ [ "### 2.6 Save PyTorchModel Class\n\nWe also need to save the PyTorchModel class and upload it to Merlin alongside the serialized trained model. 
The next cell will write the PyTorchModel we defined above to `pytorch-model/model.py` file.", "_____no_output_____" ] ], [ [ "%%file pytorch-model/model.py\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass PyTorchModel(nn.Module):\n def __init__(self):\n super(PyTorchModel, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16 * 5 * 5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "Overwriting pytorch-model/model.py\n" ] ], [ [ "## 3. Create PyFunc Model\n\nTo create a PyFunc model you'll have to extend `merlin.PyFuncModel` class and implement its `initialize` and `infer` method.\n\n`initialize` will be called once during model initialization. The argument to `initialize` is a dictionary containing a key value pair of artifact name and its URL. The artifact's keys are the same value as received by `log_pyfunc_model`.\n\n`infer` method is the prediction method that is need to be implemented. It accept a dictionary type argument which represent incoming request body. `infer` should return a dictionary object which correspond to response body of prediction result.\n\nIn following example we are creating PyFunc model called `CifarModel`. In its `initialize` method we expect 2 artifacts called `model_path` and `model_class_path`, those 2 artifacts would point to the serialized model and the PyTorch model class file. 
The `infer` method will simply does prediction for the model and return the result.", "_____no_output_____" ] ], [ [ "import importlib\nimport sys\n\nfrom merlin.model import PyFuncModel\n\nMODEL_CLASS_NAME=\"PyTorchModel\"\n\nclass CifarModel(PyFuncModel):\n def initialize(self, artifacts):\n model_path = artifacts[\"model_path\"]\n model_class_path = artifacts[\"model_class_path\"]\n \n # Load the python class into memory\n sys.path.append(os.path.dirname(model_class_path))\n modulename = os.path.basename(model_class_path).split('.')[0].replace('-', '_')\n model_class = getattr(importlib.import_module(modulename), MODEL_CLASS_NAME)\n \n # Make sure the model weight is transform with the right device in this machine\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n \n self._pytorch = model_class().to(device)\n self._pytorch.load_state_dict(torch.load(model_path, map_location=device))\n self._pytorch.eval()\n \n def infer(self, request, **kwargs):\n inputs = torch.tensor(request[\"instances\"])\n result = self._pytorch(inputs)\n return {\"predictions\": result.tolist()}", "_____no_output_____" ] ], [ [ "Now, let's test it locally.", "_____no_output_____" ] ], [ [ "import json\n\nwith open(os.path.join(\"input-tensor.json\"), \"r\") as f:\n tensor_req = json.load(f)\n\nm = CifarModel()\nm.initialize({\"model_path\": model_path, \"model_class_path\": model_class_path})\nm.infer(tensor_req)", "_____no_output_____" ] ], [ [ "## 4. Deploy Model", "_____no_output_____" ], [ "To deploy the model, we will have to create an iteration of the model (by create a `model_version`), upload the serialized model to MLP, and then deploy.", "_____no_output_____" ], [ "### 4.1 Create Model Version and Upload", "_____no_output_____" ], [ "`merlin.new_model_version()` is a convenient method to create a model version and start its development process. 
It is equal to following codes:\n\n```\nv = model.new_model_version()\nv.start()\nv.log_pyfunc_model(model_instance=EnsembleModel(), \n conda_env=\"env.yaml\", \n artifacts={\"xgb_model\": model_1_path, \"sklearn_model\": model_2_path})\nv.finish()\n```", "_____no_output_____" ], [ "To upload PyFunc model you have to provide following arguments:\n1. `model_instance` is the instance of PyFunc model, the model has to extend `merlin.PyFuncModel`\n2. `conda_env` is path to conda environment yaml file. The environment yaml file must contain all dependency required by the PyFunc model.\n3. (Optional) `artifacts` is additional artifact that you want to include in the model\n4. (Optional) `code_path` is a list of directory containing python code that will be loaded during model initialization, this is required when `model_instance` depend on local python package", "_____no_output_____" ] ], [ [ "with merlin.new_model_version() as v: \n merlin.log_pyfunc_model(model_instance=CifarModel(),\n conda_env=\"env.yaml\",\n artifacts={\"model_path\": model_path, \"model_class_path\": model_class_path})", "2021/06/23 05:41:28 WARNING mlflow.models.model: Logging model metadata to the tracking server has failed, possibly due older server version. The model artifacts have been logged successfully under gs://<MERLIN_BUCKET>/mlflow/604/7b57180c051842fe815adbacfa282541/artifacts. In addition to exporting model artifacts, MLflow clients 1.7.0 and above attempt to record model metadata to the tracking store. If logging to a mlflow server via REST, consider upgrading the server version to MLflow 1.7.0 or above.\n" ] ], [ [ "### 4.2 Deploy Model and Transformer\n\nTo deploy a model and its transformer, you must pass a `transformer` object to `deploy()` function. 
Each of deployed model version will have its own generated url.", "_____no_output_____" ] ], [ [ "from merlin.resource_request import ResourceRequest\nfrom merlin.transformer import Transformer\n\n# Create a transformer object and its resources requests\nresource_request = ResourceRequest(min_replica=1, max_replica=1, \n cpu_request=\"100m\", memory_request=\"200Mi\")\ntransformer = Transformer(\"gcr.io/kubeflow-ci/kfserving/image-transformer:latest\",\n resource_request=resource_request)\n\nendpoint = merlin.deploy(v, transformer=transformer)", "/Users/ariefrahmansyah/.pyenv/versions/3.7.3/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n and should_run_async(code)\nDeploying model transformer-pyfunc version 2\n0% [##############################] 100% | ETA: 00:00:00" ] ], [ [ "### 4.3 Send Test Request", "_____no_output_____" ] ], [ [ "import json\nimport requests\n\nwith open(os.path.join(\"input-raw-image.json\"), \"r\") as f:\n req = json.load(f)\n\nresp = requests.post(endpoint.url, json=req)\nresp.text", "_____no_output_____" ] ], [ [ "## 4. Clean Up", "_____no_output_____" ], [ "## 4.1 Delete Deployment", "_____no_output_____" ] ], [ [ "merlin.undeploy(v)", "Deleting deployment of model transformer-pyfunc version 2 from enviroment id-staging\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d06928dba32ea8c995e971ee38a94adf20dc72e4
39,728
ipynb
Jupyter Notebook
test/ipynb/clojure/XChartingTest.ipynb
ssadedin/beakerx
34479b07d2dfdf1404692692f483faf0251632c3
[ "Apache-2.0" ]
1,491
2017-03-30T03:05:05.000Z
2022-03-27T04:26:02.000Z
test/ipynb/clojure/XChartingTest.ipynb
ssadedin/beakerx
34479b07d2dfdf1404692692f483faf0251632c3
[ "Apache-2.0" ]
3,268
2015-01-01T00:10:26.000Z
2017-05-05T18:59:41.000Z
test/ipynb/clojure/XChartingTest.ipynb
ssadedin/beakerx
34479b07d2dfdf1404692692f483faf0251632c3
[ "Apache-2.0" ]
287
2017-04-03T01:30:06.000Z
2022-03-17T06:09:15.000Z
94.816229
21,882
0.670484
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0692eedf86fcd6e2ef61f7d395428492fedc823
725,556
ipynb
Jupyter Notebook
CCI_501_ML_Project.ipynb
mghendi/smartphonepriceclassifier
6bc6f8d0c2a7649575bebe011c248a8252e6ec43
[ "MIT" ]
null
null
null
CCI_501_ML_Project.ipynb
mghendi/smartphonepriceclassifier
6bc6f8d0c2a7649575bebe011c248a8252e6ec43
[ "MIT" ]
null
null
null
CCI_501_ML_Project.ipynb
mghendi/smartphonepriceclassifier
6bc6f8d0c2a7649575bebe011c248a8252e6ec43
[ "MIT" ]
null
null
null
353.068613
552,470
0.902196
[ [ [ "<a href=\"https://colab.research.google.com/github/mghendi/smartphonepriceclassifier/blob/main/CCI_501_ML_Project.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "## CCI501 - Machine Learning Project\n\n### Name: Samuel Mwamburi Mghendi \n\n### Admission Number: P52/37621/2020\n\n### Email: [email protected]\n\n### Course: Machine Learning – CCI 501", "_____no_output_____" ], [ "### Applying Logistic Regression to Establish a Good Pricing Model for Mobile Phone Manufacturers in the Current Market Landscape using Technical Specifications and User Preference.", "_____no_output_____" ], [ "#### This report is organised as follows.\n\n1. Problem Statement\n2. Data Description\n * Data Loading and Preparation\n * Exploratory Data Analysis\n3. Data Preprocessing\n4. Data Modelling\n5. Performance Evaluation\n6. Conclusion", "_____no_output_____" ], [ "### 1. Problem Statement", "_____no_output_____" ], [ "#### To determine the price of a mobile phone in the current market using specifications i.e. screen size, screen and camera resolution, internal storage and battery capacity and user preference.\n\nTraditionally, and rightfully so, consumers have been forced to part with a premium to own a mobile phone with top-of-the-line features and specifications. Some smartphone manufacturers in 2020 still charge upwards of KES 100,000 for a mobile phone that has a large screen, good battery, fast processor and sufficient storage capacity. However, according to a December article on Android Central, mobile phones with great features are getting significantly affordable. 
(Johnson, 2020)\n\nA phone’s specifications is a logical way of determining which class it falls under, with the emergence of cheaper manufacturing techniques and parts however, phone pricing models have become more blurry and it is possible for consumers to purchase more powerful smartphones at cheaper prices.\n\nThis study intends to explore this hypothesis and predict the relationship between these features and the price of a mobile phone in the current landscape using phone specification, product rating and prices data scraped from a Kenyan e-commerce site.\n\n#### Why Logistic Regression?\n\nA supervised learning approach would be useful for this experiment since the data being explored has price labels and categories. Logistic regression is used to classify data by considering outcome variables on extreme ends and consequently forms a line to distinguish them.\n", "_____no_output_____" ], [ "### 2. Data Description\n #### Data Loading and Preparation\n \n \n \n#### Initialization", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "import os\nfrom tqdm import trange", "_____no_output_____" ] ], [ [ "#### Import Data", "_____no_output_____" ] ], [ [ "df = pd.read_csv(\"productdata.csv\")", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "from sklearn import preprocessing", "_____no_output_____" ] ], [ [ "#### Exploratory Data Analysis\n\nGathering more information about the dataset in order to better understand it.\nThe relationship and distribution between screen size, screen resolution, camera resolution, storage space, memory, rating and likes against the resultant price charged for each phone sold was plotted and analyzed.", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1148 entries, 0 to 1147\nData columns (total 13 columns):\n # Column Non-Null 
Count Dtype \n--- ------ -------------- ----- \n 0 Phone 1148 non-null object \n 1 Screen (inches) 1148 non-null float64\n 2 Resolution (pixels) 1148 non-null object \n 3 Camera (MP) 1148 non-null float64\n 4 OS 1131 non-null object \n 5 Storage (GB) 1148 non-null int64 \n 6 RAM (GB) 1148 non-null int64 \n 7 Battery (mAh) 1148 non-null int64 \n 8 Battery Type 1148 non-null object \n 9 Price(Kshs) 1148 non-null int64 \n 10 Price Category 1148 non-null object \n 11 Rating 1148 non-null float64\n 12 Likes 1148 non-null int64 \ndtypes: float64(3), int64(5), object(5)\nmemory usage: 116.7+ KB\n" ] ], [ [ "The feature OS has missing values.", "_____no_output_____" ] ], [ [ "# check shape\ndf.shape", "_____no_output_____" ] ], [ [ "The dataset has 1,148 records and 12 features.", "_____no_output_____" ] ], [ [ "# remove duplicates, if any\ndf.drop_duplicates(inplace = True)\ndf.shape", "_____no_output_____" ] ], [ [ "No duplicate records available in the dataset.", "_____no_output_____" ], [ "#### Mobile Phones by Screen Size Contrasted by User Rating", "_____no_output_____" ] ], [ [ "# previewing distribution of screen size by rating\ndf['Round Rating'] = df['Rating'].round(decimals=0)\nplt.figure(figsize = (20, 6))\n\nax = sns.histplot(df, x=\"Screen (inches)\", stat=\"count\", hue=\"Round Rating\", multiple=\"dodge\", shrink=0.8)\nfor p in ax.patches:# histogram bar label\n h = p.get_height()\n if (h != 0): ax.text(x = p.get_x()+(p.get_width()/2), y = h+1, s = \"{:.0f}\".format(h),ha = \"center\")\nplt.xlabel('Screen Size (inches)')\nplt.title(\"Screen Size of Mobile Phones contrasted by User Rating\", fontsize=12, fontweight=\"bold\");\nplt.show()\nprint(\"Screen Size: values count=\" + str(df['Screen (inches)'].count()) + \", min=\" + str(df['Screen (inches)'].min()) + \", max=\" + str(df['Screen (inches)'].max()) + \", mean=\" + str(df['Screen (inches)'].mean()))", "_____no_output_____" ] ], [ [ "The chart can be used to deduce a high-level inference on the phone 
industry consumer purchase preference. Phones with a larger screen size, which are inherently larger in size, between 5 to 7 inches are seen to be rated higher. ", "_____no_output_____" ] ], [ [ "# changing the datatype of the 'OS' variable\ndf['OS'] = df['OS'].astype('str')", "_____no_output_____" ] ], [ [ "#### Mobile Phones by Camera Resolution contrasted by User Rating", "_____no_output_____" ] ], [ [ "# previewing distribution of camera resolution by rating\nplt.figure(figsize = (20, 6))\nax = sns.histplot(df, x=\"Camera (MP)\", hue=\"Round Rating\", multiple=\"dodge\", shrink=0.8)\nfor p in ax.patches:# label each bar in histogram\n h = p.get_height()\n if (h != 0): ax.text(x = p.get_x()+(p.get_width()/2), y = h+1, s = \"{:.0f}\".format(h),ha = \"center\")\nplt.xlabel('Camera (MP)')\nplt.title(\"Distribution of Camera Resolution by User Rating\", fontsize=12, fontweight=\"bold\");\nplt.show()\nprint(\"Camera (MP): values count=\" + str(df['Camera (MP)'].count()) + \", min=\" + str(df['Camera (MP)'].min()) + \", max=\" + str(df['Camera (MP)'].max()) + \", mean=\" + str(df['Camera (MP)'].mean()))", "_____no_output_____" ] ], [ [ "Mobile phones with cameras sporting high resolutions,15 and 32 Megapixels , based on the current offering in the market have significantly better relative ratings than mid-tier models between 20 to 30 Megapixels and low-tier models less than 5 megapixels.", "_____no_output_____" ] ], [ [ "# previewing distribution of Storage Capacity by rating\nplt.figure(figsize = (20, 6))\nax = sns.histplot(df, x=\"Storage (GB)\", hue=\"Round Rating\", multiple=\"dodge\", shrink=0.8)\nfor p in ax.patches:# label each bar in histogram\n h = p.get_height()\n if (h != 0): ax.text(x = p.get_x()+(p.get_width()/2), y = h+1, s = \"{:.0f}\".format(h),ha = \"center\")\nplt.xlabel('Storage (GB)')\nplt.title(\"Distribution of Storage Capacity by User Rating\", fontsize=12, fontweight=\"bold\");\nplt.show()\nprint(\"Storage (GB): values count=\" + 
str(df['Storage (GB)'].count()) + \", min=\" + str(df['Storage (GB)'].min()) + \", max=\" + str(df['Storage (GB)'].max()) + \", mean=\" + str(df['Storage (GB)'].mean()))", "_____no_output_____" ] ], [ [ "As anticipated, mobile phones with higher internal storage capacities, greater than or equal to 256 Gigabytes, recieve significantly better relative ratings than models with less than 128 gigabytes. Additionally, there are very few purchases of mobile phones equal to or greater than 512 gigabytes of storage.", "_____no_output_____" ], [ "#### Mobile Phones Specifications by User Preference(Likes)", "_____no_output_____" ], [ "In the E-Commerce store from which the data was retrieved, users are also capable of adding a product to their wishlist after a high level assessment of the product features and pricing. The number of likes a product has recieved refers to the number of users who have added the given product to their wishlist.", "_____no_output_____" ] ], [ [ "#pairplot to investigate the relationship between all the variables \nsns.pairplot(df)\nplt.show()", "_____no_output_____" ] ], [ [ "In reference to the pair plot above, mid-tier phone models are significantly better rated and well recieved as compared to their much more expensive and budget counterparts in the local current market.\n\nPhones with mid-tier features such as an average storage capacity, such as a large display 5 to 7 inches, storage of between 128 Gigabytes of storage, 4 Gigabytes of RAM, 3000 to 5000 milliampere hours battery capacity and 10 to 30 Megapixel Camera Resolution are well recieve more likes. \n\nThere appears to be a direct correlation between the number of likes a product recieves before-hand and the user ratings after purchase. 
Mobile phones that recieved an average rating of 4 had roughly 300 likes from users based on the specifications and price point provided.\n\nThis also implies that the likes a product recieved directly translates to an purchase of the product in the long term. ", "_____no_output_____" ], [ "### 3. Data Preprocessing\n\n\n\n#### Converting Text to Numerical Vector\n\nThe features Price Category and Battery Type contain important dependent and independent values respectively key in the experiment. These values would need to be converted into numerical values in order to be applied in the algorithm.", "_____no_output_____" ] ], [ [ "# creating categorigal variables for the battery type feature\ndf[\"Battery Type\"].replace({\"Li-Po\": \"0\", \"Li-Ion\": \"1\"}, inplace=True)\nprint(df)", " Phone Screen (inches) Resolution (pixels) \\\n0 Gionee M7 Power 6.00 720x1440 \n1 Gionee M7 6.01 1080x2160 \n2 Samsung Galaxy M21 6GB/128GB 6.40 1080x2340 \n3 Samsung Galaxy M21 4GB/64GB 6.40 1080x2340 \n4 Samsung Galaxy A31 6GB/128GB 6.40 1080x2400 \n... ... ... ... \n1143 Nokia 105 (2019) 1.77 120x160 \n1144 Nokia 220 4G 2.40 240x320 \n1145 Nokia X71 6.39 1080x2316 \n1146 Nokia 2.2 3GB/32GB 5.71 720x1520 \n1147 Nokia 2.2 2GB/16GB 5.71 720x1520 \n\n Camera (MP) OS Storage (GB) RAM (GB) \\\n0 8.0 OS:Android 7.1 Nougat 64 4 \n1 8.0 OS:Android 7.1 Nougat 64 6 \n2 20.0 OS:Android 10; One UI 2.0 128 6 \n3 20.0 OS:Android 10; One UI 2.0 64 4 \n4 20.0 OS:Android 10; One UI 2.0 128 6 \n... ... ... ... ... \n1143 8.0 nan 4 8 \n1144 8.0 nan 24 8 \n1145 16.0 OS:Android 9.0 Pie; Android One 128 6 \n1146 5.0 OS:Android 9.0 Pie; Android One 32 3 \n1147 5.0 OS:Android 9.0 Pie; Android One 16 2 \n\n Battery (mAh) Battery Type Price(Kshs) Price Category Rating Likes \\\n0 4000 0 15880 Mid-Tier 4.0 13 \n1 4000 0 15880 Mid-Tier 4.5 8 \n2 6000 0 21590 Mid-Tier 4.3 30 \n3 6000 0 22499 Mid-Tier 3.8 31 \n4 5000 0 24999 Mid-Tier 3.8 31 \n... ... ... ... ... ... ... 
\n1143 800 1 1900 Budget 3.0 11 \n1144 1200 1 1900 Budget 4.0 8 \n1145 3500 0 1900 Budget 3.0 53 \n1146 3000 1 1900 Budget 3.4 50 \n1147 3000 1 9500 Budget 3.9 39 \n\n Round Rating \n0 4.0 \n1 4.0 \n2 4.0 \n3 4.0 \n4 4.0 \n... ... \n1143 3.0 \n1144 4.0 \n1145 3.0 \n1146 3.0 \n1147 4.0 \n\n[1148 rows x 14 columns]\n" ], [ "# creating categorigal variables for the battery type feature\ndf[\"Price Category\"].replace({\"Budget\": \"0\", \"Mid-Tier\": \"1\", \"Flagship\": \"2\"}, inplace=True)\nprint(df)", " Phone Screen (inches) Resolution (pixels) \\\n0 Gionee M7 Power 6.00 720x1440 \n1 Gionee M7 6.01 1080x2160 \n2 Samsung Galaxy M21 6GB/128GB 6.40 1080x2340 \n3 Samsung Galaxy M21 4GB/64GB 6.40 1080x2340 \n4 Samsung Galaxy A31 6GB/128GB 6.40 1080x2400 \n... ... ... ... \n1143 Nokia 105 (2019) 1.77 120x160 \n1144 Nokia 220 4G 2.40 240x320 \n1145 Nokia X71 6.39 1080x2316 \n1146 Nokia 2.2 3GB/32GB 5.71 720x1520 \n1147 Nokia 2.2 2GB/16GB 5.71 720x1520 \n\n Camera (MP) OS Storage (GB) RAM (GB) \\\n0 8.0 OS:Android 7.1 Nougat 64 4 \n1 8.0 OS:Android 7.1 Nougat 64 6 \n2 20.0 OS:Android 10; One UI 2.0 128 6 \n3 20.0 OS:Android 10; One UI 2.0 64 4 \n4 20.0 OS:Android 10; One UI 2.0 128 6 \n... ... ... ... ... \n1143 8.0 nan 4 8 \n1144 8.0 nan 24 8 \n1145 16.0 OS:Android 9.0 Pie; Android One 128 6 \n1146 5.0 OS:Android 9.0 Pie; Android One 32 3 \n1147 5.0 OS:Android 9.0 Pie; Android One 16 2 \n\n Battery (mAh) Battery Type Price(Kshs) Price Category Rating Likes \\\n0 4000 0 15880 1 4.0 13 \n1 4000 0 15880 1 4.5 8 \n2 6000 0 21590 1 4.3 30 \n3 6000 0 22499 1 3.8 31 \n4 5000 0 24999 1 3.8 31 \n... ... ... ... ... ... ... \n1143 800 1 1900 0 3.0 11 \n1144 1200 1 1900 0 4.0 8 \n1145 3500 0 1900 0 3.0 53 \n1146 3000 1 1900 0 3.4 50 \n1147 3000 1 9500 0 3.9 39 \n\n Round Rating \n0 4.0 \n1 4.0 \n2 4.0 \n3 4.0 \n4 4.0 \n... ... 
\n1143 3.0 \n1144 4.0 \n1145 3.0 \n1146 3.0 \n1147 4.0 \n\n[1148 rows x 14 columns]\n" ], [ "df[\"Price Category\"].value_counts(normalize= True)", "_____no_output_____" ], [ "import nltk\nimport string\nimport math\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import metrics\nimport re\nimport string\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import CountVectorizer", "_____no_output_____" ], [ "vectorizer = CountVectorizer(min_df=0, lowercase=False)\nvectorizer.fit(df[\"OS\"])\nvectorizer.vocabulary_", "_____no_output_____" ], [ "vectorizer = CountVectorizer(min_df=0, lowercase=False)\nvectorizer.fit(df[\"Resolution (pixels)\"])\nvectorizer.vocabulary_", "_____no_output_____" ] ], [ [ "#### Creating Bag of Words models", "_____no_output_____" ] ], [ [ "df[\"OS\"] = vectorizer.transform(df[\"OS\"]).toarray()", "_____no_output_____" ], [ "print(df)", " Phone Screen (inches) Resolution (pixels) \\\n0 Gionee M7 Power 6.00 720x1440 \n1 Gionee M7 6.01 1080x2160 \n2 Samsung Galaxy M21 6GB/128GB 6.40 1080x2340 \n3 Samsung Galaxy M21 4GB/64GB 6.40 1080x2340 \n4 Samsung Galaxy A31 6GB/128GB 6.40 1080x2400 \n... ... ... ... 
\n1143 Nokia 105 (2019) 1.77 120x160 \n1144 Nokia 220 4G 2.40 240x320 \n1145 Nokia X71 6.39 1080x2316 \n1146 Nokia 2.2 3GB/32GB 5.71 720x1520 \n1147 Nokia 2.2 2GB/16GB 5.71 720x1520 \n\n Camera (MP) OS Storage (GB) RAM (GB) Battery (mAh) Battery Type \\\n0 8.0 0 64 4 4000 0 \n1 8.0 0 64 6 4000 0 \n2 20.0 0 128 6 6000 0 \n3 20.0 0 64 4 6000 0 \n4 20.0 0 128 6 5000 0 \n... ... .. ... ... ... ... \n1143 8.0 0 4 8 800 1 \n1144 8.0 0 24 8 1200 1 \n1145 16.0 0 128 6 3500 0 \n1146 5.0 0 32 3 3000 1 \n1147 5.0 0 16 2 3000 1 \n\n Price(Kshs) Price Category Rating Likes Round Rating \n0 15880 1 4.0 13 4.0 \n1 15880 1 4.5 8 4.0 \n2 21590 1 4.3 30 4.0 \n3 22499 1 3.8 31 4.0 \n4 24999 1 3.8 31 4.0 \n... ... ... ... ... ... \n1143 1900 0 3.0 11 3.0 \n1144 1900 0 4.0 8 4.0 \n1145 1900 0 3.0 53 3.0 \n1146 1900 0 3.4 50 3.0 \n1147 9500 0 3.9 39 4.0 \n\n[1148 rows x 14 columns]\n" ], [ "df[\"Resolution (pixels)\"] = vectorizer.transform(df[\"Resolution (pixels)\"]).toarray()\nprint (df)", " Phone Screen (inches) Resolution (pixels) \\\n0 Gionee M7 Power 6.00 0 \n1 Gionee M7 6.01 0 \n2 Samsung Galaxy M21 6GB/128GB 6.40 0 \n3 Samsung Galaxy M21 4GB/64GB 6.40 0 \n4 Samsung Galaxy A31 6GB/128GB 6.40 0 \n... ... ... ... \n1143 Nokia 105 (2019) 1.77 0 \n1144 Nokia 220 4G 2.40 0 \n1145 Nokia X71 6.39 0 \n1146 Nokia 2.2 3GB/32GB 5.71 0 \n1147 Nokia 2.2 2GB/16GB 5.71 0 \n\n Camera (MP) OS Storage (GB) RAM (GB) Battery (mAh) Battery Type \\\n0 8.0 0 64 4 4000 0 \n1 8.0 0 64 6 4000 0 \n2 20.0 0 128 6 6000 0 \n3 20.0 0 64 4 6000 0 \n4 20.0 0 128 6 5000 0 \n... ... .. ... ... ... ... \n1143 8.0 0 4 8 800 1 \n1144 8.0 0 24 8 1200 1 \n1145 16.0 0 128 6 3500 0 \n1146 5.0 0 32 3 3000 1 \n1147 5.0 0 16 2 3000 1 \n\n Price(Kshs) Price Category Rating Likes Round Rating \n0 15880 1 4.0 13 4.0 \n1 15880 1 4.5 8 4.0 \n2 21590 1 4.3 30 4.0 \n3 22499 1 3.8 31 4.0 \n4 24999 1 3.8 31 4.0 \n... ... ... ... ... ... 
\n1143 1900 0 3.0 11 3.0 \n1144 1900 0 4.0 8 4.0 \n1145 1900 0 3.0 53 3.0 \n1146 1900 0 3.4 50 3.0 \n1147 9500 0 3.9 39 4.0 \n\n[1148 rows x 14 columns]\n" ] ], [ [ "### 4. Data Modelling", "_____no_output_____" ], [ "### Data Modelling for Logistic Regression", "_____no_output_____" ], [ "#### Feature Selection", "_____no_output_____" ], [ "For this experiment, the mobile phone's technical specifications will be used as the independent variables. The ratings and likes which are subjective assessments will be dropped.\nVariables such as the Phone Name are not important in price point predictability for this particular endevour and will therefore be dropped.", "_____no_output_____" ] ], [ [ "X = df.drop(columns = ['Phone','Price(Kshs)', 'Rating', 'Likes', 'OS', 'Battery Type', 'Resolution (pixels)', 'Round Rating']).values\ny = df['Price Category'].values", "_____no_output_____" ] ], [ [ "#### Splitting Data", "_____no_output_____" ] ], [ [ "# splitting into 75% training and 25% test sets\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1000)", "_____no_output_____" ] ], [ [ "#### Feature Scaling", "_____no_output_____" ] ], [ [ "scaler = preprocessing.StandardScaler().fit(X_train)", "_____no_output_____" ], [ "scaler", "_____no_output_____" ], [ "scaler.mean_", "_____no_output_____" ], [ "scaler.scale_", "_____no_output_____" ], [ "X_scaled = scaler.transform(X_train)", "_____no_output_____" ], [ "X_scaled", "_____no_output_____" ], [ "X_scaled.mean(axis=0)", "_____no_output_____" ], [ "X_scaled.std(axis=0)", "_____no_output_____" ] ], [ [ "#### Logistic Regression", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression", "_____no_output_____" ], [ "from sklearn.datasets import make_classification\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import 
make_pipeline\nfrom sklearn.preprocessing import StandardScaler", "_____no_output_____" ], [ "X, y = make_classification(random_state=42)\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\npipe = make_pipeline(StandardScaler(), LogisticRegression())\npipe.fit(X_train, y_train) # apply scaling on training data", "_____no_output_____" ], [ "# apply scaling on testing data, without leaking training data.\npipe.score(X_test, y_test) ", "_____no_output_____" ], [ "classifier = LogisticRegression()\nclassifier.fit(X_train, y_train)\nscore = classifier.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "### 5. Performance Evaluation", "_____no_output_____" ] ], [ [ "print(\"Accuracy:\", (score)*100, \"%\")", "Accuracy: 100.0 %\n" ] ], [ [ "### 6. Conclusion", "_____no_output_____" ], [ "Logistic regression provides great efficiency, works well in the segmentation and categorization of a small number of categorical variables, in this case, price category. \nIt allows the evaluation of multiple explanatory variables and is relatively fast compared to other supervised classification techniques applied in this experiment such as SVM.\n\nIt is, however, not accurate enough for complex relationships between variables denoted by the exclusion of multiple features.\n\nThis algorithm seems to have performed well in the creation of this model because the decision boundary was rather linear as observed during the exploratory analysis between the various technical specifications against the pricing model.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]