To set the position and rotation of each cell, we use the built-in functions positions_columinar and xiter_random, which return a list of values given the parameters. A user could also set the values themselves using a list (or a function that returns a list) of size N. Parameters like location, ei (excitatory/inhibitory), params_file, etc. are cell-type parameters and will be used for all N cells of that type.

The excitatory cells are also given a tuning_angle parameter. An intrinsic "tuning angle" is a property found in some cells of the visual cortex. In this model we will use this property, via custom functions, to determine the number and strength of connections between subsets of cells. Most models will not have or use a tuning angle, but they may require some other parameter; in general, users can assign whatever custom parameters they want to cells and cell types and use them as properties for creating connections and running simulations.

Next we continue by creating our point (integrate-and-fire) neurons. Notice they don't have properties like y/z rotation or morphology, as those don't apply to point neurons.
""" net.add_nodes(N=200, pop_name='LIF_exc', positions=positions_columinar(N=200, center=[0, 50.0, 0], min_radius=30.0, max_radius=60.0, height=100.0), tuning_angle=np.linspace(start=0.0, stop=360.0, num=200, endpoint=False), location='VisL4', ei='e', model_type='point_process', model_template='nrn:IntFire1', dynamics_params='IntFire1_exc_1.json') net.add_nodes(N=100, pop_name='LIF_inh', positions=positions_columinar(N=100, center=[0, 50.0, 0], min_radius=30.0, max_radius=60.0, height=100.0), location='VisL4', ei='i', model_type='point_process', model_template='nrn:IntFire1', dynamics_params='IntFire1_inh_1.json') """ import glob import json import os def syn_params_dicts(syn_dir='sim_theta/components/synaptic_models'): """ returns: A dictionary of dictionaries containing all properties in the synapse json files """ files = glob.glob(os.path.join(syn_dir,'*.json')) data = {} for fh in files: print(fh) with open(fh) as f: data[os.path.basename(fh)] = json.load(f) #data["filename.json"] = {"prop1":"val1",...} return data syn = syn_params_dicts() from bmtk.simulator.bionet.pyfunction_cache import add_synapse_model from neuron import h import random def Pyr2Pyr(syn_params, sec_x, sec_id): """Create a pyr2pyr synapse :param syn_params: parameters of a synapse :param sec_x: normalized distance along the section :param sec_id: target section :return: NEURON synapse object """ lsyn = h.pyr2pyr(sec_x, sec=sec_id) if syn_params.get('AlphaTmax_ampa'): lsyn.AlphaTmax_ampa = float(syn_params['AlphaTmax_ampa']) # par.x(21) if syn_params.get('Beta_ampa'): lsyn.Beta_ampa = float(syn_params['Beta_ampa']) # par.x(22) if syn_params.get('Cdur_ampa'): lsyn.Cdur_ampa = float(syn_params['Cdur_ampa']) # par.x(23) if syn_params.get('gbar_ampa'): lsyn.gbar_ampa = float(syn_params['gbar_ampa']) # par.x(24) if syn_params.get('Erev_ampa'): lsyn.Erev_ampa = float(syn_params['Erev_ampa']) # par.x(16) if syn_params.get('AlphaTmax_nmda'): lsyn.AlphaTmax_nmda = float(syn_params['AlphaTmax_nmda']) # par.x(25) if syn_params.get('Beta_nmda'): lsyn.Beta_nmda = float(syn_params['Beta_nmda']) # par.x(26) if syn_params.get('Cdur_nmda'): lsyn.Cdur_nmda = float(syn_params['Cdur_nmda']) # par.x(27) if syn_params.get('gbar_nmda'): lsyn.gbar_nmda = float(syn_params['gbar_nmda']) # par.x(28) if syn_params.get('Erev_nmda'): lsyn.Erev_nmda = float(syn_params['Erev_nmda']) # par.x(16) if syn_params.get('initW'): lsyn.initW = float(syn_params['initW']) * random.uniform(0.5,1.0) # par.x(0) * rC.uniform(0.5,1.0)//rand.normal(0.5,1.5) //`rand.repick() if syn_params.get('Wmax'): lsyn.Wmax = float(syn_params['Wmax']) * lsyn.initW # par.x(1) * lsyn.initW if syn_params.get('Wmin'): lsyn.Wmin = float(syn_params['Wmin']) * lsyn.initW # par.x(2) * lsyn.initW #delay = float(syn_params['initW']) # par.x(3) + delayDistance #lcon = new NetCon(&v(0.5), lsyn, 0, delay, 1) if syn_params.get('lambda1'): lsyn.lambda1 = float(syn_params['lambda1']) # par.x(6) if syn_params.get('lambda2'): lsyn.lambda2 = float(syn_params['lambda2']) # par.x(7) if syn_params.get('threshold1'): lsyn.threshold1 = float(syn_params['threshold1']) # par.x(8) if syn_params.get('threshold2'): lsyn.threshold2 = float(syn_params['threshold2']) # par.x(9) if syn_params.get('tauD1'): lsyn.tauD1 = float(syn_params['tauD1']) # par.x(10) if syn_params.get('d1'): lsyn.d1 = float(syn_params['d1']) # par.x(11) if syn_params.get('tauD2'): lsyn.tauD2 = float(syn_params['tauD2']) # par.x(12) if syn_params.get('d2'): lsyn.d2 = float(syn_params['d2']) # par.x(13) if syn_params.get('tauF'): lsyn.tauF 
= float(syn_params['tauF']) # par.x(14) if syn_params.get('f'): lsyn.f = float(syn_params['f']) # par.x(15) if syn_params.get('bACH'): lsyn.bACH = float(syn_params['bACH']) # par.x(17) if syn_params.get('aDA'): lsyn.aDA = float(syn_params['aDA']) # par.x(18) if syn_params.get('bDA'): lsyn.bDA = float(syn_params['bDA']) # par.x(19) if syn_params.get('wACH'): lsyn.wACH = float(syn_params['wACH']) # par.x(20) return lsyn def pyr2pyr(syn_params, xs, secs): """Create a list of pyr2pyr synapses :param syn_params: parameters of a synapse :param xs: list of normalized distances along the section :param secs: target sections :return: list of NEURON synpase objects """ syns = [] for x, sec in zip(xs, secs): syn = Pyr2Pyr(syn_params, x, sec) syns.append(syn) return syns def Inter2Pyr(syn_params, sec_x, sec_id): lsyn = h.inter2pyr(sec_x, sec=sec_id) #target.soma lsyn = new inter2pyr(0.9) if syn_params.get('AlphaTmax_gabaa'): lsyn.AlphaTmax_gabaa = float(syn_params['AlphaTmax_gabaa']) # par.x(21) if syn_params.get('Beta_gabaa'): lsyn.Beta_gabaa = float(syn_params['Beta_gabaa']) # par.x(22) if syn_params.get('Cdur_gabaa'): lsyn.Cdur_gabaa = float(syn_params['Cdur_gabaa']) # par.x(23) if syn_params.get('gbar_gabaa'): lsyn.gbar_gabaa = float(syn_params['gbar_gabaa']) # par.x(24) if syn_params.get('Erev_gabaa'): lsyn.Erev_gabaa = float(syn_params['Erev_gabaa']) # par.x(16) if syn_params.get('AlphaTmax_gabab'): lsyn.AlphaTmax_gabab = float(syn_params['AlphaTmax_gabab']) # par.x(25) if syn_params.get('Beta_gabab'): lsyn.Beta_gabab = float(syn_params['Beta_gabab']) # par.x(26) if syn_params.get('Cdur_gabab'): lsyn.Cdur_gabab = float(syn_params['Cdur_gabab']) # par.x(27) if syn_params.get('gbar_gabab'): lsyn.gbar_gabab = float(syn_params['gbar_gabab']) # par.x(28) if syn_params.get('Erev_gabab'): lsyn.Erev_gabab = float(syn_params['Erev_gabab']) # par.x(16) if syn_params.get('initW'): lsyn.initW = float(syn_params['initW']) * random.uniform(0.5,1.0) # par.x(0) * rC.uniform(0.5,1.0)//rand.normal(0.5,1.5) //`rand.repick() if syn_params.get('Wmax'): lsyn.Wmax = float(syn_params['Wmax']) * lsyn.initW # par.x(1) * lsyn.initW if syn_params.get('Wmin'): lsyn.Wmin = float(syn_params['Wmin']) * lsyn.initW # par.x(2) * lsyn.initW #delay = float(syn_params['initW']) # par.x(3) + delayDistance #lcon = new NetCon(&v(0.5), lsyn, 0, delay, 1) if syn_params.get('lambda1'): lsyn.lambda1 = float(syn_params['lambda1']) # par.x(6) if syn_params.get('lambda2'): lsyn.lambda2 = float(syn_params['lambda2']) # par.x(7) if syn_params.get('threshold1'): lsyn.threshold1 = float(syn_params['threshold1']) # par.x(8) if syn_params.get('threshold2'): lsyn.threshold2 = float(syn_params['threshold2']) # par.x(9) if syn_params.get('tauD1'): lsyn.tauD1 = float(syn_params['tauD1']) # par.x(10) if syn_params.get('d1'): lsyn.d1 = float(syn_params['d1']) # par.x(11) if syn_params.get('tauD2'): lsyn.tauD2 = float(syn_params['tauD2']) # par.x(12) if syn_params.get('d2'): lsyn.d2 = float(syn_params['d2']) # par.x(13) if syn_params.get('tauF'): lsyn.tauF = float(syn_params['tauF']) # par.x(14) if syn_params.get('f'): lsyn.f = float(syn_params['f']) # par.x(15) if syn_params.get('bACH'): lsyn.bACH = float(syn_params['bACH']) # par.x(17) if syn_params.get('aDA'): lsyn.aDA = float(syn_params['aDA']) # par.x(18) if syn_params.get('bDA'): lsyn.bDA = float(syn_params['bDA']) # par.x(19) if syn_params.get('wACH'): lsyn.wACH = float(syn_params['wACH']) # par.x(20) return lsyn def inter2pyr(syn_params, xs, secs): """Create a list of pyr2pyr synapses :param 
syn_params: parameters of a synapse :param xs: list of normalized distances along the section :param secs: target sections :return: list of NEURON synpase objects """ syns = [] for x, sec in zip(xs, secs): syn = Inter2Pyr(syn_params, x, sec) syns.append(syn) return syns def load_synapses(): add_synapse_model(Pyr2Pyr, 'pyr2pyr', overwrite=False) add_synapse_model(Pyr2Pyr, overwrite=False) add_synapse_model(Inter2Pyr, 'inter2pyr', overwrite=False) add_synapse_model(Inter2Pyr, overwrite=False) return load_synapses()
_____no_output_____
MIT
theta.ipynb
cyneuro/theta
Connections

Now we want to create connections between the cells. Depending on the model type, and on whether the presynaptic "source" cell is excitatory or inhibitory, we will use different synaptic models and parameters. Using the source and target filter parameters, we can create different connection types.

To determine the connection matrix we use the distance between cells (and, where available, properties such as tuning_angle). To do this we create customized connector functions, defined in the next cell.
import random import math # list of all synapses created - used for recurrent connections syn_list = [] ########################################################### # Build custom connection rules ########################################################### #See bmtk.builder.auxi.edge_connectors def hipp_dist_connector(source, target, con_pattern, ratio=1, gaussa=0, min_syn=1, max_syn=1): """ :returns: number of synapses per connection """ ratio = float(ratio) gaussa = float(gaussa) Lamellar = "0" Homogenous = "1" AxonalPlexus = "2" IntPyrFeedback = "3" x_ind,y_ind = 0,1 dx = target['positions'][x_ind] - source['positions'][x_ind] dy = target['positions'][y_ind] - source['positions'][y_ind] distxy = math.sqrt(dx**2 + dy**2) prob = 1 if con_pattern == Lamellar: prob = ratio/(math.exp(((abs(dx)-0)**2)/(2*(3**2)))) if con_pattern == Homogenous: prob = ratio if con_pattern == IntPyrFeedback or con_pattern == AxonalPlexus: c = ratio a = gaussa prob = a /(math.exp(((abs(distxy)-0)**2)/(2*(c**2)))) if random.random() < prob: #Since there will be recurrect connections we need to keep track externally to BMTK #BMTK will call build_edges twice if we use net.edges() before net.build() #Resulting in double the edge count syn_list.append({'source_gid':source['node_id'],'target_gid':target['node_id']}) return random.randint(min_syn,max_syn) else: return 0 ########################################################### # Build recurrent connection rules ########################################################### def hipp_recurrent_connector(source,target,all_edges=[],min_syn=1, max_syn=1): """ General logic: 1. Given a *potential* source and target 2. Look through all edges currently made 3. If any of the current edges contains a. the current source as a previous target of b. the current target as a prevous source 4. Return number of synapses per this connection, 0 otherwise (no connection) """ for e in all_edges: #if source['node_id'] == e.target_gid and target['node_id'] == e.source_gid: if source['node_id'] == e['target_gid'] and target['node_id'] == e['source_gid']: return random.randint(min_syn,max_syn) return 0 def syn_dist_delay(source, target, base_delay, dist_delay=None):#, min_weight, max_weight): """ Original Code: distDelay = 0.1* (0.5*dist + rC.normal(0,1.5)*(1-exp(-dist^2/3)) ) """ base_delay = float(base_delay) if dist_delay: dist_delay = float(dist_delay) if dist_delay: #An override of sorts return base_delay + dist_delay x_ind,y_ind,z_ind = 0,1,2 dx = target['positions'][x_ind] - source['positions'][x_ind] dy = target['positions'][y_ind] - source['positions'][y_ind] dz = target['positions'][z_ind] - source['positions'][z_ind] dist = math.sqrt(dx**2 + dy**2 + dz**2) distDelay = 0.1* (0.5*dist + np.random.normal(0,1.5,1)[0]*(1-math.exp(-dist**2/3)) ) return float(base_delay) + distDelay def syn_dist_delay_section(source, target, base_delay, dist_delay=None, sec_id=0, sec_x=0.9): return syn_dist_delay(source, target, base_delay, dist_delay), sec_id, sec_x
_____no_output_____
MIT
theta.ipynb
cyneuro/theta
The first two parameters of such a connector function are "source" and "target", and they are required for all custom connector functions. These are node objects that represent a single source cell and a single target cell, with properties that can be accessed like a Python dictionary. When the Network Builder is creating the connection matrix, it will call this function for every possible source-target pair; the user doesn't call the function directly.

The remaining parameters are optional. Using these parameters, plus the distance between the source and target cells, the function determines the number of connections between each given source-target pair. If there are none, you can return either None or 0.

To create these connections we call the add_edges method of the builder. We use the source and target parameters to filter out only the connections we want (for example, the CA3o-to-CA3e connections in the next cell). We must also take the model type (biophysical or integrate-and-fire) of the target into consideration when setting parameters. We pass the function in through the connection_rule parameter, and its parameters (except source and target) through connection_params. (If the connector function had no parameters other than source and target, we could simply omit connection_params.)
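To make the required signature concrete, here is a minimal, hypothetical connector (my own sketch, not part of this model); the notebook's real add_edges call follows in the next cell:

```python
import random

def simple_connector(source, target, p_connect=0.1, nsyn=1):
    """Hypothetical connection_rule: one source node and one target node in,
    number of synapses out (0 or None means no connection)."""
    if source['node_id'] == target['node_id']:
        return 0  # skip self-connections
    return nsyn if random.random() < p_connect else 0
```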
dynamics_file = 'CA3o2CA3e.inh.json'

conn = net.add_edges(source={'pop_name': 'CA3o'}, target={'pop_name': 'CA3e'},
                     connection_rule=hipp_dist_connector,
                     connection_params={'con_pattern': syn[dynamics_file]['con_pattern'],
                                        'ratio': syn[dynamics_file]['ratio'],
                                        'gaussa': syn[dynamics_file]['gaussa']},
                     syn_weight=1,
                     dynamics_params=dynamics_file,
                     model_template=syn[dynamics_file]['level_of_detail'],
                     distance_range=[0.0, 300.0],
                     target_sections=['soma'])

conn.add_properties(names=['delay', 'sec_id', 'sec_x'],
                    rule=syn_dist_delay_section,
                    rule_params={'base_delay': syn[dynamics_file]['delay'], 'sec_id': 0, 'sec_x': 0.9},
                    dtypes=[np.float, np.int32, np.float])
_____no_output_____
MIT
theta.ipynb
cyneuro/theta
Similarly we create the other types of connections. Since the source, the target, or both cells may not have a tuning_angle parameter, we don't want a tuning-angle-based rule here. One option is the built-in distance_connector function, which creates connections determined purely by distance (see the hedged sketch below); the next cell instead uses the hipp_recurrent_connector defined earlier, so that these edges reciprocate connections that were already made.
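For reference, a hedged sketch of what an edge set built with the built-in rule could look like. distance_connector and its parameter names follow the BMTK tutorials and may differ between versions, and the values here are placeholders (the notebook's own cell below uses hipp_recurrent_connector instead):

```python
from bmtk.builder.auxi.edge_connectors import distance_connector

net.add_edges(source={'pop_name': 'CA3e'}, target={'pop_name': 'CA3o'},
              connection_rule=distance_connector,
              connection_params={'d_weight_min': 0.0, 'd_weight_max': 0.34,
                                 'd_max': 300.0, 'nsyn_min': 0, 'nsyn_max': 10},
              syn_weight=1,
              dynamics_params='CA3e2CA3o.exc.json',
              distance_range=[0.0, 300.0],
              target_sections=['soma'])
```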
dynamics_file = 'CA3e2CA3o.exc.json'

experiment = 'original'
if experiment == "SFN19-D":
    # Weight of the synapses is set to 6 from a max weight of 2
    dynamics_file = 'CA3e2CA3o.exc.sfn19exp2d.json'

conn = net.add_edges(source={'pop_name': 'CA3e'}, target={'pop_name': 'CA3o'},
                     connection_rule=hipp_recurrent_connector,
                     connection_params={'all_edges': syn_list},  # net.edges()
                     syn_weight=1,
                     dynamics_params=dynamics_file,
                     model_template=syn[dynamics_file]['level_of_detail'],
                     distance_range=[0.0, 300.0],
                     target_sections=['soma'])

conn.add_properties(names=['delay', 'sec_id', 'sec_x'],
                    rule=syn_dist_delay_section,
                    rule_params={'base_delay': syn[dynamics_file]['delay'],
                                 'dist_delay': 0.1,  # Connect.hoc:274, 0.1 dist delay
                                 'sec_id': 0, 'sec_x': 0.9},
                    dtypes=[np.float, np.int32, np.float])
_____no_output_____
MIT
theta.ipynb
cyneuro/theta
Finally we build the network (this may take a bit of time since it's essentially iterating over all 400x400 possible connection combinations), and save the nodes and edges.
net.build()
net.save_nodes(output_dir='sim_theta/network')
net.save_edges(output_dir='sim_theta/network')
_____no_output_____
MIT
theta.ipynb
cyneuro/theta
Building the external network

Next we want to create an external network consisting of virtual cells that form a feedforward projection onto our recurrent network, which will provide input during the simulation. (In the visual-cortex example this external population would be the LGN, the primary input to the layer 4 cells of V1; if we wanted to, we could also create multiple external networks and run simulations on any number of them.) First we build the virtual nodes, then we import the network nodes built above and create connections from the virtual cells onto them.
from bmtk.builder.networks import NetworkBuilder

exp0net = NetworkBuilder('exp0net')
exp0net.add_nodes(N=CA3eTotal,
                  model_type='virtual',
                  pop_name='bgnoisevirtCA3',
                  pop_group='bgnoisevirtCA3')
_____no_output_____
MIT
theta.ipynb
cyneuro/theta
As before, we will use a customized function to determine the number of connections between each source and target pair; however, this time our connection_rule is a bit different.

In the previous example, our connection_rule function's first two arguments were the presynaptic and postsynaptic cells, which allowed us to choose how many synaptic connections existed between each pair based on individual properties:

```python
def connection_fnc(source, target, ...):
    source['param']  # presynaptic cell params
    target['param']  # postsynaptic cell params
    ...
    return nsyns     # number of connections between the pair
```

But for our external-to-recurrent connections we do things a bit differently. We want to make sure that for every source cell there is a limited number of postsynaptic targets. This is not really possible with a function that iterates on a one-to-one basis. So instead we use a connector function whose first parameter is a list of all N source cells and whose second parameter is a single target cell. It returns an array of N integers, with each index representing the number of synapses between that source and the target. To tell the builder to use this schema, we set iterator='all_to_one' in the add_edges method. (By default this is set to 'one_to_one'. You can also use the 'one_to_all' iterator, which passes in a single source and all possible targets; that is what the cell below actually uses.)
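For illustration, a hypothetical connector following the 'all_to_one' schema described above (my own sketch, not used by this notebook; the next cell uses the 'one_to_all' form instead):

```python
import numpy as np

def pick_random_sources(sources, target, nsources=10, nsyn=1):
    """'sources' is the list of every candidate source node, 'target' is a single
    target node; return one synapse count per source."""
    nsyns = np.zeros(len(sources), dtype=int)
    chosen = np.random.choice(len(sources), size=min(nsources, len(sources)), replace=False)
    nsyns[chosen] = nsyn
    return nsyns
```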
def target_ind_equals_source_ind(source, targets, offset=0, min_syn=1, max_syn=1):
    # Creates a 1-to-1 mapping between source and destination nodes
    total_targets = len(targets)
    syns = np.zeros(total_targets)
    target_index = source['node_id']
    syns[target_index - offset] = 1
    return syns

conn = exp0net.add_edges(target=net.nodes(pop_name='CA3e'),
                         source={'pop_name': 'bgnoisevirtCA3'},
                         iterator='one_to_all',
                         connection_rule=target_ind_equals_source_ind,
                         connection_params={'offset': 0},
                         dynamics_params='NetCon2EC.exc.json',
                         model_template='pyr2pyr',
                         delay=0,
                         syn_weight=1)

conn.add_properties(['sec_id', 'sec_x'], rule=(0, 0.9), dtypes=[np.int32, np.float])

exp0net.build()
exp0net.save_nodes(output_dir='sim_theta/network')
exp0net.save_edges(output_dir='sim_theta/network')
_____no_output_____
MIT
theta.ipynb
cyneuro/theta
2. Setting up the BioNet file structure

Before running a simulation, we need to create the runtime environment, including parameter files, a run script and configuration files. You can copy the files from an existing simulation, or execute a command like the following:

```bash
$ python -m bmtk.utils.sim_setup \
    --report-vars v \
    --report-nodes 10,80 \
    --network sim_theta/network \
    --dt 0.1 \
    --tstop 3000.0 \
    --include-examples \
    --compile-mechanisms \
    bionet sim_ch04
```

For this notebook the call is:

```bash
$ python -m bmtk.utils.sim_setup --report-vars v --report-nodes 0,80,100,300 --network sim_theta/network --dt 0.1 --tstop 3000.0 --include-examples --compile-mechanisms bionet sim_theta
```

or run it directly in Python:
from bmtk.utils.sim_setup import build_env_bionet

build_env_bionet(base_dir='sim_theta',
                 network_dir='sim_theta/network',
                 tstop=3000.0, dt=0.1,
                 report_vars=['v'],        # Record membrane potential (default: soma)
                 include_examples=True,    # Copies components files
                 compile_mechanisms=True   # Will try to compile NEURON mechanisms
                 )
ERROR:bmtk.utils.sim_setup: Was unable to compile mechanism in C:\Users\Tyler\Desktop\git_stage\theta\sim_theta\components\mechanisms
MIT
theta.ipynb
cyneuro/theta
This will fill out **sim_theta** with all the files we need to get started running the simulation. Of interest:

* **circuit_config.json** - A configuration file that contains the location of the network files we created above, plus the locations of the neuron and synaptic models, templates, morphologies and mechanisms required to instantiate the individual cell models.
* **simulation_config.json** - Contains information about the simulation, including initial conditions and run-time configuration (_run_ and _conditions_). In the _inputs_ section we define what external sources we will use to drive the network, and in the _reports_ section we define the variables (for example, soma membrane potential) that will be recorded during the simulation.
* **run_bionet.py** - A script for running our simulation. Usually this file doesn't need to be modified.
* **components/biophysical_neuron_models/** - The parameter files for the cells we're modeling. Originally [downloaded from the Allen Cell Types Database](http://celltypes.brain-map.org/neuronal_model/download/482934212). These files were automatically copied over when we used the _include-examples_ directive. If using a different or extended set of cell models, place them here.
* **components/morphologies/** - The morphology files for our cells. Originally [downloaded from the Allen Cell Types Database](http://celltypes.brain-map.org/neuronal_model/download/482934212) and copied over by _include-examples_.
* **components/point_neuron_models/** - The parameter files for our LIF_exc and LIF_inh cells.
* **components/synaptic_models/** - Parameter files used to create the different types of synapses.

LGN input

We need to provide our external (LGN-like) network cells with spike trains so they can activate our recurrent network. Previously we showed how to do this by generating csv files. We can also use NWB files, which are a common format for saving electrophysiological data in neuroscience. We can use any NWB file generated experimentally or computationally, but for this example we will use a preexisting one. First download the file:

```bash
$ wget https://github.com/AllenInstitute/bmtk/blob/develop/docs/examples/spikes_inputs/lgn_spikes.nwb?raw=true
```

or copy it from [here](https://github.com/AllenInstitute/bmtk/tree/develop/docs/examples/spikes_inputs/lgn_spikes.nwb). Then we must edit the **simulation_config.json** file to tell the simulator where to find the nwb file and which network to associate it with:

```json
{
  "inputs": {
    "LGN_spikes": {
      "input_type": "spikes",
      "module": "nwb",
      "input_file": "$BASE_DIR/lgn_spikes.nwb",
      "node_set": "LGN",
      "trial": "trial_0"
    }
  }
}
```

3. Running the simulation

We are close to running our simulation; however, unlike in previous chapters, we need a little more programming before we can begin. For most of the connections we added the parameter weight_function='wmax'. This is a built-in function that tells the simulator, when creating a connection between two cells, to simply use the 'weight_max' value assigned to that edge type. However, when creating excitatory-to-excitatory connections we used weight_function='gaussianLL', because we want to use the tuning_angle parameter, when available, to determine the synaptic strength between two cells. First we create the function, which takes in the target, source and connection properties (which are just the edge type and the properties set in the add_edges method). Then we register the function with the BioNet simulator:
import math

from bmtk.simulator.bionet.pyfunction_cache import add_weight_function


def gaussianLL(edge_props, source, target):
    src_tuning = source['tuning_angle']
    tar_tuning = target['tuning_angle']
    w0 = edge_props["syn_weight"]
    sigma = edge_props["weight_sigma"]
    delta_tuning = abs(abs(abs(180.0 - abs(float(tar_tuning) - float(src_tuning)) % 360.0) - 90.0) - 90.0)
    return w0 * math.exp(-(delta_tuning / sigma) ** 2)

add_weight_function(gaussianLL)
_____no_output_____
MIT
theta.ipynb
cyneuro/theta
The weights will be adjusted before each simulation, and the function can be changed between runs: simply opening the edge_types.csv file with a text editor (or pandas, as sketched below) and altering the weight_function column lets users take an existing network and readjust weights on the fly.

Finally we are ready to run the simulation. Note that because this is a 400-cell simulation, it may be computationally intensive for older computers and may take anywhere from a few minutes to half an hour to complete.
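A hedged sketch of the edge_types.csv adjustment described above, done with pandas instead of a text editor. The file name is a placeholder (it depends on the network names used when saving), and the space delimiter is an assumption about how the type CSVs are written:

```python
import pandas as pd

edge_types_csv = 'sim_theta/network/net_net_edge_types.csv'   # placeholder path
edge_types = pd.read_csv(edge_types_csv, sep=' ')
edge_types['weight_function'] = 'gaussianLL'                  # any function registered with BioNet
edge_types.to_csv(edge_types_csv, sep=' ', index=False)
```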
from bmtk.simulator import bionet

conf = bionet.Config.from_json('sim_theta/simulation_config.json')
conf.build_env()

net = bionet.BioNetwork.from_config(conf)
sim = bionet.BioSimulator.from_config(conf, network=net)
sim.run()
2020-09-28 22:46:28,632 [INFO] Created log file
MIT
theta.ipynb
cyneuro/theta
4. Analyzing results

Results of the simulation, as specified in the config, are saved into the output directory. Using the analyzer functions, we can do things like plot the spike raster:
from bmtk.analyzer.spike_trains import plot_raster, plot_rates_boxplot

plot_raster(config_file='sim_theta/simulation_config.json', group_by='pop_name')
c:\users\tyler\desktop\git_stage\bmtk\bmtk\simulator\utils\config.py:4: UserWarning: Please use bmtk.simulator.core.simulation_config instead. warnings.warn('Please use bmtk.simulator.core.simulation_config instead.')
MIT
theta.ipynb
cyneuro/theta
and the rates of each node
plot_rates_boxplot(config_file='sim_theta/simulation_config.json', group_by='pop_name')
_____no_output_____
MIT
theta.ipynb
cyneuro/theta
In our config file we used the reports section (report_vars) to save the membrane potential of selected cells. We can also use the analyzer to display these traces:
from bmtk.analyzer.compartment import plot_traces

_ = plot_traces(config_file='sim_theta/simulation_config.json', group_by='pop_name', report_name='v_report')
_____no_output_____
MIT
theta.ipynb
cyneuro/theta
Problem setup

In the cart-pole (CartPole) game, we want to use reinforcement learning to train an agent that keeps moving the cart left and right so that the pole on the cart does not fall over. We first define the CartPole game: CartPole is the environment of the reinforcement-learning model. It interacts with the agent, updates the state in real time, and internally defines the reward function. The state is defined as follows; each dimension of the state represents:

- the cart position, ranging from -2.4 to 2.4
- the cart velocity, ranging from negative infinity to positive infinity
- the pole angle, ranging from -41.8° to 41.8°
- the pole angular velocity, ranging from negative infinity to positive infinity

The action is a 2-dimensional vector whose dimensions represent moving left and moving right:

$$action \in \mathbb{R}^2$$

DQN

We will design a network to serve as the state-action value function: its input is the state and its output is the value of each corresponding action. It is trained iteratively with TD (Temporal Difference) updates until convergence. We define two such networks, denoted $\theta$ and $\theta^-$, representing the estimation network and the target network respectively.

We want to minimize:

$$\left( y_j - Q \left( \phi_j, a_j; \theta \right) \right)^2$$

where $a_j$ has the form:

$$a_j = \mathrm{argmax}_{a} Q \left( \phi(s_j), a; \theta\right)$$

and $y_j$ has the form:

$$y_j=\begin{cases}r_j & \text{if episode ends at } j + 1\\r_j + \gamma \max_{a^{\prime}} \hat{Q} \left( \phi_{j+1}, a^{\prime}; \theta^{-} \right)& \text{otherwise}\end{cases}$$

When minimizing the TD error, we freeze the target network and backpropagate gradients only through the estimation network; after a fixed number of iterations, we copy the estimation network's weights into the target network. This process relies on experience replay: each observed tuple $(s_t, a_t, r_t, s_{t+1})$ is stored in a buffer, and minibatches are sampled at random from this buffer for batch gradient descent.

Code implementation
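To make the update above concrete, here is a small hedged sketch (my own, not a cell from this notebook) of how the TD target $y_j$ can be computed for a sampled minibatch with NumPy; all array values are made up for illustration:

```python
import numpy as np

# y_j = r_j                                          if the episode ends at j+1
# y_j = r_j + gamma * max_a' Q_target(s_{j+1}, a')   otherwise
gamma = 0.9                                  # example discount factor
r = np.array([1.0, 1.0, -5.0])               # rewards r_j of the sampled transitions
done = np.array([False, False, True])        # did the episode end at step j+1?
q_target_next = np.array([[0.2, 0.7],        # target-network Q-values for s_{j+1}
                          [0.1, 0.4],
                          [0.0, 0.0]])

y = r + gamma * q_target_next.max(axis=1) * (~done)   # terminal steps keep only r_j
```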
# coding=utf-8 import tensorflow as tf import numpy as np import gym import sys sys.path.append('..') from base.model import * %matplotlib inline class Agent(BaseRLModel): def __init__(self, session, env, a_space, s_space, **options): super(Agent, self).__init__(session, env, a_space, s_space, **options) self._init_input() self._init_nn() self._init_op() self._init_saver() self.buffer = np.zeros((self.buffer_size, self.s_space + 1 + 1 + self.s_space)) self.buffer_count = 0 self.total_train_step = 0 self.update_target_net_step = 200 self.session.run(tf.global_variables_initializer()) def _init_input(self, *args): with tf.variable_scope('input'): self.s_n = tf.placeholder(tf.float32, [None, self.s_space]) self.s = tf.placeholder(tf.float32, [None, self.s_space]) self.r = tf.placeholder(tf.float32, [None, ]) self.a = tf.placeholder(tf.int32, [None, ]) def _init_nn(self, *args): with tf.variable_scope('actor_net'): # w,b initializer w_initializer = tf.random_normal_initializer(mean=0.0, stddev=0.3) b_initializer = tf.constant_initializer(0.1) with tf.variable_scope('predict_q_net'): phi_state = tf.layers.dense(self.s, 32, tf.nn.relu, kernel_initializer=w_initializer, bias_initializer=b_initializer) self.q_predict = tf.layers.dense(phi_state, self.a_space, kernel_initializer=w_initializer, bias_initializer=b_initializer) with tf.variable_scope('target_q_net'): phi_state_next = tf.layers.dense(self.s_n, 32, tf.nn.relu, kernel_initializer=w_initializer, bias_initializer=b_initializer) self.q_target = tf.layers.dense(phi_state_next, self.a_space, kernel_initializer=w_initializer, bias_initializer=b_initializer) def _init_op(self): with tf.variable_scope('q_real'): # size of q_value_real is [BATCH_SIZE, 1] max_q_value = tf.reduce_max(self.q_target, axis=1) q_next = self.r + self.gamma * max_q_value self.q_next = tf.stop_gradient(q_next) with tf.variable_scope('q_predict'): # size of q_value_predict is [BATCH_SIZE, 1] action_indices = tf.stack([tf.range(tf.shape(self.a)[0], dtype=tf.int32), self.a], axis=1) self.q_eval = tf.gather_nd(self.q_predict, action_indices) with tf.variable_scope('loss'): self.loss_func = tf.reduce_mean(tf.squared_difference(self.q_next, self.q_eval, name='mse')) with tf.variable_scope('train'): self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_func) with tf.variable_scope('update_target_net'): t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_net') p_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='predict_q_net') self.update_q_net = [tf.assign(t, e) for t, e in zip(t_params, p_params)] def predict(self, s): if np.random.uniform() < self.epsilon: a = np.argmax(self.session.run(self.q_predict, feed_dict={self.s: s[np.newaxis, :]})) else: a = np.random.randint(0, self.a_space) return a def snapshot(self, s, a, r, s_n): self.buffer[self.buffer_count % self.buffer_size, :] = np.hstack((s, [a, r], s_n)) self.buffer_count += 1 def train(self): if self.total_train_step % self.update_target_net_step == 0: self.session.run(self.update_q_net) batch = self.buffer[np.random.choice(self.buffer_size, size=self.batch_size), :] s = batch[:, :self.s_space] s_n = batch[:, -self.s_space:] a = batch[:, self.s_space].reshape((-1)) r = batch[:, self.s_space + 1] _, cost = self.session.run([self.train_op, self.loss_func], { self.s: s, self.a: a, self.r: r, self.s_n: s_n }) def run(self): if self.mode == 'train': for episode in range(self.train_episodes): s, r_episode = self.env.reset(), 0 while True: # if episode > 400: # 
self.env.render() a = self.predict(s) s_n, r, done, _ = self.env.step(a) if done: r = -5 r_episode += r self.snapshot(s, a, r_episode, s_n) s = s_n if done: break if self.buffer_count > self.buffer_size: self.train() if episode % 200 == 0: self.logger.warning('Episode: {} | Rewards: {}'.format(episode, r_episode)) self.save() else: for episode in range(self.eval_episodes): s, r_episode = self.env.reset() while True: a = self.predict(s) s_n, r, done, _ = self.env.step(a) r_episode += r s = s_n if done: break def main(_): # Make env. env = gym.make('CartPole-v0') env.seed(1) env = env.unwrapped # Init session. session = tf.Session() # Init agent. agent = Agent(session, env, env.action_space.n, env.observation_space.shape[0], **{ KEY_MODEL_NAME: 'DQN', KEY_TRAIN_EPISODE: 3000 }) agent.run() main(_)
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
MIT
note/DQN.ipynb
Ceruleanacg/Learning-Notes
XGBoost
xgb_params = { 'max_depth' : 5, 'n_estimators' : 50, 'learning_rate' : 0.1, 'seed' : 0 } model = xgb.XGBRegressor(**xgb_params) run_model(model,cat_feats) m = xgb.XGBRegressor(**xgb_params) m.fit(X,Y) imp = PermutationImportance(m,random_state = 0).fit(X,Y) eli5.show_weights(imp,feature_names = cat_feats) feats = ['param_napęd__cat', 'param_rok-produkcji__cat', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc__cat', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'seller_name__cat', 'param_pojemność-skokowa__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] run_model(xgb.XGBRegressor(**xgb_params),feats) df['param_napęd'].unique() df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x)) feats = ['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc__cat', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'seller_name__cat', 'param_pojemność-skokowa__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] run_model(xgb.XGBRegressor(**xgb_params),feats) df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0])) feats = ['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'seller_name__cat', 'param_pojemność-skokowa__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] run_model(xgb.XGBRegressor(**xgb_params),feats) df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ',''))) feats = ['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'seller_name__cat', 'param_pojemność-skokowa', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] run_model(xgb.XGBRegressor(**xgb_params),feats)
_____no_output_____
MIT
Day4.ipynb
JoachimMakowski/DataScienceMatrix2
Data Science Unit 1 Sprint Challenge 2: Data Wrangling and Storytelling

Taming data from its raw form into informative insights and stories.

Data Wrangling

In this Sprint Challenge you will first "wrangle" some data from [Gapminder](https://www.gapminder.org/about-gapminder/), a Swedish non-profit co-founded by Hans Rosling. "Gapminder produces free teaching resources making the world understandable based on reliable statistics."

- [Cell phones (total), by country and year](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--cell_phones_total--by--geo--time.csv)
- [Population (total), by country and year](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv)
- [Geo country codes](https://github.com/open-numbers/ddf--gapminder--systema_globalis/blob/master/ddf--entities--geo--country.csv)

These two links have everything you need to successfully complete the first part of this sprint challenge:

- [Pandas documentation: Working with Text Data](https://pandas.pydata.org/pandas-docs/stable/text.html) (one question)
- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf) (everything else)

Part 0. Load data

You don't need to add or change anything here. Just run this cell and it loads the data for you, into three dataframes.
import pandas as pd

cell_phones = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--cell_phones_total--by--geo--time.csv')

population = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv')

geo_country_codes = (pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv')
                     .rename(columns={'country': 'geo', 'name': 'country'}))
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Part 1. Join data

First, join the `cell_phones` and `population` dataframes (with an inner join on `geo` and `time`).

The resulting dataframe's shape should be: (8590, 4)
cell_phones.head()
population.head()
geo_country_codes.head()

# Join the cell_phones and population dataframes (with an inner join on geo and time).
df = pd.merge(cell_phones, population, on=['geo', 'time'], how='inner')
print(df.shape)
df.head()
(8590, 4)
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Then, select the `geo` and `country` columns from the `geo_country_codes` dataframe, and join with your population and cell phone data.

The resulting dataframe's shape should be: (8590, 5)
geo_country = geo_country_codes[['geo', 'country']]
geo_country.head()

df_merged = pd.merge(df, geo_country, on='geo')
print(df_merged.shape)
df_merged.head()
(8590, 5)
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Part 2. Make features

Calculate the number of cell phones per person, and add this column onto your dataframe.

(You've calculated correctly if you get 1.220 cell phones per person in the United States in 2017.)
df_merged['cellphone_person'] = df_merged['cell_phones_total'] / df_merged['population_total']
df_merged.head()
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Modify the `geo` column to make the geo codes uppercase instead of lowercase.
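A minimal sketch of the uppercase conversion this step asks for (the notebook's next cell instead re-checks the US 2017 value from the previous step); it assumes the merged frame is named df_merged as above:

```python
df_merged['geo'] = df_merged['geo'].str.upper()
df_merged[['geo', 'country']].head()
```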
df_merged[(df_merged['country'] == 'United States') & (df_merged['time'] ==2017 )]
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Part 3. Process data

Use the describe function to describe your dataframe's numeric columns, and then its non-numeric columns.

(You'll see the time period ranges from 1960 to 2017, and there are 195 unique countries represented.)
import numpy as np

# Describe the dataframe's numeric columns
df_merged.describe()

# ... and then its non-numeric columns
df_merged.describe(exclude=[np.number])
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
In 2017, what were the top 5 countries with the most cell phones total?

Your list of countries should have these totals:

| country | cell phones total |
|:-------:|:-----------------:|
| ? | 1,474,097,000 |
| ? | 1,168,902,277 |
| ? | 458,923,202 |
| ? | 395,881,000 |
| ? | 236,488,548 |
# This optional code formats float numbers with comma separators
pd.options.display.float_format = '{:,}'.format

year2017 = df_merged[df_merged.time == 2017]
year2017.head()

# Code to check the values
year2017.sort_values('cell_phones_total', ascending=False)

# Make top5
top5_all = year2017.nlargest(5, 'cell_phones_total')
top5 = top5_all[['country', 'cell_phones_total']]
top5.head()
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
2017 was the first year that China had more cell phones than people.

What was the first year that the USA had more cell phones than people?
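As a compact alternative to the step-by-step cells below, here is a short sketch (mine, not the notebook's) that answers the question directly, assuming df_merged from earlier:

```python
usa = df_merged[df_merged['country'] == 'United States']
usa.loc[usa['cell_phones_total'] > usa['population_total'], 'time'].min()
```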
order_celphones = df_merged.sort_values('cell_phones_total', ascending=False)
order_celphones.head(10)

country_usa = df_merged[(df_merged['country'] == 'United States')]
country_usa.head()
# country_usa.country.unique()

condition_usa = country_usa[(country_usa['cell_phones_total'] > country_usa['population_total'])]
condition_usa.head()

# One way to do it: What was the first year that the USA had more cell phones than people?
order_usa = condition_usa.sort_values('time', ascending=True)
order_usa.head(1)

# Second way to do it: What was the first year that the USA had more cell phones than people?
condition_usa.nsmallest(1, 'time')
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Part 4. Reshape data

Create a pivot table:
- Columns: Years 2007—2017
- Rows: China, India, United States, Indonesia, Brazil (order doesn't matter)
- Values: Cell Phones Total

The table's shape should be: (5, 11)
years = [2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017]
countries = ['China', 'India', 'United States', 'Indonesia', 'Brazil']

# countries_pivot = df_merged.loc[df_merged['country'].isin(countries)]
years_merged = df_merged.loc[df_merged['time'].isin(years) & df_merged['country'].isin(countries)]
years_merged.head()

pivot_years = years_merged.pivot_table(index='country', columns='time',
                                       values='cell_phones_total', aggfunc='sum')
pivot_years.head()
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Sort these 5 countries by biggest increase in cell phones from 2007 to 2017.

Which country had 935,282,277 more cell phones in 2017 versus 2007?
flat_pivot = pd.DataFrame(pivot_years.to_records())
flat_pivot.head()

flat_pivot['Percentage Change'] = (flat_pivot['2017'] - flat_pivot['2007']) / flat_pivot['2017']
flat_pivot.head()

# ANSWER: Sort these 5 countries by biggest increase in cell phones from 2007 to 2017.
flat_pivot.sort_values('Percentage Change', ascending=False)

# Which country had 935,282,277 more cell phones in 2017 versus 2007?
flat_pivot['Change'] = (flat_pivot['2017'] - flat_pivot['2007'])
flat_pivot.head()
flat_pivot[flat_pivot['Change'] == 935282277]
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Data Storytelling

In this part of the sprint challenge you'll work with a dataset from **FiveThirtyEight's article, [Every Guest Jon Stewart Ever Had On ‘The Daily Show’](https://fivethirtyeight.com/features/every-guest-jon-stewart-ever-had-on-the-daily-show/)**!

Part 0 — Run this starter code

You don't need to add or change anything here. Just run this cell and it loads the data for you, into a dataframe named `df1`.

(You can explore the data if you want, but it's not required to pass the Sprint Challenge.)
%matplotlib inline

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

url = 'https://raw.githubusercontent.com/fivethirtyeight/data/master/daily-show-guests/daily_show_guests.csv'
df1 = pd.read_csv(url).rename(columns={'YEAR': 'Year', 'Raw_Guest_List': 'Guest'})

def get_occupation(group):
    if group in ['Acting', 'Comedy', 'Musician']:
        return 'Acting, Comedy & Music'
    elif group in ['Media', 'media']:
        return 'Media'
    elif group in ['Government', 'Politician', 'Political Aide']:
        return 'Government and Politics'
    else:
        return 'Other'

df1['Occupation'] = df1['Group'].apply(get_occupation)
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Part 1 — What's the breakdown of guests’ occupations per year?

For example, in 1999, what percentage of guests were actors, comedians, or musicians? What percentage were in the media? What percentage were in politics? What percentage were from another occupation?

Then, what about in 2000? In 2001? And so on, up through 2015.

So, **for each year of _The Daily Show_, calculate the percentage of guests from each occupation:**
- Acting, Comedy & Music
- Government and Politics
- Media
- Other

Hints:

You can make a crosstab. (See pandas documentation for examples, explanation, and parameters.)

You'll know you've calculated correctly when the percentage of "Acting, Comedy & Music" guests is 90.36% in 1999, and 45% in 2015.
print(df1.shape)
df1.head()

crosstab_profession = pd.crosstab(df1['Year'], df1['Occupation'], normalize='index').round(4) * 100
crosstab_profession.head(20)
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Part 2 — Recreate this explanatory visualization:

**Hints:**
- You can choose any Python visualization library you want. I've verified the plot can be reproduced with matplotlib, pandas plot, or seaborn. I assume other libraries like altair or plotly would work too.
- If you choose to use seaborn, you may want to upgrade the version to 0.9.0.

**Expectations:** Your plot should include:
- 3 lines visualizing "occupation of guests, by year." The shapes of the lines should look roughly identical to 538's example. Each line should be a different color. (But you don't need to use the _same_ colors as 538.)
- Legend or labels for the lines. (But you don't need each label positioned next to its line or colored like 538.)
- Title in the upper left: _"Who Got To Be On 'The Daily Show'?"_ with more visual emphasis than the subtitle. (Bolder and/or larger font.)
- Subtitle underneath the title: _"Occupation of guests, by year"_

**Optional Bonus Challenge:**
- Give your plot polished aesthetics, with improved resemblance to the 538 example.
- Any visual element not specifically mentioned in the expectations is an optional bonus.
from IPython.display import display, Image

png = 'https://fivethirtyeight.com/wp-content/uploads/2015/08/hickey-datalab-dailyshow.png'
example = Image(png, width=500)
display(example)

import seaborn as sns
sns.__version__

flat_df1 = pd.DataFrame(crosstab_profession.to_records())
flat_df1
flat_df1 = flat_df1.drop(['Other'], axis=1)
flat_df1.head()

from matplotlib.ticker import AutoMinorLocator, MultipleLocator, FuncFormatter

sns.set(style="ticks")
sns.set(rc={'axes.facecolor': '#F0F0F0', 'figure.facecolor': '#F0F0F0'})

plt.figure(figsize=(8, 5.5))
ax = sns.lineplot(data=flat_df1, y='Media', x='Year', color='#6f2aa1', linewidth=2.5)
ax = sns.lineplot(data=flat_df1, y='Government and Politics', x='Year', color='red', linewidth=2.5)
ax = sns.lineplot(data=flat_df1, y='Acting, Comedy & Music', x='Year', color='#2bb1f0', linewidth=2.5)

plt.suptitle('Who Got To Be On The Daily Show?', x=0.35, y=1.04, fontweight="bold")
plt.title('Occupation of guests, by year', x=0.2, y=1.06)

ax.spines['right'].set_color('#F0F0F0')
ax.spines['left'].set_color('#F0F0F0')
ax.spines['bottom'].set_color('#bdbdbd')
ax.spines['top'].set_color('#F0F0F0')

ax.set_xlim(1999, 2015)
ax.set_ylim(0, 108)
ax.set_xlabel("")
ax.set_ylabel("")
ax.grid(linestyle="-", linewidth=0.5, color='#bdbdbd', zorder=-10)

ax.yaxis.set_major_locator(MultipleLocator(25))
ax.yaxis.set_minor_locator(AutoMinorLocator(100))
ax.tick_params(which='major', width=0.25)
ax.tick_params(which='major', length=1.0)
ax.tick_params(which='minor', width=0.1, labelsize=10)
ax.tick_params(which='minor', length=5, labelsize=10, labelcolor='0.25')

plt.text(x=2008, y=50, s='Media', color='#6f2aa1', weight='bold')
plt.text(x=2010, y=5, s='Government and Politics', color='red', weight='bold')
plt.text(x=2001, y=75, s='Acting, Comedy & Music', color='#2bb1f0', weight='bold');
_____no_output_____
MIT
DS7_Unit_1_Sprint_Challenge_2_Data_Wrangling_and_Storytelling_(3).ipynb
johanaluna/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
Sicherman Dice

*Note: This notebook takes the form of a conversation between two problem solvers. One speaks in* **bold**, *the other in* plain. *Also note, for those who are not native speakers of English: "dice" is the plural form; "die" is the singular.*

Huh. This is interesting. You know how in many games, such as craps or Monopoly, you roll two regular dice and add them up. Only the sum matters, not what either of the individual dice shows.

**Right.**

And some of those sums, like 8, can be made multiple ways, while 2 and 12 can only be made one way.

**Yeah. 8 can be made 5 ways, so it has a 5/36 probability of occurring.**

The interesting thing is that people have been playing dice games for 7,000 years. But it wasn't until 1977 that Colonel George Sicherman asked whether it is possible to have a pair of dice that are not regular dice&mdash;that is, they don't have (1, 2, 3, 4, 5, 6) on the six sides&mdash;but have the same distribution of sums as a regular pair&mdash;so the pair of dice would also have to have 5 ways of making 8, but they could be different ways; maybe 7+1 could be one way. Sicherman assumes that each side bears a positive integer.

**And what did he find?**

Wouldn't it be more fun to figure it out for ourselves?

**OK!**

How could we proceed?

**When in doubt, [use brute force](http://quotes.lifehack.org/quote/ken-thompson/when-in-doubt-use-brute-force/): we can write a program to enumerate the possibilities:**

- **Generate all dice that could possibly be part of a solution, such as (1, 2, 2, 4, 8, 9).**
- **Consider all pairs of these dice, such as ((1, 3, 4, 4, 5, 8), (1, 2, 2, 3, 3, 4)).**
- **See if we find any pairs that are not the regular pair, but do have the same distribution of sums as the regular pair.**

That's great. I can code up your description almost verbatim. I'll also keep track of our TO DO list:
def sicherman():
    """The set of pairs of 6-sided dice that have the same distribution of sums
    as a regular pair of dice."""
    return {pair for pair in pairs(all_dice())
            if pair != regular_pair and sums(pair) == regular_sums}

# TODO: pairs, all_dice, regular_pair, sums, regular_sums
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**Looks good to me.**

Now we can tick off the items in the TO DO list. The function `pairs` is first, and it is easy:
def pairs(collection):
    "Return all pairs (A, B) from collection where A <= B."
    return [(A, B) for A in collection for B in collection if A <= B]

# TODO: all_dice, regular_pair, sums, regular_sums
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**That's good. We could have used the library function `itertools.combinations_with_replacement`, but let's just leave it as is. We should test to make sure it works:**
pairs(['A', 'B', 'C'])
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
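As an aside (not a cell from the original notebook), the `itertools.combinations_with_replacement` alternative mentioned above produces the same unordered pairs:

```python
from itertools import combinations_with_replacement

list(combinations_with_replacement(['A', 'B', 'C'], 2))  # same pairs as pairs() above
```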
TO DO: `sums(pair)`

Now for `sums`: we need some way to represent all the 36 possible sums from a pair of dice. We want a representation that will be the same for two different pairs if all 36 sums are the same, but where the order or composition of the sums doesn't matter.

**So we want a set of the sums?**

Well, it can't be a set, because we need to know that 8 can be made 5 ways, not just that 8 is a member of the set. The technical term for a collection where order doesn't matter but where you can have repeated elements is a **bag**, or sometimes a [**multiset**](https://en.wikipedia.org/wiki/Multiset). For example, the regular pair of dice makes two 11s with 5+6 and 6+5, and another pair could make two 11s with 7+4 and 3+8. Can you think of a representation that will do that?

**Well, the easiest is just a sorted list or tuple&mdash;if we don't want order to matter, sorting takes care of that. Another choice would be a dictionary of {sum: count} entries, like {2: 1, 3: 2, ... 11: 2, 12: 1}. There is even a library class, `collections.Counter`, that does exactly that.**

How do we choose between the two representations?

**I don't think it matters much. Since there are only 36 entries, I think the sorted list will be simpler, and probably more efficient. For 100-sided dice I'd probably go with the Counter.**

OK, here's some code implementing `sums` as a sorted list, along with definitions for the regular die, the regular pair, and its sums.

By the way, I could have used `range(1, 7)` to define a regular die, but `range` is zero-based and regular dice are one-based, so I defined the function `ints` instead.
def sums(pair):
    "All possible sums of a side from one die plus a side from the other."
    (A, B) = pair
    return Bag(a + b for a in A for b in B)

Bag = sorted  # Implement a bag as a sorted list

def ints(start, end):
    "A tuple of the integers from start to end, inclusive."
    return tuple(range(start, end + 1))

regular_die = ints(1, 6)
regular_pair = (regular_die, regular_die)
regular_sums = sums(regular_pair)

# TODO: all_dice
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
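A quick illustration of the "bag" idea (my own aside, not a cell from the original notebook): two collections represent the same bag exactly when their sorted lists, or equivalently their Counters, compare equal.

```python
from collections import Counter

sorted([5, 3, 3]) == sorted([3, 5, 3])    # True:  same bag
Counter([5, 3, 3]) == Counter([3, 5, 3])  # True:  same bag
sorted([5, 3, 3]) == sorted([3, 5, 5])    # False: different bags
```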
Let's check the `regular_sums`:
len(regular_sums)
print(regular_sums)
[2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 11, 11, 12]
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**And we can see what that would look like to a `Counter`:**
from collections import Counter

Counter(regular_sums)
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**Looks good! Now only one more thing on our TODO list:**

TO DO: `all_dice()`

`all_dice` should generate all possible dice, where by "possible" I mean the dice that could feasibly be part of a pair that is a solution to the Sicherman problem. Do we know how many dice that will be? Is it a large enough number that efficiency will be a problem?

**Let's see. A die has six sides. If each side can be a number from, say, 1 to 10, that's 10<sup>6</sup> or a million possible dice; a million is a small number for a computer.**

True, a million is a relatively small number for `all_dice()`, but how many `pairs(all_dice())` will there be?

**Ah. A million squared is a trillion. That's a large number even for a computer. Just counting to a trillion takes hours in Python; checking a trillion pairs will take days.**

So we need to get rid of most of the dice. What about permutations?

**Good point. If I have the die (1, 2, 3, 4, 5, 6), then I don't need the 6! = 720 different permutations of this die&mdash;that is, dice like (2, 4, 6, 1, 3, 5). Each die should be a bag (I learned a new word!) of sides. So we've already eliminated 719/720 = 99.9% of the work.**

One other thing bothers me ... how do you know that the sides can range from 1 to 10? Are you sure that 11 can't be part of a solution? Or 12?

**Every side on every die must be a positive integer, right?**

Right. No zeroes, no negative numbers, no fractions.

**Then I know for sure that 12 can't be on any die, because when you add 12 to whatever is on the other die, you would get at least 13, and 13 is not allowed in the regular distribution of sums.**

Good. How about 11?

**We can't have a sum that is bigger than 12. So if one die had an 11, the other would have to have all 1s. That wouldn't work, because then we'd have six 12s, but we only want one. So 10 is the biggest allowable number on a die.**

What else can we say about the biggest number on a die?

**There's one 12 in the sums. But there are several ways to make a 12: 6+6 or 7+5 or 8+4, and so on. So I can't say for sure what the biggest number on any one die will be. But I can say that whatever the biggest number on a die is, it will be involved in summing to 12, so there can be only one of them, because we only want to make one 12.**

What about the smallest number on a die?

**Well, there's only one 2 allowed in the sums. The only way to sum to 2 is 1+1: a 1 from each of the dice in the pair. If a die had no 1s, we wouldn't get a 2; if a die had more than one 1, we would get too many 2s. So every die has to have exactly one 1.**

Good. So each die has exactly one 1, and exactly one of whatever the biggest number is, something in the range up to 10. Here's a picture of the six sides of any one die:

1 < [2-10] ≤ [2-10] ≤ [2-10] ≤ [2-10] < [2-10]

The bag of sides is always listed in non-decreasing order; the first side, 1, is less than the next, and the last side, whatever it is, is greater than the one before it.

**Wait a minute: you have [2-10] < [2-10]. But 2 is not less than 2, and 10 is not less than 10. I think it should be [2-9] < [3-10]. So the picture should be like this:**

1 < [2-9] ≤ [2-9] ≤ [2-9] ≤ [2-9] < [3-10]

Good! We're making progress in cutting down the range. But this bothers me: it says the range for the biggest number is 3 to 10, yet if one die has a 3 as its biggest number and the other has a 10, that adds to 13.

So I'm thinking that it is not possible to have a 10 after all&mdash;because if one die had a 10, then the other would have to have a 2 as the biggest number, and that can't be. Therefore the biggest number is in the range of 3 to 9. But then the others have to be less, so make them 2 to 8:

1 < [2-8] ≤ [2-8] ≤ [2-8] ≤ [2-8] < [3-9]

**I can turn this picture into code:**
def all_dice(): "A list of all feasible 6-sided dice for the Sicherman problem." return [(1, s2, s3, s4, s5, s6) for s2 in ints(2, 8) for s3 in ints(s2, 8) for s4 in ints(s3, 8) for s5 in ints(s4, 8) for s6 in ints(s5+1, 9)]
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
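A side note: the cell above uses an `ints` helper that is defined earlier in the notebook but not shown in this excerpt. A plausible stand-in (assumed here: an inclusive integer range returned as a tuple, so that `ints(1, N)` can also serve directly as a regular die) would be:

```
def ints(start, end):
    "Integers from start to end, inclusive, as a tuple (assumed helper; the real definition is not shown in this excerpt)."
    return tuple(range(start, end + 1))

print(ints(2, 8))  # (2, 3, 4, 5, 6, 7, 8)
```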
I think we're ready to run `sicherman()`. Any bets on what we'll find out?**I bet that Sicherman is remembered because he discovered a pair of dice that works. If he just proved the non-existence of a pair, I don't think that would be noteworthy.**Makes sense. Here goes: The Answer
sicherman()
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**Look at that!** It turns out you can buy a pair of dice with just these numbers. Here's a table I borrowed from [Wikipedia](https://en.wikipedia.org/wiki/Sicherman_dice) that shows both pairs of dice have the same sums.

| Sum | Regular dice: (1, 2, 3, 4, 5, 6) + (1, 2, 3, 4, 5, 6) | Sicherman dice: (1, 2, 2, 3, 3, 4) + (1, 3, 4, 5, 6, 8) |
|-----|--------------------------------------------------------|----------------------------------------------------------|
| 2   | 1+1 | 1+1 |
| 3   | 1+2, 2+1 | 2+1, 2+1 |
| 4   | 1+3, 2+2, 3+1 | 3+1, 3+1, 1+3 |
| 5   | 1+4, 2+3, 3+2, 4+1 | 1+4, 2+3, 2+3, 4+1 |
| 6   | 1+5, 2+4, 3+3, 4+2, 5+1 | 1+5, 2+4, 2+4, 3+3, 3+3 |
| 7   | 1+6, 2+5, 3+4, 4+3, 5+2, 6+1 | 1+6, 2+5, 2+5, 3+4, 3+4, 4+3 |
| 8   | 2+6, 3+5, 4+4, 5+3, 6+2 | 2+6, 2+6, 3+5, 3+5, 4+4 |
| 9   | 3+6, 4+5, 5+4, 6+3 | 1+8, 3+6, 3+6, 4+5 |
| 10  | 4+6, 5+5, 6+4 | 2+8, 2+8, 4+6 |
| 11  | 5+6, 6+5 | 3+8, 3+8 |
| 12  | 6+6 | 4+8 |

We could stop here. Or we could try to solve it for *N*-sided dice. Why stop now? Onward! OK. I know 4-, 12-, and 20-sided dice are common, but we'll try to handle any *N* > 1. My guess is we won't go too far before our program becomes too slow. So, before we try *N*-sided dice, let's analyze six-sided dice a little better, to see if we can eliminate some of the pairs before we start. The picture says that (1, 2, 2, 2, 2, 3) could be a valid die. Could it?**No! If a die had four 2s, then we know that since the other die has one 1, we could make 2 + 1 = 3 four ways. But the `regular_sums` has only two 3s. So that means that a die can have no more than two 2s. New picture:**&nbsp;1&nbsp; &lt;2-8 &le;2-8 &le;3-8 &le;3-8 &lt;3-9 Now we've got [3-8] < [3-9]; that's not right. If a die can only have one 1 and two 2s, then it must have at least one number that is a 3 or more, followed by the biggest number, which must be 4 or more, and we know a pair of biggest numbers must sum to 12, so the range of the biggest can't be [4-9], it must be [4-8]:&nbsp;1&nbsp; &lt;2-7 &le;2-7 &le;3-7 &le;3-7 &lt;4-8
def all_dice(): "A list of all feasible 6-sided dice for the Sicherman problem." return [(1, s2, s3, s4, s5, s6) for s2 in ints(2, 7) for s3 in ints(s2, 7) for s4 in ints(max(s3, 3), 7) for s5 in ints(s4, 7) for s6 in ints(s5+1, 8)]
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
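A quick, self-contained sanity check of the "no more than two 2s" argument above (both dice below are purely illustrative; only the standard-library `Counter` is needed):

```
from collections import Counter

die_a = (1, 2, 2, 2, 2, 3)   # hypothetical die with four 2s
die_b = (1, 2, 3, 4, 5, 6)   # any partner die must have exactly one 1; a regular die will do

sums = Counter(a + b for a in die_a for b in die_b)
print(sums[3])  # 5 sums of 3 (four of them from 2+1 alone), but only two 3s are allowed
```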
I'll count how many dice and how many pairs there are now:
len(all_dice()) len(pairs(all_dice()))
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**Nice&mdash;we got down from a trillion pairs to 26,000. I don't want to print `all_dice()`, but I can sample a few:**
import random random.sample(all_dice(), 10)
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
`sicherman(N)`OK, I think we're ready to update `sicherman()` to `sicherman(N)`. **Sure, most of that will be easy, just parameterizing with `N`:**
def sicherman(N=6): """The set of pairs of N-sided dice that have the same distribution of sums as a regular pair of N-sided dice.""" reg_sums = regular_sums(N) reg_pair = regular_pair(N) return {pair for pair in pairs(all_dice(N)) if pair != reg_pair and sums(pair) == reg_sums} def regular_die(N): return ints(1, N) def regular_pair(N): return (regular_die(N), regular_die(N)) def regular_sums(N): return sums(regular_pair(N)) # TODO: all_dice(N)
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
Good. I think it would be helpful for me to look at a table of `regular_sums`:
for N in ints(1, 7): print("N:", N, dict(Counter(regular_sums(N))))
N: 1 {2: 1} N: 2 {2: 1, 3: 2, 4: 1} N: 3 {2: 1, 3: 2, 4: 3, 5: 2, 6: 1} N: 4 {2: 1, 3: 2, 4: 3, 5: 4, 6: 3, 7: 2, 8: 1} N: 5 {2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 4, 8: 3, 9: 2, 10: 1} N: 6 {2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 5, 9: 4, 10: 3, 11: 2, 12: 1} N: 7 {2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7, 9: 6, 10: 5, 11: 4, 12: 3, 13: 2, 14: 1}
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**That is helpful. I can see that any `regular_sums` must have one 2 and two 3s, and three 4s, and so on, not just for `N=6` but for any `N` (except for trivially small `N`). And that means that any feasible die can have at most two 2s, three 3s, four 4s, and so on. So we have this picture:**&nbsp;1&nbsp; &lt;2+ &le;2+ &le;3+ &le;3+ &le;3+ &le;4+ &le;4+ &le;4+ &le;4+ &le; ...**where [2+] means the lower bound is 2, but we haven't figured out yet what the upper bound is.**Let's figure out upper bounds starting from the biggest number. What can the biggest number be?**For a pair of *N*-sided dice, the biggest sides from each one must add up to 2*N*. Let's take *N*=10 as an example. The biggest numbers on two 10-sided Sicherman dice must sum to 20. According to the picture above, the lower bound on the biggest number would be 4, but because there can only be one of the biggest number, the lower bound is 5. So to add up to 20, the range must be [5-15]:**&nbsp;1&nbsp; &lt;2+ &le;2+ &le;3+ &le;3+ &le;3+ &le;4+ &le;4+ &le;4+ &lt;5-15 There's probably some tricky argument for the upper bounds of the other sides, but I'm just going to say the upper bound is one less than the upper bound of the biggest number:&nbsp;1&nbsp; &lt;2-14 &le;2-14 &le;3-14 &le;3-14 &le;3-14 &le;4-14 &le;4-14 &le;4-14 &lt;5-15 Let's start by coding up `lower_bounds(N)`:
def lower_bounds(N): "A list of lower bounds for respective sides of an N-sided die." lowers = [1] for _ in range(N-1): m = lowers[-1] # The last number in lowers so far lowers.append(m if (lowers.count(m) < m) else m + 1) lowers[-1] = lowers[-2] + 1 return lowers lower_bounds(6) lower_bounds(10)
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
And `upper_bounds(N)`:
def upper_bounds(N): "A list of upper bounds for respective sides of an N-sided die." U = 2 * N - lower_bounds(N)[-1] return [1] + (N - 2) * [U - 1] + [U] upper_bounds(6) upper_bounds(10)
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
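A quick sanity check, assuming the two cells above have been run, that the bounds match the pictures for N=6 and N=10 (the expected values below were traced by hand from the definitions above):

```
# Values traced by hand from lower_bounds/upper_bounds as defined above.
assert lower_bounds(6) == [1, 2, 2, 3, 3, 4]
assert upper_bounds(6) == [1, 7, 7, 7, 7, 8]
assert lower_bounds(10) == [1, 2, 2, 3, 3, 3, 4, 4, 4, 5]
assert upper_bounds(10) == [1, 14, 14, 14, 14, 14, 14, 14, 14, 15]
print("bounds match the pictures above")
```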
Now, what do we have to do for `all_dice(N)`? When we knew we had six sides, we wrote six nested loops. We can't do that for *N*, so what do we do?**Here's an iterative approach: we keep track of a list of partially-formed dice, and on each iteration, we add a side to all the partially-formed dice in all possible ways, until the dice all have `N` sides. So for example, we'd start with:** dice = [(1,)] **and then on the next iteration (let's assume *N*=6, so the lower bound is 2 and the upper bound is 7), we'd get this:** dice = [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7)] **on the next iteration, we find all the ways of adding a third side, and so on. Like this:**
def all_dice(N): "Return a list of all possible N-sided dice for the Sicherman problem." lowers = lower_bounds(N) uppers = upper_bounds(N) def possibles(die, i): "The possible numbers for the ith side of an N-sided die." return ints(max(lowers[i], die[-1] + int(i == N-1)), uppers[i]) dice = [(1,)] for i in range(1, N): dice = [die + (side,) for die in dice for side in possibles(die, i)] return dice
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**The tricky part was with the `max`: the actual lower bound is at least `lowers[i]`, but it must also be at least as big as the previous side, `die[-1]`. And just to make things complicated, the very last side has to be strictly bigger than the previous one; `+ int(i == N-1)` does that by adding 1 just in case we're on the last side, and 0 otherwise.****Let's check it out:**
len(all_dice(6))
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
Reassuring that we get the same number we got with the old version of `all_dice()`.
random.sample(all_dice(6), 8)
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
Running `sicherman(N)` for small `N`Let's try `sicherman` for some small values of `N`:
{N: sicherman(N) for N in ints(2, 6)}
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
Again, reassuring that we get the same result for `sicherman(6)`. And interesting that there is a result for `sicherman(4)` but not for the other *N*.Let's go onwards from *N*=6, but let's check the timing as we go:
%time sicherman(6) %time sicherman(7)
CPU times: user 18.2 s, sys: 209 ms, total: 18.4 s Wall time: 21.4 s
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
Estimating run time of `sicherman(N)` for larger `N`OK, it takes 50 or 60 times longer to do 7, compared to 6. At this rate, *N*=8 will take 15 minutes, 9 will take 15 hours, and 10 will take a month.**Do we know it will continue to rise at the same rate? You're saying the run time is exponential in *N*? **I think so. The run time is proportional to the number of pairs. The number of pairs is proportional to the square of the number of dice. The number of dice is roughly exponential in *N*, because each time you increase *N* by 1, you have to try a number of new sides that is similar to the number for the previous side (but not quite the same). I should plot the number of pairs on a log scale and see if it looks like a straight line.I can count the number of pairs without explicitly generating the pairs. If there are *D* dice, then the number of pairs is what? Something like *D* &times; (*D* + 1) / 2? Or is it *D* &times; (*D* - 1) / 2?**Let's draw a picture. With *D* = 4, here are all the ways to pair one die with another to yield 10 distinct pairs:** 11 .. .. .. 21 22 .. .. 31 32 33 .. 41 42 43 44 **To figure out the formula, add a row to the top:** .. .. .. .. 11 .. .. .. 21 22 .. .. 31 32 33 .. 41 42 43 44 **Now we have a *D* &times; (*D* + 1) rectangle, and we can see that half (10) of them are pairs, and half (the other 10) are not pairs (because they would be repetitions). So the formula is *D* &times; (*D* + 1)/2, and checking for *D*=4, (4 &times; 5) / 2 = 10, so we're good.** OK, let's try it. First some boilerplate for plotting:
%matplotlib inline import matplotlib import numpy as np import matplotlib.pyplot as plt def logplot(X, Y, *options): "Plot Y on a log scale vs X." fig, ax = plt.subplots() ax.set_yscale('log') ax.plot(X, Y, *options)
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
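As a quick, stand-alone check of the *D* &times; (*D* + 1)/2 pair-count formula (this aside uses `itertools`, which the notebook itself does not rely on):

```
from itertools import combinations_with_replacement

D = 4
# Unordered pairs of D dice, where pairing a die with itself is allowed;
# this matches the 10 filled cells in the triangle picture above.
n_pairs = len(list(combinations_with_replacement(range(D), 2)))
print(n_pairs, D * (D + 1) // 2)  # both 10
```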
Now we can plot and display the number of pairs:
def plot_pairs(Ns): "Given a list of N values, plot the number of pairs and return a dict of them." Ds = [len(all_dice(N)) for N in Ns] Npairs = [D * (D + 1) // 2 for D in Ds] logplot(Ns, Npairs, 'bo-') return {Ns[i]: Npairs[i] for i in range(len(Ns))} plot_pairs(ints(2, 12))
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
OK, we've learned two things. One, it *is* roughly a straight line, so the number of pairs is roughly exponential. Two, there are a *lot* of pairs. 10^14, just for *N*=12. I don't want to even think about *N*=20.**So if we want to get much beyond *N*=8, we're either going to need a brand new approach, or we need to make far fewer pairs of dice.** Making Fewer `pairs`. Maybe we could tighten up the upper bounds, but I don't think that will help very much. How about if we concentrate on making fewer pairs, without worrying about making fewer dice?**How could we do that? Isn't the number of pairs always (*D*^2 + *D*)/2?**Remember, we're looking for *feasible* pairs. So if there was some way of knowing ahead of time that two dice were incompatible as a pair, we wouldn't even need to consider the pair.**By incompatible, you mean they can't form a pair that is a solution.**Right. Consider this: in any valid pair, the sum of the biggest number on each die must be 2*N*. For example, with *N* = 6: ((1, 2, 2, 3, 3, 4), (1, 3, 4, 5, 6, 8)) sum of biggests = 4 + 8 = 12 ((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 5, 6)) sum of biggests = 6 + 6 = 12 So if we have a die with biggest number 7, what dice should we consider pairing it with?**Only ones with biggest number 5.****I get it: we sort all the dice into bins labeled with their biggest number. Then we look at each bin, and for the "7" bin, we pair them up with the dice in the "5" bin. In general, the *B* bin can only pair with the 2*N* - *B* bin.**Exactly. **Cool. I can see how that can cut the amount of work by a factor of 10 or so. But I was hoping for a factor of a million or so.**There are other properties of a feasible pair.**Like what?**Well, what about the number of 2s in a pair?**Let's see. We know that any `regular_sums` has to have two 3s, and the only way to make a 3 is 2+1. And each die has only one 1, so that means that each pair of dice has to have a total of exactly two 2s.**Does it have to be one 2 on each die?**No. It could be one each, or it could be two on one die and none on the other. So a die with *T* twos can only pair with dice that have 2 - *T* twos.**Great. Can you think of another property?**Give me a hint.**Let's look at the sums of 6-sided Sicherman and regular pairs:
sum((1, 2, 2, 3, 3, 4) + (1, 3, 4, 5, 6, 8)) sum((1, 2, 3, 4, 5, 6) + (1, 2, 3, 4, 5, 6))
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**They're the same. Is that [the question](http://hitchhikers.wikia.com/wiki/42) that 42 is the answer to? But does a Sicherman pair always have to have the same sum as a regular pair? I guess it does, because the sum of `sums(pair)` is just all the sides added up *N* times each, so two pairs have the same sum of `sums(pair)` if and only if they have the same sum.**So consider the die (1, 3, 3, 3, 4, 5). What do we know about the dice that it can possibly pair with?**OK, that die has a biggest side of 5, so it can only pair with dice that have a biggest side of 12 - 5 = 7. It has a sum of 19, so it can only pair with dice that have a sum of 42 - 19 = 23. And it has no 2s, so it can only pair with dice that have two 2s.**I wonder how many such dice there are, out of all 231 `all_dice(6)`?
{die for die in all_dice(6) if max(die) == 12 - 5 and sum(die) == 42 - 19 and die.count(2) == 2}
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**There's only 1. So, (1, 3, 3, 3, 4, 5) only has to try to pair with one die, rather than 230. Nice improvement!**In general, I wonder what the sum of the sides of a regular pair is?**Easy, that's `N * (N + 1)`. [Gauss](http://betterexplained.com/articles/techniques-for-adding-the-numbers-1-to-100/) knew that when he was in elementary school!** More efficient `pairs(dice)`**OK, we can code this up easily enough**:
from collections import defaultdict def tabulate(dice): """Put all dice into bins in a hash table, keyed by bin_label(die). Each bin holds a list of dice with that key.""" # Example: {(21, 6, 1): [(1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 4, 7), ...] table = defaultdict(list) for die in dice: table[bin_label(die)].append(die) return table def pairs(dice): "Return all pairs of dice that could possibly be a solution to the Sicherman problem." table = tabulate(dice) N = len(dice[0]) for bin1 in table: bin2 = compatible_bin(bin1, N) if bin2 in table and bin1 <= bin2: for A in table[bin1]: for B in table[bin2]: yield (A, B) def bin_label(die): return sum(die), max(die), die.count(2) def compatible_bin(bin1, N): "Return a bin label that is compatible with bin1." (S1, M1, T1) = bin1 return (N * (N + 1) - S1, 2 * N - M1, 2 - T1)
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
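A quick check, assuming the cell above has been run, that the known 6-sided Sicherman pair lands in compatible bins:

```
A = (1, 2, 2, 3, 3, 4)
B = (1, 3, 4, 5, 6, 8)
print(bin_label(A))   # (15, 4, 2): sum, biggest side, number of 2s
print(bin_label(B))   # (27, 8, 0)
assert compatible_bin(bin_label(A), 6) == bin_label(B)
```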
**Let's make sure it works:**
{N: sicherman(N) for N in ints(2, 6)}
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
Good, those are the same answers as before. But how much faster is it?
%time sicherman(7)
CPU times: user 24.9 ms, sys: 1.23 ms, total: 26.1 ms Wall time: 153 ms
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
Wow, that's 1000 times faster than before. **I want to take a peek at what some of the bins look like:**
tabulate(all_dice(5))
_____no_output_____
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
**Pretty good: four of the bins have two dice, but the rest have only one die.**And let's see how many pairs we're producing now. We'll tabulate *N* (the number of sides); *D* (the number of *N*-sided dice), the number `pairs(dice)` using the new `pairs`, and the number using the old `pairs`:
print(' N: D #pairs(dice) D*(D-1)/2') for N in ints(2, 11): dice = list(all_dice(N)) D = len(dice) print('{:2}: {:9,d} {:12,d} {:17,d}'.format(N, D, len(list(pairs(dice))), D*(D-1)//2))
N: D #pairs(dice) D*(D-1)/2 2: 1 1 0 3: 1 1 0 4: 10 3 45 5: 31 9 465 6: 231 71 26,565 7: 1,596 670 1,272,810 8: 5,916 6,614 17,496,570 9: 40,590 76,215 823,753,755 10: 274,274 920,518 37,612,976,401 11: 1,837,836 11,506,826 1,688,819,662,530
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
OK, we're doing 100,000 times better for *N*=11. But it would still take a long time to test 11 million pairs. Let's just get the answers up to *N*=10:
%%time {N: sicherman(N) for N in ints(2, 10)}
CPU times: user 26.2 s, sys: 129 ms, total: 26.3 s Wall time: 26.6 s
MIT
ipynb/Sicherman Dice.ipynb
awesome-archive/pytudes
APG-MLE performance. Reconstructing the `cat` state from measurements of the Husimi Q function. The cat state is defined as: $$|\psi_{\text{cat}} \rangle = \frac{1}{\mathcal N} \big( |\alpha \rangle + |-\alpha \rangle \big) $$ with $\alpha=2$ and normalization $\mathcal N$. Husimi Q function measurements. The Husimi Q function can be obtained by calculating the expectation value of the following measurement operator: $$\mathcal O_i = \frac{1}{\pi}|\beta_i \rangle \langle \beta_i|$$ where $|\beta_i \rangle$ are coherent states written in the Fock basis.
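For reference (standard coherent-state algebra, not stated in the notebook itself), the normalization constant follows from $\langle \alpha | -\alpha \rangle = e^{-2|\alpha|^2}$: $$\mathcal N = \sqrt{2\left(1 + e^{-2|\alpha|^2}\right)},$$ so for $\alpha = 2$ it is very close to $\sqrt{2}$ (the correction term is $e^{-8} \approx 3.4 \times 10^{-4}$).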
import numpy as np
from qutip import coherent, coherent_dm, expect, Qobj, fidelity, rand_dm
from qutip.wigner import wigner, qfunc
from scipy.io import savemat, loadmat
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import tensorflow as tf  # needed for the float64 default set below

%load_ext autoreload

tf.keras.backend.set_floatx('float64')  # Set float64 as the default

hilbert_size = 32
alpha = 2

psi = coherent(hilbert_size, alpha) + coherent(hilbert_size, -alpha)
psi = psi.unit()  # .unit() normalizes the ket to unit norm
rho = psi*psi.dag()

grid = 32
xvec = np.linspace(-3, 3, grid)
yvec = np.linspace(-3, 3, grid)
q = qfunc(rho, xvec, yvec, g=2)

cmap = "Blues"
im = plt.pcolor(xvec, yvec, q, vmin=0, vmax=np.max(q), cmap=cmap, shading='auto')
plt.colorbar(im)
plt.xlabel(r"Re($\beta$)")
plt.ylabel(r"Im($\beta$)")
plt.title("Husimi Q function")
plt.show()
_____no_output_____
MIT
paper-figures/fig3a-apg-mle-data.ipynb
quantshah/qst-cgan
Construct the measurement operators and simulated data (without any noise)
X, Y = np.meshgrid(xvec, yvec) betas = (X + 1j*Y).ravel() m_ops = [coherent_dm(hilbert_size, beta) for beta in betas] data = expect(m_ops, rho)
_____no_output_____
MIT
paper-figures/fig3a-apg-mle-data.ipynb
quantshah/qst-cgan
APG-MLE. The APG-MLE implementation in MATLAB, provided at https://github.com/qMLE/qMLE, requires an input density matrix and a set of measurement operators. Here, we export the same data to a MATLAB format and use the APG-MLE method to reconstruct the density matrix of the state.
ops_numpy = np.array([op.data.toarray() for op in m_ops]) # convert the QuTiP Qobj to numpy arrays ops = np.transpose(ops_numpy, [1, 2, 0]) mdic = {"measurements": ops} savemat("data/measurements.mat", mdic) mdic = {"rho": rho.full()} savemat("data/rho.mat", mdic)
_____no_output_____
MIT
paper-figures/fig3a-apg-mle-data.ipynb
quantshah/qst-cgan
Reconstruct using the APG-MLE MATLAB code
fidelities = loadmat("data/fidelities-apg-mle.mat") fidelities = fidelities['flist1'].ravel() iterations = np.arange(len(fidelities)) plt.plot(iterations, fidelities, color="black", label="APG-MLE") plt.legend() plt.xlabel("Iterations") plt.ylabel("Fidelity") plt.ylim(0, 1) plt.grid(which='minor', alpha=0.2) plt.grid(which='major', alpha=0.2) plt.xscale('log') plt.show()
_____no_output_____
MIT
paper-figures/fig3a-apg-mle-data.ipynb
quantshah/qst-cgan
MMU Confusion matrix & Metrics walkthrough. This notebook briefly demonstrates the package's capabilities for computing confusion matrices and binary classification metrics.
import pandas as pd import numpy as np import mmu
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
Data generationWe generate predictions and true labels where:* `scores`: classifier scores* `yhat`: estimated labels* `y`: true labels
scores, yhat, y = mmu.generate_data(n_samples=10000)
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
Confusion matrix only. We can compute the confusion matrix for a single run using the estimated labels, or based on the classifier scores and a classification threshold. Based on the estimated labels `yhat`:
# based on yhat mmu.confusion_matrix(y, yhat)
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
based on classifier score with classification threshold
mmu.confusion_matrix(y, scores=scores, threshold=0.5)
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
Precision-Recallmmu has a specialised function for the positive precision and recall
cm, prec_rec = mmu.precision_recall(y, scores=scores, threshold=0.5, return_df=True)
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
Next to the point precision and recall there is also a function to compute the precision-recall curve. `precision_recall_curve`, also available under the alias `pr_curve`, requires you to pass the discrimination/classification thresholds. Auto thresholds. mmu provides a utility function `auto_thresholds` that returns all the thresholds that result in a different confusion matrix. For large test sets this can be a bit much; `auto_thresholds` has an optional parameter `max_steps` that limits the number of thresholds. This is done by weighted sampling where the scaled inverse proximity to the next score is used as a weight. In practice this means that the extremes of the scores are oversampled. `auto_thresholds` always ensures that the lowest and highest score are included. (A library-free sketch of the curve computation follows the next cell.)
thresholds = mmu.auto_thresholds(scores)
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
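As promised above, a minimal, library-free sketch of what a precision-recall curve over these thresholds computes, assuming the cells above have been run. In practice you would use mmu's own `precision_recall_curve`/`pr_curve`; this is only to make the computation explicit, and the tie-breaking convention (`>=`) is an assumption of this sketch:

```
import numpy as np

def pr_curve_sketch(y_true, y_scores, thresholds):
    """Precision and recall of the positive class at each threshold (plain numpy sketch)."""
    y_true = np.asarray(y_true).astype(bool)
    y_scores = np.asarray(y_scores)
    precisions, recalls = [], []
    for t in thresholds:
        y_pred = y_scores >= t
        tp = np.sum(y_pred & y_true)
        fp = np.sum(y_pred & ~y_true)
        fn = np.sum(~y_pred & y_true)
        precisions.append(tp / (tp + fp) if (tp + fp) else 1.0)  # convention: precision 1 when nothing is predicted positive
        recalls.append(tp / (tp + fn) if (tp + fn) else 0.0)
    return np.array(precisions), np.array(recalls)

precision, recall = pr_curve_sketch(y, scores, thresholds)
```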
Confusion matrix and metrics. The ``binary_metrics*`` functions compute ten classification metrics:

* 0 - neg.precision aka Negative Predictive Value
* 1 - pos.precision aka Positive Predictive Value
* 2 - neg.recall aka True Negative Rate & Specificity
* 3 - pos.recall aka True Positive Rate aka Sensitivity
* 4 - neg.f1 score
* 5 - pos.f1 score
* 6 - False Positive Rate
* 7 - False Negative Rate
* 8 - Accuracy
* 9 - MCC

These metrics were chosen as they are the most commonly used metrics, and most other metrics can be computed from these. We don't provide individual functions at the moment as the overhead of computing all of them vs one or two is negligible. This index can be retrieved using:
col_index = mmu.metrics.col_index col_index
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
For a single test set
cm, metrics = mmu.binary_metrics(y, yhat) # the confusion matrix cm metrics
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
We can also request dataframes back:
cm, metrics = mmu.binary_metrics(y, yhat, return_df=True) metrics
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
A single run using probabilities
cm, metrics = mmu.binary_metrics(y, scores=scores, threshold=0.5, return_df=True) cm metrics
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
A single run using multiple thresholds. This can be used when you want to compute a precision-recall curve, for example.
thresholds = mmu.auto_thresholds(scores) cm, metrics = mmu.binary_metrics_thresholds( y=y, scores=scores, thresholds=thresholds, return_df=True )
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
The confusion matrix is now an 2D array where the rows contain the confusion matrix for a single threshold
cm
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
Similarly, `metrics` is now an 2D array where the rows contain the metrics for a single threshold
metrics
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
Generate multiple runs for the below functions
scores, yhat, y = mmu.generate_data(n_samples=10000, n_sets=100)
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
Multiple runs using a single threshold. If you have performed bootstrap or multiple train-test runs and want to evaluate the distribution of the metrics, you can use `binary_metrics_runs`. `cm` and `metrics` are now two-dimensional arrays where the rows are the confusion matrices/metrics for a run.
cm, metrics = mmu.binary_metrics_runs( y=y, scores=scores, threshold=0.5, ) cm[:5, :]
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
Multiple runs using multiple thresholds. You have performed bootstrap or multiple train-test runs and, for example, want to evaluate the different precision-recall curves.
cm, metrics = mmu.binary_metrics_runs_thresholds( y=y, scores=scores, thresholds=thresholds, fill=1.0 )
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
The confusion matrix and metrics are now cubes. For the confusion matrix:
* rows -- the thresholds
* columns -- the confusion matrix elements
* slices -- the runs

For the metrics:
* rows -- the thresholds
* columns -- the metrics
* slices -- the runs

The stride is such that the biggest stride is over the thresholds for the confusion matrix and over the metrics for the metrics. The reasoning is that you will want to model the confusion matrices over the runs, and the metrics individually over the thresholds and runs.
print('shape confusion matrix: ', cm.shape) print('strides confusion matrix: ', cm.strides) print('shape metrics: ', metrics.shape) print('strides metrics: ', metrics.strides) pos_recalls = metrics[:, mmu.metrics.col_index['pos.rec'], :] pos_precisions = metrics[:, mmu.metrics.col_index['pos.prec'], :]
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
Binary metrics over confusion matrices. This can be used when you have a methodology where you model and generate confusion matrices.
# We use confusion_matrices to create confusion matrices based on some output cm = mmu.confusion_matrices( y=y, scores=scores, threshold=0.5, ) metrics = mmu.binary_metrics_confusion_matrices(cm, 0.0) mmu.metrics_to_dataframe(metrics)
_____no_output_____
Apache-2.0
notebooks/metrics_tutorial.ipynb
RUrlus/ModelMetricUncertainty
- https://github.com/tidyverse/tidyr/tree/master/vignettes - https://cran.r-project.org/web/packages/tidyr/vignettes/tidy-data.html - https://github.com/cmrivers/ebola
pd.read_csv('../data/preg.csv') pd.read_csv('../data/preg2.csv') pd.melt(pd.read_csv('../data/preg.csv'), 'name')
_____no_output_____
MIT
01-notes/04-tidy.ipynb
chilperic/scipy-2017-tutorial-pandas
```
Each variable forms a column.
Each observation forms a row.
Each type of observational unit forms a table.
```
Some common data problems
```
Column headers are values, not variable names.
Multiple variables are stored in one column.
Variables are stored in both rows and columns.
Multiple types of observational units are stored in the same table.
A single observational unit is stored in multiple tables.
```
Column headers are values, not variable names
pew = pd.read_csv('../data/pew.csv') pew.head() pd.melt(pew, id_vars=['religion']) pd.melt(pew, id_vars='religion', var_name='income', value_name='count')
_____no_output_____
MIT
01-notes/04-tidy.ipynb
chilperic/scipy-2017-tutorial-pandas
Keep multiple columns fixed
billboard = pd.read_csv('../data/billboard.csv') billboard.head() pd.melt(billboard, id_vars=['year', 'artist', 'track', 'time', 'date.entered'], value_name='rank', var_name='week')
_____no_output_____
MIT
01-notes/04-tidy.ipynb
chilperic/scipy-2017-tutorial-pandas
Multiple variables are stored in one column.
tb = pd.read_csv('../data/tb.csv') tb.head() ebola = pd.read_csv('../data/ebola_country_timeseries.csv') ebola.head() # first let's melt the data down ebola_long = ebola.melt(id_vars=['Date', 'Day'], value_name='count', var_name='cd_country') ebola_long.head() var_split = ebola_long['cd_country'].str.split('_') var_split type(var_split) var_split[0] var_split[0][0] var_split[0][1] # save each part to a separate variable status_values = var_split.str.get(0) country_values = var_split.str.get(1) status_values.head() country_values.head() # assign the parts to new dataframe columns ebola_long['status'] = status_values ebola_long['country'] = country_values ebola_long.head()
_____no_output_____
MIT
01-notes/04-tidy.ipynb
chilperic/scipy-2017-tutorial-pandas
above in a single step
variable_split = ebola_long['cd_country'].str.split('_', expand=True) variable_split.head() variable_split.columns = ['status1', 'country1'] ebola = pd.concat([ebola_long, variable_split], axis=1) ebola.head()
_____no_output_____
MIT
01-notes/04-tidy.ipynb
chilperic/scipy-2017-tutorial-pandas
Variables in both rows and columns
weather = pd.read_csv('../data/weather.csv') weather.head() weather_melt = pd.melt(weather, id_vars=['id', 'year', 'month', 'element'], var_name='day', value_name='temp') weather_melt.head() weather_tidy = weather_melt.pivot_table( index=['id', 'year', 'month', 'day'], columns='element', values='temp') weather_tidy.head() weather_flat = weather_tidy.reset_index() weather_flat.head()
_____no_output_____
MIT
01-notes/04-tidy.ipynb
chilperic/scipy-2017-tutorial-pandas
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Implement breadth-first search on a graph.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Is the graph directed? * Yes* Can we assume we already have Graph and Node classes? * Yes* Can we assume the inputs are valid? * Yes* Can we assume this fits memory? * Yes Test CasesInput:* `add_edge(source, destination, weight)````graph.add_edge(0, 1, 5)graph.add_edge(0, 4, 3)graph.add_edge(0, 5, 2)graph.add_edge(1, 3, 5)graph.add_edge(1, 4, 4)graph.add_edge(2, 1, 6)graph.add_edge(3, 2, 7)graph.add_edge(3, 4, 8)```Result:* Order of nodes visited: [0, 1, 4, 5, 3, 2] AlgorithmRefer to the [Solution Notebook](). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
%run ../graph/graph.py class GraphBfs(Graph): def bfs(self, root, visit_func): # TODO: Implement me pass
_____no_output_____
Apache-2.0
graphs_trees/graph_bfs/bfs_challenge.ipynb
janhak/ica-answers
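The cell above intentionally leaves `bfs` unimplemented; that is the challenge. For readers who want a reference point, here is a minimal, self-contained sketch of breadth-first search using `deque`. It uses its own toy adjacency mapping rather than the repo's `graph.py` (whose exact API is not shown in this excerpt), so it is an illustration under those assumptions, not the official solution:

```
from collections import deque

def bfs(root, visit_func, neighbors):
    """Breadth-first search from `root`.

    `neighbors` maps a node id to an iterable of adjacent node ids
    (a stand-in for whatever adjacency structure graph.py provides).
    """
    if root is None:
        return
    visited = {root}
    queue = deque([root])
    while queue:
        node = queue.popleft()
        visit_func(node)
        for nxt in neighbors.get(node, ()):
            if nxt not in visited:
                visited.add(nxt)
                queue.append(nxt)

# Adjacency for the test case's edges (weights ignored for BFS):
adjacency = {0: [1, 4, 5], 1: [3, 4], 2: [1], 3: [2, 4]}
order = []
bfs(0, order.append, adjacency)
print(order)  # [0, 1, 4, 5, 3, 2] -- the expected visit order
```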
Unit Test **The following unit test is expected to fail until you solve the challenge.**
%run ../utils/results.py # %load test_bfs.py from nose.tools import assert_equal class TestBfs(object): def __init__(self): self.results = Results() def test_bfs(self): nodes = [] graph = GraphBfs() for id in range(0, 6): nodes.append(graph.add_node(id)) graph.add_edge(0, 1, 5) graph.add_edge(0, 4, 3) graph.add_edge(0, 5, 2) graph.add_edge(1, 3, 5) graph.add_edge(1, 4, 4) graph.add_edge(2, 1, 6) graph.add_edge(3, 2, 7) graph.add_edge(3, 4, 8) graph.bfs(nodes[0], self.results.add_result) assert_equal(str(self.results), "[0, 1, 4, 5, 3, 2]") print('Success: test_bfs') def main(): test = TestBfs() test.test_bfs() if __name__ == '__main__': main()
_____no_output_____
Apache-2.0
graphs_trees/graph_bfs/bfs_challenge.ipynb
janhak/ica-answers
ScottPlot Notebook Quickstart_How to use ScottPlot to plot data in a Jupyter / .NET Interactive notebook_
// Install the ScottPlot NuGet package #r "nuget:ScottPlot" // Plot some data double[] dataX = new double[] { 1, 2, 3, 4, 5 }; double[] dataY = new double[] { 1, 4, 9, 16, 25 }; var plt = new ScottPlot.Plot(400, 300); plt.AddScatter(dataX, dataY); // Display the result as a HTML image display(HTML(plt.GetImageHTML()));
_____no_output_____
MIT
src/ScottPlot4/ScottPlot.Sandbox/Notebook/ScottPlotQuickstart.ipynb
p-rakash/ScottPlot
PyTorch Image Classification Single GPU using Vertex Training with Custom Container. Setup
PROJECT_ID = "YOUR PROJECT ID" BUCKET_NAME = "gs://YOUR BUCKET NAME" REGION = "YOUR REGION" SERVICE_ACCOUNT = "YOUR SERVICE ACCOUNT" content_name = "pt-img-cls-gpu-cust-cont-torchserve"
_____no_output_____
Apache-2.0
community-content/pytorch_image_classification_single_gpu_with_vertex_sdk_and_torchserve/vertex_training_with_custom_container.ipynb
nayaknishant/vertex-ai-samples
Local Training
! ls trainer ! cat trainer/requirements.txt ! pip install -r trainer/requirements.txt ! cat trainer/task.py %run trainer/task.py --epochs 5 --local-mode ! ls ./tmp ! rm -rf ./tmp
_____no_output_____
Apache-2.0
community-content/pytorch_image_classification_single_gpu_with_vertex_sdk_and_torchserve/vertex_training_with_custom_container.ipynb
nayaknishant/vertex-ai-samples
Vertex Training using Vertex SDK and Custom Container Build Custom Container
hostname = "gcr.io" image_name_train = content_name tag = "latest" custom_container_image_uri_train = f"{hostname}/{PROJECT_ID}/{image_name_train}:{tag}" ! cd trainer && docker build -t $custom_container_image_uri_train -f Dockerfile . ! docker run --rm $custom_container_image_uri_train --epochs 5 --local-mode ! docker push $custom_container_image_uri_train ! gcloud container images list --repository $hostname/$PROJECT_ID
_____no_output_____
Apache-2.0
community-content/pytorch_image_classification_single_gpu_with_vertex_sdk_and_torchserve/vertex_training_with_custom_container.ipynb
nayaknishant/vertex-ai-samples
Initialize Vertex SDK
! pip install -r requirements.txt from google.cloud import aiplatform aiplatform.init( project=PROJECT_ID, staging_bucket=BUCKET_NAME, location=REGION, )
_____no_output_____
Apache-2.0
community-content/pytorch_image_classification_single_gpu_with_vertex_sdk_and_torchserve/vertex_training_with_custom_container.ipynb
nayaknishant/vertex-ai-samples
Create a Vertex Tensorboard Instance
tensorboard = aiplatform.Tensorboard.create( display_name=content_name, )
_____no_output_____
Apache-2.0
community-content/pytorch_image_classification_single_gpu_with_vertex_sdk_and_torchserve/vertex_training_with_custom_container.ipynb
nayaknishant/vertex-ai-samples
Option: Use a Previously Created Vertex Tensorboard Instance
```
tensorboard_name = "Your Tensorboard Resource Name or Tensorboard ID"
tensorboard = aiplatform.Tensorboard(tensorboard_name=tensorboard_name)
```
Run a Vertex SDK CustomContainerTrainingJob
display_name = content_name gcs_output_uri_prefix = f"{BUCKET_NAME}/{display_name}" machine_type = "n1-standard-4" accelerator_count = 1 accelerator_type = "NVIDIA_TESLA_K80" container_args = [ "--batch-size", "256", "--epochs", "100", ] custom_container_training_job = aiplatform.CustomContainerTrainingJob( display_name=display_name, container_uri=custom_container_image_uri_train, ) custom_container_training_job.run( args=container_args, base_output_dir=gcs_output_uri_prefix, machine_type=machine_type, accelerator_type=accelerator_type, accelerator_count=accelerator_count, tensorboard=tensorboard.resource_name, service_account=SERVICE_ACCOUNT, ) print(f"Custom Training Job Name: {custom_container_training_job.resource_name}") print(f"GCS Output URI Prefix: {gcs_output_uri_prefix}")
_____no_output_____
Apache-2.0
community-content/pytorch_image_classification_single_gpu_with_vertex_sdk_and_torchserve/vertex_training_with_custom_container.ipynb
nayaknishant/vertex-ai-samples
Training Artifact
! gsutil ls $gcs_output_uri_prefix
_____no_output_____
Apache-2.0
community-content/pytorch_image_classification_single_gpu_with_vertex_sdk_and_torchserve/vertex_training_with_custom_container.ipynb
nayaknishant/vertex-ai-samples
PostImages Uploader. This notebook provides an easy way to upload your images to [postimages.org](https://postimages.org). How to use:
- Modify the **configurations** if needed.
- At the menu bar, select Run > Run All Cells.
- Scroll to the end of this notebook for outputs.

Configurations --- Path to a directory which contains the images you want to upload.
> Only files in the top-level directory will be uploaded.
> Limit: 24 MiB per file or image dimension of 2508 squared pixels.
INPUT_DIR = '../outputs/images'
_____no_output_____
MIT
notebooks/uploader/postimages.ipynb
TheYoke/PngBin
--- Path to a file which will contain a list of uploaded image URLs, used for updating the metadata database file.
> Appends if the file already exists.
URLS_PATH = '../outputs/urls.txt'
_____no_output_____
MIT
notebooks/uploader/postimages.ipynb
TheYoke/PngBin
--- Import
import os import sys from modules.PostImages import PostImages
_____no_output_____
MIT
notebooks/uploader/postimages.ipynb
TheYoke/PngBin
Basic Configuration Validation
assert os.path.isdir(INPUT_DIR), 'INPUT_DIR must exist and be a directory.' assert any(x.is_file() for x in os.scandir(INPUT_DIR)), 'INPUT_DIR top level directory must have at least one file.' assert not os.path.exists(URLS_PATH) or os.path.isfile(URLS_PATH), 'URLS_PATH must be a file if it exists.'
_____no_output_____
MIT
notebooks/uploader/postimages.ipynb
TheYoke/PngBin
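For orientation, here is a rough, hypothetical sketch of the upload workflow these settings feed into. The real notebook presumably uses the `PostImages` class imported above, whose API is not shown in this excerpt; `upload_one` below is a stand-in placeholder, not a real function:

```
import os

def upload_one(path):
    """Stand-in placeholder for the actual uploader (presumably the PostImages
    class imported above); assumed to return the URL of the uploaded image."""
    raise NotImplementedError

image_paths = sorted(entry.path for entry in os.scandir(INPUT_DIR) if entry.is_file())
with open(URLS_PATH, 'a') as f:  # append mode, matching the note above that an existing file is appended to
    for path in image_paths:
        f.write(upload_one(path) + '\n')
```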