repo_name | hexsha | file_path | code | apis | possible_versions |
---|---|---|---|---|---|
IRASatUC/two_loggers | [
"c5c99868a9c896aa2fdb940f2f7b7173abed9e00"
] | [
"loggers_control/scripts/envs/solo_escape_task_env.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nTask environment for single logger escaping from the walled cell\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nfrom numpy import pi\nfrom numpy import random\nimport time\n\nimport rospy\nimport tf\nfrom std_srvs.srv import Empty\nfrom gazebo_msgs.srv import SetModelState\nfrom gazebo_msgs.msg import ModelState, LinkState, ModelStates, LinkStates\nfrom geometry_msgs.msg import Pose, Twist\n\n\nclass SoloEscapeEnv(object):\n \"\"\"\n SoloEscape Class\n \"\"\"\n def __init__(self):\n rospy.init_node(\"solo_escape_task_env\", anonymous=True, log_level=rospy.INFO)\n # simulation parameters\n self.rate = rospy.Rate(100)\n # environment parameters\n self.observation = dict(\n pose=Pose(),\n twist=Twist()\n )\n self.action = np.zeros(2)\n self.info = dict(status=\"\")\n self.reward = 0\n self._episode_done = False\n self.success_count = 0\n self.max_step = 1000\n self.steps = 0\n self.status = \"trapped\"\n self.model_states = ModelStates()\n # services\n self.reset_world_proxy = rospy.ServiceProxy('/gazebo/reset_world', Empty)\n self.reset_simulation_proxy = rospy.ServiceProxy('/gazebo/reset_simulation', Empty)\n self.unpause_physics_proxy = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)\n self.pause_physics_proxy = rospy.ServiceProxy('/gazebo/pause_physics', Empty)\n self.set_model_state_proxy = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)\n # topic publisher\n self.cmd_vel_pub = rospy.Publisher(\"/cmd_vel\", Twist, queue_size=1)\n # topic subscriber\n rospy.Subscriber(\"/gazebo/model_states\", ModelStates, self._model_states_callback)\n\n def pausePhysics(self):\n rospy.wait_for_service(\"/gazebo/pause_physics\")\n try:\n self.pause_physics_proxy()\n except rospy.ServiceException as e:\n rospy.logerr(\"/gazebo/pause_physics service call failed\")\n\n def unpausePhysics(self):\n rospy.wait_for_service(\"/gazebo/unpause_physics\")\n try:\n self.unpause_physics_proxy()\n except rospy.ServiceException as e:\n rospy.logerr(\"/gazebo/unpause_physics service call failed\")\n\n def resetSimulation(self):\n rospy.wait_for_service(\"/gazebo/reset_simulation\")\n try:\n self.reset_simulation_proxy()\n except rospy.ServiceException as e:\n rospy.logerr(\"/gazebo/reset_simulation service call failed\")\n\n def resetWorld(self):\n rospy.wait_for_service(\"/gazebo/reset_world\")\n try:\n self.reset_world_proxy()\n except rospy.ServiceException as e:\n rospy.logerr(\"/gazebo/reset_world service call failed\")\n\n def setModelState(self, model_state):\n rospy.wait_for_service('/gazebo/set_model_state')\n try:\n self.set_model_state_proxy(model_state)\n except rospy.ServiceException as e:\n rospy.logerr(\"Service call failed: {}\".format(e))\n\n def reset(self, init_pose=[]):\n \"\"\"\n Reset environment\n obs, info = env.reset()\n \"\"\"\n rospy.logdebug(\"\\nStart Environment Reset\")\n self._set_init(init_pose)\n obs = self._get_observation()\n info = self._post_information()\n self.steps = 0\n rospy.logerr(\"\\nEnvironment Reset!!!\\n\")\n\n return obs, info\n\n def step(self, action):\n \"\"\"\n Manipulate the environment with an action\n obs, rew, done, info = env.step(action)\n \"\"\"\n rospy.logdebug(\"\\nStart Environment Step\")\n self._take_action(action)\n obs = self._get_observation()\n reward, done = self._compute_reward()\n info = self._post_information()\n self.steps += 1\n rospy.logdebug(\"End Environment Step\\n\")\n\n return obs, reward, done, info\n\n def _set_init(self, init_pose):\n 
\"\"\"\n Set initial condition for single logger, Set the logger at a random pose inside cell.\n Args:\n init_pose: [x, y, theta]\n \"\"\"\n rospy.logdebug(\"\\nStart Initializing Robot\")\n # prepare\n self._take_action(np.zeros(2))\n self.pausePhysics()\n self.resetWorld()\n robot_pose = ModelState()\n robot_pose.model_name = \"logger\"\n robot_pose.reference_frame = \"world\"\n robot_pose.pose.position.z = 0.2\n if init_pose: # inialize randomly\n assert np.absolute(init_pose[0]) <= 4.5\n assert np.absolute(init_pose[1]) <= 4.5\n assert -pi<=init_pose[2]<= pi # theta within [-pi,pi]\n else: # inialize accordingly\n init_pose = [0]*3\n init_pose[0] = random.uniform(-4.5, 4.5)\n init_pose[1] = random.uniform(-4.5, 4.5)\n init_pose[2] = random.uniform(-pi, pi)\n robot_pose.pose.position.x = init_pose[0]\n robot_pose.pose.position.y = init_pose[1]\n quat = tf.transformations.quaternion_from_euler(0, 0, init_pose[2])\n robot_pose.pose.orientation.z = quat[2]\n robot_pose.pose.orientation.w = quat[3]\n # call '/gazebo/set_model_state' service\n self.setModelState(model_state=robot_pose)\n rospy.logdebug(\"Logger was initialized at {}\".format(robot_pose))\n self.unpausePhysics()\n self._take_action(np.zeros(2))\n # episode should not be done\n self._episode_done = False\n rospy.logdebug(\"End Initializing Robot\\n\")\n\n def _get_observation(self):\n \"\"\"\n Get observations from env\n Return:\n observation: [x, y, v_x, v_y, cos(yaw), sin(yaw), yaw_dot]\n \"\"\"\n rospy.logdebug(\"\\nStart Getting Observation\")\n model_states = self._get_model_states()\n id_logger = model_states.name.index(\"logger\")\n self.observation[\"pose\"] = model_states.pose[id_logger]\n self.observation[\"twist\"] = model_states.twist[id_logger]\n # compute status\n if self.observation[\"pose\"].position.x > 4.79:\n self.status = \"east\"\n elif self.observation[\"pose\"].position.x < -4.79:\n self.status = \"west\"\n elif self.observation[\"pose\"].position.y > 4.79:\n self.status = \"north\"\n elif -6 <= self.observation[\"pose\"].position.y < -4.79:\n if np.absolute(self.observation[\"pose\"].position.x) > 1:\n self.status = \"south\"\n else:\n if np.absolute(self.observation[\"pose\"].position.x) > 0.79:\n self.status = \"sdoor\" # stuck at door\n else:\n self.status = \"tdoor\" # through door\n elif self.observation[\"pose\"].position.y < -6:\n self.status = \"escaped\"\n elif self.observation['pose'].position.z > 0.25 or self.observation['pose'].position.z < 0.15:\n self.status = \"blew\"\n else:\n self.status = \"trapped\"\n rospy.logdebug(\"Observation Get ==> {}\".format(self.observation))\n rospy.logdebug(\"End Getting Observation\\n\")\n\n return self.observation\n\n def _take_action(self, action):\n \"\"\"\n Set linear and angular speed for logger to execute.\n Args:\n action: np.array([v_lin, v_ang]).\n \"\"\"\n rospy.logdebug(\"\\nStart Taking Action\")\n cmd_vel = Twist()\n cmd_vel.linear.x = action[0]\n cmd_vel.angular.z = action[1]\n self.cmd_vel_pub.publish(cmd_vel)\n for _ in range(15): # 6.667Hz\n self.cmd_vel_pub.publish(cmd_vel)\n self.rate.sleep()\n rospy.logdebug(\"Logger take action ==> {}\".format(cmd_vel))\n rospy.logdebug(\"End Taking Action\\n\")\n\n def _compute_reward(self):\n \"\"\"\n Return:\n reward: reward in current step\n \"\"\"\n rospy.logdebug(\"\\nStart Computing Reward\")\n if self.status == \"escaped\":\n self.reward = 1\n self.success_count += 1\n self._episode_done = True\n rospy.logerr(\"\\n!!!\\nLogger Escaped !\\n!!!\")\n else:\n self.reward = -0.\n self._episode_done 
= False\n rospy.loginfo(\"\\nLogger is trapped\\n!!!\")\n rospy.logdebug(\"Stepwise Reward: {}, success count : {}\".format(self.reward, self.success_count))\n # check if steps out of range\n if self.steps > self.max_step:\n self._episode_done = True\n rospy.logwarn(\"Step: {}, \\nMax step reached, env will reset...\".format(self.steps))\n rospy.logdebug(\"End Computing Reward\\n\")\n\n return self.reward, self._episode_done\n\n def _post_information(self):\n \"\"\"\n Return:\n info: {\"status\"}\n \"\"\"\n rospy.logdebug(\"\\nStart Posting Information\")\n self.info[\"status\"] = self.status\n rospy.logdebug(\"End Posting Information\\n\")\n\n return self.info\n\n def _model_states_callback(self, data):\n self.model_states = data\n\n def _get_model_states(self):\n return self.model_states\n"
] | [
[
"numpy.random.uniform",
"numpy.absolute",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
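
The `SoloEscapeEnv` class above exposes a Gym-style `reset()`/`step()` interface over Gazebo's ROS services. As orientation, here is a minimal driver sketch against that interface; it assumes a running ROS/Gazebo session with the `logger` model spawned and the module importable as `solo_escape_task_env` (both assumptions, not part of the row above):

```python
# Hedged sketch: drive SoloEscapeEnv with random actions until the episode ends.
# Assumes ROS/Gazebo are running and the env module is on PYTHONPATH.
import numpy as np
from numpy import pi

from solo_escape_task_env import SoloEscapeEnv

env = SoloEscapeEnv()
obs, info = env.reset()
done = False
while not done:
    # action = [linear velocity, angular velocity], matching _take_action();
    # the velocity ranges here are illustrative assumptions
    action = np.array([np.random.uniform(0, 1.5), np.random.uniform(-pi, pi)])
    obs, reward, done, info = env.step(action)
print("episode finished with status:", info["status"])
```

Because `max_step` is 1000 and `_compute_reward` sets `done` once it is exceeded, the loop terminates even if the logger never escapes.
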
ArielMAJ/Data-Science-and-Machine-Learning_Bootcamp | [
"afae685c96d9fc8af0b2ee1be4d817df505c6c8d"
] | [
"Section_05/01_Boston/load.py"
] | [
"import urllib\nimport numpy as np\nimport pandas as pd\n\ndef boston():\n\tdata_url = \"http://lib.stat.cmu.edu/datasets/boston\"\n\n\traw_df = pd.read_csv(data_url, sep=\"\\s+\", skiprows=22, header=None)\n\tdata = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])\n\ttarget = raw_df.values[1::2, 2]\n\n\tfile = urllib.request.urlopen(data_url)\n\tcolumns = list(map(lambda x: x.split(' ')[1], [line.decode('utf-8') for line in file][7:20]))\n\tdata = pd.DataFrame(data=data, columns=columns)\n\tdata[\"PRICE\"] = target\n\treturn data"
] | [
[
"numpy.hstack",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
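
The `boston()` helper in this row re-assembles the raw CMU Boston housing file, in which each record spans two physical lines (hence the `[::2]`/`[1::2]` slicing and `np.hstack`), then recovers column names from the file's own header text. A hedged usage sketch, assuming network access to lib.stat.cmu.edu and that the module is imported under its file name `load`:

```python
# Hedged usage sketch for the boston() loader defined above.
from load import boston  # module name assumed from the path Section_05/01_Boston/load.py

df = boston()
print(df.shape)                 # the CMU Boston housing data has 506 rows; 13 features + PRICE
print(df["PRICE"].describe())   # target column appended by the loader
```
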
nielsleadholm/DelayLineObjectCoding | [
"f0fc07476db6bceb0c69060a9fe0411611708967"
] | [
"analyse_sim_results.py"
] | [
"from brian2 import *\nimport copy\nimport json\nimport numpy as np\nimport os\nimport yaml\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport scipy\n\ndef exclude_indices(input_array, excluding_indices):\n\t\n\tinput_array = np.ma.array(input_array, mask=False)\n\tinput_array.mask[excluding_indices] = True\n\tinput_array = input_array.compressed()\n\t\n\treturn input_array\n\n\ndef isi_analysis(params, data_set, drift_iter, seed_iter, layer):\n\n\tprint(\"\\nISI, CV and Fano-Factor analysis for layer: \" + str(layer))\n\tdata_dic = dict()\n\tdata_dic[\"times\"] = np.fromfile(\"raw_data/\" + str(seed_iter) + \"/\"\n\t\t\t\t\t\t\t\t\t+ data_set + \"_drift_iter_\" + str(drift_iter) + \"/raster_\"\n\t\t\t\t\t\t\t\t\t+ layer + \"_layer_times.txt\", dtype=np.float64, sep='\\n')\n\tdata_dic[\"ids\"] = np.fromfile(\"raw_data/\" + str(seed_iter) + \"/\"\n\t\t\t\t\t\t\t\t\t+ data_set + \"_drift_iter_\" + str(drift_iter) + \"/raster_\"\n\t\t\t\t\t\t\t\t\t+ layer + \"_layer_IDs.txt\", sep='\\n')\n\n\tall_isi = [] # Data combined from all neurons\n\tcv_of_isi = [] # Data represented for each individual neuron\n\n\t# Iterate through each neuron, masking the spike times, and then determining the ISIs\n\t# and the CV of the ISI\n\tfor ID_iter in range(0, params[layer + '_layer_size']):\n\n\t\tID_mask = data_dic[\"ids\"]==ID_iter\n\n\t\tcurrent_times = data_dic[\"times\"][ID_mask]\n\n\t\t# Take the temporal difference between two adjacent spikes\n\t\tcurrent_isi = current_times[1:] - current_times[:-1]\n\n\t\tall_isi.extend(current_isi)\n\n\t\t# Track the CV of the ISI for each individual neuron\n\t\t# NB CV is calculated using the standard deviation, while the Fano Factor\n\t\t# is calculated using the variance\n\t\tcv_of_isi.append(np.std(current_isi)/(np.mean(current_isi)+0.00001))\n\n\n\t# Plot the ISI distribution across all neurons\n\tplt.hist(all_isi)\n\tplt.xlabel(\"Interspike Interval (ms)\")\n\tplt.xlim(0)\n\tplt.title(\"ISI Distribution in Layer : \" + layer)\n\tplt.savefig(\"analysis_results/isi_distribution_\" + layer + \"_\" + data_set + \".png\", dpi=300)\n\tplt.clf()\n\n\t# Plot the distribution of CV of the ISI across the network\n\tplt.hist(cv_of_isi)\n\tplt.xlabel(\"CV of the Interspike Interval\")\n\tplt.xlim(0,2)\n\tplt.title(\"CV of the ISI in Layer : \" + layer)\n\tplt.savefig(\"analysis_results/cv_of_isi_distribution_\" + layer + \"_\" + data_set + \".png\", dpi=300)\n\tplt.clf()\n\n\tfano_factor_layer = calculate_fano_factor(params, data_dic, layer)\n\n\t# Plot the distribution of Fano Factor across the network\n\tplt.hist(cv_of_isi)\n\tplt.xlabel(\"Fano Factor\")\n\tplt.xlim(0,2)\n\tplt.title(\"Distribution of Fano Factor in Layer : \" + layer)\n\tplt.savefig(\"analysis_results/fano_factor_\" + layer + \"_\" + data_set + \".png\", dpi=300)\n\tplt.clf()\n\ndef main_fr_analysis(params, data_set, drift_iter, seed_iter, excluding_indices=None, save_fig=True, layer=\"output\"):\n\t'''\n\tVisualize the firing rates of neurons from a simulation, as well as to evaluate the\n\tinformation content in the firing rates of the neurons\n\n\texcluding_indices (None or list) : if list, exclude these indices when calculating \n\t\tmean firing rate; this can be useful e.g. 
when performing a binary search if it is\n\t\tknown that some neurons do not have delay lines that align with any of the inputs\n\t'''\n\tdata_dic = dict()\n\tdata_dic[\"times\"] = np.fromfile(\"raw_data/\" + str(seed_iter) + \"/\"\n\t\t\t\t\t\t\t\t\t+ data_set + \"_drift_iter_\" + str(drift_iter) + \"/raster_\" + layer + \"_layer_times.txt\", dtype=np.float64, sep='\\n')\n\tdata_dic[\"ids\"] = np.fromfile(\"raw_data/\" + str(seed_iter) + \"/\"\n\t\t\t\t\t\t\t\t\t+ data_set + \"_drift_iter_\" + str(drift_iter) + \"/raster_\" + layer + \"_layer_IDs.txt\", sep='\\n')\n\n\n\t# == INFORMATION THEORY analysis ==\n\n\tFR_array, binary_activation_array = extract_firing_rates(params, data_dic, layer)\n\n\t# Information theory based on whether a neuron fires or not during presentation\n\tinformation_theory_results_binary_1st = binary_information_theory_calculation(params, binary_activation_array, stimulus_info_calc=0)\n\tinformation_theory_results_binary_2nd = binary_information_theory_calculation(params, binary_activation_array, stimulus_info_calc=1)\n\tUnitTest_binary_information_theory_calculation(params, binary_activation_array, layer)\n\n\t# Information theory based on a neurons firing rate\n\t(mid_threshold) = information_theory_discretize(params, FR_array, layer)\n\tinformation_theory_counts = information_theory_counting(params, FR_array, mid_threshold, layer)\n\tinformation_theory_results_fr = fr_information_theory_calculation(params, information_theory_counts)\n\tUnitTest_FR_information_theory_calculation(params, information_theory_counts)\n\n\n\t# == RAW FIRING RATE analysis ==\n\n\tmean_FR_list = []\n\n\t# Iterate through the stimuli to extract mean firing rates\n\tfor stimuli_iter in range(len(params[\"stimuli_names\"])):\n\n\t\tmean_FR_list.append(find_mean_firing_rates(params, FR_array[stimuli_iter], layer))\n\n\t# Take the difference in the mean firing rates\n\tmean_FR_difference = mean_FR_list[1] - mean_FR_list[0]\n\n\t# Sort FR by the difference in firing rates\n\tsorting_indices = np.argsort(mean_FR_difference)\n\tmean_FR_difference = np.take_along_axis(mean_FR_difference, sorting_indices, axis=0)\n\tstim1_FR = np.take_along_axis(mean_FR_list[1], sorting_indices, axis=0)\n\tstim2_FR = np.take_along_axis(mean_FR_list[0], sorting_indices, axis=0)\n\n\t# Mean firing rates regardless of the stimulus\n\tmean_rate_across_stim = np.mean(np.asarray(mean_FR_list), axis=0)\n\n\n\t# == PLOT firing rate results ==\n\n\t#Plot the results\n\tplt.bar(np.arange(0,(params[layer + '_layer_size'])), height=stim1_FR+stim2_FR, bottom=-stim2_FR, alpha=0.5, color=\"dodgerblue\")\n\tplt.scatter(np.arange(0,(params[layer + '_layer_size'])), mean_FR_difference, marker='x', color='k')\n\tplt.plot(np.arange(0,(params[layer + '_layer_size'])),np.zeros((params[layer + '_layer_size'])),linestyle='dashed', color='k') # Plot a horizontal line to indicate 0\n\tplt.ylabel(\"Difference in Firing Rate (Hz)\")\n\tplt.xlabel(\"Neurons, Sorted by Firing Rate Difference\")\n\tplt.title(data_set + \" : Difference in Firing Rates Across Stimuli, Layer \" + layer)\n\tplt.ylim(-15, 15)\n\tif save_fig:\n\t\tplt.savefig(\"analysis_results/difference_in_FR_rates_\" + layer + \"_\" + data_set + \".png\", dpi=300)\n\tplt.clf()\n\n\n\t# == MASK any required neurons known not to carry information (i.e. 
in hand-crafted networks) ==\n\n\tif excluding_indices is not None:\n\n\t\tmean_rate_across_stim = exclude_indices(mean_rate_across_stim, excluding_indices)\n\t\tinformation_theory_results_fr = exclude_indices(information_theory_results_fr, excluding_indices)\n\t\tinformation_theory_results_binary_1st = exclude_indices(information_theory_results_binary_1st, excluding_indices)\n\t\tinformation_theory_results_binary_2nd = exclude_indices(information_theory_results_binary_2nd, excluding_indices)\n\n\treturn (information_theory_results_fr, information_theory_results_binary_1st,\n\t\t\tinformation_theory_results_binary_2nd, mean_rate_across_stim)\n\ndef calculate_fano_factor(params, data_dic, layer):\n\n\tno_math_error = 0.000000000001\n\n\tduration_of_simulation = (params['number_of_eval_presentations']\n\t\t\t \t\t\t\t * params['duration_of_presentations']\n\t\t\t \t\t\t\t * len(params['stimuli_names']))\n\n\t# Determine the upper limit of simulation time used for calculating the Fano Factor in 1 second bins\n\tfano_duration = (duration_of_simulation // 1000) * 1000 \n\n\tprint(\"\\nCalculating Fano Factor in 1 second bins over a simulation period of seconds: \" + str(fano_duration))\n\n\tassert (fano_duration % 1000) == 0, \"Duration is \" + str(fano_duration) + \" but is not a multiple of 1000ms\"\n\n\tfano_factor = []\n\n\t# Iterate through the neurons\n\tfor ID_iter in range(params[layer + '_layer_size']):\n\n\t\tID_window_rates = []\n\n\t\t# Iterate through the temporal windows \n\t\tfor presentation_iter in range(0, fano_duration, 1000):\n\n\t\t\tmask = np.nonzero((data_dic[\"times\"]>=presentation_iter) & (data_dic[\"times\"]<(presentation_iter+1000)))\n\n\t\t\t# NB that because we're looking at 1000msec windows, the number of spikes is equivalent to the rate in Hz\n\t\t\tID_window_rates.append(np.sum(data_dic[\"ids\"][mask]==ID_iter))\n\n\t\tfano_factor.append(np.var(ID_window_rates)/(np.mean(ID_window_rates)+no_math_error))\n\n\treturn fano_factor\n\n\n\ndef extract_firing_rates(params, data_dic, layer):\n\n\t#Initialize a vector to hold the firing rates of each neuron\n\tFR_array = np.zeros([len(params[\"stimuli_names\"]), params[layer + '_layer_size'], params['number_of_eval_presentations']])\n\tbinary_activation_array = np.zeros([len(params[\"stimuli_names\"]), params[layer + '_layer_size']])\n\tfano_factor = np.zeros([len(params[\"stimuli_names\"]), params[layer + '_layer_size']])\n\n\tfor presentation_iter in range(0,params['number_of_eval_presentations']):\n\n\t\tfor stimuli_iter in range(len(params[\"stimuli_names\"])):\n\n\t\t\t#Apply a mask to the times data to extract spikes in the period of interest\n\t\t\tmask = ((data_dic[\"times\"] > ((len(params[\"stimuli_names\"])*presentation_iter+stimuli_iter)*params['duration_of_presentations'])) & \n\t\t\t\t(data_dic[\"times\"] <= ((len(params[\"stimuli_names\"])*presentation_iter+stimuli_iter+1)*params['duration_of_presentations'])))\n\n\t\t\t#Iterate through each neuron ID, counting the total number of appearances in the masked-array\n\t\t\tfor ID_iter in range(0, params[layer + '_layer_size']):\n\n\t\t\t\tFR_array[stimuli_iter][ID_iter][presentation_iter] = 
np.sum(data_dic[\"ids\"][mask]==ID_iter)\n\n\t\t\t#Divide these values by the duration of the presentation, in seconds\n\t\t\tFR_array[stimuli_iter,:,presentation_iter] = FR_array[stimuli_iter,:,presentation_iter] / (params['duration_of_presentations']/1000)\n\n\t\n\t# print(\"\\nNew extraction\")\n\n\tfor stimuli_iter in range(len(params[\"stimuli_names\"])):\n\n\t\t# print(\"Stimulus specific array\")\n\t\t# print(FR_array[stimuli_iter,:,:])\n\t\t# print(np.shape(FR_array[stimuli_iter,:,:]))\n\t\t# print(np.shape(np.sum(FR_array[stimuli_iter,:,:]>0,axis=1)))\n\t\tbinary_activation_array[stimuli_iter,:] = np.sum(FR_array[stimuli_iter,:,:]>0,axis=1)\n\n\t# print(\"Final arrays\")\n\t# print(FR_array)\n\t# print(binary_activation_array)\n\n\treturn FR_array, binary_activation_array\n\n\ndef find_mean_firing_rates(params, FR_array, layer):\n\tmean_FR = np.zeros(params[layer + '_layer_size'])\n\n\tmean_FR = np.sum(FR_array, axis = 1)\n\tmean_FR = mean_FR / params['number_of_eval_presentations']\n\n\treturn mean_FR\n\n\n#Find the firing rate thresholds that determine if a firing rate is low, medium or high\ndef information_theory_discretize(params, FR_array, layer):\n\t#Note that as used in the Hutter thesis (2018), each neuron has its own thresholds\n\t#These are based on the minimal and maximal firing rate obtained across all presentations, the difference of which is divided into three equal bins\n\n\t#Vector of minimum firing rates for each neuron (across presentations of all stimuli)\n\t#Minimum is first taken for each particular stimulus (and so iterating through them), and then across all stimuli\n\ttemp_min_array = np.zeros([params[layer + '_layer_size'], len(params[\"stimuli_names\"])])\n\tfor stimuli_iter in range(0, len(params[\"stimuli_names\"])):\n\t\ttemp_min_array[:, stimuli_iter] = np.amin(FR_array[stimuli_iter], axis=1)\n\tmin_vector = np.amin(temp_min_array, axis = 1)\n\t# print(\"Minimum firing rates:\")\n\t# print(min_vector)\n\n\t#Vector of maximum firing rates for each neuron (across presentations of all stimuli)\n\ttemp_max_array = np.zeros([params[layer + '_layer_size'], len(params[\"stimuli_names\"])])\n\tfor stimuli_iter in range(0, len(params[\"stimuli_names\"])):\n\t\ttemp_max_array[:, stimuli_iter] = np.amax(FR_array[stimuli_iter], axis=1)\n\tmax_vector = np.amax(temp_max_array, axis = 1)\n\t# print(\"Maximum firing rates:\")\n\t# print(max_vector)\n\n\t#Generate the vector containing the thresholds for separating low-medium and medium-high for each neuron\n\tmid_threshold = (max_vector - min_vector)*(0.5)\n\t# upper_threshold = (max_vector - min_vector)*(2/3)\n\n\t# print(\"Lower and upper thresholds\")\n\t# print(lower_threshold)\n\t# print(upper_threshold)\n\n\treturn (mid_threshold)\n\n\ndef information_theory_counting(params, FR_array, mid_threshold, layer):\n\t#Information can be encoded in firing rates by discretizing the rates into e.g. 
low, medium, and high rates, which will be done here\n\n\tinformation_theory_dic = dict()\t\n\t#For each stimulus, find the number of times that a particular neuron's firing rate was low, medium, or high\n\tfor stimuli_iter in range(0, len(params[\"stimuli_names\"])):\n\t\tfiring_rate_counter = np.zeros([params[layer + '_layer_size'], 2]) #Array to store these counts\n\n\t\t#Apply a mask such that all firing rates relative to a particula threshold return a 1, then sum their values\n\t\tfiring_rate_counter[:, 0] = np.sum(FR_array[stimuli_iter]<=mid_threshold[:, None], axis=1) #lower counts\n\t\tfiring_rate_counter[:, 1] = np.sum(FR_array[stimuli_iter]>mid_threshold[:, None], axis=1) #upper counts\n\t\t# firing_rate_counter[:, 1] = params['number_of_eval_presentations'] - (firing_rate_counter[:, 0] + firing_rate_counter[:, 2]) #mid firing rate counts\n\n\t\t#Check that all of the separate counts sum appropriately\n\t\tassert np.all((firing_rate_counter[:, 0]+firing_rate_counter[:, 1]) == (np.ones(params[layer + '_layer_size'])*params['number_of_eval_presentations']))\n\n\t\tinformation_theory_dic[stimuli_iter] = firing_rate_counter\n\n\t#Return an array containing the number of presentations where the neuron activity was low, medium, and high respectively\n\treturn information_theory_dic\n\n\ndef fr_information_theory_calculation(params, information_theory_dic):\n\t#Information_theory_dic contains an array for each stimulus presentation\n\t#This array contains the number of counts of low and high firing rates for each neuron\n\t# NB that in this code-base, the low and high e.g. firing rates are calculated separately (this is because\n\t# the older code actually had three different possible responses - low, medium or high)\n\t# For binary_information_theory_calculation, because there were always only two responses, the probability can\n\t# be found as p(R_b) = 1 - p(R_a)\n\n\tno_math_error = 0.000000000001 #Prevent division by zero\n\n\n\t# *** Initially find the result for stimulus 1 presentation\n\t# print(\"\\nFiring rate info theory\")\n\t# print(information_theory_dic)\n\n\n\t#The conditional probabilities of a particular neuron having low or high activity for a particular stimulus\n\tconditional_prob_array = information_theory_dic[0]/params['number_of_eval_presentations']\n\t# print(\"Conditional\")\n\t# print(np.shape(conditional_prob_array))\n\t# print(conditional_prob_array[0:3, 0:3])\n\n\t#The marginal propabailities of a particular neuron having low or high activity \n\tmarginal_prob_array = (information_theory_dic[0]+information_theory_dic[1])/(params['number_of_eval_presentations']*len(params[\"stimuli_names\"]))\n\t# print(\"Marginal\")\n\t# print(np.shape(marginal_prob_array))\n\t# print(marginal_prob_array)\n\t# print(marginal_prob_array[0:3, 0:3])\n\n\tinformation_low = np.multiply(conditional_prob_array[:, 0], np.log2(np.divide(conditional_prob_array[:, 0], marginal_prob_array[:, 0]+no_math_error)+no_math_error))\n\t#information_mid = np.multiply(conditional_prob_array[:, 1], np.log2(np.divide(conditional_prob_array[:, 1], marginal_prob_array[:, 1]+no_math_error)+no_math_error))\n\tinformation_high = np.multiply(conditional_prob_array[:, 1], np.log2(np.divide(conditional_prob_array[:, 1], marginal_prob_array[:, 1]+no_math_error)+no_math_error))\n\t\n\n\tinformation_theory_results = information_low + information_high\n\t# print(\"FR information results\")\n\t# print(information_theory_results)\n\n\tassert np.all(information_theory_results>=-(10**(-8))), \"Some information 
theory values are negative!\"\n\n\tinformation_theory_results = np.clip(information_theory_results, 0, 1)\n\n\t# print(\"Clipped info results:\")\n\t# print(information_theory_results)\n\n\n\treturn information_theory_results\n\n\ndef binary_information_theory_calculation(params, information_theory_data, stimulus_info_calc=0):\n\t'''\n\tstimulus_info_calc should be set to 0 or 1, depending on whether information should specifically\n\tbe calculated for the first or second stimulus\n\t'''\n\n\t#Information_theory_data is indexed by [dataset_iter, assembly_iter]; thus the row indicates which stimulus was presented, and the \n\t#column value indicates how many presentations were associated with at least one activation of that assembly\n\n\tno_math_error = 0.000000000001 #Prevent division by zero\n\n\t# print(information_theory_data)\n\n\t#The probabilities of a particular assembly being active for each stimulus\n\tconditional_prob_array = information_theory_data/params['number_of_eval_presentations']\n\t# print(\"Conditional\")\n\t# print(np.shape(conditional_prob_array))\n\t# print(conditional_prob_array)\n\n\t# The marginal probability of a particular Response, regardless of the Stimulus\n\tmarginal_prob_array = np.sum(information_theory_data, axis=0)/(params['number_of_eval_presentations']*len(params[\"stimuli_names\"]))\n\t# print(\"Marginal\")\n\t# print(np.shape(marginal_prob_array))\n\t# print(marginal_prob_array)\n\n\t# print(\"\\nThe inverse probs (i.e. probability of *no* response)\")\n\t# print(\"Conditional\")\n\t# print(1-conditional_prob_array)\n\t# print(\"Marginal\")\n\t# print(1-marginal_prob_array)\n\n\t# Calculate information theory for the first stimulus (i.e. indexed with 0) across all of the neurons\n\t# print(\"\\nDiv values\")\n\t# print(np.divide(conditional_prob_array[0, :], marginal_prob_array+no_math_error))\n\t# print(np.divide(1-conditional_prob_array[0, :], (1-marginal_prob_array+no_math_error)))\n\n\t# print(\"\\nLog values\")\n\t# print(np.log2(np.divide(conditional_prob_array[0, :], marginal_prob_array+no_math_error)+no_math_error))\n\t# print(np.log2(np.divide(1-conditional_prob_array[0, :], (1-marginal_prob_array+no_math_error))+no_math_error))\n\n\tinformation1 = np.multiply(conditional_prob_array[stimulus_info_calc, :], np.log2(np.divide(conditional_prob_array[stimulus_info_calc, :], marginal_prob_array+no_math_error)+no_math_error))\n\t# By taking (1-p) where p is the above probabilities, we look at the probabilities for the alternative response (i.e. 
no spike)\n\tinformation2 = np.multiply(1-conditional_prob_array[stimulus_info_calc, :], np.log2(np.divide(1-conditional_prob_array[stimulus_info_calc, :], (1-marginal_prob_array+no_math_error))+no_math_error))\n\n\tinformation_theory_results = information1+information2\n\t# print(\"\\nFinal info results\")\n\t# print(information_theory_results)\n\n\t# print(\"\\nRounded info results\")\n\t# print(np.round(information_theory_results, decimals=8))\n\n\t# print(\"Checking values are greater than : \" + str(-(10**(-8))))\n\n\n\tassert np.all(information_theory_results>=-(10**(-8))), \"Some information theory values are negative!\"\n\n\tinformation_theory_results = np.clip(information_theory_results, 0, 1)\n\n\t# print(\"Clipped info results:\")\n\t# print(information_theory_results)\n\n\n\treturn information_theory_results\n\n\n# Test information theory calculation by analysing idealised data\ndef UnitTest_FR_information_theory_calculation(params, information_theory_dic):\n\t\ttemp_information_theory_data = information_theory_dic # Copy of information theory data\n\n\t\t# print(np.shape(information_theory_dic))\n\t\t# print(information_theory_dic)\n\t\t# print(np.shape(temp_information_theory_data[0][:,0]))\n\t\t# # exit()\n\n\t\t# Set every assembly (second dimension) to have a high firing rate for stimulus 1 only\n\t\ttemp_information_theory_data[0][:,0] = 0\n\t\ttemp_information_theory_data[0][:,1] = params['number_of_eval_presentations']\n\t\ttemp_information_theory_data[1][:,0] = params['number_of_eval_presentations']\n\t\ttemp_information_theory_data[1][:,1] = 0\n\t\t#print(temp_information_theory_data)\n\t\ttemp_information_theory_results = fr_information_theory_calculation(params, temp_information_theory_data)\n\t\t#print(temp_information_theory_results)\n\t\tassert np.sum(temp_information_theory_results>=0.80) == len(temp_information_theory_results), \"Unit Test Failure: Idealized data does not have perfect information.\"\n\n\t\t# Set every assembly to be highly active for presentation of stimulus 2 only\n\t\ttemp_information_theory_data[0][:,0] = params['number_of_eval_presentations']\n\t\ttemp_information_theory_data[0][:,1] = 0\n\t\ttemp_information_theory_data[1][:,0] = 0\n\t\ttemp_information_theory_data[1][:,1] = params['number_of_eval_presentations']\n\t\ttemp_information_theory_results = fr_information_theory_calculation(params, temp_information_theory_data)\n\t\tassert np.sum(temp_information_theory_results>=0.80) == len(temp_information_theory_results), \"Unit Test Failure: Idealized data does not have perfect information.\"\n\n\t\t# Test in a case of no information (activation for either stimulus equally likely)\n\t\ttemp_information_theory_data[0][:,0] = params['number_of_eval_presentations']/2\n\t\ttemp_information_theory_data[0][:,1] = params['number_of_eval_presentations']/2\n\t\ttemp_information_theory_data[1][:,0] = params['number_of_eval_presentations']/2\n\t\ttemp_information_theory_data[1][:,1] = params['number_of_eval_presentations']/2\n\t\ttemp_information_theory_results = fr_information_theory_calculation(params, temp_information_theory_data)\n\t\tassert np.sum(temp_information_theory_results>=0.80) == 0, \"Unit Test Failure: Artificially uninformative data still has information.\"\n\n\t\tprint(\"Unit tests passed: Firing rate information theory.\")\n\n\t\treturn None\n\n\n# Test information theory calculation by analysing idealised data\ndef UnitTest_binary_information_theory_calculation(params, information_theory_data, 
layer):\n\t\ttemp_information_theory_data = np.asarray(information_theory_data) #Copy of information theory data\n\n\t\t#Set every neuron (second dimension) to be active for every presentation of stimulus 1 (first dimension) only\n\t\ttemp_information_theory_data[0, :] = params['number_of_eval_presentations']\n\t\ttemp_information_theory_data[1, :] = 0\n\t\ttemp_information_theory_results = binary_information_theory_calculation(params, temp_information_theory_data)\n\t\tassert np.sum(temp_information_theory_results>=0.80) == len(temp_information_theory_results), \"Unit Test Failure: Idealized data does not have perfect information.\"\n\n\t\t#Set half the neurons to always be active for stimulus 1, and the other half for stimulus 2\n\t\ttemp_information_theory_data[0, :int(params[layer + \"_layer_size\"]/2)] = params['number_of_eval_presentations']\n\t\ttemp_information_theory_data[0, int(params[layer + \"_layer_size\"]/2):] = 0\n\t\ttemp_information_theory_data[1, :int(params[layer + \"_layer_size\"]/2)] = 0\n\t\ttemp_information_theory_data[1, int(params[layer + \"_layer_size\"]/2):] = params['number_of_eval_presentations']\n\t\ttemp_information_theory_results = binary_information_theory_calculation(params, temp_information_theory_data)\n\t\tassert np.sum(temp_information_theory_results>=0.80) == len(temp_information_theory_results), \"Unit Test Failure: Idealized data does not have perfect information.\"\n\n\n\t\t#Set every neuron to be active for presentation of stimulus 2 only\n\t\ttemp_information_theory_data[0, :] = 0\n\t\ttemp_information_theory_data[1, :] = params['number_of_eval_presentations']\n\t\ttemp_information_theory_results = binary_information_theory_calculation(params, temp_information_theory_data)\n\t\tassert np.sum(temp_information_theory_results>=0.80) == len(temp_information_theory_results), \"Unit Test Failure: Idealized data does not have perfect information.\"\n\n\t\t# Test in a case of no information (activation for either stimulus equally likely)\n\t\ttemp_information_theory_data[0, :] = params['number_of_eval_presentations']/2\n\t\ttemp_information_theory_data[1, :] = params['number_of_eval_presentations']/2\n\t\ttemp_information_theory_results = binary_information_theory_calculation(params, temp_information_theory_data)\n\t\tassert np.sum(temp_information_theory_results>=0.80) == 0, \"Unit Test Failure: Artificially uninformative data still has information.\"\n\n\t\t# Test in a case of no information (activate to every presentation)\n\t\ttemp_information_theory_data[0, :] = params['number_of_eval_presentations']\n\t\ttemp_information_theory_data[1, :] = params['number_of_eval_presentations']\n\t\ttemp_information_theory_results = binary_information_theory_calculation(params, temp_information_theory_data)\n\t\tassert np.sum(temp_information_theory_results>=0.80) == 0, \"Unit Test Failure: Artificially uninformative data still has information.\"\n\n\t\tprint(\"Unit tests passed: Binary activity information theory.\")\n\n\t\treturn None\n\n\ndef perform_primary_analyses(stimuli_params, layer):\n\n\n\t# List of evaluation conditions to look at data from (excludes data collected during training)\n\teval_data_list_with_drift = [\n\t\t\"untrained_spikepair_inputs\",\n\t\t#\"untrained_alternating_inputs\",\n\t\t\"spikepair_trained_spikepair_inputs\",\n\t\t#\"spikepair_trained_alternating_inputs\"\n\t\t]\n\n\tinformation_theory_fr_dic = {}\n\tinformation_theory_binary_dic_1st = {}\n\tinformation_theory_binary_dic_2nd = {}\n\tmean_rate_dic = {}\n\n\tfor data_set in 
eval_data_list_with_drift:\n\n\t\tinformation_theory_fr_dic[data_set] = []\n\t\tmean_rate_dic[data_set] = []\n\t\tinformation_theory_binary_dic_1st[data_set] = []\n\t\tinformation_theory_binary_dic_2nd[data_set] = []\n\n\t\tfor seed_iter in stimuli_params[\"seeds_list\"]:\n\n\t\t\tfor drift_iter in stimuli_params[\"drift_coef_list\"]:\n\n\t\t\t\t# print(\"\\n=====Temporarily setting drift iter=====\")\n\t\t\t\t# drift_iter = \"30_WMAX_1.8\"\n\n\t\t\t\tinfo_fr_temp, info_binary_temp_1st, info_binary_temp_2nd, rate_temp = main_fr_analysis(stimuli_params, data_set,\n\t\t\t\t\t\tdrift_iter, seed_iter, layer=layer)\n\t\t\t\tinformation_theory_fr_dic[data_set].append(info_fr_temp)\n\t\t\t\tinformation_theory_binary_dic_1st[data_set].append(info_binary_temp_1st)\n\t\t\t\tinformation_theory_binary_dic_2nd[data_set].append(info_binary_temp_2nd)\n\t\t\t\tmean_rate_dic[data_set].append(rate_temp)\n\n\t\t\t\tisi_analysis(stimuli_params, data_set, drift_iter, seed_iter, layer)\n\n\n\t# ==== PLOT RESULTS ====\n\n\t# Information theory for binary values\n\tfor key, val in information_theory_binary_dic_1st.items():\n\t\t# Histogram\n\t\tplt.hist(val, bins=np.array([0, 0.2, 0.4, 0.6, 0.8, 1.0]), alpha=0.5, label=key)\n\t\tplt.legend()\n\t\tplt.xlabel(\"Information (bits)\")\n\t\tplt.ylim(0, stimuli_params[\"output_layer_size\"])\n\t\tplt.title(\"Information in Binary Activity - 1st stimulus\")\n\t\tplt.savefig(\"analysis_results/hist_info_1_binary_\" + layer + \"_\" + key + \".png\", dpi=300)\n\t\tplt.clf()\n\n\t\t# Display indices associated with the most information\n\t\tprint(\"\\nOn 1st object for : \" + key)\n\t\tind = np.argpartition(val[0], -5)[-5:]\n\t\ttop_vals = val[0][ind]\n\t\tsorting_top = np.argsort(val[0][ind])\n\t\tprint(\"Top neuron IDs:\")\n\t\tprint(np.flip(ind[sorting_top]))\n\t\tprint(\"Associated info:\")\n\t\tprint(np.flip(top_vals[sorting_top]))\n\n\t\t# Ranked information\n\t\t# Sorts the array (ascending) and then reverses the order\n\t\tplt.plot(np.arange(stimuli_params[\"output_layer_size\"]), np.flip(np.sort(val))[0,:],\n\t\t\t\t\tcolor=\"dodgerblue\", alpha=0.5, label=key)\n\t\tplt.legend()\n\t\tplt.xlabel(\"Cell Rank\")\n\t\tplt.ylabel(\"Information (bits)\")\n\t\tplt.ylim(0, 1)\n\t\tplt.xlim(0, stimuli_params[\"output_layer_size\"])\n\t\tplt.title(\"Information in Binary Activity - 1st stimulus\")\n\t\tplt.savefig(\"analysis_results/ranked_info_1_binary_\" + layer + \"_\" + key + \".png\", dpi=300)\n\t\tplt.clf()\n\n\tfor key, val in information_theory_binary_dic_2nd.items():\n\t\t# Histogram\n\t\tplt.hist(val, bins=np.array([0, 0.2, 0.4, 0.6, 0.8, 1.0]), alpha=0.5, label=key)\n\t\tplt.legend()\n\t\tplt.xlabel(\"Information (bits)\")\n\t\tplt.ylim(0, stimuli_params[\"output_layer_size\"])\n\t\tplt.title(\"Information in Binary Activity - 2nd stimulus\")\n\t\tplt.savefig(\"analysis_results/hist_info_2_binary_\" + layer + \"_\" + key + \".png\", dpi=300)\n\t\tplt.clf()\n\n\t\t# Display indices associated with the most information\n\t\tprint(\"\\nOn 2nd object for : \" + key)\n\t\tind = np.argpartition(val[0], -5)[-5:]\n\t\ttop_vals = val[0][ind]\n\t\tsorting_top = np.argsort(val[0][ind])\n\t\tprint(\"Top neuron IDs:\")\n\t\tprint(np.flip(ind[sorting_top]))\n\t\tprint(\"Associated info:\")\n\t\tprint(np.flip(top_vals[sorting_top]))\n\n\t\t# Ranked information\n\t\t# Sorts the array (ascending) and then reverses the order\n\t\tplt.plot(np.arange(stimuli_params[\"output_layer_size\"]), np.flip(np.sort(val))[0,:],\n\t\t\t\t\tcolor=\"dodgerblue\", alpha=0.5, 
label=key)\n\t\tplt.legend()\n\t\tplt.xlabel(\"Cell Rank\")\n\t\tplt.ylabel(\"Information (bits)\")\n\t\tplt.ylim(0, 1)\n\t\tplt.xlim(0, stimuli_params[\"output_layer_size\"])\n\t\tplt.title(\"Information in Binary Activity - 2nd stimulus\")\n\t\tplt.savefig(\"analysis_results/ranked_info_2_binary_\" + layer + \"_\" + key + \".png\", dpi=300)\n\t\tplt.clf()\n\n\t# Mean rates\n\tfor key, val in mean_rate_dic.items():\n\t\tplt.hist(val, alpha=0.3, label=key)\n\tplt.legend()\n\tplt.xlabel(\"Mean FR (Hz)\")\n\tplt.title(\"Distributions of Firing Rates Across Stimuli\")\n\tplt.savefig(\"analysis_results/mean_FR.png\", dpi=300)\n\tplt.clf()\n\n\n\t# Typical sparsity in a presentation window\n\tfor key, val in mean_rate_dic.items():\n\t\tplt.hist(np.minimum(np.asarray(val)*stimuli_params[\"duration_of_presentations\"]/1000,1)[0], \n\t\t\t\t bins=np.array([0, 0.2, 0.4, 0.6, 0.8, 1.0]), alpha=0.3, label=key)\n\tplt.legend()\n\tplt.xlabel(\"Average Proportion of Stimulus Windows With a Spike\")\n\tplt.title(\"Distributions of Sparsity Across Stimuli\")\n\tplt.savefig(\"analysis_results/temporal_sparsity\" + layer + \".png\", dpi=300)\n\tplt.clf()\n\n\ndef dummy_visualise_cellular_properties():\n\t'''\n\tBasic provisional code to test loading of saved\n\tcellular property data such as membrane voltage\n\t'''\n\n\t# # Test loading of membrane variables\n\t# drift_iter = 32\n\t# seed_iter = 0\n\t# fname = (str(seed_iter) + \"/untrained_spikepair_inputs\"\n\t# \t\t + \"_drift_iter_\" + str(drift_iter) + \"/g_e_output_layer_both_aligned\")\n\t# variable = np.load(\"raw_data/\" + fname + \"_values.npy\")\n\t# times = np.load(\"raw_data/\" + fname + \"_times.npy\")\n\n\ndef weights_across_epochs(stimuli_params, network_params, data_set, seed_iter, drift_iter):\n\t'''\n\tVisualization of the distribtuion of all weights across epochs\n\t'''\n\n\n\tinitial_weights = np.loadtxt(\"weights/\" + str(seed_iter) + \"/rand_weights.txt\")\n\n\tlearning_weights_list = np.loadtxt(\"weights/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/weights_over_epochs_vals.txt\")\n\n\n\tlearning_weights_list = np.insert(learning_weights_list, 0, initial_weights, axis=0)\n\n\tepoch_markers_str = np.loadtxt(\"raw_data/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/epoch_markers.txt\")\n\tepoch_markers_str = np.insert(epoch_markers_str, 0, 0)\n\n\tepoch_markers_int = list(range(stimuli_params[\"num_intervals_for_weight_saving\"]+1))\n\n\tdf_weights_across_epochs = pd.DataFrame(np.transpose(learning_weights_list))\n\tdf_weights_across_epochs = pd.melt(df_weights_across_epochs, var_name=\"epoch\", value_name=\"weight\")\n\n\tsns.violinplot(x=\"epoch\", y=\"weight\", data=df_weights_across_epochs,\n\t\t\t\t scale=\"area\", color=\"dodgerblue\", cut=0, inner=None)\n\t# see more options at https://seaborn.pydata.org/generated/seaborn.violinplot.html\n\t# including options for \"inner\"\n\n\txlabel(\"Duration of Training (sec)\")\n\txticks(epoch_markers_int, epoch_markers_str)\n\txlim(-0.5)\n\tylabel(\"Weights\")\n\ttitle(\"Weights across epochs of STDP training\")\n\tsavefig(\"analysis_results/\" + str(seed_iter) + \"/violins_weights_across_epochs.png\", dpi=300)\n\tclf()\n\n\ndef weights_across_epochs_by_alignment(stimuli_params, network_params, data_set, seed_iter, drift_iter):\n\t'''\n\tDistribution of weights across epochs, broken down by whether the input has a delay-line alignment\n\t'''\n\n\t# Load alignment results\n\twith 
open(\"raw_data/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/alignment_results.json\") as f:\n\t\talignment_results = json.load(f)\n\n\tall_aligned = copy.copy(alignment_results[\"upright_aligned_weights\"])\n\tall_aligned.extend(alignment_results[\"inverted_aligned_weights\"])\n\tall_aligned.extend(alignment_results[\"both_aligned_weights\"])\n\n\tassert ((len(all_aligned) + len(alignment_results[\"non_aligned_weights\"]))\n\t\t\t== stimuli_params[\"input_layer_size\"]*stimuli_params[\"output_layer_size\"]), \"Total number of alignment-checked weight indices should equal total number of weights\"\n\n\t# Load weights across epochs\n\tinitial_weights = np.loadtxt(\"weights/\" + str(seed_iter) + \"/rand_weights.txt\")\n\n\tlearning_weights_list = np.loadtxt(\"weights/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/weights_over_epochs_vals.txt\")\n\n\n\tlearning_weights_list = np.insert(learning_weights_list, 0, initial_weights, axis=0)\n\n\tepoch_markers_str = np.loadtxt(\"raw_data/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/epoch_markers.txt\")\n\tepoch_markers_str = np.insert(epoch_markers_str, 0, 0)\n\n\tepoch_markers_int = list(range(stimuli_params[\"num_intervals_for_weight_saving\"]+1))\n\n\n\t# Mask the weights for the different conditions\n\tlearning_weights_list_aligned = learning_weights_list[:,all_aligned]\n\tlearning_weights_list_nonaligned = learning_weights_list[:,alignment_results[\"non_aligned_weights\"]]\n\n\t# Process results\n\tdf_weights_across_epochs_aligned = pd.DataFrame(np.transpose(learning_weights_list_aligned))\n\tdf_weights_across_epochs_aligned = pd.melt(df_weights_across_epochs_aligned, var_name=\"epoch\", value_name=\"weight\")\n\n\tdf_weights_across_epochs_nonaligned = pd.DataFrame(np.transpose(learning_weights_list_nonaligned))\n\tdf_weights_across_epochs_nonaligned = pd.melt(df_weights_across_epochs_nonaligned, var_name=\"epoch\", value_name=\"weight\")\n\n\n\t# Plot aligned results\n\tsns.violinplot(x=\"epoch\", y=\"weight\", data=df_weights_across_epochs_aligned,\n\t\t\t\t scale=\"area\", color=\"dodgerblue\", cut=0, inner=None)\n\txlabel(\"Duration of Training (sec)\")\n\txticks(epoch_markers_int, epoch_markers_str)\n\txlim(-0.5)\n\tylabel(\"Weights\")\n\ttitle(\"Weights across epochs of STDP training - Aligned\")\n\tsavefig(\"analysis_results/\" + str(seed_iter) + \"/violins_weights_across_epochs_aligned.png\", dpi=300)\n\tclf()\n\n\t# Plot non-aligned results\n\tsns.violinplot(x=\"epoch\", y=\"weight\", data=df_weights_across_epochs_nonaligned,\n\t\t\t\t scale=\"area\", color=\"crimson\", cut=0, inner=None)\n\txlabel(\"Duration of Training (sec)\")\n\txticks(epoch_markers_int, epoch_markers_str)\n\txlim(-0.5)\n\tylabel(\"Weights\")\n\ttitle(\"Weights across epochs of STDP training - Non-Aligned\")\n\tsavefig(\"analysis_results/\" + str(seed_iter) + \"/violins_weights_across_epochs_nonaligned.png\", dpi=300)\n\tclf()\n\n\n\t# For the aligned weights, plot each one before and after training, drawing a line\n\t# to make it clear how each particular weight evolved\n\tnumber_of_weights_to_vis = len(all_aligned)\n\n\tfor current_weight_iter in range(number_of_weights_to_vis):\n\n\t\tplot([0, 1], [learning_weights_list[0, all_aligned[current_weight_iter]],\n\t\t\t\t\t learning_weights_list[-1, all_aligned[current_weight_iter]]],\n\t\t \t alpha=0.5)\n\t\n\txlabel(\"Evolution of Individual Weights\")\n\txticks([0,1], 
[\"Pre-StDP\", \"Post-STDP\"])\n\tylabel(\"Weights\")\n\ttitle(\"Tracking Evolution of Specific Weights - Aligned\")\n\tsavefig(\"analysis_results/\" + str(seed_iter) + \"/specific_weights_evolution_aligned.png\", dpi=300)\n\tclf()\n\n\t# As above, but for non-aligned weights; NB that typically not all non-aligned weights are visualized,\n\t# i.e. instead the same number as there are aligned weights are (in effect randomly) selected\n\tfor current_weight_iter in range(number_of_weights_to_vis):\n\n\t\tplot([0, 1], [learning_weights_list[0, alignment_results[\"non_aligned_weights\"][current_weight_iter]],\n\t\t\t\t\t learning_weights_list[-1, alignment_results[\"non_aligned_weights\"][current_weight_iter]]],\n\t\t \t alpha=0.5)\n\t\n\txlabel(\"Evolution of Individual Weights\")\n\txticks([0,1], [\"Pre-StDP\", \"Post-STDP\"])\n\tylabel(\"Weights\")\n\ttitle(\"Tracking Evolution of Specific Weights - Non-Aligned\")\n\tsavefig(\"analysis_results/\" + str(seed_iter) + \"/specific_weights_evolution_nonaligned.png\", dpi=300)\n\tclf()\n\n\t# Scatter plot for aligned weights --> weight pre and post STDP\n\t# Looking for how much of a benefit there is to starting with an an initially higher weight\n\tscatter(learning_weights_list[0, all_aligned],learning_weights_list[-1, all_aligned])\n\txlabel(\"Initial Weight, Aligned Neurons\")\n\tylabel(\"Final Weight, Aligned Neurons\")\n\ttitle(\"Effect of Initial Weight - Aligned\")\n\tsavefig(\"analysis_results/\" + str(seed_iter) + \"/effect_of_initial_weight.png\", dpi=300)\n\tclf()\n\n\ndef strong_delays_across_epochs(stimuli_params, network_params, data_set, seed_iter, drift_iter):\n\t'''\n\tVisualize the distribution of delay lines associated with strong (wmax/2)\n\tweights across the epochs of training\n\t'''\n\n\tinitial_weights = np.loadtxt(\"weights/\" + str(seed_iter) + \"/rand_weights.txt\")\n\n\tdelays = np.loadtxt(\"weights/\" + str(seed_iter) + \"/rand_delays.txt\")\n\n\tlearning_weights_list = np.loadtxt(\"weights/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/weights_over_epochs_vals.txt\")\n\n\n\tlearning_weights_list = np.insert(learning_weights_list, 0, initial_weights, axis=0)\n\n\n\tstrong_delays_list = []\n\n\tfor epoch_iter in range(len(learning_weights_list)):\n\n\t\tmask = learning_weights_list[epoch_iter] >= network_params[\"wmax\"]/2\n\n\t\t# As arrays associated with strong delays will be of different sizes, pad with NaN values\n\t\tcurrent_delays = np.empty(np.shape(learning_weights_list[epoch_iter]))\n\t\tcurrent_delays[:] = numpy.nan\n\n\t\tcurrent_delays[mask] = delays[mask]\n\n\t\tstrong_delays_list.append(current_delays)\n\n\n\tepoch_markers_str = np.loadtxt(\"raw_data/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/epoch_markers.txt\")\n\tepoch_markers_str = np.insert(epoch_markers_str, 0, 0)\n\n\tepoch_markers_int = list(range(stimuli_params[\"num_intervals_for_weight_saving\"]+1))\n\n\tdf_delays_across_epochs = pd.DataFrame(np.transpose(strong_delays_list))\n\tdf_delays_across_epochs = pd.melt(df_delays_across_epochs, var_name=\"epoch\", value_name=\"delay\")\n\n\tsns.violinplot(x=\"epoch\", y=\"delay\", data=df_delays_across_epochs,\n\t\t\t\t scale=\"count\", color=\"dodgerblue\", cut=0, inner=None)\n\t# see more options at https://seaborn.pydata.org/generated/seaborn.violinplot.html\n\t# including options for \"inner\"\n\n\txlabel(\"Duration of Training (sec)\")\n\txticks(epoch_markers_int, 
epoch_markers_str)\n\txlim(-0.5)\n\tylabel(\"Delays (ms)\")\n\ttitle(\"Delay with strong weights across epochs of STDP training\")\n\tsavefig(\"analysis_results/\" + str(seed_iter) + \"/violins_delays_across_epochs.png\", dpi=300)\n\tclf()\n\n\ndef FR_across_epochs(stimuli_params, network_params, data_set, seed_iter, drift_iter):\n\n\t# Load firing rate data, including pre-STDP firing rates\n\tinitial_FR = np.loadtxt(\"raw_data/\" + str(seed_iter) + \"/untrained_alternating_inputs_drift_iter_\"\n\t\t+ str(drift_iter) + \"/fr_output_layer.txt\")\n\n\tlearning_FR_list = np.loadtxt(\"raw_data/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/rates_over_epochs_vals.txt\")\n\n\tlearning_FR_list = np.insert(learning_FR_list, 0, initial_FR, axis=0)\n\n\t# Load epoch markers\n\tepoch_markers_str = np.loadtxt(\"raw_data/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/epoch_markers.txt\")\n\tepoch_markers_str = np.insert(epoch_markers_str, 0, 0)\n\n\t# Load alignment results; this will enable visualising the change in firing rates\n\t# the the correspondence (if any) to a particular type of alignment\n\twith open(\"raw_data/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/alignment_results.json\") as f:\n\t\talignment_results = json.load(f)\n\tall_aligned = copy.copy(alignment_results[\"upright_aligned_indices\"])\n\tall_aligned.extend(alignment_results[\"inverted_aligned_indices\"])\n\n\talignment_type_results = [all_aligned, alignment_results[\"both_aligned_indices\"], alignment_results[\"non_aligned_indices\"]]\n\talignment_type_labels = [\"Single Aligned\", \"Both Aligned\", \"None Aligned\"]\n\talignment_type_colors = [\"dodgerblue\", \"purple\", \"crimson\"]\n\n\tassert ((len(all_aligned) + len(alignment_results[\"both_aligned_indices\"])\n\t\t\t+ len(alignment_results[\"non_aligned_indices\"])) == stimuli_params[\"output_layer_size\"]), \"Aligned indices do not match layer size\"\n\n\tfor epoch_iter in range(len(learning_FR_list)):\n\n\t\tcurrent_fr = learning_FR_list[epoch_iter]\n\n\t\tfor current_alignment_iter in range(len(alignment_type_results)):\n\n\t\t\tmask = alignment_type_results[current_alignment_iter]\n\n\t\t\t# Only include legend on the first plotting\n\t\t\tif epoch_iter == 0:\n\t\t\t\tlabels = alignment_type_labels[current_alignment_iter]\n\t\t\telse:\n\t\t\t\tlabels = None\n\n\t\t\tplt.scatter(np.ones(len(mask))*epoch_markers_str[epoch_iter],\n\t\t\t\t\t\tcurrent_fr[mask], color=alignment_type_colors[current_alignment_iter],\n\t\t\t\t\t\talpha=0.2, label=labels)\n\t\t\n\t\tplt.scatter(epoch_markers_str[epoch_iter], np.mean(current_fr), s=60, marker='x', color='k')\n\n\tplt.axhline((1/(stimuli_params[\"duration_of_presentations\"]/1000)) / len(stimuli_params[\"stimuli_names\"]),\n\t\t\t label=\"Ideal Rate\",\n\t\t\t linestyle='--', alpha=0.5, color='k')\n\n\txlabel(\"Duration of Training (sec)\")\n\txlim(-10)\n\tylabel(\"Firing Rates (Hz)\")\n\tlegend()\n\ttitle(\"Firing rates across epochs of STDP training\")\n\tsavefig(\"analysis_results/\" + str(seed_iter) + \"/violins_rates_across_epochs.png\", dpi=300)\n\tclf()\n\n\ndef visualize_strong_weights(stimuli_params, network_params, data_set, seed_iter, drift_iter):\n\n\tdelays = np.loadtxt(\"weights/\" + str(seed_iter) + \"/rand_delays.txt\")\n\n\tinitial_weights = np.loadtxt(\"weights/\" + str(seed_iter) + \"/rand_weights.txt\")\n\n\tfinal_weights = np.loadtxt(\"weights/\" + str(seed_iter) + 
\"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/final_weights.txt\")\n\n\tnum_bins = 6\n\n\n\t# Histogram of weights exceeding the threshold of interest\n\tmask = initial_weights >= network_params[\"wmax\"]/2\n\thist(np.asarray(delays)[mask], bins=num_bins, alpha=0.5, color=\"dodgerblue\")\n\txlabel(\"Synapse Delay (ms)\")\n\ttitle(\"Initial Delays with Strong Weight\")\n\tsavefig(\"analysis_results/strong_weights_initial.png\")\n\tclf()\n\n\tmask = final_weights >= network_params[\"wmax\"]/2\n\thist(np.asarray(delays)[mask], bins=num_bins, alpha=0.5, color=\"dodgerblue\")\n\txlabel(\"Synapse Delay (ms)\")\n\ttitle(\"Final Delays with Strong Weight\")\n\tsavefig(\"analysis_results/strong_weights_final.png\")\n\tclf()\n\n\t# Scatterplot showing correlation (if any) between delay and weight\n\tscatter(delays, initial_weights, label=\"Initial\", color=\"crimson\", alpha=0.5)\n\tscatter(delays, final_weights, label=\"Final\", color=\"dodgerblue\", alpha=0.5)\n\txlabel(\"Synapse Delay (ms)\")\n\tylabel(\"Weight\")\n\ttitle(\"Weight vs Delay\")\n\tsavefig(\"analysis_results/delay_vs_weight_scatter.png\")\n\tclf()\n\n\n\t# Use Spearman as data generally not very normally distributed\n\tcoef, p_val = scipy.stats.spearmanr(delays, initial_weights)\n\tprint(\"\\nCorrelation before STDP\")\n\tprint(\"Coef : \" + str(coef))\n\tprint(\"p-value : \" + str(p_val))\n\n\tcoef, p_val = scipy.stats.spearmanr(delays, final_weights)\n\tprint(\"\\nCorrelation AFTER STDP\")\n\tprint(\"Coef : \" + str(coef))\n\tprint(\"p-value : \" + str(p_val))\n\n\ndef specific_weights_across_epochs(stimuli_params, network_params, data_set, seed_iter, drift_iter):\n\t'''\n\tVisualize the change in aligned weights across epochs, specifically looking at those associated\n\twith aligned inputs/delays, vs. 
those that are not\n\t'''\n\n\n\t# Load firing rate data, including pre-STDP firing rates\n\tinitial_FR = np.loadtxt(\"raw_data/\" + str(seed_iter) + \"/untrained_alternating_inputs_drift_iter_\"\n\t\t+ str(drift_iter) + \"/fr_output_layer.txt\")\n\n\tlearning_FR_list = np.loadtxt(\"raw_data/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/rates_over_epochs_vals.txt\")\n\n\tlearning_FR_list = np.insert(learning_FR_list, 0, initial_FR, axis=0)\n\n\t# Load epoch markers\n\tepoch_markers_str = np.loadtxt(\"raw_data/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/epoch_markers.txt\")\n\tepoch_markers_str = np.insert(epoch_markers_str, 0, 0)\n\n\t# Load alignment results; this will enable visualising the change in firing rates\n\t# the the correspondence (if any) to a particular type of alignment\n\twith open(\"raw_data/\" + str(seed_iter) + \"/\" + data_set + \"_drift_iter_\"\n\t\t\t\t\t\t+ str(drift_iter) + \"/alignment_results.json\") as f:\n\t\talignment_results = json.load(f)\n\tall_aligned = copy.copy(alignment_results[\"upright_aligned_indices\"])\n\tall_aligned.extend(alignment_results[\"inverted_aligned_indices\"])\n\n\talignment_type_results = [all_aligned, alignment_results[\"both_aligned_indices\"], alignment_results[\"non_aligned_indices\"]]\n\talignment_type_labels = [\"Single Aligned\", \"Both Aligned\", \"None Aligned\"]\n\talignment_type_colors = [\"dodgerblue\", \"purple\", \"crimson\"]\n\n\tassert ((len(all_aligned) + len(alignment_results[\"both_aligned_indices\"])\n\t\t\t+ len(alignment_results[\"non_aligned_indices\"])) == stimuli_params[\"output_layer_size\"]), \"Aligned indices do not match layer size\"\n\n\tfor epoch_iter in range(len(learning_FR_list)):\n\n\t\tcurrent_fr = learning_FR_list[epoch_iter]\n\n\t\tfor current_alignment_iter in range(len(alignment_type_results)):\n\n\t\t\tmask = alignment_type_results[current_alignment_iter]\n\n\t\t\t# Only include legend on the first plotting\n\t\t\tif epoch_iter == 0:\n\t\t\t\tlabels = alignment_type_labels[current_alignment_iter]\n\t\t\telse:\n\t\t\t\tlabels = None\n\n\t\t\tplt.scatter(np.ones(len(mask))*epoch_markers_str[epoch_iter],\n\t\t\t\t\t\tcurrent_fr[mask], color=alignment_type_colors[current_alignment_iter],\n\t\t\t\t\t\talpha=0.2, label=labels)\n\t\t\n\t\tplt.scatter(epoch_markers_str[epoch_iter], np.mean(current_fr), s=60, marker='x', color='k')\n\n\tplt.axhline((1/(stimuli_params[\"duration_of_presentations\"]/1000)) / len(stimuli_params[\"stimuli_names\"]),\n\t\t\t label=\"Ideal Rate\",\n\t\t\t linestyle='--', alpha=0.5, color='k')\n\n\txlabel(\"Duration of Training (sec)\")\n\txlim(-10)\n\tylabel(\"Firing Rates (Hz)\")\n\tlegend()\n\ttitle(\"Firing rates across epochs of STDP training\")\n\tsavefig(\"analysis_results/\" + str(seed_iter) + \"/violins_rates_across_epochs.png\", dpi=300)\n\tclf()\n\n\nif __name__ == '__main__':\n\n\tif os.path.exists(\"analysis_results\") == 0:\n\t\ttry:\n\t\t\tos.mkdir(\"analysis_results\")\n\t\texcept OSError:\n\t\t\tpass\n\n\tprint(\"\\nPerforming analysis of simulation results\")\n\twith open('config_TranslationInvariance.yaml') as f:\n\t\tparams = yaml.load(f, Loader=yaml.FullLoader)\n\n\tstimuli_params = params['stimuli_params']\n\tnetwork_params = params[\"network_params\"]\n\n\tlayers_to_analyse = [\"output\"] # \"input\"\n\n\t# for layer in layers_to_analyse:\n\n\t# \tperform_primary_analyses(stimuli_params, layer)\n\n\n\t# Analyses/plotting specific to during training\n\tdata_set 
= \"during_spikepair_training\"\n\n\tfor seed_iter in stimuli_params[\"seeds_list\"]:\n\n\t\tif os.path.exists(\"analysis_results/\" + str(seed_iter)) == 0:\n\t\t\ttry:\n\t\t\t\tos.mkdir(\"analysis_results/\" + str(seed_iter))\n\t\t\texcept OSError:\n\t\t\t\tpass\n\n\t\tfor drift_iter in stimuli_params[\"drift_coef_list\"]:\n\n\t\t\tweights_across_epochs_by_alignment(stimuli_params, network_params, data_set, seed_iter, drift_iter)\n\n\t\t\tvisualize_strong_weights(stimuli_params, network_params, data_set, seed_iter, drift_iter)\n\n\t\t\tFR_across_epochs(stimuli_params, network_params, data_set, seed_iter, drift_iter)\n\n\t\t\tweights_across_epochs(stimuli_params, network_params, data_set, seed_iter, drift_iter)\n\n\t\t\tstrong_delays_across_epochs(stimuli_params, network_params, data_set, seed_iter, drift_iter)\n\n\n"
] | [
[
"numpy.take_along_axis",
"numpy.amax",
"matplotlib.pyplot.legend",
"numpy.asarray",
"numpy.all",
"numpy.mean",
"numpy.ma.array",
"scipy.stats.spearmanr",
"numpy.var",
"pandas.melt",
"numpy.divide",
"numpy.clip",
"numpy.arange",
"numpy.std",
"numpy.insert",
"numpy.argpartition",
"numpy.zeros",
"matplotlib.pyplot.title",
"numpy.nonzero",
"numpy.amin",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"numpy.transpose",
"numpy.argsort",
"numpy.array",
"numpy.flip",
"numpy.sum",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.sort",
"numpy.ones",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"numpy.shape",
"matplotlib.pyplot.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
marydlawyer/beneficiary-fhir-data | [
"3e28017ed2bff6e03028da2f41bc368cab946c62"
] | [
"ops/ccs-ops-misc/synthetic-data/scripts/outpatient/convert_outpatient_claims_csv_file_to_rif.py"
] | [
"import csv\nimport sys\nfrom numpy.random import choice\n\n\n'''\nThis tool was used for remapping and formatting the files provided for\nsynthetic outpatient data in the keybase directory:\n\n /keybase/team/oeda_bfs/team/synthetic-data/outpatient-claims\n\n The following command line is accepted:\n\n $1 = Filename of the CSV type file. Example, \"file.txt\"\n\nIt performs the following:\n\n1. Uses a column header ordering list: header_enum_order_list\n This is taken from the OutpatientClaimColumn ENUM list in the following BFD\n code:\n ./apps/bfd-model/bfd-model-rif/target/generated-sources/annotations/\n gov/cms/bfd/model/rif/OutpatientClaimColumn.java\n\n The RIF loader expects the columns in the files to be in this order.\n\n2. Verifies all column headers from the input CSV file's top header row are\n all in the expected headers list: header_enum_order_list.\n\n3. Verifies BENE_ID's are for sythetic. Converts BENE_ID to negative.\n\n4. Validates DATE fields. There have column header names ending in \"_DT\"\n such as \"CLM_FROM_DT\". Also those that end in \"_DT\" and a number, like\n \"_DT1\", \"_DT2\", \"_DT11\", ETC.\n\n Dates in the CSV files are in a format like \"01APR1999\". The RIF loader\n is expecting \"01-Apr-1999\".\n\n These are converted to the RIF loader format and will ERROR if not matching\n this format (and not blank).\n\n5 Validates that the count of fields output is the same\n as header_enum_order_list.\n\n6. If a column header from header_enum_order_list is not included in the file,\n the field entry will be included and empty in the output.\n\n7. Strip leading and trailing whitespace for field entries.\n \n Was getting this error from the RIF processor:\n gov.cms.bfd.model.rif.parse.RifParsingUtils.parseDecimal\n Caused by: gov.cms.bfd.model.rif.parse.InvalidRifValueException:\n Unable to parse decimal value: ' 2267800412'.\n\n8. For _AMT (amount) type decimal values, remove any commas (\",\").\n\n This is in reference to the following RIF loader error: \n In: RifFilesProcessor.java:154\n gov.cms.bfd.model.rif.parse.InvalidRifValueException:\n gov.cms.bfd.model.rif.parse.InvalidRifValueException:\n Unable to parse decimal value: '2,000.00'.\n\n9. For REV_CNTR_STUS_IND_CD with empty values, replace with a random value based\n on the distribution of production data. \n\n10. 
For PTNT_DSCHRG_STUS_CD with empty values, replace with 0 which indicates an unknown value.\n\n'''\n\nfilename = sys.argv[1]\n\nin_delimiter = \",\"\n\nheader_enum_order_list = [\"DML_IND\", \"BENE_ID\", \"CLM_ID\", \"CLM_GRP_ID\",\n \"FINAL_ACTION\", \"NCH_NEAR_LINE_REC_IDENT_CD\",\n \"NCH_CLM_TYPE_CD\", \"CLM_FROM_DT\", \"CLM_THRU_DT\",\n \"NCH_WKLY_PROC_DT\", \"FI_CLM_PROC_DT\",\n \"CLAIM_QUERY_CODE\", \"PRVDR_NUM\", \"CLM_FAC_TYPE_CD\",\n \"CLM_SRVC_CLSFCTN_TYPE_CD\", \"CLM_FREQ_CD\", \"FI_NUM\",\n \"CLM_MDCR_NON_PMT_RSN_CD\", \"CLM_PMT_AMT\",\n \"NCH_PRMRY_PYR_CLM_PD_AMT\", \"NCH_PRMRY_PYR_CD\",\n \"PRVDR_STATE_CD\", \"ORG_NPI_NUM\", \"AT_PHYSN_UPIN\",\n \"AT_PHYSN_NPI\", \"OP_PHYSN_UPIN\", \"OP_PHYSN_NPI\",\n \"OT_PHYSN_UPIN\", \"OT_PHYSN_NPI\", \"CLM_MCO_PD_SW\",\n \"PTNT_DSCHRG_STUS_CD\", \"CLM_TOT_CHRG_AMT\",\n \"NCH_BENE_BLOOD_DDCTBL_LBLTY_AM\",\n \"NCH_PROFNL_CMPNT_CHRG_AMT\", \"PRNCPAL_DGNS_CD\",\n \"PRNCPAL_DGNS_VRSN_CD\", \"ICD_DGNS_CD1\",\n \"ICD_DGNS_VRSN_CD1\", \"ICD_DGNS_CD2\",\n \"ICD_DGNS_VRSN_CD2\", \"ICD_DGNS_CD3\",\n \"ICD_DGNS_VRSN_CD3\", \"ICD_DGNS_CD4\",\n \"ICD_DGNS_VRSN_CD4\", \"ICD_DGNS_CD5\",\n \"ICD_DGNS_VRSN_CD5\", \"ICD_DGNS_CD6\",\n \"ICD_DGNS_VRSN_CD6\", \"ICD_DGNS_CD7\",\n \"ICD_DGNS_VRSN_CD7\", \"ICD_DGNS_CD8\",\n \"ICD_DGNS_VRSN_CD8\", \"ICD_DGNS_CD9\",\n \"ICD_DGNS_VRSN_CD9\", \"ICD_DGNS_CD10\",\n \"ICD_DGNS_VRSN_CD10\", \"ICD_DGNS_CD11\",\n \"ICD_DGNS_VRSN_CD11\", \"ICD_DGNS_CD12\",\n \"ICD_DGNS_VRSN_CD12\", \"ICD_DGNS_CD13\",\n \"ICD_DGNS_VRSN_CD13\", \"ICD_DGNS_CD14\",\n \"ICD_DGNS_VRSN_CD14\", \"ICD_DGNS_CD15\",\n \"ICD_DGNS_VRSN_CD15\", \"ICD_DGNS_CD16\",\n \"ICD_DGNS_VRSN_CD16\", \"ICD_DGNS_CD17\",\n \"ICD_DGNS_VRSN_CD17\", \"ICD_DGNS_CD18\",\n \"ICD_DGNS_VRSN_CD18\", \"ICD_DGNS_CD19\",\n \"ICD_DGNS_VRSN_CD19\", \"ICD_DGNS_CD20\",\n \"ICD_DGNS_VRSN_CD20\", \"ICD_DGNS_CD21\",\n \"ICD_DGNS_VRSN_CD21\", \"ICD_DGNS_CD22\",\n \"ICD_DGNS_VRSN_CD22\", \"ICD_DGNS_CD23\",\n \"ICD_DGNS_VRSN_CD23\", \"ICD_DGNS_CD24\",\n \"ICD_DGNS_VRSN_CD24\", \"ICD_DGNS_CD25\",\n \"ICD_DGNS_VRSN_CD25\", \"FST_DGNS_E_CD\",\n \"FST_DGNS_E_VRSN_CD\", \"ICD_DGNS_E_CD1\",\n \"ICD_DGNS_E_VRSN_CD1\", \"ICD_DGNS_E_CD2\",\n \"ICD_DGNS_E_VRSN_CD2\", \"ICD_DGNS_E_CD3\",\n \"ICD_DGNS_E_VRSN_CD3\", \"ICD_DGNS_E_CD4\",\n \"ICD_DGNS_E_VRSN_CD4\", \"ICD_DGNS_E_CD5\",\n \"ICD_DGNS_E_VRSN_CD5\", \"ICD_DGNS_E_CD6\",\n \"ICD_DGNS_E_VRSN_CD6\", \"ICD_DGNS_E_CD7\",\n \"ICD_DGNS_E_VRSN_CD7\", \"ICD_DGNS_E_CD8\",\n \"ICD_DGNS_E_VRSN_CD8\", \"ICD_DGNS_E_CD9\",\n \"ICD_DGNS_E_VRSN_CD9\", \"ICD_DGNS_E_CD10\",\n \"ICD_DGNS_E_VRSN_CD10\", \"ICD_DGNS_E_CD11\",\n \"ICD_DGNS_E_VRSN_CD11\", \"ICD_DGNS_E_CD12\",\n \"ICD_DGNS_E_VRSN_CD12\", \"ICD_PRCDR_CD1\",\n \"ICD_PRCDR_VRSN_CD1\", \"PRCDR_DT1\",\n \"ICD_PRCDR_CD2\", \"ICD_PRCDR_VRSN_CD2\",\n \"PRCDR_DT2\", \"ICD_PRCDR_CD3\",\n \"ICD_PRCDR_VRSN_CD3\", \"PRCDR_DT3\",\n \"ICD_PRCDR_CD4\", \"ICD_PRCDR_VRSN_CD4\",\n \"PRCDR_DT4\", \"ICD_PRCDR_CD5\",\n \"ICD_PRCDR_VRSN_CD5\", \"PRCDR_DT5\",\n \"ICD_PRCDR_CD6\", \"ICD_PRCDR_VRSN_CD6\",\n \"PRCDR_DT6\", \"ICD_PRCDR_CD7\",\n \"ICD_PRCDR_VRSN_CD7\", \"PRCDR_DT7\",\n \"ICD_PRCDR_CD8\", \"ICD_PRCDR_VRSN_CD8\",\n \"PRCDR_DT8\", \"ICD_PRCDR_CD9\",\n \"ICD_PRCDR_VRSN_CD9\", \"PRCDR_DT9\",\n \"ICD_PRCDR_CD10\", \"ICD_PRCDR_VRSN_CD10\",\n \"PRCDR_DT10\", \"ICD_PRCDR_CD11\",\n \"ICD_PRCDR_VRSN_CD11\", \"PRCDR_DT11\",\n \"ICD_PRCDR_CD12\", \"ICD_PRCDR_VRSN_CD12\",\n \"PRCDR_DT12\", \"ICD_PRCDR_CD13\",\n \"ICD_PRCDR_VRSN_CD13\", \"PRCDR_DT13\",\n \"ICD_PRCDR_CD14\", \"ICD_PRCDR_VRSN_CD14\",\n \"PRCDR_DT14\", 
\"ICD_PRCDR_CD15\",\n \"ICD_PRCDR_VRSN_CD15\", \"PRCDR_DT15\",\n \"ICD_PRCDR_CD16\", \"ICD_PRCDR_VRSN_CD16\",\n \"PRCDR_DT16\", \"ICD_PRCDR_CD17\",\n \"ICD_PRCDR_VRSN_CD17\", \"PRCDR_DT17\",\n \"ICD_PRCDR_CD18\", \"ICD_PRCDR_VRSN_CD18\",\n \"PRCDR_DT18\", \"ICD_PRCDR_CD19\",\n \"ICD_PRCDR_VRSN_CD19\", \"PRCDR_DT19\",\n \"ICD_PRCDR_CD20\", \"ICD_PRCDR_VRSN_CD20\",\n \"PRCDR_DT20\", \"ICD_PRCDR_CD21\",\n \"ICD_PRCDR_VRSN_CD21\", \"PRCDR_DT21\",\n \"ICD_PRCDR_CD22\", \"ICD_PRCDR_VRSN_CD22\",\n \"PRCDR_DT22\", \"ICD_PRCDR_CD23\",\n \"ICD_PRCDR_VRSN_CD23\", \"PRCDR_DT23\",\n \"ICD_PRCDR_CD24\", \"ICD_PRCDR_VRSN_CD24\",\n \"PRCDR_DT24\", \"ICD_PRCDR_CD25\",\n \"ICD_PRCDR_VRSN_CD25\", \"PRCDR_DT25\",\n \"RSN_VISIT_CD1\", \"RSN_VISIT_VRSN_CD1\",\n \"RSN_VISIT_CD2\", \"RSN_VISIT_VRSN_CD2\",\n \"RSN_VISIT_CD3\", \"RSN_VISIT_VRSN_CD3\",\n \"NCH_BENE_PTB_DDCTBL_AMT\",\n \"NCH_BENE_PTB_COINSRNC_AMT\",\n \"CLM_OP_PRVDR_PMT_AMT\", \"CLM_OP_BENE_PMT_AMT\",\n \"CLM_LINE_NUM\", \"REV_CNTR\", \"REV_CNTR_DT\",\n \"REV_CNTR_1ST_ANSI_CD\", \"REV_CNTR_2ND_ANSI_CD\",\n \"REV_CNTR_3RD_ANSI_CD\", \"REV_CNTR_4TH_ANSI_CD\",\n \"REV_CNTR_APC_HIPPS_CD\", \"HCPCS_CD\",\n \"HCPCS_1ST_MDFR_CD\", \"HCPCS_2ND_MDFR_CD\",\n \"REV_CNTR_PMT_MTHD_IND_CD\", \"REV_CNTR_DSCNT_IND_CD\",\n \"REV_CNTR_PACKG_IND_CD\", \"REV_CNTR_OTAF_PMT_CD\",\n \"REV_CNTR_IDE_NDC_UPC_NUM\", \"REV_CNTR_UNIT_CNT\",\n \"REV_CNTR_RATE_AMT\", \"REV_CNTR_BLOOD_DDCTBL_AMT\",\n \"REV_CNTR_CASH_DDCTBL_AMT\",\n \"REV_CNTR_COINSRNC_WGE_ADJSTD_C\",\n \"REV_CNTR_RDCD_COINSRNC_AMT\",\n \"REV_CNTR_1ST_MSP_PD_AMT\",\n \"REV_CNTR_2ND_MSP_PD_AMT\", \"REV_CNTR_PRVDR_PMT_AMT\",\n \"REV_CNTR_BENE_PMT_AMT\",\n \"REV_CNTR_PTNT_RSPNSBLTY_PMT\",\n \"REV_CNTR_PMT_AMT_AMT\", \"REV_CNTR_TOT_CHRG_AMT\",\n \"REV_CNTR_NCVRD_CHRG_AMT\", \"REV_CNTR_STUS_IND_CD\",\n \"REV_CNTR_NDC_QTY\", \"REV_CNTR_NDC_QTY_QLFR_CD\",\n \"RNDRNG_PHYSN_UPIN\", \"RNDRNG_PHYSN_NPI\"]\n\nvalid_month_list = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\",\n \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n\nclaim_ids = []\n\n# reading csv file\nwith open(filename, 'r') as csvfile:\n # creating a csv reader object\n csvreader = csv.DictReader(csvfile, delimiter=in_delimiter)\n\n # Output header line\n out_line = '|'.join(header_enum_order_list)\n print(out_line)\n\n row_count = 0\n # Iterate over all rows\n for row in csvreader:\n \n row_count = row_count + 1\n\n # 2. Verify all header keys are in the header_enum_order_list.\n if row_count == 1:\n for key in row:\n if key not in header_enum_order_list:\n raise SystemExit(\"ERROR: Input file header key is not\" +\n \" in the header_enum_order_list list: \"\n + key)\n\n # Output row fields based on enum mapping\n row_list = []\n for col_name in header_enum_order_list:\n # Is the col_name include in the input file? If not,make empty.\n if col_name in row:\n # 4. 
Convert and validate DATE fields.\n #\n # NOTE: For our column list this substring check is OK.\n # However, you may want to use a REGEX search if reusing\n # this code for another purpose!\n if \"_DT\" in col_name:\n dt = row[col_name]\n if dt != \"\":\n # Convert from \"01APR1999\" to \"01-Apr-1999\" format.\n new_dt = dt[0:2] + \"-\" \\\n + dt[2:3] \\\n + dt[3:5].lower() \\\n + \"-\" + dt[5:9]\n\n # Validate month is OK:\n if new_dt[3:6] not in valid_month_list:\n raise SystemExit(\"ERROR: Month value in date\"\n \" field is not valid: \"\n + col_name\n + \"=\" + row[col_name])\n row_list.append(new_dt)\n else:\n row_list.append(\"\")\n elif col_name == \"BENE_ID\" and int(row[\"BENE_ID\"]) > 0:\n # Convert BENE_ID to negative\n row_list.append(str(int(row[col_name])*-1))\n elif \"_AMT\" in col_name:\n # Remove commas from _AMT type field.\n row_list.append(row[col_name].strip().replace(',','')) \n elif col_name == \"PTNT_DSCHRG_STUS_CD\" and len(row[col_name]) == 0:\n # Use a default 0 which means Unknown Value (but present in data)\n row_list.append(str(0)) \n else:\n # Copy field as is.\n row_list.append(row[col_name].strip())\n else:\n if col_name == \"REV_CNTR_STUS_IND_CD\":\n # add this column based on a distribution from prod\n rev_status = choice([\"A\", \"B\", \"E\", \"K\", \"N\", \"S\", \"T\", \"V\"],\n p=[0.43, 0.05, 0.03, 0.01, 0.37, 0.06, 0.01, 0.04])\n row_list.append(rev_status)\n else:\n # col_name is not in row, so provide empty value\n row_list.append(\"\")\n\n # Validate row_list count is same as header_enum_order_list count.\n if len(row_list) != len(header_enum_order_list):\n raise SystemExit(\"ERROR: Was expecting: \" +\n str(len(header_enum_order_list)) +\n \" field count but got: \" + str(len(row_list)))\n\n # Output row to stdout\n print('|'.join(row_list))\n"
] | [
[
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GobySoft/pyifcb | [
"f465329e720c826069cdd02b55e35d780f24a3d5"
] | [
"ifcb/data/identifiers.py"
] | [
"\"\"\"\nSupport for parsing IFCB permanent identifiers (a.k.a., pids).\n\"\"\"\n\nimport re\n\nfrom functools import lru_cache\nimport pandas as pd\n\n### parsing\n\n# supports time-like regexes e.g., IFCB9_yyyy_YYY_HHMMSS\n@lru_cache()\ndef timestamp2regex(pattern):\n \"\"\"\n Convert a special \"timestamp\" expression into a regex pattern.\n The expression is treated as an ordinary regex except that special\n patterns are used to define groups that match typical patterns that\n are found in timestamps and timestamp-related formats.\n\n Special patterns that define groups that are supported:\n\n * ``0-9`` - where n is any number of digits (e.g., ``111``, ``88``) fixed-length\n decimal number\n * ``s`` - any number of ``s``'s indicating milliseconds (e.g., ``sss``)\n * ``yyyy`` - four-digit year\n * ``mm`` - two-digit (left-zero-padded) month\n * ``dd`` - two-digit (left-zero-padded) day of month\n * ``DDD`` - three-digit (left-zero-padded) day of year\n * ``HH`` - two-digit (left-zero-padded) hour of day\n * ``MM`` - two-digit (left-zero-padded) minute of hour\n * ``SS`` - two-digit (left-zero-padded) second of minute\n * ``#`` - any string of digits (non-capturing)\n * ``i`` - an \"identifier\" (e.g., ``jpg2000``) (non-capturing)\n * ``.ext`` - a file extension\n * ``.`` - a literal dot\n * ``\\.`` - a regex dot (matches any character)\n * ``any`` - a regex ``.*``\n\n Example patterns:\n\n * ``yyyy-mm-ddTHH:MM:SSZ`` - a UTC ISO8601 timestamp\n * ``yyyyDDD`` - year and day of year\n\n :Example:\n\n >>> timestamp2regex('Dyyyymm')\n 'D(?P<yyyy>[0-9]{4})(?P<mm>0[1-9]|1[0-2])'\n\n :param pattern: the pattern\n :type pattern: str\n \"\"\"\n # FIXME handle unfortunate formats such as\n # - non-zero-padded numbers\n # - full and abbreviated month names\n # first, do fixed-length numbers (requires some trickery)\n start, result = 0, ''\n for m in re.finditer(r'(([0-9])\\2*)',pattern):\n s = m.start()\n result = result + pattern[start:s]\n l, n = len(m.group(0)), m.group(1)\n result += '(?P<n%s>[0-9]{%d})' % (n, l)\n start = m.end()\n pattern = result + pattern[start:]\n pattern = re.sub(r's+','(?P<sss>[0-9]+)',pattern) # milliseconds\n pattern = re.sub(r'yyyy','(?P<yyyy>[0-9]{4})',pattern) # four-digit year\n pattern = re.sub(r'mm','(?P<mm>0[1-9]|1[0-2])',pattern) # two-digit month\n pattern = re.sub(r'dd','(?P<dd>0[1-9]|[1-2][0-9]|3[0-1])',pattern) # two-digit day of month\n pattern = re.sub(r'DDD','(?P<DDD>[0-3][0-9][0-9])',pattern) # three-digit day of year\n pattern = re.sub(r'HH','(?P<HH>[0-1][0-9]|2[0-3])',pattern) # two-digit hour\n pattern = re.sub(r'MM','(?P<MM>[0-5][0-9])',pattern) # two-digit minute\n pattern = re.sub(r'SS','(?P<SS>[0-5][0-9])',pattern) # two-digit second\n pattern = re.sub(r'#','[0-9]+',pattern) # any string of digits (non-capturing)\n pattern = re.sub(r'i','[a-zA-Z][a-zA-Z0-9_]*',pattern) # an identifier (e.g., jpg2000) (non-capturing)\n pattern = re.sub(r'\\.ext',r'(?:.(?P<ext>[a-zA-Z][a-zA-Z0-9_]*))',pattern) # a file extension\n pattern = re.sub(r'\\.',r'\\.',pattern) # a literal '.'\n pattern = re.sub(r'\\\\.','.',pattern) # a regex '.'\n pattern = re.sub(r'any','.*',pattern) # a regex .*\n return pattern\n\n# \"timetsamp\"-style patterns\nV1_PID_PATTERN = '(IFCB1_(yyyy_DDD_HHMMSS))(any)'\nV2_PID_PATTERN = '(D(yyyymmddTHHMMSS)_IFCB111)(any)'\n\n@lru_cache()\ndef c(pattern):\n \"\"\"\n Compile a regex pattern (with caching)\n\n :param pattern: the pattern\n :type pattern: str\n :returns: the compiled pattern\n \"\"\"\n return 
re.compile(pattern)\n\n@lru_cache()\ndef m(pattern, string):\n \"\"\"\n Match a pattern against a string and return the\n matching groups. Provides several convenience\n features that differ from ``re.match``:\n\n * If the pattern does not match the string, or the\n string is None, return a tuple of Nones the length\n of the number of capturing groups.\n * If there is only one capturing group, return a single\n value instead of a one-element tuple.\n\n :param pattern: the pattern\n :type pattern: str\n :param string: the string to match\n :returns: a value or tuple of captured groups\n \"\"\"\n def col_or_scalar(o):\n if len(o) == 1:\n return o[0]\n else:\n return o\n def nones(n):\n return col_or_scalar([None for _ in range(n)])\n r = c(pattern)\n n = r.groups\n if string is None:\n return nones(n)\n m = r.match(string)\n if m is None:\n return nones(n)\n return col_or_scalar(tuple(m.groups()))\n\ndef parse(pid):\n \"\"\"\n Parse an IFCB permanent identifier (a.k.a., \"pid\"). The\n passed-in pid may contain either a pathname prefix or\n a URL prefix, and may include a product identifier and/or\n extension. It can also include a target number.\n The pid syntax is specified elsewhere. Extracted fields\n are returned as a dict.\n\n :param pid: the pid as a string\n\n Example pids:\n\n * ``D20160714T023910_IFCB101``\n * ``IFCB3_2008_013423.adc``\n * ``http://mysite.org/data/D20150321T124431_IFCB103``\n * ``D20160714T023910_IFCB101_00014.png``\n * ``/my/directory/D20160603T002950_IFCB101_blob.zip``\n\n Fields extracted include:\n\n * ``pid`` - the pid, minus any leading path/URL prefix\n * ``lid`` - the pid, minus all prefixes and suffixes\n * ``namespace`` - any leading path/URL prefix\n * ``ts_label`` - for URL patterns, the time series label\n * ``year``, ``month``, ``day`` - the date\n * ``hour``, ``minute``, ``second`` - the time\n * ``instrument`` - the instrument number\n * ``timestamp`` - the complete timestamp\n * ``timestamp_format`` - the format specifier of the timestamp\n * ``schema_version`` - which revision of the instrument\n (1 for ``IFCB...`` pids, 2 for ``D...`` pids)\n * ``yearday`` - the year and day, concatenated\n * ``target`` - the target number (if any)\n * ``extension`` - the extension, not including the leading ``.``\n * ``product`` - the product type identifier, or 'raw' if\n not specified\n\n :param pid: the pid\n :type pid: str\n :returns dict: fields extracted from the pid\n \"\"\"\n pid = c(r'^.*\\\\').sub('',pid) # strip Windows dirs\n namespace, suffix = m('(.*/)?(.*)',pid)\n ts_label = m('(?:.*/)?(.*)/$',namespace)\n # try v2 identifier pattern\n bin_lid, timestamp, year, month, day, hour, minute, second, instrument, tpe = m(timestamp2regex(V2_PID_PATTERN),suffix)\n # try v1 identifier pattern\n if bin_lid is None:\n bin_lid, instrument, timestamp, year, day, hour, minute, second, tpe = m(timestamp2regex(V1_PID_PATTERN),suffix)\n schema_version = 1\n timestamp_format = '%Y_%j_%H%M%S'\n if year is None or day is None:\n raise ValueError('invalid pid: %s' % pid)\n yearday = '_'.join([year, day])\n day_prefix = 'IFCB{}_{}'.format(instrument, yearday)\n else:\n schema_version = 2\n timestamp_format = '%Y%m%dT%H%M%S'\n yearday = ''.join([year, month, day])\n day_prefix = 'D{}'.format(yearday)\n if bin_lid is None: # syntax error\n raise ValueError('invalid pid: %s' % pid)\n # now parse target, product, and extension (tpe)\n # tpe, if not empty, must start with _ or .\n if tpe and (tpe[:1] not in '._' or len(tpe) < 2):\n raise ValueError('invalid target, product, or 
extension: %s' % pid)\n target, product, extension = m(r'(?:_([0-9]+))?(?:_([a-zA-Z][a-zA-Z0-9_]*))?(?:\\.([a-zA-Z][a-zA-Z0-9]*))?',tpe)\n if product is None:\n product = 'raw'\n if target is not None:\n lid = '_'.join([bin_lid, target])\n else:\n lid = bin_lid # make sure both are present\n # now del non-desired locals\n del tpe\n # this might actually be an acceptable use of locals()\n return locals()\n\ndef unparse(parsed):\n \"\"\"\n Unparse a PID. Accepts a parsed PID or anything containing\n the appropriate fields. Minimally, must include\n\n * ``schema_version``\n * ``instrument``\n * either ``year``, ``month``, ``day`` (day-of-month);\n or, ``year``, ``day`` (day-of-year)\n * ``hour``, ``minute``, ``second``\n\n May also include\n\n * ``namespace``\n * ``product``\n * ``extension``\n * ``target``\n\n \"\"\"\n try:\n schema_version = int(parsed['schema_version'])\n instrument = int(parsed['instrument'])\n year = int(parsed['year'])\n day = int(parsed['day'])\n hour = int(parsed['hour'])\n minute = int(parsed['minute'])\n second = int(parsed['second'])\n\n product = parsed.get('product','raw')\n namespace = parsed.get('namespace')\n\n if namespace is None:\n namespace = ''\n \n if 'target' in parsed and parsed['target'] is not None:\n tstring = '_%05d' % int(parsed['target'])\n else:\n tstring = ''\n\n if product == 'raw' or product is None:\n pstring = ''\n else:\n pstring = '_%s' % product\n\n if 'extension' in parsed and parsed['extension'] is not None:\n estring = '.%s' % parsed['extension']\n else:\n estring = ''\n\n suffix = '%s%s%s' % (tstring, pstring, estring)\n\n if schema_version == 1:\n fmt = '%sIFCB%1d_%4d_%03d_%02d%02d%02d%s'\n vals = (namespace, instrument, year, day, hour, minute, second, suffix)\n\n elif schema_version == 2:\n month = int(parsed['month'])\n fmt = '%sD%4d%02d%02dT%02d%02d%02d_IFCB%03d%s'\n vals = (namespace, year, month, day, hour, minute, second, instrument, suffix)\n \n else:\n raise ValueError('unknown schema version %d' % schema_version)\n \n return fmt % vals\n\n except KeyError:\n raise ValueError('cannot unparse PID')\n \nclass Pid(object):\n \"\"\"\n Represents the permanent identifier of an IFCB bin.\n Provides attribute-based access to the relevant parsed\n fields of a PID. 
``Pid``s sort by alpha.\n \"\"\"\n def __init__(self, pid, parse=True):\n \"\"\"\n Construct a Pid object from a string.\n Parsing is optional in case it needs\n to be deferred.\n\n :param pid: the pid\n :param parse: whether to parse\n \"\"\"\n self.pid = pid\n self._parsed = None\n if parse:\n self.parsed\n def isvalid(self):\n \"\"\"\n Check this PID for validity.\n \"\"\"\n try:\n self.parsed # parse if not already\n return True\n except ValueError:\n pass\n return False\n def copy(self):\n new_pid = Pid(self.pid, parse=False)\n if self._parsed is not None:\n # avoid re-parsing\n new_pid._parsed = self._parsed.copy()\n return new_pid\n def __cmp__(self, other):\n try:\n if self.pid < other.pid:\n return -1\n elif self.pid > other.pid:\n return 1\n else:\n return 0\n except AttributeError:\n if self.pid < other:\n return -1\n elif self.pid > other:\n return 1\n else:\n return 0\n def __eq__(self, other):\n try:\n return self.pid == other.pid\n except AttributeError:\n return self.pid == other\n @property\n def parsed(self):\n \"\"\"\n The parsed PID\n \"\"\"\n if self._parsed is None:\n # convert some properties to int\n p = parse(self.pid)\n for ip in ['target', 'instrument', 'schema_version']:\n if p[ip] is not None:\n p[ip] = int(p[ip])\n self._parsed = p\n return self._parsed\n def __getattr__(self, name):\n if name in ['pid','_parsed','parsed']:\n raise AttributeError\n try:\n return self.parsed[name]\n except KeyError:\n raise AttributeError\n def with_target(self, target_number, namespace=True):\n \"\"\"\n Add a target number to the pid's bin_lid. Does not\n include product or extension. Optionally includes\n namespace prefix. This is more efficient than the\n following approach, which will preserve product\n and extension:\n\n >>> my_pid = Pid('IFCB1_2000_001_123456_blob')\n >>> new_pid = my_pid.copy()\n >>> new_pid.target = 927\n >>> new_pid\n <pid IFCB1_2000_001_123456_00927_blob>\n\n :param target_number: the target number\n :type target_number: int\n :param namespace: whether to include the namespace prefix\n :type namespace: bool\n :returns: the target ID (as a string)\n \"\"\"\n ns = ''\n if namespace and self.namespace is not None:\n ns = self.namespace\n return ns + self.bin_lid + '_%05d' % target_number\n @property\n def timestamp(self):\n \"\"\"\n The timestamp of the bin as a ``datetime``\n \"\"\"\n return pd.to_datetime(self.parsed['timestamp'], format=self.parsed['timestamp_format'], utc=True)\n def __setattr__(self, name, value):\n if name == 'target':\n self.parsed # ensure parsing is complete\n self._parsed.update({ name: int(value) })\n self.pid = unparse(self._parsed)\n elif name in ['product', 'extension']:\n self.parsed # ensure parsing is complete\n self._parsed.update({ name: value })\n self.pid = unparse(self._parsed)\n else:\n super(Pid, self).__setattr__(name, value)\n def __repr__(self):\n return '<pid %s>' % self.pid\n def __str__(self):\n return self.pid\n"
] | [
[
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
dangpzanco/pytorch-lightning | [
"d70ac99f8cc05f3504cad596440b2670926b01e5"
] | [
"pytorch_lightning/trainer/evaluation_loop.py"
] | [
"\"\"\"\nValidation loop\n===============\n\nThe lightning validation loop handles everything except the actual computations of your model.\nTo decide what will happen in your validation loop, define the `validation_step` function.\nBelow are all the things lightning automates for you in the validation loop.\n\n.. note:: Lightning will run 5 steps of validation in the beginning of training as a sanity\n check so you don't have to wait until a full epoch to catch possible validation issues.\n\nCheck validation every n epochs\n-------------------------------\n\nIf you have a small dataset you might want to check validation every n epochs\n\n.. code-block:: python\n\n # DEFAULT\n trainer = Trainer(check_val_every_n_epoch=1)\n\nSet how much of the validation set to check\n-------------------------------------------\n\nIf you don't want to check 100% of the validation set (for debugging or if it's huge), set this flag\n\nval_percent_check will be overwritten by overfit_pct if `overfit_pct > 0`\n\n.. code-block:: python\n\n # DEFAULT\n trainer = Trainer(val_percent_check=1.0)\n\n # check 10% only\n trainer = Trainer(val_percent_check=0.1)\n\nSet how much of the test set to check\n-------------------------------------\n\nIf you don't want to check 100% of the test set (for debugging or if it's huge), set this flag\n\ntest_percent_check will be overwritten by overfit_pct if `overfit_pct > 0`\n\n.. code-block:: python\n\n # DEFAULT\n trainer = Trainer(test_percent_check=1.0)\n\n # check 10% only\n trainer = Trainer(test_percent_check=0.1)\n\nSet validation check frequency within 1 training epoch\n------------------------------------------------------\n\nFor large datasets it's often desirable to check validation multiple times within a training loop.\n Pass in a float to check that often within 1 training epoch.\n Pass in an int k to check every k training batches. Must use an int if using an IterableDataset.\n\n.. code-block:: python\n\n # DEFAULT\n trainer = Trainer(val_check_interval=0.95)\n\n # check every .25 of an epoch\n trainer = Trainer(val_check_interval=0.25)\n\n # check every 100 train batches (ie: for IterableDatasets or fixed frequency)\n trainer = Trainer(val_check_interval=100)\n\n\nSet the number of validation sanity steps\n-----------------------------------------\n\nLightning runs a few steps of validation in the beginning of training.\n This avoids crashing in the validation loop sometime deep into a lengthy training loop.\n\n.. code-block:: python\n\n # DEFAULT\n trainer = Trainer(num_sanity_val_steps=5)\n\n\nYou can use `Trainer(num_sanity_val_steps=0)` to skip the sanity check.\n\n# Testing loop\n\nTo ensure you don't accidentally use test data to guide training decisions Lightning\n makes running the test set deliberate.\n\n**test**\n\nYou have two options to run the test set.\nFirst case is where you test right after a full training routine.\n\n.. code-block:: python\n\n # run full training\n trainer.fit(model)\n\n # run test set\n trainer.test()\n\n\nSecond case is where you load a model and run the test set\n\n.. 
code-block:: python\n\n model = MyLightningModule.load_from_metrics(\n weights_path='/path/to/pytorch_checkpoint.ckpt',\n tags_csv='/path/to/test_tube/experiment/version/meta_tags.csv',\n on_gpu=True,\n map_location=None\n )\n\n # init trainer with whatever options\n trainer = Trainer(...)\n\n # test (pass in the model)\n trainer.test(model)\n\nIn this second case, the options you pass to trainer will be used when running\n the test set (ie: 16-bit, dp, ddp, etc...)\n\n\"\"\"\n\nfrom typing import Callable\n\nimport sys\nfrom abc import ABC, abstractmethod\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\nimport warnings\n\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.utilities.debugging import MisconfigurationException\n\ntry:\n import torch_xla.distributed.parallel_loader as xla_pl\n import torch_xla.core.xla_model as xm\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\n\nclass TrainerEvaluationLoopMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n test_progress_bar: ...\n val_progress_bar: ...\n main_progress_bar: ...\n use_ddp: bool\n use_dp: bool\n use_ddp2: bool\n single_gpu: bool\n data_parallel_device_ids: ...\n model: LightningModule\n num_test_batches: int\n num_val_batches: int\n fast_dev_run: ...\n process_position: ...\n show_progress_bar: ...\n process_output: ...\n training_tqdm_dict: ...\n proc_rank: int\n current_epoch: int\n callback_metrics: ...\n test_dataloaders: DataLoader\n val_dataloaders: DataLoader\n use_tpu: bool\n reload_dataloaders_every_epoch: ...\n progress_bar_refresh_rate: ...\n\n # Callback system\n on_validation_start: Callable\n on_validation_end: Callable\n on_test_start: Callable\n on_test_end: Callable\n\n @abstractmethod\n def copy_trainer_model_properties(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def get_model(self):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def is_overriden(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def transfer_batch_to_tpu(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def transfer_batch_to_gpu(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def add_tqdm_metrics(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def log_metrics(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def reset_test_dataloader(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def reset_val_dataloader(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def evaluate(self, model, dataloaders, max_batches, test_mode: bool = False):\n \"\"\"Run evaluation code.\n\n :param model: PT model\n :param dataloaders: list of PT dataloaders\n :param max_batches: Scalar\n :param test_mode\n :return:\n \"\"\"\n # enable eval mode\n model.zero_grad()\n model.eval()\n\n # copy properties for forward overrides\n 
self.copy_trainer_model_properties(model)\n\n # disable gradients to save memory\n torch.set_grad_enabled(False)\n\n # bookkeeping\n outputs = []\n\n # run validation\n for dataloader_idx, dataloader in enumerate(dataloaders):\n dl_outputs = []\n\n # on TPU we have to wrap it under the ParallelLoader\n if self.use_tpu:\n device = xm.xla_device()\n dataloader = xla_pl.ParallelLoader(dataloader, [device])\n dataloader = dataloader.per_device_loader(device)\n\n for batch_idx, batch in enumerate(dataloader):\n if batch is None: # pragma: no cover\n continue\n\n # stop short when on fast_dev_run (sets max_batch=1)\n if batch_idx >= max_batches:\n break\n\n # -----------------\n # RUN EVALUATION STEP\n # -----------------\n output = self.evaluation_forward(model, batch, batch_idx, dataloader_idx, test_mode)\n\n # on dp / ddp2 might still want to do something with the batch parts\n if test_mode:\n if self.is_overriden('test_step_end'):\n model_ref = self.get_model()\n with self.profiler.profile('test_step_end'):\n output = model_ref.test_step_end(output)\n else:\n if self.is_overriden('validation_step_end'):\n model_ref = self.get_model()\n with self.profiler.profile('validation_step_end'):\n output = model_ref.validation_step_end(output)\n\n # track outputs for collation\n dl_outputs.append(output)\n\n # batch done\n if batch_idx % self.progress_bar_refresh_rate == 0:\n if test_mode:\n self.test_progress_bar.update(self.progress_bar_refresh_rate)\n else:\n self.val_progress_bar.update(self.progress_bar_refresh_rate)\n self.main_progress_bar.update(self.progress_bar_refresh_rate)\n outputs.append(dl_outputs)\n\n eval_results = {}\n\n # with a single dataloader don't pass an array\n if len(dataloaders) == 1:\n outputs = outputs[0]\n\n # give model a chance to do something with the outputs (and method defined)\n model = self.get_model()\n\n if test_mode and self.is_overriden('test_epoch_end'):\n eval_results = model.test_epoch_end(outputs)\n elif self.is_overriden('validation_epoch_end'):\n eval_results = model.validation_epoch_end(outputs)\n\n # TODO: remove in v 1.0.0\n if test_mode and self.is_overriden('test_end'):\n eval_results = model.test_end(outputs)\n m = 'test_end was deprecated in 0.7.0 and will be removed 1.0.0. ' \\\n 'Use test_epoch_end instead.'\n warnings.warn(m, DeprecationWarning)\n elif self.is_overriden('validation_end'):\n eval_results = model.validation_end(outputs)\n m = 'validation_end was deprecated in 0.7.0 and will be removed 1.0.0. 
' \\\n 'Use validation_epoch_end instead.'\n warnings.warn(m, DeprecationWarning)\n\n # enable train mode again\n model.train()\n\n # re-enable gradients\n torch.set_grad_enabled(True)\n\n return eval_results\n\n def run_evaluation(self, test_mode: bool = False):\n # when testing make sure user defined a test step\n if test_mode and not self.is_overriden('test_step'):\n m = \"You called `.test()` without defining model's `.test_step()`.\" \\\n \" Please define and try again\"\n raise MisconfigurationException(m)\n\n # Validation/Test begin callbacks\n if test_mode:\n self.on_test_start()\n else:\n self.on_validation_start()\n\n # hook\n model = self.get_model()\n model.on_pre_performance_check()\n\n # select dataloaders\n if test_mode:\n if self.reload_dataloaders_every_epoch or self.test_dataloaders is None:\n self.reset_test_dataloader(model)\n\n dataloaders = self.test_dataloaders\n max_batches = self.num_test_batches\n else:\n # val\n if self.reload_dataloaders_every_epoch or self.val_dataloaders is None:\n self.reset_val_dataloader(model)\n\n dataloaders = self.val_dataloaders\n max_batches = self.num_val_batches\n\n # cap max batches to 1 when using fast_dev_run\n if self.fast_dev_run:\n max_batches = 1\n\n # init validation or test progress bar\n # main progress bar will already be closed when testing so initial position is free\n position = 2 * self.process_position + (not test_mode)\n desc = 'Testing' if test_mode else 'Validating'\n pbar = tqdm(desc=desc, total=max_batches, leave=test_mode, position=position,\n disable=not self.show_progress_bar, dynamic_ncols=True,\n file=sys.stdout)\n setattr(self, f'{\"test\" if test_mode else \"val\"}_progress_bar', pbar)\n\n # run evaluation\n eval_results = self.evaluate(self.model, dataloaders, max_batches, test_mode)\n _, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(\n eval_results)\n\n # add metrics to prog bar\n self.add_tqdm_metrics(prog_bar_metrics)\n\n # log results of test\n if test_mode:\n if self.proc_rank == 0:\n print('-' * 100)\n print('TEST RESULTS')\n print(prog_bar_metrics)\n print('-' * 100)\n\n # log metrics\n self.log_metrics(log_metrics, {})\n\n # track metrics for callbacks\n self.callback_metrics.update(callback_metrics)\n\n # hook\n model.on_post_performance_check()\n\n # add model specific metrics\n if not test_mode:\n self.main_progress_bar.set_postfix(**self.training_tqdm_dict)\n\n # close progress bar\n if test_mode:\n self.test_progress_bar.close()\n else:\n self.val_progress_bar.close()\n\n # Validation/Test end callbacks\n if test_mode:\n self.on_test_end()\n else:\n self.on_validation_end()\n\n def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test_mode: bool = False):\n # make dataloader_idx arg in validation_step optional\n args = [batch, batch_idx]\n\n if test_mode and len(self.test_dataloaders) > 1:\n args.append(dataloader_idx)\n\n elif not test_mode and len(self.val_dataloaders) > 1:\n args.append(dataloader_idx)\n\n # handle DP, DDP forward\n if self.use_ddp or self.use_dp or self.use_ddp2:\n output = model(*args)\n return output\n\n # single GPU data transfer\n if self.single_gpu:\n # for single GPU put inputs on gpu manually\n root_gpu = 0\n if isinstance(self.data_parallel_device_ids, list):\n root_gpu = self.data_parallel_device_ids[0]\n batch = self.transfer_batch_to_gpu(batch, root_gpu)\n args[0] = batch\n\n # TPU data transfer\n if self.use_tpu:\n batch = self.transfer_batch_to_tpu(batch)\n args[0] = batch\n\n # CPU, TPU or gpu step\n if test_mode:\n output = 
model.test_step(*args)\n else:\n output = model.validation_step(*args)\n\n return output\n"
] | [
[
"torch.set_grad_enabled"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jtpils/optimesh | [
"24a8276235b1f4e86f2fb92cf814bf81e7fdbc48"
] | [
"test/test_cpt.py"
] | [
"import numpy\nimport pytest\n\nfrom meshes import pacman, simple0, simple1, simple2, simple3\nfrom optimesh import cpt\n\n\[email protected](\n \"mesh, ref\",\n [(simple0, 5.0 / 18.0), (simple1, 17.0 / 60.0), (pacman, 7.320400634147646)],\n)\ndef test_energy(mesh, ref):\n X, cells = mesh()\n energy = cpt.energy_uniform(X, cells)\n assert abs(energy - ref) < 1.0e-12 * ref\n return\n\n\ndef test_simple1_jac():\n X, cells = simple1()\n\n # First assert that the Jacobian at interior points coincides with the finite\n # difference computed for the energy component from that point. Note that the\n # contribution from all other points is disregarded here, just like in the\n # definition of the Jacobian of Chen-Holst; it's only an approximation after all.\n jac = cpt.jac_uniform(X, cells)\n for j in [0, 1]:\n eps = 1.0e-7\n x0 = X.copy()\n x1 = X.copy()\n x0[4, j] -= eps\n x1[4, j] += eps\n f1 = cpt._energy_uniform_per_node(x1, cells)\n f0 = cpt._energy_uniform_per_node(x0, cells)\n dE = (f1 - f0) / (2 * eps)\n assert abs(dE[4] - jac[4, j]) < 1.0e-10\n\n return\n\n\[email protected](\n \"mesh, ref1, ref2, refi\",\n [\n (simple1, 0.96, 0.3262279745178587, 29.0 / 225.0),\n (pacman, 12.35078985438217, 0.5420691555930099, 0.10101179397867549),\n ],\n)\ndef test_jac(mesh, ref1, ref2, refi):\n X, cells = mesh()\n\n jac = cpt.jac_uniform(X, cells)\n\n nc = jac.flatten()\n norm1 = numpy.linalg.norm(nc, ord=1)\n norm2 = numpy.linalg.norm(nc, ord=2)\n normi = numpy.linalg.norm(nc, ord=numpy.inf)\n\n tol = 1.0e-12\n assert abs(norm1 - ref1) < tol * ref1\n assert abs(norm2 - ref2) < tol * ref2\n assert abs(normi - refi) < tol * refi\n return\n\n\[email protected](\n \"method, mesh, ref1, ref2, refi\",\n [\n (cpt.fixed_point_uniform, simple1, 5.0, 2.1213203435596424, 1.0),\n (cpt.fixed_point_uniform, simple2, 7.390123456790124, 2.804687217072868, 1.7),\n (cpt.fixed_point_uniform, simple3, 12.0, 3.9765648779799356, 2.0),\n (cpt.fixed_point_uniform, pacman, 1901.5304112865315, 74.62452940437535, 5.0),\n #\n (cpt.quasi_newton_uniform, simple1, 5.0, 2.1213203435596424, 1.0),\n (cpt.quasi_newton_uniform, simple2, 7.390123456790124, 2.804687217072868, 1.7),\n (cpt.quasi_newton_uniform, simple3, 12.0, 3.976564877979913, 2.0),\n (cpt.quasi_newton_uniform, pacman, 1900.910794007578, 74.58866209782154, 5.0),\n ],\n)\ndef test_methods(method, mesh, ref1, ref2, refi):\n X_in, cells_in = mesh()\n\n # X_before = X_in.copy()\n # cells_before = cells_in.copy()\n\n X, cells = method(X_in, cells_in, 1.0e-12, 100)\n\n # assert numpy.all(cells_in == cells_before)\n # assert numpy.all(numpy.abs(X_in == X_before) < 1.0e-15)\n\n # Test if we're dealing with the mesh we expect.\n nc = X.flatten()\n norm1 = numpy.linalg.norm(nc, ord=1)\n norm2 = numpy.linalg.norm(nc, ord=2)\n normi = numpy.linalg.norm(nc, ord=numpy.inf)\n\n tol = 1.0e-12\n assert abs(norm1 - ref1) < tol * ref1\n assert abs(norm2 - ref2) < tol * ref2\n assert abs(normi - refi) < tol * refi\n return\n\n\[email protected](\n \"mesh, ref1, ref2, refi\",\n [\n (simple1, 5.0, 2.1213203435596424, 1.0),\n (pacman, 1864.2406342781524, 73.19722600427883, 5.0),\n ],\n)\ndef test_density_preserving(mesh, ref1, ref2, refi):\n X, cells = mesh()\n\n X, cells = cpt.linear_solve_density_preserving(X, cells, 0.0, 10)\n\n # Test if we're dealing with the mesh we expect.\n nc = X.flatten()\n norm1 = numpy.linalg.norm(nc, ord=1)\n norm2 = numpy.linalg.norm(nc, ord=2)\n normi = numpy.linalg.norm(nc, ord=numpy.inf)\n\n tol = 1.0e-12\n assert abs(norm1 - ref1) < tol * ref1\n assert 
abs(norm2 - ref2) < tol * ref2\n assert abs(normi - refi) < tol * refi\n return\n\n\n# if __name__ == \"__main__\":\n# from meshes import circle\n# test_fixed_point()\n# X, cells = circle()\n# X, cells = cpt.fixed_point_uniform(X, cells, 1.0e-3, 100)\n"
] | [
[
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
galyalina/mmdetection | [
"6906a7afccb569831099df4dc1b1568e184b5ba4"
] | [
"mmdet/models/backbones/mobilenet_v2.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..builder import BACKBONES\nfrom ..utils import InvertedResidual, make_divisible\n\n\[email protected]_module()\nclass MobileNetV2(BaseModule):\n \"\"\"MobileNetV2 backbone.\n\n Args:\n widen_factor (float): Width multiplier, multiply number of\n channels in each layer by this amount. Default: 1.0.\n out_indices (Sequence[int], optional): Output from which stages.\n Default: (1, 2, 4, 7).\n frozen_stages (int): Stages to be frozen (all param fixed).\n Default: -1, which means not freezing any parameters.\n conv_cfg (dict, optional): Config dict for convolution layer.\n Default: None, which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU6').\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n pretrained (str, optional): model pretrained path. Default: None\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None\n \"\"\"\n\n # Parameters to build layers. 4 parameters are needed to construct a\n # layer, from left to right: expand_ratio, channel, num_blocks, stride.\n arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],\n [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],\n [6, 320, 1, 1]]\n\n def __init__(self,\n widen_factor=1.,\n out_indices=(1, 2, 4, 7),\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU6'),\n norm_eval=False,\n with_cp=False,\n pretrained=None,\n init_cfg=None):\n super(MobileNetV2, self).__init__(init_cfg)\n\n self.pretrained = pretrained\n assert not (init_cfg and pretrained), \\\n 'init_cfg and pretrained cannot be setting at the same time'\n if isinstance(pretrained, str):\n warnings.warn('DeprecationWarning: pretrained is deprecated, '\n 'please use \"init_cfg\" instead')\n self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n elif pretrained is None:\n if init_cfg is None:\n self.init_cfg = [\n dict(type='Kaiming', layer='Conv2d'),\n dict(\n type='Constant',\n val=1,\n layer=['_BatchNorm', 'GroupNorm'])\n ]\n else:\n raise TypeError('pretrained must be a str or None')\n\n self.widen_factor = widen_factor\n self.out_indices = out_indices\n if not set(out_indices).issubset(set(range(0, 8))):\n raise ValueError('out_indices must be a subset of range'\n f'(0, 8). But received {out_indices}')\n\n if frozen_stages not in range(-1, 8):\n raise ValueError('frozen_stages must be in range(-1, 8). 
'\n f'But received {frozen_stages}')\n self.out_indices = out_indices\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.norm_eval = norm_eval\n self.with_cp = with_cp\n\n self.in_channels = make_divisible(32 * widen_factor, 8)\n\n self.conv1 = ConvModule(\n in_channels=3,\n out_channels=self.in_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg)\n\n self.layers = []\n\n for i, layer_cfg in enumerate(self.arch_settings):\n expand_ratio, channel, num_blocks, stride = layer_cfg\n out_channels = make_divisible(channel * widen_factor, 8)\n inverted_res_layer = self.make_layer(\n out_channels=out_channels,\n num_blocks=num_blocks,\n stride=stride,\n expand_ratio=expand_ratio)\n layer_name = f'layer{i + 1}'\n self.add_module(layer_name, inverted_res_layer)\n self.layers.append(layer_name)\n\n if widen_factor > 1.0:\n self.out_channel = int(1280 * widen_factor)\n else:\n self.out_channel = 1280\n\n layer = ConvModule(\n in_channels=self.in_channels,\n out_channels=self.out_channel,\n kernel_size=1,\n stride=1,\n padding=0,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg)\n self.add_module('conv2', layer)\n self.layers.append('conv2')\n\n def make_layer(self, out_channels, num_blocks, stride, expand_ratio):\n \"\"\"Stack InvertedResidual blocks to build a layer for MobileNetV2.\n\n Args:\n out_channels (int): out_channels of block.\n num_blocks (int): number of blocks.\n stride (int): stride of the first block. Default: 1\n expand_ratio (int): Expand the number of channels of the\n hidden layer in InvertedResidual by this ratio. Default: 6.\n \"\"\"\n layers = []\n for i in range(num_blocks):\n if i >= 1:\n stride = 1\n layers.append(\n InvertedResidual(\n self.in_channels,\n out_channels,\n mid_channels=int(round(self.in_channels * expand_ratio)),\n stride=stride,\n with_expand_conv=expand_ratio != 1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg,\n with_cp=self.with_cp))\n self.in_channels = out_channels\n\n return nn.Sequential(*layers)\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n for param in self.conv1.parameters():\n param.requires_grad = False\n for i in range(1, self.frozen_stages + 1):\n layer = getattr(self, f'layer{i}')\n layer.eval()\n for param in layer.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n x = self.conv1(x)\n outs = []\n for i, layer_name in enumerate(self.layers):\n layer = getattr(self, layer_name)\n x = layer(x)\n if i in self.out_indices:\n outs.append(x)\n return tuple(outs)\n\n def train(self, mode=True):\n \"\"\"Convert the model into training mode while keep normalization layer\n frozen.\"\"\"\n super(MobileNetV2, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()\n"
] | [
[
"torch.nn.Sequential"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HeshamMeneisi/AnnaPacman | [
"8055b56efb12a16c2dc3694e59760815bcecb970"
] | [
"env_test.py"
] | [
"import numpy as np\nimport time\nimport sys\n\nprint(\"## Checking Keras\\n\\n\")\nimport keras.backend as K\nbackend = K.backend()\n\nvlen = 10 * 30 * 768\niters = 1000\nv = np.random.rand(vlen)\ntv = np.exp(v)\n\n\nif backend == 'theano':\n print(\"\\n\\n## Checking Theano\\n\\n\")\n import theano\n from theano import function, config, shared, sandbox\n import theano.tensor as T\n\n print(\"Version\", theano.__version__)\n\n print(\"\\nTesting...\")\n\n x = shared(np.asarray(v, config.floatX))\n f = function([], T.exp(x))\n print(f.maker.fgraph.toposort())\n t0 = time.time()\n\n for i in range(iters):\n r = f()\n\n t1 = time.time()\n\n print(\"\\nResult\", r)\n print(\"True Values\", tv)\n print(\"Looping %d times took %f seconds\" % (iters, t1 - t0))\n\n if np.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):\n print('Theano is using the CPU', file=sys.stderr)\n else:\n print('Theano is using the GPU')\n\nelif backend == 'tensorflow':\n print(\"\\n\\n## Checking TensorFlow\\n\\n\")\n import tensorflow as tf\n from tensorflow.python.client import device_lib\n\n print(\"Version\", tf.__version__)\n\n print(\"\\nTesting...\")\n\n x = tf.constant(v, shape=[vlen], name='x')\n\n\n gpu = False\n print(device_lib.list_local_devices())\n for device in device_lib.list_local_devices():\n if device.device_type == 'GPU':\n gpu = True\n\n if gpu:\n with tf.device('/gpu:0'):\n op = tf.exp(x)\n with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:\n t0 = time.time()\n for i in range(iters):\n print(\"Result\", sess.run(op))\n print(\"True Values\", tv)\n\n t1 = time.time()\n print(\"Looping %d times took %f seconds\" % (iters, t1 - t0))\n print(\"TensorFlow is using the GPU\")\n else:\n with tf.device('/cpu:0'):\n op = tf.exp(x)\n t0 = time.time()\n with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:\n print(\"Result\", sess.run(op))\n print(\"True Values\", tv)\n t1 = time.time()\n print(\"Looping %d times took %f seconds\" % (iters, t1 - t0))\n print(\"TensorFlow is using the CPU\", file=sys.stderr)\nelse: # CNTK?\n print(\"There is no test available for\", backend)"
] | [
[
"tensorflow.device",
"tensorflow.constant",
"tensorflow.python.client.device_lib.list_local_devices",
"numpy.asarray",
"tensorflow.exp",
"tensorflow.ConfigProto",
"numpy.random.rand",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nitinagarwal/style_clustering | [
"b9f2b00df37c11111313ce27246b065b0e22b069"
] | [
"utils/data_prep_util.py"
] | [
"from __future__ import print_function\nimport numpy as np\nimport sys\nimport math\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.init import xavier_normal, xavier_uniform\n\nfrom plyfile import (PlyData, PlyElement)\n\n# --------------------------------\n# MESH IO\n# --------------------------------\n\ndef load_ply_data(filename):\n \"\"\" read ply file, only vertices and faces \"\"\"\n\n plydata = PlyData.read(filename)\n\n vertices = plydata['vertex'].data[:]\n vertices = np.array([[x, y, z] for x,y,z in vertices])\n\n # input are all traingle meshes\n faces = plydata['face'].data['vertex_indices'][:]\n faces = np.array([[f1, f2, f3] for f1,f2,f3 in faces])\n\n return vertices, faces\n\ndef save_ply_data(filename, vertex, face):\n \"\"\" save ply file, only vertices and faces \"\"\"\n\n vertices = np.zeros(vertex.shape[0], dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])\n for i in range(vertex.shape[0]):\n vertices[i] = (vertex[i][0], vertex[i][1], vertex[i][2])\n # print(vertex, vertex.dtype)\n \n faces = np.zeros(face.shape[0], dtype=[('vertex_indices', 'i4', (3,))])\n for i in range(face.shape[0]):\n faces[i] = ([face[i][0], face[i][1], face[i][2]])\n # print(faces.shape, faces.dtype)\n\n e1 = PlyElement.describe(vertices, 'vertex')\n e2 = PlyElement.describe(faces, 'face')\n \n PlyData([e1, e2], text=True).write(filename)\n print('file saved')\n\ndef load_obj_data(filename):\n \"\"\"\n A simply obj reader which reads vertices and faces only. \n i.e. lines starting with v and f only\n \"\"\"\n mesh = {}\n ver =[]\n fac = []\n if not path.endswith('obj'):\n sys.exit('the input file is not a obj file')\n\n with open(filename) as f:\n for line in f:\n if line.strip():\n inp = line.split()\n if(inp[0]=='v'):\n ver.append([float(inp[1]), float(inp[2]), float(inp[3])])\n elif(inp[0]=='f'):\n fac.append([float(inp[1]), float(inp[2]), float(inp[3])])\n\n V = np.array(ver)\n F = np.array(fac)\n \n return V, F\n\n\n# --------------------------------\n# Mesh Utils \n# --------------------------------\n\ndef jitter_vertices(vertices, sigma=0.01, clip=0.05):\n \"\"\" Randomly jitter points. 
jittering is per point.\n Input:\n Nx3 array, original shape \n Output:\n Nx3 array, jittered shape \n \"\"\"\n N, C = vertices.shape\n assert(clip > 0)\n jittered_data = np.clip(sigma * np.random.randn(N, C), -1*clip, clip)\n jittered_data += vertices\n return jittered_data \n\ndef rotate_vertices(vertices):\n \"\"\" Randomly rotate the points to augment the dataset\n rotation is per shape, along the up direction\n Input:\n Nx3 array, input shape\n Output:\n Nx3 array, rotated shape\n \"\"\"\n rotated_data = np.zeros(vertices.shape, dtype=np.float32)\n\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n\n rotated_data = np.dot(vertices, rotation_matrix)\n return rotated_data\n\ndef rotate_vertices_by_angle(vertices, rotation_angle):\n \"\"\" Rotate the points by rotation_angle to augment the dataset\n rotation is per shape, along the up direction\n Input:\n Nx3 array, input shape\n rotation_angle in radians\n Output:\n Nx3 array, rotated shape\n \"\"\"\n rotated_data = np.zeros(vertices.shape, dtype=np.float32)\n\n # rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n\n rotated_data = np.dot(vertices, rotation_matrix)\n return rotated_data\n\n\ndef normalize_shape(vertices):\n # normalize shape to fit inside a unit sphere\n ver_max = np.max(vertices, axis=0)\n ver_min = np.min(vertices, axis=0)\n \n centroid = np.stack((ver_max, ver_min), 0)\n centroid = np.mean(centroid, axis=0)\n vertices = vertices - centroid\n\n longest_distance = np.max(np.sqrt(np.sum((vertices**2), axis=1)))\n vertices = vertices / longest_distance\n\n return vertices\n\n\n# --------------------------------\n# Training Utils \n# --------------------------------\n\nclass AverageValueMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef model_summary(model, print_layers=False):\n train_count = 0\n nontrain_count = 0\n \n for name, p in model.named_parameters():\n if(p.requires_grad):\n if(print_layers): \n print('Train: ', name, 'has', p.numel())\n train_count += p.numel()\n \n elif not p.requires_grad:\n if(print_layers):\n print('Non Train: ', name, 'has', p.numel())\n nontrain_count += p.numel()\n \n print('Total Parameters: ', train_count+nontrain_count) \n print('Trainable Parameters: ',train_count)\n print('NonTrainable Parameters: ',nontrain_count)\n\n\ndef init_net(net, cuda=True):\n \"\"\"\n Initialize the network with xavier initialization\n (applied to the Conv1d and Linear layers)\n \"\"\"\n def initialize_weights(m):\n if(isinstance(m, nn.Conv1d)):\n xavier_normal(m.weight.data)\n\n elif(isinstance(m, nn.Linear)):\n xavier_normal(m.weight.data)\n\n net.apply(initialize_weights)\n\n if cuda:\n net.cuda()\n print('network on cuda')\n\n return net\n\n\n\nif __name__ == '__main__':\n\n # filename = '../../scripts/model/chair_aligned_simplified/chair_0001.ply'\n # filename = '../../scripts/model/chair_aligned_simplified/8k/chair_0012.ply'\n filename = '/Users/Nitin/Desktop/8k/chair_0011.ply'\n\n # chair_aligned/chair_0016.ply'\n 
V, F = load_ply_data(filename)\n print(V.shape, F.shape)\n # print(F)\n # wfile = '../../scripts/model/chair/test.ply'\n # save_ply_data(wfile, V, F)\n # F = F[0:10,:]\n # V = V[0:20,:]\n\n # adj, adj_size = get_adjacency_matrix(V, F)\n Q = compute_Q_matrix(V, F)\n # print(Q)\n print(np.shape(Q), type(Q))\n\n # print([adj[i,:] for i in range(len(adj_size))])\n # print(adj.shape, adj_size.shape)\n # print(adj, adj_size)\n\n\n\n"
] | [
[
"numpy.dot",
"numpy.min",
"torch.nn.init.xavier_normal",
"numpy.cos",
"numpy.stack",
"numpy.sin",
"numpy.max",
"numpy.mean",
"numpy.shape",
"numpy.random.randn",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dany-nonstop/haystack | [
"5ef59b1901da6d51bfa085683321a243228d4fc9"
] | [
"haystack/database/elasticsearch.py"
] | [
"import json\nimport logging\nimport time\nfrom string import Template\nfrom typing import List, Optional, Union, Dict, Any\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk, scan\nimport numpy as np\n\nfrom haystack.database.base import BaseDocumentStore, Document, Label\nfrom haystack.indexing.utils import eval_data_from_file\nfrom haystack.retriever.base import BaseRetriever\n\nlogger = logging.getLogger(__name__)\n\n\nclass ElasticsearchDocumentStore(BaseDocumentStore):\n def __init__(\n self,\n host: str = \"localhost\",\n port: int = 9200,\n username: str = \"\",\n password: str = \"\",\n index: str = \"document\",\n label_index: str = \"label\",\n search_fields: Union[str, list] = \"text\",\n text_field: str = \"text\",\n name_field: str = \"name\",\n embedding_field: str = \"embedding\",\n embedding_dim: int = 768,\n custom_mapping: Optional[dict] = None,\n excluded_meta_data: Optional[list] = None,\n faq_question_field: Optional[str] = None,\n scheme: str = \"http\",\n ca_certs: bool = False,\n verify_certs: bool = True,\n create_index: bool = True,\n update_existing_documents: bool = False,\n ):\n \"\"\"\n A DocumentStore using Elasticsearch to store and query the documents for our search.\n\n * Keeps all the logic to store and query documents from Elastic, incl. mapping of fields, adding filters or boosts to your queries, and storing embeddings\n * You can either use an existing Elasticsearch index or create a new one via haystack\n * Retrievers operate on top of this DocumentStore to find the relevant documents for a query\n\n :param host: url of elasticsearch\n :param port: port of elasticsearch\n :param username: username\n :param password: password\n :param index: Name of index in elasticsearch to use. If not existing yet, we will create one.\n :param search_fields: Name of fields used by ElasticsearchRetriever to find matches in the docs to our incoming query (using elastic's multi_match query), e.g. [\"title\", \"full_text\"]\n :param text_field: Name of field that might contain the answer and will therefore be passed to the Reader Model (e.g. \"full_text\").\n If no Reader is used (e.g. in FAQ-Style QA) the plain content of this field will just be returned.\n :param name_field: Name of field that contains the title of the the doc\n :param embedding_field: Name of field containing an embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)\n :param embedding_dim: Dimensionality of embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)\n :param custom_mapping: If you want to use your own custom mapping for creating a new index in Elasticsearch, you can supply it here as a dictionary.\n :param excluded_meta_data: Name of fields in Elasticsearch that should not be returned (e.g. [field_one, field_two]).\n Helpful if you have fields with long, irrelevant content that you don't want to display in results (e.g. embedding vectors).\n :param scheme: 'https' or 'http', protocol used to connect to your elasticsearch instance\n :param ca_certs: Root certificates for SSL\n :param verify_certs: Whether to be strict about ca certificates\n :param create_index: Whether to try creating a new index (If the index of that name is already existing, we will just continue in any case)\n :param update_existing_documents: Whether to update any existing documents with the same ID when adding\n documents. 
When set as True, any document with an existing ID gets updated.\n If set to False, an error is raised if the document ID of the document being\n added already exists.\n \"\"\"\n self.client = Elasticsearch(hosts=[{\"host\": host, \"port\": port}], http_auth=(username, password),\n scheme=scheme, ca_certs=ca_certs, verify_certs=verify_certs)\n\n # configure mappings to ES fields that will be used for querying / displaying results\n if type(search_fields) == str:\n search_fields = [search_fields]\n\n #TODO we should implement a more flexible interal mapping here that simplifies the usage of additional,\n # custom fields (e.g. meta data you want to return)\n self.search_fields = search_fields\n self.text_field = text_field\n self.name_field = name_field\n self.embedding_field = embedding_field\n self.embedding_dim = embedding_dim\n self.excluded_meta_data = excluded_meta_data\n self.faq_question_field = faq_question_field\n\n self.custom_mapping = custom_mapping\n if create_index:\n self._create_document_index(index)\n self.index: str = index\n\n self._create_label_index(label_index)\n self.label_index: str = label_index\n self.update_existing_documents = update_existing_documents\n\n def _create_document_index(self, index_name):\n if self.client.indices.exists(index=index_name):\n return\n\n if self.custom_mapping:\n mapping = self.custom_mapping\n else:\n mapping = {\n \"mappings\": {\n \"properties\": {\n self.name_field: {\"type\": \"keyword\"},\n self.text_field: {\"type\": \"text\"},\n },\n \"dynamic_templates\": [\n {\n \"strings\": {\n \"path_match\": \"*\",\n \"match_mapping_type\": \"string\",\n \"mapping\": {\"type\": \"keyword\"}}}\n ],\n }\n }\n if self.embedding_field:\n mapping[\"mappings\"][\"properties\"][self.embedding_field] = {\"type\": \"dense_vector\", \"dims\": self.embedding_dim}\n self.client.indices.create(index=index_name, body=mapping)\n\n def _create_label_index(self, index_name):\n if self.client.indices.exists(index=index_name):\n return\n mapping = {\n \"mappings\": {\n \"properties\": {\n \"question\": {\"type\": \"text\"},\n \"answer\": {\"type\": \"text\"},\n \"is_correct_answer\": {\"type\": \"boolean\"},\n \"is_correct_document\": {\"type\": \"boolean\"},\n \"origin\": {\"type\": \"keyword\"},\n \"document_id\": {\"type\": \"keyword\"},\n \"offset_start_in_doc\": {\"type\": \"long\"},\n \"no_answer\": {\"type\": \"boolean\"},\n \"model_id\": {\"type\": \"keyword\"},\n \"type\": {\"type\": \"keyword\"},\n }\n }\n }\n self.client.indices.create(index=index_name, body=mapping)\n\n # TODO: Add flexibility to define other non-meta and meta fields expected by the Document class\n def _create_document_field_map(self) -> Dict:\n return {\n self.text_field: \"text\",\n self.embedding_field: \"embedding\",\n self.faq_question_field if self.faq_question_field else \"question\": \"question\"\n }\n\n def get_document_by_id(self, id: str, index=None) -> Optional[Document]:\n index = index or self.index\n documents = self.get_documents_by_id([id], index=index)\n if documents:\n return documents[0]\n else:\n return None\n\n def get_documents_by_id(self, ids: List[str], index=None) -> List[Document]:\n index = index or self.index\n query = {\"query\": {\"ids\": {\"values\": ids}}}\n result = self.client.search(index=index, body=query)[\"hits\"][\"hits\"]\n documents = [self._convert_es_hit_to_document(hit) for hit in result]\n return documents\n\n def write_documents(self, documents: Union[List[dict], List[Document]], index: Optional[str] = None):\n \"\"\"\n Indexes 
documents for later queries in Elasticsearch.\n\n When using explicit document IDs, any existing document with the same ID gets updated.\n\n :param documents: a list of Python dictionaries or a list of Haystack Document objects.\n For documents as dictionaries, the format is {\"text\": \"<the-actual-text>\"}.\n Optionally: Include meta data via {\"text\": \"<the-actual-text>\",\n \"meta\":{\"name\": \"<some-document-name>, \"author\": \"somebody\", ...}}\n It can be used for filtering and is accessible in the responses of the Finder.\n Advanced: If you are using your own Elasticsearch mapping, the key names in the dictionary\n should be changed to what you have set for self.text_field and self.name_field.\n :param index: Elasticsearch index where the documents should be indexed. If not supplied, self.index will be used.\n :return: None\n \"\"\"\n\n if index and not self.client.indices.exists(index=index):\n self._create_document_index(index)\n\n if index is None:\n index = self.index\n\n # Make sure we comply to Document class format\n documents_objects = [Document.from_dict(d, field_map=self._create_document_field_map())\n if isinstance(d, dict) else d for d in documents]\n\n documents_to_index = []\n for doc in documents_objects:\n\n _doc = {\n \"_op_type\": \"index\" if self.update_existing_documents else \"create\",\n \"_index\": index,\n **doc.to_dict(field_map=self._create_document_field_map())\n } # type: Dict[str, Any]\n\n # rename id for elastic\n _doc[\"_id\"] = str(_doc.pop(\"id\"))\n\n # don't index query score and empty fields\n _ = _doc.pop(\"query_score\", None)\n _doc = {k:v for k,v in _doc.items() if v is not None}\n\n # In order to have a flat structure in elastic + similar behaviour to the other DocumentStores,\n # we \"unnest\" all value within \"meta\"\n if \"meta\" in _doc.keys():\n for k, v in _doc[\"meta\"].items():\n _doc[k] = v\n _doc.pop(\"meta\")\n documents_to_index.append(_doc)\n bulk(self.client, documents_to_index, request_timeout=300, refresh=\"wait_for\")\n\n def write_labels(self, labels: Union[List[Label], List[dict]], index: Optional[str] = None):\n index = index or self.label_index\n if index and not self.client.indices.exists(index=index):\n self._create_label_index(index)\n\n # Make sure we comply to Label class format\n label_objects = [Label.from_dict(l) if isinstance(l, dict) else l for l in labels]\n\n labels_to_index = []\n for label in label_objects:\n _label = {\n \"_op_type\": \"index\" if self.update_existing_documents else \"create\",\n \"_index\": index,\n **label.to_dict()\n } # type: Dict[str, Any]\n\n labels_to_index.append(_label)\n bulk(self.client, labels_to_index, request_timeout=300, refresh=\"wait_for\")\n\n def update_document_meta(self, id: str, meta: Dict[str, str]):\n body = {\"doc\": meta}\n self.client.update(index=self.index, doc_type=\"_doc\", id=id, body=body, refresh=\"wait_for\")\n\n def get_document_count(self, index: Optional[str] = None) -> int:\n if index is None:\n index = self.index\n result = self.client.count(index=index)\n count = result[\"count\"]\n return count\n\n def get_label_count(self, index: Optional[str] = None) -> int:\n return self.get_document_count(index=index)\n\n def get_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) -> List[Document]:\n if index is None:\n index = self.index\n\n result = self.get_all_documents_in_index(index=index, filters=filters)\n documents = [self._convert_es_hit_to_document(hit) for hit in result]\n\n return documents\n\n 
def get_all_labels(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) -> List[Label]:\n index = index or self.label_index\n result = self.get_all_documents_in_index(index=index, filters=filters)\n labels = [Label.from_dict(hit[\"_source\"]) for hit in result]\n return labels\n\n def get_all_documents_in_index(self, index: str, filters: Optional[Dict[str, List[str]]] = None) -> List[dict]:\n body = {\n \"query\": {\n \"bool\": {\n \"must\": {\n \"match_all\": {}\n }\n }\n }\n } # type: Dict[str, Any]\n\n if filters:\n filter_clause = []\n for key, values in filters.items():\n filter_clause.append(\n {\n \"terms\": {key: values}\n }\n )\n body[\"query\"][\"bool\"][\"filter\"] = filter_clause\n result = scan(self.client, query=body, index=index)\n\n return result\n\n def query(\n self,\n query: Optional[str],\n filters: Optional[Dict[str, List[str]]] = None,\n top_k: int = 10,\n custom_query: Optional[str] = None,\n index: Optional[str] = None,\n ) -> List[Document]:\n\n if index is None:\n index = self.index\n\n # Naive retrieval without BM25, only filtering\n if query is None:\n body = {\"query\":\n {\"bool\": {\"must\":\n {\"match_all\": {}}}}} # type: Dict[str, Any]\n if filters:\n filter_clause = []\n for key, values in filters.items():\n filter_clause.append(\n {\n \"terms\": {key: values}\n }\n )\n body[\"query\"][\"bool\"][\"filter\"] = filter_clause\n\n # Retrieval via custom query\n elif custom_query: # substitute placeholder for question and filters for the custom_query template string\n template = Template(custom_query)\n # replace all \"${question}\" placeholder(s) with query\n substitutions = {\"question\": query}\n # For each filter we got passed, we'll try to find & replace the corresponding placeholder in the template\n # Example: filters={\"years\":[2018]} => replaces {$years} in custom_query with '[2018]'\n if filters:\n for key, values in filters.items():\n values_str = json.dumps(values)\n substitutions[key] = values_str\n custom_query_json = template.substitute(**substitutions)\n body = json.loads(custom_query_json)\n # add top_k\n body[\"size\"] = str(top_k)\n\n # Default Retrieval via BM25 using the user query on `self.search_fields`\n else:\n body = {\n \"size\": str(top_k),\n \"query\": {\n \"bool\": {\n \"should\": [{\"multi_match\": {\"query\": query, \"type\": \"most_fields\", \"fields\": self.search_fields}}]\n }\n },\n }\n\n if filters:\n filter_clause = []\n for key, values in filters.items():\n if type(values) != list:\n raise ValueError(f'Wrong filter format for key \"{key}\": Please provide a list of allowed values for each key. 
'\n 'Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]} ')\n filter_clause.append(\n {\n \"terms\": {key: values}\n }\n )\n body[\"query\"][\"bool\"][\"filter\"] = filter_clause\n\n if self.excluded_meta_data:\n body[\"_source\"] = {\"excludes\": self.excluded_meta_data}\n\n logger.debug(f\"Retriever query: {body}\")\n result = self.client.search(index=index, body=body)[\"hits\"][\"hits\"]\n\n documents = [self._convert_es_hit_to_document(hit) for hit in result]\n return documents\n\n def query_by_embedding(self,\n query_emb: np.array,\n filters: Optional[Dict[str, List[str]]] = None,\n top_k: int = 10,\n index: Optional[str] = None) -> List[Document]:\n if index is None:\n index = self.index\n\n if not self.embedding_field:\n raise RuntimeError(\"Please specify arg `embedding_field` in ElasticsearchDocumentStore()\")\n else:\n # +1 in cosine similarity to avoid negative numbers\n body= {\n \"size\": top_k,\n \"query\": {\n \"script_score\": {\n \"query\": {\"match_all\": {}},\n \"script\": {\n \"source\": f\"cosineSimilarity(params.query_vector,doc['{self.embedding_field}']) + 1.0\",\n \"params\": {\n \"query_vector\": query_emb.tolist()\n }\n }\n }\n }\n } # type: Dict[str,Any]\n\n if filters:\n filter_clause = []\n for key, values in filters.items():\n filter_clause.append(\n {\n \"terms\": {key: values}\n }\n )\n body[\"query\"][\"bool\"][\"filter\"] = filter_clause\n\n if self.excluded_meta_data:\n body[\"_source\"] = {\"excludes\": self.excluded_meta_data}\n\n logger.debug(f\"Retriever query: {body}\")\n result = self.client.search(index=index, body=body, request_timeout=300)[\"hits\"][\"hits\"]\n\n documents = [self._convert_es_hit_to_document(hit, score_adjustment=-1) for hit in result]\n return documents\n\n def _convert_es_hit_to_document(self, hit: dict, score_adjustment: int = 0) -> Document:\n # We put all additional data of the doc into meta_data and return it in the API\n meta_data = {k:v for k,v in hit[\"_source\"].items() if k not in (self.text_field, self.faq_question_field, self.embedding_field)}\n name = meta_data.pop(self.name_field, None)\n if name:\n meta_data[\"name\"] = name\n\n document = Document(\n id=hit[\"_id\"],\n text=hit[\"_source\"].get(self.text_field),\n meta=meta_data,\n query_score=hit[\"_score\"] + score_adjustment if hit[\"_score\"] else None,\n question=hit[\"_source\"].get(self.faq_question_field),\n embedding=hit[\"_source\"].get(self.embedding_field)\n )\n return document\n\n def describe_documents(self, index=None):\n if index is None:\n index = self.index\n docs = self.get_all_documents(index)\n\n l = [len(d.text) for d in docs]\n stats = {\"count\": len(docs),\n \"chars_mean\": np.mean(l),\n \"chars_max\": max(l),\n \"chars_min\": min(l),\n \"chars_median\": np.median(l),\n }\n return stats\n\n def update_embeddings(self, retriever: BaseRetriever, index: Optional[str] = None):\n \"\"\"\n Updates the embeddings in the the document store using the encoding model specified in the retriever.\n This can be useful if want to add or change the embeddings for your documents (e.g. 
after changing the retriever config).\n\n :param retriever: Retriever\n :param index: Index name to update\n :return: None\n \"\"\"\n if index is None:\n index = self.index\n\n if not self.embedding_field:\n raise RuntimeError(\"Specify the arg `embedding_field` when initializing ElasticsearchDocumentStore()\")\n\n docs = self.get_all_documents(index)\n passages = [d.text for d in docs]\n\n #TODO Index embeddings every X batches to avoid OOM for huge document collections\n logger.info(f\"Updating embeddings for {len(passages)} docs ...\")\n\n # TODO send whole Document to retriever and let retriever decide what fields to embed\n from haystack.retriever.dense import DensePassageRetriever\n if isinstance(retriever,DensePassageRetriever):\n titles = []\n for d in docs:\n if d.meta is not None:\n titles.append(d.meta['name'] if 'name' in d.meta.keys() else None)\n if len(titles) == len(passages):\n embeddings = retriever.embed_passages(passages,titles) # type: ignore\n else:\n embeddings = retriever.embed_passages(passages) # type: ignore\n else: #EmbeddingRetriever\n embeddings = retriever.embed_passages(passages) # type: ignore\n\n assert len(docs) == len(embeddings)\n\n if embeddings[0].shape[0] != self.embedding_dim:\n raise RuntimeError(f\"Embedding dim. of model ({embeddings[0].shape[0]})\"\n f\" doesn't match embedding dim. in documentstore ({self.embedding_dim}).\"\n \"Specify the arg `embedding_dim` when initializing ElasticsearchDocumentStore()\")\n doc_updates = []\n for doc, emb in zip(docs, embeddings):\n update = {\"_op_type\": \"update\",\n \"_index\": index,\n \"_id\": doc.id,\n \"doc\": {self.embedding_field: emb.tolist()},\n }\n doc_updates.append(update)\n\n bulk(self.client, doc_updates, request_timeout=300)\n\n def add_eval_data(self, filename: str, doc_index: str = \"eval_document\", label_index: str = \"label\"):\n \"\"\"\n Adds a SQuAD-formatted file to the DocumentStore in order to be able to perform evaluation on it.\n\n :param filename: Name of the file containing evaluation data\n :type filename: str\n :param doc_index: Elasticsearch index where evaluation documents should be stored\n :type doc_index: str\n :param label_index: Elasticsearch index where labeled questions should be stored\n :type label_index: str\n \"\"\"\n\n docs, labels = eval_data_from_file(filename)\n self.write_documents(docs, index=doc_index)\n self.write_labels(labels, index=label_index)\n\n def delete_all_documents(self, index: str):\n \"\"\"\n Delete all documents in an index.\n\n :param index: index name\n :return: None\n \"\"\"\n self.client.delete_by_query(index=index, body={\"query\": {\"match_all\": {}}}, ignore=[404])\n # We want to be sure that all docs are deleted before continuing (delete_by_query doesn't support wait_for)\n time.sleep(1)\n\n\n\n\n\n\n"
] | [
[
"numpy.median",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AndreCNF/Deep_Q_Learning_Flappy_Compass | [
"47992d770bd81a6e3be18f76ab4c4558b5e23333"
] | [
"train.py"
] | [
"import argparse # Allows parsing arguments in the command line\nimport os # os handles directory/workspace changes\nfrom random import random, randint, sample # Handles random operations\nfrom comet_ml import Experiment # Comet.ml can log training metrics, parameters and do version control\nfrom datetime import datetime # datetime to use proper date and time formats\nimport numpy as np # NumPy to handle numeric and NaN operations\nimport torch # PyTorch to create and apply deep learning models\nfrom torch import nn # nn for neural network layers\nfrom src.deep_q_network import DeepQNetwork # Deep Q Network model\nfrom src.flappy_bird import FlappyBird # Flappy Compass game interface\nfrom src.utils import pre_processing # Image pre-processing method\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n \"\"\"Implementation of Deep Q Network to play Flappy Bird\"\"\")\n parser.add_argument(\"--image_size\", type=int, default=84, help=\"The common width and height for all images\")\n parser.add_argument(\"--batch_size\", type=int, default=32, help=\"The number of images per batch\")\n parser.add_argument(\"--optimizer\", type=str, choices=[\"sgd\", \"adam\"], default=\"adam\", help=\"Optimization algorithm.\")\n parser.add_argument(\"--lr\", type=float, default=1e-6, help=\"The learning rate applied in the training process.\")\n parser.add_argument(\"--gamma\", type=float, default=0.99, help=\"Reinforcement learning policy parameter that influences non-terminal reward values.\")\n parser.add_argument(\"--initial_epsilon\", type=float, default=0.1, help=\"Initial epsilon value that indicates the initial probability of random actions.\")\n parser.add_argument(\"--final_epsilon\", type=float, default=1e-4, help=\"Final epsilon value that indicates the final probability of random actions.\")\n parser.add_argument(\"--num_iters\", type=int, default=2000000, help=\"Number of training iterations.\")\n parser.add_argument(\"--iters_to_save\", type=int, default=1000000, help=\"Iterations intervals when the model is saved\")\n parser.add_argument(\"--replay_memory_size\", type=int, default=50000, help=\"Number of epoches between testing phases\")\n parser.add_argument(\"--conv_dim\", type=int, nargs='+', help=\"Number of filters, i.e. filter depth, of each convolutional layer.\")\n parser.add_argument(\"--conv_kernel_sizes\", nargs='+', type=int, help=\"Kernel size (or convolutional matrix dimension) of each convolutional layer.\")\n parser.add_argument(\"--conv_strides\", nargs='+', type=int, help=\"Stride used in each convolutional layer.\")\n parser.add_argument(\"--fc_dim\", type=int, nargs='*', help=\"Output dimension of each hidden fully connected layer (if there are any).\")\n parser.add_argument(\"--random_seed\", type=int, default=123, help=\"Seed value used in random operations. 
Using the same value allows reproducibility.\")\n parser.add_argument(\"--saved_path\", type=str, default=\"models\", help=\"Directory where trained models will be saved.\")\n parser.add_argument(\"--log_comet_ml\", type=bool, default=False, help=\"Indicates whether to save training stats on Comet.ml.\")\n parser.add_argument(\"--comet_ml_api_key\", type=str, default=\"\", help=\"API key to be able to log data to Comet.ml.\")\n parser.add_argument(\"--comet_ml_project_name\", type=str, default=\"\", help=\"Comet.ml project name.\")\n parser.add_argument(\"--comet_ml_workspace\", type=str, default=\"\", help=\"Comet.ml workspace.\")\n parser.add_argument(\"--comet_ml_save_model\", type=bool, default=False, help=\"If true, models are also uploaded to Comet.ml.\")\n\n args = parser.parse_args()\n return args\n\n\ndef train(opt):\n # Set random seed\n if torch.cuda.is_available():\n torch.cuda.manual_seed(opt.random_seed)\n else:\n torch.manual_seed(opt.random_seed)\n # Instantiate the model\n if opt.conv_dim is not None and \\\n opt.conv_kernel_sizes is not None and \\\n opt.conv_strides is not None and \\\n opt.fc_dim is not None:\n model = DeepQNetwork(opt.image_size, opt.image_size, conv_dim=opt.conv_dim, conv_kernel_sizes=opt.conv_kernel_sizes, \n conv_strides=opt.conv_strides, fc_dim=opt.fc_dim)\n else:\n model = DeepQNetwork(opt.image_size, opt.image_size)\n\n if opt.log_comet_ml:\n # Create a Comet.ml experiment\n experiment = Experiment(api_key=opt.comet_ml_api_key,\n project_name=opt.comet_ml_project_name, workspace=opt.comet_ml_workspace)\n experiment.log_other(\"iters_to_save\", opt.iters_to_save)\n experiment.log_other(\"completed\", False)\n experiment.log_other(\"random_seed\", opt.random_seed)\n\n # Report hyperparameters to Comet.ml\n hyper_params = {\"image_size\": opt.image_size,\n \"batch_size\": opt.batch_size,\n \"optimizer\": opt.optimizer,\n \"learning_rate\": opt.lr,\n \"gamma\": opt.gamma,\n \"initial_epsilon\": opt.initial_epsilon,\n \"final_epsilon\": opt.final_epsilon,\n \"num_iters\": opt.num_iters,\n \"replay_memory_size\": opt.replay_memory_size,\n \"random_seed\": opt.random_seed,\n \"conv_dim\": opt.conv_dim,\n \"conv_kernel_sizes\": opt.conv_kernel_sizes,\n \"conv_strides\": opt.conv_strides,\n \"fc_dim\": opt.fc_dim}\n experiment.log_parameters(hyper_params)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-6) # Optimization algorithm\n criterion = nn.MSELoss() # Loss function\n game_state = FlappyBird() # Instantiate the Flappy Compass game\n image, reward, terminal = game_state.next_frame(0) # Get the next image, along with its reward and an indication if it's a terminal state\n\n # Image preprocessing step (scaling, color removal and convertion to a PyTorch tensor)\n image = pre_processing(image[:game_state.screen_width, :int(game_state.base_y)], opt.image_size, opt.image_size)\n image = torch.from_numpy(image)\n\n # Move the model and the current image data to the GPU, if available\n if torch.cuda.is_available():\n model.cuda()\n image = image.cuda()\n\n # Prepare the state variable, which will host the last 4 frames\n state = torch.cat(tuple(image for _ in range(4)))[None, :, :, :]\n\n # Initialize the replay memory, which saves sets of consecutive game states, the reward and terminal state indicator\n # so that the model can learn from them (essentially constitutes the training data, which grows with every new iteration)\n replay_memory = []\n \n iter = 0 # Iteration counter\n\n # Main training loop performing the number of iterations 
specified by num_iters\n while iter < opt.num_iters:\n prediction = model(state)[0] # Get a prediction from the current state\n epsilon = opt.final_epsilon + (\n (opt.num_iters - iter) * (opt.initial_epsilon - opt.final_epsilon) / opt.num_iters) # Set the decay of the probability of random actions\n u = random()\n random_action = u <= epsilon\n if random_action:\n print(\"Perform a random action\")\n action = randint(0, 1)\n else:\n # Use the model's prediction to decide the next action\n action = torch.argmax(prediction).item()\n\n # Get a new frame and process it\n next_image, reward, terminal = game_state.next_frame(action)\n next_image = pre_processing(next_image[:game_state.screen_width, :int(game_state.base_y)], opt.image_size, opt.image_size)\n next_image = torch.from_numpy(next_image)\n\n # Move the next image data to the GPU, if available\n if torch.cuda.is_available():\n next_image = next_image.cuda()\n\n next_state = torch.cat((state[0, 1:, :, :], next_image))[None, :, :, :] # Prepare the next state variable, which will host the last 4 frames\n replay_memory.append([state, action, reward, next_state, terminal]) # Save the current state, action, next state and terminal state indicator in the replay memory\n if len(replay_memory) > opt.replay_memory_size:\n del replay_memory[0] # Delete the oldest reolay from memory if full capacity has been reached\n batch = sample(replay_memory, min(len(replay_memory), opt.batch_size)) # Retrieve past play sequences from the replay memory\n state_batch, action_batch, reward_batch, next_state_batch, terminal_batch = zip(*batch)\n\n state_batch = torch.cat(tuple(state for state in state_batch)) # States of the current batch\n action_batch = torch.from_numpy(\n np.array([[1, 0] if action == 0 else [0, 1] for action in action_batch], dtype=np.float32)) # Actions taken in the current batch\n reward_batch = torch.from_numpy(np.array(reward_batch, dtype=np.float32)[:, None]) # Rewards in the current batch\n next_state_batch = torch.cat(tuple(state for state in next_state_batch)) # Next states of the current batch\n\n # Move batch data to the GPU, if available\n if torch.cuda.is_available():\n state_batch = state_batch.cuda()\n action_batch = action_batch.cuda()\n reward_batch = reward_batch.cuda()\n next_state_batch = next_state_batch.cuda()\n\n current_prediction_batch = model(state_batch) # Predictions of the model for the replays of the current batch\n next_prediction_batch = model(next_state_batch) # Next predictions of the model for the replays of the current batch\n\n # Set ground truth for the rewards for the current batch, considering whether the state is terminal or not\n y_batch = torch.cat(\n tuple(reward if terminal else reward + opt.gamma * torch.max(prediction) for reward, terminal, prediction in\n zip(reward_batch, terminal_batch, next_prediction_batch)))\n\n q_value = torch.sum(current_prediction_batch * action_batch, dim=1) # Predicted Q values (i.e. 
estimated return for each action)\n optimizer.zero_grad() # Reset the gradients to zero before a new optimization step\n loss = criterion(q_value, y_batch) # Calculate the loss\n loss.backward() # Backpropagation\n optimizer.step() # Weights optimization step\n\n state = next_state # Move to the next frame\n iter += 1\n print(\"Iteration: {}/{}, Action: {}, Loss: {}, Epsilon {}, Reward: {}, Q-value: {}\".format(\n iter + 1,\n opt.num_iters,\n action,\n loss,\n epsilon, reward, torch.max(prediction)))\n \n if opt.log_comet_ml:\n # Log metrics to Comet.ml\n experiment.log_metric(\"train_loss\", loss, step=iter)\n experiment.log_metric(\"train_epsilon\", epsilon, step=iter)\n experiment.log_metric(\"train_reward\", reward, step=iter)\n experiment.log_metric(\"train_Q_value\", torch.max(prediction), step=iter)\n\n if (iter+1) % opt.iters_to_save == 0:\n # Get the current day and time to attach to the saved model's name\n current_datetime = datetime.now().strftime('%d_%m_%Y_%H_%M')\n\n # Set saved model name\n model_filename = f'{opt.saved_path}/flappy_compass_{current_datetime}_{iter+1}.pth'\n\n # Save model every iters_to_save iterations\n torch.save(model, model_filename)\n\n if opt.log_comet_ml and opt.comet_ml_save_model:\n # Upload model to Comet.ml\n experiment.log_asset(file_path=model_filename, overwrite=True)\n\n # Get the current day and time to attach to the saved model's name\n current_datetime = datetime.now().strftime('%d_%m_%Y_%H_%M')\n\n # Set saved model name\n model_filename = f'{opt.saved_path}/flappy_compass_{current_datetime}_{iter+1}.pth'\n\n # Save the model after reaching the final iteration\n torch.save(model, model_filename)\n\n if opt.log_comet_ml:\n # Only report that the experiment completed successfully if it finished the training without errors\n experiment.log_other(\"completed\", True)\n\n if opt.comet_ml_save_model:\n # Upload model to Comet.ml\n experiment.log_asset(file_path=model_filename, overwrite=True)\n\n\nif __name__ == \"__main__\":\n opt = get_args()\n train(opt)\n"
] | [
[
"torch.max",
"torch.cuda.manual_seed",
"torch.cat",
"torch.manual_seed",
"torch.argmax",
"torch.sum",
"torch.from_numpy",
"torch.cuda.is_available",
"numpy.array",
"torch.nn.MSELoss",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dido1998/CausalMBRL | [
"2c595988b06b3d568fdde53213029e97841acb03"
] | [
"eval.py"
] | [
" \nimport argparse\nimport torch\nimport pickle\nfrom pathlib import Path\n\nfrom torch.utils import data\nimport numpy as np\n\nfrom cswm import utils\nfrom cswm.models.modules import CausalTransitionModel, CausalTransitionModelLSTM\nfrom cswm.utils import OneHot\n\ntorch.backends.cudnn.deterministic = True\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--save-folder', type=Path,\n default='checkpoints',\n help='Path to checkpoints.')\n\nparser.add_argument('--dataset', type=Path,\n default=Path('data/shapes_eval.h5'),\n help='Dataset file name.')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='Disable CUDA training.')\nparser.add_argument('--finetune', action='store_true')\nparser.add_argument('--recurrent', action='store_true')\nparser.add_argument('--save', type=str, default='Default')\nargs_eval = parser.parse_args()\n\n\nmeta_file = args_eval.save_folder / 'metadata.pkl'\nif args_eval.finetune:\n model_file = args_eval.save_folder / 'finetuned_model.pt'\nelse:\n model_file = args_eval.save_folder / 'model.pt'\n\nwith open(meta_file, 'rb') as f:\n args = pickle.load(f)['args']\n\nargs.cuda = not args_eval.no_cuda and torch.cuda.is_available()\nargs_eval.batch_size = 100\n\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\ndevice = torch.device('cuda' if args.cuda else 'cpu')\n\nprint(\"Loading data...\")\ndataset = utils.PathDataset(\n hdf5_file=args.dataset, path_length=10,\n action_transform=OneHot(args.num_objects * args.action_dim), in_memory=False)\neval_loader = data.DataLoader(\n dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)\n\nprint(\"Loading model...\")\n\n# Get data sample\nobs = next(iter(eval_loader))[0]\ninput_shape = obs[0][0].size()\n\nif not args_eval.recurrent:\n model = CausalTransitionModel(\n embedding_dim_per_object=args.embedding_dim_per_object,\n hidden_dim=args.hidden_dim,\n action_dim=args.action_dim,\n input_dims=input_shape,\n input_shape=input_shape,\n modular=args.modular,\n predict_diff=args.predict_diff,\n vae=args.vae,\n num_objects=args.num_objects,\n encoder=args.encoder,\n gnn=args.gnn,\n multiplier=args.multiplier,\n ignore_action=args.ignore_action,\n copy_action=args.copy_action).to(device)\nelse:\n model = CausalTransitionModelLSTM(\n embedding_dim_per_object=args.embedding_dim_per_object,\n hidden_dim=args.hidden_dim,\n action_dim=args.action_dim,\n input_dims=(3, 50, 50),\n input_shape=(3, 50, 50),\n modular=args.modular,\n predict_diff=args.predict_diff,\n vae=args.vae,\n num_objects=args.num_objects,\n encoder=args.encoder, \n rim = args.rim,\n scoff = args.scoff).to(device)\n\nmodel.load_state_dict(torch.load(model_file))\nmodel.eval()\n\nmodel_name = '/'.join(str(args_eval.save_folder).split('/')[-2:])\n\nif args_eval.recurrent:\n utils.eval_steps_lstm(\n model, [1,5,10], name=model_name,\n filename=args_eval.dataset, batch_size=args_eval.batch_size,\n device=device, save_folder=args_eval.save, contrastive=args.contrastive)\nelse:\n utils.eval_steps(\n model, [1, 5, 10], name=model_name,\n filename=args_eval.dataset, batch_size=args_eval.batch_size,\n device=device, save_folder=args_eval.save, contrastive=args.contrastive)\n"
] | [
[
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
daifengwanglab/deepalignomics | [
"e644b565d5d295769f246420a90eff18fd3f15d6"
] | [
"deepManReg/viz.py"
] | [
"''' adapted from https://github.com/all-umass/ManifoldWarping '''\n\nfrom matplotlib import pyplot\n\n\n# def show_alignment(X,Y,title=None,marker='o-',legend=True):\n# '''plot two data sets on the same figure'''\n# dim = X.shape[1]\n# assert dim == Y.shape[1], 'dimensionality must match'\n# assert dim in (1,2,3), ('can only plot 1, 2, or 3-dimensional data, X has shape %dx%d' % X.shape)\n# if dim == 1:\n# pyplot.plot(X[:,0],marker,label='X',hold=True)\n# pyplot.plot(Y[:,0],marker,label='Y',hold=True)\n# elif dim == 2:\n# pyplot.plot(X[:,0],X[:,1],marker,label='X',hold=True)\n# pyplot.plot(Y[:,0],Y[:,1],marker,label='Y',hold=True)\n# else: # dim == 3\n# from mpl_toolkits.mplot3d import Axes3D\n# fig = pyplot.gcf()\n# ax = Axes3D(fig)\n# ax.plot(X[:,0],X[:,1],X[:,2],marker,label='X')\n# ax.plot(Y[:,0],Y[:,1],Y[:,2],marker,label='Y')\n# if title:\n# pyplot.title(title)\n# if legend:\n# pyplot.legend(loc='best')\n# return pyplot.show\n\ndef show_alignment(X,Y,titX=None,titY=None,title=None,legend=True):\n '''plot two data sets on the same figure'''\n dim = X.shape[1]\n assert dim == Y.shape[1], 'dimensionality must match'\n assert dim in (1,2,3), ('can only plot 1, 2, or 3-dimensional data, X has shape %dx%d' % X.shape)\n if dim == 1:\n pyplot.plot(X[:,0],label=titX,alpha=0.5)\n pyplot.plot(Y[:,0],label=titX,alpha=0.5)\n elif dim == 2:\n pyplot.scatter(X[:,0],X[:,1],label=titX,alpha=0.5)\n pyplot.scatter(Y[:,0],Y[:,1],label=titY,alpha=0.5)\n else: # dim == 3\n from mpl_toolkits.mplot3d import Axes3D\n fig = pyplot.gcf()\n ax = Axes3D(fig)\n ax.scatter(X[:,0],X[:,1],X[:,2],label=titX,alpha=0.5)\n ax.scatter(Y[:,0],Y[:,1],Y[:,2],label=titY,alpha=0.5)\n if title:\n pyplot.title(title)\n if legend:\n pyplot.legend(loc='best')\n return pyplot.show\n\ndef show_neighbor_graph(X,corr,title=None,fig=None,ax=None):\n '''Plot the neighbor connections between points in a data set.\n Note: plotting correspondences for 3d points is slow!'''\n assert X.shape[1] in (2,3), 'can only show neighbor graph for 2d or 3d data'\n if X.shape[1] == 2:\n if ax is None:\n ax = pyplot.gca()\n for pair in corr.pairs():\n ax.plot(X[pair,0], X[pair,1], 'r-')\n ax.plot(X[:,0],X[:,1],'o')\n else:\n if ax is None:\n from mpl_toolkits.mplot3d import Axes3D\n if fig is None:\n fig = pyplot.gcf()\n ax = Axes3D(fig)\n for pair in corr.pairs():\n ax.plot(X[pair,0], X[pair,1], X[pair,2], 'r-')\n ax.plot(X[:,0],X[:,1],X[:,2],'o')\n if title:\n ax.set_title(title)\n return pyplot.show"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qiskit-community/lindbladmpo | [
"e5d07fc2ce226b19e831d06334e33afb8d131e33"
] | [
"lindbladmpo/examples/simulation_building/operators.py"
] | [
"# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\nfrom typing import OrderedDict, Dict, List, Union, Any, Optional\nfrom abc import ABC, abstractmethod\nimport numpy as np\n\n\"\"\"\nAn implementation of a mechanism for building operators for creating dynamical system simulations.\n\nThe DynamicalOperator class introduced below is used by LindbladMatrixSolver to generate operators\nfor qiskit-dynamics simulations.\n\"\"\"\n\n\nclass DynamicalOperator(ABC):\n \"\"\"A class for operators used in defining dynamical simulations.\"\"\"\n\n def __init__(self, system_id: Any = \"\", s_type=\"\", matrix: Optional[Any] = None):\n \"\"\"Initialization of an operator using (optional) id of the subsystem, type, and a matrix.\n\n Args:\n system_id: A unique identifier of the subsystem (degree of freedom) of the operator,\n or an empty string to defer such an identification.\n s_type: A string name of the type of operator. If not given, and the matrix argument\n is not None, then a unique id is automatically generated from the matrix's\n ``__hash__()`` or ``__str()__`` methods.\n matrix: An explicit matrix realization of the operator, for use when building\n matrices using ``OperatorBuilder``.\n\n Raises:\n An exception if the matrix argument is not None, s_type is an empty string, but\n the matrix does not implement either __hash__() or __str__().\n \"\"\"\n self._matrix = matrix\n self.system_id = system_id\n if matrix is not None and s_type == \"\":\n if matrix.__hash__ is not None:\n s_type = str(matrix.__hash__())\n elif matrix.__str__ is not None:\n s_type = str(matrix.__str__().__hash__())\n else:\n raise Exception(\n \"A unique type string for the argument matrix could not be generated.\"\n )\n s_type = s_type.lower()\n self._s_type = s_type\n self.compound_type = \"\"\n self.compound_ops = None\n\n @property\n def s_type(self) -> str:\n \"\"\"A string defining the operator type. Must be unique to identify the type.\"\"\"\n return self._s_type\n\n def __add__(self, other):\n \"\"\"Addition of two DynamicalOperators. Returns a new (compound) DynamicalOperator.\"\"\"\n if not isinstance(other, DynamicalOperator):\n raise Exception(\n \"Both operands in an addition must be instances of a DynamicalOperator.\"\n )\n result = self.new_operator()\n result.compound_type = \"+\"\n result.compound_ops = [self, other]\n return result\n\n def __sub__(self, other):\n \"\"\"Subtraction of two DynamicalOperators. 
Returns a new (compound) DynamicalOperator.\"\"\"\n return self.__add__(-other)\n\n def __mul__(self, other):\n \"\"\"Multiplication by a DynamicalOperator or a scalar.\"\"\"\n result = self.new_operator()\n if isinstance(other, DynamicalOperator):\n result.compound_type = (\n \"@\" # Indicates operator * operator for OperatorBuilder\n )\n result.compound_ops = [self, other]\n # For a product of two operators, their order must be preserved\n return result\n else:\n other_type = type(other)\n if other_type is complex or other_type is float or other_type is int:\n result.compound_type = (\n \"*\" # Indicates operator * scalar for OperatorBuilder\n )\n result.compound_ops = [self, other]\n # For a product of an operator and a scalar, we can put the operator first always.\n # This is used to simplify OperatorBuilder code below, and must not be changed.\n return result\n raise Exception(\n \"The second operand of a multiplication must be\"\n \" a DynamicalOperator class or a scalar.\"\n )\n\n def __rmul__(self, other):\n \"\"\"Multiplication of a DynamicalOperator by a scalar.\"\"\"\n result = self.__mul__(other)\n return result\n\n def __neg__(self):\n \"\"\"Unary negation of a DynamicalOperator.\"\"\"\n result = self.__rmul__(-1.0)\n return result\n\n def __pos__(self):\n \"\"\"Unary plus operator prepending a DynamicalOperator.\"\"\"\n return self\n\n def new_operator(self):\n \"\"\"A method that must be implemented by subclasses, to return the correct instance subclass.\n\n Called from operators to create new compound operators in the tree of expressions.\n \"\"\"\n return DynamicalOperator()\n\n def get_operator_matrix(self, dim: int) -> Any:\n \"\"\"Returns a matrix describing a realization of the operator specified in the parameters.\n\n This method must be overridden by subclasses in order to support building to a matrix.\n Args:\n dim: The physical dimension of the matrix to generate.\n \"\"\"\n raise Exception(\n f\"Operator type {self.s_type} unknown or unsupported for matrix generation with dimension {dim}.\"\n )\n\n def kron_two_matrices(self, left_matrix: Any, right_matrix: Any):\n \"\"\"Returns the matrix Kronecker product of the two arguments.\n\n This function is not declared as static in order to allow subclasses to override the\n default implementation. However, fields of the ``self`` object are not being used.\n Args:\n left_matrix: First matrix.\n right_matrix: Second matrix.\n\n Returns:\n The Kronecker product of the arguments.\n \"\"\"\n return np.kron(left_matrix, right_matrix)\n\n def build_one_dict(\n self, operators_repo: dict, prune_subsystems: Optional[dict] = None\n ) -> dict:\n \"\"\"Recursively build a flat dictionary out of a (sub-)tree of DynamicalOperators.\n\n Args:\n operators_repo: A dictionary referencing the DynamicalOperators used in the building.\n It is being updated by this method.\n prune_subsystems: An optional dict specifying subsystem_dims to remove, as the keys of\n entries in the dict. The values are not used.\n Returns:\n The structure of the returned flat dict is as follows: Each key identifies uniquely an\n operator that is a product of operators, e.g. \"X_0 * Z_0 * Y_2\" is the unique operator\n that is the ordered product of 3 operators, X on subsystem 0, Z on subsystem 0, and Y on\n subsystem 2. 
The value is a multiplicative scalar coefficient for this operator.\n The different entries of the dictionary are to be summed over.\n Raises:\n An exception if an unidentified operation was found in the tree.\n \"\"\"\n if self.compound_type == \"+\": # The sub-tree root is a sum of two operators\n result = {}\n for op in self.compound_ops:\n # Build a dict out of each summand.\n op_dict: dict = op.build_one_dict(operators_repo, prune_subsystems)\n # We now iterate over all members in the flattened dict, and add them to the result\n # dict - if the unique key already appears there, the scalars are added.\n for key, val in op_dict.items():\n val_sum = val + result.get(key, complex(0.0))\n result[key] = val_sum\n elif (\n self.compound_type == \"@\"\n ): # The sub-tree root is a product of two operators\n new_key = []\n new_val = complex(1.0)\n for op in self.compound_ops:\n op_dict = op.build_one_dict(operators_repo, prune_subsystems)\n # Note that operators_repo can be extended with operators that appear in products\n # that will be later pruned, and hence operators_repo does not represent only\n # operators that actually appear in the final built dictionary, in the case of pruning.\n if len(op_dict) == 0:\n # If one of the product terms is an empty dictionary (as result of pruning),\n # we remove completely the product as well.\n new_val = complex(0.0)\n break\n for key, val in op_dict.items():\n if key is not None:\n for key_element in key:\n new_key.append(key_element)\n # The key of the product operator will be a concatenation of unique keys,\n # order preserved.\n new_val *= (\n val # The scalar factor will be a product of the scalars.\n )\n if new_val != complex(0.0):\n result = {tuple(new_key): new_val}\n else: # Return an empty dictionary, when one of the product terms is pruned.\n result = dict()\n elif (\n self.compound_type == \"*\"\n ): # The sub-tree root is a product of operator * scalar\n # Since this product is commutative, the operator is always first in order,\n # as implemented in DynamicalOperator.__mul__ and DynamicalOperator.__rmul__\n op = self.compound_ops[0]\n scalar = self.compound_ops[1]\n op_dict = op.build_one_dict(operators_repo, prune_subsystems)\n for key, val in op_dict.items():\n op_dict[key] = val * scalar\n result = op_dict\n elif self.compound_type == \"\":\n val = complex(1.0)\n if prune_subsystems is not None and self.system_id in prune_subsystems:\n result = dict()\n else:\n operators_repo[DynamicalOperatorKey(self)] = self\n # Note that there's no way to verify uniqueness of the implementing operator\n result = {tuple([DynamicalOperatorKey(self)]): val}\n else:\n raise Exception(\n f\"Unknown/unsupported composite operator {self.compound_type}.\"\n )\n return result\n\n\nclass DynamicalOperatorKey:\n \"\"\"A container for a unique key identifying an operator and a subsystem.\"\"\"\n\n def __init__(self, op: DynamicalOperator):\n self.system_id = op.system_id\n self.s_type = op.s_type\n\n def __hash__(self):\n return hash((self.system_id, self.s_type))\n\n def __eq__(self, other):\n return (\n isinstance(other, DynamicalOperatorKey)\n and self.system_id == other.system_id\n and self.s_type == other.s_type\n )\n\n def __str__(self):\n return f\"({self.system_id}, {self.s_type})\"\n\n\nclass Id(DynamicalOperator):\n \"\"\"A dynamical operator that builds a numpy identity matrix.\"\"\"\n\n def __init__(self, system_id=\"\"):\n super().__init__(system_id, \"i\")\n\n def get_operator_matrix(self, dim: int) -> Any:\n \"\"\"Returns a matrix describing a 
realization of the operator specified in the parameters.\n\n Args:\n dim: The physical dimension of the matrix to generate.\n \"\"\"\n if self.s_type == \"i\":\n return np.identity(dim, complex)\n super().get_operator_matrix(dim)\n\n\nclass Zero(DynamicalOperator):\n \"\"\"A dynamical operator that builds a numpy null (zero) matrix.\"\"\"\n\n def __init__(self, system_id=\"\"):\n super().__init__(system_id, \"null\")\n\n def get_operator_matrix(self, dim: int) -> Any:\n \"\"\"Returns a matrix describing a realization of the operator specified in the parameters.\n\n Args:\n dim: The physical dimension of the matrix to generate.\n \"\"\"\n if self.s_type == \"null\":\n return np.zeros((dim, dim), complex)\n super().get_operator_matrix(dim)\n\n\ndef build_dictionaries(\n operators: Union[DynamicalOperator, List[DynamicalOperator]],\n prune_subsystems: Optional[dict] = None,\n) -> (Union[dict, List[dict]], dict):\n \"\"\"Builds a list of flat descriptive dictionaries from a list of DynamicalOperator trees.\n\n Args:\n operators: A DynamicalOperator or a list of DynamicalOperators, for each one the return\n value will contain a flattened descriptive dict.\n prune_subsystems: An optional dict specifying subsystem_dims to remove, as the keys of\n entries in the dict. The values are not used.\n\n Returns:\n A tuple with the first entry being a dictionary or a list of dictionaries (matching the\n operators parameter), and the second entry being an ``operators_repo``: a dictionary\n referencing the DynamicalOperators used in the building. This operators_repo is\n necessary for building into matrices.\n The structure of each resulting flat dict is as follows: Each key identifies uniquely an\n operator that is a product of operators, e.g. \"X_0 * Z_0 * Y_2\" is the unique operator\n that is the ordered product of 3 operators, X on subsystem 0, Z on subsystem 0, and Y on\n subsystem 2. The value is a multiplicative scalar coefficient for this operator.\n The different entries of the dictionary are to be summed over.\n \"\"\"\n results = []\n b_flatten = (\n False # If operators is one instance return a dict, otherwise a list of dicts\n )\n if type(operators) != list:\n b_flatten = True\n operators = [operators]\n operators_repo = dict()\n for op in operators:\n results.append(op.build_one_dict(operators_repo, prune_subsystems))\n if b_flatten:\n results = results[0]\n return results, operators_repo\n\n\ndef build_matrices(\n operators: Union[DynamicalOperator, Dict, List[DynamicalOperator], List[Dict]],\n subsystem_dims: OrderedDict,\n prune_subsystems: Optional[dict] = None,\n operators_repo: Optional[Dict] = None,\n null_matrix_op: Optional[DynamicalOperator] = None,\n id_matrix_op: Optional[DynamicalOperator] = None,\n) -> Any:\n \"\"\"Build a (possibly list) of matrices from DynamicalOperator or dictionaries thereof.\n\n Args:\n operators: A DynamicalOperator, a list of DynamicalOperators, a flattened dictionary\n previously built using ``build_dictionaries``, or a list of such dictionaries.\n subsystem_dims: An ordered dictionary for each subsystem (identified using the system_id\n field of the DynamicalOperator), indicating the matrix dimension to assign for\n it. Subsystems which are to be removed from the matrix building, must be specified\n in the ``prune_subsystems`` parameter, if relevant.\n prune_subsystems: An optional dict specifying subsystem_dims to remove, as the keys of\n entries in the dict. The values are not used. 
This parameter can only be used if\n the ``operators`` parameter corresponds to DynamicalOperators.\n operators_repo: A dictionary referencing the DynamicalOperators used in the building.\n null_matrix_op: An optional DynamicalOperator instance for building an null (zeros) matrix.\n The default building implementation returns numpy matrices. The corresponding instance\n is also being used to invoke ``kron_two_matrices()`` to implement a kronecker product\n of two subsystem matrices.\n id_matrix_op: An optional DynamicalOperator instance for building an Identity matrix.\n The default building implementation returns numpy matrices.\n Returns:\n A matrix or a list of matrices, of the type as returned by\n DynamicOperator.get_operator_matrix() or the subclass instances passed in the\n ``operators`` parameter.\n Raises:\n An exception if an unidentified operation was found in the tree.\n \"\"\"\n if len(subsystem_dims) == 0:\n return None\n b_flatten = False\n if type(operators) != list:\n b_flatten = True\n operators = [operators]\n dims = subsystem_dims.values()\n if null_matrix_op is None:\n null_matrix_op = Zero()\n if id_matrix_op is None:\n id_matrix_op = Id()\n total_dim = 1\n for dim in dims:\n if dim > 0:\n total_dim *= dim\n if len(operators) == 0:\n return null_matrix_op.get_operator_matrix(total_dim)\n b_dictionaries = False\n for op in operators:\n # Verify operator types are known and identical\n op_type = type(op)\n if op_type is dict:\n b_dictionaries = True\n elif not isinstance(op, DynamicalOperator):\n raise Exception(\n f\"Unsupported class type in parameter operators: {op_type}.\"\n )\n elif b_dictionaries:\n raise Exception(\n \"All operators must be of the same type (a dictionary or a DynamicalOperator).\"\n )\n\n sys_ids = []\n for sys_id in subsystem_dims.keys():\n sys_ids.append(sys_id)\n identity_matrices = []\n if b_dictionaries:\n operators_dict: List[Dict] = operators\n else:\n operators_dict, operators_repo = build_dictionaries(operators, prune_subsystems)\n results = []\n for dim in dims:\n identity_matrices.append(id_matrix_op.get_operator_matrix(dim))\n for op_dict in operators_dict:\n results.append(\n _build_one_matrix(\n op_dict,\n operators_repo,\n null_matrix_op,\n total_dim,\n subsystem_dims,\n sys_ids,\n identity_matrices,\n )\n )\n if b_flatten:\n results = results[0]\n return results\n\n\ndef _build_one_matrix(\n operator_dict: Dict,\n operators_repo: Dict,\n null_matrix_op,\n total_dim,\n subsystem_dims,\n sys_ids,\n identity_matrices,\n) -> np.ndarray:\n matrix = null_matrix_op.get_operator_matrix(total_dim)\n for key, val in operator_dict.items():\n sub_matrices = {}\n for key_element in key:\n operator_key: DynamicalOperatorKey = key_element\n dim = subsystem_dims.get(operator_key.system_id, None)\n if dim is None:\n raise Exception(\n f\"An operator was defined with id = {operator_key.system_id}, \"\n \"but this id does not appear in the subsystem_dims parameter.\"\n )\n dyn_op: DynamicalOperator = operators_repo[operator_key]\n new_sub_matrix = dyn_op.get_operator_matrix(dim)\n sub_matrix = sub_matrices.get(operator_key.system_id, None)\n if sub_matrix is not None:\n new_sub_matrix = (\n sub_matrix @ new_sub_matrix\n ) # note that order of matrix product matters\n sub_matrices[operator_key.system_id] = new_sub_matrix\n op_matrix = None\n n_subsystems = len(sys_ids)\n for i in range(n_subsystems):\n sub_matrix = sub_matrices.get(sys_ids[i], identity_matrices[i])\n if i == 0:\n op_matrix = sub_matrix\n else:\n op_matrix = 
null_matrix_op.kron_two_matrices(op_matrix, sub_matrix)\n matrix += val * op_matrix\n return matrix\n"
] | [
[
"numpy.identity",
"numpy.kron",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sharanry/UncertaintyQuantification | [
"dab4e6176798451edbbe8f2e681b45a2d00709e3"
] | [
"asymptotic_analysis.py"
] | [
"import os\n\nimport argparse\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as onp\nimport jax.numpy as np\nimport jax.random as random\nfrom jax import vmap\nfrom jax.config import config as jax_config\nimport numpyro.distributions as dist\nfrom numpyro.handlers import seed, substitute, trace\nfrom numpyro.hmc_util import initialize_model\nfrom numpyro.mcmc import mcmc\nfrom numpyro import sample\nimport numpyro\nfrom numpy import linalg as LA\nfrom jax import device_get\nfrom sklearn.utils import shuffle\n\nfrom utils import *\n\n# CONFIG\nargs = {\n \"num_samples\" : 1000, #def: 1000\n \"num_warmup\" : 3000, #def: 3000\n \"num_data\" : 100, #def: 100\n \"num_hidden\" : 10, #def: 10\n \"device\" : 'cpu', #def: cpu\n \"save_directory\": \"./results\",\n}\n\n# PREPARE TO SAVE RESULTS\ntry:\n os.stat(args[\"save_directory\"])\nexcept:\n os.mkdir(args[\"save_directory\"]) \n\n\n\namt_data = [5*10**i for i in range(1, 4)] # def: [50, 500, 5000]\n\n\njax_config.update('jax_platform_name', args[\"device\"])\nN, D_X, D_H = args[\"num_data\"], 1, args[\"num_hidden\"]\n\n\n# GENERATE ARTIFICIAL DATA\nX, Y, X_test = get_data(functions, ranges, num_samples=5000)\nmean = X.mean()\nprint(\"Normalising Mean = \" + str(mean))\n\nX = X/mean\n\nX, Y = shuffle(X, Y)\n\n\nX_test=onp.arange(0,2,0.01).reshape(-1,1)\n\n\n# PLOTTING\nplt.cla() # Clear axis\nplt.clf() # Clear figure\nplt.close() # Close a figure window\n# make plots\nfig, ax = plt.subplots(1, len(amt_data), sharey=True)\nfig.set_figheight(5)\nfig.set_figwidth(len(amt_data)*7)\n\nsamples_collected = []\n\n# INFERENCE\nfor i, num_data in enumerate(amt_data):\n sigma=1.0\n print(\"Model with weights prior sigma \", sigma, \" and \", num_data, \" data points\")\n rng, rng_predict = random.split(random.PRNGKey(0));\n samples = run_inference(model, args, rng, X[:num_data], Y[:num_data], D_H, sigma);\n samples_collected.append((num_data, samples))\n \n \n \n # predict Y_test at inputs X_test\n vmap_args = (samples, random.split(rng_predict, args[\"num_samples\"]));\n predictions = vmap(lambda samples, rng: predict(model, rng, samples, X_test, D_H, sigma))(*vmap_args)\n predictions = predictions[..., 0]\n \n train_predictions = vmap(lambda samples, rng: predict(model, rng, samples, X[:num_data], D_H, sigma))(*vmap_args)\n train_predictions = train_predictions[..., 0]\n \n \n \n # compute mean prediction and 95% confidence interval around median\n mean_prediction = np.mean(predictions, axis=0)\n percentiles = onp.percentile(predictions, [2.5, 97.5], axis=0)\n \n # compute mean prediction and confidence interval around median\n train_mean_prediction = np.mean(train_predictions, axis=0)\n \n # plot training data\n ax[i].plot(X[:num_data], Y[:num_data], 'kx', c=\"red\", alpha=0.3, label=\"Data samples\")\n # plot 90% confidence level of predictions\n ax[i].fill_between(X_test[:,0], percentiles[0, :], percentiles[1, :], color='lightblue', label=\"95% CI\", step='mid')\n # plot mean prediction\n ax[i].plot(X_test, mean_prediction, c='blue', alpha=0.6, label=\"Predicted\")\n \n ax[i].plot(X_test[:100], [data_gen_func(x, normalizing_mean=mean) for x in X_test[:100]], c='purple', alpha=0.6, label=\"True\")\n ax[i].plot(X_test[100:], [data_gen_func(x, normalizing_mean=mean) for x in X_test[100:]], c='purple', alpha=0.6)\n \n ax[i].set(xlabel=\"X\", ylabel=\"Y\", title=\"n = \" + str(num_data))\n ax[i].title.set_size(30)\n ax[i].xaxis.label.set_size(30)\n ax[i].yaxis.label.set_size(30)\n ax[i].set_ylim([-2,3])\n 
ax[i].tick_params(labelsize=30)\n    if(i==len(samples_collected)-1):\n        ax[i].legend(fontsize=15, loc=\"lower left\")\n    \n\nprint(\"Saving asymptotic confidence interval plot...\") \nplt.savefig(os.path.join(args[\"save_directory\"], \"asymp_ci.png\"))\n\n\nplt.cla() # Clear axis\nplt.clf() # Clear figure\nplt.close() # Close a figure window\nfig, ax = plt.subplots(1,len(samples_collected), sharey=True)\nfig.set_figheight(5)\nfig.set_figwidth(len(samples_collected)*7)\nfor i in range(len(samples_collected)):\n    to_plot = []\n    for name, value in samples_collected[i][1].items():\n        value = device_get(value)\n        neffs = numpyro.diagnostics.effective_sample_size(value[None, ...])\n        \n        if isinstance(neffs, onp.ndarray):\n            to_plot.append(onp.log(neffs.flatten()))\n    bplot = ax[i].boxplot(\n        to_plot, labels=list(samples_collected[i][1].keys()),\n        patch_artist=True,\n        \n    )\n    \n    for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:\n        plt.setp(bplot[element], color=\"black\")\n    for patch in bplot['boxes']:\n        patch.set_facecolor(\"lightblue\")\n    \n    ax[i].set(ylabel=\"log ESS\", title=\"n = \" + str(samples_collected[i][0]))\n    ax[i].title.set_size(30)\n    ax[i].xaxis.label.set_size(30)\n    ax[i].yaxis.label.set_size(30)\n    ax[i].tick_params(labelsize=25.0)\n\nprint(\"Saving asymptotic effective sample size plot...\") \nplt.savefig(os.path.join(args[\"save_directory\"], \"asymp_ess.png\"))\n    \n"
] | [
[
"sklearn.utils.shuffle",
"matplotlib.pyplot.cla",
"numpy.arange",
"numpy.percentile",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
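
Editorial note on the entry above: the inline comment in its plotting loop says "90% confidence level", but the [2.5, 97.5] percentiles bracket the central 95% of the posterior predictive draws, which matches the "95% CI" legend label. A minimal self-contained sketch of that interval computation (the predictions array here is hypothetical random data standing in for the vmap'd predictive draws):

import numpy as onp

predictions = onp.random.randn(1000, 200)   # hypothetical: one row per posterior sample
mean_prediction = predictions.mean(axis=0)  # predictive mean at each test point
lo, hi = onp.percentile(predictions, [2.5, 97.5], axis=0)  # central 95% band per point
assert lo.shape == hi.shape == mean_prediction.shape
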
kathyatskiv/Background-remover-Back-end | [
"cb9c2273594c158c604e2fc08f0954b866dadf9c"
] | [
"model/u2net.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass REBNCONV(nn.Module):\n def __init__(self,in_ch=3,out_ch=3,dirate=1):\n super(REBNCONV,self).__init__()\n\n self.conv_s1 = nn.Conv2d(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate)\n self.bn_s1 = nn.BatchNorm2d(out_ch)\n self.relu_s1 = nn.ReLU(inplace=True)\n\n def forward(self,x):\n\n hx = x\n xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))\n\n return xout\n\n## upsample tensor 'src' to have the same spatial size with tensor 'tar'\ndef _upsample_like(src,tar):\n\n src = F.upsample(src,size=tar.shape[2:],mode='bilinear')\n\n return src\n\n\n### RSU-7 ###\nclass RSU7(nn.Module):#UNet07DRES(nn.Module):\n\n def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU7,self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)\n self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool5 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1)\n\n self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2)\n\n self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)\n\n def forward(self,x):\n\n hx = x\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n hx = self.pool3(hx3)\n\n hx4 = self.rebnconv4(hx)\n hx = self.pool4(hx4)\n\n hx5 = self.rebnconv5(hx)\n hx = self.pool5(hx5)\n\n hx6 = self.rebnconv6(hx)\n\n hx7 = self.rebnconv7(hx6)\n\n hx6d = self.rebnconv6d(torch.cat((hx7,hx6),1))\n hx6dup = _upsample_like(hx6d,hx5)\n\n hx5d = self.rebnconv5d(torch.cat((hx6dup,hx5),1))\n hx5dup = _upsample_like(hx5d,hx4)\n\n hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))\n hx4dup = _upsample_like(hx4d,hx3)\n\n hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))\n hx2dup = _upsample_like(hx2d,hx1)\n\n hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))\n\n return hx1d + hxin\n\n### RSU-6 ###\nclass RSU6(nn.Module):#UNet06DRES(nn.Module):\n\n def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU6,self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)\n self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)\n\n self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2)\n\n self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv4d = 
REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)\n\n def forward(self,x):\n\n hx = x\n\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n hx = self.pool3(hx3)\n\n hx4 = self.rebnconv4(hx)\n hx = self.pool4(hx4)\n\n hx5 = self.rebnconv5(hx)\n\n hx6 = self.rebnconv6(hx5)\n\n\n hx5d = self.rebnconv5d(torch.cat((hx6,hx5),1))\n hx5dup = _upsample_like(hx5d,hx4)\n\n hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))\n hx4dup = _upsample_like(hx4d,hx3)\n\n hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))\n hx2dup = _upsample_like(hx2d,hx1)\n\n hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))\n\n return hx1d + hxin\n\n### RSU-5 ###\nclass RSU5(nn.Module):#UNet05DRES(nn.Module):\n\n def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU5,self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)\n self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)\n\n self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2)\n\n self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)\n\n def forward(self,x):\n\n hx = x\n\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n hx = self.pool3(hx3)\n\n hx4 = self.rebnconv4(hx)\n\n hx5 = self.rebnconv5(hx4)\n\n hx4d = self.rebnconv4d(torch.cat((hx5,hx4),1))\n hx4dup = _upsample_like(hx4d,hx3)\n\n hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))\n hx2dup = _upsample_like(hx2d,hx1)\n\n hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))\n\n return hx1d + hxin\n\n### RSU-4 ###\nclass RSU4(nn.Module):#UNet04DRES(nn.Module):\n\n def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU4,self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)\n self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)\n self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)\n\n self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)\n\n self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)\n\n def forward(self,x):\n\n hx = x\n\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n\n hx4 = self.rebnconv4(hx3)\n\n hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))\n hx2dup = 
_upsample_like(hx2d,hx1)\n\n hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))\n\n return hx1d + hxin\n\n### RSU-4F ###\nclass RSU4F(nn.Module):#UNet04FRES(nn.Module):\n\n def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU4F,self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)\n self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)\n self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)\n\n self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)\n\n self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)\n self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)\n self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)\n\n def forward(self,x):\n\n hx = x\n\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx2 = self.rebnconv2(hx1)\n hx3 = self.rebnconv3(hx2)\n\n hx4 = self.rebnconv4(hx3)\n\n hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))\n hx2d = self.rebnconv2d(torch.cat((hx3d,hx2),1))\n hx1d = self.rebnconv1d(torch.cat((hx2d,hx1),1))\n\n return hx1d + hxin\n\n\n##### U^2-Net ####\nclass U2NET(nn.Module):\n\n def __init__(self,in_ch=3,out_ch=1):\n super(U2NET,self).__init__()\n\n self.stage1 = RSU7(in_ch,32,64)\n self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage2 = RSU6(64,32,128)\n self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage3 = RSU5(128,64,256)\n self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage4 = RSU4(256,128,512)\n self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage5 = RSU4F(512,256,512)\n self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage6 = RSU4F(512,256,512)\n\n # decoder\n self.stage5d = RSU4F(1024,256,512)\n self.stage4d = RSU4(1024,128,256)\n self.stage3d = RSU5(512,64,128)\n self.stage2d = RSU6(256,32,64)\n self.stage1d = RSU7(128,16,64)\n\n self.side1 = nn.Conv2d(64,out_ch,3,padding=1)\n self.side2 = nn.Conv2d(64,out_ch,3,padding=1)\n self.side3 = nn.Conv2d(128,out_ch,3,padding=1)\n self.side4 = nn.Conv2d(256,out_ch,3,padding=1)\n self.side5 = nn.Conv2d(512,out_ch,3,padding=1)\n self.side6 = nn.Conv2d(512,out_ch,3,padding=1)\n\n self.outconv = nn.Conv2d(6,out_ch,1)\n\n def forward(self,x):\n\n hx = x\n\n #stage 1\n hx1 = self.stage1(hx)\n hx = self.pool12(hx1)\n\n #stage 2\n hx2 = self.stage2(hx)\n hx = self.pool23(hx2)\n\n #stage 3\n hx3 = self.stage3(hx)\n hx = self.pool34(hx3)\n\n #stage 4\n hx4 = self.stage4(hx)\n hx = self.pool45(hx4)\n\n #stage 5\n hx5 = self.stage5(hx)\n hx = self.pool56(hx5)\n\n #stage 6\n hx6 = self.stage6(hx)\n hx6up = _upsample_like(hx6,hx5)\n\n #-------------------- decoder --------------------\n hx5d = self.stage5d(torch.cat((hx6up,hx5),1))\n hx5dup = _upsample_like(hx5d,hx4)\n\n hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))\n hx4dup = _upsample_like(hx4d,hx3)\n\n hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))\n hx2dup = _upsample_like(hx2d,hx1)\n\n hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))\n\n\n #side output\n d1 = self.side1(hx1d)\n\n d2 = self.side2(hx2d)\n d2 = _upsample_like(d2,d1)\n\n d3 = self.side3(hx3d)\n d3 = _upsample_like(d3,d1)\n\n d4 = self.side4(hx4d)\n d4 = _upsample_like(d4,d1)\n\n d5 = self.side5(hx5d)\n d5 = _upsample_like(d5,d1)\n\n d6 = self.side6(hx6)\n d6 = _upsample_like(d6,d1)\n\n d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))\n\n return F.sigmoid(d0), F.sigmoid(d1), F.sigmoid(d2), F.sigmoid(d3), F.sigmoid(d4), F.sigmoid(d5), F.sigmoid(d6)\n\n### U^2-Net small 
###\nclass U2NETP(nn.Module):\n\n def __init__(self,in_ch=3,out_ch=1):\n super(U2NETP,self).__init__()\n\n self.stage1 = RSU7(in_ch,16,64)\n self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage2 = RSU6(64,16,64)\n self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage3 = RSU5(64,16,64)\n self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage4 = RSU4(64,16,64)\n self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage5 = RSU4F(64,16,64)\n self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)\n\n self.stage6 = RSU4F(64,16,64)\n\n # decoder\n self.stage5d = RSU4F(128,16,64)\n self.stage4d = RSU4(128,16,64)\n self.stage3d = RSU5(128,16,64)\n self.stage2d = RSU6(128,16,64)\n self.stage1d = RSU7(128,16,64)\n\n self.side1 = nn.Conv2d(64,out_ch,3,padding=1)\n self.side2 = nn.Conv2d(64,out_ch,3,padding=1)\n self.side3 = nn.Conv2d(64,out_ch,3,padding=1)\n self.side4 = nn.Conv2d(64,out_ch,3,padding=1)\n self.side5 = nn.Conv2d(64,out_ch,3,padding=1)\n self.side6 = nn.Conv2d(64,out_ch,3,padding=1)\n\n self.outconv = nn.Conv2d(6,out_ch,1)\n\n def forward(self,x):\n\n hx = x\n\n #stage 1\n hx1 = self.stage1(hx)\n hx = self.pool12(hx1)\n\n #stage 2\n hx2 = self.stage2(hx)\n hx = self.pool23(hx2)\n\n #stage 3\n hx3 = self.stage3(hx)\n hx = self.pool34(hx3)\n\n #stage 4\n hx4 = self.stage4(hx)\n hx = self.pool45(hx4)\n\n #stage 5\n hx5 = self.stage5(hx)\n hx = self.pool56(hx5)\n\n #stage 6\n hx6 = self.stage6(hx)\n hx6up = _upsample_like(hx6,hx5)\n\n #decoder\n hx5d = self.stage5d(torch.cat((hx6up,hx5),1))\n hx5dup = _upsample_like(hx5d,hx4)\n\n hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))\n hx4dup = _upsample_like(hx4d,hx3)\n\n hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))\n hx3dup = _upsample_like(hx3d,hx2)\n\n hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))\n hx2dup = _upsample_like(hx2d,hx1)\n\n hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))\n\n\n #side output\n d1 = self.side1(hx1d)\n\n d2 = self.side2(hx2d)\n d2 = _upsample_like(d2,d1)\n\n d3 = self.side3(hx3d)\n d3 = _upsample_like(d3,d1)\n\n d4 = self.side4(hx4d)\n d4 = _upsample_like(d4,d1)\n\n d5 = self.side5(hx5d)\n d5 = _upsample_like(d5,d1)\n\n d6 = self.side6(hx6)\n d6 = _upsample_like(d6,d1)\n\n d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))\n\n return F.sigmoid(d0), F.sigmoid(d1), F.sigmoid(d2), F.sigmoid(d3), F.sigmoid(d4), F.sigmoid(d5), F.sigmoid(d6)\n"
] | [
[
"torch.nn.functional.upsample",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.functional.sigmoid",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
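
A quick shape check for the U2NET defined above (a sketch: the 1x3x320x320 input size is an assumption, chosen so the repeated 2x2 poolings divide cleanly). Note that F.upsample and F.sigmoid used in the entry are deprecated aliases in recent PyTorch; F.interpolate and torch.sigmoid are the current spellings with identical behavior here.

import torch

net = U2NET(in_ch=3, out_ch=1).eval()    # U2NET class from the file above
x = torch.randn(1, 3, 320, 320)          # assumed input size
with torch.no_grad():
    d0, d1, d2, d3, d4, d5, d6 = net(x)  # fused map d0 plus six side outputs
print(d0.shape)                          # torch.Size([1, 1, 320, 320]); all outputs share this shape
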
Denesh1998/pytorch-a2c-ppo-acktr-gail | [
"fc187845a4c562cbf9b2b2b3afb19b4fdda07a90"
] | [
"a2c_ppo_acktr/arguments.py"
] | [
"import argparse\n\nimport torch\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='RL')\n parser.add_argument(\n '--algo', default='a2c', help='algorithm to use: a2c | ppo | acktr')\n parser.add_argument(\n '--gail',\n action='store_true',\n default=False,\n help='do imitation learning with gail')\n parser.add_argument(\n '--gail-experts-dir',\n default='./gail_experts',\n help='directory that contains expert demonstrations for gail')\n parser.add_argument(\n '--gail-batch-size',\n type=int,\n default=128,\n help='gail batch size (default: 128)')\n parser.add_argument(\n '--gail-epoch', type=int, default=5, help='gail epochs (default: 5)')\n parser.add_argument(\n '--lr', type=float, default=2.5e-4, help='learning rate (default: 7e-4)')\n parser.add_argument(\n '--eps',\n type=float,\n default=1e-5,\n help='RMSprop optimizer epsilon (default: 1e-5)')\n parser.add_argument(\n '--alpha',\n type=float,\n default=0.99,\n help='RMSprop optimizer apha (default: 0.99)')\n parser.add_argument(\n '--gamma',\n type=float,\n default=0.99,\n help='discount factor for rewards (default: 0.99)')\n parser.add_argument(\n '--use-gae',\n action='store_true',\n default=False,\n help='use generalized advantage estimation')\n parser.add_argument(\n '--gae-lambda',\n type=float,\n default=0.95,\n help='gae lambda parameter (default: 0.95)')\n parser.add_argument(\n '--entropy-coef',\n type=float,\n default=0.01,\n help='entropy term coefficient (default: 0.01)')\n parser.add_argument(\n '--value-loss-coef',\n type=float,\n default=0.5,\n help='value loss coefficient (default: 0.5)')\n parser.add_argument(\n '--max-grad-norm',\n type=float,\n default=0.5,\n help='max norm of gradients (default: 0.5)')\n parser.add_argument(\n '--seed', type=int, default=1, help='random seed (default: 1)')\n parser.add_argument(\n '--cuda-deterministic',\n action='store_true',\n default=False,\n help=\"sets flags for determinism when using CUDA (potentially slow!)\")\n parser.add_argument(\n '--num-processes',\n type=int,\n # default=16,\n default=4,\n help='how many training CPU processes to use (default: 16)')\n parser.add_argument(\n '--num-steps',\n type=int,\n default=5,\n help='number of forward steps in A2C (default: 5)')\n parser.add_argument(\n '--ppo-epoch',\n type=int,\n default=4,\n help='number of ppo epochs (default: 4)')\n parser.add_argument(\n '--num-mini-batch',\n type=int,\n default=32,\n help='number of batches for ppo (default: 32)')\n parser.add_argument(\n '--clip-param',\n type=float,\n default=0.2,\n help='ppo clip parameter (default: 0.2)')\n parser.add_argument(\n '--log-interval',\n type=int,\n default=10,\n help='log interval, one log per n updates (default: 10)')\n parser.add_argument(\n '--save-interval',\n type=int,\n default=100,\n help='save interval, one save per n updates (default: 100)')\n parser.add_argument(\n '--eval-interval',\n type=int,\n default=None,\n help='eval interval, one eval per n updates (default: None)')\n parser.add_argument(\n '--num-env-steps',\n type=int,\n # default=10e6,\n default=4e3,\n help='number of environment steps to train (default: 10e6)')\n parser.add_argument(\n '--env-name',\n default='PongNoFrameskip-v4',\n help='environment to train on (default: PongNoFrameskip-v4)')\n parser.add_argument(\n '--log-dir',\n default='/tmp/gym/',\n help='directory to save agent logs (default: /tmp/gym)')\n parser.add_argument(\n '--save-dir',\n default='./trained_models/',\n help='directory to save agent logs (default: ./trained_models/)')\n 
parser.add_argument(\n '--no-cuda',\n action='store_true',\n default=False,\n help='disables CUDA training')\n parser.add_argument(\n '--use-proper-time-limits',\n action='store_true',\n default=False,\n help='compute returns taking into account time limits')\n parser.add_argument(\n '--recurrent-policy',\n action='store_true',\n default=False,\n help='use a recurrent policy')\n parser.add_argument(\n '--IAM-policy',\n action='store_true',\n default=True,\n help='use a recurrent policy')\n parser.add_argument(\n '--use-linear-lr-decay',\n action='store_true',\n default=False,\n help='use a linear schedule on the learning rate')\n args = parser.parse_args()\n\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n\n assert args.algo in ['a2c', 'ppo', 'acktr']\n if args.recurrent_policy:\n assert args.algo in ['a2c', 'ppo'], \\\n 'Recurrent policy is not implemented for ACKTR'\n\n return args\n"
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
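
Since get_args() above calls parser.parse_args() with no argument, it reads sys.argv; a script or test can drive it by patching sys.argv first. A minimal sketch (the flag values are illustrative only):

import sys

sys.argv = ['main.py', '--algo', 'ppo', '--use-gae', '--num-steps', '128']
args = get_args()                 # get_args from the file above
print(args.algo, args.num_steps)  # 'ppo' 128
print(args.cuda)                  # True only when CUDA is available and --no-cuda is absent
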
gchhablani/code-soup | [
"eec666b6cd76bad9c7133a185bb85021b4a390f0"
] | [
"code_soup/common/vision/models/simple_cnn_classifier.py"
] | [
"\"\"\"Implements a simple CNN Classifier model\"\"\"\n\nimport torch.nn as nn\n\n\nclass SimpleCnnClassifier(nn.Module):\n def __init__(self, input_shape=(1, 28, 28), num_labels=10):\n super().__init__()\n self.num_channels = input_shape[0]\n self.image_size = input_shape[1:]\n self.num_labels = num_labels\n self.model = nn.Sequential(\n nn.Conv2d(\n in_channels=self.num_channels, out_channels=32, kernel_size=(3, 3)\n ),\n nn.ReLU(),\n nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3)),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2)),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3)),\n nn.ReLU(),\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3)),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2)),\n nn.Flatten(),\n nn.Linear(\n in_features=(((self.image_size[0] - 4) // 2 - 4) // 2)\n * (((self.image_size[1] - 4) // 2 - 4) // 2)\n * 64,\n out_features=200,\n ),\n nn.ReLU(),\n nn.Linear(in_features=200, out_features=200),\n nn.ReLU(),\n nn.Linear(in_features=200, out_features=num_labels),\n )\n\n def forward(self, x):\n return self.model(x)\n"
] | [
[
"torch.nn.Conv2d",
"torch.nn.Flatten",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
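
The in_features expression in SimpleCnnClassifier above is plain spatial arithmetic: each unpadded 3x3 conv trims 2 pixels and each 2x2 pool halves, so 28 -> (28-4)//2 = 12 -> (12-4)//2 = 4 per side, i.e. 4*4*64 = 1024 flattened features. A quick check of the resulting shapes:

import torch

model = SimpleCnnClassifier(input_shape=(1, 28, 28), num_labels=10)  # class from the file above
logits = model(torch.randn(8, 1, 28, 28))  # a batch of 8 MNIST-sized images
print(logits.shape)                        # torch.Size([8, 10])
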
dpetrovykh/FIAR | [
"45bca950184ea87399f4630bb601b2fccf8795f9"
] | [
"FIAR_Analyzer.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 7 12:26:54 2021\n\n@author: dpetrovykh\n\"\"\"\n\nimport matplotlib.pyplot as plt\nplt.ion()\n## TODO\n## Make sure that ion() and ioff() when neccessary.\nimport numpy as np\nimport pandas as pd\nfrom collections import namedtuple\nimport math\nimport random\n\n\n## DEBUGGING CONSTANTS\nPRINT_CELL_CHOICE = False\nSHOW_QUEUES = False\nSHOW_PoTs = False\nSHOW_SPots = False\nSHOW_HPots =False\nSHOW_Evals = False\n\n\nMAX_MOVE_REACH = 5\nEMPTY_CHAR = '+'\nPLAYER2MARKER = {'red':'r',\n 'black':'b'}\nBLACK_VICTORY = ['b']*5\nRED_VICTORY = ['r']*5\n\nPoT_HISTORY_TEMP = pd.DataFrame({'marker':[],\n 'SPoTs':[],\n 'HPoTs':[],\n 'softPower':[],\n 'hardPower':[],\n 'softThreat':[],\n 'hardThreat':[],\n 'player':[]})\n\n## Power and Threat Patterns which have not been processed.\n\nRAW_SPs = [\n ['SP1','efftte'],\n ['SP1r','ettffe'],\n ['SP2','eftfte'],\n ['SP2r','etftfe'],\n ['SP3','efttfe'],\n ['SP4','etffte']]\nRAW_HPs = [\n ['HP1','ttfff'],\n ['HP1r','ffftt'],\n ['HP2','tftff'],\n ['HP2r','fftft'],\n ['HP3','tfftf'],\n ['HP3r', 'ftfft'],\n ['HP4','tffft'],\n ['HP5','fttff'],\n ['HP5r','ffttf'],\n ['HP6','ftftf']]\nRAW_STs = [\n ['ST1','dfffde'],\n ['ST1r','edfffd'],\n ['ST2','dffdfd'],\n ['ST2r','dfdffd']\n ]\nRAW_HTs = [\n ['HT1','dffff'],\n ['HT1r','ffffd'],\n ['HT2','fdfff'],\n ['HT2r','fffdf'],\n ['HT3','ffdff']\n ]\nRAW_Spots = [\n ['Spot1','efttte'],\n ['Spot1r','etttfe'],\n ['Spot2','etftte'],\n ['Spot2r','ettfte']\n ]\nRAW_Hpots = [\n ['Hpot1','bbbff'],\n ['Hpot1r','ffbbb'],\n ['Hpot2','bbfbf'],\n ['Hpot2r','fbfbb'],\n ['Hpot3','bbffb'],\n ['Hpot3r','bffbb'],\n ['Hpot4','bfbbf'],\n ['Hpot4r','fbbfb'],\n ['Hpot5','bfbfb'],\n ['Hpot6','fbbbf']]\n\nPATTERN_TEMPLATE = namedtuple('Pattern','name player match_pat rel_markers rel_triggers rel_defusers rel_boosters')\nPoT_TEMPLATE = namedtuple('PoT','names player marker_locs trigger_locs')\nSPOT_TEMPLATE = namedtuple('SPot','names player marker_locs trigger_locs')\nHPOT_TEMPLATE = namedtuple('HPot','names player marker_locs booster_locs')\nSOFT_POWER = namedtuple('SoftPower','names player marker_locs trigger_locs booster_locs defuser_locs')\nHARD_POWER = namedtuple('HardPower','names player marker_locs trigger_locs defuser_locs')\nSOFT_THREAT = namedtuple('SoftThreat','names player marker_locs defuser_locs')\nHARD_THREAT = namedtuple('HardThreat','names player marker_locs defuser_locs')\n\nIJ_TO_XY_R = [[0,1],[-1,0]] #Matrix for transforming unit vectors from ij to xy coords\nXY_TO_IJ_R = [[0,-1],[1,0]] #Matrix for transforming unit vectors from xy to ij coords.\n\n## Evaluator Constants\n\nEVAL_CONSTANTS = namedtuple('Eval_Constants','HT_fins ST_fins HP_trigs SP_trigs SPot_trigs boosts HT_defs ST_defs SP_defs HP_defs SPot_blocks')\nEv_sum_Ks = EVAL_CONSTANTS(HT_fins= 1000,\n ST_fins = 50,\n HP_trigs = 1,\n SP_trigs = 1.25,\n SPot_trigs = 1,\n boosts = 0.5,\n HT_defs = 100,\n ST_defs = 10,\n SP_defs = 0.75,\n HP_defs = 0.75,\n SPot_blocks = 0.01)\n # Mult_HP_trigs = 25,\n # Mult_SP_trigs = 5,\n # EN_mult_SP_trigs = 5,\n # EN_mult_HP_trigs = 7.5)\n\n# Ev_funcs = EVAL_CONSTANTS(HT_fins = lambda cnt: cnt*Ev_sum_Ks.HT_fins,\n# ST_fins = lambda cnt: cnt*Ev_sum_Ks.ST_fins,\n# #HP_SP_trigs = lambda cnt: cnt*100000,\n# SPot_trigs = lambda cnt: cnt*Ev_sum_Ks.SPot_trigs,\n# boosts = lambda cnt: cnt*Ev_sum_Ks.boosts,\n# HT_defs = lambda cnt: cnt*Ev_sum_Ks.HT_defs,\n# ST_defs = lambda cnt: cnt*Ev_sum_Ks.ST_defs,\n# SP_defs = lambda cnt: cnt*Ev_sum_Ks.SP_defs,\n# HP_defs = lambda cnt: 
cnt*Ev_sum_Ks.HP_defs,\n# SPot_blocks= lambda cnt: cnt*Ev_sum_Ks.SPot_blocks)\n \ndef pattern_processing(raw_patterns, player):\n list_ = [] #Empty list to be returned\n for name, pattern in raw_patterns:\n matching_pattern = []\n rel_triggers = []\n rel_markers = []\n rel_defusers = []\n rel_boosters = []\n for index, char in enumerate(pattern):\n rel_loc = index-(len(pattern)-1)\n if char == 'e':\n matching_pattern.append(EMPTY_CHAR)\n #if this is a soft power\n if name[0:2] == 'SP':\n #Then the empty spots that are not triggers are boosters.\n rel_boosters.append(rel_loc)\n # Then the empty spots are also defusers\n rel_defusers.append(rel_loc)\n elif char == 't':\n matching_pattern.append(EMPTY_CHAR)\n rel_triggers.append(rel_loc)\n if name[0:2] in ['SP','HP']:\n rel_defusers.append(rel_loc)\n elif char == 'f':\n matching_pattern.append(PLAYER2MARKER[player])\n rel_markers.append(rel_loc)\n elif char == 'd':\n matching_pattern.append(EMPTY_CHAR)\n rel_defusers.append(rel_loc)\n elif char == 'b':\n matching_pattern.append(EMPTY_CHAR)\n rel_boosters.append(rel_loc)\n list_.append(PATTERN_TEMPLATE(name,player, matching_pattern, rel_markers, rel_triggers, rel_defusers, rel_boosters))\n return list_\n \n## Constants Requiring Processing\n## HPotentail\nRed_HPot_Temps = pattern_processing(RAW_Hpots, 'red')\nBlack_HPot_Temps = pattern_processing(RAW_Hpots, 'black')\nHPot_Temps = list(Red_HPot_Temps)\nHPot_Temps.extend(Black_HPot_Temps)\n## SPotential\nRed_SPot_Temps = pattern_processing(RAW_Spots, 'red')\nBlack_SPot_Temps = pattern_processing(RAW_Spots, 'black')\nSPot_Temps = list(Red_SPot_Temps)\nSPot_Temps.extend(Black_SPot_Temps)\n#print(f\"SPot_Temps: {SPot_Temps}\")\n##Soft Powers\nRed_SP_Temps = pattern_processing(RAW_SPs, 'red')\nBlack_SP_Temps = pattern_processing(RAW_SPs, 'black')\nSP_Temps = list(Red_SP_Temps)\nSP_Temps.extend(Black_SP_Temps)\n#print(SP_Temps)\n##Hard Powers\nRed_HP_Temps = pattern_processing(RAW_HPs, 'red')\nBlack_HP_Temps = pattern_processing(RAW_HPs, 'black')\nHP_Temps = list(Red_HP_Temps)\nHP_Temps.extend(Black_HP_Temps)\n##Soft Threats\nRed_ST_Temps = pattern_processing(RAW_STs,'red')\nBlack_ST_Temps = pattern_processing(RAW_STs, 'black')\nST_Temps = list(Red_ST_Temps)\nST_Temps.extend(Black_ST_Temps)\n# print(ST_Temps)\n##Hard Threats\nRed_HT_Temps = pattern_processing(RAW_HTs, 'red')\nBlack_HT_Temps = pattern_processing(RAW_HTs, 'black')\nHT_Temps = list(Red_HT_Temps)\nHT_Temps.extend(Black_HT_Temps)\n#print(HT_Temps)\n\nclass Cell():\n def __init__(self,coords):\n self.coords = coords\n self.rating = None\n ## Good for us\n self.HT_finish = 0\n self.ST_finish = 0\n self.HP_triggers = 0\n self.SP_triggers = 0\n self.SPot_triggers = 0\n self.boosters = 0\n ## Bad for them\n self.HT_defusers = 0\n self.ST_defusers = 0\n self.SP_defusers = 0\n self.HP_defusers = 0\n self.EN_SPot_triggers = 0\n\n def __str__(self):\n return f\"\"\"coords = {self.coords}\n rating = {self.rating}\n HT_finish = {self.HT_finish} * {Ev_sum_Ks.HT_fins}\n ST_finish = {self.ST_finish} * {Ev_sum_Ks.ST_fins}\n HP_triggers = {self.HP_triggers} * {Ev_sum_Ks.HP_trigs}\n SP_triggers = {self.SP_triggers} * {Ev_sum_Ks.SP_trigs}\n SPot_triggers = {self.SPot_triggers} * {Ev_sum_Ks.SPot_trigs}\n boosters = {self.boosters} * {Ev_sum_Ks.boosts}\n HT_defusers = {self.HT_defusers} * {Ev_sum_Ks.HT_defs}\n ST_defusers = {self.ST_defusers} * {Ev_sum_Ks.ST_defs}\n SP_defusers = {self.SP_defusers} * {Ev_sum_Ks.SP_defs}\n HP_defusers = {self.HP_defusers} * {Ev_sum_Ks.HP_defs}\n 
EN_SPot_triggers={self.EN_SPot_triggers} * {Ev_sum_Ks.SPot_blocks}\"\"\"\n\n\nclass FIAR_Analyzer():\n '''\n Performs analysis of a FIAR game. Useful for visualizing patterns and for allowing an AI to make moves.\n '''\n def __init__(self, game=None):\n '''\n Test documentation for FIAR __init__()\n '''\n if game:\n self.new_game(game)\n else:\n self.clear()\n # print(\"Powers Or Threats:\")\n # for PoT in self.PoTs:\n # print(PoT)\n \n def new_game(self, game):\n self.game = game\n ## Placeholders for analysis results\n self.victory = None #black, red, or False once game is processed.\n self.matrix = None\n self.PoTs = None\n self.PoTs_dict = None\n self.PoTs_count = None\n self.PoT_history = None\n ## Update all results\n self.update_matrix()\n self.update_PoTs()\n self.update_PoTs_dict()\n self.update_PoTs_count()\n # self.update_PoT_History()\n \n def clear(self):\n self.game = None\n self.victory = None #black, red, or False once game is processed.\n self.matrix = None\n self.PoTs = None\n self.PoTs_dict = None\n self.PoTs_count= None\n self.PoT_history = None\n \n def update_matrix(self):\n '''\n Runs through the game's df and generates a matrix of string characters that represent the markers on each tile.\n\n Returns\n -------\n None.\n\n '''\n matrix = np.ones((self.game.height,self.game.width), dtype=str)\n #print(f\"width: {self.width}, height: {self.height}\")\n matrix[:,:] = EMPTY_CHAR\n for rowi in range(self.game.df.shape[0]):\n x,y,player = self.game.df[['x','y','player']].iloc[rowi,:]\n i,j = self.xy_to_ij((x,y))\n #print(f\" x,y: {x,y} to ij: {i,j}\")\n matrix[i,j]= PLAYER2MARKER[player]\n self.matrix = matrix\n \n def update_PoTs(self):\n ## create all queues\n all_queues = self.queues_gen()\n all_PoTs = []\n for scan_queues in all_queues:\n ##Context of a single scanning direction\n for line_queues in scan_queues:\n ## Context of queues generated from the same starting location\n ## Scan this line of queues and return powers/threats\n line_SPs = []\n line_HPs = []\n line_STs = []\n line_HTs = []\n line_Spots = []\n line_Hpots = []\n ## for each type of power and threat\n for queue6, head_loc, tail_dir in line_queues:\n if SHOW_QUEUES:\n print(f\"queue6: {queue6}, head_loc: {head_loc}, tail_dir: {tail_dir}\")\n ## Context of a single queue.\n queue5 = list(queue6[1:])\n ## Scan queue for powers and threats\n if queue5 == RED_VICTORY:\n #print(\"Red victory detected\")\n self.victory = 'red'\n elif queue5 == BLACK_VICTORY:\n self.victory = 'black'\n #print(\"Black victory detected\")\n for line_storage, PATTERNS, queue in[[line_SPs, SP_Temps,queue6],\n [line_HPs, HP_Temps, queue5],\n [line_STs, ST_Temps, queue6 ],\n [line_HTs, HT_Temps, queue5],\n [line_Spots, SPot_Temps, queue6],\n [line_Hpots, HPot_Temps, queue5]]: \n for pattern in PATTERNS:\n ## Context of a single queue being compaed to a single pattern\n if queue == pattern.match_pat:\n ## Context of a match having been found.\n marker_locs = [(int(head_loc[0]-tail_dir[0]*rel_loc),int( head_loc[1]-tail_dir[1]*rel_loc)) for rel_loc in pattern.rel_markers]\n #print(f\"marker_locs: {marker_locs}\")\n trigger_locs = [(int(head_loc[0]-tail_dir[0]*rel_loc), int(head_loc[1]-tail_dir[1]*rel_loc)) for rel_loc in pattern.rel_triggers]\n defuser_locs = [(int(head_loc[0]-tail_dir[0]*rel_loc), int(head_loc[1]-tail_dir[1]*rel_loc)) for rel_loc in pattern.rel_defusers]\n booster_locs = [(int(head_loc[0]-tail_dir[0]*rel_loc), int(head_loc[1]-tail_dir[1]*rel_loc)) for rel_loc in pattern.rel_boosters]\n PoTtype = pattern.name[0:2]\n 
#print(f\"PoTtype: {PoTtype}\")\n PoT_Template = {'SP':SOFT_POWER,\n 'HP':HARD_POWER,\n 'ST':SOFT_THREAT,\n 'HT':HARD_THREAT,\n 'Sp':SPOT_TEMPLATE,\n 'Hp':HPOT_TEMPLATE}[PoTtype]\n PoT = None\n if PoTtype == 'SP':\n PoT = PoT_Template([pattern.name], pattern.player, marker_locs, trigger_locs, booster_locs, defuser_locs)\n elif PoTtype == 'Hp':\n PoT = PoT_Template([pattern.name], pattern.player, marker_locs, booster_locs)\n elif PoTtype =='Sp':\n PoT = PoT_Template([pattern.name], pattern.player, marker_locs, trigger_locs)\n elif PoTtype =='HP':\n PoT = PoT_Template([pattern.name], pattern.player, marker_locs, trigger_locs, defuser_locs)\n elif PoTtype in ['ST','HT']:\n PoT = PoT_Template([pattern.name], pattern.player, marker_locs, defuser_locs)\n else:\n raise Exception('Shouldnt be here')\n line_storage.append(PoT)\n #print(f\"Match found: {PoT}\")\n ## Collapse nonunique pattern matches within each type of power and threat\n line_SPs = self.collapse_PoTs(line_SPs)\n line_HPs = self.collapse_PoTs(line_HPs)\n line_STs = self.collapse_PoTs(line_STs)\n line_HTs = self.collapse_PoTs(line_HTs)\n line_Spots = self.collapse_PoTs(line_Spots)\n line_Hpots= self.collapse_PoTs(line_Hpots)\n ## compare PoTs heirarchically to eliminate duplicates.\n line_master_PoTs = list(line_HTs)\n line_master_PoTs.extend(self.nonrepeat_PoTs(line_master_PoTs, line_STs))\n line_master_PoTs.extend(self.nonrepeat_PoTs(line_master_PoTs, line_HPs))\n line_master_PoTs.extend(self.nonrepeat_PoTs(line_master_PoTs, line_SPs)) \n line_master_PoTs.extend(self.nonrepeat_PoTs(line_master_PoTs, line_Hpots)) \n line_master_PoTs.extend(self.nonrepeat_PoTs(line_master_PoTs, line_Spots))\n all_PoTs.extend(line_master_PoTs)\n self.PoTs = all_PoTs\n ## Debugging displays of various things found.\n if SHOW_PoTs:\n print(\"Powers Or Threats:\")\n for PoT in self.PoTs:\n if type(PoT) not in [SPOT_TEMPLATE,HPOT_TEMPLATE]:\n print(PoT)\n if SHOW_SPots:\n print(\"SPots:\")\n for PoT in self.PoTs:\n if type(PoT) == SPOT_TEMPLATE:\n print(PoT)\n if SHOW_HPots:\n print(\"HPots:\")\n for PoT in self.PoTs:\n if type(PoT) == HPOT_TEMPLATE:\n print(PoT)\n \n def update_PoTs_dict(self):\n PoTs_dict = {\n 'red':{SPOT_TEMPLATE:[],\n HPOT_TEMPLATE:[],\n SOFT_POWER:[],\n HARD_POWER:[],\n SOFT_THREAT:[],\n HARD_THREAT:[]},\n 'black':{SPOT_TEMPLATE:[],\n HPOT_TEMPLATE:[],\n SOFT_POWER:[],\n HARD_POWER:[],\n SOFT_THREAT:[],\n HARD_THREAT:[]}\n }\n for PoT in self.PoTs:\n PoTs_dict[PoT.player][type(PoT)].append(PoT) \n self.PoTs_dict = PoTs_dict\n \n def update_PoTs_count(self):\n PoTs_count = {\n 'red':{SPOT_TEMPLATE:0,\n HPOT_TEMPLATE:0,\n SOFT_POWER:0,\n HARD_POWER:0,\n SOFT_THREAT:0,\n HARD_THREAT:0},\n 'black':{SPOT_TEMPLATE:0,\n HPOT_TEMPLATE:0,\n SOFT_POWER:0,\n HARD_POWER:0,\n SOFT_THREAT:0,\n HARD_THREAT:0}}\n for PoT in self.PoTs:\n PoTs_count[PoT.player][type(PoT)] +=1\n self.PoTs_count= PoTs_count\n \n def update_PoT_History(self):\n '''\n Updates the PoT count for the entire game history\n\n Returns\n -------\n None.\n\n '''\n df = PoT_HISTORY_TEMP.copy(deep=True)\n for game_state in self.game:\n analysis = FIAR_Analyzer(game_state)\n new_rows = FIAR_Analyzer.PoT_count_to_df(game_state, analysis.PoTs_count)\n df = pd.concat([df, new_rows], ignore_index=True)\n self.PoT_History = df\n \n @staticmethod\n def PoT_count_to_df(game, PoT_count):\n df_return = PoT_HISTORY_TEMP.copy(deep=True)\n for player in ['red','black']:\n df = PoT_HISTORY_TEMP.copy(deep=True)\n df.marker = [game.num_moves]\n df.SPoTs = [PoT_count[player][SPOT_TEMPLATE]]\n 
df.HPoTs = [PoT_count[player][HPOT_TEMPLATE]]\n            df.softPower = [PoT_count[player][SOFT_POWER]]\n            df.hardPower = [PoT_count[player][HARD_POWER]]\n            df.softThreat = [PoT_count[player][SOFT_THREAT]]\n            df.hardThreat = [PoT_count[player][HARD_THREAT]]\n            df.player = [player]\n            df_return = pd.concat([df_return, df], ignore_index=True)\n        return df_return\n    \n    \n    \n    # {'SPoTs':[],\n    # 'HPoTs':[],\n    # 'softPower':[],\n    # 'hardPower':[],\n    # 'softThreat':[],\n    # 'hardThreat':[],\n    # 'player':[]})\n    \n    def point_evaluater(self, evaluator):\n        '''\n        A closure that returns a function for querying any location's move rating.\n\n        Parameters\n        ----------\n        evaluator : TYPE\n            DESCRIPTION.\n\n        Returns\n        -------\n        evaluate_point : function\n            Given a board location, prints the rating of potentially moving there.\n\n        '''\n        cell_dict = self.cell_dict_gen()\n        def evaluate_point(x,y):    \n            cell = cell_dict[(x,y)]\n            cell.rating = evaluator(cell)\n            print(cell)\n        return evaluate_point    \n    \n    def game_decider(self, game, evaluator):\n        '''\n        Accepts a FIAR game, applies an evaluator to its cells, and returns a recommended move\n\n        Parameters\n        ----------\n        game : FIAR\n            A FIAR game.\n\n        Returns\n        -------\n        location : (x,y) int tuple\n            Coordinates of recommended move\n\n        '''\n        ## Generate cell_dict\n        cell_dict = self.cell_dict_gen()\n        # print(f\"cell_dict_keys: {cell_dict.keys()}\")\n        #print(f\"original cell_dict: {cell_dict}\")\n        ## Limit entries in cell dict\n        #limited_cell_dict = {key:val for key, val in cell_dict.items() if key in game.playable_points()}\n        ## Apply evaluator to cell_dict\n        cell_dict = FIAR_Analyzer.evaluate_cell_dict(cell_dict, evaluator)\n        #print(f\"eval'd, limited cell_dict: {limited_cell_dict}\")\n        ## Choose cell with maximum value\n        \n        ## From internet\n        cells = cell_dict.values()\n        maxRating = max(cells, key = lambda cell: cell.rating).rating\n        chosen_cell = None\n        if maxRating > Ev_sum_Ks.SPot_blocks:\n            max_cells = [cell for cell in cells if cell.rating ==maxRating]\n            chosen_cell = random.choice(max_cells)\n        elif maxRating ==0:\n            chosen_cell = cell_dict[(0,0)]\n        else: # 0 < maxRating <= Ev_sum_Ks.SPot_blocks\n            ## find a cell close to previous cell\n            print(\"game_decider: There are no good choices!\")\n            random_taken = random.choice(game.taken_locs())\n            chosen = False\n            while chosen == False:\n                location = (random_taken[0]+random.randint(-2,2),\n                            random_taken[1]+random.randint(-2,2))\n                if location not in game.taken_locs():\n                    chosen = True\n                    chosen_cell = cell_dict[location]\n        if PRINT_CELL_CHOICE:\n            print(f\"chosen_cell: {chosen_cell}\")\n        \n        ## return location of cell with maximum value\n        return chosen_cell.coords\n    \n    @staticmethod\n    def eval_HPs_SPs(HP_count, SP_count):\n        '''\n        NOT CURRENTLY USED. INTENDED FOR FUTURE VERSION WHERE SPECIAL CASES ARE CONSIDERED, like Ficks.\n        Evaluates the point rating that the Soft Power count and Hard Power count contribute to the total cell rating. 
Considers the importance of special cases where there are multiple of some or each.\n\n Parameters\n ----------\n HP_count : int\n Number of friendly hard power triggers at this cell location\n SP_count : int\n Number of friendly soft power triggers at this cell location\n\n Returns\n -------\n HP_rating : float\n Point value attributed to friendly Hard Power triggers\n SP_rating : float\n Point value attributed to friendly Soft Power triggers\n\n '''\n if HP_count >1:\n HP_rating = 25\n SP_rating = SP_count*Ev_sum_Ks.SP_trigs\n elif HP_count==1 and SP_count >0:\n SP_rating = 12.5\n HP_rating = 12.5\n elif SP_count >1:\n HP_rating = HP_count*Ev_sum_Ks.HP_trigs\n SP_rating = 5\n else:\n HP_rating = HP_count*Ev_sum_Ks.HP_trigs\n SP_rating = SP_count*Ev_sum_Ks.SP_trigs\n return HP_rating, SP_rating\n \n # @staticmethod\n # def evaluate_point_piecewise(cell):\n # return (Ev_funcs.HT_fins(cell.HT_finish) + \n # Ev_funcs.ST_fins(cell.ST_finish) +\n # Ev_funcs.HP_SP_trigs(cell.HP_triggers, cell.SP_triggers) + \n # Ev_funcs.SPot_trigs(cell.SPot_triggers) + \n # Ev_funcs.boosts(cell.boosters) + \n # Ev_funcs.HT_defs(cell.HT_defusers) +\n # Ev_funcs.ST_defs(cell.ST_defusers) +\n # Ev_funcs.SP_defs(cell.SP_defusers) +\n # Ev_funcs.HP_defs(cell.HP_defusers) + \n # Ev_funcs.SPot_blocks(cell.EN_SPot_triggers))\n \n @staticmethod\n def evaluate_point_sum(cell):\n '''\n Applies a weighted sum to the features stored in an individual cell.\n\n Parameters\n ----------\n cell : TYPE\n DESCRIPTION.\n\n Returns\n -------\n float\n Weighted sum.\n\n '''\n return (cell.HT_finish*Ev_sum_Ks.HT_fins + \n cell.ST_finish*Ev_sum_Ks.ST_fins + \n cell.HP_triggers*Ev_sum_Ks.HP_trigs + \n cell.SP_triggers*Ev_sum_Ks.SP_trigs +\n cell.SPot_triggers*Ev_sum_Ks.SPot_trigs + \n cell.boosters*Ev_sum_Ks.boosts + \n cell.HT_defusers*Ev_sum_Ks.HT_defs + \n cell.ST_defusers*Ev_sum_Ks.ST_defs + \n cell.SP_defusers*Ev_sum_Ks.SP_defs + \n cell.HP_defusers*Ev_sum_Ks.HP_defs +\n cell.EN_SPot_triggers*Ev_sum_Ks.SPot_blocks) \n \n @staticmethod\n def evaluate_cell_dict(cell_dict, evaluator):\n '''\n Scans every cell in a cell_dict and populates the 'rating' field. \n \n SideEffects:\n ----------\n cell_dict:\n Adds a value for the 'rating' field\n \n Parameters\n ----------\n cell_dict : {(x,y):Cell}.\n keys are coordinate tuples of positions on board\n Cell objects with populated fields except for 'rating'\n\n Returns\n -------\n evald_cell_dict : {(x,y):Cell}.\n keys are coordinate tuples of positions on board\n Cell objects with all populated fields, including 'rating'\n\n '''\n for location, cell in cell_dict.items():\n rating = evaluator(cell)\n if SHOW_Evals:\n print(f\"{location} rated as: {rating}\")\n cell_dict[location].rating = rating\n return cell_dict \n \n def nonrepeat_PoTs(self,master_list, list_):\n '''\n returns a list of PoTs in list_ whose marker locations are not contained by any one PoT in master_list \n\n Parameters\n ----------\n master_list : list of PoTs\n list of\n added_list : TYPE\n DESCRIPTION.\n\n Returns\n -------\n None.\n\n '''\n unique_PoTs = []\n for PoT in list_:\n match_none = True #Assume that current PoT is unique\n for master_PoT in master_list:\n match = True #Assume that current master PoT is a match\n ## Check if PoT marker locations are a subset of master_PoT's marker locations.\n for loc in PoT.marker_locs:\n if loc not in master_PoT.marker_locs:\n match = False\n if match:\n match_none = False\n #print(f\"PoT eliminated for non-uniqueness: {PoT}. 
Matched {master_PoT}\")\n if match_none:\n unique_PoTs.append(PoT)\n #print(f\"PoT found to be unique: {PoT}\")\n return unique_PoTs\n \n def collapse_PoTs(self,line_PoTs):\n '''Takes a list of Powers or Threats (PoTs) and returns a list of \"collapsed\" ones, where any PoTs with matching marker locations are combined'''\n collapsed_PoTs = [] #list of PoTs with unique marker locations.\n for PoT in line_PoTs:\n match_none = True #Assume that current match has no collapsed matches with the same marker locations.\n ## Check if the PoT matches any collapsed ones\n for col_PoT in collapsed_PoTs:\n match = True #assume that the PoT and col_PoT match each other\n for loc in col_PoT.marker_locs:\n if loc not in PoT.marker_locs:\n match = False\n if match: #If marker locations are shared\n match_none = False\n ##Combine all triggers. Perform an OR on the sets.\n for trigger_loc in getattr(PoT,'trigger_locs',[]):\n if trigger_loc not in getattr(col_PoT,'trigger_locs', []):\n col_PoT.trigger_locs.append(trigger_loc)\n ## Combine all boosters. Perform an OR on the sets.\n for booster_loc in getattr(PoT,'booster_locs',[]):\n if booster_loc not in getattr(col_PoT,'booster_locs', []):\n col_PoT.booster_locs.append(booster_loc)\n ## Find defusers in common. Perform an AND on the sets.\n defuser_locs = []\n for defuser_loc in getattr(col_PoT,'defuser_locs',[]):\n if defuser_loc in getattr(PoT, 'defuser_locs',[]):\n defuser_locs.append(defuser_loc)\n if defuser_locs:\n # Empty list\n for _ in range(len(col_PoT.defuser_locs)):\n col_PoT.defuser_locs.pop()\n # Extend list with new contents\n col_PoT.defuser_locs.extend(defuser_locs)\n col_PoT.names.extend(PoT.names)\n if match_none: #This is a unique PoT\n collapsed_PoTs.append(PoT)\n return collapsed_PoTs\n \n def queues_gen(self, length=6):\n '''\n Returns a list of all queues of interest, along with their head location, and tail direction. Linearly scans through the matrix attribute of a FIAR game and generates the queues of length 'queue_length' along the downward, right-ward, down-and-to-the-left, and down-and-to-the-right directions.\n\n Returns\n -------\n 3-tuple of (queue,head_location, tail_direction)\n - queue (list of individual string characters: Each character represents the content of a scanned position in the\n - head_location (2-tuple of ints): The location of the head of the queue in the x,y convention pertaining to the game coordinates, not matrix coordinates..\n - tail_direction (2-tuple of ints): The unit vector describing the direction from which the queue is coming from in (x,y) notation. 
\n\n'''\n # Define the scans done in all directions\n scan_inputs = [\n [np.array([1,0]),['top']],\n [np.array([1,1]),['left','top']],\n [np.array([0,1]),['left']],\n [np.array([1,-1]),['right','top']]\n ]\n all_queues = []\n for dir_, start_sides in scan_inputs:\n # Convert tail_dir to xy, because it is yielded\n tail_dir = self.rot_ij_to_xy(-dir_)\n #print(f\"head_dir,ij: {dir_}, tail_dir,xy: {tail_dir}\")\n scan_queues = [] #queues all with the same scanning direction.\n for starting_loc in self.merged_edge_coords(start_sides):\n #print(f\"starting_loc: {starting_loc}\")\n line_queues = [] #queues all with the same starting location \n starting_loc = np.array(starting_loc)\n num_steps = self.num_steps(starting_loc,dir_)\n queue6 =[EMPTY_CHAR]*length\n for n in range(num_steps):\n i,j = starting_loc + dir_*n\n queue6.pop(0)\n # If indexing into negative position\n if i<0 or j<0:\n queue6.append(EMPTY_CHAR)\n # If indexing into normal, positive position\n else:\n try: \n queue6.append(self.matrix[i,j])\n except IndexError: #In case index is out of bounds.\n queue6.append(EMPTY_CHAR)\n x,y= self.ij_to_xy((i,j))\n # if (x,y) == (-6,-3):\n # print(f\"Head_loc (i:{i},j:{j})->(x:{x},y:{y})\") \n # print(f\"starting_loc\")\n # print(\"----------------------------------------\")\n head_loc = (x,y)\n #print(f\"queue6: {queue6}\")\n line_queues.append([list(queue6), head_loc, tail_dir])\n scan_queues.append(list(line_queues))\n all_queues.append(list(scan_queues))\n return all_queues\n \n \n \n def num_steps(self, start_loc, dir_):\n \"\"\"\n Calculates the number of queues that must be generated if starting at start_loc and moving in dir.\n\n Parameters\n ----------\n start_loc : tuple of ints\n i,j coordinates describing starting location for scan.\n dir_ : tuple of ints\n i,j coordinates defining unit vector along which queue will advance.\n\n Returns\n -------\n Num_steps: The total number of queues that will be generated as a result of moving from the start_loc in direction dir_.\n\n \"\"\"\n step_limits = {}\n ## Iterate once for i-direction and once for j-direction\n for max_dim, axis, loc, u_vec in zip((self.game.height, self.game.width),\n ('i','j'),\n start_loc, \n dir_):\n if u_vec == 1:\n step_limits[axis] = max_dim-loc\n elif u_vec == -1:\n step_limits[axis] = loc+1\n elif u_vec == 0:\n step_limits[axis] = math.inf\n return min(step_limits.values())+4\n \n\n \n \n def edge_coords(self,side):\n '''\n Generates a list of coordinates for the points along an edge of a matrix.\n\n Returns\n -------\n coords : list of int pairs.\n Define the centers of the tiles which make up the edge of a matrix. 
Uses i,j notation\n\n '''\n assert side in ['right','left','top','bottom']\n ## \n vert_list = list(range(0,self.game.height))\n horz_list = list(range(0,self.game.width))\n if side=='left':\n i = list(vert_list)\n j = [0]*self.game.height\n elif side=='right':\n i = list(vert_list)\n j = [self.game.width-1]*self.game.height\n elif side == 'top':\n i = [0]*self.game.width\n j = list(horz_list)\n elif side == 'bottom':\n i = [self.game.height-1]*self.game.width\n j = list(horz_list)\n #return coords\n return list(zip(i,j)) \n\n def merged_edge_coords(self, edges):\n '''\n Generates a set of coordinates of locations along the edge of the game's matrix.\n\n Parameters\n ----------\n edges : list of strings\n Each element describes a direction whos points are to be combined (EX: 'top','bottom','right','left')\n\n Returns\n -------\n locs\n set of ij coordinate pairs describing locations along the outside of the game's matrix.\n\n '''\n locs = []\n for edge in edges:\n locs.extend(self.edge_coords(edge))\n return set(locs) \n\n def cell_dict_gen(self):\n cell_dict = {}\n for loc in self.playable_locs():\n cell_dict[loc] = Cell(loc)\n friend = self.game.next_player\n enemy = self.game.previous_player\n for PoT in self.PoTs:\n ptype = type(PoT)\n if PoT.player == friend:\n if ptype == HARD_THREAT:\n ## add hard threat defusers to the Cell at the proper location.\n for def_loc in PoT.defuser_locs:\n cell_dict[def_loc].HT_finish+=1\n elif ptype == SOFT_THREAT:\n ## Add hard threat defusers\n for def_loc in PoT.defuser_locs:\n cell_dict[def_loc].ST_finish +=1\n elif ptype == HARD_POWER:\n ## Add hard power triggers\n for trig_loc in PoT.trigger_locs:\n cell_dict[trig_loc].HP_triggers +=1\n elif ptype == SOFT_POWER:\n ## Add soft power triggers\n for trig_loc in PoT.trigger_locs:\n cell_dict[trig_loc].SP_triggers +=1\n for bstr_loc in PoT.booster_locs:\n cell_dict[bstr_loc].boosters +=1\n elif ptype == SPOT_TEMPLATE:\n ## Add SPot triggers\n for trig_loc in PoT.trigger_locs:\n cell_dict[trig_loc].SPot_triggers +=1\n elif ptype == HPOT_TEMPLATE:\n ## Add HPot template\n for bstr_loc in PoT.booster_locs:\n cell_dict[bstr_loc].boosters +=1\n elif PoT.player == enemy:\n if ptype == HARD_THREAT:\n ## add hard threat defusers\n for def_loc in PoT.defuser_locs:\n cell_dict[def_loc].HT_defusers += 1\n elif ptype == SOFT_THREAT:\n ## add soft threat defusers\n for def_loc in PoT.defuser_locs:\n cell_dict[def_loc].ST_defusers += 1\n elif ptype == SOFT_POWER:\n ## add soft power defusers\n for def_loc in PoT.defuser_locs:\n cell_dict[def_loc].SP_defusers += 1\n elif ptype == HARD_POWER:\n ## add hard power defusers\n for def_loc in PoT.defuser_locs:\n cell_dict[def_loc].HP_defusers += 1\n elif ptype == SPOT_TEMPLATE:\n ## Add SPot triggers\n for trig_loc in PoT.trigger_locs:\n cell_dict[trig_loc].EN_SPot_triggers +=1\n else:\n raise Exception(f\"'{PoT.player}' is not supposed to be an option.\")\n # print(f\"cell_dict: {cell_dict}\")\n # print(f\"playable_points: {self.playable_points()}\")\n # limited_cell_dict = {key:val for key, val in cell_dict.items() if key in self.playable_points()}\n return cell_dict\n\n def playable_locs(self, scan_reach = int(4)):\n '''\n returns a list of empty points within scan_reach of previously-played points\n\n Returns\n -------\n points: list of (x,y) int tuples.\n Empty points within scan_reach of previous points\n\n '''\n points = []\n for x in range(self.game.game_edges['left']-scan_reach, self.game.game_edges['right']+scan_reach+1):\n for y in 
range(self.game.game_edges['bottom']-scan_reach,self.game.game_edges['top']+scan_reach+1):\n points.append((int(x),int(y)))\n return points\n \n def xy_to_ij(self, xy):\n '''\n Transforms a coordinate pair from the game coordinate system to the matrix coordinate system\n '''\n x,y = xy\n return int(self.game.game_edges['top']-y), int(x-self.game.game_edges['left'])\n \n def ij_to_xy(self, ij):\n '''\n Transforms a coordinate pair from the matrix coordinate system to the game coordinate system.\n '''\n i,j = ij\n return j+self.game.game_edges['left'], self.game.game_edges['top']-i\n\n def rot_ij_to_xy(self,ij):\n '''\n rotates a unit vector in ij coords into xy coords\n\n Parameters\n ----------\n ij : array\n Describes unit vector in ij coords\n\n Returns\n -------\n xy : array\n Describes unit vector in xy coords\n\n '''\n return np.dot(IJ_TO_XY_R, ij)\n \n def rot_xy_to_ij(self, xy):\n '''\n Rotates a unit vector from xy to ij space\n\n Parameters\n ----------\n xy : array\n Describes unit vector in xy space\n\n Returns\n -------\n ij : array\n Describes unit vector in ij space\n\n '''\n return np.dot(XY_TO_IJ_R, xy)"
] | [
[
"numpy.dot",
"pandas.concat",
"pandas.DataFrame",
"numpy.ones",
"numpy.array",
"matplotlib.pyplot.ion"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
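
The IJ_TO_XY_R and XY_TO_IJ_R constants in FIAR_Analyzer above are inverse 90-degree rotations converting direction vectors between matrix (i, j) indexing and board (x, y) coordinates. A round-trip check on one unit vector:

import numpy as np

IJ_TO_XY_R = [[0, 1], [-1, 0]]   # copied from the file above
XY_TO_IJ_R = [[0, -1], [1, 0]]

step_down_ij = np.array([1, 0])              # one step down a matrix column
print(np.dot(IJ_TO_XY_R, step_down_ij))      # [ 0 -1]: one step in -y on the board
print(np.dot(XY_TO_IJ_R, np.dot(IJ_TO_XY_R, step_down_ij)))  # [1 0]: round trip recovers input
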
Aoi-hosizora/UNet-pytorch | [
"96951d5d1fdc6c6266a11e1bd97fbf72010bc87d"
] | [
"unet.py"
] | [
"import argparse\nimport time\nimport os\nimport json\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom model import UNet, loss_fn\nfrom dataset import SSDataset, load_test_image\n\n\ndef train(args):\n \"\"\"\n Train UNet from datasets\n \"\"\"\n\n # dataset\n print('Reading dataset from {}...'.format(args.dataset_path))\n train_dataset = SSDataset(dataset_path=args.dataset_path, is_train=True)\n val_dataset = SSDataset(dataset_path=args.dataset_path, is_train=False)\n train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True)\n val_dataloader = DataLoader(dataset=val_dataset, batch_size=args.batch_size, shuffle=False)\n\n # mask\n with open(args.mask_json_path, 'w', encoding='utf-8') as mask:\n colors = SSDataset.all_colors\n mask.write(json.dumps(colors))\n print('Mask colors list has been saved in {}'.format(args.mask_json_path))\n\n # model\n net = UNet(in_channels=3, out_channels=5)\n if args.cuda:\n net = net.cuda()\n\n # setting\n lr = args.lr # 1e-3\n optimizer = optim.Adam(net.parameters(), lr=lr)\n criterion = loss_fn\n\n # run\n train_losses = []\n val_losses = []\n print('Start training...')\n for epoch_idx in range(args.epochs):\n # train\n net.train()\n train_loss = 0\n for batch_idx, batch_data in enumerate(train_dataloader):\n xs, ys = batch_data\n if args.cuda:\n xs = xs.cuda()\n ys = ys.cuda()\n ys_pred = net(xs)\n loss = criterion(ys_pred, ys)\n train_loss += loss\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # val\n net.eval()\n val_loss = 0\n for batch_idx, batch_data in enumerate(val_dataloader):\n xs, ys = batch_data\n if args.cuda:\n xs = xs.cuda()\n ys = ys.cuda()\n ys_pred = net(xs)\n loss = loss_fn(ys_pred, ys)\n val_loss += loss\n\n train_losses.append(train_loss)\n val_losses.append(val_loss)\n print('Epoch: {}, Train total loss: {}, Val total loss: {}'.format(epoch_idx + 1, train_loss.item(), val_loss.item()))\n\n # save\n if (epoch_idx + 1) % args.save_epoch == 0:\n checkpoint_path = os.path.join(args.checkpoint_path, 'checkpoint_{}.pth'.format(epoch_idx + 1))\n torch.save(net.state_dict(), checkpoint_path)\n print('Saved Checkpoint at Epoch {} to {}'.format(epoch_idx + 1, checkpoint_path))\n\n # summary\n if args.do_save_summary:\n epoch_range = list(range(1, args.epochs + 1))\n plt.plot(epoch_range, train_losses, 'r', label='Train loss')\n plt.plot(epoch_range, val_loss, 'g', label='Val loss')\n plt.imsave(args.summary_image)\n print('Summary images have been saved in {}'.format(args.summary_image))\n\n # save\n net.eval()\n torch.save(net.state_dict(), args.model_state_dict)\n print('Saved state_dict in {}'.format(args.model_state_dict))\n\n\ndef test(args):\n \"\"\"\n Test some data from trained UNet\n \"\"\"\n image = load_test_image(args.test_image) # 1 c w h\n net = UNet(in_channels=3, out_channels=5)\n if args.cuda:\n net = net.cuda()\n image = image.cuda()\n print('Loading model param from {}'.format(args.model_state_dict))\n net.load_state_dict(torch.load(args.model_state_dict))\n net.eval()\n\n print('Predicting for {}...'.format(args.test_image))\n ys_pred = net(image) # 1 ch w h\n\n colors = []\n with open(args.mask_json_path, 'r', encoding='utf-8') as mask:\n print('Reading mask colors list from {}'.format(args.mask_json_path))\n colors = json.loads(mask.read())\n colors = [tuple(c) for c in colors]\n print('Mask 
colors: {}'.format(colors))\n\n ys_pred = ys_pred.cpu().detach().numpy()[0]\n ys_pred[ys_pred < 0.5] = 0\n ys_pred[ys_pred >= 0.5] = 1\n ys_pred = ys_pred.astype(np.int)\n image_w = ys_pred.shape[1]\n image_h = ys_pred.shape[2]\n out_image = np.zeros((image_w, image_h, 3))\n\n for w in range(image_w):\n for h in range(image_h):\n for ch in range(ys_pred.shape[0]):\n if ys_pred[ch][w][h] == 1:\n out_image[w][h][0] = colors[ch][0]\n out_image[w][h][1] = colors[ch][1]\n out_image[w][h][2] = colors[ch][2]\n\n out_image = out_image.astype(np.uint8) # w h c\n out_image = out_image.transpose((1, 0, 2)) # h w c\n out_image = Image.fromarray(out_image)\n out_image.save(args.test_save_path)\n print('Segmentation result has been saved to {}'.format(args.test_save_path))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--do_train', action='store_true', help='Do train')\n parser.add_argument('--do_test', action='store_true', help='Do test')\n parser.add_argument('--no_gpu', action='store_true', help='Do not use GPU to train and test')\n parser.add_argument('--dataset_path', type=str, default='./dataset/', help='Dataset folder path')\n parser.add_argument('--mask_json_path', type=str, default='./mask.json', help='Mask json path')\n parser.add_argument('--batch_size', type=int, default=128, help='Train and validate batch size')\n parser.add_argument('--epochs', type=int, default=10, help='Train epoch number')\n parser.add_argument('--lr', type=float, default=1e-3, help='Adam learning rate')\n parser.add_argument('--save_epoch', type=int, default=1, help='Save checkpoint every epoch')\n parser.add_argument('--checkpoint_path', type=str, default='./model/', help='Model checkpoint save path')\n parser.add_argument('--model_state_dict', type=str, default='./model/model.pth', help='Model load and sav path')\n parser.add_argument('--do_save_summary', action='store_true', help='Do save summary image')\n parser.add_argument('--summary_image', type=str, default='./summary.png', help='Summary image save path')\n parser.add_argument('--test_image', type=str, default='./test.png', help='Test image path')\n parser.add_argument('--test_save_path', type=str, default='./test_seg.png', help='Test predict save path')\n args = parser.parse_args()\n assert (args.do_train or args.do_test), 'You must do train or test'\n\n args.cuda = not args.no_gpu and torch.cuda.is_available()\n print('\\nParameters: ')\n for k, v in zip(args.__dict__.keys(), args.__dict__.values()):\n print('\\t{}: {}'.format(k, v))\n print('\\n')\n\n if args.do_train:\n train(args)\n\n if args.do_test:\n test(args)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.imsave",
"torch.load",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.plot",
"torch.cuda.is_available",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
poppin-mice/ShiftAddNet | [
"a17369a50da5bba6250fdeac7c065bd00f293f3c"
] | [
"models/wideres_shift.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport sys\nimport numpy as np\nfrom adder import adder\n# from models import adder\n\n__all__ = ['wideres_shift']\n\ndef conv3x3(in_planes, out_planes, stride=1, quantize=False, weight_bits=8, sparsity=0):\n \"\"\"3x3 convolution with padding\"\"\"\n shift = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n # add = adder.Adder2D(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)\n # return nn.Sequential(shift, add)\n return shift\n\ndef conv(in_planes, out_planes, kernel_size=3, stride=1, padding=0, quantize=False, weight_bits=8, sparsity=0):\n \" 3x3 convolution with padding \"\n shift = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)\n # add = adder.Adder2D(in_planes, out_planes, kernel_size=kernel_size, stride=1, padding=padding, bias=False, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)\n # return nn.Sequential(shift, add)\n return shift\n\n# def conv3x3(in_planes, out_planes, stride=1):\n# return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\ndef conv_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.xavier_uniform_(m.weight, gain=np.sqrt(2))\n init.constant_(m.bias, 0)\n elif classname.find('BatchNorm') != -1:\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n\nclass wide_basic(nn.Module):\n def __init__(self, in_planes, planes, dropout_rate, stride=1, quantize=False, weight_bits=8, sparsity=0):\n super(wide_basic, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n # self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)\n self.conv1 = conv(in_planes, planes, kernel_size=3, padding=1, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)\n self.dropout = nn.Dropout(p=dropout_rate)\n self.bn2 = nn.BatchNorm2d(planes)\n # self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)\n self.conv2 = conv(planes, planes, kernel_size=3, stride=stride, padding=1, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n self.shortcut = nn.Sequential(\n # nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),\n conv(in_planes, planes, kernel_size=1, stride=stride, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity),\n )\n\n def forward(self, x):\n # print(x.shape)\n out = self.dropout(self.conv1(F.relu(self.bn1(x))))\n out = self.conv2(F.relu(self.bn2(out)))\n # print(out.shape)\n # print(self.shortcut(x).shape)\n out += self.shortcut(x)\n\n return out\n\nclass Wide_ResNet(nn.Module):\n def __init__(self, depth, widen_factor, dropout_rate, num_classes=10, quantize=False, weight_bits=8, sparsity=0):\n super(Wide_ResNet, self).__init__()\n self.in_planes = 16\n self.quantize = quantize\n self.weight_bits = weight_bits\n self.sparsity = sparsity\n\n assert ((depth-4)%6 ==0), 'Wide-resnet depth should be 6n+4'\n n = (depth-4)/6\n k = widen_factor\n\n print('| Wide-Resnet %dx%d' %(depth, k))\n nStages = [16, 16*k, 32*k, 64*k]\n\n self.conv1 = conv3x3(3,nStages[0])\n self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)\n self.layer2 = self._wide_layer(wide_basic, nStages[2], n, 
dropout_rate, stride=2)\n self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)\n self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)\n self.linear = nn.Linear(nStages[3], num_classes)\n\n def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):\n strides = [stride] + [1]*(int(num_blocks)-1)\n layers = []\n\n for stride in strides:\n layers.append(block(self.in_planes, planes, dropout_rate, stride, quantize=self.quantize, weight_bits=self.weight_bits, sparsity=self.sparsity))\n self.in_planes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.relu(self.bn1(out))\n out = F.avg_pool2d(out, 8)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n\n return out\n\ndef wideres_shift(num_classes=10, quantize=False, weight_bits=8, sparsity=0, **kwargs):\n return Wide_ResNet(16, 8, 0.3, num_classes=num_classes, quantize=quantize,\n weight_bits=weight_bits, sparsity=sparsity)\n\nif __name__ == '__main__':\n net=Wide_ResNet(28, 10, 0.3, 10)\n y = net(Variable(torch.randn(1,3,32,32)))\n\n print(y.size())"
] | [
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"numpy.sqrt",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ps789/torchkit | [
"7bbe19c1b0f709097417bd4277f7b7f1e7fb79a3"
] | [
"torchkit/gradient_estimator.py"
] | [
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 4 21:41:42 2018\n\n@author: chin-weihuang\n\"\"\"\n\n\nimport torch\nfrom torch.autograd import backward\n\n\n\n\ndef reinforce(reward, log_prob, retain_graph=False, iib=None, idb=None):\n reward = reward.detach()\n if idb is not None:\n b = idb.detach()\n else:\n b = torch.zeros_like(reward)\n if iib is not None:\n c = iib.detach()\n else:\n c = torch.zeros(1)\n if idb is not None and iib is not None:\n ((reward - idb - iib)**2).mean().backward()\n if idb is not None and iib is None:\n ((reward - idb)**2).mean().backward()\n if idb is None and iib is not None:\n ((reward - iib)**2).mean().backward()\n\n r = reward-b-c\n backward(-(log_prob*r).mean(), retain_graph=retain_graph)\n\n\n\n\n\n\nif __name__ == '__main__':\n\n from torchvision import datasets, transforms\n from torchkit import transforms as transforms_\n from torchkit import helpers\n from torchkit import autoencoders as aes\n from torch import optim, nn\n from itertools import chain\n from torchkit import utils\n import numpy as np\n\n nmc = 3\n lr1 = 0.0015\n lr2 = 0.0003\n batch_size = 20\n zdim = 200\n epoch = 10\n print_every = 50\n\n droot, sroot, spath = helpers.getpaths()\n helpers.create(droot, 'mnist')\n\n\n ds_transforms = transforms.Compose(\n [transforms.ToTensor(), transforms_.binarize()])\n\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(droot+'/mnist', download=True, train=True,\n transform=ds_transforms), batch_size=batch_size,\n shuffle=True)\n\n enc = aes.BinaryLinear(784, zdim)\n dec = aes.BinaryLinear(zdim, 784)\n prior = aes.BinaryPrior(zdim)\n iib = nn.parameter.Parameter(torch.zeros(1)-200)\n\n optim1 = optim.Adam(\n chain(dec.parameters(),\n prior.parameters(),\n [iib]),\n lr=lr1/float(nmc))\n optim2 = optim.Adam(\n chain(enc.parameters()),\n lr=lr2/float(nmc))\n zero = utils.varify(np.zeros(1).astype('float32'))\n\n def ELBO(x):\n z = enc.sample(x)\n px_z = dec.evaluate(z,x)\n qz_x = enc.evaluate(x,z)\n pz = prior.evaluate(z)\n elbo = px_z + pz - qz_x.detach()\n return elbo, qz_x\n\n def get_grad(x, multiply=1):\n n = x.size(0)\n x = x.repeat([multiply, 1])\n elbo, q = ELBO(x)\n reinforce(elbo, q, idb=None, iib=iib)\n iwlb = utils.log_mean_exp(elbo.view(multiply,n).permute(1,0),1)\n loss = (-iwlb).mean()\n loss.backward()\n return loss.data.cpu().numpy()\n\n # begin training\n count = 0\n for e in range(epoch):\n for x, _ in train_loader:\n optim1.zero_grad()\n optim2.zero_grad()\n x = x.view(-1,784) * 2.0 - 1.0\n loss = get_grad(x, nmc)\n optim1.step()\n optim2.step()\n count += 1\n if count % print_every == 0:\n print(('[{}] {}'.format(e, loss)))\n"
] | [
[
"torch.zeros_like",
"numpy.zeros",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Andrewwango/femda | [
"c072a065687ab32805bdfa48d34c75e05ffd959e"
] | [
"femda/_fem.py"
] | [
"import numpy as np\nimport random\nimport pandas as pd\n\n# MATH and STATS:\nimport math\nfrom scipy.stats import multivariate_normal\nfrom scipy.stats import chi2\nfrom scipy.stats._multivariate import _PSD \n\n# for initialization of cluster's centers:\nfrom sklearn.cluster import KMeans\nfrom scipy.spatial import cKDTree\n\nfrom._algo_utils import regularize\nimport matplotlib.pyplot as plt\n\nclass FEM():\n '''Implements the F-EM algorithm \n \n Parameters\n ----------\n K : int\n The number of mixture components.\n max_iter: int\n maximum number of iterations of the algorithm.\n rand_initialization: bool\n True if random initialization\n False if K-Means initialization.\n version: {1, 2, 3, 4}\n version of the algorithm\n 1: with old, old, not square root\n 2: with old, new, not square root\n 3: with old, old, with square root\n 4: with old, new, with square root.\n max_iter_fp: integer>0\n maximum number of fixed-point iterations\n \n \n Attributes\n ----------\n alpha_ : array-like, shape (n,)\n The weight of each mixture components.\n mu_ : array-like, shape (n, p)\n The mean of each mixture component.\n Sigma_ : array-like, shape (p, p)\n The covariance of each mixture component.\n tau_ : array-like, shape (n, K)\n The collection of tau values.\n labels_ : array-like, shape (n,)\n The clustering labels of the data points.\n converged_ : bool\n True when convergence was reached in fit(), False otherwise.\n n_iter_ : int\n Number of step used to reach the convergence. \n '''\n \n def __init__(self, K, max_iter = 200, \n rand_initialization = False, \n version = 1, max_iter_fp = 20, thres = None):\n self.K = K\n self.converged_ = False\n self.version = version\n self.rand_initialization = rand_initialization\n self.max_iter = max_iter\n self.max_iter_fp = max_iter_fp\n self.thres = thres\n self.alpha_ = None\n self.mu_ = None\n self.Sigma_ = None\n self.tau_ = None\n self.n_iter_ = None\n self.labels_ = None\n \n def _initialize(self, X):\n '''Initialize all the parameters of the model:\n theta = (alpha, mu, sigma, tau)\n Either randomly or with kmeans centers.\n \n Parameters\n ----------\n X: array-like, shape (n, p)\n \n '''\n \n n, p = X.shape\n\n if self.rand_initialization:\n self.alpha_ = np.random.rand(self.K)\n self.alpha_ /= np.sum(self.alpha_) \n self.mu_ = (np.amax(X, axis=0)-np.amin(X, axis=0)) * np.random.random_sample((self.K, p))+ np.amin(X, axis=0)\n self.Sigma_ = np.zeros((self.K, p, p))\n self.tau_ = np.ones((n, self.K))\n for k in range(self.K):\n self.Sigma_[k] = np.eye(p)\n\n else:\n \n one_point_clusters = False\n \n kmeans = KMeans(n_clusters = self.K, max_iter = 200).fit(X)\n \n for k in range(self.K):\n \n nk = np.count_nonzero(kmeans.labels_ == k)\n \n if nk <= 2 and n>10:\n one_point_clusters = True\n \n ite_filter = 0\n n_filter = n\n \n if one_point_clusters:\n \n tree = cKDTree(X)#tree of nearest neighbors\n KNN=4\n dd, index = tree.query(X, k=[KNN]) # query for all points in data the Kth NN, returns distances and indexes\n \n dd = np.reshape(dd, (n,))\n \n alpha_quantile = 0.95\n \n while one_point_clusters and alpha_quantile > 0.5:\n \n ite_filter += 1\n \n alpha_quantile -= (0.1) * (ite_filter - 1)\n \n one_point_clusters = False\n \n X_without_extremes = X[dd < np.quantile(dd, alpha_quantile) , :]\n \n n_filter = X_without_extremes.shape[0]\n\n kmeans = KMeans(n_clusters=self.K, max_iter=200).fit(X_without_extremes)\n \n for k in range(self.K):\n \n nk = np.count_nonzero(kmeans.labels_ == k)\n \n if nk <= 2:\n \n one_point_clusters = True\n \n 
self.alpha_ = np.zeros((self.K,))\n self.mu_ = np.zeros((self.K, p))\n self.Sigma_ = np.zeros((self.K, p, p)) \n\n for k in range(self.K):\n nk = np.count_nonzero(kmeans.labels_ == k)\n self.alpha_[k] = float(nk)/float(n_filter)\n self.mu_[k] = kmeans.cluster_centers_[k]\n self.Sigma_[k] = np.eye(p) # cov result in nan sometimes\n\n self.tau_ = np.ones((n, self.K)) \n\n \n def _e_step(self, X):\n ''' E-step of the algorithm\n Computes the conditional probability of the model\n \n Parameters\n ----------\n X: array-like, shape (n, p)\n data\n \n Returns\n ----------\n cond_prob_matrix: array-like, shape (n, K)\n (cond_prob_matrix)_ik = P(Z_i=k|X_i=x_i)\n '''\n n, p = X.shape\n \n K = len(self.alpha_)\n \n cond_prob_matrix = np.zeros((n,K))\n \n for k in range(K):\n \n psd = _PSD(self.Sigma_[k])\n prec_U, logdet = psd.U, psd.log_pdet\n diff = X - self.mu_[k]\n logdensity = -0.5 * (p * np.log(2 * np.pi) + p * np.log(self.tau_[:, k]) + logdet + p)\n #print(self.tau_[:,k])\n cond_prob_matrix[:, k] = np.exp(logdensity) * self.alpha_[k] \n \n sum_row = np.sum(cond_prob_matrix, axis = 1) \n bool_sum_zero = (sum_row == 0)\n \n cond_prob_matrix[bool_sum_zero, :] = self.alpha_ \n cond_prob_matrix /= cond_prob_matrix.sum(axis=1)[:,np.newaxis]\n\n return cond_prob_matrix\n \n def _m_step(self, X, cond_prob):\n ''' M-step of the algorithm\n Updates all the parameters with the new conditional probabilities\n \n Parameters\n ----------\n X: array-like, shape (n, p)\n data \n cond_prob_matrix: array-like, shape (n, K)\n (cond_prob_matrix)_ik = P(Z_i=k|X_i=x_i)\n \n Returns\n ----------\n alpha_new: array-like, shape (n,)\n The new weights of each mixture components.\n mu_new: array-like, shape (n, p)\n The new mean of each mixture component.\n Sigma_new: array-like, shape (p, p)\n The new covariance of each mixture component.\n tau_new: array-like, shape (n, K)\n The collection of tau values.\n '''\n \n n, p = X.shape\n \n alpha_new = np.zeros((self.K,))\n mu_new = np.zeros((self.K, p))\n Sigma_new = np.zeros((self.K, p, p))\n tau_new = np.ones((n, self.K))\n\n for k in range(self.K):\n\n # UPDATE alpha:\n alpha_new[k] = np.mean(cond_prob[:, k])\n\n # Fixed-point equation for Sigma and mu:\n # UPDATE mu\n # UPDATE Sigma\n mu_fixed_point = self.mu_[k].copy()\n Sigma_fixed_point = self.Sigma_[k].copy()\n tau_ite = np.ones((n, ))\n tau_ite_sr = np.ones((n, ))\n convergence_fp = False\n ite_fp = 1\n mean_error = []\n while not(convergence_fp) and ite_fp<self.max_iter_fp:\n if ite_fp > 198: \n print(\"m-step not converged\", mean_error[-1])\n inv_Sigma_fixed_point = np.linalg.inv(regularize(Sigma_fixed_point))\n diff = X - mu_fixed_point \n sq_maha = (np.dot(diff, inv_Sigma_fixed_point) * diff).sum(1) # multiple quadratic form\n \n tau_ite = sq_maha / p \n tau_ite_sr = (sq_maha**(0.5))/p\n tau_ite = np.where(tau_ite<10**(-8) , 10**(-8),\n np.where(tau_ite>10**(8), 10**(8), tau_ite))\n tau_ite_sr = np.where(tau_ite_sr<10**(-8) , 10**(-8),\n np.where(tau_ite_sr>10**(8), 10**(8), tau_ite_sr))\n\n if self.version == 1 or self.version ==2:\n Ck = (cond_prob[:, k]/tau_ite)/np.sum(cond_prob[:,k]/tau_ite)\n else: # 3 or 4\n Ck = (cond_prob[:, k]/tau_ite_sr)/np.sum(cond_prob[:,k]/tau_ite_sr)\n\n mu_fixed_point_new = np.sum(np.multiply(X, Ck[:, np.newaxis]), 0)\n \n if self.version == 2 or self.version == 4: # if usig new estim, update denominator\n \n diff = X - mu_fixed_point_new \n sq_maha = (np.dot(diff, inv_Sigma_fixed_point) * diff).sum(1) # multiple quadratic form\n tau_ite = sq_maha / p \n tau_ite_sr = 
(sq_maha**(0.5))/p\n tau_ite = np.where(tau_ite<10**(-8) , 10**(-8),\n np.where(tau_ite>10**(8), 10**(8), tau_ite))\n tau_ite_sr = np.where(tau_ite_sr<10**(-8) , 10**(-8),\n np.where(tau_ite_sr>10**(8), 10**(8), tau_ite_sr))\n \n if self.version==1:\n \n diff = X - mu_fixed_point\n Sigma_fixed_point_new = np.dot(cond_prob[:, k]/tau_ite * diff.T, diff) / (n * alpha_new[k])\n Sigma_fixed_point_new *= p / np.trace(Sigma_fixed_point_new)\n \n if self.version==2:\n \n diff = X - mu_fixed_point_new\n Sigma_fixed_point_new = np.dot(cond_prob[:, k]/tau_ite * diff.T, diff) / (n * alpha_new[k])\n Sigma_fixed_point_new *= p / np.trace(Sigma_fixed_point_new)\n \n if self.version==3:\n \n diff = X - mu_fixed_point\n Sigma_fixed_point_new = np.dot(cond_prob[:, k]/tau_ite_sr * diff.T, diff) / (n * alpha_new[k])\n Sigma_fixed_point_new *= p / np.trace(Sigma_fixed_point_new)\n\n if self.version==4: \n \n diff = X - mu_fixed_point_new\n Sigma_fixed_point_new = np.dot(cond_prob[:, k]/tau_ite_sr * diff.T, diff) / (n * alpha_new[k])\n Sigma_fixed_point_new *= p / np.trace(Sigma_fixed_point_new)\n\n convergence_fp = True\n convergence_fp = convergence_fp and (math.sqrt(np.inner(mu_fixed_point - mu_fixed_point_new, mu_fixed_point - mu_fixed_point_new)/p) < 10**(-6))\n mean_error += [(math.sqrt(np.inner(mu_fixed_point - mu_fixed_point_new, mu_fixed_point - mu_fixed_point_new)/p))]\n #if convergence_fp : print(\"mean convergence\")\n convergence_fp = convergence_fp and (np.linalg.norm(Sigma_fixed_point_new-Sigma_fixed_point, ord='fro')/p) < 10**(-6)\n if convergence_fp: print(\"m-step converged\")\n mu_fixed_point = mu_fixed_point_new.copy()\n Sigma_fixed_point = Sigma_fixed_point_new.copy() \n\n ite_fp += 1\n\n mu_new[k] = mu_fixed_point\n Sigma_new[k] = regularize(Sigma_fixed_point)\n\n # UPDATE tau\n diff = X - mu_new[k]\n tau_new[:, k] = (np.dot(diff, np.linalg.inv(regularize(Sigma_new[k]))) * diff).sum(1) / p\n tau_new[:, k] = np.where(tau_new[:, k] < 10**(-12) , 10**(-12),\n np.where(tau_new[:, k] > 10**(12), 10**(12), tau_new[:, k]))\n #print(mean_error)\n #plt.plot(mean_error)\n #plt.show()\n\n return alpha_new, mu_new, Sigma_new, tau_new\n \n def fit(self, X):\n ''' Fit the data to the model running the F-EM algorithm\n \n Parameters\n ----------\n X: array-like, shape (n, p)\n data \n \n Returns\n ----------\n self\n '''\n \n n, p = X.shape\n \n self._initialize(X)\n\n convergence = False\n\n ite = 0\n \n while not(convergence) and ite < self.max_iter:\n\n # Compute conditional probabilities:\n cond_prob = self._e_step(X)\n\n # Update estimators:\n alpha_new, mu_new, Sigma_new, tau_new = self._m_step(X, cond_prob)\n\n # Check convergence:\n if ite > 5: # tol from fixed point should be bigger than general tolerance rate \n convergence = True\n k = 0\n while convergence and k<self.K:\n \n convergence = convergence and math.sqrt(np.inner(mu_new[k]-self.mu_[k], mu_new[k]-self.mu_[k])/p) < 10**(-5)\n convergence = convergence and ((np.linalg.norm(Sigma_new[k]-self.Sigma_[k], ord='fro')/(p)) < 10**(-5))\n convergence = convergence and (math.fabs(alpha_new[k]-self.alpha_[k]) < 10**(-3))\n \n k += 1\n \n self.alpha_ = np.copy(alpha_new)\n self.mu_ = np.copy(mu_new)\n self.Sigma_ = np.copy(Sigma_new)\n self.tau_ = np.copy(tau_new)\n \n ite += 1\n \n self.labels_ = np.array([i for i in np.argmax(cond_prob, axis=1)])\n self.n_iter_ = ite\n self.converged_ = convergence\n \n # Outlier rejection \n \n outlierness = np.zeros((n, )).astype(bool)\n \n if self.thres is None :\n self.thres = 0.05 \n thres = chi2.ppf(1 - 
self.thres, p)\n \n for k in range(self.K):\n \n data_cluster = X[self.labels_ == k,:]\n diff_cluster = data_cluster - self.mu_[k]\n sig_cluster = np.mean(diff_cluster * diff_cluster) \n maha_cluster = (np.dot(diff_cluster, np.linalg.inv(self.Sigma_[k])) * diff_cluster).sum(1) / sig_cluster\n outlierness[self.labels_ == k] = (maha_cluster > thres)\n \n self.labels_[outlierness] = -1\n \n self.labels_ = self.labels_.astype(str)\n \n return(self)\n \n def predict(self, Xnew, thres = None):\n \n n, p = Xnew.shape\n \n cond_prob_matrix = np.zeros((n, self.K))\n \n for k in range(self.K):\n \n psd = _PSD(self.Sigma_[k])\n prec_U, logdet = psd.U, psd.log_pdet\n diff = Xnew - self.mu_[k]\n sig = np.mean(diff * diff) \n maha = (np.dot(diff, np.linalg.inv(self.Sigma_[k])) * diff).sum(1) \n logdensity = -0.5 * (logdet + maha) \n cond_prob_matrix[:, k] = np.exp(logdensity) * self.alpha_[k] \n \n sum_row = np.sum(cond_prob_matrix, axis = 1) \n bool_sum_zero = (sum_row == 0)\n \n cond_prob_matrix[bool_sum_zero, :] = self.alpha_ \n cond_prob_matrix /= cond_prob_matrix.sum(axis=1)[:,np.newaxis]\n \n new_labels = np.array([i for i in np.argmax(cond_prob_matrix, axis=1)])\n \n outlierness = np.zeros((n, )).astype(bool)\n \n if thres is None :\n thres = self.thres \n thres = chi2.ppf(1 - thres, p)\n \n for k in range(self.K):\n \n data_cluster = Xnew[new_labels == k,:]\n diff_cluster = data_cluster - self.mu_[k]\n sig_cluster = np.mean(diff_cluster * diff_cluster) \n maha_cluster = (np.dot(diff_cluster, np.linalg.inv(self.Sigma_[k])) * diff_cluster).sum(1) / sig_cluster\n outlierness[new_labels == k] = (maha_cluster > thres)\n \n new_labels[outlierness] = -1\n \n new_labels = new_labels.astype(str)\n \n return(new_labels)\n"
] | [
[
"numpy.dot",
"numpy.amax",
"sklearn.cluster.KMeans",
"numpy.random.random_sample",
"numpy.mean",
"numpy.exp",
"numpy.where",
"scipy.spatial.cKDTree",
"numpy.trace",
"numpy.reshape",
"numpy.eye",
"numpy.copy",
"numpy.argmax",
"numpy.count_nonzero",
"numpy.zeros",
"scipy.stats.chi2.ppf",
"numpy.log",
"numpy.multiply",
"numpy.amin",
"numpy.linalg.inv",
"numpy.quantile",
"numpy.random.rand",
"scipy.stats._multivariate._PSD",
"numpy.sum",
"numpy.inner",
"numpy.linalg.norm",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jayaram-r/adversarial-detection | [
"0173b19a7352a2ec769f24a89d4e2cf8f4423514"
] | [
"expts/nets/svhn.py"
] | [
"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\n\n\nclass SVHN(nn.Module):\n def __init__(self):\n super(SVHN, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, 3, 1)\n self.conv2 = nn.Conv2d(64, 64, 3, 1)\n self.dropout1 = nn.Dropout2d(0.5)\n self.dropout2 = nn.Dropout2d(0.5)\n self.dropout3 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(12544, 512)\n self.fc2 = nn.Linear(512, 128)\n self.fc3 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.dropout3(x)\n x = self.fc3(x)\n output = F.log_softmax(x, dim=1)\n return output\n \n def penultimate_forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n x = F.relu(x)\n pen = self.dropout3(x)\n x = self.fc3(pen)\n output = F.log_softmax(x, dim=1)\n return output, pen\n\n def intermediate_forward(self, x, layer_index):\n if layer_index == 0:\n return x\n\n x = self.conv1(x)\n x = F.relu(x)\n if layer_index == 1:\n return x\n\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n if layer_index == 2:\n return x\n\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n if layer_index == 3:\n return x\n\n x = self.fc2(x)\n x = F.relu(x)\n x = self.dropout3(x)\n if layer_index == 4:\n return x\n\n x = self.fc3(x)\n if layer_index == 5:\n return x\n\n def layer_wise(self, x):\n # Method to get the layer-wise embeddings for the proposed method\n # Input is included as the first layer\n output = [x] # 1\n\n x = self.conv1(x)\n # output.append(x)\n x = F.relu(x)\n output.append(x) # 2\n\n x = self.conv2(x)\n # output.append(x)\n x = F.relu(x)\n # output.append(x)\n x = F.max_pool2d(x, 2)\n # output.append(x)\n x = self.dropout1(x)\n output.append(x) # 3\n\n x = torch.flatten(x, 1)\n # output.append(x)\n x = self.fc1(x)\n # output.append(x)\n x = F.relu(x)\n # output.append(x)\n x = self.dropout2(x)\n output.append(x) # 4\n\n x = self.fc2(x)\n # output.append(x)\n x = F.relu(x)\n # output.append(x)\n x = self.dropout3(x)\n output.append(x) # 5\n\n x = self.fc3(x)\n output.append(x) # 6 (logits)\n\n # final = F.log_softmax(x, dim=1)\n # output.append(x)\n return output\n\n def layer_wise_deep_mahalanobis(self, x):\n # Method to get the layer-wise embeddings for the deep mahalanobis detection method\n # Input is included as the first layer\n output = [x] # 1\n\n x = self.conv1(x)\n # output.append(x)\n x = F.relu(x)\n output.append(x) # 2\n\n x = self.conv2(x)\n # output.append(x)\n x = F.relu(x)\n # output.append(x)\n x = F.max_pool2d(x, 2)\n # output.append(x)\n x = self.dropout1(x)\n output.append(x) # 3\n\n x = torch.flatten(x, 1)\n # output.append(x)\n x = self.fc1(x)\n # output.append(x)\n x = F.relu(x)\n # output.append(x)\n x = self.dropout2(x)\n output.append(x) # 4\n\n x = self.fc2(x)\n # output.append(x)\n x = F.relu(x)\n # output.append(x)\n x = self.dropout3(x)\n output.append(x) # 5\n\n x = self.fc3(x)\n output.append(x) # 6 (logits)\n\n final = F.log_softmax(x, dim=1)\n # 
output.append(x)\n return final, output\n\n def layer_wise_odds_are_odd(self, x):\n # Method to get the latent layer and logit layer outputs for the \"odds-are-odd\" method\n output = []\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.dropout3(x)\n output.append(x) # latents\n x = self.fc3(x)\n output.append(x) # logits\n # final = F.log_softmax(x, dim=1)\n\n return output\n\n def layer_wise_lid_method(self, x):\n # Method to get the layer-wise embeddings for the LID adversarial subspaces paper\n # Input is included as the first layer\n output = [x] # 1\n x = self.conv1(x)\n output.append(x) # 2\n x = F.relu(x)\n output.append(x) # 3\n x = self.conv2(x)\n output.append(x) # 4\n x = F.relu(x)\n output.append(x) # 5\n x = F.max_pool2d(x, 2)\n output.append(x) # 6\n x = self.dropout1(x)\n # Skipping this layer and taking its flattened version\n # output.append(x)\n x = torch.flatten(x, 1)\n output.append(x) # 7\n x = self.fc1(x)\n output.append(x) # 8\n x = F.relu(x)\n output.append(x) # 9\n x = self.dropout2(x)\n output.append(x) # 10\n x = self.fc2(x)\n output.append(x) # 11\n x = F.relu(x)\n output.append(x) # 12\n x = self.dropout3(x)\n output.append(x) # 13\n x = self.fc3(x)\n output.append(x) # 14 (logits)\n\n # Skipping this layer because it is simply a shifted version of the logit layer\n # final = F.log_softmax(x, dim=1)\n # output.append(x)\n return output\n"
] | [
[
"torch.nn.Dropout2d",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.flatten",
"torch.nn.functional.max_pool2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
redb0/gotpy | [
"b3f2e12aff429e0bff0faa079a3694378293c974"
] | [
"algorithm/algabc.py"
] | [
"# TODO: добавить типы\nimport itertools\n\nimport numpy as np\n\nimport support\n\n\nclass Options:\n _alias_map = {\n 'number_points': ['n', 'np'],\n 'number_iter': ['ni', 'iter'],\n 'k_noise': ['kn']\n }\n _required_keys = ('number_points', 'number_iter')\n\n def __init__(self, **kwargs):\n kw = support.normalize_kwargs(kwargs,\n alias_map=Options._alias_map,\n required=Options._required_keys)\n print(kw)\n self._number_points = kw['number_points']\n self._number_iter = kw['number_iter']\n self._k_noise = 0 if 'k_noise' not in kw else kw['k_noise']\n\n def update_op(self, **kwargs):\n kw = support.normalize_kwargs(kwargs, alias_map=Options._alias_map)\n for k, v in kw.items():\n print(k, v)\n if k in Options._alias_map:\n self.__setattr__(k, v)\n\n @property\n def number_points(self):\n return self._number_points\n\n @number_points.setter\n def number_points(self, val):\n self._number_points = val\n\n @property\n def number_iter(self):\n return self._number_iter\n\n @number_iter.setter\n def number_iter(self, val):\n self._number_iter = val\n\n @property\n def k_noise(self):\n return self._k_noise\n\n @k_noise.setter\n def k_noise(self, val):\n self._k_noise = val\n\n\nclass Algorithm:\n def __init__(self, class_name='', name='', full_name='', op=None, **kwargs):\n self._class_name = class_name\n self._name = name\n self._full_name = full_name\n\n self._options = op\n\n def probability_estimate(self, tf, op, iteration: dict, ep: float=0.2, number_runs: int=100, min_flag: int=1,\n *args, **kwargs):\n ar = list(iteration.values())\n size = tuple(len(i) for i in ar)\n idxs = list(itertools.product(*(list(range(len(i))) for i in ar)))\n items = list((dict(zip(iteration.keys(), values)) for values in itertools.product(*iteration.values())))\n res = np.zeros(size)\n for i in range(len(idxs)):\n print('index:', idxs[i])\n print('item:', items[i])\n op.update_op(**items[i])\n print(op)\n p = 0\n for j in range(number_runs):\n x_bests, *_ = self.optimization(tf, min_flag=min_flag, *args, **kwargs)\n if tf.in_vicinity(x_bests, epsilon=ep):\n p += 1\n res[idxs[i]] = p / number_runs\n print('Оценка вероятности', res[idxs[i]])\n print('-' * 20)\n return res\n\n def optimization(self, *args, **kwargs):\n pass\n\n @property\n def class_name(self):\n return self._class_name\n\n @property\n def name(self):\n return self._name\n\n @property\n def full_name(self):\n return self._full_name\n\n @property\n def options(self):\n return self._options\n\n @options.setter\n def options(self, val):\n self._options = val\n\n\nclass GSA(Algorithm):\n def __init__(self, **kwargs):\n super().__init__(class_name='GSA', **kwargs)\n\n\nclass SAC(Algorithm):\n def __init__(self, **kwargs):\n super().__init__(class_name='SAC', **kwargs)\n\n\nclass ASA(Algorithm):\n def __init__(self, **kwargs):\n super().__init__(class_name='ASA', **kwargs)\n\n\nclass PSO(Algorithm):\n def __init__(self, **kwargs):\n super().__init__(class_name='PSO', **kwargs)\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dainis-boumber/AA_CNN | [
"649612215c7e290ede1c51625268ad9fd7b46228"
] | [
"networks/input_components/TwoEmbChannel.py"
] | [
"import tensorflow as tf\n\n\nclass TwoEmbChannel:\n def __init__(self, sequence_length, num_classes, word_vocab_size, embedding_size,\n init_embedding_glv=None, init_embedding_w2v=None):\n # Placeholders for input, output and dropout, First None is batch size.\n self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name=\"input_x\")\n self.input_y = tf.placeholder(tf.float32, [None, num_classes], name=\"input_y\")\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n\n # Embedding layer\n with tf.variable_scope(\"embedding\"):\n if init_embedding_glv is not None and init_embedding_w2v is not None:\n W_glove = tf.Variable(init_embedding_glv, name=\"W-glove\", dtype=\"float32\")\n self.embedded_glove = tf.nn.embedding_lookup(W_glove, self.input_x)\n\n W_w2v = tf.Variable(init_embedding_w2v, name=\"W-w2v\", dtype=\"float32\")\n self.embedded_w2v = tf.nn.embedding_lookup(W_w2v, self.input_x)\n\n self.embedded_glove = tf.expand_dims(self.embedded_glove, -1)\n self.embedded_w2v = tf.expand_dims(self.embedded_w2v, -1)\n\n self.embedded_expanded = tf.concat(values=[self.embedded_glove, self.embedded_w2v],\n axis=3)\n else:\n raise ValueError(\"un supported.\")\n"
] | [
[
"tensorflow.concat",
"tensorflow.Variable",
"tensorflow.expand_dims",
"tensorflow.placeholder",
"tensorflow.variable_scope",
"tensorflow.nn.embedding_lookup"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
gstoica27/google-research | [
"90df0f47ebb79e0c316edba80e75bc4f3736c771"
] | [
"enas_lm/src/child.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"AWD ENAS fixed model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom enas_lm.src import data_utils\nfrom enas_lm.src import utils\nfrom enas_lm.src.scorer import score\nimport pickle\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n\nflags.DEFINE_integer('child_batch_size', 128, '')\nflags.DEFINE_integer('child_bptt_steps', 35, '')\n\n\ndef _gen_mask(shape, drop_prob):\n \"\"\"Generate a droppout mask.\"\"\"\n keep_prob = 1. - drop_prob\n mask = tf.random_uniform(shape, dtype=tf.float32)\n mask = tf.floor(mask + keep_prob) / keep_prob\n return mask\n\n\ndef _rnn_fn(sample_arc, x, prev_s, w_prev, w_skip, input_mask, layer_mask,\n params):\n \"\"\"Multi-layer LSTM.\n\n Args:\n sample_arc: [num_layers * 2], sequence of tokens representing architecture.\n x: [batch_size, num_steps, hidden_size].\n prev_s: [batch_size, hidden_size].\n w_prev: [2 * hidden_size, 2 * hidden_size].\n w_skip: [None, [hidden_size, 2 * hidden_size] * (num_layers-1)].\n input_mask: `[batch_size, hidden_size]`.\n layer_mask: `[batch_size, hidden_size]`.\n params: hyper-params object.\n\n Returns:\n next_s: [batch_size, hidden_size].\n all_s: [[batch_size, num_steps, hidden_size] * num_layers].\n \"\"\"\n # TODO: Assert this change is fine!\n prev_s = tf.zeros([tf.shape(x)[0], params.hidden_size], dtype=tf.float32)\n batch_size = x.get_shape()[0].value\n num_steps = tf.shape(x)[1]\n num_layers = len(sample_arc) // 2\n\n all_s = tf.TensorArray(dtype=tf.float32, size=num_steps, infer_shape=False)\n\n # extract the relevant variables, so that you only do L2-reg on them.\n u_skip = []\n start_idx = 0\n for layer_id in range(num_layers):\n prev_idx = sample_arc[start_idx]\n func_idx = sample_arc[start_idx + 1]\n u_skip.append(w_skip[layer_id][func_idx, prev_idx])\n start_idx += 2\n w_skip = u_skip\n var_s = [w_prev] + w_skip[1:]\n\n def _select_function(h, function_id):\n h = tf.stack([tf.tanh(h), tf.nn.relu(h), tf.sigmoid(h), h], axis=0)\n h = h[function_id]\n return h\n\n def _condition(step, *unused_args):\n return tf.less(step, num_steps)\n\n def _body(step, prev_s, all_s):\n \"\"\"Body function.\"\"\"\n inp = x[:, step, :]\n\n # important change: first input uses a tanh()\n if layer_mask is not None:\n assert input_mask is not None\n ht = tf.matmul(tf.concat([inp * input_mask, prev_s * layer_mask],\n axis=1), w_prev)\n else:\n ht = tf.matmul(tf.concat([inp, prev_s], axis=1), w_prev)\n h, t = tf.split(ht, 2, axis=1)\n h = tf.tanh(h)\n t = tf.sigmoid(t)\n s = prev_s + t * (h - prev_s)\n layers = [s]\n\n start_idx = 0\n used = []\n for layer_id in range(num_layers):\n prev_idx = sample_arc[start_idx]\n func_idx = sample_arc[start_idx + 1]\n used.append(tf.one_hot(prev_idx, depth=num_layers, dtype=tf.int32))\n prev_s = tf.stack(layers, axis=0)[prev_idx]\n if layer_mask is not 
None:\n ht = tf.matmul(prev_s * layer_mask, w_skip[layer_id])\n else:\n ht = tf.matmul(prev_s, w_skip[layer_id])\n h, t = tf.split(ht, 2, axis=1)\n\n h = _select_function(h, func_idx)\n t = tf.sigmoid(t)\n s = prev_s + t * (h - prev_s)\n s.set_shape([batch_size, params.hidden_size])\n layers.append(s)\n start_idx += 2\n\n next_s = tf.add_n(layers[1:]) / tf.cast(num_layers, dtype=tf.float32)\n all_s = all_s.write(step, next_s)\n\n return step + 1, next_s, all_s\n\n loop_inps = [tf.constant(0, dtype=tf.int32), prev_s, all_s]\n _, next_s, all_s = tf.while_loop(_condition, _body, loop_inps)\n all_s = tf.transpose(all_s.stack(), [1, 0, 2])\n\n return next_s, all_s, var_s\n\n\ndef _set_default_params(params):\n \"\"\"Set default hyper-parameters.\"\"\"\n # params.add_hparam('alpha', 0.0) # activation L2 reg\n # params.add_hparam('beta', 1.) # activation slowness reg\n # params.add_hparam('best_valid_ppl_threshold', 5)\n #\n # # params.add_hparam('batch_size', FLAGS.child_batch_size)\n # # params.add_hparam('bptt_steps', FLAGS.child_bptt_steps)\n #\n # # for dropouts: dropping rate, NOT keeping rate\n # params.add_hparam('drop_e', 0.10) # word\n # params.add_hparam('drop_i', 0.20) # embeddings\n # params.add_hparam('drop_x', 0.75) # input to RNN cells\n # params.add_hparam('drop_l', 0.25) # between layers\n # params.add_hparam('drop_o', 0.75) # output\n # params.add_hparam('drop_w', 0.00) # weight\n #\n # params.add_hparam('grad_bound', 0.1)\n # params.add_hparam('hidden_size', 200)\n # params.add_hparam('init_range', 0.04)\n # params.add_hparam('learning_rate', 20.)\n # params.add_hparam('num_train_epochs', 600)\n # # params.add_hparam('vocab_size', 10000)\n #\n # params.add_hparam('weight_decay', 8e-7)\n return params\n\n\nclass LM(object):\n \"\"\"Language model.\"\"\"\n\n def __init__(self, params, controller, name='child'):\n print('-' * 80)\n print('Building LM')\n\n self.params = _set_default_params(params)\n self.controller = controller\n self.sample_arc = tf.unstack(controller.sample_arc)\n self.name = name\n self.base_bptt = params.base_bptt\n # self.num_train_batches = None\n # self.reset_start_idx = None\n # self.should_reset = None\n # train data\n # (self.x_train, self.y_train,\n # self.num_train_batches, self.reset_start_idx,\n # self.should_reset, self.base_bptt) = data_utils.input_producer(\n # x_train, params.batch_size, params.bptt_steps, random_len=True)\n # params.add_hparam(\n # 'num_train_steps', self.num_train_batches * params.num_train_epochs)\n\n # valid data\n # (self.x_valid, self.y_valid,\n # self.num_valid_batches) = data_utils.input_producer(\n # x_valid, params.batch_size, params.bptt_steps)\n\n with tf.device('/CPU:0'):\n self.input_iterator_handle = tf.placeholder(\n tf.string, shape=[], name='input_iterator_handle')\n \"\"\"\n Data Description:\n token_ids: ids of tokens\n masks: array of 1s or 0s indicating if phrase is zero padded to uniform length (1 = pad, 0=no)\n pos_ids: part of speech ids for each token\n ner_ids: named entity recognition ids for each token\n subj_positions: token positions relative to phrase subject\n obj_positions: token positions relative to phrase object\n\n All components share the following size: [BatchSize, NumTokens], where NumTokens is max tokens allowed.\n If phrases are < NumTokens, they are zero padded to reach necessary length\n \"\"\"\n self.input_iterator = tf.data.Iterator.from_string_handle(\n self.input_iterator_handle,\n output_types={\n 'token_ids': tf.int64,\n 'labels': tf.int64,\n 'masks': tf.int64,\n 'pos_ids': 
tf.int64,\n 'ner_ids': tf.int64,\n 'subj_positions': tf.int64,\n 'obj_positions': tf.int64,\n 'deprel': tf.int64,\n },\n output_shapes={\n 'token_ids': [None, None],\n 'labels': [None],\n 'masks': [None, None],\n 'pos_ids': [None, None],\n 'ner_ids': [None, None],\n 'subj_positions': [None, None],\n 'obj_positions': [None, None],\n 'deprel': [None, None]\n }\n )\n self.batch_input = self.input_iterator.get_next()\n self.labels = self.batch_input['labels']\n\n self._build_params()\n self._build_train()\n self._build_valid()\n\n def _build_params(self):\n \"\"\"Create model parameters.\"\"\"\n\n print('-' * 80)\n print('Building model params')\n initializer = tf.initializers.random_uniform(minval=-self.params.init_range,\n maxval=self.params.init_range)\n # number of activation functions available\n num_functions = self.params.controller_num_functions\n # number of layers in RNN\n num_layers = self.params.controller_num_layers\n hidden_size = self.params.hidden_size\n with tf.variable_scope(self.name, initializer=initializer):\n with tf.variable_scope('embedding'):\n if self.params.token_embeddings is not None:\n token_initializer = tf.constant_initializer(self.params.token_embeddings)\n else:\n token_initializer = initializer\n w_emb = tf.get_variable('w', [self.params.vocab_size, self.params.vocab_dim],\n initializer=token_initializer)\n dropped_w_emb = tf.layers.dropout(\n w_emb, self.params.drop_e, [self.params.vocab_size, 1],\n training=True)\n\n pos_emb = tf.get_variable(name='pos_emb',\n shape=[self.params.num_pos, self.params.pos_dim])\n dropped_pos_emb = tf.layers.dropout(pos_emb, self.params.drop_e, [self.params.num_pos, 1], training=True)\n\n ner_emb = tf.get_variable(name='ner_emb', shape=[self.params.num_ner, self.params.ner_dim])\n dropped_ner_emb = tf.layers.dropout(ner_emb,\n self.params.drop_e,\n [self.params.num_ner, 1],\n training=True)\n\n position_embs = tf.get_variable(name='position_embs',\n shape=[2 * self.params.max_len + 1, self.params.position_dim])\n dropped_position_embs = tf.layers.dropout(position_embs,\n self.params.drop_e,\n [2 * self.params.max_len + 1, 1],\n training=True)\n with tf.variable_scope('encoding'):\n enc_weight = tf.get_variable('encoding_weight',\n shape=[self.params.vocab_dim + self.params.ner_dim + \\\n self.params.pos_dim + 2*self.params.position_dim, hidden_size])\n enc_bias = tf.get_variable('encoding_bias',\n shape=[1, hidden_size])\n\n with tf.variable_scope('rnn_cell'):\n w_prev = tf.get_variable('w_prev', [2 * hidden_size, 2 * hidden_size])\n i_mask = tf.ones([hidden_size, 2 * hidden_size], dtype=tf.float32)\n h_mask = _gen_mask([hidden_size, 2 * hidden_size], self.params.drop_w)\n mask = tf.concat([i_mask, h_mask], axis=0)\n dropped_w_prev = w_prev * mask\n\n w_skip, dropped_w_skip = [], []\n for layer_id in range(1, num_layers+1):\n with tf.variable_scope('layer_{}'.format(layer_id)):\n w = tf.get_variable(\n 'w', [num_functions, layer_id, hidden_size, 2 * hidden_size])\n mask = _gen_mask([1, 1, hidden_size, 2 * hidden_size],\n self.params.drop_w)\n dropped_w = w * mask\n w_skip.append(w)\n dropped_w_skip.append(dropped_w)\n\n with tf.variable_scope('init_states'):\n with tf.variable_scope('batch'):\n init_shape = [self.params.batch_size, hidden_size]\n batch_prev_s = tf.get_variable(\n 's', init_shape, dtype=tf.float32, trainable=False)\n zeros = np.zeros(init_shape, dtype=np.float32)\n batch_reset = tf.assign(batch_prev_s, zeros)\n\n with tf.variable_scope('class_projection'):\n class_weight = tf.get_variable(name='weight', 
shape=[hidden_size, self.params.num_classes])\n class_bias = tf.get_variable(name='bias', shape=[1, 1, self.params.num_classes])\n\n self.num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()\n if v.name.startswith(self.name)]).value\n print('All children have {0} params'.format(self.num_params))\n\n num_params_per_child = 0\n for v in tf.trainable_variables():\n if v.name.startswith(self.name):\n if 'rnn_cell' in v.name:\n num_params_per_child += v.shape[-2].value * v.shape[-1].value\n else:\n num_params_per_child += np.prod([d.value for d in v.shape])\n print('Each child has {0} params'.format(num_params_per_child))\n\n self.batch_init_states = {\n 's': batch_prev_s,\n 'reset': batch_reset,\n }\n self.train_params = {\n 'w_emb': dropped_w_emb,\n 'pos_emb': dropped_pos_emb,\n 'ner_emb': dropped_ner_emb,\n 'position_emb': dropped_position_embs,\n 'w_prev': dropped_w_prev,\n 'w_skip': dropped_w_skip,\n 'w_soft': class_weight,\n 'b_soft': class_bias,\n 'enc_w': enc_weight,\n 'enc_b': enc_bias\n }\n self.eval_params = {\n 'w_emb': w_emb,\n 'pos_emb': pos_emb,\n 'ner_emb': ner_emb,\n 'position_emb': position_embs,\n 'w_prev': w_prev,\n 'w_skip': w_skip,\n 'w_soft': class_weight,\n 'b_soft': class_bias,\n 'enc_w': enc_weight,\n 'enc_b': enc_bias\n }\n\n def _forward(self, x, y, model_params, init_states, is_training=False):\n \"\"\"Computes the logits.\n\n Args:\n x: [batch_size, num_steps], input batch.\n y: [batch_size, num_steps], output batch.\n model_params: a `dict` of params to use.\n init_states: a `dict` of params to use.\n is_training: if `True`, will apply regularizations.\n\n Returns:\n loss: scalar, cross-entropy loss\n \"\"\"\n # embedding weights\n w_emb = model_params['w_emb']\n ner_embs = model_params['ner_emb']\n pos_embs = model_params['pos_emb']\n position_embs = model_params['position_emb']\n # rest of model\n enc_w = model_params['enc_w']\n enc_b = model_params['enc_b']\n w_prev = model_params['w_prev']\n w_skip = model_params['w_skip']\n w_soft = model_params['w_soft']\n b_soft = model_params['b_soft']\n prev_s = init_states['s']\n\n tokens = x['token_ids']\n ners = x['ner_ids']\n poss = x['pos_ids']\n obj_pos = x['obj_positions']\n subj_pos = x['subj_positions']\n token_mask = tf.reshape(tf.cast(x['masks'], dtype=tf.float32), [tf.shape(tokens)[0], tf.shape(tokens)[1], 1])\n\n token_emb = tf.nn.embedding_lookup(w_emb, tokens)\n ner_emb = tf.nn.embedding_lookup(ner_embs, ners)\n pos_emb = tf.nn.embedding_lookup(pos_embs, poss)\n subj_pos_emb = tf.nn.embedding_lookup(position_embs, subj_pos + self.params.max_len)\n obj_pos_emb = tf.nn.embedding_lookup(position_embs, obj_pos + self.params.max_len)\n\n emb = tf.concat([token_emb, ner_emb, pos_emb, subj_pos_emb, obj_pos_emb], axis=2)\n # --> [BatchSize, HiddenSize]\n emb = tf.matmul(emb, enc_w) + enc_b\n\n # emb = tf.nn.embedding_lookup(w_emb, x)\n batch_size = self.params.batch_size\n hidden_size = self.params.hidden_size\n sample_arc = self.sample_arc\n if is_training:\n emb = tf.layers.dropout(\n emb, self.params.drop_i, [batch_size, 1, hidden_size], training=True)\n\n input_mask = _gen_mask([batch_size, hidden_size], self.params.drop_x)\n layer_mask = _gen_mask([batch_size, hidden_size], self.params.drop_l)\n else:\n input_mask = None\n layer_mask = None\n\n out_s, all_s, var_s = _rnn_fn(sample_arc, emb, prev_s, w_prev, w_skip,\n input_mask, layer_mask, params=self.params)\n\n top_s = all_s\n if is_training:\n top_s = tf.layers.dropout(\n top_s, self.params.drop_o,\n [self.params.batch_size, 1, 
self.params.hidden_size], training=True)\n\n logits = tf.einsum('ijk,kl->ijl', top_s, w_soft) + b_soft\n # token mask: 1=padding, 0 = no padding. So we flip the mask before applying the filter\n logits = logits * (1. - token_mask)\n # [BatchSize, NumSteps, NumClass] -> [BatchSize, NumClass]\n self.logits = tf.reduce_mean(logits, axis=1)\n\n # carry_on = [tf.assign(prev_s, out_s)]\n # logits = tf.einsum('bnh,vh->bnv', top_s, w_soft)\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,\n logits=self.logits)\n loss = tf.reduce_mean(loss)\n\n reg_loss = loss # `loss + regularization_terms` is for training only\n if is_training:\n # L2 weight reg\n self.l2_reg_loss = tf.add_n([tf.nn.l2_loss(w) for w in var_s])\n reg_loss += self.params.weight_decay * self.l2_reg_loss\n\n # activation L2 reg\n reg_loss += self.params.alpha * tf.reduce_mean(all_s ** 2)\n\n # activation slowness reg\n reg_loss += self.params.beta * tf.reduce_mean(\n (all_s[:, 1:, :] - all_s[:, :-1, :]) ** 2)\n\n # with tf.control_dependencies(carry_on):\n loss = tf.identity(loss)\n if is_training:\n reg_loss = tf.identity(reg_loss)\n\n return reg_loss, loss\n\n def _build_train(self):\n \"\"\"Build training ops.\"\"\"\n print('-' * 80)\n print('Building train graph')\n reg_loss, loss = self._forward(self.batch_input, self.labels,\n self.train_params, self.batch_init_states,\n is_training=True)\n\n tf_vars = [v for v in tf.trainable_variables()\n if v.name.startswith(self.name)]\n global_step = tf.train.get_or_create_global_step()\n lr_scale = (tf.cast(tf.shape(self.labels)[-1], dtype=tf.float32) /\n tf.cast(self.params.bptt_steps, dtype=tf.float32))\n learning_rate = utils.get_lr(global_step, self.params) * lr_scale\n if self.params.grad_bound:\n grads = tf.gradients(reg_loss, tf_vars)\n clipped_grads, grad_norm = tf.clip_by_global_norm(grads,\n self.params.grad_bound)\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n train_op = optimizer.apply_gradients(zip(clipped_grads, tf_vars),\n global_step=global_step)\n\n self.train_loss = loss\n self.train_op = train_op\n self.grad_norm = grad_norm\n self.learning_rate = learning_rate\n\n def _build_valid(self):\n print('Building valid graph')\n _, loss = self._forward(self.batch_input, self.labels,\n self.eval_params, self.batch_init_states)\n self.valid_loss = loss\n self.rl_loss = loss\n\n def eval_valid(self, sess, handle_iterator, handle_string):\n \"\"\"Eval 1 round on valid set.\"\"\"\n total_loss = 0\n\n sess.run(handle_iterator.initializer)\n tot_batches = 0\n all_predictions = []\n all_labels = []\n while True:\n try:\n sess.run(self.batch_init_states['reset'])\n logits, labels, batch_loss = sess.run([self.logits, self.labels, self.valid_loss],\n feed_dict={self.input_iterator_handle: handle_string})\n total_loss += batch_loss\n tot_batches += 1\n # Compute Validation Metrics\n # [BatchSize, NumClasses]\n predictions = np.reshape(np.argmax(logits, axis=1), [-1])\n labels = np.reshape(labels, [-1])\n all_predictions += list(predictions)\n all_labels += list(labels)\n\n except tf.errors.OutOfRangeError:\n break\n\n # for debugging score function\n predictions_save_path = '/usr0/home/gis/research/enas_re/tmp/datasets/tacred/output/prediction_debugging.pkl'\n predictions_debugging = {'all_labels': all_labels, 'all_predictions': all_predictions}\n print('saving predictions to: {}'.format(predictions_save_path))\n with open(predictions_save_path, 'wb') as handle:\n pickle.dump(predictions_debugging, handle)\n\n prec_micro, recall_micro, f1_micro = 
score(all_labels, all_predictions)\n valid_ppl = total_loss / tot_batches\n print('valid_ppl={0:<.2f}'.format(valid_ppl))\n\n return valid_ppl, prec_micro, recall_micro, f1_micro\n"
] | [
[
"tensorflow.device",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.layers.dropout",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.nn.l2_loss",
"tensorflow.tanh",
"tensorflow.add_n",
"tensorflow.while_loop",
"tensorflow.data.Iterator.from_string_handle",
"numpy.reshape",
"tensorflow.floor",
"tensorflow.gradients",
"tensorflow.train.get_or_create_global_step",
"numpy.argmax",
"tensorflow.trainable_variables",
"numpy.zeros",
"tensorflow.matmul",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.TensorArray",
"tensorflow.less",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.one_hot",
"tensorflow.initializers.random_uniform",
"tensorflow.split",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.relu",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.assign",
"tensorflow.sigmoid",
"tensorflow.ones",
"tensorflow.einsum",
"tensorflow.constant_initializer",
"tensorflow.clip_by_global_norm",
"numpy.prod",
"tensorflow.variable_scope",
"tensorflow.random_uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
kibernetika-ai/Image-Denoising-with-Deep-CNNs | [
"c081b85fee0fb72e74b0bf4beae90f0bb8bb0a6a"
] | [
"src/inference.py"
] | [
"import argparse\nimport time\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.quantization.quantize_fx as quantize_fx\nfrom torch.quantization.fuse_modules import fuse_known_modules\n\nimport model\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-D', default=6, type=int)\n parser.add_argument('-C', default=64, type=int)\n parser.add_argument('--image')\n parser.add_argument('--output')\n parser.add_argument(\n '--quantize',\n choices=[None, 'dynamic', 'static', 'fx_dynamic', 'fx_static'],\n default=None\n )\n parser.add_argument('--show', action='store_true')\n parser.add_argument('--model', required=True)\n\n return parser.parse_args()\n\n\ndef load_model(path, D=6, C=64, device=torch.device('cpu')):\n net = model.DUDnCNN(D, C)\n checkpoint = torch.load(path, map_location=torch.device('cpu'))\n if 'QAT' not in checkpoint:\n net.load_state_dict(checkpoint['Net'])\n net.eval()\n\n return net.to(device), checkpoint.get('QAT')\n\n\ndef img_to_tensor(img, device):\n tensor = torch.FloatTensor(img).to(device)\n tensor = tensor.permute([2, 0, 1]) / 255.\n tensor = (tensor - 0.5) / 0.5\n\n return tensor.unsqueeze(0)\n\n\ndef tensor_to_img(tensor):\n tensor = tensor[0].permute([1, 2, 0])\n tensor = (tensor * 0.5 + 0.5) * 255\n tensor = tensor.clamp(0, 255)\n return tensor.cpu().numpy().astype(np.uint8)\n\n\ndef quantize_model(quantize_type, model, input_example=None, qat_state=None):\n if quantize_type == 'dynamic':\n model = torch.quantization.quantize_dynamic(\n model,\n {torch.nn.Conv2d},\n dtype=torch.qint8\n )\n elif quantize_type == 'static':\n model.qconfig = torch.quantization.get_default_qconfig('fbgemm')\n for i in range(len(model.bn)):\n conv, bn = model.conv[i+1], model.bn[i]\n conv_new, bn_new = fuse_known_modules([conv, bn])\n setattr(model.conv, str(i+1), conv_new)\n setattr(model.bn, str(i), bn_new)\n model_fp32_fused = model\n model_fp32_prepared = torch.quantization.prepare(model_fp32_fused)\n if input_example is not None:\n model_fp32_prepared(input_example)\n model = torch.quantization.convert(model_fp32_prepared)\n elif quantize_type == 'fx_dynamic':\n qconfig_dict = {\"\": torch.quantization.default_dynamic_qconfig}\n # prepare\n model_prepared = quantize_fx.prepare_fx(model, qconfig_dict)\n # no calibration needed when we only have dynamici/weight_only quantization\n # quantize\n model = quantize_fx.convert_fx(model_prepared)\n elif quantize_type == 'fx_static':\n # qconfig_dict = {\"\": torch.quantization.get_default_qconfig('qnnpack')}\n qconfig_dict = {\"\": torch.quantization.get_default_qconfig('fbgemm')}\n # prepare\n model_prepared = quantize_fx.prepare_fx(model, qconfig_dict)\n # calibrate (not shown)\n if input_example is not None:\n model_prepared(input_example)\n # quantize\n model = quantize_fx.convert_fx(model_prepared)\n if qat_state is not None:\n model.load_state_dict(qat_state)\n\n return model\n\n\ndef main():\n args = parse_args()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n denoise, qat_state = load_model(args.model, args.D, args.C, device=device)\n\n img = cv2.cvtColor(cv2.imread(args.image), cv2.COLOR_BGR2RGB)\n small = cv2.resize(img, (720, 720), interpolation=cv2.INTER_AREA)\n\n tensor = img_to_tensor(small, device)\n\n if args.quantize:\n print('Quantize model...')\n denoise = quantize_model(\n args.quantize,\n denoise,\n input_example=tensor,\n qat_state=qat_state,\n )\n print('Done.')\n\n t = time.time()\n with torch.no_grad():\n output = denoise(tensor)\n\n 
print(f'Elapsed: {(time.time() - t) * 1000:.2f}ms')\n output = tensor_to_img(output)\n combined = np.hstack([small, output])\n\n if args.show:\n cv2.imshow('Image', combined[:, :, ::-1])\n cv2.waitKey(0)\n\n if args.output:\n cv2.imwrite(args.output, combined[:, :, ::-1])\n print(f'Image saved to {args.output}.')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.hstack",
"torch.quantization.prepare",
"torch.quantization.fuse_modules.fuse_known_modules",
"torch.quantization.quantize_fx.convert_fx",
"torch.quantization.get_default_qconfig",
"torch.quantization.quantize_dynamic",
"torch.no_grad",
"torch.FloatTensor",
"torch.quantization.convert",
"torch.cuda.is_available",
"torch.device",
"torch.quantization.quantize_fx.prepare_fx"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jianwen-xie/Spatial-Temporal-CoopNets | [
"3aeae2bb78c90ad027387d602a94839d83439f3a"
] | [
"src/util.py"
] | [
"from __future__ import division\n\n\nimport os\nimport numpy as np\nimport math\nfrom PIL import Image\nimport scipy.misc\nimport subprocess\n\ndef loadVideoToFrames(data_path, syn_path, ffmpeg_loglevel = 'quiet'):\n videos = [f for f in os.listdir(data_path) if f.endswith(\".avi\") or f.endswith(\".mp4\")]\n num_videos = len(videos)\n\n for i in range(num_videos):\n video_path = os.path.join(data_path, videos[i])\n out_dir = os.path.join(syn_path, \"sequence_%d\" % i)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n subprocess.call('ffmpeg -loglevel {} -i {} {}/%03d.png'.format(ffmpeg_loglevel,video_path, out_dir), shell=True)\n return num_videos\n\n\ndef cell2img(filename, out_dir='./final_result',image_size=224, margin=2):\n img = scipy.misc.imread(filename, mode='RGB')\n num_cols = img.shape[1] // image_size\n num_rows = img.shape[0] // image_size\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n for ir in range(num_rows):\n for ic in range(num_cols):\n temp = img[ir*(image_size+margin):image_size + ir*(image_size+margin),\n ic*(image_size+margin):image_size + ic*(image_size+margin),:]\n scipy.misc.imsave(\"%s/%03d.png\" % (out_dir,ir*num_cols+ic), temp)\n print(img.shape)\n\ndef img2cell(images, col_num=10, margin=2, scale_method='original'):\n [num_images, size_h, size_w, num_channel] = images.shape\n row_num = int(math.ceil(num_images/col_num))\n saved_img = np.zeros(((row_num * size_h + margin * (row_num - 1)),\n (col_num * size_w + margin * (col_num - 1)),\n num_channel), dtype=np.float32)\n for idx in range(num_images):\n ir = int(math.floor(idx / col_num))\n ic = idx % col_num\n\n temp = images[idx]\n #temp = np.squeeze(images[idx])\n\n if scale_method=='original':\n temp = np.maximum(0.0, np.minimum(255.0, np.round(temp)))\n elif scale_method=='tanh':\n temp = np.maximum(-1, np.minimum(1, temp))\n temp = (temp + 1) / 2 * 255\n temp = np.maximum(0, np.minimum(255, np.round(temp)))\n\n gLow = temp.min()\n gHigh = temp.max()\n cscale = gHigh - gLow\n if cscale == 0:\n cscale = 1\n temp = (temp - gLow) / cscale\n saved_img[(size_h + margin) * ir:size_h + (size_h + margin) * ir,\n (size_w + margin) * ic:size_w + (size_w + margin) * ic, :] = temp\n return saved_img\n\n\ndef normalize_data(images, low=-1, high=1):\n num_images = images.shape[0]\n for idx in range(num_images):\n temp = images[idx]\n gLow = temp.min()\n gHigh = temp.max()\n cscale = gHigh - gLow\n if cscale == 0:\n cscale = 1\n images[idx] = (temp - gLow) / cscale * (high - low) + low\n print(images.max())\n return images\n\ndef getTrainingData(data_path, num_frames=70, image_size=100, isColor=True, postfix='.png', scale_method='original'):\n num_channel = 3\n if not isColor:\n num_channel = 1\n videos = [f for f in os.listdir(data_path) if f.startswith('sequence')]\n num_videos = len(videos)\n images = np.zeros(shape=(num_videos, num_frames, image_size, image_size, num_channel))\n for iv in range(num_videos):\n video_path = os.path.join(data_path, 'sequence_%d' % iv)\n imgList = [f for f in os.listdir(video_path) if f.endswith(postfix)]\n imgList.sort()\n imgList = imgList[:num_frames]\n for iI in range(len(imgList)):\n image = Image.open(os.path.join(video_path, imgList[iI])).resize((image_size, image_size), Image.BILINEAR)\n if isColor:\n image = np.asarray(image.convert('RGB')).astype(float)\n else:\n image = np.asarray(image.convert('L')).astype(float)\n image = image[..., np.newaxis]\n\n if scale_method =='tanh':\n max_val = image.max()\n min_val = image.min()\n image = (image - min_val) / 
(max_val - min_val) * 2 - 1\n\n images[iv, iI, :,:,:] = image\n return images.astype(float)\n\ndef saveSampleVideo(samples, out_dir, original=None, global_step=None, ffmpeg_loglevel='quiet', fps=25, scale_method='original'):\n [num_video, num_frames, image_size, _, _] = samples.shape\n\n for iv in range(num_video):\n result_dir = os.path.join(out_dir, 'sequence_%d' % iv)\n if global_step >= 0:\n result_dir = os.path.join(result_dir, \"step_%04d\" % global_step)\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n for ifr in range(num_frames):\n saved_img = np.squeeze(samples[iv,ifr, :,:,:])\n\n if scale_method == 'original':\n saved_img = np.maximum(0.0, np.minimum(255.0, np.round(saved_img)))\n elif scale_method == 'tanh':\n saved_img = np.maximum(-1, np.minimum(1, saved_img))\n saved_img = (saved_img + 1) / 2 * 255\n saved_img = np.maximum(0, np.minimum(255, np.round(saved_img)))\n\n max_val = saved_img.max()\n min_val = saved_img.min()\n saved_img = (saved_img - min_val) / (max_val - min_val)\n scipy.misc.imsave(\"%s/%03d.png\" % (result_dir, ifr), saved_img)\n subprocess.call('ffmpeg -loglevel {} -r {} -i {}/%03d.png -vcodec mpeg4 -y {}/sample.avi'.format(\n ffmpeg_loglevel,fps, result_dir,result_dir),shell=True)\n\n result_dir = os.path.join(out_dir, 'all') # visualize the original and the synthesized frames\n if global_step >= 0:\n result_dir = os.path.join(result_dir, \"step_%04d\" % global_step)\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n for ifr in range(num_frames):\n if original is None:\n combined_img = samples\n saved_img = img2cell(combined_img[:, ifr, :, :, :], col_num=num_video, margin=10, scale_method=scale_method)\n else:\n [num_video_ori, _, _, _, _] = original.shape\n combined_img = np.concatenate((original, samples), axis=0)\n #saved_img = img2cell(combined_img[:, ifr, :, :, :], col_num=num_video_ori, margin=10, scale_method=scale_method)\n saved_img = img2cell(combined_img[:, ifr, :, :, :], col_num=3, margin=10, scale_method=scale_method)\n scipy.misc.imsave(\"%s/%03d.png\" % (result_dir, ifr), np.squeeze(saved_img))\n subprocess.call('ffmpeg -loglevel {} -r {} -i {}/%03d.png -vcodec mpeg4 -y {}/sample.avi'.format(\n ffmpeg_loglevel, fps, result_dir, result_dir), shell=True)\n\n\ndef saveSampleSequence(samples, sample_dir, iter=None, col_num=10, scale_method='original'):\n num_video = samples.shape[0]\n\n for iv in range(num_video):\n save_dir = os.path.join(sample_dir, \"sequence_%d\" % iv)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n if iter>=0:\n scipy.misc.imsave(\"%s/%04d.png\" % (save_dir, iter), np.squeeze(img2cell(samples[iv], scale_method=scale_method, col_num=col_num)))\n else:\n scipy.misc.imsave(\"%s/sample.png\" % save_dir, np.squeeze(img2cell(samples[iv], scale_method=scale_method, col_num=col_num)))\n\n"
] | [
[
"numpy.minimum",
"numpy.squeeze",
"numpy.concatenate",
"numpy.round",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
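The `code` field above bundles frame I/O and grid-tiling helpers; note that it calls `scipy.misc.imread`/`scipy.misc.imsave`, which were removed in SciPy 1.2, so the module as stored will not run against a current SciPy. A minimal sketch of the same tiling arithmetic with Pillow handling the I/O instead (the `save_grid` name and the uint8 input assumption are mine, not part of the dataset row):

```python
import numpy as np
from PIL import Image

def save_grid(images, path, col_num=10, margin=2):
    # images: (N, H, W, C) uint8 batch; same row/column layout math as img2cell
    n, h, w, c = images.shape
    rows = int(np.ceil(n / col_num))
    grid = np.zeros((rows * h + margin * (rows - 1),
                     col_num * w + margin * (col_num - 1), c), dtype=np.uint8)
    for i, img in enumerate(images):
        r, col = divmod(i, col_num)
        grid[r * (h + margin):r * (h + margin) + h,
             col * (w + margin):col * (w + margin) + w] = img
    Image.fromarray(np.squeeze(grid)).save(path)
```

For uint8 arrays, `Image.fromarray(...).save(...)` is a drop-in replacement for the removed `scipy.misc.imsave` call.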
hasanari/sane | [
"4ee8d7295d3d13beb3171e29bf5b757acaa11137"
] | [
"app/models.py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport numpy as np\nfrom sklearn.metrics import pairwise_distances\n\ndef range_overlap(\n a_min,\n a_max,\n b_min,\n b_max,\n ):\n '''Neither range is completely greater than the other\n....'''\n\n return a_min <= b_max and b_min <= a_max\n\n\n# https://codereview.stackexchange.com/questions/31352/overlapping-rectangles\n\ndef is_overlap(corners_new, corners):\n '''Overlapping rectangles overlap both horizontally & vertically\n....'''\n\n r1 = {\n 'left': np.min(corners[:, 0]),\n 'right': np.max(corners[:, 0]),\n 'bottom': np.min(corners[:, 1]),\n 'top': np.max(corners[:, 1]),\n }\n r2 = {\n 'left': np.min(corners_new[:, 0]),\n 'right': np.max(corners_new[:, 0]),\n 'bottom': np.min(corners_new[:, 1]),\n 'top': np.max(corners_new[:, 1]),\n }\n\n return range_overlap(r1['left'], r1['right'], r2['left'], r2['right'\n ]) and range_overlap(r1['bottom'], r1['top'],\n r2['bottom'], r2['top'])\n\n\ndef is_overlap_with_other_boxes(box_id, corner_checks, other_boxes):\n\n for bbox in other_boxes:\n if box_id != bbox.id:\n corners = bbox.get_corners()\n if is_overlap(corners, corner_checks):\n return True\n return False\n\n\ndef fixed_annotation_error(json_data, with_kalman_filter=True):\n\n json_bounding_boxes = json_data['frame']['bounding_boxes']\n\n #print ('prior', json_bounding_boxes)\n frame = Frame.parse_json(json_data)\n\n H = frame.H_MATRIX\n R = frame.R_MATRIX\n F = frame.F_MATRIX\n Q = frame.Q_MATRIX\n \n \n \n #print ('F', F)\n #print ('Q', Q)\n #print ('R', R)\n\n dt = frame.dt\n\n box_id = -1\n for bounding_box in frame.bounding_boxes:\n box_id = box_id + 1\n\n # Previous state initialization\n\n z_k = bounding_box.center\n x_k_min_1 = bounding_box.predicted_state\n P_k_min_1 = bounding_box.predicted_error\n\n \n if(np.sum(x_k_min_1[:2])== 0):\n x_k_min_1[:2] = z_k\n x_k_min_1[2:] = 0\n \n \"\"\" \n x_k_min_1[2] = (x_k_min_1[0] - z_k[0]) / dt #V_x \n x_k_min_1[3] = (x_k_min_1[1] - z_k[1]) / dt #V_y \n x_k_min_1[4] = (x_k_min_1[2] - bounding_box.predicted_state[2]) / dt #A_x \n x_k_min_1[5] = (x_k_min_1[3] - bounding_box.predicted_state[3]) / dt #A_y \n \"\"\"\n #print ('predicted_state', bounding_box.predicted_state)\n #print ('predicted_error', bounding_box.predicted_error)\n print ('x_k_min_1', x_k_min_1, x_k_min_1.shape)\n \n x_hat_k_prior = np.matmul(F, x_k_min_1)\n \n P_k_prior = np.matmul(np.matmul(F, P_k_min_1), np.transpose(F)) \\\n + Q\n\n \n #print ('x_hat_k_prior', x_hat_k_prior)\n #print ('P_k_prior', P_k_prior)\n \n y_k = z_k - np.matmul(H, x_hat_k_prior)\n\n \n print ('z_k', z_k)\n print ('y_k', y_k)\n \n _temp = np.linalg.inv(R + np.matmul(np.matmul(H, P_k_prior),\n np.transpose(H)))\n K_k = np.matmul(np.matmul(P_k_prior, np.transpose(H)), _temp)\n\n x_hat_k = x_hat_k_prior + np.matmul(K_k, y_k)\n\n \n #Force using previous velocity and acceleration\n \n v_x = (x_hat_k[0] - x_k_min_1[0]) / dt\n v_y = (x_hat_k[1] - x_k_min_1[1]) / dt\n \n \n a_x = (v_x - x_k_min_1[2]) / dt\n a_y = (v_y - x_k_min_1[3]) / dt\n \n x_hat_k[2:] = [v_x, v_y, a_x, a_y]\n \n #print(x_hat_k)\n _temp = np.eye(6) - np.matmul(K_k, H)\n P_k = np.matmul(np.matmul(_temp, P_k_prior),\n np.transpose(_temp)) + np.matmul(np.matmul(K_k,\n R), np.transpose(K_k))\n\n #print ('P_k', P_k)\n #print ('K_k', K_k)\n print ('x_hat_k', x_hat_k)\n \n json_bounding_boxes[box_id]['center']['x'] = x_hat_k[0]\n json_bounding_boxes[box_id]['center']['y'] = x_hat_k[1]\n \n json_bounding_boxes[box_id]['predicted_error'] = \\\n 
np.diag(P_k).tolist()\n json_bounding_boxes[box_id]['predicted_state'] = \\\n x_hat_k.tolist()\n\n json_data['frame']['bounding_boxes'] = json_bounding_boxes\n #print ('updated-kalman', json_bounding_boxes)\n return json_data\n\n\nclass NextFrameBBOX:\n\n def __init__(\n self,\n box_id,\n back_tracking_boxes,\n box_state,\n center_location,\n tracking_idx,\n is_bbox_updated=False,\n ):\n self.id = box_id\n self.tracking_idx = tracking_idx\n self.box_id = box_id\n self.is_bbox_updated = is_bbox_updated\n self.center = center_location\n self.back_tracking_boxes = back_tracking_boxes\n self.box_state = box_state\n self.box_track_indices = sorted(back_tracking_boxes.keys())\n self.current_box_track_index = len(self.box_track_indices) - 1 # from very last\n\n def update_index(self):\n self.current_box_track_index = self.current_box_track_index - 1\n if self.current_box_track_index < 0:\n self.current_box_track_index = 0\n self.is_bbox_updated = True\n return self.current_box_track_index\n\n def get_tracking_index(self):\n return self.box_track_indices[self.current_box_track_index]\n\n def get_corners(self):\n return self.back_tracking_boxes[self.get_tracking_index()][0]\n\n def get_center_dist(self):\n return self.back_tracking_boxes[self.get_tracking_index()][1]\n\n def get_bounding_box(self, bbox):\n\n bbox['center_dist'] = self.get_center_dist()\n bbox['object_id'] = self.box_state['object_id']\n bbox['predicted_state'] = self.box_state['predicted_state']\n bbox['predicted_error'] = self.box_state['predicted_error']\n bbox['tracking_idx'] = self.box_state['tracking_idx']\n return bbox\n\n def is_boxes_overlap(self, box_check):\n return is_overlap(self.get_corners(), box_check.get_corners())\n\n\nclass Frame:\n\n def __init__(\n self,\n fname,\n bounding_boxes,\n dt=0.1,\n ):\n self.fname = fname\n self.bounding_boxes = bounding_boxes\n self.dt =dt\n self.F_MATRIX = np.array([[ 1, 0, dt, 0, 0.5 * dt * dt, 0, ], \n [ 0, 1, 0, dt, 0, 0.5 * dt * dt, ], \n [ 0, 0, 1, 0, dt, 0, ], \n [ 0, 0, 0, 1, 0, dt, ], \n [ 0, 0, 0, 0, 1, 0, ], \n [ 0, 0, 0, 0, 0, 1, ], ], dtype=np.float32)\n self.Q_MATRIX = np.eye(6) * [ 0, 0, 0, 0, 0, 0, ]\n self.R_MATRIX = np.eye(2) * [0.0000000001, 0.0000000001]\n self.H_MATRIX = np.array([[ 1, 0, 0, 0, 0, 0, ], \n [ 0, 1, 0, 0, 0, 0, ]], dtype=np.float32)\n\n @staticmethod\n def parse_json(json_frame):\n json_bounding_boxes = json_frame['frame']['bounding_boxes']\n bounding_boxes = BoundingBox.parse_json(json_bounding_boxes)\n return Frame(json_frame['frame']['fname'], bounding_boxes)\n\n\nclass BoundingBox:\n\n def __init__(\n self,\n box_id,\n center,\n height,\n width,\n length,\n angle,\n object_id,\n predicted_state,\n predicted_error,\n settingsControls,\n tracking_idx,\n timestamps,\n islocked\n ):\n self.box_id = box_id\n self.x = center['x']\n self.y = center['y']\n self.center = np.array([self.x, self.y])\n self.height = height\n self.width = width\n self.length = length\n self.angle = angle\n self.settingsControls = settingsControls\n self.object_id = object_id\n self.predicted_error = np.eye(6) * np.array(predicted_error, dtype=np.float32)\n self.predicted_state = np.transpose(np.array(predicted_state, dtype=np.float32))\n self.tracking_idx = tracking_idx\n self.islocked = islocked\n self.timestamps = timestamps\n\n @staticmethod\n def parse_json(json):\n return [BoundingBox(\n json_obj['box_id'],\n json_obj['center'],\n json_obj['height'],\n json_obj['width'],\n json_obj['length'],\n json_obj['angle'],\n json_obj['object_id'],\n 
json_obj['predicted_state'],\n json_obj['predicted_error'],\n json_obj['settingsControls'],\n json_obj['tracking_idx'],\n json_obj['timestamps'],\n json_obj['islocked'],\n ) for json_obj in json]\n\n def filter_points(self, pointcloud, bounding_factor=.1):\n (l, w, theta) = (self.length, self.width, self.angle)\n center = np.array([[self.x, self.y]])\n rotated_points = pointcloud.rigid_transform(theta, center)\n (x, y) = (rotated_points[:, 0], rotated_points[:, 1])\n indices_within_width = np.where(np.abs(x) <= w / 2 * (1\n + bounding_factor))[0]\n indices_within_length = np.where(np.abs(y) <= l / 2 * (1\n + bounding_factor))[0]\n\n bounded_indices = np.intersect1d(indices_within_width,\n indices_within_length)\n return bounded_indices\n\n def grow_pointcloud(self, pointcloud):\n \n _, filtered_pc = self.filter_pointcloud(np.copy(pointcloud))\n D = pairwise_distances(pointcloud[:,:2], filtered_pc[:,:2])\n _min_results = np.amin(D, axis=1)\n print(\"D\", D.shape, _min_results.shape)\n \n \n \n def filter_pointcloud(self, pointcloud, updated_size = 0):\n theta = self.angle\n transformed_pointcloud = homogeneous_transformation(pointcloud,\n self.center, -theta)\n indices = \\\n np.intersect1d(np.where(np.abs(transformed_pointcloud[:,\n 0]) <= (self.width+updated_size) / 2)[0],\n np.where(np.abs(transformed_pointcloud[:,\n 1]) <= (self.length+updated_size) / 2)[0])\n return (np.delete(pointcloud, indices, axis=0),\n pointcloud[indices, :])\n\n def get_corners(self):\n c1 = np.array([-self.width / 2, -self.length / 2])\n c2 = np.array([self.width / 2, -self.length / 2])\n c3 = np.array([self.width / 2, self.length / 2])\n c4 = np.array([-self.width / 2, self.length / 2])\n corners = homogeneous_transformation(np.vstack([c1, c2, c3,\n c4]), np.zeros(2), self.angle) + self.center\n return corners\n\n\ndef homogeneous_transformation(points, translation, theta):\n return (points[:, :2] - translation).dot(rotation_matrix(theta).T)\n\n\ndef rotation_matrix(theta):\n return np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta),\n np.cos(theta)]])\n\n\n"
] | [
[
"numpy.diag",
"sklearn.metrics.pairwise_distances",
"numpy.abs",
"numpy.min",
"numpy.amin",
"numpy.eye",
"numpy.matmul",
"numpy.cos",
"numpy.sin",
"numpy.max",
"numpy.intersect1d",
"numpy.copy",
"numpy.delete",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
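`fixed_annotation_error` in the row above runs a textbook linear Kalman filter over each bounding box: a six-state constant-acceleration model (x, y, vx, vy, ax, ay), a 2-D center measurement, and the numerically safer Joseph form for the covariance update. The cycle it inlines, factored into one function (the function name and argument order are mine):

```python
import numpy as np

def kalman_step(x, P, z, F, Q, H, R):
    # Predict
    x_prior = F @ x
    P_prior = F @ P @ F.T + Q
    # Update with measurement z (the observed box center)
    y = z - H @ x_prior                       # innovation
    S = H @ P_prior @ H.T + R                 # innovation covariance
    K = P_prior @ H.T @ np.linalg.inv(S)      # Kalman gain
    x_post = x_prior + K @ y
    I_KH = np.eye(len(x)) - K @ H
    P_post = I_KH @ P_prior @ I_KH.T + K @ R @ K.T   # Joseph form, as in models.py
    return x_post, P_post
```

The source then overwrites `x_hat_k[2:]` with finite-difference velocities and accelerations, deliberately discarding the filtered ones; its own comment ("Force using previous velocity and acceleration") marks that choice.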
Maaitrayo/3D-reconstruction-from-Stereo-image | [
"7bafc6c7d88f317738c48ccbc2202163b0dc444f"
] | [
"3D_reconstruction_V1.py"
] | [
"import cv2 \nimport numpy as np\nimport open3d as o3d\n\nleft_img_path = '/home/maaitrayo/Autonomous Vehicle/data_odometry_gray/dataset/sequences/21/image_0/000000.png'\nright_img_path = '/home/maaitrayo/Autonomous Vehicle/data_odometry_gray/dataset/sequences/21/image_1/000000.png'\n\nimg_left = cv2.imread(left_img_path)\nimg_right = cv2.imread(right_img_path)\n\n#cv2.imshow('left image', img_left)\n#cv2.imshow('right image', img_right)\n\nblock = 15\nP1 = block * block * 8\nP2 = block * block * 32\ndisparityEngine = cv2.StereoSGBM_create(minDisparity=0,numDisparities=16, blockSize=block, P1=P1, P2=P2)\ndisparity = disparityEngine.compute(img_left, img_right).astype(np.float32)\n#print(disparity)\n#cv2.imshow('disparity', disparity/255)\n\nrows = img_right.shape[0]\ncolumns = img_right.shape[1]\n\ntotal_points =rows*columns\npoint_cloud = np.zeros([111294,3])\npoint_cloud_world = np.zeros([111294,3])\nproject_ini = np.ones([4,1])\nproject_final = np.ones([4,1])\n#print(len(point_cloud))\n\n#print(total_points)\n#print(rows*columns)\nz=0\na=0\nfx = 718.856 \nfy = 718.856 \ncx = 607.1928 \ncy = 185.2157\nb = 0.573\nQ = np.zeros([4,4])\nQ[0][0] = 1\nQ[1][1] = 1\nQ[0][3] = -cx\nQ[1][3] = -cy\nQ[2][3] = -fx\nQ[3][2] = -1/b\nprint(Q)\n\nQ2 = np.float32([[1,0,0,0],\n\t\t\t\t[0,-1,0,0],\n\t\t\t\t[0,0,fx*0.05,0], #Focal length multiplication obtained experimentally. \n\t\t\t\t[0,0,0,1]])\n\n\nfor r in range(rows):\n\tfor c in range(columns):\n\t\tif(disparity[r][c] <= 0.0 or disparity[r][c] >= 96.0 ):\n\t\t\tcontinue\n\n\t\tx = (c - cx) / fx\n\t\ty = (r - cy) / fy\n\t\tdepth = (fx*b) / disparity[r][c]\n\t\tpoint_cloud[z][0] = x * depth\n\t\tpoint_cloud[z][1] = y * depth\n\t\tpoint_cloud[z][2] = depth\n\n\t\tproject_ini[0][0] = point_cloud[z][0]\n\t\tproject_ini[1][0] = point_cloud[z][1]\n\t\tproject_ini[2][0] = point_cloud[z][2]\n\n\t\tproject_final = np.dot(Q,project_ini)\n\n\t\tpoint_cloud_world[z][0] = project_final[0][0]\n\t\tpoint_cloud_world[z][1] = project_final[1][0]\n\t\tpoint_cloud_world[z][2] = project_final[2][0]\n\t\t\n\t\tz = z+1\n\nprint(point_cloud)\nprint(point_cloud_world)\nprint(project_final)\nprint('points3D')\npoints3D = cv2.reprojectImageTo3D(disparity, Q2)\nprint(points3D)\npcd = o3d.geometry.PointCloud()\n#pcd.points = o3d.utility.Vector3dVector(point_cloud_world)\npcd.points = o3d.utility.Vector3dVector(points3D)\no3d.visualization.draw_geometries([pcd])\n\n\n\ncv2.waitKey(0)"
] | [
[
"numpy.dot",
"numpy.zeros",
"numpy.float32",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
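The nested pixel loop above applies the pinhole back-projection depth = fx·b/d, x = (c − cx)/fx · depth, y = (r − cy)/fy · depth. Two caveats: `StereoSGBM.compute` returns fixed-point disparities scaled by 16 (the script compares the raw values against 96.0 directly), and the point-cloud size 111294 is hard-coded for this one frame. A vectorized sketch of the same math; the function name and the /16.0 rescale are my additions:

```python
import numpy as np

def disparity_to_points(disparity, fx, fy, cx, cy, b, d_max=96.0):
    disparity = disparity / 16.0                     # undo SGBM's fixed-point scale
    rows, cols = disparity.shape
    c, r = np.meshgrid(np.arange(cols), np.arange(rows))
    valid = (disparity > 0.0) & (disparity < d_max)
    depth = np.zeros_like(disparity)
    depth[valid] = fx * b / disparity[valid]         # stereo depth from baseline b
    x = (c - cx) / fx * depth
    y = (r - cy) / fy * depth
    return np.stack([x, y, depth], axis=-1)[valid]   # (N, 3); invalid pixels dropped
```

The result feeds Open3D exactly as the script's last lines do: `pcd.points = o3d.utility.Vector3dVector(pts)`.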
frankkloster/dogs-vs-cats | [
"4dc060fd860c73d4a2716eb5f4202211376745c3"
] | [
"keras_trainer/task.py"
] | [
"from keras.applications import VGG16\nimport os\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\n\nbase_dir = 'data/'\n\nconv_base = VGG16(weights='imagenet',\n include_top=False,\n input_shape=(150, 150, 3))\n\n\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'val')\ntest_dir = os.path.join(base_dir, 'test')\n\ndatagen = ImageDataGenerator(rescale=1./255)\nbatch_size = 20\n\ndef extract_features(directory, sample_count):\n features = np.zeros(shape=(sample_count, 4, 4, 512))\n labels = np.zeros(shape=(sample_count))\n generator = datagen.flow_from_directory(\n directory,\n target_size=(150, 150),\n batch_size=batch_size,\n class_mode='binary')\n i = 0\n for inputs_batch, labels_batch in generator:\n features_batch = conv_base.predict(inputs_batch)\n features[i * batch_size : (i + 1) * batch_size] = features_batch\n labels[i * batch_size : (i + 1) * batch_size] = labels_batch\n i += 1\n if i * batch_size >= sample_count:\n # Note that since generators yield data indefinitely in a loop,\n # we must `break` after every image has been seen once.\n break\n return features, labels\n\ntrain_features, train_labels = extract_features(train_dir, 2000)\nvalidation_features, validation_labels = extract_features(validation_dir, 1000)\ntest_features, test_labels = extract_features(test_dir, 1000)\n\ntrain_features = np.reshape(train_features, (2000, 4 * 4 * 512))\nvalidation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))\ntest_features = np.reshape(test_features, (1000, 4 * 4 * 512))\n\nmodel = models.Sequential()\nmodel.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(optimizer=optimizers.RMSprop(lr=2e-5),\n loss='binary_crossentropy',\n metrics=['acc'])\n\nhistory = model.fit(train_features, train_labels,\n epochs=30,\n batch_size=20,\n validation_data=(validation_features, validation_labels))"
] | [
[
"numpy.reshape",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
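The training script above is the fast variant of transfer learning: VGG16 conv features are extracted once with the frozen base, flattened to 4·4·512, and a small dense head is trained on them. A hypothetical continuation under the same setup (the image path below is a placeholder), evaluating the head and classifying one new image through the identical base-then-head chain:

```python
# Evaluate the dense head on the held-out features extracted earlier.
test_loss, test_acc = model.evaluate(test_features, test_labels)
print('test accuracy:', test_acc)

# Classify a single image through the same VGG16 base + flatten + head chain.
from keras.preprocessing import image as kimage
img = kimage.load_img('data/test/cats/cat.1500.jpg',   # placeholder path
                      target_size=(150, 150))
x = kimage.img_to_array(img)[None] / 255.0             # (1, 150, 150, 3), rescaled
feat = conv_base.predict(x).reshape(1, 4 * 4 * 512)    # same flattening as training
print(model.predict(feat)[0, 0])                       # sigmoid P(class 1)
```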
titulebolide/variometer | [
"7e5fbacdb9c403d11dd01abc6f5e20db4b922756"
] | [
"test/models/model10.py"
] | [
"import numpy as np\n\ndef model(td,dt):\n alpha = 100/8\n beta = 1\n\n f = lambda X,U : np.array([\n [X[0,0] + dt*(U[0,0]-X[3,0])],\n [X[1,0] + dt*beta*(X[2,0] - X[1,0])],\n [X[2,0] - dt*alpha*X[0,0]],\n [X[3,0]]\n ])\n F = lambda X,U : np.array([\n [1, 0, 0, -dt],\n [0, 1-dt*beta, dt*beta,0],\n [-dt*alpha, 0, 1,0],\n [0,0,0,1]\n ])\n h = lambda X : np.array([\n [X[1,0]]\n ])\n H = lambda X : np.array([\n [0,1,0,0]\n ])\n\n X = np.array([\n [0],\n [84300],\n [84300],\n [9.81]\n ])\n P = np.array([\n [0.2,0,0,0],\n [0,10,0,0],\n [0,0,10,0],\n [0,0,0,0.1]\n ])**2\n Q = np.array([\n [0.2,0,0,0],\n [0,10,0,0],\n [0,0,10,0],\n [0,0,0,0.1]\n ])**2\n R = np.array([\n [10]\n ])**2\n\n def get_U_Z(td):\n return np.array([[td.az_capt]]),np.array([[td.p_capt]])\n\n return f, F, h, H, X, P, Q, R, get_U_Z\n\ndata_index = {\n \"vz\" : 0,\n \"Pint\" : 1,\n \"Pext\" : 2,\n \"g\" : 3\n}\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
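model10.py above packages only the EKF ingredients — transition f with Jacobian F, measurement h with Jacobian H, initial X/P, and noise Q/R — while the filter loop lives elsewhere in the repo. A sketch of how such a tuple is typically consumed, assuming sample objects expose the `az_capt` (vertical acceleration) and `p_capt` (pressure) fields that `get_U_Z` reads:

```python
import numpy as np

def ekf_run(model_fn, samples, dt):
    f, F, h, H, X, P, Q, R, get_U_Z = model_fn(samples[0], dt)
    vz = []
    for td in samples:
        U, Z = get_U_Z(td)
        Fk = F(X, U)                 # Jacobian at the prior state (constant here)
        X = f(X, U)                  # nonlinear predict
        P = Fk @ P @ Fk.T + Q
        y = Z - h(X)                 # pressure innovation
        Hk = H(X)
        K = P @ Hk.T @ np.linalg.inv(Hk @ P @ Hk.T + R)
        X = X + K @ y
        P = (np.eye(len(X)) - K @ Hk) @ P
        vz.append(float(X[0, 0]))    # data_index["vz"] == 0
    return vz
```

With alpha coupling vertical speed into the pressure state and the fourth state tracking the accelerometer's gravity bias (initialized at 9.81), the first state component is the variometer's output: estimated vz.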
jvictor42/astropy | [
"37a441d1f1f80d147e9e71bdcc93bfaac1d58b3c"
] | [
"astropy/cosmology/tests/test_cosmology.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom io import StringIO\n\nimport pytest\n\nimport numpy as np\n\nimport astropy.constants as const\nimport astropy.units as u\nfrom astropy.cosmology import Cosmology, flrw, funcs\nfrom astropy.cosmology.realizations import Planck13, Planck18, default_cosmology\nfrom astropy.units import allclose\nfrom astropy.utils.compat.optional_deps import HAS_SCIPY\nfrom astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning\n\n\ndef test_flrw_moved_deprecation():\n \"\"\"Test the deprecation warning about the move of FLRW classes.\"\"\"\n with pytest.warns(AstropyDeprecationWarning):\n from astropy.cosmology.core import FLRW\n\n assert FLRW is flrw.FLRW\n\n\ndef test_init():\n \"\"\" Tests to make sure the code refuses inputs it is supposed to\"\"\"\n with pytest.raises(ValueError):\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=-0.27)\n with pytest.raises(ValueError):\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Neff=-1)\n with pytest.raises(ValueError):\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27,\n Tcmb0=u.Quantity([0.0, 2], u.K))\n with pytest.raises(ValueError):\n h0bad = u.Quantity([70, 100], u.km / u.s / u.Mpc)\n cosmo = flrw.FlatLambdaCDM(H0=h0bad, Om0=0.27)\n with pytest.raises(ValueError):\n bad_mnu = u.Quantity([-0.3, 0.2, 0.1], u.eV)\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu)\n with pytest.raises(ValueError):\n bad_mnu = u.Quantity([0.15, 0.2, 0.1], u.eV)\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, Neff=2, m_nu=bad_mnu)\n with pytest.raises(ValueError):\n bad_mnu = u.Quantity([-0.3, 0.2], u.eV) # 2, expecting 3\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu)\n with pytest.raises(ValueError):\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=-0.04)\n with pytest.raises(ValueError):\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=0.4)\n with pytest.raises(ValueError):\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27)\n cosmo.Ob(1)\n with pytest.raises(ValueError):\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27)\n cosmo.Odm(1)\n with pytest.raises(TypeError):\n default_cosmology.validate(4)\n\n\ndef test_immutability():\n \"\"\"Test immutability of cosmologies.\"\"\"\n cosmo = flrw.FlatLambdaCDM(70, 0.3)\n\n for attr in [*cosmo.__parameters__, \"name\"]:\n with pytest.raises(AttributeError):\n setattr(cosmo, attr, None)\n\n # The metadata is NOT immutable\n assert \"a\" not in cosmo.meta\n cosmo.meta[\"a\"] = 1\n assert \"a\" in cosmo.meta\n\n\ndef test_basic():\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0, Neff=3.04,\n Ob0=0.05, name=\"test\", meta={\"a\": \"b\"})\n assert allclose(cosmo.Om0, 0.27)\n assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)\n assert allclose(cosmo.Ob0, 0.05)\n assert allclose(cosmo.Odm0, 0.27 - 0.05)\n # This next test will fail if astropy.const starts returning non-mks\n # units by default; see the comment at the top of core.py\n assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)\n assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)\n assert allclose(cosmo.Ok0, 0.0)\n assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,\n 1.0, rtol=1e-6)\n assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +\n cosmo.Onu(1), 1.0, rtol=1e-6)\n assert allclose(cosmo.Tcmb0, 2.0 * u.K)\n assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)\n assert allclose(cosmo.Neff, 3.04)\n assert allclose(cosmo.h, 0.7)\n assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)\n assert cosmo.name == \"test\"\n assert 
cosmo.meta == {\"a\": \"b\"}\n\n # Make sure setting them as quantities gives the same results\n H0 = u.Quantity(70, u.km / (u.s * u.Mpc))\n T = u.Quantity(2.0, u.K)\n cosmo = flrw.FlatLambdaCDM(H0=H0, Om0=0.27, Tcmb0=T, Neff=3.04, Ob0=0.05)\n assert allclose(cosmo.Om0, 0.27)\n assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)\n assert allclose(cosmo.Ob0, 0.05)\n assert allclose(cosmo.Odm0, 0.27 - 0.05)\n assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)\n assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)\n assert allclose(cosmo.Ok0, 0.0)\n assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,\n 1.0, rtol=1e-6)\n assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +\n cosmo.Onu(1), 1.0, rtol=1e-6)\n assert allclose(cosmo.Tcmb0, 2.0 * u.K)\n assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)\n assert allclose(cosmo.Neff, 3.04)\n assert allclose(cosmo.h, 0.7)\n assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)\n\n\[email protected]('not HAS_SCIPY')\ndef test_units():\n \"\"\" Test if the right units are being returned\"\"\"\n\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)\n assert cosmo.comoving_distance(1.0).unit == u.Mpc\n assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc\n assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc\n assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc\n assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc\n assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc\n assert cosmo.luminosity_distance(1.0).unit == u.Mpc\n assert cosmo.lookback_time(1.0).unit == u.Gyr\n assert cosmo.lookback_distance(1.0).unit == u.Mpc\n assert cosmo.H0.unit == u.km / u.Mpc / u.s\n assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s\n assert cosmo.Tcmb0.unit == u.K\n assert cosmo.Tcmb(1.0).unit == u.K\n assert cosmo.Tcmb([0.0, 1.0]).unit == u.K\n assert cosmo.Tnu0.unit == u.K\n assert cosmo.Tnu(1.0).unit == u.K\n assert cosmo.Tnu([0.0, 1.0]).unit == u.K\n assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc\n assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc\n assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin\n assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin\n assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3\n assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3\n assert cosmo.age(1.0).unit == u.Gyr\n assert cosmo.distmod(1.0).unit == u.mag\n\n\[email protected]('not HAS_SCIPY')\ndef test_distance_broadcast():\n \"\"\" Test array shape broadcasting for functions with single\n redshift inputs\"\"\"\n\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27,\n m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))\n z = np.linspace(0.1, 1, 6)\n z_reshape2d = z.reshape(2, 3)\n z_reshape3d = z.reshape(3, 2, 1)\n # Things with units\n methods = ['comoving_distance', 'luminosity_distance',\n 'comoving_transverse_distance', 'angular_diameter_distance',\n 'distmod', 'lookback_time', 'age', 'comoving_volume',\n 'differential_comoving_volume', 'kpc_comoving_per_arcmin']\n for method in methods:\n g = getattr(cosmo, method)\n value_flat = g(z)\n assert value_flat.shape == z.shape\n value_2d = g(z_reshape2d)\n assert value_2d.shape == z_reshape2d.shape\n value_3d = g(z_reshape3d)\n assert value_3d.shape == z_reshape3d.shape\n assert value_flat.unit == value_2d.unit\n assert value_flat.unit == value_3d.unit\n assert allclose(value_flat, value_2d.flatten())\n assert allclose(value_flat, value_3d.flatten())\n\n # Also test unitless ones\n methods = 
['absorption_distance', 'Om', 'Ode', 'Ok', 'H',\n 'w', 'de_density_scale', 'Onu', 'Ogamma',\n 'nu_relative_density']\n for method in methods:\n g = getattr(cosmo, method)\n value_flat = g(z)\n assert value_flat.shape == z.shape\n value_2d = g(z_reshape2d)\n assert value_2d.shape == z_reshape2d.shape\n value_3d = g(z_reshape3d)\n assert value_3d.shape == z_reshape3d.shape\n assert allclose(value_flat, value_2d.flatten())\n assert allclose(value_flat, value_3d.flatten())\n\n # Test some dark energy models\n methods = ['Om', 'Ode', 'w', 'de_density_scale']\n for tcosmo in [flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),\n flrw.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),\n flrw.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),\n flrw.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5,\n wp=-1.2, wa=-0.2, zp=0.9),\n flrw.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]:\n for method in methods:\n g = getattr(cosmo, method)\n value_flat = g(z)\n assert value_flat.shape == z.shape\n value_2d = g(z_reshape2d)\n assert value_2d.shape == z_reshape2d.shape\n value_3d = g(z_reshape3d)\n assert value_3d.shape == z_reshape3d.shape\n assert allclose(value_flat, value_2d.flatten())\n assert allclose(value_flat, value_3d.flatten())\n\n\[email protected]('not HAS_SCIPY')\ndef test_clone():\n \"\"\"Test clone operation.\"\"\"\n cosmo = flrw.FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.27,\n Tcmb0=3.0 * u.K, name=\"test\", meta={\"a\":\"b\"})\n z = np.linspace(0.1, 3, 15)\n\n # First, test with no changes, which should return same object\n newclone = cosmo.clone()\n assert newclone is cosmo\n\n # Now change H0\n # Note that H0 affects Ode0 because it changes Ogamma0\n newclone = cosmo.clone(H0=60 * u.km / u.s / u.Mpc)\n assert newclone is not cosmo\n assert newclone.__class__ == cosmo.__class__\n assert newclone.name == cosmo.name + \" (modified)\"\n assert not allclose(newclone.H0.value, cosmo.H0.value)\n assert allclose(newclone.H0, 60.0 * u.km / u.s / u.Mpc)\n assert allclose(newclone.Om0, cosmo.Om0)\n assert allclose(newclone.Ok0, cosmo.Ok0)\n assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)\n assert not allclose(newclone.Onu0, cosmo.Onu0)\n assert allclose(newclone.Tcmb0, cosmo.Tcmb0)\n assert allclose(newclone.m_nu, cosmo.m_nu)\n assert allclose(newclone.Neff, cosmo.Neff)\n\n # Compare modified version with directly instantiated one\n cmp = flrw.FlatLambdaCDM(H0=60 * u.km / u.s / u.Mpc, Om0=0.27,\n Tcmb0=3.0 * u.K)\n assert newclone.__class__ == cmp.__class__\n assert cmp.name is None\n assert allclose(newclone.H0, cmp.H0)\n assert allclose(newclone.Om0, cmp.Om0)\n assert allclose(newclone.Ode0, cmp.Ode0)\n assert allclose(newclone.Ok0, cmp.Ok0)\n assert allclose(newclone.Ogamma0, cmp.Ogamma0)\n assert allclose(newclone.Onu0, cmp.Onu0)\n assert allclose(newclone.Tcmb0, cmp.Tcmb0)\n assert allclose(newclone.m_nu, cmp.m_nu)\n assert allclose(newclone.Neff, cmp.Neff)\n assert allclose(newclone.Om(z), cmp.Om(z))\n assert allclose(newclone.H(z), cmp.H(z))\n assert allclose(newclone.luminosity_distance(z),\n cmp.luminosity_distance(z))\n\n # Now try changing multiple things\n newclone = cosmo.clone(name=\"New name\", H0=65 * u.km / u.s / u.Mpc,\n Tcmb0=2.8 * u.K, meta=dict(zz=\"tops\"))\n assert newclone.__class__ == cosmo.__class__\n assert not newclone.name == cosmo.name\n assert not allclose(newclone.H0.value, cosmo.H0.value)\n assert allclose(newclone.H0, 65.0 * u.km / u.s / u.Mpc)\n assert allclose(newclone.Om0, cosmo.Om0)\n assert allclose(newclone.Ok0, cosmo.Ok0)\n assert not allclose(newclone.Ogamma0, 
cosmo.Ogamma0)\n assert not allclose(newclone.Onu0, cosmo.Onu0)\n assert not allclose(newclone.Tcmb0.value, cosmo.Tcmb0.value)\n assert allclose(newclone.Tcmb0, 2.8 * u.K)\n assert allclose(newclone.m_nu, cosmo.m_nu)\n assert allclose(newclone.Neff, cosmo.Neff)\n assert newclone.meta == dict(a=\"b\", zz=\"tops\")\n\n # And direct comparison\n cmp = flrw.FlatLambdaCDM(name=\"New name\", H0=65 * u.km / u.s / u.Mpc,\n Om0=0.27, Tcmb0=2.8 * u.K)\n assert newclone.__class__ == cmp.__class__\n assert newclone.name == cmp.name\n assert allclose(newclone.H0, cmp.H0)\n assert allclose(newclone.Om0, cmp.Om0)\n assert allclose(newclone.Ode0, cmp.Ode0)\n assert allclose(newclone.Ok0, cmp.Ok0)\n assert allclose(newclone.Ogamma0, cmp.Ogamma0)\n assert allclose(newclone.Onu0, cmp.Onu0)\n assert allclose(newclone.Tcmb0, cmp.Tcmb0)\n assert allclose(newclone.m_nu, cmp.m_nu)\n assert allclose(newclone.Neff, cmp.Neff)\n assert allclose(newclone.Om(z), cmp.Om(z))\n assert allclose(newclone.H(z), cmp.H(z))\n assert allclose(newclone.luminosity_distance(z),\n cmp.luminosity_distance(z))\n\n # Try a dark energy class, make sure it can handle w params\n cosmo = flrw.w0waCDM(name=\"test w0wa\", H0=70 * u.km / u.s / u.Mpc,\n Om0=0.27, Ode0=0.5, wa=0.1, Tcmb0=4.0 * u.K)\n newclone = cosmo.clone(w0=-1.1, wa=0.2)\n assert newclone.__class__ == cosmo.__class__\n assert newclone.name == cosmo.name + \" (modified)\"\n assert allclose(newclone.H0, cosmo.H0)\n assert allclose(newclone.Om0, cosmo.Om0)\n assert allclose(newclone.Ode0, cosmo.Ode0)\n assert allclose(newclone.Ok0, cosmo.Ok0)\n assert not allclose(newclone.w0, cosmo.w0)\n assert allclose(newclone.w0, -1.1)\n assert not allclose(newclone.wa, cosmo.wa)\n assert allclose(newclone.wa, 0.2)\n\n # Now test exception if user passes non-parameter\n with pytest.raises(TypeError, match=\"unexpected keyword argument\"):\n newclone = cosmo.clone(not_an_arg=4)\n\n\ndef test_equality():\n \"\"\"Test equality and equivalence.\"\"\"\n # Equality\n assert Planck18 == Planck18\n assert Planck13 != Planck18\n\n # just wrong\n assert Planck18 != 2\n assert 2 != Planck18\n\n # mismatched signatures, both directions.\n newcosmo = flrw.w0waCDM(**Planck18._init_arguments, Ode0=0.6)\n assert newcosmo != Planck18\n assert Planck18 != newcosmo\n\n # different arguments\n newcosmo = Planck18.clone(name=\"modified\")\n assert Planck18 != newcosmo # the name was changed!\n assert newcosmo != Planck18 # double check directions.\n\n\ndef test_xtfuncs():\n \"\"\" Test of absorption and lookback integrand\"\"\"\n cosmo = flrw.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)\n z = np.array([2.0, 3.2])\n assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378,\n rtol=1e-4)\n assert allclose(cosmo.lookback_time_integrand(z),\n [0.10333179, 0.04644541], rtol=1e-4)\n assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402,\n rtol=1e-4)\n assert allclose(cosmo.abs_distance_integrand(z),\n [2.7899584, 3.44104758], rtol=1e-4)\n\n\ndef test_repr():\n \"\"\" Test string representation of built in classes\"\"\"\n cosmo = flrw.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)\n expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, '\n 'Ode0=0.5, Tcmb0=2.725 K, Neff=3.04, m_nu=[0. 0. 0.] 
eV, '\n 'Ob0=None)')\n assert str(cosmo) == expected\n\n cosmo = flrw.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725, m_nu=u.Quantity(0.01, u.eV))\n expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Ode0=0.5, '\n 'Tcmb0=2.725 K, Neff=3.04, m_nu=[0.01 0.01 0.01] eV, '\n 'Ob0=None)')\n assert str(cosmo) == expected\n\n cosmo = flrw.FlatLambdaCDM(50.0, 0.27, Tcmb0=3, Ob0=0.05)\n expected = ('FlatLambdaCDM(H0=50 km / (Mpc s), Om0=0.27, '\n 'Tcmb0=3 K, Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.05)')\n\n assert str(cosmo) == expected\n\n cosmo = flrw.wCDM(60.0, 0.27, 0.6, Tcmb0=2.725, w0=-0.8, name='test1')\n expected = ('wCDM(name=\"test1\", H0=60 km / (Mpc s), Om0=0.27, '\n 'Ode0=0.6, w0=-0.8, Tcmb0=2.725 K, Neff=3.04, '\n 'm_nu=[0. 0. 0.] eV, Ob0=None)')\n assert str(cosmo) == expected\n\n cosmo = flrw.FlatwCDM(65.0, 0.27, w0=-0.6, name='test2')\n expected = ('FlatwCDM(name=\"test2\", H0=65 km / (Mpc s), Om0=0.27, '\n 'w0=-0.6, Tcmb0=0 K, Neff=3.04, m_nu=None, Ob0=None)')\n assert str(cosmo) == expected\n\n cosmo = flrw.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, Tcmb0=2.725, wa=0.1, name='test3')\n expected = ('w0waCDM(name=\"test3\", H0=60 km / (Mpc s), Om0=0.25, '\n 'Ode0=0.4, w0=-0.6, wa=0.1, Tcmb0=2.725 K, Neff=3.04, '\n 'm_nu=[0. 0. 0.] eV, Ob0=None)')\n assert str(cosmo) == expected\n\n cosmo = flrw.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2, name='test4',\n Ob0=0.0456789)\n expected = ('Flatw0waCDM(name=\"test4\", H0=55 km / (Mpc s), Om0=0.35, '\n 'w0=-0.9, wa=-0.2, Tcmb0=0 K, Neff=3.04, m_nu=None, '\n 'Ob0=0.0457)')\n assert str(cosmo) == expected\n\n cosmo = flrw.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2,\n zp=0.3, name='test5')\n expected = ('wpwaCDM(name=\"test5\", H0=50 km / (Mpc s), Om0=0.3, '\n 'Ode0=0.3, wp=-0.9, wa=-0.2, zp=0.3 redshift, Tcmb0=0 K, '\n 'Neff=3.04, m_nu=None, Ob0=None)')\n assert str(cosmo) == expected\n\n cosmo = flrw.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2, Tcmb0=2.725,\n m_nu=u.Quantity([0.001, 0.01, 0.015], u.eV))\n expected = ('w0wzCDM(H0=55 km / (Mpc s), Om0=0.4, Ode0=0.8, w0=-1.05, '\n 'wz=-0.2, Tcmb0=2.725 K, Neff=3.04, '\n 'm_nu=[0.001 0.01 0.015] eV, Ob0=None)')\n assert str(cosmo) == expected\n\n\[email protected]('not HAS_SCIPY')\ndef test_flat_z1():\n \"\"\" Test a flat cosmology at z=1 against several other on-line\n calculators.\n \"\"\"\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)\n z = 1\n\n # Test values were taken from the following web cosmology\n # calculators on 27th Feb 2012:\n\n # Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html\n # (https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)\n # Kempner: http://www.kempner.net/cosmic.php\n # iCosmos: http://www.icosmos.co.uk/index.html\n\n # The order of values below is Wright, Kempner, iCosmos'\n assert allclose(cosmo.comoving_distance(z),\n [3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4)\n assert allclose(cosmo.angular_diameter_distance(z),\n [1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4)\n assert allclose(cosmo.luminosity_distance(z),\n [6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4)\n assert allclose(cosmo.lookback_time(z),\n [7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3)\n assert allclose(cosmo.lookback_distance(z),\n [2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3)\n\n\ndef test_zeroing():\n \"\"\" Tests if setting params to 0s always respects that\"\"\"\n # Make sure Ode = 0 behaves that way\n cosmo = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0)\n assert allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])\n assert allclose(cosmo.Ode(1), 0)\n # Ogamma0 and Onu\n cosmo = flrw.FlatLambdaCDM(H0=70, 
Om0=0.27, Tcmb0=0.0)\n assert allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])\n assert allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])\n assert allclose(cosmo.Onu(1.5), [0, 0, 0, 0])\n assert allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])\n # Obaryon\n cosmo = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Ob0=0.0)\n assert allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])\n\n\n# This class is to test whether the routines work correctly\n# if one only overloads w(z)\nclass test_cos_sub(flrw.FLRW):\n def __init__(self):\n super().__init__(70.0, 0.27, 0.73, Tcmb0=0.0, name=\"test_cos\")\n self._w0 = -0.9\n\n def w(self, z):\n return self._w0 * np.ones_like(z)\n\n\n# Similar, but with neutrinos\nclass test_cos_subnu(flrw.FLRW):\n def __init__(self):\n super().__init__(70.0, 0.27, 0.73, Tcmb0=3.0, m_nu=0.1 * u.eV, name=\"test_cos_nu\")\n self._w0 = -0.8\n\n def w(self, z):\n return self._w0 * np.ones_like(z)\n\n\[email protected]('not HAS_SCIPY')\ndef test_de_subclass():\n # This is the comparison object\n z = [0.2, 0.4, 0.6, 0.9]\n cosmo = flrw.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)\n # Values taken from Ned Wrights advanced cosmo calculator, Aug 17 2012\n assert allclose(cosmo.luminosity_distance(z),\n [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)\n # Now try the subclass that only gives w(z)\n cosmo = test_cos_sub()\n assert allclose(cosmo.luminosity_distance(z),\n [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)\n # Test efunc\n assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)\n assert allclose(cosmo.efunc([0.5, 1.0]),\n [1.31744953, 1.7489240754], rtol=1e-5)\n assert allclose(cosmo.inv_efunc([0.5, 1.0]),\n [0.75904236, 0.57178011], rtol=1e-5)\n # Test de_density_scale\n assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)\n assert allclose(cosmo.de_density_scale([0.5, 1.0]),\n [1.12934694, 1.23114444], rtol=1e-4)\n\n # Add neutrinos for efunc, inv_efunc\n\n\[email protected]('not HAS_SCIPY')\ndef test_varyde_lumdist_mathematica():\n \"\"\"Tests a few varying dark energy EOS models against a mathematica\n computation\"\"\"\n\n # w0wa models\n z = np.array([0.2, 0.4, 0.9, 1.2])\n cosmo = flrw.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)\n assert allclose(cosmo.w0, -1.1)\n assert allclose(cosmo.wa, 0.2)\n\n assert allclose(cosmo.luminosity_distance(z),\n [1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4)\n assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)\n assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),\n [1.0, 0.9246310669529021, 0.9184087000251957])\n\n cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)\n assert allclose(cosmo.luminosity_distance(z),\n [971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4)\n cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5,\n Tcmb0=0.0)\n assert allclose(cosmo.luminosity_distance(z),\n [974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4)\n\n # wpwa models\n cosmo = flrw.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5,\n Tcmb0=0.0)\n assert allclose(cosmo.wp, -1.1)\n assert allclose(cosmo.wa, 0.2)\n assert allclose(cosmo.zp, 0.5)\n assert allclose(cosmo.luminosity_distance(z),\n [1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4)\n\n cosmo = flrw.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9,\n Tcmb0=0.0)\n assert allclose(cosmo.wp, -1.1)\n assert allclose(cosmo.wa, 0.2)\n assert allclose(cosmo.zp, 0.9)\n assert allclose(cosmo.luminosity_distance(z),\n [1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, 
rtol=1e-4)\n\n\[email protected]('not HAS_SCIPY')\ndef test_matter():\n # Test non-relativistic matter evolution\n tcos = flrw.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)\n assert allclose(tcos.Om0, 0.3)\n assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)\n assert allclose(tcos.Om(0), 0.3)\n assert allclose(tcos.Ob(0), 0.045)\n z = np.array([0.0, 0.5, 1.0, 2.0])\n assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455],\n rtol=1e-4)\n assert allclose(tcos.Ob(z),\n [0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4)\n assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636],\n rtol=1e-4)\n # Consistency of dark and baryonic matter evolution with all\n # non-relativistic matter\n assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))\n\n\[email protected]('not HAS_SCIPY')\ndef test_ocurv():\n # Test Ok evolution\n # Flat, boring case\n tcos = flrw.FlatLambdaCDM(70.0, 0.3)\n assert allclose(tcos.Ok0, 0.0)\n assert allclose(tcos.Ok(0), 0.0)\n z = np.array([0.0, 0.5, 1.0, 2.0])\n assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],\n rtol=1e-6)\n\n # Not flat\n tcos = flrw.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))\n assert allclose(tcos.Ok0, 0.2)\n assert allclose(tcos.Ok(0), 0.2)\n assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],\n rtol=1e-4)\n\n # Test the sum; note that Ogamma/Onu are 0\n assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),\n [1.0, 1.0, 1.0, 1.0], rtol=1e-5)\n\n\[email protected]('not HAS_SCIPY')\ndef test_ode():\n # Test Ode evolution, turn off neutrinos, cmb\n tcos = flrw.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)\n assert allclose(tcos.Ode0, 0.7)\n assert allclose(tcos.Ode(0), 0.7)\n z = np.array([0.0, 0.5, 1.0, 2.0])\n assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],\n rtol=1e-5)\n\n\[email protected]('not HAS_SCIPY')\ndef test_ogamma():\n \"\"\"Tests the effects of changing the temperature of the CMB\"\"\"\n\n # Tested against Ned Wright's advanced cosmology calculator,\n # Sep 7 2012. The accuracy of our comparison is limited by\n # how many digits it outputs, which limits our test to about\n # 0.2% accuracy. The NWACC does not allow one\n # to change the number of nuetrino species, fixing that at 3.\n # Also, inspection of the NWACC code shows it uses inaccurate\n # constants at the 0.2% level (specifically, a_B),\n # so we shouldn't expect to match it that well. The integral is\n # also done rather crudely. Therefore, we should not expect\n # the NWACC to be accurate to better than about 0.5%, which is\n # unfortunate, but reflects a problem with it rather than this code.\n # More accurate tests below using Mathematica\n z = np.array([1.0, 10.0, 500.0, 1000.0])\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)\n assert allclose(cosmo.angular_diameter_distance(z),\n [1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4)\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)\n assert allclose(cosmo.angular_diameter_distance(z),\n [1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4)\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)\n assert allclose(cosmo.angular_diameter_distance(z),\n [1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4)\n\n # Next compare with doing the integral numerically in Mathematica,\n # which allows more precision in the test. 
It is at least as\n # good as 0.01%, possibly better\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)\n assert allclose(cosmo.angular_diameter_distance(z),\n [1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5)\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)\n assert allclose(cosmo.angular_diameter_distance(z),\n [1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5)\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)\n assert allclose(cosmo.angular_diameter_distance(z),\n [1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5)\n\n # Just to be really sure, we also do a version where the integral\n # is analytic, which is a Ode = 0 flat universe. In this case\n # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)\n # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.\n Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26\n Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04\n Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2\n Om0 = 1.0 - Or0\n hubdis = (299792.458 / 70.0) * u.Mpc\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)\n targvals = 2.0 * hubdis * \\\n (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)\n assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)\n\n # And integers for z\n assert allclose(cosmo.comoving_distance(z.astype(int)),\n targvals, rtol=1e-5)\n\n # Try Tcmb0 = 4\n Or0 *= (4.0 / 2.725) ** 4\n Om0 = 1.0 - Or0\n cosmo = flrw.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)\n targvals = 2.0 * hubdis * \\\n (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)\n assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)\n\n\[email protected]('not HAS_SCIPY')\ndef test_tcmb():\n cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)\n assert allclose(cosmo.Tcmb0, 2.5 * u.K)\n assert allclose(cosmo.Tcmb(2), 7.5 * u.K)\n z = [0.0, 1.0, 2.0, 3.0, 9.0]\n assert allclose(cosmo.Tcmb(z),\n [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)\n # Make sure it's the same for integers\n z = [0, 1, 2, 3, 9]\n assert allclose(cosmo.Tcmb(z),\n [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)\n\n\[email protected]('not HAS_SCIPY')\ndef test_tnu():\n cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)\n assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)\n assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)\n z = [0.0, 1.0, 2.0, 3.0]\n expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K\n assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)\n\n # Test for integers\n z = [0, 1, 2, 3]\n assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)\n\n\[email protected]('not HAS_SCIPY')\ndef test_efunc_vs_invefunc_flrw():\n \"\"\" Test that efunc and inv_efunc give inverse values\"\"\"\n z0 = 0.5\n z = np.array([0.5, 1.0, 2.0, 5.0])\n\n # FLRW is abstract, so requires test_cos_sub defined earlier\n # This requires scipy, unlike the built-ins, because it\n # calls de_density_scale, which has an integral in it\n cosmo = test_cos_sub()\n assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))\n assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))\n # Add neutrinos\n cosmo = test_cos_subnu()\n assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))\n assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))\n\n\[email protected]('not HAS_SCIPY')\ndef test_kpc_methods():\n cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)\n assert allclose(cosmo.arcsec_per_kpc_comoving(3),\n 0.0317179167 * u.arcsec / 
u.kpc)\n assert allclose(cosmo.arcsec_per_kpc_proper(3),\n 0.1268716668 * u.arcsec / u.kpc)\n assert allclose(cosmo.kpc_comoving_per_arcmin(3),\n 1891.6753126 * u.kpc / u.arcmin)\n assert allclose(cosmo.kpc_proper_per_arcmin(3),\n 472.918828 * u.kpc / u.arcmin)\n\n\[email protected]('not HAS_SCIPY')\ndef test_comoving_volume():\n\n c_flat = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)\n c_open = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)\n c_closed = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)\n\n # test against ned wright's calculator (cubic Gpc)\n redshifts = np.array([0.5, 1, 2, 3, 5, 9])\n wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,\n 3654.802]) * u.Gpc**3\n wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,\n 3123.814]) * u.Gpc**3\n wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,\n 358.992]) * u.Gpc**3\n # The wright calculator isn't very accurate, so we use a rather\n # modest precision\n assert allclose(c_flat.comoving_volume(redshifts), wright_flat,\n rtol=1e-2)\n assert allclose(c_open.comoving_volume(redshifts),\n wright_open, rtol=1e-2)\n assert allclose(c_closed.comoving_volume(redshifts),\n wright_closed, rtol=1e-2)\n\n\[email protected]('not HAS_SCIPY')\ndef test_differential_comoving_volume():\n from scipy.integrate import quad\n\n c_flat = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)\n c_open = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)\n c_closed = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)\n\n # test that integration of differential_comoving_volume()\n # yields same as comoving_volume()\n redshifts = np.array([0.5, 1, 2, 3, 5, 9])\n wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,\n 3654.802]) * u.Gpc**3\n wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,\n 3123.814]) * u.Gpc**3\n wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,\n 358.992]) * u.Gpc**3\n # The wright calculator isn't very accurate, so we use a rather\n # modest precision.\n ftemp = lambda x: c_flat.differential_comoving_volume(x).value\n otemp = lambda x: c_open.differential_comoving_volume(x).value\n ctemp = lambda x: c_closed.differential_comoving_volume(x).value\n # Multiply by solid_angle (4 * pi)\n assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0]\n for redshift in redshifts]) * u.Mpc**3,\n wright_flat, rtol=1e-2)\n assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0]\n for redshift in redshifts]) * u.Mpc**3,\n wright_open, rtol=1e-2)\n assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0]\n for redshift in redshifts]) * u.Mpc**3,\n wright_closed, rtol=1e-2)\n\n\[email protected]('not HAS_SCIPY')\ndef test_flat_open_closed_icosmo():\n \"\"\" Test against the tabulated values generated from icosmo.org\n with three example cosmologies (flat, open and closed).\n \"\"\"\n\n cosmo_flat = \"\"\"\\\n# from icosmo (icosmo.org)\n# Om 0.3 w -1 h 0.7 Ol 0.7\n# z comoving_transvers_dist angular_diameter_dist luminosity_dist\n 0.0000000 0.0000000 0.0000000 0.0000000\n 0.16250000 669.77536 576.15085 778.61386\n 0.32500000 1285.5964 970.26143 1703.4152\n 0.50000000 1888.6254 1259.0836 2832.9381\n 0.66250000 2395.5489 1440.9317 3982.6000\n 0.82500000 2855.5732 1564.6976 5211.4210\n 1.0000000 3303.8288 1651.9144 6607.6577\n 1.1625000 3681.1867 1702.2829 7960.5663\n 1.3250000 4025.5229 1731.4077 9359.3408\n 1.5000000 4363.8558 1745.5423 10909.640\n 1.6625000 4651.4830 1747.0359 12384.573\n 1.8250000 
4916.5970 1740.3883 13889.387\n 2.0000000 5179.8621 1726.6207 15539.586\n 2.1625000 5406.0204 1709.4136 17096.540\n 2.3250000 5616.5075 1689.1752 18674.888\n 2.5000000 5827.5418 1665.0120 20396.396\n 2.6625000 6010.4886 1641.0890 22013.414\n 2.8250000 6182.1688 1616.2533 23646.796\n 3.0000000 6355.6855 1588.9214 25422.742\n 3.1625000 6507.2491 1563.3031 27086.425\n 3.3250000 6650.4520 1537.6768 28763.205\n 3.5000000 6796.1499 1510.2555 30582.674\n 3.6625000 6924.2096 1485.0852 32284.127\n 3.8250000 7045.8876 1460.2876 33996.408\n 4.0000000 7170.3664 1434.0733 35851.832\n 4.1625000 7280.3423 1410.2358 37584.767\n 4.3250000 7385.3277 1386.9160 39326.870\n 4.5000000 7493.2222 1362.4040 41212.722\n 4.6625000 7588.9589 1340.2135 42972.480\n\"\"\"\n\n cosmo_open = \"\"\"\\\n# from icosmo (icosmo.org)\n# Om 0.3 w -1 h 0.7 Ol 0.1\n# z comoving_transvers_dist angular_diameter_dist luminosity_dist\n 0.0000000 0.0000000 0.0000000 0.0000000\n 0.16250000 643.08185 553.18868 747.58265\n 0.32500000 1200.9858 906.40441 1591.3062\n 0.50000000 1731.6262 1154.4175 2597.4393\n 0.66250000 2174.3252 1307.8648 3614.8157\n 0.82500000 2578.7616 1413.0201 4706.2399\n 1.0000000 2979.3460 1489.6730 5958.6920\n 1.1625000 3324.2002 1537.2024 7188.5829\n 1.3250000 3646.8432 1568.5347 8478.9104\n 1.5000000 3972.8407 1589.1363 9932.1017\n 1.6625000 4258.1131 1599.2913 11337.226\n 1.8250000 4528.5346 1603.0211 12793.110\n 2.0000000 4804.9314 1601.6438 14414.794\n 2.1625000 5049.2007 1596.5852 15968.097\n 2.3250000 5282.6693 1588.7727 17564.875\n 2.5000000 5523.0914 1578.0261 19330.820\n 2.6625000 5736.9813 1566.4113 21011.694\n 2.8250000 5942.5803 1553.6158 22730.370\n 3.0000000 6155.4289 1538.8572 24621.716\n 3.1625000 6345.6997 1524.4924 26413.975\n 3.3250000 6529.3655 1509.6799 28239.506\n 3.5000000 6720.2676 1493.3928 30241.204\n 3.6625000 6891.5474 1478.0799 32131.840\n 3.8250000 7057.4213 1462.6780 34052.058\n 4.0000000 7230.3723 1446.0745 36151.862\n 4.1625000 7385.9998 1430.7021 38130.224\n 4.3250000 7537.1112 1415.4199 40135.117\n 4.5000000 7695.0718 1399.1040 42322.895\n 4.6625000 7837.5510 1384.1150 44380.133\n\"\"\"\n\n cosmo_closed = \"\"\"\\\n# from icosmo (icosmo.org)\n# Om 2 w -1 h 0.7 Ol 0.1\n# z comoving_transvers_dist angular_diameter_dist luminosity_dist\n 0.0000000 0.0000000 0.0000000 0.0000000\n 0.16250000 601.80160 517.67879 699.59436\n 0.32500000 1057.9502 798.45297 1401.7840\n 0.50000000 1438.2161 958.81076 2157.3242\n 0.66250000 1718.6778 1033.7912 2857.3019\n 0.82500000 1948.2400 1067.5288 3555.5381\n 1.0000000 2152.7954 1076.3977 4305.5908\n 1.1625000 2312.3427 1069.2914 5000.4410\n 1.3250000 2448.9755 1053.3228 5693.8681\n 1.5000000 2575.6795 1030.2718 6439.1988\n 1.6625000 2677.9671 1005.8092 7130.0873\n 1.8250000 2768.1157 979.86398 7819.9270\n 2.0000000 2853.9222 951.30739 8561.7665\n 2.1625000 2924.8116 924.84161 9249.7167\n 2.3250000 2988.5333 898.80701 9936.8732\n 2.5000000 3050.3065 871.51614 10676.073\n 2.6625000 3102.1909 847.01459 11361.774\n 2.8250000 3149.5043 823.39982 12046.854\n 3.0000000 3195.9966 798.99915 12783.986\n 3.1625000 3235.5334 777.30533 13467.908\n 3.3250000 3271.9832 756.52790 14151.327\n 3.5000000 3308.1758 735.15017 14886.791\n 3.6625000 3339.2521 716.19347 15569.263\n 3.8250000 3368.1489 698.06195 16251.319\n 4.0000000 3397.0803 679.41605 16985.401\n 4.1625000 3422.1142 662.87926 17666.664\n 4.3250000 3445.5542 647.05243 18347.576\n 4.5000000 3469.1805 630.76008 19080.493\n 4.6625000 3489.7534 616.29199 19760.729\n\"\"\"\n\n redshifts, dm, da, dl = 
np.loadtxt(StringIO(cosmo_flat), unpack=1)\n dm = dm * u.Mpc\n da = da * u.Mpc\n dl = dl * u.Mpc\n cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)\n assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)\n assert allclose(cosmo.angular_diameter_distance(redshifts), da)\n assert allclose(cosmo.luminosity_distance(redshifts), dl)\n\n redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=1)\n dm = dm * u.Mpc\n da = da * u.Mpc\n dl = dl * u.Mpc\n cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0)\n assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)\n assert allclose(cosmo.angular_diameter_distance(redshifts), da)\n assert allclose(cosmo.luminosity_distance(redshifts), dl)\n\n redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=1)\n dm = dm * u.Mpc\n da = da * u.Mpc\n dl = dl * u.Mpc\n cosmo = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0)\n assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)\n assert allclose(cosmo.angular_diameter_distance(redshifts), da)\n assert allclose(cosmo.luminosity_distance(redshifts), dl)\n\n\[email protected]('not HAS_SCIPY')\ndef test_integral():\n # Test integer vs. floating point inputs\n cosmo = flrw.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)\n assert allclose(cosmo.comoving_distance(3),\n cosmo.comoving_distance(3.0), rtol=1e-7)\n assert allclose(cosmo.comoving_distance([1, 2, 3, 5]),\n cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),\n rtol=1e-7)\n assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)\n assert allclose(cosmo.efunc([1, 2, 6]),\n cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7)\n assert allclose(cosmo.inv_efunc([1, 2, 6]),\n cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7)\n\n\ndef test_wz():\n cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)\n assert allclose(cosmo.w(1.0), -1.)\n assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),\n [-1., -1, -1, -1, -1, -1])\n\n cosmo = flrw.wCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-0.5)\n assert allclose(cosmo.w(1.0), -0.5)\n assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),\n [-0.5, -0.5, -0.5, -0.5, -0.5, -0.5])\n assert allclose(cosmo.w0, -0.5)\n\n cosmo = flrw.w0wzCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wz=0.5)\n assert allclose(cosmo.w(1.0), -0.5)\n assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),\n [-1.0, -0.75, -0.5, -0.25, 0.15])\n assert allclose(cosmo.w0, -1.0)\n assert allclose(cosmo.wz, 0.5)\n\n cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)\n assert allclose(cosmo.w0, -1.0)\n assert allclose(cosmo.wa, -0.5)\n assert allclose(cosmo.w(1.0), -1.25)\n assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),\n [-1, -1.16666667, -1.25, -1.3, -1.34848485])\n\n cosmo = flrw.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,\n wa=0.2, zp=0.5)\n assert allclose(cosmo.wp, -0.9)\n assert allclose(cosmo.wa, 0.2)\n assert allclose(cosmo.zp, 0.5)\n assert allclose(cosmo.w(0.5), -0.9)\n assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),\n [-0.94848485, -0.93333333, -0.9, -0.84666667,\n -0.82380952, -0.78266667])\n\n\[email protected]('not HAS_SCIPY')\ndef test_de_densityscale():\n cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)\n z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])\n assert allclose(cosmo.de_density_scale(z),\n [1.0, 1.0, 1.0, 1.0, 1.0])\n # Integer check\n assert allclose(cosmo.de_density_scale(3),\n cosmo.de_density_scale(3.0), rtol=1e-7)\n assert allclose(cosmo.de_density_scale([1, 2, 3]),\n cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)\n\n cosmo = flrw.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)\n 
assert allclose(cosmo.de_density_scale(z),\n [1.15369, 1.31453, 1.83712, 3.95285, 6.5479],\n rtol=1e-4)\n assert allclose(cosmo.de_density_scale(3),\n cosmo.de_density_scale(3.0), rtol=1e-7)\n assert allclose(cosmo.de_density_scale([1, 2, 3]),\n cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)\n\n cosmo = flrw.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)\n assert allclose(cosmo.de_density_scale(z),\n [0.746048, 0.5635595, 0.25712378, 0.026664129,\n 0.0035916468], rtol=1e-4)\n assert allclose(cosmo.de_density_scale(3),\n cosmo.de_density_scale(3.0), rtol=1e-7)\n assert allclose(cosmo.de_density_scale([1, 2, 3]),\n cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)\n\n cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)\n assert allclose(cosmo.de_density_scale(z),\n [0.9934201, 0.9767912, 0.897450,\n 0.622236, 0.4458753], rtol=1e-4)\n assert allclose(cosmo.de_density_scale(3),\n cosmo.de_density_scale(3.0), rtol=1e-7)\n assert allclose(cosmo.de_density_scale([1, 2, 3]),\n cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)\n\n cosmo = flrw.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,\n wa=0.2, zp=0.5)\n assert allclose(cosmo.de_density_scale(z),\n [1.012246048, 1.0280102, 1.087439,\n 1.324988, 1.565746], rtol=1e-4)\n assert allclose(cosmo.de_density_scale(3),\n cosmo.de_density_scale(3.0), rtol=1e-7)\n assert allclose(cosmo.de_density_scale([1, 2, 3]),\n cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)\n\n\[email protected]('not HAS_SCIPY')\ndef test_age():\n # WMAP7 but with Omega_relativisitic = 0\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)\n assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)\n assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)\n assert allclose(tcos.age([1., 5.]),\n [5.97113193, 1.20553129] * u.Gyr)\n assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)\n\n # Add relativistic species\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)\n assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)\n assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)\n\n # And massive neutrinos\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0,\n m_nu=0.1 * u.eV)\n assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)\n assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)\n\n\[email protected]('not HAS_SCIPY')\ndef test_distmod():\n # WMAP7 but with Omega_relativisitic = 0\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)\n assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)\n assert allclose(tcos.distmod([1, 5]),\n [44.124857, 48.40167258] * u.mag)\n assert allclose(tcos.distmod([1., 5.]),\n [44.124857, 48.40167258] * u.mag)\n\n\[email protected]('not HAS_SCIPY')\ndef test_neg_distmod():\n # Cosmology with negative luminosity distances (perfectly okay,\n # if obscure)\n tcos = flrw.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)\n assert allclose(tcos.luminosity_distance([50, 100]),\n [16612.44047622, -46890.79092244] * u.Mpc)\n assert allclose(tcos.distmod([50, 100]),\n [46.102167189, 48.355437790944] * u.mag)\n\n\[email protected]('not HAS_SCIPY')\ndef test_critical_density():\n from astropy.constants import codata2014\n\n # WMAP7 but with Omega_relativistic = 0\n # These tests will fail if astropy.const starts returning non-mks\n # units by default; see the comment at the top of core.py.\n # critical_density0 is inversely proportional to G.\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)\n fac = (const.G / codata2014.G).to(u.dimensionless_unscaled).value\n assert allclose(tcos.critical_density0 * 
fac,\n 9.309668456020899e-30 * (u.g / u.cm**3))\n assert allclose(tcos.critical_density0,\n tcos.critical_density(0))\n assert allclose(\n tcos.critical_density([1, 5]) * fac,\n [2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3))\n assert allclose(\n tcos.critical_density([1., 5.]) * fac,\n [2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3))\n\n\[email protected]('not HAS_SCIPY')\ndef test_comoving_distance_z1z2():\n tcos = flrw.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)\n with pytest.raises(ValueError): # test diff size z1, z2 fail\n tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))\n # Comoving distances are invertible\n assert allclose(tcos._comoving_distance_z1z2(1, 2),\n -tcos._comoving_distance_z1z2(2, 1))\n\n z1 = 0, 0, 2, 0.5, 1\n z2 = 2, 1, 1, 2.5, 1.1\n results = (3767.90579253,\n 2386.25591391,\n -1381.64987862,\n 2893.11776663,\n 174.1524683) * u.Mpc\n\n assert allclose(tcos._comoving_distance_z1z2(z1, z2),\n results)\n\n\[email protected]('not HAS_SCIPY')\ndef test_age_in_special_cosmologies():\n \"\"\"Check that age in de Sitter and Einstein-de Sitter Universes work.\n\n Some analytic solutions fail at these critical points.\n \"\"\"\n c_dS = flrw.FlatLambdaCDM(100, 0, Tcmb0=0)\n assert allclose(c_dS.age(z=0), np.inf * u.Gyr)\n assert allclose(c_dS.age(z=1), np.inf * u.Gyr)\n assert allclose(c_dS.lookback_time(z=0), 0 * u.Gyr)\n assert allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr)\n\n c_EdS = flrw.FlatLambdaCDM(100, 1, Tcmb0=0)\n assert allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr)\n assert allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr)\n assert allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr)\n assert allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr)\n\n\[email protected]('not HAS_SCIPY')\ndef test_distance_in_special_cosmologies():\n \"\"\"Check that de Sitter and Einstein-de Sitter Universes both work.\n\n Some analytic solutions fail at these critical points.\n \"\"\"\n c_dS = flrw.FlatLambdaCDM(100, 0, Tcmb0=0)\n assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)\n assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)\n\n c_EdS = flrw.FlatLambdaCDM(100, 1, Tcmb0=0)\n assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)\n assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)\n\n c_dS = flrw.LambdaCDM(100, 0, 1, Tcmb0=0)\n assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)\n assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)\n\n c_EdS = flrw.LambdaCDM(100, 1, 0, Tcmb0=0)\n assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)\n assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)\n\n\[email protected]('not HAS_SCIPY')\ndef test_comoving_transverse_distance_z1z2():\n tcos = flrw.FlatLambdaCDM(100, 0.3, Tcmb0=0.0)\n with pytest.raises(ValueError): # test diff size z1, z2 fail\n tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))\n # Tests that should actually work, target values computed with\n # http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML\n # Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)\n assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2),\n 1313.2232194828466 * u.Mpc)\n\n # In a flat universe comoving distance and comoving transverse\n # distance are identical\n z1 = 0, 0, 2, 0.5, 1\n z2 = 2, 1, 1, 2.5, 1.1\n\n assert allclose(tcos._comoving_distance_z1z2(z1, z2),\n tcos._comoving_transverse_distance_z1z2(z1, z2))\n\n # Test Flat Universe with Omega_M > 1. 
Rarely used, but perfectly valid.\n tcos = flrw.FlatLambdaCDM(100, 1.5, Tcmb0=0.0)\n results = (2202.72682564,\n 1559.51679971,\n -643.21002593,\n 1408.36365679,\n 85.09286258) * u.Mpc\n\n assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),\n results)\n\n # In a flat universe comoving distance and comoving transverse\n # distance are identical\n z1 = 0, 0, 2, 0.5, 1\n z2 = 2, 1, 1, 2.5, 1.1\n\n assert allclose(tcos._comoving_distance_z1z2(z1, z2),\n tcos._comoving_transverse_distance_z1z2(z1, z2))\n # Test non-flat cases to avoid simply testing\n # comoving_distance_z1z2. Test array, array case.\n tcos = flrw.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0)\n results = (3535.931375645655,\n 2226.430046551708,\n -1208.6817970036532,\n 2595.567367601969,\n 151.36592003406884) * u.Mpc\n\n assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),\n results)\n\n # Test positive curvature with scalar, array combination.\n tcos = flrw.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0)\n z1 = 0.1\n z2 = 0, 0.1, 0.2, 0.5, 1.1, 2\n results = (-281.31602666724865,\n 0.,\n 248.58093707820436,\n 843.9331377460543,\n 1618.6104987686672,\n 2287.5626543279927) * u.Mpc\n\n assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),\n results)\n\n\[email protected]('not HAS_SCIPY')\ndef test_angular_diameter_distance_z1z2():\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)\n with pytest.raises(ValueError): # test diff size z1, z2 fail\n tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5])\n # Tests that should actually work\n assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),\n 646.22968662822018 * u.Mpc)\n\n z1 = 2 # Separate test for z2<z1, returns negative value with warning\n z2 = 1\n results = -969.34452994 * u.Mpc\n with pytest.warns(AstropyUserWarning, match='less than first redshift'):\n assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results)\n\n z1 = 0, 0, 0.5, 1\n z2 = 2, 1, 2.5, 1.1\n results = (1760.0628637762106,\n 1670.7497657219858,\n 1159.0970895962193,\n 115.72768186186921) * u.Mpc\n\n assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2),\n results)\n\n z1 = 0.1\n z2 = 0.1, 0.2, 0.5, 1.1, 2\n results = (0.,\n 332.09893173,\n 986.35635069,\n 1508.37010062,\n 1621.07937976) * u.Mpc\n assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2),\n results)\n\n # Non-flat (positive Ok0) test\n tcos = flrw.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)\n assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),\n 620.1175337852428 * u.Mpc)\n # Non-flat (negative Ok0) test\n tcos = flrw.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0)\n assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),\n 228.42914659246014 * u.Mpc)\n\n\[email protected]('not HAS_SCIPY')\ndef test_absorption_distance():\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)\n assert allclose(tcos.absorption_distance([1, 3]),\n [1.72576635, 7.98685853])\n assert allclose(tcos.absorption_distance([1., 3.]),\n [1.72576635, 7.98685853])\n assert allclose(tcos.absorption_distance(3), 7.98685853)\n assert allclose(tcos.absorption_distance(3.), 7.98685853)\n\n\[email protected]('not HAS_SCIPY')\ndef test_massivenu_basic():\n # Test no neutrinos case\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Neff=4.05,\n Tcmb0=2.725 * u.K, m_nu=0)\n assert allclose(tcos.Neff, 4.05)\n assert not tcos.has_massive_nu\n mnu = tcos.m_nu\n assert len(mnu) == 4\n assert mnu.unit == u.eV\n assert allclose(mnu, [0.0, 0.0, 0.0, 0.0] * u.eV)\n assert allclose(tcos.nu_relative_density(1.), 0.22710731766 * 4.05,\n rtol=1e-6)\n assert 
allclose(tcos.nu_relative_density(1), 0.22710731766 * 4.05,\n rtol=1e-6)\n\n # Alternative no neutrinos case\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0 * u.K,\n m_nu=str((0.4 * u.eV).to(u.g, u.mass_energy())))\n assert not tcos.has_massive_nu\n assert tcos.m_nu is None\n\n # Test basic setting, retrieval of values\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725 * u.K,\n m_nu=u.Quantity([0.0, 0.01, 0.02], u.eV))\n assert tcos.has_massive_nu\n mnu = tcos.m_nu\n assert len(mnu) == 3\n assert mnu.unit == u.eV\n assert allclose(mnu, [0.0, 0.01, 0.02] * u.eV)\n\n # All massive neutrinos case\n tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725,\n m_nu=u.Quantity(0.1, u.eV), Neff=3.1)\n assert allclose(tcos.Neff, 3.1)\n assert tcos.has_massive_nu\n mnu = tcos.m_nu\n assert len(mnu) == 3\n assert mnu.unit == u.eV\n assert allclose(mnu, [0.1, 0.1, 0.1] * u.eV)\n\n\[email protected]('not HAS_SCIPY')\ndef test_distances():\n # Test distance calculations for various special case\n # scenarios (no relativistic species, normal, massive neutrinos)\n # These do not come from external codes -- they are just internal\n # checks to make sure nothing changes if we muck with the distance\n # calculators\n\n z = np.array([1.0, 2.0, 3.0, 4.0])\n\n # The pattern here is: no relativistic species, the relativistic\n # species with massless neutrinos, then massive neutrinos\n cos = flrw.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0)\n assert allclose(cos.comoving_distance(z),\n [2953.93001902, 4616.7134253, 5685.07765971,\n 6440.80611897] * u.Mpc, rtol=1e-4)\n cos = flrw.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(0.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [3037.12620424, 4776.86236327, 5889.55164479,\n 6671.85418235] * u.Mpc, rtol=1e-4)\n cos = flrw.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(10.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2471.80626824, 3567.1902565, 4207.15995626,\n 4638.20476018] * u.Mpc, rtol=1e-4)\n # Flat\n cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0)\n assert allclose(cos.comoving_distance(z),\n [3180.83488552, 5060.82054204, 6253.6721173,\n 7083.5374303] * u.Mpc, rtol=1e-4)\n cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(0.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [3180.42662867, 5059.60529655, 6251.62766102,\n 7080.71698117] * u.Mpc, rtol=1e-4)\n cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(10.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2337.54183142, 3371.91131264, 3988.40711188,\n 4409.09346922] * u.Mpc, rtol=1e-4)\n # Add w\n cos = flrw.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0)\n assert allclose(cos.comoving_distance(z),\n [3216.8296894, 5117.2097601, 6317.05995437,\n 7149.68648536] * u.Mpc, rtol=1e-4)\n cos = flrw.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(0.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [3143.56537758, 5000.32196494, 6184.11444601,\n 7009.80166062] * u.Mpc, rtol=1e-4)\n cos = flrw.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(10.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2337.76035371, 3372.1971387, 3988.71362289,\n 4409.40817174] * u.Mpc, rtol=1e-4)\n # Non-flat w\n cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0)\n assert allclose(cos.comoving_distance(z),\n [2849.6163356, 4428.71661565, 5450.97862778,\n 6179.37072324] * u.Mpc, rtol=1e-4)\n cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(0.0, u.eV))\n assert 
allclose(cos.comoving_distance(z),\n [2904.35580229, 4511.11471267, 5543.43643353,\n 6275.9206788] * u.Mpc, rtol=1e-4)\n cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(10.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2473.32522734, 3581.54519631, 4232.41674426,\n 4671.83818117] * u.Mpc, rtol=1e-4)\n # w0wa\n cos = flrw.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0)\n assert allclose(cos.comoving_distance(z),\n [2937.7807638, 4572.59950903, 5611.52821924,\n 6339.8549956] * u.Mpc, rtol=1e-4)\n cos = flrw.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(0.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2907.34722624, 4539.01723198, 5593.51611281,\n 6342.3228444] * u.Mpc, rtol=1e-4)\n cos = flrw.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(10.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2507.18336722, 3633.33231695, 4292.44746919,\n 4736.35404638] * u.Mpc, rtol=1e-4)\n # Flatw0wa\n cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0)\n assert allclose(cos.comoving_distance(z),\n [3123.29892781, 4956.15204302, 6128.15563818,\n 6948.26480378] * u.Mpc, rtol=1e-4)\n cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(0.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [3122.92671907, 4955.03768936, 6126.25719576,\n 6945.61856513] * u.Mpc, rtol=1e-4)\n cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(10.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2337.70072701, 3372.13719963, 3988.6571093,\n 4409.35399673] * u.Mpc, rtol=1e-4)\n # wpwa\n cos = flrw.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0)\n assert allclose(cos.comoving_distance(z),\n [2954.68975298, 4599.83254834, 5643.04013201,\n 6373.36147627] * u.Mpc, rtol=1e-4)\n cos = flrw.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1,\n Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2919.00656215, 4558.0218123, 5615.73412391,\n 6366.10224229] * u.Mpc, rtol=1e-4)\n cos = flrw.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0,\n Neff=4, m_nu=u.Quantity(5.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2629.48489827, 3874.13392319, 4614.31562397,\n 5116.51184842] * u.Mpc, rtol=1e-4)\n\n # w0wz\n cos = flrw.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0)\n assert allclose(cos.comoving_distance(z),\n [3051.68786716, 4756.17714818, 5822.38084257,\n 6562.70873734] * u.Mpc, rtol=1e-4)\n cos = flrw.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1,\n Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2997.8115653, 4686.45599916, 5764.54388557,\n 6524.17408738] * u.Mpc, rtol=1e-4)\n cos = flrw.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0,\n Neff=4, m_nu=u.Quantity(5.0, u.eV))\n assert allclose(cos.comoving_distance(z),\n [2676.73467639, 3940.57967585, 4686.90810278,\n 5191.54178243] * u.Mpc, rtol=1e-4)\n\n # Also test different numbers of massive neutrinos\n # for FlatLambdaCDM to give the scalar nu density functions a\n # work out\n cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,\n m_nu=u.Quantity([10.0, 0, 0], u.eV))\n assert allclose(cos.comoving_distance(z),\n [2777.71589173, 4186.91111666, 5046.0300719,\n 5636.10397302] * u.Mpc, rtol=1e-4)\n cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,\n m_nu=u.Quantity([10.0, 5, 0], u.eV))\n assert allclose(cos.comoving_distance(z),\n [2636.48149391, 
3913.14102091, 4684.59108974,\n 5213.07557084] * u.Mpc, rtol=1e-4)\n cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,\n m_nu=u.Quantity([4.0, 5, 9], u.eV))\n assert allclose(cos.comoving_distance(z),\n [2563.5093049, 3776.63362071, 4506.83448243,\n 5006.50158829] * u.Mpc, rtol=1e-4)\n cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2,\n m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV))\n assert allclose(cos.comoving_distance(z),\n [2525.58017482, 3706.87633298, 4416.58398847,\n 4901.96669755] * u.Mpc, rtol=1e-4)\n\n\[email protected]('not HAS_SCIPY')\ndef test_massivenu_density():\n # Testing neutrino density calculation\n\n # Simple test cosmology, where we compare rho_nu and rho_gamma\n # against the exact formula (eq 24/25 of Komatsu et al. 2011)\n # computed using Mathematica. The approximation we use for f(y)\n # is only good to ~ 0.5% (with some redshift dependence), so that's\n # what we test to.\n ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])\n nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)\n # First try 3 massive neutrinos, all 100 eV -- note this is a universe\n # seriously dominated by neutrinos!\n tcos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(100.0, u.eV))\n assert tcos.has_massive_nu\n assert tcos.Neff == 3\n nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323,\n 15633.5, 171.801])\n assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)\n assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)\n\n # Next, slightly less massive\n tcos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(0.25, u.eV))\n nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312,\n 39.1005, 1.11086])\n assert allclose(tcos.nu_relative_density(ztest), nurel_exp,\n rtol=5e-3)\n\n # For this one also test Onu directly\n onu_exp = np.array([0.01890217, 0.05244681, 0.0638236,\n 0.06999286, 0.1344951])\n assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)\n\n # And fairly light\n tcos = flrw.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3,\n m_nu=u.Quantity(0.01, u.eV))\n\n nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348,\n 1.90671, 1.00021])\n assert allclose(tcos.nu_relative_density(ztest), nurel_exp,\n rtol=5e-3)\n onu_exp = np.array([0.00066599, 0.00172677, 0.0020732,\n 0.00268404, 0.0978313])\n assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)\n assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],\n rtol=1e-4)\n assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],\n rtol=1e-4)\n\n # Now a mixture of neutrino masses, with non-integer Neff\n tcos = flrw.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04,\n m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV))\n nurel_exp = nuprefac * tcos.Neff * \\\n np.array([149.386233, 74.87915, 50.0518,\n 14.002403, 1.03702333])\n assert allclose(tcos.nu_relative_density(ztest), nurel_exp,\n rtol=5e-3)\n onu_exp = np.array([0.00584959, 0.01493142, 0.01772291,\n 0.01963451, 0.10227728])\n assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)\n\n # Integer redshifts\n ztest = ztest.astype(int)\n assert allclose(tcos.nu_relative_density(ztest), nurel_exp,\n rtol=5e-3)\n assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)\n\n\[email protected]('not HAS_SCIPY')\ndef test_elliptic_comoving_distance_z1z2():\n \"\"\"Regression test for #8388.\"\"\"\n cosmo = flrw.LambdaCDM(70., 2.3, 0.05, Tcmb0=0)\n z = 0.2\n assert allclose(cosmo.comoving_distance(z),\n cosmo._integral_comoving_distance_z1z2(0., z))\n assert 
allclose(cosmo._elliptic_comoving_distance_z1z2(0., z),\n cosmo._integral_comoving_distance_z1z2(0., z))\n\n\nSPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES = [\n flrw.FlatLambdaCDM(H0=70, Om0=0.0, Tcmb0=0.0), # de Sitter\n flrw.FlatLambdaCDM(H0=70, Om0=1.0, Tcmb0=0.0), # Einstein - de Sitter\n flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0.0), # Hypergeometric\n flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.6, Tcmb0=0.0), # Elliptic\n]\n\n\nITERABLE_REDSHIFTS = [\n (0, 1, 2, 3, 4), # tuple\n [0, 1, 2, 3, 4], # list\n np.array([0, 1, 2, 3, 4]), # array\n]\n\n\[email protected]('not HAS_SCIPY')\[email protected]('cosmo', SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES)\[email protected]('z', ITERABLE_REDSHIFTS)\ndef test_comoving_distance_iterable_argument(cosmo, z):\n \"\"\"\n Regression test for #10980\n Test that specialized comoving distance methods handle iterable arguments.\n \"\"\"\n\n assert allclose(cosmo.comoving_distance(z),\n cosmo._integral_comoving_distance_z1z2(0., z))\n\n\[email protected]('not HAS_SCIPY')\[email protected]('cosmo', SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES)\ndef test_comoving_distance_broadcast(cosmo):\n \"\"\"\n Regression test for #10980\n Test that specialized comoving distance methods broadcast array arguments.\n \"\"\"\n\n z1 = np.zeros((2, 5))\n z2 = np.ones((3, 1, 5))\n z3 = np.ones((7, 5))\n output_shape = np.broadcast(z1, z2).shape\n\n # Check compatible array arguments return an array with the correct shape\n assert cosmo._comoving_distance_z1z2(z1, z2).shape == output_shape\n\n # Check incompatible array arguments raise an error\n with pytest.raises(ValueError, match='z1 and z2 have different shapes'):\n cosmo._comoving_distance_z1z2(z1, z3)\n"
] | [
[
"numpy.ones_like",
"numpy.sqrt",
"numpy.linspace",
"numpy.ones",
"numpy.broadcast",
"scipy.integrate.quad",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
venkat1110/Face_emotion | [
"fab432d77dd5136151385e0cd7e77d688dd80c25"
] | [
"src/app.py"
] | [
"from flask import Flask,render_template,Response,request,redirect,jsonify\r\nimport numpy as np\r\nimport argparse\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nfrom psycopg2 import sql\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\r\nfrom tensorflow.keras.layers import Conv2D\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.layers import MaxPooling2D\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nimport os\r\nimport time\r\nimport datetime\r\nfrom gaze_tracking import GazeTracking\r\nfrom math import sqrt\r\nimport imutils\r\ngaze = GazeTracking()\r\nfrom imutils.video import VideoStream\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nimport mysql.connector\r\nos.environ['TF_XLA_FLAGS']= '--tf_xla_enable_xla_devices'\r\nOPENCV_PYTHON_DEBUG=1\r\napp = Flask(__name__)\r\nmodel = Sequential()\r\n\r\nmodel.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))\r\nmodel.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\n\r\nmodel.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\n\r\nmodel.add(Flatten())\r\nmodel.add(Dense(1024, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(7, activation='softmax'))\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n database=\"facial_emotion\", #name of your database\r\n user=\"root\",\r\n password=\"\",\r\n)\r\ncursor = mydb.cursor(buffered=True)\r\nglobal timestamp \r\ntimestamp=datetime.datetime.now()\r\n\r\[email protected]('/send',methods=[\"GET\",\"POST\"])\r\ndef send():\r\n global table1\r\n table1=request.form['user_name'] \r\n return render_template('home.html')\r\n\r\ncamera = cv2.VideoCapture(0) \r\n\r\ndef gen_frames(): # generate frame by frame from camera\r\n global emotion_dict\r\n global maxindex\r\n query = \"\"\"SELECT count(*) FROM information_schema.TABLES WHERE (TABLE_SCHEMA ='facial_emotion') AND (TABLE_NAME='%s')\"\"\" %(table1)\r\n cursor.execute(query)\r\n mydb.commit()\r\n \r\n if cursor.fetchone()[0]==1:\r\n print(\"table exists\")\r\n else:\r\n query1=\"\"\"CREATE TABLE %s(id INT AUTO_INCREMENT PRIMARY KEY,data VARCHAR(255),time timestamp,emotionid int)\"\"\" %(table1)\r\n cursor.execute(query1)\r\n mydb.commit()\r\n\r\n \r\n \r\n \r\n \r\n while True:\r\n # Capture frame-by-frame\r\n success, frame = camera.read() # read the camera frame\r\n\r\n if not success:\r\n break\r\n else:\r\n model.load_weights('model.h5')\r\n emotion_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\r\n gaze.refresh(frame)\r\n\r\n frame = gaze.annotated_frame()\r\n text = \"\"\r\n\r\n if gaze.is_blinking():\r\n text = \"Blinking\"\r\n elif gaze.is_right():\r\n text = \"Looking right\"\r\n elif gaze.is_left():\r\n text = \"Looking left\"\r\n elif gaze.is_center():\r\n text = \"Looking center\"\r\n\r\n cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)\r\n\r\n left_pupil = gaze.pupil_left_coords()\r\n right_pupil = gaze.pupil_right_coords()\r\n cv2.putText(frame, \"Left pupil: \" + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)\r\n cv2.putText(frame, \"Right pupil: \" + str(right_pupil), 
(90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)\r\n \r\n facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n faces = facecasc.detectMultiScale(gray,scaleFactor=1.3, minNeighbors=5)\r\n try:\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)\r\n roi_gray = gray[y:y + h, x:x + w]\r\n cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)\r\n prediction = model.predict(cropped_img)\r\n maxindex = int(np.argmax(prediction))\r\n cv2.putText(frame, emotion_dict[maxindex], (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\r\n global buffer\r\n ret,buffer = cv2.imencode('.jpg',frame)\r\n frame = buffer.tobytes()\r\n \r\n\r\n global s\r\n s = \"Emotion:\" + emotion_dict[maxindex]\r\n\r\n query2 = \"\"\"INSERT INTO {} (data,time,emotionid) VALUES ('%s','%s','%d')\"\"\".format(table1) % (s,timestamp,maxindex) #Database query\r\n cursor.execute(query2)\r\n mydb.commit()\r\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' + bytearray(buffer) + b'\\r\\n') # concat frame one by one and show result\r\n except:\r\n print(\"error occured\")\r\n\r\[email protected]('/getAnalytics')\r\n\r\ndef getAnalytics():\r\n try:\r\n table2=request.args['name']\r\n query2 = \"\"\"SELECT COUNT(emotionid) as Count ,time ,data FROM {} GROUP BY emotionid\"\"\".format(table2) #Database query\r\n # query2= \"\"\"SELECT COUNT(emotionid) as COUNT ,time ,data FROM ${table2} GROUP BY emotionid\"\"\" \r\n cursor.execute(query2)\r\n #rows=cursor.fetchone()\r\n columns = cursor.description\r\n result = []\r\n for value in cursor.fetchall():\r\n tmp = {}\r\n for (index,column) in enumerate(value):\r\n tmp[columns[index][0]] = column\r\n result.append(tmp)\r\n response = jsonify(result)\r\n\r\n return response\r\n except Exception as e:\r\n return e\r\n \r\n\r\n\r\[email protected]('/video_feed')\r\ndef video_feed():\r\n #Video streaming route. Put this in the src attribute of an img tag\r\n return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')\r\n\r\n\r\[email protected]('/')\r\ndef index():\r\n \r\n \"\"\"Video streaming home page.\"\"\"\r\n return render_template('index.html')\r\n\r\[email protected](\"/stream\")\r\ndef stream():\r\n def generate():\r\n for s in range(500):\r\n yield \"{}\\n\".format(sqrt(s))\r\n return app.response_class(generate(), mimetype=\"text/plain\")\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)"
] | [
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"numpy.argmax",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
gabrieledamone/DE3-ROB1-CHESS | [
"19ec74f10317d27683817989e729cacd6fe55a3f"
] | [
"perception/Old/shiThomasiCorner.py"
] | [
"import cv2\nimport numpy as np\nimport copy\n\nimg = cv2.imread('emptyBoard.jpg')\nimg_orig = copy.copy(img)\ngrayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ncorners = cv2.goodFeaturesToTrack(grayimg, 83, 0.05, 25)\ncorners = np.float32(corners)\n\nfor item in corners:\n x, y = item[0]\n cv2.circle(img, (x, y), 5, 255, -1)\n\ncv2.imshow(\"Original\", img_orig)\ncv2.imshow(\"Top 10 corners\", img)\nif cv2.waitKey(0) & 0xff == 27:\n cv2.destroyAllWindows()"
] | [
[
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tiagovla/Chaos | [
"2355b9d2a9de16d3cc4a74165ab26c8190e6056b"
] | [
"logistic_zoom.py"
] | [
"\"\"\"\r\n Written by Jonny Hyman, 2020\r\n www.jonnyhyman.com\r\n www.github.com/jonnyhyman\r\n\r\n MIT License\r\n\r\nThis code renders the logistic map and zooms in to show its fractal nature.\r\nThere is also infrastructure to plot rulers and labels where each bifurcation\r\noccurs; to hint at the emerging feigenbaum constant 4.669\r\n\r\nCode structure is roughly:\r\n - imports\r\n - parameters\r\n - functions\r\n - runtime stuff\r\n\"\"\"\r\n\r\nfrom vispy import app, gloo\r\nimport vispy.plot as vp\r\nimport vispy.io as io\r\nimport numpy as np\r\nimport vispy\r\n\r\nfrom vispy.scene.visuals import Text\r\n\r\nfrom pathlib import Path\r\nfrom time import time\r\n\r\nimport os\r\n\r\nfrom numba import jit, prange\r\n\r\n# --------------------------------------------------------------------------\r\n# --- PARAMETERS\r\n\r\n# record frames?\r\nrecord_project = False\r\n\r\ngens = 100\r\n#gens = 1000 # used in final rendering\r\nrates = 5000\r\n#rates = 20000 # used in final rendering\r\n\r\nrec_prefix = './frames'\r\nproject_name = 'logistic_zoom'\r\n\r\n# Adding this in makes the visualization only create bifurcation labels\r\n#project_name += '_labels'\r\n\r\n# --- DERIVED PARAMETERS ...\r\n\r\nframe_dir = Path(f'{rec_prefix}/{project_name}')\r\n\r\nif not frame_dir.exists() and record_project:\r\n frame_dir.mkdir()\r\n\r\nfeigens = [\r\n 1.0, # 1\r\n 3.0, # 2\r\n 3.4494897, # 4\r\n 3.5440903, # 8\r\n 3.5644073, # 16\r\n 3.5687594, # 32\r\n 3.5696916, # 64\r\n 3.5698913, # 128\r\n 3.569934067807427 # 256 corrected from #3.5699340 # 256\r\n]\r\n\r\nfeigenys = { # y values of each bifurcation (along our zoom path)\r\n 1 : 0.0,\r\n 2 : .67,\r\n 4 : .85,\r\n 8 : .884,\r\n 16 : .8907,\r\n 32 : .8921,\r\n 64 :.89214,\r\n 128 : .89224,\r\n 256 : .89215,\r\n}\r\n\r\n# --------------------------------------------------------------------------\r\n# --- FUNCTIONS\r\n\r\n@jit(cache=True, nopython=True)#, parallel=True)\r\ndef simulate(num_gens=10, rate_min=0, rate_max=3.99, num_rates=10,\r\n num_discard=100, initial_pop=0.5):\r\n\r\n \"\"\" create simulation data of bifurcation at various rates.\r\n performance can improve with parallel = True only at massive scales\r\n\r\n taken from package pynamical by Geoff Boeing\r\n `https://github.com/gboeing/pynamical`\r\n \"\"\"\r\n\r\n pops = np.empty(shape=(num_gens*num_rates, 2), dtype=np.float64)\r\n rates = np.linspace(rate_min, rate_max, num_rates)\r\n\r\n # for each rate, run the function repeatedly, starting at the initial_pop\r\n\r\n for rate_num in prange(len(rates)):\r\n\r\n rate = rates[rate_num]\r\n\r\n pop = initial_pop\r\n\r\n # first run it num_discard times and ignore the results\r\n for _ in range(num_discard):\r\n pop = pop * rate * (1 - pop)\r\n\r\n # now that those gens are discarded, run it num_gens times and keep the results\r\n for gen_num in range(num_gens):\r\n row_num = gen_num + num_gens * rate_num\r\n pops[row_num] = [rate, pop]\r\n\r\n pop = pop * rate * (1 - pop)\r\n\r\n return pops\r\n\r\ndef feigen_ruler(plt, parent, color, x0, y0, x1, y1, x2=None, y2=None):\r\n \"\"\" add a `ruler` to plot to show distance between Bifurcations \"\"\"\r\n\r\n serif = 0.1 # serif in percent of vertical difference\r\n serif = serif * abs(y1-y0)\r\n\r\n verts = np.zeros((6,2))\r\n\r\n verts[0,:] = [x0, y0 + serif]\r\n verts[1,:] = [x0, y0 - serif]\r\n verts[2,:] = [x0, y0 + serif*2] # serif/2 dodges text\r\n # jump x\r\n verts[3,:] = [x1, y0 + serif*2] # serif/2 dodges text\r\n verts[4,:] = [x1, y0 + serif]\r\n verts[5,:] = [x1, y1 - 
serif]\r\n\r\n ruler = plt.plot(verts, color=color)\r\n\r\n if x2 != None and y2 != None:\r\n f = x0-x1#(x1-x2)/(x0-x1)\r\n\r\n label = f\"\"#{round(f,5)}\"\r\n\r\n t = Text(label, face='Cambria Math', parent=parent, color=color)\r\n t.font_size = 18\r\n t.pos = (x1-x0)/2 + x0, y0 + serif\r\n else:\r\n t = None\r\n\r\n return {'plt': ruler, 'label': t, 'lims':(x1, x0)}\r\n\r\n\r\ndef feigen_lines(target, parent):\r\n \"\"\" create rulers and labels at each feigenvalue \"\"\"\r\n\r\n cmap = vispy.color.get_colormap('prism')\r\n\r\n rulers = []\r\n\r\n for f, val in enumerate(feigens):\r\n\r\n if f == 0:\r\n continue\r\n\r\n c = f / len(feigens)\r\n c = c**4\r\n\r\n color = cmap[c]\r\n #color.alpha = (1-f / len(feigens))**0.1\r\n\r\n splits = lambda x: int((2)**(x))\r\n\r\n if f > 1:\r\n x0 = feigens[f]\r\n y0 = feigenys[splits(f)]\r\n\r\n x1 = feigens[f-1]\r\n y1 = feigenys[splits(f-1)]\r\n\r\n x2 = feigens[f-2]\r\n y2 = feigenys[splits(f-2)]\r\n\r\n # number of decimal places needed to show difference\r\n d = feigens[f] - feigens[f-1]\r\n places = (abs(int(np.log10(d))) + 1)\r\n\r\n r = feigen_ruler(target, parent, color, x0, y0, x1, y1, x2, y2)\r\n\r\n else:\r\n x0 = feigens[f]\r\n y0 = feigenys[splits(f)]\r\n places = 1\r\n\r\n label = f\"{splits(f)} →\"\r\n\r\n t = Text(label, face='Cambria Math',\r\n parent=parent, color=color, anchor_x='right')\r\n\r\n t.font_size = 16\r\n t.pos = x0, y0\r\n\r\n if f > 1:\r\n r['ptr'] = t\r\n rulers += [r]\r\n\r\n return rulers\r\n\r\n\r\ndef zoom_plot(target, RATES, ENDS, first=False):\r\n\r\n global gens, rates\r\n\r\n \"\"\" ---- CREATE DATA ---- \"\"\"\r\n\r\n if not first and not 'labels' in project_name:\r\n start = time()\r\n print('... Simulating between', RATES, ENDS,'gens, rates', gens, rates)\r\n pops = simulate(num_gens=gens, num_rates=rates,\r\n\r\n rate_min=RATES[0], rate_max=RATES[1],\r\n\r\n num_discard = 1000, initial_pop=0.5)\r\n print('>>> DONE', round(time()-start,2),'s')\r\n\r\n elif first and 'labels' in project_name:\r\n\r\n mode = 0\r\n\r\n if mode==0:\r\n start = time()\r\n print('... 
Simulating between', RATES, ENDS,'gens, rates', gens, rates)\r\n pops = simulate(num_gens=gens, num_rates=rates,\r\n\r\n rate_min=RATES[0], rate_max=RATES[1],\r\n\r\n num_discard = 1000, initial_pop=0.5)\r\n print('>>> DONE', round(time()-start,2),'s')\r\n else:\r\n pops = np.zeros((1,2))\r\n\r\n else:\r\n pops = None\r\n pops = np.zeros((1,2))\r\n\r\n \"\"\" ---- CREATE PLOT ---- \"\"\"\r\n\r\n # Bifurcations\r\n # plot the xy data\r\n\r\n color = vispy.color.ColorArray(\"black\")\r\n color.alpha = 0.8\r\n size = 1\r\n\r\n if first:\r\n\r\n line = target[0,0].plot(pops, symbol='o', width=0, edge_width = 0,\r\n face_color=color, edge_color=color,\r\n marker_size=size)\r\n line.set_gl_state(depth_test=False)\r\n\r\n if 'labels' in project_name:\r\n rulers = feigen_lines(target[0,0], line.parent)\r\n\r\n return line, rulers\r\n else:\r\n return line\r\n\r\n else:\r\n\r\n if pops is not None:\r\n target.set_data(pops, symbol='o', width=0, edge_width = 0,\r\n face_color=color, edge_color=color,\r\n marker_size=size)\r\n target.update()\r\n\r\ndef linear_interp(x, in_min, in_max, out_min, out_max):\r\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min\r\n\r\ndef smooth(f, start, end):\r\n f = 0.5*(np.cos(np.pi*(f-1)) + 1) # assumes x between 0-1\r\n return linear_interp(f, 0,1, start, end)\r\n\r\nkeyframes = np.array([\r\n [\r\n # left\r\n -0.1,\r\n # right\r\n 4.0,\r\n # bottom\r\n -.1,\r\n # aspect ratio (w/h)\r\n 4/1.1,\r\n ],\r\n [\r\n # left\r\n 3.56908,\r\n # right\r\n 3.57056,\r\n # bottom\r\n .89206,\r\n # aspect ratio (w/h)\r\n 4/1.1,\r\n ]\r\n])\r\n\r\n#keyframes = np.flip(keyframes, axis=0) # reverse\r\n\r\nclass Figure(vp.Fig):\r\n def __init__(self, *args, **kwargs):\r\n\r\n if 'record' in kwargs:\r\n self.rec = kwargs['record']\r\n del kwargs['record']\r\n else:\r\n self.rec = False\r\n\r\n super(Figure, self).__init__(*args, **kwargs)\r\n self.unfreeze()\r\n\r\n self.rec_fps = 24\r\n\r\n if self.rec:\r\n timer_spf = 'auto'\r\n else:\r\n timer_spf = 1 / self.rec_fps\r\n\r\n self.t = app.Timer(timer_spf, connect=self.on_timer, start=True)#, iterations=1)\r\n self.c_frames = 30 * self.rec_fps # frames per chapter\r\n self.f_max = self.c_frames * (len(keyframes)-1)\r\n self.f = 0\r\n self.on_timer(1)\r\n\r\n def on_key_press(self, event):\r\n if event.text ==' ':\r\n self.t.stop() if self.t.running else self.t.start()\r\n\r\n elif event.text ==',':\r\n self.f -= 2\r\n self.on_timer(1)\r\n\r\n elif event.text =='.':\r\n self.on_timer(1)\r\n\r\n def on_draw(self, event):\r\n\r\n # re-alpha the rulers based on zoom level\r\n rect = self.camera.rect\r\n rates = [rect.left, rect.right]\r\n\r\n for r, ruler in enumerate(self.rulers):\r\n\r\n b = ruler['lims']\r\n d = b[1] - b[0]\r\n a = abs(d) / abs(rates[1] - rates[0])\r\n a = min(a, 1)\r\n a = max(a, 0)\r\n\r\n if r < 5:\r\n peakx = 0.75\r\n elif r == 5:\r\n peakx = 0.1\r\n elif r == 6:\r\n peakx = 0.01\r\n\r\n a = np.interp(a, [0,peakx,1], [0,1,1])\r\n\r\n if ruler['label'] != None:\r\n c = ruler['label'].color\r\n c.alpha = a\r\n ruler['label'].color = c\r\n\r\n c = ruler['ptr'].color\r\n c.alpha = a\r\n ruler['ptr'].color = c\r\n\r\n c = ruler['plt']._line._color\r\n c.alpha = a\r\n ruler['plt']._line._color = c\r\n\r\n super(Figure, self).on_draw(event)\r\n\r\n def on_timer(self, event):\r\n\r\n start = time()\r\n\r\n if not hasattr(self, 'plotted'):\r\n zoom_plot_ret = zoom_plot(self, [0,4], [0,1], first=True)\r\n if type(zoom_plot_ret) is tuple:\r\n self.plotted, self.rulers = zoom_plot_ret\r\n else:\r\n 
self.plotted = zoom_plot_ret\r\n self.rulers = []\r\n self.camera = self._plot_widgets[0].view.camera\r\n\r\n else:\r\n\r\n C = len(keyframes)\r\n c = self.f // self.c_frames\r\n z = (self.f/self.c_frames) % 1\r\n #print(self.f, z, c, C)\r\n\r\n if c >= C-1:\r\n if self.rec:\r\n self.close()\r\n self.done()\r\n\r\n else:\r\n # 0 1 2 3\r\n # LRBA = left, right, bottom, aspect\r\n LRBA = smooth(z, keyframes[c], keyframes[c+1])\r\n\r\n left = LRBA[0]\r\n bottom = LRBA[2]\r\n width = LRBA[1] - LRBA[0]\r\n height = width * 1/LRBA[3]\r\n\r\n # left, bottom, width, height\r\n rect = (left, bottom, width, height)\r\n\r\n self.camera.rect = tuple(rect)\r\n\r\n rect = self.camera.rect\r\n rates = [rect.left, rect.right]\r\n ends = [rect.bottom, rect.top]\r\n\r\n zoom_plot(self.plotted, rates, ends)\r\n\r\n if self.rec:\r\n rec_prefix = self.rec['pre']\r\n project_name = self.rec['name']\r\n\r\n image = self.render()\r\n io.write_png(f'{rec_prefix}/{project_name}/{project_name}_{self.f}.png', image)\r\n\r\n ETA = (time() - start) * (self.f_max-self.f) # (time / frame) * frames remaining\r\n ETA = (ETA / 60) / 60 # seconds to hours\r\n ETA = np.modf(ETA)\r\n ETA = int(ETA[1]), int(round(ETA[0]*60))\r\n ETA = str(ETA[0]) + \":\" + str(ETA[1]).zfill(2)\r\n\r\n print(f'>>> FRAME: {project_name}_{self.f}.png, ETA',\r\n ETA,',', round(100*self.f/self.f_max,2),'% :',\r\n self.f, '/', self.f_max)\r\n\r\n\r\n self.f += 1\r\n\r\n def done(self):\r\n\r\n if self.rec:\r\n\r\n rec_prefix = self.rec['pre']\r\n project_name = self.rec['name']\r\n\r\n convert_cmd = (f\"\"\"ffmpeg -f image2 -framerate {self.rec_fps}\"\"\"\r\n f\"\"\" -i {rec_prefix}/{project_name}/{project_name}_%d.png\"\"\"\r\n f\"\"\" -c:v prores_ks -profile:v 3 {project_name}.mov\"\"\")\r\n\r\n print('CONVERTING >>>', convert_cmd)\r\n\r\n os.system(convert_cmd)\r\n\r\n dir = (f\"./{rec_prefix}/{project_name}\")\r\n filelist = [f for f in os.listdir(dir) if f.endswith(\".png\") ]\r\n for f in filelist:\r\n os.remove(os.path.join(dir, f))\r\n\r\n print(\"Logistic zoom is completed\")\r\n exit()\r\n\r\n\r\nif record_project:\r\n rec_dict = {'pre':rec_prefix, 'name':project_name}\r\nelse:\r\n rec_dict = None\r\n\r\nfig = Figure(show=False, title=\"Log Zoom\", size=(2538, 1080),\r\n record=rec_dict)\r\n\r\nif __name__ == '__main__':\r\n fig.show(run=True)\r\n"
] | [
[
"numpy.linspace",
"numpy.cos",
"numpy.modf",
"numpy.log10",
"numpy.interp",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
javiunzu/python-games | [
"38757cf2a5f8a1d6777cce9471046f83b42630cb"
] | [
"connect_four/connect_four.py"
] | [
"import numpy\nimport pygame\nimport random\n\n\nclass Board:\n \"\"\"\n The board is represented by a matrix. The values of that matrix represent the following states:\n 0: Free cell.\n 1: First player's token.\n -1: Second player's token.\n \"\"\"\n cell_size = 80\n turn = 1\n game_over = False\n\n def __init__(self, width, height):\n \"\"\"\n Initializes the value matrix. the drawing surface and the color values.\n :param width: Number of rows.\n :param height: Number of columns.\n \"\"\"\n self.width = width\n self.height = height\n self.cells = numpy.zeros((height, width))\n self.surface = pygame.display.set_mode((self.cell_size * width, self.cell_size * (height + 1)))\n self.bgcolor = (0, 0, 0)\n self.fgcolor = (0, 0, 255)\n self.p1color = (255, 0, 0)\n self.p2color = (255, 255, 0)\n self.turn_indicator = (self.cell_size // 2, self.cell_size // 2)\n\n def __repr__(self):\n return str(self.cells)\n\n def draw(self):\n self.surface.fill(self.bgcolor)\n # Draw one big blue rectangle\n pygame.draw.rect(self.surface, self.fgcolor,\n (0, self.cell_size, self.width * self.cell_size, self.height * self.cell_size))\n for y in range(0, self.height):\n for x in range(0, self.width):\n pygame.draw.circle(self.surface,\n {-1: self.p2color,\n 0: self.bgcolor,\n 1: self.p1color}[self.cells[y][x]],\n (x * self.cell_size + self.cell_size // 2, self.cell_size + y * self.cell_size + self.cell_size // 2),\n self.cell_size // 2 - self.cell_size // 10)\n # Draw turn indicator\n pygame.draw.circle(self.surface,\n {-1: self.p2color,\n 1: self.p1color}[self.turn],\n self.turn_indicator,\n self.cell_size // 2 - self.cell_size // 10)\n\n def drop(self, pos):\n \"\"\"\n Get mouse position and map it to a column.\n When doing the conversion from screen coordinates to a cell, it's important to know that screen coordinates\n take as origin the upper-left corner of the screen and indexes columns-first, and numpy indexes arrays rows-first.\n :param pos: Tuple with screen coordinates.\n :returns: Tuple with the position where the token \"drops\". if there is no place in the column, returns None.\n \"\"\"\n column = pos[0] // self.cell_size\n # Find the first non zero cell on that column and set it to the turn marker.\n row = self.height - 1 if numpy.count_nonzero(self.cells[:, column]) == 0 else numpy.nonzero(self.cells[:, column])[0][0] - 1\n if row >= 0:\n self.cells[row, column] = self.turn\n print(self.cells)\n self.turn *= -1 # Change to the other player.\n return row, column\n else:\n return None\n\n def finished(self, row, column):\n \"\"\"\n Checks if a slot is part of a four-in-a-row.\n Slides a 4-position window over the horizontal, the vertical, and the two diagonals.\n It's the most efficient solution I can come up with, as the operational cost remains the same, no matter how big the board is.\n :param row: Integer value of the row.\n :param column: Integer value of the column.\n :return: True if four in a row are found. False otherwise\n \"\"\"\n value = self.cells[row, column]\n if value != 0:\n # Define the boundaries\n min_row = max(0, row - 3)\n max_row = min(self.height - 1, row + 3)\n min_col = max(0, column - 3)\n max_col = min(self.width - 1, column + 3)\n # Check horizontal line\n for c in range(min_col, min(max_col - 2, min_col + 4)):\n if numpy.all(value == self.cells[row, c:c+4]):\n return True\n # Check vertical line\n for r in range(min_row, min(max_row - 2, min_row + 4)):\n if numpy.all(value == self.cells[r:r+4, column]):\n return True\n # Check diagonal with negative slope. 
It is way easier to build a fixed-sized submatrix and skip the iteration if you end out of bounds rather than calculating the correct indexes.\n for d in range(0, 4):\n try:\n vector = self.cells[row - 3 + d:row + 1 + d, column - 3 + d:column + 1 + d].diagonal()\n if numpy.all(value == vector) and len(vector) == 4:\n return True\n except IndexError:\n continue\n # Check diagonal with positive slope. Same as before, but flipping the submatrix, as diagonal() works with the main diagonal.\n for d in range(0, 4):\n try:\n vector = numpy.fliplr(self.cells[row - d:row + 4 - d, column - 3 + d:column + 1 + d]).diagonal()\n if numpy.all(value == vector) and len(vector) == 4:\n return True\n except IndexError:\n continue\n\n return False\n\n def update_turn_indicator(self, pos):\n \"\"\"\n Update the position of the turn indicator. We are only interested in the X coordinate, as the indicator moves over a single row.\n The Y coordinate is in the middle of the cell, because we will use the position to draw a circle.\n :param pos: Tuple with the screen coordinates of the pointer.\n \"\"\"\n self.turn_indicator = (pos[0], self.cell_size // 2)\n\n def reset(self):\n self.cells = numpy.zeros((self.height, self.width))\n\n\ndef main(com):\n\n board = Board(7, 6)\n pygame.init()\n\n while not board.game_over:\n # COM players take their actions first, if activated and in their turn.\n # TODO: refactor the game-over check.\n if 'red' in com and board.turn == 1:\n # Put a random piece. This doesn't even qualify as AI ... yet.\n move = (random.randint(0, board.width * board.cell_size - 1), random.randint(0, board.height * board.cell_size - 1))\n pos = board.drop(move)\n if board.finished(*pos):\n print(\"Game over\")\n board.game_over = True\n if 'yellow' in com and board.turn == -1:\n move = (random.randint(0, board.width * board.cell_size), random.randint(0, board.height * board.cell_size))\n pos = board.drop(move)\n if board.finished(*pos):\n print(\"Game over\")\n board.game_over = True\n # Handle events\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # Check for window close.\n pygame.quit()\n if event.type == pygame.MOUSEMOTION:\n # Move the turn marker across the top row\n board.update_turn_indicator(event.pos)\n if event.type == pygame.MOUSEBUTTONUP:\n pos = board.drop(event.pos)\n # Is the game ended?\n if board.finished(*pos):\n print(\"Game over\")\n board.game_over = True\n\n\n board.draw()\n pygame.display.update()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description=\"Players take turns to try to connect 4 pieces of the same color in a line.\")\n parser.add_argument('-c', dest='com', nargs='+', choices=['red', 'yellow'], required=False)\n args = parser.parse_args()\n print(args)\n main(args.com)\n"
] | [
[
"numpy.nonzero",
"numpy.fliplr",
"numpy.all",
"numpy.count_nonzero",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Spinch/CarND-Advanced-Lane-Lines | [
"618d19b4fc3394809c6d6bfa6872527e129d856f"
] | [
"ChooseThresholdsUI.py"
] | [
"\nimport sys\nimport os\nimport cv2\nimport numpy as np\nfrom PyQt4.QtCore import pyqtSignal, pyqtSlot, QObject, SIGNAL, Qt\nfrom PyQt4.QtGui import QApplication, QVBoxLayout, QHBoxLayout, QPushButton, QLineEdit, QWidget, QMainWindow, QLabel, \\\n QCheckBox, QSlider, QFileDialog, QImage, QPixmap, QSpinBox, QComboBox\n\nimport pipeline\n\n\nclass ChooseThresholdsUI(QMainWindow):\n\n def __init__(self):\n super().__init__()\n\n self.baseImages = []\n\n self.SobelXTh = [0,255]\n\n self.SobelYTh = [0,255]\n\n self.SobelMTh = [0,255]\n\n self.SobelATh = [0,255]\n\n self.initUI()\n\n\n @pyqtSlot()\n def openNewFileDialog(self, index):\n \"\"\" @brief slot with open new file dialog\n \"\"\"\n directory = os.path.dirname(self.loadFileLine[index].text())\n fileName = QFileDialog.getOpenFileName(self, 'Choose image', directory, 'All files (*)')\n if isinstance(fileName, tuple): # for PyQt5\n if fileName[0]:\n self.loadFileLine[index].setText(fileName[0])\n else: # for PyQt4\n if fileName:\n self.loadFileLine[index].setText(fileName)\n\n @pyqtSlot()\n def loadImage(self):\n\n self.baseImages = []\n for i in range(0,4):\n if (i>0):\n if not self.loadFileCB[i-1].isChecked():\n continue\n image = cv2.imread(self.loadFileLine[i].text())\n if image is not None:\n self.baseImages.append(image)\n if len(self.baseImages) !=0:\n self.updateImage()\n\n @pyqtSlot()\n def sliderChange(self):\n self.SobelXTh[0] = self.slider1SobelX.value()\n self.SobelXTh[1] = self.slider2SobelX.value()\n self.labelSobelX.setText(\"SobelX th: {:03d} {:03d}\".format(self.SobelXTh[0], self.SobelXTh[1]))\n\n self.SobelYTh[0] = self.slider1SobelY.value()\n self.SobelYTh[1] = self.slider2SobelY.value()\n self.labelSobelY.setText(\"SobelY th: {:03d} {:03d}\".format(self.SobelYTh[0], self.SobelYTh[1]))\n\n self.SobelMTh[0] = self.slider1SobelM.value()\n self.SobelMTh[1] = self.slider2SobelM.value()\n self.labelSobelM.setText(\"SobelM th: {:03d} {:03d}\".format(self.SobelMTh[0], self.SobelMTh[1]))\n\n self.SobelATh[0] = self.slider1SobelA.value()*np.pi/510\n self.SobelATh[1] = self.slider2SobelA.value()*np.pi/510\n self.labelSobelA.setText(\"SobelA th: {:1.3f} {:1.3f}\".format(self.SobelATh[0], self.SobelATh[1]))\n\n self.updateImage()\n\n def initUI(self):\n\n # Choose file dialog\n loadFileBtn = QPushButton(\"Load files:\")\n loadFileBtn.clicked.connect(self.loadImage)\n\n self.loadFileLine = []\n loadFileDialogBtn = []\n self.loadFileCB = []\n for i in range(0,4):\n self.loadFileLine.append(QLineEdit(\"\"))\n loadFileDialogBtn.append(QPushButton(\"...\"))\n loadFileDialogBtn[i].setMaximumWidth(40)\n if (i>0):\n self.loadFileCB.append(QCheckBox())\n # self.loadFileCB[i-1].stateChanged.connect(self.updateImage)\n loadFileDialogBtn[0].clicked.connect(lambda: self.openNewFileDialog(0))\n loadFileDialogBtn[1].clicked.connect(lambda: self.openNewFileDialog(1))\n loadFileDialogBtn[2].clicked.connect(lambda: self.openNewFileDialog(2))\n loadFileDialogBtn[3].clicked.connect(lambda: self.openNewFileDialog(3))\n loadFileDialogBtn[1].clicked.connect(lambda: self.loadFileCB[0].setChecked(True))\n loadFileDialogBtn[2].clicked.connect(lambda: self.loadFileCB[1].setChecked(True))\n loadFileDialogBtn[3].clicked.connect(lambda: self.loadFileCB[2].setChecked(True))\n\n self.labelImage = QLabel()\n\n labelChannels = QLabel(\"Channels:\")\n self.comboChannel0 = QComboBox()\n self.comboChannel0.addItems(['G', 'L', 'S'])\n self.comboChannel0.currentIndexChanged.connect(self.updateImage)\n self.comboChannel1 = QComboBox()\n self.comboChannel1.addItems(['G', 
'L', 'S'])\n self.comboChannel1.currentIndexChanged.connect(self.updateImage)\n self.comboChannel2 = QComboBox()\n self.comboChannel2.addItems(['G', 'L', 'S'])\n self.comboChannel2.currentIndexChanged.connect(self.updateImage)\n\n labelKernels = QLabel(\"Kernels:\")\n self.kernel0SB = QSpinBox()\n self.kernel0SB.setMinimum(1)\n self.kernel0SB.setMaximum(31)\n self.kernel0SB.setValue(15)\n self.kernel0SB.setSingleStep(2)\n self.kernel0SB.valueChanged.connect(self.updateImage)\n self.kernel1SB = QSpinBox()\n self.kernel1SB.setMinimum(1)\n self.kernel1SB.setMaximum(31)\n self.kernel1SB.setValue(15)\n self.kernel1SB.setSingleStep(2)\n self.kernel1SB.valueChanged.connect(self.updateImage)\n self.kernel2SB = QSpinBox()\n self.kernel2SB.setMinimum(1)\n self.kernel2SB.setMaximum(31)\n self.kernel2SB.setValue(15)\n self.kernel2SB.setSingleStep(2)\n self.kernel2SB.valueChanged.connect(self.updateImage)\n\n\n self.labelSobelX = QLabel()\n self.chboxSobelX0 = QCheckBox()\n self.chboxSobelX0.stateChanged.connect(self.updateImage)\n self.chboxSobelX1 = QCheckBox()\n self.chboxSobelX1.stateChanged.connect(self.updateImage)\n self.chboxSobelX2 = QCheckBox()\n self.chboxSobelX2.stateChanged.connect(self.updateImage)\n self.slider1SobelX = QSlider(Qt.Horizontal)\n self.slider1SobelX.setMinimum(0)\n self.slider1SobelX.setMaximum(255)\n self.slider1SobelX.setValue(20)\n self.slider1SobelX.valueChanged.connect(self.sliderChange)\n self.slider2SobelX = QSlider(Qt.Horizontal)\n self.slider2SobelX.setMinimum(0)\n self.slider2SobelX.setMaximum(255)\n self.slider2SobelX.setValue(100)\n self.slider2SobelX.valueChanged.connect(self.sliderChange)\n\n self.labelSobelY = QLabel()\n self.chboxSobelY0 = QCheckBox()\n self.chboxSobelY0.stateChanged.connect(self.updateImage)\n self.chboxSobelY1 = QCheckBox()\n self.chboxSobelY1.stateChanged.connect(self.updateImage)\n self.chboxSobelY2 = QCheckBox()\n self.chboxSobelY2.stateChanged.connect(self.updateImage)\n self.slider1SobelY = QSlider(Qt.Horizontal)\n self.slider1SobelY.setMinimum(0)\n self.slider1SobelY.setMaximum(255)\n self.slider1SobelY.setValue(20)\n self.slider1SobelY.valueChanged.connect(self.sliderChange)\n self.slider2SobelY = QSlider(Qt.Horizontal)\n self.slider2SobelY.setMinimum(0)\n self.slider2SobelY.setMaximum(255)\n self.slider2SobelY.setValue(100)\n self.slider2SobelY.valueChanged.connect(self.sliderChange)\n\n self.labelSobelM = QLabel()\n self.chboxSobelM0 = QCheckBox()\n self.chboxSobelM0.stateChanged.connect(self.updateImage)\n self.chboxSobelM1 = QCheckBox()\n self.chboxSobelM1.stateChanged.connect(self.updateImage)\n self.chboxSobelM2 = QCheckBox()\n self.chboxSobelM2.stateChanged.connect(self.updateImage)\n self.slider1SobelM = QSlider(Qt.Horizontal)\n self.slider1SobelM.setMinimum(0)\n self.slider1SobelM.setMaximum(255)\n self.slider1SobelM.setValue(30)\n self.slider1SobelM.valueChanged.connect(self.sliderChange)\n self.slider2SobelM = QSlider(Qt.Horizontal)\n self.slider2SobelM.setMinimum(0)\n self.slider2SobelM.setMaximum(255)\n self.slider2SobelM.setValue(100)\n self.slider2SobelM.valueChanged.connect(self.sliderChange)\n\n self.labelSobelA = QLabel()\n self.chboxSobelA0 = QCheckBox()\n self.chboxSobelA0.stateChanged.connect(self.updateImage)\n self.chboxSobelA1 = QCheckBox()\n self.chboxSobelA1.stateChanged.connect(self.updateImage)\n self.chboxSobelA2 = QCheckBox()\n self.chboxSobelA2.stateChanged.connect(self.updateImage)\n self.slider1SobelA = QSlider(Qt.Horizontal)\n self.slider1SobelA.setMinimum(0)\n self.slider1SobelA.setMaximum(255)\n 
self.slider1SobelA.setValue(int(0.7/np.pi*510))\n self.slider1SobelA.valueChanged.connect(self.sliderChange)\n self.slider2SobelA = QSlider(Qt.Horizontal)\n self.slider2SobelA.setMinimum(0)\n self.slider2SobelA.setMaximum(255)\n self.slider2SobelA.setValue(int(1.3/np.pi*510))\n self.slider2SobelA.valueChanged.connect(self.sliderChange)\n\n self.sliderChange()\n\n # Layouts\n layoutMain = QVBoxLayout()\n layoutMain2 = QHBoxLayout()\n layoutSettings = QVBoxLayout()\n layoutChannels = QHBoxLayout()\n layoutKernelSB = QHBoxLayout()\n layoutChboxSobelX = QHBoxLayout()\n layoutChboxSobelY = QHBoxLayout()\n layoutChboxSobelM = QHBoxLayout()\n layoutChboxSobelA = QHBoxLayout()\n\n\n layoutsChooseFile = []\n for i in range(0,4):\n layoutsChooseFile.append(QHBoxLayout())\n if i == 0:\n layoutsChooseFile[i].addWidget(loadFileBtn)\n else:\n layoutsChooseFile[i].addWidget(self.loadFileCB[i-1])\n layoutsChooseFile[i].addWidget(self.loadFileLine[i])\n layoutsChooseFile[i].addWidget(loadFileDialogBtn[i])\n layoutMain.addLayout(layoutsChooseFile[i])\n\n layoutMain.addLayout(layoutMain2, 1)\n\n\n layoutMain2.addWidget(self.labelImage, 1)\n layoutMain2.addLayout(layoutSettings)\n\n layoutSettings.addWidget(labelChannels)\n layoutSettings.addLayout(layoutChannels)\n layoutChannels.addWidget(self.comboChannel0)\n layoutChannels.addWidget(self.comboChannel1)\n layoutChannels.addWidget(self.comboChannel2)\n layoutSettings.addWidget(labelKernels)\n layoutSettings.addLayout(layoutKernelSB)\n layoutKernelSB.addWidget(self.kernel0SB)\n layoutKernelSB.addWidget(self.kernel1SB)\n layoutKernelSB.addWidget(self.kernel2SB)\n layoutSettings.addWidget(self.labelSobelX)\n layoutSettings.addLayout(layoutChboxSobelX)\n layoutChboxSobelX.addWidget(self.chboxSobelX0)\n layoutChboxSobelX.addWidget(self.chboxSobelX1)\n layoutChboxSobelX.addWidget(self.chboxSobelX2)\n layoutSettings.addWidget(self.slider1SobelX)\n layoutSettings.addWidget(self.slider2SobelX)\n layoutSettings.addWidget(self.labelSobelY)\n layoutSettings.addLayout(layoutChboxSobelY)\n layoutChboxSobelY.addWidget(self.chboxSobelY0)\n layoutChboxSobelY.addWidget(self.chboxSobelY1)\n layoutChboxSobelY.addWidget(self.chboxSobelY2)\n layoutSettings.addWidget(self.slider1SobelY)\n layoutSettings.addWidget(self.slider2SobelY)\n layoutSettings.addWidget(self.labelSobelM)\n layoutSettings.addLayout(layoutChboxSobelM)\n layoutChboxSobelM.addWidget(self.chboxSobelM0)\n layoutChboxSobelM.addWidget(self.chboxSobelM1)\n layoutChboxSobelM.addWidget(self.chboxSobelM2)\n layoutSettings.addWidget(self.slider1SobelM)\n layoutSettings.addWidget(self.slider2SobelM)\n layoutSettings.addWidget(self.labelSobelA)\n layoutSettings.addLayout(layoutChboxSobelA)\n layoutChboxSobelA.addWidget(self.chboxSobelA0)\n layoutChboxSobelA.addWidget(self.chboxSobelA1)\n layoutChboxSobelA.addWidget(self.chboxSobelA2)\n layoutSettings.addWidget(self.slider1SobelA)\n layoutSettings.addWidget(self.slider2SobelA)\n layoutSettings.addStretch(1)\n\n mainWidget = QWidget()\n mainWidget.setLayout(layoutMain)\n self.setCentralWidget(mainWidget)\n self.showMaximized()\n\n def selectChannel(self, combo, N):\n if combo.currentText() == 'G':\n im = cv2.cvtColor(self.baseImages[N], cv2.COLOR_BGR2GRAY)\n elif combo.currentText() == 'L':\n im = cv2.cvtColor(self.baseImages[N], cv2.COLOR_BGR2HLS)[:, :, 1]\n elif combo.currentText() == 'S':\n im = cv2.cvtColor(self.baseImages[N], cv2.COLOR_BGR2HLS)[:, :, 2]\n return im\n\n def updateOneImg(self, imN):\n\n changed = False\n\n if self.chboxSobelX0.isChecked() or 
self.chboxSobelY0.isChecked() or self.chboxSobelM0.isChecked() or \\\n self.chboxSobelA0.isChecked():\n changed = True\n im = self.selectChannel(self.comboChannel0, imN)\n th = pipeline.Tresholds(im, kernel=self.kernel0SB.value())\n\n im0 = np.ones_like(im, dtype='uint8')\n if self.chboxSobelX0.isChecked():\n im0 = im0 & th.abs_sobel_thresh(orient='x', thresh=self.SobelXTh)\n if self.chboxSobelY0.isChecked():\n im0 = im0 & th.abs_sobel_thresh(orient='y', thresh=self.SobelYTh)\n if self.chboxSobelM0.isChecked():\n im0 = im0 & th.mag_thresh(thresh=self.SobelMTh)\n if self.chboxSobelA0.isChecked():\n im0 = im0 & th.dir_threshold(thresh=self.SobelATh)\n else:\n im0 = np.zeros_like(self.baseImages[imN][:,:,0], dtype='uint8')\n\n if self.chboxSobelX1.isChecked() or self.chboxSobelY1.isChecked() or self.chboxSobelM1.isChecked() or \\\n self.chboxSobelA1.isChecked():\n changed = True\n im = self.selectChannel(self.comboChannel1, imN)\n th = pipeline.Tresholds(im, kernel=self.kernel1SB.value())\n\n im1 = np.ones_like(im, dtype='uint8')\n if self.chboxSobelX1.isChecked():\n im1 = im1 & th.abs_sobel_thresh(orient='x', thresh=self.SobelXTh)\n if self.chboxSobelY1.isChecked():\n im1 = im1 & th.abs_sobel_thresh(orient='y', thresh=self.SobelYTh)\n if self.chboxSobelM1.isChecked():\n im1 = im1 & th.mag_thresh(thresh=self.SobelMTh)\n if self.chboxSobelA1.isChecked():\n im1 = im1 & th.dir_threshold(thresh=self.SobelATh)\n else:\n im1 = np.zeros_like(self.baseImages[imN][:,:,0], dtype='uint8')\n\n if self.chboxSobelX2.isChecked() or self.chboxSobelY2.isChecked() or self.chboxSobelM2.isChecked() or \\\n self.chboxSobelA2.isChecked():\n changed = True\n im = self.selectChannel(self.comboChannel2, imN)\n th = pipeline.Tresholds(im, kernel=self.kernel2SB.value())\n\n im2 = np.ones_like(im, dtype='uint8')\n if self.chboxSobelX2.isChecked():\n im2 = im2 & th.abs_sobel_thresh(orient='x', thresh=self.SobelXTh)\n if self.chboxSobelY2.isChecked():\n im2 = im2 & th.abs_sobel_thresh(orient='y', thresh=self.SobelYTh)\n if self.chboxSobelM2.isChecked():\n im2 = im2 & th.mag_thresh(thresh=self.SobelMTh)\n if self.chboxSobelA2.isChecked():\n im2 = im2 & th.dir_threshold(thresh=self.SobelATh)\n else:\n im2 = np.zeros_like(self.baseImages[imN][:,:,0], dtype='uint8')\n\n if not changed:\n imf = cv2.cvtColor(self.baseImages[imN], cv2.COLOR_BGR2RGB)\n else:\n imf = np.dstack((im0*255, im1*255, im2*255))\n\n return imf\n\n def updateImage(self):\n\n if len(self.baseImages) == 0:\n return\n\n imf = []\n for i in range(len(self.baseImages)):\n im = self.updateOneImg(i)\n imf.append(im)\n\n\n frameSize = (self.labelImage.size().width(), self.labelImage.size().height(), 3)\n bytesPerLine = 3 * frameSize[0]\n\n if len(self.baseImages) == 1:\n frameImg = cv2.resize(imf[0], frameSize[0:2])\n else:\n frameImg = np.zeros([frameSize[1], frameSize[0], frameSize[2]], dtype='uint8')\n midy = frameSize[1]//2\n midx = frameSize[0]//2\n # frameImg[0:midy, 0:midx, :] = 255\n frameImg[:midy,:midx,:] = cv2.resize(imf[0], (midx, midy))\n frameImg[:midy,midx:midx*2,:] = cv2.resize(imf[1], (midx, midy))\n if len(self.baseImages) >= 3:\n frameImg[midy:midy*2, :midx, :] = cv2.resize(imf[2], (midx, midy))\n if len(self.baseImages) >= 4:\n frameImg[midy:midy*2, midx:midx*2, :] = cv2.resize(imf[3], (midx, midy))\n\n qImg = QImage(frameImg, frameSize[0], frameSize[1], bytesPerLine, QImage.Format_RGB888)\n self.labelImage.setPixmap(QPixmap.fromImage(qImg))\n\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n ctUI = ChooseThresholdsUI()\n 
sys.exit(app.exec_())\n"
] | [
[
"numpy.zeros",
"numpy.ones_like",
"numpy.zeros_like",
"numpy.dstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
techthiyanes/DeepPavlov | [
"d73f45733d6b23347871aa293309730303b64450"
] | [
"deeppavlov/models/torch_bert/torch_transformers_sequence_tagger.py"
] | [
"# Copyright 2019 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import List, Union, Dict, Optional\n\nimport numpy as np\nimport torch\nfrom overrides import overrides\nfrom transformers import AutoModelForTokenClassification, AutoConfig\n\nfrom deeppavlov.core.commands.utils import expand_path\nfrom deeppavlov.core.common.errors import ConfigError\nfrom deeppavlov.core.common.registry import register\nfrom deeppavlov.core.models.torch_model import TorchModel\n\nlog = getLogger(__name__)\n\n\ndef token_from_subtoken(units: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n \"\"\" Assemble token level units from subtoken level units\n\n Args:\n units: torch.Tensor of shape [batch_size, SUBTOKEN_seq_length, n_features]\n mask: mask of token beginnings. For example: for tokens\n\n [[``[CLS]`` ``My``, ``capybara``, ``[SEP]``],\n [``[CLS]`` ``Your``, ``aar``, ``##dvark``, ``is``, ``awesome``, ``[SEP]``]]\n\n the mask will be\n\n [[0, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 0, 1, 1, 0]]\n\n Returns:\n word_level_units: Units assembled from ones in the mask. For the\n example above this units will correspond to the following\n\n [[``My``, ``capybara``],\n [``Your`, ``aar``, ``is``, ``awesome``,]]\n\n the shape of this tensor will be [batch_size, TOKEN_seq_length, n_features]\n \"\"\"\n shape = units.size()\n batch_size = shape[0]\n nf = shape[2]\n nf_int = units.size()[-1]\n\n # number of TOKENS in each sentence\n token_seq_lengths = torch.sum(mask, 1).to(torch.int64)\n # for a matrix m =\n # [[1, 1, 1],\n # [0, 1, 1],\n # [1, 0, 0]]\n # it will be\n # [3, 2, 1]\n\n n_words = torch.sum(token_seq_lengths)\n # n_words -> 6\n\n max_token_seq_len = torch.max(token_seq_lengths)\n # max_token_seq_len -> 3\n\n idxs = torch.stack(torch.nonzero(mask, as_tuple=True), dim=1)\n # for the matrix mentioned above\n # tf.where(mask) ->\n # [[0, 0],\n # [0, 1]\n # [0, 2],\n # [1, 1],\n # [1, 2]\n # [2, 0]]\n\n sample_ids_in_batch = torch.nn.functional.pad(input=idxs[:, 0], pad=[1, 0])\n # for indices\n # [[0, 0],\n # [0, 1]\n # [0, 2],\n # [1, 1],\n # [1, 2],\n # [2, 0]]\n # it is\n # [0, 0, 0, 0, 1, 1, 2]\n # padding is for computing change from one sample to another in the batch\n\n a = torch.logical_not(torch.eq(sample_ids_in_batch[1:], sample_ids_in_batch[:-1]).to(torch.int64))\n # for the example above the result of this statement equals\n # [0, 0, 0, 1, 0, 1]\n # so data samples begin in 3rd and 5th positions (the indexes of ones)\n\n # transforming sample start masks to the sample starts themselves\n q = a * torch.arange(n_words).to(torch.int64)\n # [0, 0, 0, 3, 0, 5]\n count_to_substract = torch.nn.functional.pad(torch.masked_select(q, q.to(torch.bool)), [1, 0])\n # [0, 3, 5]\n\n new_word_indices = torch.arange(n_words).to(torch.int64) - torch.gather(\n count_to_substract, dim=0, index=torch.cumsum(a, 0))\n # tf.range(n_words) -> [0, 1, 2, 3, 4, 5]\n # tf.cumsum(a) -> [0, 0, 0, 1, 1, 2]\n # 
tf.gather(count_to_substract, tf.cumsum(a)) -> [0, 0, 0, 3, 3, 5]\n # new_word_indices -> [0, 1, 2, 3, 4, 5] - [0, 0, 0, 3, 3, 5] = [0, 1, 2, 0, 1, 0]\n # new_word_indices is the concatenation of range(word_len(sentence))\n # for all sentences in units\n\n n_total_word_elements = (batch_size * max_token_seq_len).to(torch.int32)\n word_indices_flat = (idxs[:, 0] * max_token_seq_len + new_word_indices).to(torch.int64)\n x_mask = torch.sum(torch.nn.functional.one_hot(word_indices_flat, n_total_word_elements), 0)\n x_mask = x_mask.to(torch.bool)\n # to get absolute indices we add max_token_seq_len:\n # idxs[:, 0] * max_token_seq_len -> [0, 0, 0, 1, 1, 2] * 2 = [0, 0, 0, 3, 3, 6]\n # word_indices_flat -> [0, 0, 0, 3, 3, 6] + [0, 1, 2, 0, 1, 0] = [0, 1, 2, 3, 4, 6]\n # total number of words in the batch (including paddings)\n # batch_size * max_token_seq_len -> 3 * 3 = 9\n # tf.one_hot(...) ->\n # [[1. 0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 1. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 1. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 1. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 1. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 1. 0. 0.]]\n # x_mask -> [1, 1, 1, 1, 1, 0, 1, 0, 0]\n\n full_range = torch.arange(batch_size * max_token_seq_len).to(torch.int64)\n # full_range -> [0, 1, 2, 3, 4, 5, 6, 7, 8]\n nonword_indices_flat = torch.masked_select(full_range, torch.logical_not(x_mask))\n\n # # y_idxs -> [5, 7, 8]\n\n # get a sequence of units corresponding to the start subtokens of the words\n # size: [n_words, n_features]\n def gather_nd(params, indices):\n assert type(indices) == torch.Tensor\n return params[indices.transpose(0, 1).long().numpy().tolist()]\n\n elements = gather_nd(units, idxs)\n\n # prepare zeros for paddings\n # size: [batch_size * TOKEN_seq_length - n_words, n_features]\n sh = tuple(torch.stack([torch.sum(max_token_seq_len - token_seq_lengths), torch.tensor(nf)], 0).numpy())\n paddings = torch.zeros(sh, dtype=torch.float64)\n\n def dynamic_stitch(indices, data):\n # https://discuss.pytorch.org/t/equivalent-of-tf-dynamic-partition/53735/2\n n = sum(idx.numel() for idx in indices)\n res = [None] * n\n for i, data_ in enumerate(data):\n idx = indices[i].view(-1)\n if idx.numel() > 0:\n d = data_.view(idx.numel(), -1)\n k = 0\n for idx_ in idx:\n res[idx_] = d[k].to(torch.float64)\n k += 1\n return res\n\n tensor_flat = torch.stack(dynamic_stitch([word_indices_flat, nonword_indices_flat], [elements, paddings]))\n # tensor_flat -> [x, x, x, x, x, 0, x, 0, 0]\n\n tensor = torch.reshape(tensor_flat, (batch_size, max_token_seq_len.item(), nf_int))\n # tensor -> [[x, x, x],\n # [x, x, 0],\n # [x, 0, 0]]\n\n return tensor\n\n\ndef token_labels_to_subtoken_labels(labels, y_mask, input_mask):\n subtoken_labels = []\n labels_ind = 0\n n_tokens_with_special = int(np.sum(input_mask))\n\n for el in y_mask[1:n_tokens_with_special - 1]:\n if el == 1:\n subtoken_labels += [labels[labels_ind]]\n labels_ind += 1\n else:\n subtoken_labels += [labels[labels_ind - 1]]\n\n subtoken_labels = [0] + subtoken_labels + [0] * (len(input_mask) - n_tokens_with_special + 1)\n return subtoken_labels\n\n\n@register('torch_transformers_sequence_tagger')\nclass TorchTransformersSequenceTagger(TorchModel):\n \"\"\"Transformer-based model on PyTorch for text tagging. It predicts a label for every token (not subtoken)\n in the text. You can use it for sequence labeling tasks, such as morphological tagging or named entity recognition.\n\n Args:\n n_tags: number of distinct tags\n pretrained_bert: pretrained Bert checkpoint path or key title (e.g. 
\"bert-base-uncased\")\n return_probas: set this to `True` if you need the probabilities instead of raw answers\n bert_config_file: path to Bert configuration file, or None, if `pretrained_bert` is a string name\n attention_probs_keep_prob: keep_prob for Bert self-attention layers\n hidden_keep_prob: keep_prob for Bert hidden layers\n optimizer: optimizer name from `torch.optim`\n optimizer_parameters: dictionary with optimizer's parameters,\n e.g. {'lr': 0.1, 'weight_decay': 0.001, 'momentum': 0.9}\n learning_rate_drop_patience: how many validations with no improvements to wait\n learning_rate_drop_div: the divider of the learning rate after `learning_rate_drop_patience` unsuccessful\n validations\n load_before_drop: whether to load best model before dropping learning rate or not\n clip_norm: clip gradients by norm\n min_learning_rate: min value of learning rate if learning rate decay is used\n \"\"\"\n\n def __init__(self,\n n_tags: int,\n pretrained_bert: str,\n bert_config_file: Optional[str] = None,\n return_probas: bool = False,\n attention_probs_keep_prob: Optional[float] = None,\n hidden_keep_prob: Optional[float] = None,\n optimizer: str = \"AdamW\",\n optimizer_parameters: dict = {\"lr\": 1e-3, \"weight_decay\": 1e-6},\n learning_rate_drop_patience: int = 20,\n learning_rate_drop_div: float = 2.0,\n load_before_drop: bool = True,\n clip_norm: Optional[float] = None,\n min_learning_rate: float = 1e-07,\n **kwargs) -> None:\n\n self.n_classes = n_tags\n self.return_probas = return_probas\n self.attention_probs_keep_prob = attention_probs_keep_prob\n self.hidden_keep_prob = hidden_keep_prob\n self.clip_norm = clip_norm\n\n self.pretrained_bert = pretrained_bert\n self.bert_config_file = bert_config_file\n\n super().__init__(optimizer=optimizer,\n optimizer_parameters=optimizer_parameters,\n learning_rate_drop_patience=learning_rate_drop_patience,\n learning_rate_drop_div=learning_rate_drop_div,\n load_before_drop=load_before_drop,\n min_learning_rate=min_learning_rate,\n **kwargs)\n\n def train_on_batch(self,\n input_ids: Union[List[List[int]], np.ndarray],\n input_masks: Union[List[List[int]], np.ndarray],\n y_masks: Union[List[List[int]], np.ndarray],\n y: List[List[int]],\n *args, **kwargs) -> Dict[str, float]:\n \"\"\"\n\n Args:\n input_ids: batch of indices of subwords\n input_masks: batch of masks which determine what should be attended\n args: arguments passed to _build_feed_dict\n and corresponding to additional input\n and output tensors of the derived class.\n kwargs: keyword arguments passed to _build_feed_dict\n and corresponding to additional input\n and output tensors of the derived class.\n\n Returns:\n dict with fields 'loss', 'head_learning_rate', and 'bert_learning_rate'\n \"\"\"\n b_input_ids = torch.from_numpy(input_ids).to(self.device)\n b_input_masks = torch.from_numpy(input_masks).to(self.device)\n subtoken_labels = [token_labels_to_subtoken_labels(y_el, y_mask, input_mask)\n for y_el, y_mask, input_mask in zip(y, y_masks, input_masks)]\n b_labels = torch.from_numpy(np.array(subtoken_labels)).to(torch.int64).to(self.device)\n self.optimizer.zero_grad()\n\n loss = self.model(input_ids=b_input_ids,\n attention_mask=b_input_masks,\n labels=b_labels).loss\n loss.backward()\n # Clip the norm of the gradients to 1.0.\n # This is to help prevent the \"exploding gradients\" problem.\n if self.clip_norm:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_norm)\n\n self.optimizer.step()\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n 
return {'loss': loss.item()}\n\n def __call__(self,\n input_ids: Union[List[List[int]], np.ndarray],\n input_masks: Union[List[List[int]], np.ndarray],\n y_masks: Union[List[List[int]], np.ndarray]) -> Union[List[List[int]], List[np.ndarray]]:\n \"\"\" Predicts tag indices for a given subword tokens batch\n\n Args:\n input_ids: indices of the subwords\n input_masks: mask that determines where to attend and where not to\n y_masks: mask which determines the first subword units in the the word\n\n Returns:\n Label indices or class probabilities for each token (not subtoken)\n\n \"\"\"\n b_input_ids = torch.from_numpy(input_ids).to(self.device)\n b_input_masks = torch.from_numpy(input_masks).to(self.device)\n\n with torch.no_grad():\n # Forward pass, calculate logit predictions\n logits = self.model(b_input_ids, attention_mask=b_input_masks)\n\n # Move logits and labels to CPU and to numpy arrays\n logits = token_from_subtoken(logits[0].detach().cpu(), torch.from_numpy(y_masks))\n\n if self.return_probas:\n pred = torch.nn.functional.softmax(logits, dim=-1)\n pred = pred.detach().cpu().numpy()\n else:\n logits = logits.detach().cpu().numpy()\n pred = np.argmax(logits, axis=-1)\n seq_lengths = np.sum(y_masks, axis=1)\n pred = [p[:l] for l, p in zip(seq_lengths, pred)]\n\n return pred\n\n @overrides\n def load(self, fname=None):\n if fname is not None:\n self.load_path = fname\n\n if self.pretrained_bert:\n config = AutoConfig.from_pretrained(self.pretrained_bert, num_labels=self.n_classes,\n output_attentions=False, output_hidden_states=False)\n self.model = AutoModelForTokenClassification.from_pretrained(self.pretrained_bert, config=config)\n elif self.bert_config_file and Path(self.bert_config_file).is_file():\n self.bert_config = AutoConfig.from_json_file(str(expand_path(self.bert_config_file)))\n\n if self.attention_probs_keep_prob is not None:\n self.bert_config.attention_probs_dropout_prob = 1.0 - self.attention_probs_keep_prob\n if self.hidden_keep_prob is not None:\n self.bert_config.hidden_dropout_prob = 1.0 - self.hidden_keep_prob\n self.model = AutoModelForTokenClassification(config=self.bert_config)\n else:\n raise ConfigError(\"No pre-trained BERT model is given.\")\n\n self.model.to(self.device)\n\n self.optimizer = getattr(torch.optim, self.optimizer_name)(\n self.model.parameters(), **self.optimizer_parameters)\n if self.lr_scheduler_name is not None:\n self.lr_scheduler = getattr(torch.optim.lr_scheduler, self.lr_scheduler_name)(\n self.optimizer, **self.lr_scheduler_parameters)\n\n if self.load_path:\n log.info(f\"Load path {self.load_path} is given.\")\n if isinstance(self.load_path, Path) and not self.load_path.parent.is_dir():\n raise ConfigError(\"Provided load path is incorrect!\")\n\n weights_path = Path(self.load_path.resolve())\n weights_path = weights_path.with_suffix(f\".pth.tar\")\n if weights_path.exists():\n log.info(f\"Load path {weights_path} exists.\")\n log.info(f\"Initializing `{self.__class__.__name__}` from saved.\")\n\n # now load the weights, optimizer from saved\n log.info(f\"Loading weights from {weights_path}.\")\n checkpoint = torch.load(weights_path, map_location=self.device)\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n self.epochs_done = checkpoint.get(\"epochs_done\", 0)\n else:\n log.info(f\"Init from scratch. Load path {weights_path} does not exist.\")\n"
] | [
[
"torch.nn.functional.softmax",
"torch.max",
"torch.zeros",
"torch.eq",
"torch.load",
"torch.sum",
"torch.from_numpy",
"torch.tensor",
"numpy.argmax",
"torch.no_grad",
"torch.nonzero",
"torch.arange",
"torch.nn.functional.one_hot",
"numpy.array",
"torch.cumsum",
"numpy.sum",
"torch.nn.functional.pad",
"torch.logical_not"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vodnalasricharan/Face-Recognistion-using-custom-datasetopenCV | [
"5049f2d4924bfdc60858b21fe63e6440161920fd"
] | [
"faces-train.py"
] | [
"import torch\nimport torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions\nimport torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.\nimport torchvision.transforms as transforms # Transformations we can perform on our dataset\nimport torchvision\nimport os\nfrom torchvision import models\nimport pandas as pd\nfrom skimage import io\nfrom torch.utils.data import (\n Dataset,\n DataLoader,\n)\n\nimport cv2\nimport os\nimport numpy as np\nfrom PIL import Image\nimport pickle\nimport model\n\n# transform= transforms.Compose([\n# transforms.RandomResizedCrop(224),\n# transforms.ToTensor(),\n# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n# ])\nclass Load_data(Dataset):\n def __init__(self, csv_file, root_dir, transform=None):\n self.annotations = pd.read_csv(csv_file)\n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.annotations)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 1])\n image = io.imread(img_path)\n y_label = torch.tensor(int(self.annotations.iloc[index, 2]))\n\n if self.transform:\n image = self.transform(image)\n\n return (image, y_label)\ndataset = Load_data(\n csv_file=\"new.csv\",\n root_dir=\"train\",\n transform=transforms.ToTensor()\n)\ntrain_loader = DataLoader(dataset=dataset)\nvgg=model.vgg()\nprint(vgg)\nloss_fn = nn.CrossEntropyLoss()\nopt = optim.SGD(vgg.parameters(), lr=0.05)\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nprint(images.shape)\nprint(len(train_loader))\n\n# for image,label in enumerate(train_loader):\n# print(label)\nout=vgg(images)\nprint(out)\nfor epoch in range(2):\n losses = []\n\n for _, tensor in enumerate(train_loader):\n # Get data to cuda if possible\n # data = data.to(device=device)\n # targets = targets.to(device=device)\n\n # forward\n print(tensor)\n data=tensor[0]\n targets=tensor[1]\n opt.zero_grad()\n print(data.shape)\n scores = vgg(data)\n loss = loss_fn(scores, targets)\n\n losses.append(loss.item())\n\n # backward\n\n loss.backward()\n\n # gradient descent or adam step\n opt.step()\n\n print(f\"Cost at epoch {epoch} is {sum(losses)/len(losses)}\")\n#vgg.state_dict('./params')\n# for i in range(3):\n# \tprint('running epoch ',i)\n# \topt.zero_grad()\n# \toutputs = vgg(x_train)\n# \tloss = loss_fn(outputs,y_labels)\n# \tloss.backward()\n# \topt.step()\ntorch.save(vgg.state_dict(),'./params/params2.pth')\n#\n# #print(y_labels)\n# #print(x_train)\n#\n# with open(\"./face-labels.pickle\", 'wb') as f:\n# \tpickle.dump(label_ids, f)\n# vgg.classifier[6] = nn.Linear(final_in_features,len(label_ids))\n# loss_fn = nn.CrossEntropyLoss()\n# opt = optim.SGD(vgg.parameters(), lr=0.05)\n# print(x_train)\n# print(type(y_labels))\n# x_train=np.array(x_train)\n# y_labels=np.array(y_labels)\n# print(x_train)\n# print(type(y_labels))\n# x_train=torch.FloatTensor(x_train)\n# y_labels=torch.from_numpy(y_labels)\n# print(type(x_train))\n# print(type(x_train[1]))\n# print(type(y_labels))\n# for i in range(3):\n# \tprint('running epoch ',i)\n# \topt.zero_grad()\n# \toutputs = vgg(x_train)\n# \tloss = loss_fn(outputs,y_labels)\n# \tloss.backward()\n# \topt.step()\n# torch.save(vgg.state_dict(),'./params')\n# # recognizer.train(x_train, np.array(y_labels))\n# # recognizer.save(\"recognizers/face-trainner.yml\")"
] | [
[
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
shahidmuzaffar98/Deep-Image-Processing | [
"ba9b402950fcf78aa7a19228b12d3f3350f02219"
] | [
"data.py"
] | [
"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport math\r\nimport random\r\nimport cv2\r\nimport numpy as np\r\nfrom queue import Queue\r\nfrom threading import Thread as Process\r\n#from multiprocessing import Process,Queue\r\nimport time\r\n\r\nfrom .utils import *\r\n\r\nfrom skimage.io import imread\r\nfrom skimage.transform import resize\r\n\r\nclass DataSet(object):\r\n \"\"\"TextDataSet\r\n process text input file dataset \r\n text file format:\r\n image_path\r\n \"\"\"\r\n\r\n def __init__(self, common_params=None, dataset_params=None):\r\n \"\"\"\r\n Args:\r\n common_params: A dict\r\n dataset_params: A dict\r\n \"\"\"\r\n if common_params:\r\n self.image_size = int(common_params['image_size'])\r\n self.batch_size = int(common_params['batch_size'])\r\n \r\n if dataset_params:\r\n self.data_path = str(dataset_params['path'])\r\n self.thread_num = int(int(dataset_params['thread_num']) / 2)\r\n self.thread_num2 = int(int(dataset_params['thread_num']) / 2)\r\n #record and image_label queue\r\n self.record_queue = Queue(maxsize=10000)\r\n self.image_queue = Queue(maxsize=5000)\r\n\r\n self.batch_queue = Queue(maxsize=100)\r\n\r\n self.record_list = [] \r\n\r\n # filling the record_list\r\n input_file = open(self.data_path, 'r')\r\n\r\n for line in input_file:\r\n line = line.strip()\r\n self.record_list.append(line)\r\n\r\n self.record_point = 0\r\n self.record_number = len(self.record_list)\r\n\r\n self.num_batch_per_epoch = int(self.record_number / self.batch_size)\r\n\r\n t_record_producer = Process(target=self.record_producer)\r\n t_record_producer.daemon = True\r\n t_record_producer.start()\r\n\r\n for i in range(self.thread_num):\r\n t = Process(target=self.record_customer)\r\n t.daemon = True\r\n t.start()\r\n\r\n for i in range(self.thread_num2):\r\n t = Process(target=self.image_customer)\r\n t.daemon = True\r\n t.start()\r\n\r\n def record_producer(self):\r\n \"\"\"record_queue's processor\r\n \"\"\"\r\n while True:\r\n if self.record_point % self.record_number == 0:\r\n random.shuffle(self.record_list)\r\n self.record_point = 0\r\n self.record_queue.put(self.record_list[self.record_point])\r\n self.record_point += 1\r\n\r\n def image_process(self, image):\r\n \"\"\"record process \r\n Args: record \r\n Returns:\r\n image: 3-D ndarray\r\n \"\"\"\r\n h = image.shape[0]\r\n w = image.shape[1]\r\n\r\n if w > h:\r\n image = cv2.resize(image, (int(self.image_size * w / h), self.image_size))\r\n\r\n mirror = np.random.randint(0, 2)\r\n if mirror:\r\n image = np.fliplr(image)\r\n crop_start = np.random.randint(0, int(self.image_size * w / h) - self.image_size + 1)\r\n image = image[:, crop_start:crop_start + self.image_size, :]\r\n else:\r\n image = cv2.resize(image, (self.image_size, int(self.image_size * h / w)))\r\n mirror = np.random.randint(0, 2)\r\n if mirror:\r\n image = np.fliplr(image)\r\n crop_start = np.random.randint(0, int(self.image_size * h / w) - self.image_size + 1)\r\n image = image[crop_start:crop_start + self.image_size, :, :]\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n return image\r\n\r\n def record_customer(self):\r\n \"\"\"record queue's customer \r\n \"\"\"\r\n while True:\r\n item = self.record_queue.get()\r\n out = cv2.imread(item)\r\n if len(out.shape)==3 and out.shape[2]==3:\r\n self.image_queue.put(out)\r\n def image_customer(self):\r\n while True:\r\n images = []\r\n for i in range(self.batch_size):\r\n image = self.image_queue.get()\r\n image = 
self.image_process(image)\r\n images.append(image)\r\n images = np.asarray(images, dtype=np.uint8)\r\n\r\n self.batch_queue.put(preprocess(images))\r\n\r\n def batch(self):\r\n \"\"\"get batch\r\n Returns:\r\n images: 4-D ndarray [batch_size, height, width, 3]\r\n \"\"\"\r\n print(self.record_queue.qsize(), self.image_queue.qsize(), self.batch_queue.qsize())\r\n return self.batch_queue.get()\r\n"
] | [
[
"numpy.asarray",
"numpy.fliplr",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
santosh-d3vpl3x/data-validation | [
"b4809803be2d1a0490546f2d21dd4cb7244e6323"
] | [
"tensorflow_data_validation/statistics/generators/image_stats_generator.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Module that computes statistics for features of image format.\n\nSpecifically, the following statistics are computed:\n- Maximum image heigh and width\n- Histogram of value count by image format\n- If the rate of recognized formats is high enough and enough values\n have been considered, features get marked with domain_info: image_domain\n used for schema inference.\n\nThe current implementation is using imghdr for identifying image formats\n(efficient, based on metadata) and tf.image.decode_image for image height,\nwidth (possibly expensive, performs decoding).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport functools\nimport imghdr\nimport numpy as np\nimport pandas as pd\nimport six\n\nfrom tensorflow_data_validation import types\nfrom tensorflow_data_validation.pyarrow_tf import pyarrow as pa\nfrom tensorflow_data_validation.pyarrow_tf import tensorflow as tf\nfrom tensorflow_data_validation.statistics.generators import stats_generator\nfrom tensorflow_data_validation.utils import stats_util\nfrom typing import List, Iterable, Text\nfrom tensorflow_metadata.proto.v0 import statistics_pb2\n\n_DOMAIN_INFO = 'domain_info'\n_IMAGE_DOMAIN = 'image_domain {}'\n_IMAGE_MAX_WIDTH_STATISTICS = 'image_max_width'\n_IMAGE_MAX_HEIGHT_STATISTICS = 'image_max_height'\n_IMAGE_FORMAT_HISTOGRAM = 'image_format_histogram'\n\n# ImageStatsGenerator default initialization values.\n_IS_IMAGE_RATIO = 0.8\n_VALUES_THRESHOLD = 100\n\n\nclass ImageDecoderInterface(six.with_metaclass(abc.ABCMeta)):\n \"\"\"Interface for extracting image formats and sizes.\"\"\"\n\n @abc.abstractmethod\n def get_formats(self, values: np.ndarray) -> np.ndarray:\n \"\"\"Returns the image format name for each value if it represents an image.\n\n Args:\n values: a list of values in bytes to check the image format.\n\n Returns:\n A list of string image formats (e.g: 'jpeg', 'bmp', ...) 
or None\n if the value is not a supported image, in the same order as the input\n value_list.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_sizes(self, values: np.ndarray) -> np.ndarray:\n \"\"\"Returns the image size for each value if it represents an image.\n\n Args:\n values: a list of values in bytes to check the image size.\n\n Returns:\n A list of (image_height, image_width) tuple (if the value represents an\n image) in the same order as the input value list.\n \"\"\"\n raise NotImplementedError\n\n\nclass TfImageDecoder(ImageDecoderInterface):\n \"\"\"ImageDecoderInterface implementation based on tensorflow library.\n\n This image decoder only supports image formats supported by:\n tf.image.decode_image, ['bmp', 'gif', 'jpeg', 'png'].\n\n Image sizes are computed using tf.image.decode_image, which requires tf.\n Initializating and pickling tf objects can be non-trivial, so:\n - Initialization is done lazily when get_sizes computation is needed.\n - __reduce__() is overridden so that tf state is ignored. It is lazily\n initialized as needed, after deserialization.\n \"\"\"\n\n def __init__(self):\n self._lazy_get_sizes_callable = None\n\n def __reduce__(self):\n return TfImageDecoder, tuple()\n\n def _initialize_lazy_get_sizes_callable(self):\n # Initialize the tensorflow graph for decoding images.\n graph = tf.Graph()\n self._session = tf.compat.v1.Session(graph=graph)\n\n def get_image_shape(value):\n image_shape = tf.shape(input=tf.image.decode_image(value))\n # decode_image returns a 3-D array ([height, width, num_channels]) for\n # BMP/JPEG/PNG images, but 4-D array ([num_frames, height, width, 3])\n # for GIF images.\n return tf.cond(\n pred=tf.equal(tf.size(input=image_shape), 4),\n true_fn=lambda: image_shape[1:3],\n false_fn=lambda: image_shape[0:2],\n )\n\n with self._session.graph.as_default(), self._session.as_default():\n self._batch_image_input = tf.compat.v1.placeholder(\n dtype=tf.string, shape=[None])\n self._image_shapes = tf.map_fn(\n get_image_shape,\n elems=self._batch_image_input,\n dtype=tf.int32,\n infer_shape=False)\n graph.finalize()\n self._lazy_get_sizes_callable = self._session.make_callable(\n fetches=self._image_shapes, feed_list=[self._batch_image_input])\n\n def get_formats(self, values: List[np.object]) -> np.ndarray:\n \"\"\"Returns the image format name for each value if it represents an image.\n\n Args:\n values: a list of value in bytes to check the image format.\n\n Returns:\n A list of image format name (e.g. 
'JPG'/'GIF'/etc, or None if the\n value is not an image) in the same order as the input value list.\n \"\"\"\n get_format = functools.partial(imghdr.what, None)\n return np.vectorize(get_format, otypes=[np.object])(values)\n\n def get_sizes(self, values: np.ndarray) -> np.ndarray:\n \"\"\"Returns the image size for each value if it represents an image.\n\n Args:\n values: a list of value in bytes to check the image size.\n\n Returns:\n A numpy array containing (image_height, image_width) tuples (if the value\n represents an image) in the same order as the input value list.\n\n Raises:\n ValueError: If any of the input value does not represents an image.\n \"\"\"\n if not self._lazy_get_sizes_callable:\n self._initialize_lazy_get_sizes_callable()\n return self._lazy_get_sizes_callable(values)\n\n\nclass _PartialImageStats(object):\n \"\"\"Partial feature stats for images.\n\n Attributes:\n total_num_values: The total number of values processed for this feature.\n max_width: The maximum image width among all the values for this feature.\n max_height: The maximum image height among all the values for this feature.\n counter_by_format: A dict from image format string to the number of images\n in this format. The format / key '' is used for non supported.\n invalidate: True only if this feature should never be considered, e.g: some\n value_lists have inconsistent formats.\n \"\"\"\n\n def __init__(self):\n self.total_num_values = 0\n self.max_width = 0\n self.max_height = 0\n self.counter_by_format = collections.Counter()\n self.invalidate = False\n\n def __iadd__(self, other: '_PartialImageStats') -> '_PartialImageStats':\n \"\"\"Merge two partial image stats.\"\"\"\n self.total_num_values += other.total_num_values\n self.max_width = max(self.max_width, other.max_width)\n self.max_height = max(self.max_height, other.max_height)\n self.counter_by_format += other.counter_by_format\n self.invalidate |= other.invalidate\n return self\n\n\nclass ImageStatsGenerator(stats_generator.CombinerFeatureStatsGenerator):\n \"\"\"Computes the statistics for features of image format.\"\"\"\n\n def __init__(self,\n image_decoder: ImageDecoderInterface = None,\n name: Text = 'ImageStatsGenerator',\n is_image_ratio_threshold: float = _IS_IMAGE_RATIO,\n values_threshold: int = _VALUES_THRESHOLD,\n enable_size_stats: bool = False):\n \"\"\"Initializes an image statistics generator.\n\n Args:\n image_decoder: ImageDecoderInterface instance for fetching image metadata.\n name: The unique name associated with this statistics generator.\n is_image_ratio_threshold: In order for a feature to be considered \"image\"\n type and respective stats to be generated, at least this ratio of values\n should be supported images.\n values_threshold: In order for a feature to be considered \"image\" type\n and respective stats to be generated, at least so many values should be\n considered.\n enable_size_stats: If True statistics about image sizes are generated.\n This currently requires decoding through TF that could have performance\n implications.\n \"\"\"\n super(ImageStatsGenerator, self).__init__(name)\n if image_decoder is None:\n image_decoder = TfImageDecoder()\n self._image_decoder = image_decoder\n self._is_image_ratio_threshold = is_image_ratio_threshold\n self._values_threshold = values_threshold\n self._enable_size_stats = enable_size_stats\n\n def create_accumulator(self) -> _PartialImageStats:\n \"\"\"Return a fresh, empty accumulator.\n\n Returns:\n An empty accumulator.\n \"\"\"\n return _PartialImageStats()\n\n 
def add_input(self, accumulator: _PartialImageStats,\n feature_path: types.FeaturePath,\n feature_array: pa.Array) -> _PartialImageStats:\n \"\"\"Return result of folding a batch of inputs into accumulator.\n\n Args:\n accumulator: The current accumulator.\n feature_path: The path of the feature.\n feature_array: An arrow array representing a batch of feature values\n which should be added to the accumulator.\n\n Returns:\n The accumulator after updating the statistics for the batch of inputs.\n \"\"\"\n if accumulator.invalidate:\n return accumulator\n feature_type = stats_util.get_feature_type_from_arrow_type(\n feature_path, feature_array.type)\n # Ignore null array.\n if feature_type is None:\n return accumulator\n # If we see a different type, invalidate.\n if feature_type != statistics_pb2.FeatureNameStatistics.STRING:\n accumulator.invalidate = True\n return accumulator\n\n # Consider using memoryview to avoid copying after upgrading to\n # arrow 0.12. Note that this would involve modifying the subsequent logic\n # to iterate over the values in a loop.\n values = np.asarray(feature_array.flatten())\n accumulator.total_num_values += values.size\n image_formats = self._image_decoder.get_formats(values)\n valid_mask = ~pd.isnull(image_formats)\n valid_formats = image_formats[valid_mask]\n format_counts = np.unique(valid_formats, return_counts=True)\n for (image_format, count) in zip(*format_counts):\n accumulator.counter_by_format[image_format] += count\n unknown_count = image_formats.size - valid_formats.size\n if unknown_count > 0:\n accumulator.counter_by_format[''] += unknown_count\n\n if self._enable_size_stats:\n # Get image height and width.\n image_sizes = self._image_decoder.get_sizes(values[valid_mask])\n if image_sizes.any():\n max_sizes = np.max(image_sizes, axis=0)\n # Update the max image height/width with all image values.\n accumulator.max_height = max(accumulator.max_height, max_sizes[0])\n accumulator.max_width = max(accumulator.max_width, max_sizes[1])\n\n return accumulator\n\n def merge_accumulators(\n self, accumulators: Iterable[_PartialImageStats]) -> _PartialImageStats:\n \"\"\"Merges several accumulators to a single accumulator value.\n\n Args:\n accumulators: The accumulators to merge.\n\n Returns:\n The merged accumulator.\n \"\"\"\n result = _PartialImageStats()\n for accumulator in accumulators:\n result += accumulator\n return result\n\n def extract_output(self, accumulator: _PartialImageStats\n ) -> statistics_pb2.FeatureNameStatistics:\n \"\"\"Return result of converting accumulator into the output value.\n\n Args:\n accumulator: The final accumulator value.\n\n Returns:\n A proto representing the result of this stats generator.\n \"\"\"\n result = statistics_pb2.FeatureNameStatistics()\n # Only generate an image statistics proto if the ratio of image feature\n # values is at or above a threshold.\n if (accumulator.invalidate or\n accumulator.total_num_values < self._values_threshold or\n (1 - (float(accumulator.counter_by_format['']) /\n accumulator.total_num_values)) < self._is_image_ratio_threshold):\n return result\n\n result.custom_stats.add(name=_DOMAIN_INFO, str=_IMAGE_DOMAIN)\n # Image format histogram.\n custom_stats = result.custom_stats.add(name=_IMAGE_FORMAT_HISTOGRAM)\n\n # Add the buckets with sorted image format.\n for image_format in sorted(accumulator.counter_by_format):\n custom_stats.rank_histogram.buckets.add(\n label=image_format if image_format else 'UNKNOWN',\n sample_count=accumulator.counter_by_format[image_format])\n if 
self._enable_size_stats:\n result.custom_stats.add(\n name=_IMAGE_MAX_WIDTH_STATISTICS, num=accumulator.max_width)\n result.custom_stats.add(\n name=_IMAGE_MAX_HEIGHT_STATISTICS, num=accumulator.max_height)\n return result\n"
] | [
[
"numpy.max",
"numpy.vectorize",
"pandas.isnull",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
SunSki/neural_sp | [
"4e4aca9b4cda1c7d95a1774d22f4d3298ad4ba4b"
] | [
"neural_sp/models/seq2seq/speech2text.py"
] | [
"# Copyright 2018 Kyoto University (Hirofumi Inaguma)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Speech to text sequence-to-sequence model.\"\"\"\n\nimport copy\nimport logging\nimport numpy as np\nimport random\nimport torch\nimport torch.nn as nn\n\nfrom neural_sp.bin.train_utils import load_checkpoint\nfrom neural_sp.models.base import ModelBase\nfrom neural_sp.models.lm.rnnlm import RNNLM\nfrom neural_sp.models.seq2seq.decoders.build import build_decoder\nfrom neural_sp.models.seq2seq.decoders.fwd_bwd_attention import fwd_bwd_attention\nfrom neural_sp.models.seq2seq.decoders.rnn_transducer import RNNTransducer\nfrom neural_sp.models.seq2seq.encoders.build import build_encoder\nfrom neural_sp.models.seq2seq.frontends.frame_stacking import stack_frame\nfrom neural_sp.models.seq2seq.frontends.input_noise import add_input_noise\nfrom neural_sp.models.seq2seq.frontends.sequence_summary import SequenceSummaryNetwork\nfrom neural_sp.models.seq2seq.frontends.spec_augment import SpecAugment\nfrom neural_sp.models.seq2seq.frontends.splicing import splice\nfrom neural_sp.models.torch_utils import np2tensor\nfrom neural_sp.models.torch_utils import tensor2np\nfrom neural_sp.models.torch_utils import pad_list\nfrom neural_sp.utils import mkdir_join\n\nrandom.seed(1)\n\nlogger = logging.getLogger(__name__)\n\n\nclass Speech2Text(ModelBase):\n \"\"\"Speech to text sequence-to-sequence model.\"\"\"\n\n def __init__(self, args, save_path=None, idx2token=None):\n\n super(ModelBase, self).__init__()\n\n self.save_path = save_path\n\n # for encoder, decoder\n self.input_type = args.input_type\n self.input_dim = args.input_dim\n self.enc_type = args.enc_type\n self.dec_type = args.dec_type\n\n # for OOV resolution\n self.enc_n_layers = args.enc_n_layers\n self.enc_n_layers_sub1 = args.enc_n_layers_sub1\n self.subsample = [int(s) for s in args.subsample.split('_')]\n\n # for decoder\n self.vocab = args.vocab\n self.vocab_sub1 = args.vocab_sub1\n self.vocab_sub2 = args.vocab_sub2\n self.blank = 0\n self.unk = 1\n self.eos = 2\n self.pad = 3\n # NOTE: reserved in advance\n\n # for the sub tasks\n self.main_weight = 1.0 - args.sub1_weight - args.sub2_weight\n self.sub1_weight = args.sub1_weight\n self.sub2_weight = args.sub2_weight\n self.mtl_per_batch = args.mtl_per_batch\n self.task_specific_layer = args.task_specific_layer\n\n # for CTC\n self.ctc_weight = min(args.ctc_weight, self.main_weight)\n self.ctc_weight_sub1 = min(args.ctc_weight_sub1, self.sub1_weight)\n self.ctc_weight_sub2 = min(args.ctc_weight_sub2, self.sub2_weight)\n\n # for backward decoder\n self.bwd_weight = min(args.bwd_weight, self.main_weight)\n self.fwd_weight = self.main_weight - self.bwd_weight - self.ctc_weight\n self.fwd_weight_sub1 = self.sub1_weight - self.ctc_weight_sub1\n self.fwd_weight_sub2 = self.sub2_weight - self.ctc_weight_sub2\n\n # for MBR\n self.mbr_training = args.mbr_training\n self.recog_params = vars(args)\n self.idx2token = idx2token\n\n # for discourse-aware model\n self.utt_id_prev = None\n\n # Feature extraction\n self.input_noise_std = args.input_noise_std\n self.n_stacks = args.n_stacks\n self.n_skips = args.n_skips\n self.n_splices = args.n_splices\n self.weight_noise_std = args.weight_noise_std\n self.specaug = None\n if args.n_freq_masks > 0 or args.n_time_masks > 0:\n assert args.n_stacks == 1 and args.n_skips == 1\n assert args.n_splices == 1\n self.specaug = SpecAugment(F=args.freq_width,\n T=args.time_width,\n n_freq_masks=args.n_freq_masks,\n n_time_masks=args.n_time_masks,\n 
p=args.time_width_upper,\n adaptive_number_ratio=args.adaptive_number_ratio,\n adaptive_size_ratio=args.adaptive_size_ratio,\n max_n_time_masks=args.max_n_time_masks)\n\n # Frontend\n self.ssn = None\n if args.sequence_summary_network:\n assert args.input_type == 'speech'\n self.ssn = SequenceSummaryNetwork(args.input_dim,\n n_units=512,\n n_layers=3,\n bottleneck_dim=100,\n dropout=0,\n param_init=args.param_init)\n\n # Encoder\n self.enc = build_encoder(args)\n if args.freeze_encoder:\n for n, p in self.enc.named_parameters():\n p.requires_grad = False\n logger.info('freeze %s' % n)\n\n special_symbols = {\n 'blank': self.blank,\n 'unk': self.unk,\n 'eos': self.eos,\n 'pad': self.pad,\n }\n\n # main task\n external_lm = None\n directions = []\n if self.fwd_weight > 0 or (self.bwd_weight == 0 and self.ctc_weight > 0):\n directions.append('fwd')\n if self.bwd_weight > 0:\n directions.append('bwd')\n\n for dir in directions:\n # Load the LM for LM fusion and decoder initialization\n if args.external_lm and dir == 'fwd':\n external_lm = RNNLM(args.lm_conf)\n load_checkpoint(external_lm, args.external_lm)\n # freeze LM parameters\n for n, p in external_lm.named_parameters():\n p.requires_grad = False\n\n # Decoder\n dec = build_decoder(args, special_symbols,\n self.enc.output_dim,\n args.vocab,\n self.ctc_weight,\n args.ctc_fc_list,\n self.main_weight - self.bwd_weight if dir == 'fwd' else self.bwd_weight,\n external_lm)\n setattr(self, 'dec_' + dir, dec)\n\n # sub task\n for sub in ['sub1', 'sub2']:\n if getattr(self, sub + '_weight') > 0:\n dec_sub = build_decoder(args, special_symbols,\n self.enc.output_dim,\n getattr(self, 'vocab_' + sub),\n getattr(self, 'ctc_weight_' + sub),\n getattr(args, 'ctc_fc_list_' + sub),\n getattr(self, sub + '_weight'),\n external_lm)\n setattr(self, 'dec_fwd_' + sub, dec_sub)\n\n if args.input_type == 'text':\n if args.vocab == args.vocab_sub1:\n # Share the embedding layer between input and output\n self.embed = dec.embed\n else:\n self.embed = nn.Embedding(args.vocab_sub1, args.emb_dim,\n padding_idx=self.pad)\n self.dropout_emb = nn.Dropout(p=args.dropout_emb)\n\n # Initialize bias in forget gate with 1\n # self.init_forget_gate_bias_with_one()\n\n # Fix all parameters except for the gating parts in deep fusion\n if args.lm_fusion == 'deep' and external_lm is not None:\n for n, p in self.named_parameters():\n if 'output' in n or 'output_bn' in n or 'linear' in n:\n p.requires_grad = True\n else:\n p.requires_grad = False\n\n def trigger_scheduled_sampling(self):\n # main task\n for dir in ['fwd', 'bwd']:\n if hasattr(self, 'dec_' + dir):\n logging.info('Activate scheduled sampling (main)')\n getattr(self, 'dec_' + dir).trigger_scheduled_sampling()\n\n # sub task\n for sub in ['sub1', 'sub2']:\n if hasattr(self, 'dec_fwd_' + sub):\n logging.info('Activate scheduled sampling (%s)' % sub)\n getattr(self, 'dec_fwd_' + sub).trigger_scheduled_sampling()\n\n def trigger_quantity_loss(self):\n # main task only now\n if hasattr(self, 'dec_fwd'):\n logging.info('Activate quantity loss')\n getattr(self, 'dec_fwd').trigger_quantity_loss()\n\n def reset_session(self):\n # main task\n for dir in ['fwd', 'bwd']:\n if hasattr(self, 'dec_' + dir):\n getattr(self, 'dec_' + dir).reset_session()\n\n # sub task\n for sub in ['sub1', 'sub2']:\n if hasattr(self, 'dec_fwd_' + sub):\n getattr(self, 'dec_fwd_' + sub).reset_session()\n\n def forward(self, batch, task, is_eval=False, teacher=None, teacher_lm=None):\n \"\"\"Forward pass.\n\n Args:\n batch (dict):\n xs (list): input 
data of size `[T, input_dim]`\n xlens (list): lengths of each element in xs\n ys (list): reference labels in the main task of size `[L]`\n ys_sub1 (list): reference labels in the 1st auxiliary task of size `[L_sub1]`\n ys_sub2 (list): reference labels in the 2nd auxiliary task of size `[L_sub2]`\n utt_ids (list): name of utterances\n speakers (list): name of speakers\n task (str): all/ys*/ys_sub*\n is_eval (bool): evaluation mode\n This should be used in inference model for memory efficiency.\n teacher (Speech2Text): used for knowledge distillation from ASR\n teacher_lm (RNNLM): used for knowledge distillation from LM\n Returns:\n loss (FloatTensor): `[1]`\n observation (dict):\n\n \"\"\"\n if is_eval:\n self.eval()\n with torch.no_grad():\n loss, observation = self._forward(batch, task)\n else:\n self.train()\n loss, observation = self._forward(batch, task, teacher, teacher_lm)\n\n return loss, observation\n\n def _forward(self, batch, task, teacher=None, teacher_lm=None):\n # Encode input features\n if self.input_type == 'speech':\n if self.mtl_per_batch:\n eout_dict = self.encode(batch['xs'], task)\n else:\n eout_dict = self.encode(batch['xs'], 'all')\n else:\n eout_dict = self.encode(batch['ys_sub1'])\n\n observation = {}\n loss = torch.zeros((1,), dtype=torch.float32, device=self.device)\n\n # for the forward decoder in the main task\n if (self.fwd_weight > 0 or (self.bwd_weight == 0 and self.ctc_weight > 0) or self.mbr_training) and task in ['all', 'ys', 'ys.ctc', 'ys.mbr']:\n teacher_logits = None\n if teacher is not None:\n teacher.eval()\n teacher_logits = teacher.generate_logits(batch)\n # TODO(hirofumi): label smoothing, scheduled sampling, dropout?\n elif teacher_lm is not None:\n teacher_lm.eval()\n teacher_logits = self.generate_lm_logits(batch['ys'], lm=teacher_lm)\n\n loss_fwd, obs_fwd = self.dec_fwd(eout_dict['ys']['xs'], eout_dict['ys']['xlens'],\n batch['ys'], task,\n teacher_logits, self.recog_params, self.idx2token,\n batch['trigger_points'])\n loss += loss_fwd\n if isinstance(self.dec_fwd, RNNTransducer):\n observation['loss.transducer'] = obs_fwd['loss_transducer']\n else:\n observation['acc.att'] = obs_fwd['acc_att']\n observation['ppl.att'] = obs_fwd['ppl_att']\n observation['loss.att'] = obs_fwd['loss_att']\n observation['loss.mbr'] = obs_fwd['loss_mbr']\n if 'loss_quantity' not in obs_fwd.keys():\n obs_fwd['loss_quantity'] = None\n observation['loss.quantity'] = obs_fwd['loss_quantity']\n\n if 'loss_latency' not in obs_fwd.keys():\n obs_fwd['loss_latency'] = None\n observation['loss.latency'] = obs_fwd['loss_latency']\n\n observation['loss.ctc'] = obs_fwd['loss_ctc']\n\n # for the backward decoder in the main task\n if self.bwd_weight > 0 and task in ['all', 'ys.bwd']:\n loss_bwd, obs_bwd = self.dec_bwd(eout_dict['ys']['xs'], eout_dict['ys']['xlens'], batch['ys'], task)\n loss += loss_bwd\n observation['loss.att-bwd'] = obs_bwd['loss_att']\n observation['acc.att-bwd'] = obs_bwd['acc_att']\n observation['ppl.att-bwd'] = obs_bwd['ppl_att']\n observation['loss.ctc-bwd'] = obs_bwd['loss_ctc']\n\n # only fwd for sub tasks\n for sub in ['sub1', 'sub2']:\n # for the forward decoder in the sub tasks\n if (getattr(self, 'fwd_weight_' + sub) > 0 or getattr(self, 'ctc_weight_' + sub) > 0) and task in ['all', 'ys_' + sub, 'ys_' + sub + '.ctc']:\n if len(batch['ys_' + sub]) == 0:\n continue\n # NOTE: this is for evaluation at the end of every opoch\n\n loss_sub, obs_fwd_sub = getattr(self, 'dec_fwd_' + sub)(\n eout_dict['ys_' + sub]['xs'], eout_dict['ys_' + sub]['xlens'],\n 
batch['ys_' + sub], task)\n loss += loss_sub\n if isinstance(getattr(self, 'dec_fwd_' + sub), RNNTransducer):\n observation['loss.transducer-' + sub] = obs_fwd_sub['loss_transducer']\n else:\n observation['loss.att-' + sub] = obs_fwd_sub['loss_att']\n observation['acc.att-' + sub] = obs_fwd_sub['acc_att']\n observation['ppl.att-' + sub] = obs_fwd_sub['ppl_att']\n observation['loss.ctc-' + sub] = obs_fwd_sub['loss_ctc']\n\n return loss, observation\n\n def generate_logits(self, batch, temperature=1.0):\n # Encode input features\n if self.input_type == 'speech':\n eout_dict = self.encode(batch['xs'], task='ys')\n else:\n eout_dict = self.encode(batch['ys_sub1'], task='ys')\n\n # for the forward decoder in the main task\n logits = self.dec_fwd.forward_att(\n eout_dict['ys']['xs'], eout_dict['ys']['xlens'], batch['ys'],\n return_logits=True)\n return logits\n\n def generate_lm_logits(self, ys, lm, temperature=5.0):\n # Append <sos> and <eos>\n eos = next(lm.parameters()).new_zeros(1).fill_(self.eos).long()\n ys = [np2tensor(np.fromiter(y, dtype=np.int64), self.device)for y in ys]\n ys_in = pad_list([torch.cat([eos, y], dim=0) for y in ys], self.pad)\n lmout, _ = lm.decode(ys_in, None)\n logits = lm.output(lmout)\n return logits\n\n def encode(self, xs, task='all', streaming=False, lookback=False, lookahead=False):\n \"\"\"Encode acoustic or text features.\n\n Args:\n xs (list): A list of length `[B]`, which contains Tensor of size `[T, input_dim]`\n task (str): all/ys*/ys_sub1*/ys_sub2*\n streaming (bool): streaming encoding\n lookback (bool): truncate leftmost frames for lookback in CNN context\n lookahead (bool): truncate rightmost frames for lookahead in CNN context\n Returns:\n eout_dict (dict):\n\n \"\"\"\n if self.input_type == 'speech':\n # Frame stacking\n if self.n_stacks > 1:\n xs = [stack_frame(x, self.n_stacks, self.n_skips) for x in xs]\n\n # Splicing\n if self.n_splices > 1:\n xs = [splice(x, self.n_splices, self.n_stacks) for x in xs]\n\n xlens = torch.IntTensor([len(x) for x in xs])\n xs = pad_list([np2tensor(x, self.device).float() for x in xs], 0.)\n\n # SpecAugment\n if self.specaug is not None and self.training:\n xs = self.specaug(xs)\n\n # Weight noise injection\n if self.weight_noise_std > 0 and self.training:\n self.add_weight_noise(std=self.weight_noise_std)\n\n # Input Gaussian noise injection\n if self.input_noise_std > 0 and self.training:\n xs = add_input_noise(xs, std=self.input_noise_std)\n\n # Sequence summary network\n if self.ssn is not None:\n xs = self.ssn(xs, xlens)\n\n elif self.input_type == 'text':\n xlens = torch.IntTensor([len(x) for x in xs])\n xs = [np2tensor(np.fromiter(x, dtype=np.int64), self.device) for x in xs]\n xs = pad_list(xs, self.pad)\n xs = self.dropout_emb(self.embed(xs))\n # TODO(hirofumi): fix for Transformer\n\n # encoder\n eout_dict = self.enc(xs, xlens, task.split('.')[0], streaming, lookback, lookahead)\n\n if self.main_weight < 1 and self.enc_type in ['conv', 'tds', 'gated_conv']:\n for sub in ['sub1', 'sub2']:\n eout_dict['ys_' + sub]['xs'] = eout_dict['ys']['xs'].clone()\n eout_dict['ys_' + sub]['xlens'] = eout_dict['ys']['xlens'][:]\n\n return eout_dict\n\n def get_ctc_probs(self, xs, task='ys', temperature=1, topk=None):\n self.eval()\n with torch.no_grad():\n eout_dict = self.encode(xs, task)\n dir = 'fwd' if self.fwd_weight >= self.bwd_weight else 'bwd'\n if task == 'ys_sub1':\n dir += '_sub1'\n elif task == 'ys_sub2':\n dir += '_sub2'\n\n if task == 'ys':\n assert self.ctc_weight > 0\n elif task == 'ys_sub1':\n assert 
self.ctc_weight_sub1 > 0\n elif task == 'ys_sub2':\n assert self.ctc_weight_sub2 > 0\n ctc_probs, indices_topk = getattr(self, 'dec_' + dir).ctc_probs_topk(\n eout_dict[task]['xs'], temperature, topk)\n return tensor2np(ctc_probs), tensor2np(indices_topk), eout_dict[task]['xlens']\n\n def ctc_forced_align(self, xs, ys, task='ys'):\n \"\"\"CTC-based forced alignment.\n\n Args:\n xs (FloatTensor): `[B, T, idim]`\n ys (list): length `B`, each of which contains a list of size `[L]`\n Returns:\n trigger_points (np.ndarray): `[B, L]`\n\n \"\"\"\n self.eval()\n with torch.no_grad():\n eout_dict = self.encode(xs, 'ys')\n # NOTE: support the main task only\n trigger_points = getattr(self, 'dec_fwd').ctc_forced_align(\n eout_dict[task]['xs'], eout_dict[task]['xlens'], ys)\n return tensor2np(trigger_points)\n\n def plot_attention(self):\n \"\"\"Plot attention weights during training.\"\"\"\n # encoder\n self.enc._plot_attention(mkdir_join(self.save_path, 'enc_att_weights'))\n # decoder\n self.dec_fwd._plot_attention(mkdir_join(self.save_path, 'dec_att_weights'))\n if getattr(self, 'dec_fwd_sub1', None) is not None:\n self.dec_fwd_sub1._plot_attention(mkdir_join(self.save_path, 'dec_att_weights_sub1'))\n if getattr(self, 'dec_fwd_sub2', None) is not None:\n self.dec_fwd_sub2._plot_attention(mkdir_join(self.save_path, 'dec_att_weights_sub2'))\n\n def plot_ctc(self):\n \"\"\"Plot CTC posteriors during training.\"\"\"\n self.dec_fwd._plot_ctc(mkdir_join(self.save_path, 'ctc'))\n if getattr(self, 'dec_fwd_sub1', None) is not None:\n self.dec_fwd_sub1._plot_ctc(mkdir_join(self.save_path, 'ctc_sub1'))\n if getattr(self, 'dec_fwd_sub2', None) is not None:\n self.dec_fwd_sub2._plot_ctc(mkdir_join(self.save_path, 'ctc_sub2'))\n\n def decode_streaming(self, xs, params, idx2token, exclude_eos=False, task='ys'):\n from neural_sp.models.seq2seq.frontends.streaming import Streaming\n\n # check configurations\n assert task == 'ys'\n assert self.input_type == 'speech'\n assert self.ctc_weight > 0\n assert self.fwd_weight > 0\n assert len(xs) == 1 # batch size\n # assert params['recog_length_norm']\n global_params = copy.deepcopy(params)\n global_params['recog_max_len_ratio'] = 1.0\n\n streaming = Streaming(xs[0], params, self.enc, idx2token)\n chunk_sync = params['recog_chunk_sync']\n\n hyps = None\n best_hyp_id_stream = []\n is_reset = True # for the first chunk\n\n stdout = False\n\n self.eval()\n with torch.no_grad():\n lm = getattr(self, 'lm_fwd', None)\n lm_second = getattr(self, 'lm_second', None)\n\n while True:\n # Encode input features chunk by chunk\n x_chunk, is_last_chunk, lookback, lookahead = streaming.extract_feature()\n if is_reset:\n self.enc.reset_cache()\n eout_chunk = self.encode([x_chunk], task,\n streaming=True,\n lookback=lookback,\n lookahead=lookahead)[task]['xs']\n is_reset = False # detect the first boundary in the same chunk\n\n # CTC-based VAD\n ctc_log_probs_chunk = None\n if streaming.is_ctc_vad:\n ctc_probs_chunk = self.dec_fwd.ctc_probs(eout_chunk)\n if params['recog_ctc_weight'] > 0:\n ctc_log_probs_chunk = torch.log(ctc_probs_chunk)\n is_reset = streaming.ctc_vad(ctc_probs_chunk, stdout=stdout)\n\n # Truncate the most right frames\n if is_reset and not is_last_chunk and streaming.bd_offset >= 0:\n eout_chunk = eout_chunk[:, :streaming.bd_offset]\n streaming.eout_chunks.append(eout_chunk)\n\n # Chunk-synchronous attention decoding\n if chunk_sync:\n end_hyps, hyps, aws_seg = self.dec_fwd.beam_search_chunk_sync(\n eout_chunk, params, idx2token, lm,\n ctc_log_probs=ctc_log_probs_chunk, 
hyps=hyps,\n state_carry_over=False,\n ignore_eos=self.enc.enc_type in ['lstm', 'conv_lstm'])\n merged_hyps = sorted(end_hyps + hyps, key=lambda x: x['score'], reverse=True)\n best_hyp_id_prefix = np.array(merged_hyps[0]['hyp'][1:])\n if len(best_hyp_id_prefix) > 0 and best_hyp_id_prefix[-1] == self.eos:\n # reset beam if <eos> is generated from the best hypothesis\n best_hyp_id_prefix = best_hyp_id_prefix[:-1] # exclude <eos>\n # Segmentation strategy 2:\n # If <eos> is emitted from the decoder (not CTC),\n # the current chunk is segmented.\n if not is_reset:\n streaming.bd_offset = eout_chunk.size(1) - 1\n is_reset = True\n if len(best_hyp_id_prefix) > 0:\n # print('\\rStreaming (T:%d [frame], offset:%d [frame], blank:%d [frame]): %s' %\n # (streaming.offset + eout_chunk.size(1) * streaming.factor,\n # self.dec_fwd.n_frames * streaming.factor,\n # streaming.n_blanks * streaming.factor,\n # idx2token(best_hyp_id_prefix)))\n print('\\r%s' % (idx2token(best_hyp_id_prefix)))\n\n if is_reset:\n # Global decoding over the segmented region\n if not chunk_sync:\n eout = torch.cat(streaming.eout_chunks, dim=1)\n elens = torch.IntTensor([eout.size(1)])\n ctc_log_probs = None\n if params['recog_ctc_weight'] > 0:\n ctc_log_probs = torch.log(self.dec_fwd.ctc_probs(eout))\n nbest_hyps_id_offline = self.dec_fwd.beam_search(\n eout, elens, global_params, idx2token, lm, lm_second,\n ctc_log_probs=ctc_log_probs)[0]\n # print('Offline (T:%d [frame]): %s' %\n # (streaming.offset + eout_chunk.size(1) * streaming.factor,\n # idx2token(nbest_hyps_id_offline[0][0])))\n\n # pick up the best hyp from ended and active hypotheses\n if not chunk_sync:\n if len(nbest_hyps_id_offline[0][0]) > 0:\n best_hyp_id_stream.extend(nbest_hyps_id_offline[0][0])\n else:\n if len(best_hyp_id_prefix) > 0:\n best_hyp_id_stream.extend(best_hyp_id_prefix)\n # print('Final (T:%d [frame], offset:%d [frame]): %s' %\n # (streaming.offset + eout_chunk.size(1) * streaming.factor,\n # self.dec_fwd.n_frames * streaming.factor,\n # idx2token(best_hyp_id_prefix)))\n # print('-' * 50)\n # for test\n # eos_hyp = np.zeros(1, dtype=np.int32)\n # eos_hyp[0] = self.eos\n # best_hyp_id_stream.extend(eos_hyp)\n\n # reset\n streaming.reset(stdout=stdout)\n hyps = None\n\n streaming.next_chunk()\n # next chunk will start from the frame next to the boundary\n if not is_last_chunk:\n streaming.backoff(x_chunk, self.dec_fwd, stdout=stdout)\n if is_last_chunk:\n break\n\n # Global decoding over the last chunk\n if not chunk_sync and len(streaming.eout_chunks) > 0:\n eout = torch.cat(streaming.eout_chunks, dim=1)\n elens = torch.IntTensor([eout.size(1)])\n nbest_hyps_id_offline = self.dec_fwd.beam_search(\n eout, elens, global_params, idx2token, lm, lm_second)[0]\n # print('MoChA: ' + idx2token(nbest_hyps_id_offline[0][0]))\n # print('*' * 50)\n if len(nbest_hyps_id_offline[0][0]) > 0:\n best_hyp_id_stream.extend(nbest_hyps_id_offline[0][0])\n\n # pick up the best hyp\n if not is_reset and chunk_sync and len(best_hyp_id_prefix) > 0:\n best_hyp_id_stream.extend(best_hyp_id_prefix)\n\n if len(best_hyp_id_stream) > 0:\n return [np.stack(best_hyp_id_stream, axis=0)], [None]\n else:\n return [[]], [None]\n\n def streamable(self):\n return getattr(self.dec_fwd, 'streamable', False)\n\n def quantity_rate(self):\n return getattr(self.dec_fwd, 'quantity_rate', 1.0)\n\n def last_success_frame_ratio(self):\n return getattr(self.dec_fwd, 'last_success_frame_ratio', 0)\n\n def decode(self, xs, params, idx2token, exclude_eos=False,\n refs_id=None, refs=None, utt_ids=None, 
speakers=None,\n task='ys', ensemble_models=[]):\n \"\"\"Decode in the inference stage.\n\n Args:\n xs (list): A list of length `[B]`, which contains arrays of size `[T, input_dim]`\n params (dict): hyper-parameters for decoding\n beam_width (int): the size of beam\n min_len_ratio (float):\n max_len_ratio (float):\n len_penalty (float): length penalty\n cov_penalty (float): coverage penalty\n cov_threshold (float): threshold for coverage penalty\n lm_weight (float): the weight of RNNLM score\n resolving_unk (bool): not used (to make compatible)\n fwd_bwd_attention (bool):\n idx2token (): converter from index to token\n exclude_eos (bool): exclude <eos> from best_hyps_id\n refs_id (list): gold token IDs to compute log likelihood\n refs (list): gold transcriptions\n utt_ids (list):\n speakers (list):\n task (str): ys* or ys_sub1* or ys_sub2*\n ensemble_models (list): list of Speech2Text classes\n Returns:\n best_hyps_id (list): A list of length `[B]`, which contains arrays of size `[L]`\n aws (list): A list of length `[B]`, which contains arrays of size `[L, T, n_heads]`\n\n \"\"\"\n if task.split('.')[0] == 'ys':\n dir = 'bwd' if self.bwd_weight > 0 and params['recog_bwd_attention'] else 'fwd'\n elif task.split('.')[0] == 'ys_sub1':\n dir = 'fwd_sub1'\n elif task.split('.')[0] == 'ys_sub2':\n dir = 'fwd_sub2'\n else:\n raise ValueError(task)\n\n if utt_ids is not None:\n if self.utt_id_prev != utt_ids[0]:\n self.reset_session()\n self.utt_id_prev = utt_ids[0]\n\n self.eval()\n with torch.no_grad():\n # Encode input features\n if self.input_type == 'speech' and self.mtl_per_batch and 'bwd' in dir:\n eout_dict = self.encode(xs, task)\n else:\n eout_dict = self.encode(xs, task)\n\n # CTC\n if (self.fwd_weight == 0 and self.bwd_weight == 0) or (self.ctc_weight > 0 and params['recog_ctc_weight'] == 1):\n lm = getattr(self, 'lm_' + dir, None)\n lm_second = getattr(self, 'lm_second', None)\n lm_second_bwd = None # TODO\n\n best_hyps_id = getattr(self, 'dec_' + dir).decode_ctc(\n eout_dict[task]['xs'], eout_dict[task]['xlens'], params, idx2token,\n lm, lm_second, lm_second_bwd, 1, refs_id, utt_ids, speakers)\n return best_hyps_id, None\n\n # Attention/RNN-T\n elif params['recog_beam_width'] == 1 and not params['recog_fwd_bwd_attention']:\n best_hyps_id, aws = getattr(self, 'dec_' + dir).greedy(\n eout_dict[task]['xs'], eout_dict[task]['xlens'],\n params['recog_max_len_ratio'], idx2token,\n exclude_eos, refs_id, utt_ids, speakers)\n else:\n assert params['recog_batch_size'] == 1\n\n ctc_log_probs = None\n if params['recog_ctc_weight'] > 0:\n ctc_log_probs = self.dec_fwd.ctc_log_probs(eout_dict[task]['xs'])\n\n # forward-backward decoding\n if params['recog_fwd_bwd_attention']:\n lm_fwd = getattr(self, 'lm_fwd', None)\n lm_bwd = getattr(self, 'lm_bwd', None)\n\n # forward decoder\n nbest_hyps_id_fwd, aws_fwd, scores_fwd = self.dec_fwd.beam_search(\n eout_dict[task]['xs'], eout_dict[task]['xlens'],\n params, idx2token, lm_fwd, None, lm_bwd, ctc_log_probs,\n params['recog_beam_width'], False, refs_id, utt_ids, speakers)\n\n # backward decoder\n nbest_hyps_id_bwd, aws_bwd, scores_bwd, _ = self.dec_bwd.beam_search(\n eout_dict[task]['xs'], eout_dict[task]['xlens'],\n params, idx2token, lm_bwd, None, lm_fwd, ctc_log_probs,\n params['recog_beam_width'], False, refs_id, utt_ids, speakers)\n\n # forward-backward attention\n best_hyps_id = fwd_bwd_attention(\n nbest_hyps_id_fwd, aws_fwd, scores_fwd,\n nbest_hyps_id_bwd, aws_bwd, scores_bwd,\n self.eos, params['recog_gnmt_decoding'], 
params['recog_length_penalty'],\n                        idx2token, refs_id)\n                    aws = None\n                else:\n                    # ensemble\n                    ensmbl_eouts, ensmbl_elens, ensmbl_decs = [], [], []\n                    if len(ensemble_models) > 0:\n                        for model in ensemble_models:\n                            enc_outs_e = model.encode(xs, task)\n                            ensmbl_eouts += [enc_outs_e[task]['xs']]\n                            ensmbl_elens += [enc_outs_e[task]['xlens']]\n                            ensmbl_decs += [getattr(model, 'dec_' + dir)]\n                            # NOTE: only supports the main task for now\n\n                    lm = getattr(self, 'lm_' + dir, None)\n                    lm_second = getattr(self, 'lm_second', None)\n                    # the forward decoder is rescored with the backward LM and vice versa\n                    lm_bwd = getattr(self, 'lm_bwd' if dir == 'fwd' else 'lm_fwd', None)\n\n                    nbest_hyps_id, aws, scores = getattr(self, 'dec_' + dir).beam_search(\n                        eout_dict[task]['xs'], eout_dict[task]['xlens'],\n                        params, idx2token, lm, lm_second, lm_bwd, ctc_log_probs,\n                        1, exclude_eos, refs_id, utt_ids, speakers,\n                        ensmbl_eouts, ensmbl_elens, ensmbl_decs)\n                    best_hyps_id = [hyp[0] for hyp in nbest_hyps_id]\n\n            return best_hyps_id, aws\n"
] | [
[
"torch.nn.Dropout",
"torch.zeros",
"torch.cat",
"torch.nn.Embedding",
"numpy.stack",
"torch.no_grad",
"torch.log",
"numpy.fromiter",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
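The `decode` method in the `neural_sp` record above dispatches between pure-CTC decoding, greedy search, beam search, forward-backward rescoring, and ensembling based on the `params` dict. A minimal driving sketch, assuming `model` is an already-trained `Speech2Text` instance and `idx2token` an index-to-token converter as referenced in the file (the hyper-parameter values below are illustrative, not library defaults):

```python
import numpy as np

# Hypothetical driver; `model` and `idx2token` would come from a trained
# neural_sp checkpoint and its vocabulary, which are not constructed here.
params = {
    'recog_beam_width': 4,           # >1 selects the beam-search branch
    'recog_batch_size': 1,           # beam search asserts batch size 1
    'recog_ctc_weight': 0.3,         # >0 mixes in CTC prefix scores
    'recog_max_len_ratio': 1.0,
    'recog_bwd_attention': False,    # decode with the forward decoder
    'recog_fwd_bwd_attention': False,
}
xs = [np.random.randn(200, 80).astype(np.float32)]  # one utterance, `[T, input_dim]`
best_hyps_id, aws = model.decode(xs, params, idx2token, exclude_eos=True)
print(idx2token(best_hyps_id[0]))
```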
kundajelab/kerasAC | [
"6aa6573f5f07659bfd68deca37de77e47612020e"
] | [
"kerasAC/vis/plot_letters.py"
] | [
"import re\n\nimport matplotlib\nmatplotlib.use('pdf')\nfrom matplotlib import pyplot\nfrom matplotlib.patches import PathPatch\nfrom matplotlib.path import Path\n\nfrom shapely.wkt import loads as load_wkt\nfrom shapely import affinity\n\nimport numpy as np\nfrom pkg_resources import resource_filename\n\n##########################################################################\n# copied from descartes\n# https://pypi.python.org/pypi/descartes\n\n\nclass Polygon(object):\n # Adapt Shapely or GeoJSON/geo_interface polygons to a common interface\n\n def __init__(self, context):\n if hasattr(context, 'interiors'):\n self.context = context\n else:\n self.context = getattr(context, '__geo_interface__', context)\n\n @property\n def geom_type(self):\n return (\n getattr(self.context, 'geom_type', None) or self.context['type'])\n\n @property\n def exterior(self):\n return (\n getattr(self.context, 'exterior', None) or self.context['coordinates'][0])\n\n @property\n def interiors(self):\n value = getattr(self.context, 'interiors', None)\n if value is None:\n value = self.context['coordinates'][1:]\n return value\n\n\ndef PolygonPath(polygon):\n \"\"\"Constructs a compound matplotlib path from a Shapely or GeoJSON-like\n geometric object\"\"\"\n this = Polygon(polygon)\n assert this.geom_type == 'Polygon'\n\n def coding(ob):\n # The codes will be all \"LINETO\" commands, except for \"MOVETO\"s at the\n # beginning of each subpath\n n = len(getattr(ob, 'coords', None) or ob)\n vals = np.ones(n, dtype=Path.code_type) * Path.LINETO\n vals[0] = Path.MOVETO\n return vals\n vertices = np.concatenate(\n [np.asarray(this.exterior)] + [np.asarray(r)\n for r in this.interiors])\n codes = np.concatenate(\n [coding(this.exterior)] + [coding(r)\n for r in this.interiors])\n return Path(vertices, codes)\n\n\ndef PolygonPatch(polygon, **kwargs):\n \"\"\"Constructs a matplotlib patch from a geometric object\n\n The `polygon` may be a Shapely\n or GeoJSON-like object with or without holes.\n The `kwargs` are those supported by the matplotlib.patches.Polygon class\n constructor. 
Returns an instance of matplotlib.patches.PathPatch.\n\n Example (using Shapely Point and a matplotlib axes):\n\n >>> b = Point(0, 0).buffer(1.0)\n >>> patch = PolygonPatch(b, fc='blue', ec='blue', alpha=0.5)\n >>> axis.add_patch(patch)\n\n \"\"\"\n return PathPatch(PolygonPath(polygon), **kwargs)\n\n#\n# END copied from descartes\n#\n##########################################################################\n\n##########################################################################\n# Initialize the polygon paths for A,C,G,T\n#\n# Geometry taken from JTS TestBuilder Monospace font with fixed precision model\n# of 1000.0\n#\n\nA_data = \"\"\"\nMULTIPOLYGON (\n((24.7631 57.3346, 34.3963 57.3346, 52.391 -1.422, 44.1555 -1.422, 39.8363\n 13.8905, 19.2476 13.8905, 15.0039 -1.422, 6.781 -1.422, 24.7631 57.3346)),\n((29.5608 50.3205, 21.1742 20.2623, 37.9474 20.2623, 29.5608 50.3205))\n)\n\"\"\"\n\nC_data = \"\"\"POLYGON((\n52.391 2.5937, 48.5882 0.8417, 44.68 -0.4142, 40.5998 -1.17, 36.2814 -1.422,\n32.8755 -1.2671, 29.6656 -0.8024, 26.6518 -0.0278, 23.834 1.0565,\n21.2122 2.4507, 18.7865 4.1547, 16.5569 6.1686, 14.5233 8.4922,\n12.7087 11.0966, 11.136 13.9527, 9.8053 17.0606, 8.7166 20.4201,\n7.8698 24.0314, 7.2649 27.8943, 6.902 32.009, 6.781 36.3754, 6.9027 40.7209,\n7.2678 44.8198, 7.8764 48.6722, 8.7283 52.278, 9.8236 55.6371,\n11.1624 58.7497, 12.7446 61.6157, 14.5702 64.2351, 16.6133 66.5753,\n18.8481 68.6034, 21.2745 70.3195, 23.8926 71.7235, 26.7023 72.8156,\n29.7037 73.5956, 32.8967 74.0637, 36.2814 74.2197, 40.5998 73.9697,\n44.68 73.2196, 48.5882 71.9696, 52.391 70.2196, 52.391 60.1101,\n48.6468 62.739, 44.6331 64.657, 40.4709 65.8289, 36.2814 66.2196,\n31.7716 65.7557, 29.7437 65.1758, 27.8672 64.3641, 26.1421 63.3203,\n24.5684 62.0447, 23.146 60.5371, 21.875 58.7976, 19.7831 54.6129,\n18.289 49.481, 17.3925 43.4019, 17.0936 36.3754, 17.3925 29.3763,\n18.289 23.3166, 19.7831 18.1964, 21.875 14.0157, 23.146 12.2762,\n24.5684 10.7686, 26.1421 9.4929, 27.8672 8.4492, 29.7437 7.6375,\n31.7716 7.0576, 36.2814 6.5937, 40.5354 6.9844, 44.7034 8.1563,\n48.6878 10.0743, 52.391 12.7032, 52.391 2.5937))\"\"\"\n\nG_data = \"\"\"POLYGON((\n52.391 5.4974, 50.49 3.8964, 48.4724 2.502, 46.3383 1.3144, 44.0877 0.3334,\n41.7314 -0.4346, 39.2805 -0.9832, 34.0946 -1.422, 30.9504 -1.2772,\n27.9859 -0.843, 25.2009 -0.1191, 22.5956 0.8942, 20.1698 2.197,\n17.9236 3.7894, 15.857 5.6713, 13.9699 7.8428, 12.285 10.2753,\n10.8248 12.9404, 9.5892 15.8381, 8.5782 18.9685, 7.7919 22.3315,\n7.2303 25.9271, 6.8933 29.7553, 6.781 33.8161, 6.8948 37.8674,\n7.2362 41.6888, 7.8053 45.2803, 8.6019 48.6419, 9.6262 51.7737, 10.878 54.6755,\n12.3575 57.3474, 14.0646 59.7895, 15.9743 61.9712, 18.0615 63.862,\n20.3262 65.4618, 22.7685 66.7708, 25.3884 67.789, 28.1857 68.5162,\n31.1606 68.9525, 34.3131 69.098, 38.5048 68.7957, 42.5144 67.8889,\n46.3638 66.3703, 50.0748 64.2325, 50.0748 54.8075, 46.342 57.8466,\n42.5144 59.9716, 38.5266 61.2226, 34.3131 61.6395, 30.1132 61.2053,\n28.2228 60.6624, 26.4723 59.9024, 24.8614 58.9253, 23.3904 57.731,\n22.0591 56.3195, 20.8675 54.691, 18.9046 50.7806, 17.5025 45.998,\n16.6612 40.3432, 16.3808 33.8161, 16.6526 27.1962, 17.4679 21.4959,\n18.8267 16.7151, 20.7291 12.8539, 21.8892 11.2595, 23.1951 9.8776,\n24.6469 8.7084, 26.2446 7.7517, 27.9883 7.0076, 29.8778 6.4762, 34.0946 6.051,\n36.9534 6.2276, 39.4407 6.7575, 41.6331 7.6625, 43.607 8.9644, 43.607 27.2172,\n33.7304 27.2172, 33.7304 34.7776, 52.391 34.7776, 52.391 5.4974\n))\"\"\"\n\nT_data = \"\"\"POLYGON((\n6.781 58.3746, 
52.391 58.3746, 52.391 51.5569, 33.6933 51.5569, 33.6933 -1.422,\n25.5684 -1.422, 25.5684 51.5569, 6.781 51.5569, 6.781 58.3746\n))\"\"\"\n\n\ndef standardize_polygons_str(data_str):\n    \"\"\"Given a POLYGON string, standardize the coordinates to a 1x1 grid.\n\n    Input : data_str (taken from above)\n    Output: tuple of polygon objects\n    \"\"\"\n    # find all of the polygons in the letter (for instance an A\n    # needs to be constructed from 2 polygons)\n    path_strs = re.findall(r\"\\(\\(([^\\)]+?)\\)\\)\", data_str.strip())\n\n    # convert the data into a numpy array\n    polygons_data = []\n    for path_str in path_strs:\n        data = np.array([\n            tuple(map(float, x.split())) for x in path_str.strip().split(\",\")])\n        polygons_data.append(data)\n\n    # standardize the coordinates; np.vstack needs a sequence, not a\n    # generator, on recent numpy versions\n    min_coords = np.vstack([data.min(0) for data in polygons_data]).min(0)\n    max_coords = np.vstack([data.max(0) for data in polygons_data]).max(0)\n    for data in polygons_data:\n        data -= min_coords\n        data /= (max_coords - min_coords)\n\n    polygons = []\n    for data in polygons_data:\n        polygons.append(load_wkt(\n            \"POLYGON((%s))\" % \",\".join(\" \".join(map(str, x)) for x in data)))\n\n    return tuple(polygons)\n\n\nletters_polygons = {}\nletters_polygons['A'] = standardize_polygons_str(A_data)\nletters_polygons['C'] = standardize_polygons_str(C_data)\nletters_polygons['G'] = standardize_polygons_str(G_data)\nletters_polygons['T'] = standardize_polygons_str(T_data)\n\n\ncolors = dict(zip(\n    'ACGT', (('green', 'white'), ('blue',), ('orange',), ('red',))\n))\n\n\ndef add_letter_to_axis(ax, let, x, y, height):\n    \"\"\"Add letter 'let' at position (x, y) with the given height to axis 'ax'.\"\"\"\n    for polygon, color in zip(letters_polygons[let], colors[let]):\n        new_polygon = affinity.scale(\n            polygon, yfact=height, origin=(0, 0, 0))\n        new_polygon = affinity.translate(\n            new_polygon, xoff=x, yoff=y)\n        patch = PolygonPatch(\n            new_polygon, edgecolor=color, facecolor=color)\n        ax.add_patch(patch)\n    return\n\n\ndef plot_bases_on_ax(letter_heights, ax, show_ticks=True):\n    \"\"\"\n    Plot the N letters with heights taken from the Nx4 matrix letter_heights.\n\n    Parameters\n    ----------\n    letter_heights: Nx4 array\n    ax: axis to plot on\n    \"\"\"\n\n    assert letter_heights.shape[-1] == 4, letter_heights.shape\n    letter_heights = np.squeeze(letter_heights)\n    for x_pos, heights in enumerate(letter_heights):\n        letters_and_heights = sorted(zip(heights, 'ACGT'))\n        y_pos_pos = 0.0\n        y_neg_pos = 0.0\n        for height, letter in letters_and_heights:\n            if height > 0:\n                add_letter_to_axis(ax, letter, 0.5 + x_pos, y_pos_pos, height)\n                y_pos_pos += height\n            elif height < 0:\n                add_letter_to_axis(ax, letter, 0.5 + x_pos, y_neg_pos, height)\n                y_neg_pos += height\n    ax.set_xlim(0, letter_heights.shape[0] + 1)\n    if show_ticks:\n        ax.set_xticks(np.arange(1, letter_heights.shape[0] + 1))\n    else:\n        ax.tick_params(\n            axis='x',          # changes apply to the x-axis\n            which='both',      # both major and minor ticks are affected\n            bottom=False,      # ticks along the bottom edge are off\n            top=False,         # ticks along the top edge are off\n            labelbottom=False)\n    ax.set_aspect(aspect='auto', adjustable='box')\n    ax.autoscale_view()\n    return ax\n\n\ndef plot_bases(letter_heights, figsize=(12, 6), ylab='bits'):\n    \"\"\"\n    Plot the N letters with heights taken from the Nx4 matrix letter_heights.\n\n    Parameters\n    ----------\n    letter_heights: Nx4 array\n    ylab: y axis label\n\n    Returns\n    -------\n    pyplot figure\n    \"\"\"\n    assert letter_heights.shape[1] == 4, letter_heights.shape\n\n    fig = 
pyplot.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n ax.set_xlabel('base pair position')\n ax.set_ylabel(ylab)\n plot_bases_on_ax(letter_heights, ax)\n\n return fig,ax\n\n\n\n\ndef add_letters_to_axis(ax, letter_heights):\n \"\"\"\n Plots letter on user-specified axis.\n\n Parameters\n ----------\n ax : axis\n letter_heights: Nx4 array\n \"\"\"\n assert letter_heights.shape[1] == 4\n\n x_range = [1, letter_heights.shape[0]]\n pos_heights = np.copy(letter_heights)\n pos_heights[letter_heights < 0] = 0\n neg_heights = np.copy(letter_heights)\n neg_heights[letter_heights > 0] = 0\n\n for x_pos, heights in enumerate(letter_heights):\n letters_and_heights = sorted(zip(heights, 'ACGT'))\n y_pos_pos = 0.0\n y_neg_pos = 0.0\n for height, letter in letters_and_heights:\n if height > 0:\n add_letter_to_axis(ax, letter, 0.5 + x_pos, y_pos_pos, height)\n y_pos_pos += height\n else:\n add_letter_to_axis(ax, letter, 0.5 + x_pos, y_neg_pos, height)\n y_neg_pos += height\n\n ax.set_xlim(x_range[0] - 1, x_range[1] + 1)\n ax.set_xticks(list(range(*x_range)) + [x_range[-1]])\n ax.set_aspect(aspect='auto', adjustable='box')\n ax.autoscale_view()\n"
] | [
[
"numpy.asarray",
"matplotlib.use",
"numpy.squeeze",
"matplotlib.path.Path",
"numpy.arange",
"numpy.ones",
"numpy.copy",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
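`plot_letters.py` above exposes `plot_bases` (new figure) and `plot_bases_on_ax` (existing axis) for drawing sequence logos from an Nx4 height matrix in A/C/G/T column order. A small usage sketch with random, illustrative heights:

```python
import numpy as np
from kerasAC.vis.plot_letters import plot_bases  # module path assumed from the record above

heights = np.random.randn(20, 4) * 0.5  # N x 4: one row per base-pair position
fig, ax = plot_bases(heights, figsize=(12, 4), ylab='contribution')
fig.savefig('logo.pdf')  # the module forces matplotlib's 'pdf' backend
```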
WenmuZhou/PaddleDetection | [
"d9187ad5d054bc8a4333b9a8ba686569be91a8c6"
] | [
"ppdet/modeling/necks/centernet_fpn.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport math\nimport paddle\nimport paddle.nn as nn\nfrom paddle import ParamAttr\nfrom paddle.nn.initializer import Uniform\nimport paddle.nn.functional as F\nfrom ppdet.core.workspace import register, serializable\nfrom ppdet.modeling.layers import ConvNormLayer\nfrom ppdet.modeling.backbones.hardnet import ConvLayer, HarDBlock\nfrom ..shape_spec import ShapeSpec\n\n__all__ = ['CenterNetDLAFPN', 'CenterNetHarDNetFPN']\n\n\n# SGE attention\nclass BasicConv(nn.Layer):\n def __init__(self,\n in_planes,\n out_planes,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n relu=True,\n bn=True,\n bias_attr=False):\n super(BasicConv, self).__init__()\n self.out_channels = out_planes\n self.conv = nn.Conv2D(\n in_planes,\n out_planes,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias_attr=bias_attr)\n self.bn = nn.BatchNorm2D(\n out_planes,\n epsilon=1e-5,\n momentum=0.01,\n weight_attr=False,\n bias_attr=False) if bn else None\n self.relu = nn.ReLU() if relu else None\n\n def forward(self, x):\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n if self.relu is not None:\n x = self.relu(x)\n return x\n\n\nclass ChannelPool(nn.Layer):\n def forward(self, x):\n return paddle.concat(\n (paddle.max(x, 1).unsqueeze(1), paddle.mean(x, 1).unsqueeze(1)),\n axis=1)\n\n\nclass SpatialGate(nn.Layer):\n def __init__(self):\n super(SpatialGate, self).__init__()\n kernel_size = 7\n self.compress = ChannelPool()\n self.spatial = BasicConv(\n 2,\n 1,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n relu=False)\n\n def forward(self, x):\n x_compress = self.compress(x)\n x_out = self.spatial(x_compress)\n scale = F.sigmoid(x_out) # broadcasting\n return x * scale\n\n\ndef fill_up_weights(up):\n weight = up.weight.numpy()\n f = math.ceil(weight.shape[2] / 2)\n c = (2 * f - 1 - f % 2) / (2. * f)\n for i in range(weight.shape[2]):\n for j in range(weight.shape[3]):\n weight[0, 0, i, j] = \\\n (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))\n for c in range(1, weight.shape[0]):\n weight[c, 0, :, :] = weight[0, 0, :, :]\n up.weight.set_value(weight)\n\n\nclass IDAUp(nn.Layer):\n def __init__(self, ch_ins, ch_out, up_strides, dcn_v2=True):\n super(IDAUp, self).__init__()\n for i in range(1, len(ch_ins)):\n ch_in = ch_ins[i]\n up_s = int(up_strides[i])\n fan_in = ch_in * 3 * 3\n stdv = 1. 
/ math.sqrt(fan_in)\n proj = nn.Sequential(\n ConvNormLayer(\n ch_in,\n ch_out,\n filter_size=3,\n stride=1,\n use_dcn=dcn_v2,\n bias_on=dcn_v2,\n norm_decay=None,\n dcn_lr_scale=1.,\n dcn_regularizer=None,\n initializer=Uniform(-stdv, stdv)),\n nn.ReLU())\n node = nn.Sequential(\n ConvNormLayer(\n ch_out,\n ch_out,\n filter_size=3,\n stride=1,\n use_dcn=dcn_v2,\n bias_on=dcn_v2,\n norm_decay=None,\n dcn_lr_scale=1.,\n dcn_regularizer=None,\n initializer=Uniform(-stdv, stdv)),\n nn.ReLU())\n\n kernel_size = up_s * 2\n fan_in = ch_out * kernel_size * kernel_size\n stdv = 1. / math.sqrt(fan_in)\n up = nn.Conv2DTranspose(\n ch_out,\n ch_out,\n kernel_size=up_s * 2,\n stride=up_s,\n padding=up_s // 2,\n groups=ch_out,\n weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),\n bias_attr=False)\n fill_up_weights(up)\n setattr(self, 'proj_' + str(i), proj)\n setattr(self, 'up_' + str(i), up)\n setattr(self, 'node_' + str(i), node)\n\n def forward(self, inputs, start_level, end_level):\n for i in range(start_level + 1, end_level):\n upsample = getattr(self, 'up_' + str(i - start_level))\n project = getattr(self, 'proj_' + str(i - start_level))\n\n inputs[i] = project(inputs[i])\n inputs[i] = upsample(inputs[i])\n node = getattr(self, 'node_' + str(i - start_level))\n inputs[i] = node(paddle.add(inputs[i], inputs[i - 1]))\n\n\nclass DLAUp(nn.Layer):\n def __init__(self, start_level, channels, scales, ch_in=None, dcn_v2=True):\n super(DLAUp, self).__init__()\n self.start_level = start_level\n if ch_in is None:\n ch_in = channels\n self.channels = channels\n channels = list(channels)\n scales = np.array(scales, dtype=int)\n for i in range(len(channels) - 1):\n j = -i - 2\n setattr(\n self,\n 'ida_{}'.format(i),\n IDAUp(\n ch_in[j:],\n channels[j],\n scales[j:] // scales[j],\n dcn_v2=dcn_v2))\n scales[j + 1:] = scales[j]\n ch_in[j + 1:] = [channels[j] for _ in channels[j + 1:]]\n\n def forward(self, inputs):\n out = [inputs[-1]] # start with 32\n for i in range(len(inputs) - self.start_level - 1):\n ida = getattr(self, 'ida_{}'.format(i))\n ida(inputs, len(inputs) - i - 2, len(inputs))\n out.insert(0, inputs[-1])\n return out\n\n\n@register\n@serializable\nclass CenterNetDLAFPN(nn.Layer):\n \"\"\"\n Args:\n in_channels (list): number of input feature channels from backbone.\n [16, 32, 64, 128, 256, 512] by default, means the channels of DLA-34\n down_ratio (int): the down ratio from images to heatmap, 4 by default\n last_level (int): the last level of input feature fed into the upsamplng block\n out_channel (int): the channel of the output feature, 0 by default means\n the channel of the input feature whose down ratio is `down_ratio`\n first_level (None): the first level of input feature fed into the upsamplng block.\n if None, the first level stands for logs(down_ratio)\n dcn_v2 (bool): whether use the DCNv2, True by default\n with_sge (bool): whether use SGE attention, False by default\n \"\"\"\n\n def __init__(self,\n in_channels,\n down_ratio=4,\n last_level=5,\n out_channel=0,\n first_level=None,\n dcn_v2=True,\n with_sge=False):\n super(CenterNetDLAFPN, self).__init__()\n self.first_level = int(np.log2(\n down_ratio)) if first_level is None else first_level\n assert self.first_level >= 0, \"first level in CenterNetDLAFPN should be greater or equal to 0, but received {}\".format(\n self.first_level)\n self.down_ratio = down_ratio\n self.last_level = last_level\n scales = [2**i for i in range(len(in_channels[self.first_level:]))]\n self.dla_up = DLAUp(\n self.first_level,\n 
in_channels[self.first_level:],\n scales,\n dcn_v2=dcn_v2)\n self.out_channel = out_channel\n if out_channel == 0:\n self.out_channel = in_channels[self.first_level]\n self.ida_up = IDAUp(\n in_channels[self.first_level:self.last_level],\n self.out_channel,\n [2**i for i in range(self.last_level - self.first_level)],\n dcn_v2=dcn_v2)\n\n self.with_sge = with_sge\n if self.with_sge:\n self.sge_attention = SpatialGate()\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n return {'in_channels': [i.channels for i in input_shape]}\n\n def forward(self, body_feats):\n\n dla_up_feats = self.dla_up(body_feats)\n\n ida_up_feats = []\n for i in range(self.last_level - self.first_level):\n ida_up_feats.append(dla_up_feats[i].clone())\n\n self.ida_up(ida_up_feats, 0, len(ida_up_feats))\n\n feat = ida_up_feats[-1]\n if self.with_sge:\n feat = self.sge_attention(feat)\n return feat\n\n @property\n def out_shape(self):\n return [ShapeSpec(channels=self.out_channel, stride=self.down_ratio)]\n\n\nclass TransitionUp(nn.Layer):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n\n def forward(self, x, skip, concat=True):\n w, h = skip.shape[2], skip.shape[3]\n out = F.interpolate(x, size=(w, h), mode=\"bilinear\", align_corners=True)\n if concat:\n out = paddle.concat([out, skip], 1)\n return out\n\n\n@register\n@serializable\nclass CenterNetHarDNetFPN(nn.Layer):\n \"\"\"\n Args:\n in_channels (list): number of input feature channels from backbone.\n [96, 214, 458, 784] by default, means the channels of HarDNet85\n num_layers (int): HarDNet laters, 85 by default\n down_ratio (int): the down ratio from images to heatmap, 4 by default\n first_level (int|None): the first level of input feature fed into the upsamplng block.\n if None, the first level stands for logs(down_ratio) - 1\n\n last_level (int): the last level of input feature fed into the upsamplng block\n out_channel (int): the channel of the output feature, 0 by default means\n the channel of the input feature whose down ratio is `down_ratio`\n \"\"\"\n\n def __init__(self,\n in_channels,\n num_layers=85,\n down_ratio=4,\n first_level=None,\n last_level=4,\n out_channel=0):\n super(CenterNetHarDNetFPN, self).__init__()\n self.first_level = int(np.log2(\n down_ratio)) - 1 if first_level is None else first_level\n assert self.first_level >= 0, \"first level in CenterNetDLAFPN should be greater or equal to 0, but received {}\".format(\n self.first_level)\n self.down_ratio = down_ratio\n self.last_level = last_level\n self.last_pool = nn.AvgPool2D(kernel_size=2, stride=2)\n\n assert num_layers in [68, 85], \"HarDNet-{} not support.\".format(\n num_layers)\n if num_layers == 85:\n self.last_proj = ConvLayer(784, 256, kernel_size=1)\n self.last_blk = HarDBlock(768, 80, 1.7, 8)\n self.skip_nodes = [1, 3, 8, 13]\n self.SC = [32, 32, 0]\n gr = [64, 48, 28]\n layers = [8, 8, 4]\n ch_list2 = [224 + self.SC[0], 160 + self.SC[1], 96 + self.SC[2]]\n channels = [96, 214, 458, 784]\n self.skip_lv = 3\n\n elif num_layers == 68:\n self.last_proj = ConvLayer(654, 192, kernel_size=1)\n self.last_blk = HarDBlock(576, 72, 1.7, 8)\n self.skip_nodes = [1, 3, 8, 11]\n self.SC = [32, 32, 0]\n gr = [48, 32, 20]\n layers = [8, 8, 4]\n ch_list2 = [224 + self.SC[0], 96 + self.SC[1], 64 + self.SC[2]]\n channels = [64, 124, 328, 654]\n self.skip_lv = 2\n\n self.transUpBlocks = nn.LayerList([])\n self.denseBlocksUp = nn.LayerList([])\n self.conv1x1_up = nn.LayerList([])\n self.avg9x9 = nn.AvgPool2D(kernel_size=(9, 9), stride=1, padding=(4, 4))\n prev_ch = 
self.last_blk.get_out_ch()\n\n for i in range(3):\n skip_ch = channels[3 - i]\n self.transUpBlocks.append(TransitionUp(prev_ch, prev_ch))\n if i < self.skip_lv:\n cur_ch = prev_ch + skip_ch\n else:\n cur_ch = prev_ch\n self.conv1x1_up.append(\n ConvLayer(\n cur_ch, ch_list2[i], kernel_size=1))\n cur_ch = ch_list2[i]\n cur_ch -= self.SC[i]\n cur_ch *= 3\n\n blk = HarDBlock(cur_ch, gr[i], 1.7, layers[i])\n self.denseBlocksUp.append(blk)\n prev_ch = blk.get_out_ch()\n\n prev_ch += self.SC[0] + self.SC[1] + self.SC[2]\n self.out_channel = prev_ch\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n return {'in_channels': [i.channels for i in input_shape]}\n\n def forward(self, body_feats):\n x = body_feats[-1]\n x_sc = []\n x = self.last_proj(x)\n x = self.last_pool(x)\n x2 = self.avg9x9(x)\n x3 = x / (x.sum((2, 3), keepdim=True) + 0.1)\n x = paddle.concat([x, x2, x3], 1)\n x = self.last_blk(x)\n\n for i in range(3):\n skip_x = body_feats[3 - i]\n x = self.transUpBlocks[i](x, skip_x, (i < self.skip_lv))\n x = self.conv1x1_up[i](x)\n if self.SC[i] > 0:\n end = x.shape[1]\n x_sc.append(x[:, end - self.SC[i]:, :, :])\n x = x[:, :end - self.SC[i], :, :]\n x2 = self.avg9x9(x)\n x3 = x / (x.sum((2, 3), keepdim=True) + 0.1)\n x = paddle.concat([x, x2, x3], 1)\n x = self.denseBlocksUp[i](x)\n\n scs = [x]\n for i in range(3):\n if self.SC[i] > 0:\n scs.insert(\n 0,\n F.interpolate(\n x_sc[i],\n size=(x.shape[2], x.shape[3]),\n mode=\"bilinear\",\n align_corners=True))\n neck_feat = paddle.concat(scs, 1)\n return neck_feat\n\n @property\n def out_shape(self):\n return [ShapeSpec(channels=self.out_channel, stride=self.down_ratio)]\n"
] | [
[
"numpy.log2",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
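`CenterNetDLAFPN` consumes one feature map per backbone level and returns a single stride-4 map for the CenterNet head. A rough forward-pass sketch, assuming the DLA-34 channel layout from the docstring, a 512x512 input, and that level `i` has stride `2**i` (shapes are illustrative and untested; `dcn_v2=False` simply avoids deformable convolutions in this toy run):

```python
import paddle

in_channels = [16, 32, 64, 128, 256, 512]  # DLA-34 default from the docstring
fpn = CenterNetDLAFPN(in_channels, down_ratio=4, dcn_v2=False)

# One dummy feature map per backbone level.
body_feats = [paddle.randn([1, c, 512 // 2**i, 512 // 2**i])
              for i, c in enumerate(in_channels)]
feat = fpn(body_feats)
print(feat.shape, fpn.out_shape[0].stride)  # stride-4 output feature
```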
ArnaudTurn/competition_project | [
"76c7f11e97c90c0be9e3a9a89bff5b022f66e98a"
] | [
"data_science_test_AT/preprocess.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n###############################################################################\n# #\n# preprocess methods #\n# Developed using Python 3.7.4 #\n# #\n# Author: Arnaud Tauveron #\n# Linkedin: https://www.linkedin.com/in/arnaud-tauveron/ #\n# Date: 2021-12-12 #\n# Version: 1.0.1 #\n# #\n###############################################################################\n\nimport pandas as pd\nimport numpy as np\nimport argparse\nfrom loader import load_request_df, load_individuals_df\nfrom utils_ import check_exist, get_unique_date\nfrom sklearn.preprocessing import MinMaxScaler\nimport re\n\n\ndef preprocess_individuals(df: pd.DataFrame) -> pd.DataFrame:\n df = df.assign(\n creation_date=lambda x: 2019\n - x[\"individual_creation_date\"]\n .fillna(999999)\n .apply(lambda y: str(y)[:4])\n .astype(int),\n age=lambda x: 2019 - x[\"birth_year\"],\n role1_=lambda x: x[\"individual_role\"].astype(str),\n role2_=lambda x: x[\"individual_role_2_label\"].astype(str),\n marital_st=lambda x: x[\"marital_status_label\"].astype(str),\n pregnancy=lambda x: x[\"pregnancy\"].astype(str),\n )\n numeric_indi_variables = df.groupby(\"request_id\").agg(\n {\n \"individual_id\": \"count\",\n \"age\": [\"mean\", \"max\", \"min\"],\n \"creation_date\": [\"mean\", \"max\", \"min\"],\n }\n )\n numeric_indi_variables.columns = [\n i + \"_\" + j for i, j in numeric_indi_variables.columns.tolist()\n ]\n cat_indi_var = (\n pd.get_dummies(\n df.set_index(\"request_id\")[[\"role1_\", \"role2_\", \"marital_st\", \"pregnancy\"]]\n )\n .reset_index()\n .groupby(\"request_id\")\n .sum()\n .reset_index()\n )\n individual_features_df = numeric_indi_variables.merge(\n cat_indi_var, on=\"request_id\", how=\"left\"\n )\n return individual_features_df\n\n\ndef build_requests_dataset_full(\n request_df: pd.DataFrame,\n individual_df: pd.DataFrame,\n select_var: list,\n var_target: str,\n common_var: str = \"request_id\",\n) -> pd.DataFrame:\n df_temp = request_df.copy()\n df_temp = df_temp.merge(individual_df, on=common_var, how=\"left\").set_index(\n common_var\n )\n df_temp_dummies = pd.get_dummies(df_temp[select_var]).copy()\n var_list = df_temp_dummies.columns.tolist()\n re_var_list = [re.sub(\"[^a-zA-Z0-9 \\n\\.]\", \"_\", i) for i in var_list]\n df_temp_dummies.columns = re_var_list\n df_temp_dummies[var_target] = df_temp[var_target]\n return df_temp_dummies\n\n\ndef preprocess_pipes_from_files(\n request_df_path: str,\n individual_df_path: str,\n select_var: list = None,\n var_target: str = None,\n common_var: str = None,\n output_directory: str = None,\n table_name: str = None,\n) -> pd.DataFrame:\n\n if table_name is not None:\n unique_identifier_execution = table_name + get_unique_date()\n else:\n unique_identifier_execution = \"table_\" + get_unique_date()\n\n global_path = f\"{output_directory}\\{unique_identifier_execution}.csv\"\n\n request_df = load_request_df(request_df_path)\n individual_df = load_individuals_df(individual_df_path)\n individual_df_feat = preprocess_individuals(individual_df)\n final_output = build_requests_dataset_full(\n request_df=request_df,\n individual_df=individual_df_feat,\n select_var=select_var,\n var_target=var_target,\n common_var=common_var,\n )\n\n check_exist(output_directory)\n final_output.to_csv(global_path)\n"
] | [
[
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
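`preprocess_individuals` above aggregates per-request numeric statistics plus one-hot counts of the categorical columns it references; note its final merge joins on the `request_id` index level, which requires pandas 0.23 or newer. A tiny illustrative input (column names follow the function body; all values are made up):

```python
import pandas as pd

df = pd.DataFrame({
    'request_id': [1, 1, 2],
    'individual_id': [10, 11, 12],
    'individual_creation_date': [201801.0, 201905.0, None],
    'birth_year': [1980, 2015, 1990],
    'individual_role': ['head', 'child', 'head'],
    'individual_role_2_label': ['a', 'b', 'a'],
    'marital_status_label': ['single', None, 'married'],
    'pregnancy': [None, None, 'yes'],
})
features = preprocess_individuals(df)  # one row per request_id
print(features.columns.tolist())
```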
Princeton-LSI-ResearchComputing/sprite-pipeline | [
"5e11a588df9a70a98d167956af471880669a18f5"
] | [
"scripts/python/contact.py"
] | [
"from enum import Enum\nfrom itertools import combinations\nfrom random import random\nimport assembly\nimport numpy as np\nimport subprocess\n\n__author__ = \"Noah Ollikainen, Charlotte A Lai\"\n\nclass Downweighting(Enum):\n \"\"\"An enumeration of downweighting schemes.\n\n NONE -- No downweighting. Each contact has a value of 1.\n N_MINUS_ONE -- A contact from a cluster of n reads has a value of\n 1 /(n - 1).\n TWO_OVER_N -- A contact form a cluster of n reads has a value of\n 2 / n.\n UNKNOWN -- A default downweighing scheme for error checking.\n \"\"\"\n\n NONE = 1\n N_MINUS_ONE = 2\n TWO_OVER_N = 3\n UNKNOWN = 4\n\n\nclass Contacts:\n \"\"\"A class for making heatmaps from chromosomal conformation data.\n\n This class primarily deals with the cluster files from the Guttman Lab\n SPRITE workflow, but also contains some auxiliary methods for other Hi-C\n file formats.\n \"\"\"\n\n\n def __init__(self, chromosome, build = \"mm9\", resolution = 1000000,\n downweighting = \"none\"):\n \"\"\"Constructs an instance of the Contacts class.\n\n Args:\n chromosome (str): The chromosome to visualize, or \"genome\" for an\n interchromosomal heatmap.\n build (str): The genome assembly.\n resolution (int): The resolution of the heatmap, in bp.\n downweighting (str): The downweighting scheme.\n \"\"\"\n\n self._chromosome = chromosome\n self._resolution = resolution\n self._assembly = assembly.build(build, resolution)\n\n if downweighting == \"none\":\n self._downweighting = Downweighting.NONE\n elif downweighting == \"n_minus_one\":\n self._downweighting = Downweighting.N_MINUS_ONE\n elif downweighting == \"two_over_n\":\n self._downweighting = Downweighting.TWO_OVER_N\n else:\n self._downweighting = Downweighting.UNKNOWN\n\n if self._chromosome == \"genome\":\n self.init_genome_matrix()\n else:\n self.init_chromosome_matrix()\n\n\n def get_raw_contacts_from_sprite_file(self, clusters_file,\n min_cluster_size = 2, max_cluster_size = 1000):\n \"\"\"Parses a SPRITE clusters file and stores the relevant contacts.\"\"\"\n\n if self._chromosome.startswith(\"chr\"):\n self.get_raw_intrachromosomal_contacts_from_sprite_file(clusters_file,\n min_cluster_size, max_cluster_size)\n elif self._chromosome == \"genome\":\n self.get_raw_interchromosomal_contacts_from_sprite_file(clusters_file,\n min_cluster_size, max_cluster_size)\n else:\n raise Exception(\"Chromosome ID must start with 'chr' or \" + \n \"equal 'genome'\")\n\n\n def get_raw_interchromosomal_contacts_from_sprite_file(self, clusters_file,\n min_cluster_size, max_cluster_size):\n \"\"\"Parses a SPRITE clusters file and stores all contacts.\n\n This method is used for generating a genome-wide interchromosomal\n heatmap.\n\n Args:\n clusters_file (str): The path to the SPRITE clusters file\n min_cluster_size (int): Ignore clusters with a smaller size\n max_cluster_size (int): Ignore clusters with a larger size\n \"\"\"\n\n with open(clusters_file, 'r') as f:\n\n for line in f:\n reads = line.split()[1:]\n if not min_cluster_size <= len(reads) <= max_cluster_size:\n continue\n bins = set()\n\n for read in reads:\n _, coord = read.split('_')\n chrom, start, end = coord.replace('-', ':').split(':')\n genome_pos = self.get_genomic_position(chrom, start)\n if genome_pos is not None: # genome_pos == None if chrom not in dict\n bins.add(genome_pos)\n\n self.add_bins_to_contacts(bins)\n\n\n def get_raw_intrachromosomal_contacts_from_sprite_file(self, clusters_file,\n min_cluster_size, max_cluster_size):\n \"\"\"Parses a SPRITE clusters file and stores 
contacts from within a\n single chromosome.\n\n Note:\n This method does not take a chromosome as an argument. The\n chromosome is specified when the Contacts object is constructed. If\n you need a heatmap for each of multiple chromosomes, you'll need to\n make a new Contacts object for each.\n\n Args:\n clusters_file (str): The path to the SPRITE clusters file\n min_cluster_size (int): Ignore clusters with a smaller size\n max_cluster_size (int): Ignore clusters with a larger size\n \"\"\"\n\n with open(clusters_file, 'r') as f:\n\n for line in f:\n reads = line.split()[1:]\n if not min_cluster_size <= len(reads) <= max_cluster_size:\n continue\n bins = set()\n\n for read in reads:\n _, coord = read.split('_')\n chrom, start, end = coord.replace('-', ':').split(':')\n if chrom == self._chromosome:\n read_bin = int(start) // self._resolution\n bins.add(read_bin)\n\n self.add_bins_to_contacts(bins) \n\n\n def get_raw_intrachromosomal_contacts_from_aiden_hic_file(self, hic_file):\n \"\"\"Parses a Hi-C file from the Aiden lab and stores the contacts.\n\n Erez's Hi-C files have three columns. Columns one and two contain\n positions on a chromosome that interact. Column three is the number\n of interactions. As an example:\n\n 1213142 1213143 10\n 1213142 1213144 4\n 1213142 1213145 1\n\n Note:\n This method only handles intrachromosomal Hi-C files, e.g.,\n chr2-against-chr2.\n \"\"\"\n\n with open(hic_file, 'r') as f:\n for line in f:\n line = line.rstrip()\n pos1, pos2, count = line.split()\n pos1 = int(pos1) // self._resolution\n pos2 = int(pos2) // self._resolution\n count = int(float(count))\n self._contacts[pos1][pos2] = count\n self._contacts[pos2][pos1] = count\n\n\n def get_raw_contacts_from_ren_hic_file(self, hic_file):\n \"\"\"Parses a Hi-C file from the Ren lab and stores the contacts.\n\n The Ren lab's Hi-C files have seven columns. Columns two and three\n contain the chromosome and position of a read in a contact. Columns\n five and six contain the chromosome and position of the other read.\n Each line corresponds to a single contact. 
As an example:\n\n HWI-ST216_0305:5:1104:16545:105833#AGTAAG chr1 3000000 - chr1 5404761 +\n HWI-ST216_0305:4:2208:8611:50989#AAATGA chr1 3000001 + chr1 3000252 -\n HWI-ST216_0305:5:2307:15998:173700#GGTTGT chr1 3000001 + chr1 3000218 -\n \"\"\"\n\n if self._chromosome.startswith(\"chr\"):\n self.get_raw_intrachromosomal_contacts_from_ren_hic_file(hic_file)\n elif self._chromosome == \"genome\":\n self.get_raw_interchromosomal_contacts_from_ren_hic_file(hic_file)\n else:\n raise Exception(\"Chromosome ID must start with 'chr' or \" + \n \"equal 'genome'\")\n\n\n def get_raw_intrachromosomal_contacts_from_ren_hic_file(self, hic_file):\n \"\"\"Parses a Hi-C file from the Ren lab and stores the intrachromosomal\n contacts on one chromosome.\n \"\"\"\n\n assert self._chromosome.startswith(\"chr\")\n\n with open(hic_file, 'r') as f:\n for line in f:\n _, chrom1, pos1, _, chrom2, pos2, _ = line.split()\n if self._chromosome == chrom1 == chrom2:\n pos1 = int(pos1) // self._resolution\n pos2 = int(pos2) // self._resolution\n self._contacts[pos1][pos2] += 1\n self._contacts[pos2][pos1] += 1\n\n\n def get_raw_interchromosomal_contacts_from_ren_hic_file(self, hic_file):\n \"\"\"Parses a Hi-C file from the Ren lab and stores all contacts for a\n genome-wide interchromosomal heatmap.\n \"\"\"\n\n assert self._chromosome == \"genome\"\n\n with open(hic_file, 'r') as f:\n\n for line in f:\n _, chrom1, pos1, _, chrom2, pos2, _ = line.split()\n bin1 = self.get_genomic_position(chrom1, pos1)\n bin2 = self.get_genomic_position(chrom2, pos2)\n\n # Bins == None if get_genomic_position passed unknown chrom\n if bin1 is not None and bin2 is not None:\n self._contacts[bin1][bin2] += 1\n self._contacts[bin2][bin1] += 1\n\n\n def get_genomic_position(self, chromosome, position):\n \"\"\"Converts a chromosome and position to the appropriate heatmap index\n for a genome-wide interchromosomal heatmap\n\n For example, passing \"chr3\" and 10000 will return a value which\n (conceptually) is (10000 + length(chr2) + length(chr1)) / resolution.\n\n Args:\n chromosome (str): The chromosome, e.g., \"chr1\".\n position (int): The position along the chromosome.\n \"\"\"\n\n read_bin = int(position) // self._resolution\n offset = self._assembly.get_offset(chromosome)\n if offset is not None:\n return read_bin + offset\n\n\n def add_bins_to_contacts(self, bins):\n \"\"\"Stores all pairwise contacts implied by one SPRITE cluster.\"\"\"\n\n if len(bins) > 1:\n if self._downweighting == Downweighting.TWO_OVER_N:\n inc = 2.0 / len(bins)\n elif self._downweighting == Downweighting.N_MINUS_ONE:\n inc = 1.0 / (len(bins) - 1)\n else:\n assert self._downweighting == Downweighting.NONE\n inc = 1.0\n\n for bin1, bin2 in combinations(bins, 2):\n self._contacts[bin1][bin2] += inc\n self._contacts[bin2][bin1] += inc\n\n\n def zero_diagonal_entries(self):\n \"\"\"Sets all diagonal entries in the internal heatmap matrix to zero.\"\"\"\n\n for i in range(len(self._contacts)):\n self._contacts[i][i] = 0\n\n\n def init_chromosome_matrix(self):\n \"\"\"Initializes an internal heatmap matrix with a number of rows\n determined by this object's assembly, chromosome and resolution (e.g.,\n chr1 on mm9 at a 100 Mb resolution.\n\n This method is used for single intrachromosomal heatmaps only.\n \"\"\"\n\n chromosome_size = self._assembly.get_size(self._chromosome)\n num_bins = -(-chromosome_size // self._resolution)\n self._contacts = np.zeros((num_bins, num_bins))\n\n\n def init_genome_matrix(self):\n \"\"\"Initializes an internal heatmap matrix with a 
number of rows\n determined by this object's assembly and resolution (e.g., mm9 at\n 100 Mb resolution.\n\n This method is used for genome-wide interchromosomal heatmaps only.\n \"\"\"\n\n num_bins = 0\n for chromosome_size in self._assembly._chromsizes.values():\n num_bins += -(-chromosome_size // self._resolution)\n self._contacts = np.zeros((num_bins, num_bins))\n\n\n def write_contacts_to_file(self, outfile, fmt):\n \"\"\"Writes the internal heatmap matrix to file.\n\n Args:\n outfile (str): The path to write to.\n fmt (str): The numerical format to write.\n \"\"\"\n\n np.savetxt(outfile, self._contacts, delimiter = \"\\t\", fmt = fmt)\n\n\n def ice_raw_contacts(self, raw_contacts_file, bias_file, iterations,\n hicorrector_path):\n \"\"\"Calls Hi-Corrector to apply IC normalization to the internal heatmap\n matrix.\n\n This method generates a file of biases by calling the ic executable,\n then scales each cell of the internal heatmap matrix by the two\n appropriate factors in that file.\n \n Args:\n raw_contacts_file (str): The file containing a raw contacts heatmap.\n bias_file (str): The path to write the Hi-Corrector output to.\n iterations (int): The number of Hi-Corrector iterations to perform.\n hicorrector_path (str): The path to the Hi-Corrector ic program.\n \"\"\"\n\n biases = self.calculate_bias_factors(raw_contacts_file = raw_contacts_file,\n bias_file = bias_file, hicorrector = hicorrector_path,\n iterations = iterations)\n\n median_diagonal_value = self.get_median_diagonal_value()\n\n for row in range(self._contacts.shape[0]):\n for col in range(self._contacts.shape[1]):\n val = self._contacts[row][col]\n if val > 0:\n val /= (biases[row] * biases[col])\n self._contacts[row][col] = val\n\n\n def truncate_to_median_diagonal_value(self):\n \"\"\"Scales all values in the internal heatmap matrix relative to the\n median value of the +1 and the -1 diagonals (offset from main diagonal\n by +/- 1).\n\n If the median value is 10, a value of 3 will be scaled to 0.3 and a\n value of 9 will be scaled to 0.9. A value of 11 would be set to 1\n (since 11 > 10) rather than being set to 1.1.\n \"\"\"\n\n median_diagonal_value = self.get_median_diagonal_value()\n\n for row in range(self._contacts.shape[0]):\n for col in range(self._contacts.shape[1]):\n val = self._contacts[row][col]\n val = (1 if val >= median_diagonal_value\n else val / median_diagonal_value)\n self._contacts[row][col] = val\n\n \n def calculate_bias_factors(self, raw_contacts_file, bias_file, hicorrector,\n iterations):\n \"\"\"Runs Hi-Corrector on the raw contacts heatmap.\n\n The bias file that Hi-Corrector outputs is subsequently read and\n returned as a list of floats.\n\n Note:\n Hi-Corrector cannot access the internal numpy matrix of this\n object. 
The matrix needs to be written to disk first, then read by\n            Hi-Corrector.\n\n        Args:\n            raw_contacts_file (str): The file containing a raw contacts heatmap.\n            bias_file (str): The path to write the Hi-Corrector output to.\n            iterations (int): The number of Hi-Corrector iterations to perform.\n            hicorrector (str): The path to the Hi-Corrector ic program.\n        \"\"\"\n\n        skip_first_row = \"0\"  # 0 == don't skip\n        skip_first_column = \"0\"\n        num_lines = self._contacts.shape[0]\n        subprocess.check_call([hicorrector, raw_contacts_file, str(num_lines),\n            str(iterations), skip_first_row, skip_first_column, bias_file])\n        return self.parse_bias_file(bias_file)\n\n\n    def parse_bias_file(self, bias_file):\n        \"\"\"Parses a Hi-Corrector ic output file and returns the values as a\n        list of floats.\n        \"\"\"\n\n        biases = []\n\n        with open(bias_file) as f:\n            for line in f:\n                biases.append(float(line.strip()))\n        return biases\n\n\n    def get_median_diagonal_value(self):\n        \"\"\"Returns the median diagonal value of this object's internal heatmap\n        matrix.\n\n        Note:\n            The median diagonal value is actually the median of the two\n            diagonals offset by +1 and -1.\n        \"\"\"\n\n        diagonal_values = []\n\n        for i in range(self._contacts.shape[0] - 1):\n            diagonal_values.append(self._contacts[i + 1][i])\n            diagonal_values.append(self._contacts[i][i + 1])\n\n        return np.median(diagonal_values)\n\n\n    def downsample(self, target):\n        \"\"\"Downsample the internal heatmap matrix to a target number of\n        contacts.\n\n        Args:\n            target (int): The number of contacts to downsample to.\n        \"\"\"\n\n        dim = len(self._contacts)\n\n        total_contacts = 0\n\n        # Only sum contacts from diagonal and upper-triangle\n        # Otherwise, will double-count.\n        for i in range(dim):\n            for j in range(i + 1):\n                total_contacts += self._contacts[i][j]\n\n        downsample_ratio = float(target) / total_contacts\n\n        for i in range(dim):\n            for j in range(i + 1):\n                # Cast to int so range() accepts the matrix entry, and keep\n                # each contact with probability target / total (drop otherwise).\n                num_contacts = int(self._contacts[i][j])\n                for _ in range(num_contacts):\n                    if random() >= downsample_ratio:\n                        num_contacts -= 1\n                self._contacts[i][j] = num_contacts\n                self._contacts[j][i] = num_contacts\n"
] | [
[
"numpy.savetxt",
"numpy.median",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
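`contact.py` above builds SPRITE contact heatmaps through the `Contacts` class. An illustrative end-to-end run (the clusters-file path and output names are placeholders, and the companion `assembly` module from the same repository must be importable):

```python
contacts = Contacts('chr1', build='mm9', resolution=1000000,
                    downweighting='two_over_n')
contacts.get_raw_contacts_from_sprite_file('clusters.txt',
                                           min_cluster_size=2,
                                           max_cluster_size=1000)
contacts.zero_diagonal_entries()
contacts.truncate_to_median_diagonal_value()
contacts.write_contacts_to_file('chr1_heatmap.tsv', fmt='%.4f')
```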
orchardbirds/skorecard-1 | [
"0f5375a6c159bb35f4b62c5be75a742bf50885e2"
] | [
"skorecard/bucket_mapping.py"
] | [
"\"\"\"Classes to store features mapping for bucketing.\n\"\"\"\nimport dataclasses\nfrom dataclasses import dataclass, field\nfrom typing import List, Union, Dict\n\nimport pandas as pd\nimport numpy as np\n\n\n@dataclass\nclass BucketMapping:\n \"\"\"Stores all the info to be able to bucket a feature.\n\n ```python\n from skorecard.bucket_mapping import BucketMapping\n\n # Manually a new bucket mapping for a feature\n bucket = BucketMapping('feature1', 'numerical', map = [2,3,4,5], specials={\"special 0\": [0]})\n print(bucket)\n\n # You can work with these classes as dicts as well\n bucket.as_dict()\n BucketMapping(**bucket.as_dict())\n\n # Transform new data\n bucket.transform(list(range(10)))\n ```\n\n Args:\n feature_name (str): Name of the feature\n type (str): Type of feature, one of ['categorical','numerical']\n map (list or dict): The info needed to create the buckets (boundaries or cats)\n right (bool): parameter to np.digitize, used when map='numerical'.\n specials (dict): dictionary of special values to bin separately. The key is used for the bin index,\n labels (dict): dictionary containing special values. It must be of the format:\n - keys: strings, containing the name (that will be used as labels) for the special values\n - values: lists, containing the special values\n \"\"\"\n\n feature_name: str\n type: str\n missing_treatment: Union[str, int] = field(default=\"separate\")\n map: Union[Dict, List] = field(default_factory=lambda: [])\n right: bool = True\n specials: Dict = field(default_factory=lambda: {})\n labels: Dict = field(default_factory=lambda: {})\n\n def __post_init__(self) -> None:\n \"\"\"Do input validation.\n\n Returns:\n None: nothing\n \"\"\"\n assert self.type in [\"numerical\", \"categorical\"]\n assert all(\n [isinstance(k, str) for k in self.specials.keys()]\n ), f\"The keys of the special dicionary must be \\\n strings, got {self.specials.keys()} instead.\"\n assert all(\n [isinstance(k, list) for k in self.specials.values()]\n ), f\"The keys of the special dicionary must be a list of elements, got {self.specials}instead.\"\n\n def get_map(self):\n \"\"\"Pretty form of the boundaries.\n\n For example [2] will return [\"(-inf, 2.0]\", \"(2.0, inf]\"]\n \"\"\"\n if self.type == \"categorical\":\n # We might want to implement a nice max_length form here?\n # ['a',...,'z']\n return self.map\n\n m = [str(c) for c in pd.cut(x=[], bins=[-np.inf] + list(self.map) + [np.inf], right=self.right).categories]\n\n return m\n\n def transform(self, x):\n \"\"\"Applies bucketing to and array.\n\n Args:\n x: array\n\n Returns:\n x: array\n \"\"\"\n assert isinstance(x, (list, pd.core.series.Series, np.ndarray))\n assert len(self.map) is not None, \"Please set a 'map' first\"\n return self._transform_buckets(x)\n\n def _validate_categorical_map(self):\n \"\"\"Assure that the provided mapping starts at 0 and that it has an incremental trend.\"\"\"\n if isinstance(self.map, list):\n # TODO: Fix why we need to do the following line.\n # It's because app_utils.determine_boundaries() adds +1 for numerical buckets\n self.map = {k: v - 1 for k, v in enumerate(self.map)}\n values = [v for v in self.map.values()]\n if len(values) > 0:\n if not np.array_equal(np.unique(values), np.arange(max(values) + 1)):\n err_msg = (\n f\"Mapping dictionary must start at 0 and be incremental. 
\"\n f\"Found the following mappings {np.unique(values)}, and expected {np.arange(max(values) + 1)}\"\n )\n raise ValueError(err_msg)\n\n def _transform_buckets(self, x):\n \"\"\"\n Apply binning using a boundaries map.\n\n Note:\n - resulting bins are zero-indexed\n\n ```python\n import numpy as np\n bins = np.array([-np.inf, 1, np.inf])\n x = np.array([-1,0,.5, 1, 1.5, 2,10, np.nan, 0])\n new = np.digitize(x, bins)\n np.where(np.isnan(x), np.nan, new)\n ```\n \"\"\"\n self.labels = {}\n if isinstance(x, np.ndarray):\n x = pd.Series(x)\n if isinstance(x, list):\n x = pd.Series(x)\n if self.type == \"numerical\":\n buckets = self._apply_num_mapping(x)\n max_bucket_number = int(buckets.max())\n\n elif self.type == \"categorical\":\n self._validate_categorical_map()\n buckets = self._apply_cat_mapping(x)\n max_bucket_number = int(max(self.labels.keys()))\n\n if x.isnull().any():\n if self.missing_treatment == \"separate\":\n buckets = np.where(x.isnull(), max_bucket_number + 1, buckets)\n self.labels[max_bucket_number + 1] = \"Missing\"\n elif type(self.missing_treatment) == dict:\n if self.feature_name in self.missing_treatment.keys():\n bucket = self.missing_treatment[self.feature_name]\n buckets = np.where(x.isnull(), bucket, buckets)\n self.labels[bucket] = f\"{self.labels[bucket]}, Missing\"\n else:\n print(\n f\"Feature {self.feature_name} not in missing_treatment dict.\",\n \"Applying default bucketing for missing values.\",\n )\n buckets = np.where(x.isnull(), max_bucket_number + 1, buckets)\n self.labels[max_bucket_number + 1] = \"Missing\"\n\n else:\n self.labels[max_bucket_number + 1] = \"Missing\"\n\n if len(self.specials) > 0:\n buckets = self._assign_specials(x, buckets, start_bucket_number=max_bucket_number + 1)\n\n return buckets\n\n def _apply_cat_mapping(self, x):\n # Add 'other' category\n other_value = 1 if len(self.map.values()) == 0 else max(self.map.values()) + 1\n mapping = MissingDict(self.map)\n mapping.set_missing_value(other_value) # This was 'other' but you cannot mix integers and strings\n\n new = x.map(mapping)\n\n # define the labels\n v = {}\n\n # create a dictionary that groups by the\n if 0 not in mapping.values():\n v[0] = \"empty map\"\n else:\n for key, value in sorted(mapping.items()):\n if not isinstance(key, str):\n key = str(key)\n v.setdefault(value, []).append(key)\n v[other_value] = \"other\"\n sorted_v = {key: v[key] for key in sorted(v)}\n\n # transform it all to a string-like format\n for k, v in sorted_v.items():\n\n # if k is of type int32, it will create weird characters if exported to files\n # if self.type=='categorical':\n if isinstance(k, np.int32):\n k = int(k)\n if isinstance(v, list):\n if len(v) == 1:\n v = v[0]\n if not isinstance(v, str):\n v = str(v)\n else:\n v = \", \".join(v)\n sorted_v[k] = v\n\n self.labels = sorted_v\n\n return new\n\n def as_dict(self) -> dict:\n \"\"\"Return data in class as a dict.\n\n Returns:\n dict: data in class\n \"\"\"\n return dataclasses.asdict(self)\n\n def _apply_num_mapping(self, x):\n \"\"\"Apply numerical bucketing and stores the labels for the buckets.\n\n Args:\n x (np.array): feature\n\n Returns:\n (np.array), buckets\n \"\"\"\n buckets = np.digitize(x, self.map, right=self.right)\n buckets = buckets.astype(int)\n\n map_ = np.hstack([-np.inf, self.map, np.inf])\n\n for bucket in np.unique(buckets):\n bucket_str = f\"{map_[int(bucket)]}, {map_[int(bucket) + 1]}\"\n if self.right:\n # The infinite edge should not be inclusive\n if not bucket_str.endswith(\"inf\"):\n bucket_str = 
f\"({bucket_str}]\"\n else:\n bucket_str = f\"({bucket_str})\"\n\n else:\n if not bucket_str.startswith(\"-inf\"):\n bucket_str = f\"[{bucket_str})\"\n else:\n bucket_str = f\"({bucket_str})\"\n\n self.labels[bucket] = bucket_str\n\n return buckets\n\n def _assign_specials(self, x, buckets, start_bucket_number=None):\n \"\"\"Assign the special buckets as defined in the specials dictionary.\n\n Args:\n x (np.array): feature\n buckets (np.array): the bucketed x\n start_bucket_number (int): where to start numbering the\n\n Returns:\n (np.array), buckets\n \"\"\"\n if not start_bucket_number:\n start_bucket_number = int(buckets.max())\n\n for k, v in self.specials.items():\n start_bucket_number += 1\n buckets = np.where(x.isin(v), start_bucket_number, buckets)\n self.labels[start_bucket_number] = \"Special: \" + str(k)\n return buckets\n\n\nclass FeaturesBucketMapping:\n \"\"\"Stores a collection of features BucketMapping.\n\n ```python\n from skorecard.bucket_mapping import FeaturesBucketMapping, BucketMapping\n\n # Working with collections of BucketMappings\n bucket1 = BucketMapping(feature_name='feature1', type='numerical', map=[2, 3, 4, 5])\n bucket2 = BucketMapping(feature_name='feature2', type='numerical', map=[5,6,7,8])\n features_bucket_mapping = FeaturesBucketMapping([bucket1, bucket2])\n print(features_bucket_mapping)\n\n # You can also work with class as dict\n features_bucket_mapping.as_dict()\n\n features_dict = {\n 'feature1': {'feature_name': 'feature1',\n 'type': 'numerical',\n 'map': [2, 3, 4, 5],\n 'right': True},\n 'feature2': {'feature_name': 'feature2',\n 'type': 'numerical',\n 'map': [5, 6, 7, 8],\n 'right': True}\n }\n\n features_bucket_mapping = FeaturesBucketMapping()\n features_bucket_mapping.load_dict(features_dict)\n # Or directly from dict\n FeaturesBucketMapping(features_dict)\n # See columns\n features_bucket_mapping.columns\n ```\n \"\"\"\n\n def __init__(self, maps=[]):\n \"\"\"Takes list of bucketmappings and stores as a dict.\n\n Args:\n maps (list): list of BucketMapping. 
Defaults to [].\n \"\"\"\n self.maps = {}\n if isinstance(maps, list):\n for bucketmap in maps:\n self.append(bucketmap)\n\n if isinstance(maps, dict):\n for _, bucketmap in maps.items():\n if not isinstance(bucketmap, BucketMapping):\n bucketmap = BucketMapping(**bucketmap)\n self.append(bucketmap)\n\n def __repr__(self):\n \"\"\"Pretty print self.\n\n Returns:\n str: reproducable object representation.\n \"\"\"\n class_name = self.__class__.__name__\n maps = list(self.maps.values())\n return f\"{class_name}({maps})\"\n\n def get(self, col: str):\n \"\"\"Get BucketMapping for a column.\n\n Args:\n col (str): Name of column\n\n Returns:\n mapping (BucketMapping): BucketMapping for column\n \"\"\"\n return self.maps[col]\n\n def append(self, bucketmap: BucketMapping) -> None:\n \"\"\"Add a BucketMapping to the collection.\n\n Args:\n bucketmap (BucketMapping): map of a feature\n \"\"\"\n assert isinstance(bucketmap, BucketMapping)\n self.maps[bucketmap.feature_name] = bucketmap\n\n def load_yml(self) -> None:\n \"\"\"Should load in data from a yml.\n\n Returns:\n None: nothing\n \"\"\"\n raise NotImplementedError(\"todo\")\n\n def save_yml(self) -> None:\n \"\"\"Should write data to a yml.\n\n Returns:\n None: nothing\n \"\"\"\n raise NotImplementedError(\"todo\")\n\n def load_dict(self, obj):\n \"\"\"Should load in data from a python dict.\n\n Args:\n obj (dict): Dict with names of features and their BucketMapping\n\n Returns:\n None: nothing\n \"\"\"\n assert isinstance(obj, dict)\n\n self.maps = {}\n for feature, bucketmap in obj.items():\n self.append(BucketMapping(**bucketmap))\n\n def as_dict(self):\n \"\"\"Returns data in class as a dict.\n\n Returns:\n dict: Data in class\n \"\"\"\n return {k: dataclasses.asdict(v) for k, v in self.maps.items()}\n\n @property\n def columns(self):\n \"\"\"Returns the columns that have a bucket_mapping.\"\"\"\n return list(self.as_dict().keys())\n\n\nclass MissingDict(dict):\n \"\"\"Deal with missing values in a dict map.\n\n Because Pandas .map() uses the __missing__ method\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.map.html\n\n Example usage:\n\n ```python\n s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])\n a = {'cat': 'kitten', 'dog': 'puppy'}\n s.map(a)\n a = missingdict(a)\n a.set_missing_value(\"bye\")\n s.map(a)\n ```\n \"\"\"\n\n def set_missing_value(self, value):\n \"\"\"Setter for a missing value.\"\"\"\n self.missing_value = value\n\n def __missing__(self, key):\n \"\"\"Adds a default for missing values.\"\"\"\n assert self.missing_value is not None, \"Use .set_missing_value(key) first\"\n return self.missing_value\n"
] | [
[
"numpy.hstack",
"numpy.digitize",
"pandas.Series",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
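The numerical path through `_transform_buckets` above reduces to `np.digitize` over the boundary map plus, under the "separate" missing treatment, one extra trailing bucket for NaNs. A minimal runnable sketch of that behaviour, using illustrative boundaries and data rather than anything from the record:

```python
import numpy as np
import pandas as pd

# Illustrative boundary map in the same style as BucketMapping(map=[2, 3, 4, 5], right=True)
boundaries = [2, 3, 4, 5]
x = pd.Series([1.0, 2.0, 2.5, 4.5, 9.0, np.nan])

missing = x.isna().to_numpy()
buckets = np.digitize(x, boundaries, right=True)       # zero-indexed, right-closed bins
max_bucket = int(buckets[~missing].max())
buckets = np.where(missing, max_bucket + 1, buckets)   # "separate": NaNs get their own bucket

print(buckets)  # [0 0 1 3 4 5] -- the NaN lands in the trailing "Missing" bucket
```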
pprp/yolo_deep_sort_pytorch | [
"7317058134b656177ef714f169c25fc76a3e1f2a"
] | [
"train.py"
] | [
"import argparse\nimport os\n\nimport torch.distributed as dist\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\n\nimport test # import test.py to get mAP after each epoch\nfrom models import *\nfrom utils.datasets import *\nfrom utils.utils import *\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nmixed_precision = False\ntry: # Mixed precision training https://github.com/NVIDIA/apex\n from apex import amp\nexcept:\n mixed_precision = False # not installed\n\nwdir = 'weights' + os.sep # weights dir\nlast = wdir + 'last.pt'\nbest = wdir + 'best.pt'\nresults_file = 'results.txt'\n\n# Hyperparameters (results68: 59.2 [email protected] yolov3-spp-416) https://github.com/ultralytics/yolov3/issues/310\n\nhyp = {'giou': 3.54, # giou loss gain\n 'cls': 37.4, # cls loss gain\n 'cls_pw': 1.0, # cls BCELoss positive_weight\n 'obj': 64.3, # obj loss gain (*=img_size/416 if img_size != 416)\n 'obj_pw': 1.0, # obj BCELoss positive_weight\n 'iou_t': 0.225, # iou training threshold\n 'lr0': 0.00579, # initial learning rate (SGD=1E-3, Adam=9E-5)\n 'lrf': -4., # final LambdaLR learning rate = lr0 * (10 ** lrf)\n 'momentum': 0.937, # SGD momentum\n 'weight_decay': 0.000484, # optimizer weight decay\n 'fl_gamma': 0.5, # focal loss gamma\n 'hsv_h': 0.0138, # image HSV-Hue augmentation (fraction)\n 'hsv_s': 0.678, # image HSV-Saturation augmentation (fraction)\n 'hsv_v': 0.36, # image HSV-Value augmentation (fraction)\n 'degrees': 1.98, # image rotation (+/- deg)\n 'translate': 0.05, # image translation (+/- fraction)\n 'scale': 0.05, # image scale (+/- gain)\n 'shear': 0.641} # image shear (+/- deg)\n\n# Overwrite hyp with hyp*.txt (optional)\nf = glob.glob('hyp*.txt')\nif f:\n print('Using %s' % f[0])\n for k, v in zip(hyp.keys(), np.loadtxt(f[0])):\n hyp[k] = v\n\n\ndef train():\n cfg = opt.cfg\n data = opt.data\n img_size = opt.img_size\n epochs = 1 if opt.prebias else opt.epochs # 500200 batches at bs 64, 117263 images = 273 epochs\n batch_size = opt.batch_size\n accumulate = opt.accumulate # effective bs = batch_size * accumulate = 16 * 4 = 64\n weights = opt.weights # initial training weights\n\n if 'pw' not in opt.arc: # remove BCELoss positive weights\n hyp['cls_pw'] = 1.\n hyp['obj_pw'] = 1.\n\n # Initialize\n init_seeds()\n if opt.multi_scale:\n img_sz_min = round(img_size / 32 / 1.5)\n img_sz_max = round(img_size / 32 * 1.5)\n img_size = img_sz_max * 32 # initiate with maximum multi_scale size\n print('Using multi-scale %g - %g' % (img_sz_min * 32, img_size))\n\n # Configure run\n data_dict = parse_data_cfg(data)\n train_path = data_dict['train']\n test_path = data_dict['valid']\n nc = int(data_dict['classes']) # number of classes\n\n # Remove previous results\n for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):\n os.remove(f)\n\n # Initialize model\n model = Darknet(cfg, arc=opt.arc).to(device)\n\n # Optimizer\n pg0, pg1 = [], [] # optimizer parameter groups\n for k, v in dict(model.named_parameters()).items():\n if 'Conv2d.weight' in k:\n pg1 += [v] # parameter group 1 (apply weight_decay)\n else:\n pg0 += [v] # parameter group 0\n\n if opt.adam:\n optimizer = optim.Adam(pg0, lr=hyp['lr0'])\n # optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)\n else:\n optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)\n optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay\n del pg0, pg1\n\n # https://github.com/alphadl/lookahead.pytorch\n # optimizer = 
torch_utils.Lookahead(optimizer, k=5, alpha=0.5)\n\n cutoff = -1 # backbone reaches to cutoff layer\n start_epoch = 0\n best_fitness = float('inf')\n attempt_download(weights)\n if weights.endswith('.pt'): # pytorch format\n # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.\n chkpt = torch.load(weights, map_location=device)\n\n # load model\n try:\n chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}\n model.load_state_dict(chkpt['model'], strict=False)\n except KeyError as e:\n s = \"%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. \" \\\n \"See https://github.com/ultralytics/yolov3/issues/657\" % (opt.weights, opt.cfg, opt.weights)\n raise KeyError(s) from e\n\n # load optimizer\n if chkpt['optimizer'] is not None:\n optimizer.load_state_dict(chkpt['optimizer'])\n best_fitness = chkpt['best_fitness']\n\n # load results\n if chkpt.get('training_results') is not None:\n with open(results_file, 'w') as file:\n file.write(chkpt['training_results']) # write results.txt\n\n start_epoch = chkpt['epoch'] + 1\n del chkpt\n\n elif len(weights) > 0: # darknet format\n # possible weights are '*.weights', 'yolov3-tiny.conv.15', 'darknet53.conv.74' etc.\n cutoff = load_darknet_weights(model, weights)\n\n if opt.transfer or opt.prebias: # transfer learning edge (yolo) layers\n nf = int(model.module_defs[model.yolo_layers[0] - 1]['filters']) # yolo layer size (i.e. 255)\n\n if opt.prebias:\n for p in optimizer.param_groups:\n # lower param count allows more aggressive training settings: i.e. SGD ~0.1 lr0, ~0.9 momentum\n p['lr'] *= 100 # lr gain\n if p.get('momentum') is not None: # for SGD but not Adam\n p['momentum'] *= 0.9\n\n for p in model.parameters():\n if opt.prebias and p.numel() == nf: # train (yolo biases)\n p.requires_grad = True\n elif opt.transfer and p.shape[0] == nf: # train (yolo biases+weights)\n p.requires_grad = True\n else: # freeze layer\n p.requires_grad = False\n\n # Scheduler https://github.com/ultralytics/yolov3/issues/238\n # lf = lambda x: 1 - x / epochs # linear ramp to zero\n # lf = lambda x: 10 ** (hyp['lrf'] * x / epochs) # exp ramp\n # lf = lambda x: 1 - 10 ** (hyp['lrf'] * (1 - x / epochs)) # inverse exp ramp\n # scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)\n # scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=range(59, 70, 1), gamma=0.8) # gradual fall to 0.1*lr0\n scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[round(opt.epochs * x) for x in [0.8, 0.9]], gamma=0.1)\n scheduler.last_epoch = start_epoch - 1\n\n # # Plot lr schedule\n # y = []\n # for _ in range(epochs):\n # scheduler.step()\n # y.append(optimizer.param_groups[0]['lr'])\n # plt.plot(y, label='LambdaLR')\n # plt.xlabel('epoch')\n # plt.ylabel('LR')\n # plt.tight_layout()\n # plt.savefig('LR.png', dpi=300)\n\n # Mixed precision training https://github.com/NVIDIA/apex\n if mixed_precision:\n model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)\n\n # Initialize distributed training\n if device.type != 'cpu' and torch.cuda.device_count() > 1:\n dist.init_process_group(backend='nccl', # 'distributed backend'\n init_method='tcp://127.0.0.1:9999', # distributed training init method\n world_size=1, # number of nodes for distributed training\n rank=0) # distributed training node rank\n model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)\n model.yolo_layers = model.module.yolo_layers # move yolo layer 
indices to top level\n\n # Dataset\n dataset = LoadImagesAndLabels(train_path, img_size, batch_size,\n augment=True,\n hyp=hyp, # augmentation hyperparameters\n rect=opt.rect, # rectangular training\n image_weights=opt.img_weights,\n cache_labels=epochs > 10,\n cache_images=opt.cache_images and not opt.prebias)\n\n # Dataloader\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers\n dataloader = torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n num_workers=nw,\n shuffle=not opt.rect, # Shuffle=True unless rectangular training is used\n pin_memory=True,\n collate_fn=dataset.collate_fn)\n\n # Test Dataloader\n if not opt.prebias:\n testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, opt.img_size, batch_size,\n hyp=hyp,\n rect=True,\n cache_labels=True,\n cache_images=opt.cache_images),\n batch_size=batch_size,\n num_workers=nw,\n pin_memory=True,\n collate_fn=dataset.collate_fn)\n\n # Start training\n nb = len(dataloader)\n model.nc = nc # attach number of classes to model\n model.arc = opt.arc # attach yolo architecture\n model.hyp = hyp # attach hyperparameters to model\n model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights\n maps = np.zeros(nc) # mAP per class\n # torch.autograd.set_detect_anomaly(True)\n results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'\n t0 = time.time()\n torch_utils.model_info(model, report='summary') # 'full' or 'summary'\n print('Using %g dataloader workers' % nw)\n print('Starting %s for %g epochs...' % ('prebias' if opt.prebias else 'training', epochs))\n for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------\n model.train()\n print(('\\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))\n\n # Freeze backbone at epoch 0, unfreeze at epoch 1 (optional)\n freeze_backbone = False\n if freeze_backbone and epoch < 2:\n for name, p in model.named_parameters():\n if int(name.split('.')[1]) < cutoff: # if layer < 75\n p.requires_grad = False if epoch == 0 else True\n\n # Update image weights (optional)\n if dataset.image_weights:\n w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights\n image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)\n dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n) # rand weighted idx\n\n mloss = torch.zeros(4).to(device) # mean losses\n pbar = tqdm(enumerate(dataloader), total=nb) # progress bar\n for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------\n ni = i + nb * epoch # number integrated batches (since train start)\n imgs = imgs.to(device).float() / 255.0 # uint8 to float32, 0 - 255 to 0.0 - 1.0\n targets = targets.to(device)\n\n # Multi-Scale training\n if opt.multi_scale:\n if ni / accumulate % 10 == 0: # adjust (67% - 150%) every 10 batches\n img_size = random.randrange(img_sz_min, img_sz_max + 1) * 32\n sf = img_size / max(imgs.shape[2:]) # scale factor\n if sf != 1:\n ns = [math.ceil(x * sf / 32.) 
* 32 for x in imgs.shape[2:]] # new shape (stretched to 32-multiple)\n imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)\n\n # Plot images with bounding boxes\n if ni == 0:\n fname = 'train_batch%g.jpg' % i\n plot_images(imgs=imgs, targets=targets, paths=paths, fname=fname)\n if tb_writer:\n tb_writer.add_image(fname, cv2.imread(fname)[:, :, ::-1], dataformats='HWC')\n\n # Hyperparameter burn-in\n # n_burn = nb - 1 # min(nb // 5 + 1, 1000) # number of burn-in batches\n # if ni <= n_burn:\n # for m in model.named_modules():\n # if m[0].endswith('BatchNorm2d'):\n # m[1].momentum = 1 - i / n_burn * 0.99 # BatchNorm2d momentum falls from 1 - 0.01\n # g = (i / n_burn) ** 4 # gain rises from 0 - 1\n # for x in optimizer.param_groups:\n # x['lr'] = hyp['lr0'] * g\n # x['weight_decay'] = hyp['weight_decay'] * g\n\n # Run model\n pred = model(imgs)\n\n # Compute loss\n loss, loss_items = compute_loss(pred, targets, model)\n if not torch.isfinite(loss):\n print('WARNING: non-finite loss, ending training ', loss_items)\n return results\n\n # Scale loss by nominal batch_size of 64\n loss *= batch_size / 64\n\n # Compute gradient\n if mixed_precision:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n # Accumulate gradient for x batches before optimizing\n if ni % accumulate == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n # Print batch results\n mloss = (mloss * i + loss_items) / (i + 1) # update mean losses\n mem = torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0 # (GB)\n s = ('%10s' * 2 + '%10.3g' * 6) % (\n '%g/%g' % (epoch, epochs - 1), '%.3gG' % mem, *mloss, len(targets), img_size)\n pbar.set_description(s)\n\n # end batch ------------------------------------------------------------------------------------------------\n\n # Update scheduler\n scheduler.step()\n\n # Process epoch results\n final_epoch = epoch + 1 == epochs\n if opt.prebias:\n print_model_biases(model)\n elif not opt.notest or final_epoch: # Calculate mAP\n with torch.no_grad():\n is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80\n results, maps = test.test(cfg,\n data,\n batch_size=batch_size,\n img_size=opt.img_size,\n model=model,\n conf_thres=0.001 if final_epoch else 0.1, # 0.1 for speed\n save_json=final_epoch and is_coco,\n dataloader=testloader)\n\n # Write epoch results\n with open(results_file, 'a') as f:\n f.write(s + '%10.3g' * 7 % results + '\\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls)\n if len(opt.name) and opt.bucket and not opt.prebias:\n os.system('gsutil cp results.txt gs://%s/results%s.txt' % (opt.bucket, opt.name))\n\n # Write Tensorboard results\n if tb_writer:\n x = list(mloss) + list(results)\n titles = ['GIoU', 'Objectness', 'Classification', 'Train loss',\n 'Precision', 'Recall', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification']\n for xi, title in zip(x, titles):\n tb_writer.add_scalar(title, xi, epoch)\n\n # Update best mAP\n fitness = sum(results[4:]) # total loss\n if fitness < best_fitness:\n best_fitness = fitness\n\n # Save training results\n save = (not opt.nosave) or (final_epoch and not opt.evolve) or opt.prebias\n if save:\n with open(results_file, 'r') as f:\n # Create checkpoint\n chkpt = {'epoch': epoch,\n 'best_fitness': best_fitness,\n 'training_results': f.read(),\n 'model': model.module.state_dict() if type(\n model) is nn.parallel.DistributedDataParallel else model.state_dict(),\n 'optimizer': None if 
final_epoch else optimizer.state_dict()}\n\n # Save last checkpoint\n torch.save(chkpt, last)\n\n # Save best checkpoint\n if best_fitness == fitness:\n torch.save(chkpt, best)\n\n # Save backup every 10 epochs (optional)\n if epoch > 0 and epoch % 10 == 0:\n torch.save(chkpt, wdir + 'backup%g.pt' % epoch)\n\n # Delete checkpoint\n del chkpt\n\n # end epoch ----------------------------------------------------------------------------------------------------\n\n # end training\n if len(opt.name) and not opt.prebias:\n fresults, flast, fbest = 'results%s.txt' % opt.name, 'last%s.pt' % opt.name, 'best%s.pt' % opt.name\n os.rename('results.txt', fresults)\n os.rename(wdir + 'last.pt', wdir + flast) if os.path.exists(wdir + 'last.pt') else None\n os.rename(wdir + 'best.pt', wdir + fbest) if os.path.exists(wdir + 'best.pt') else None\n\n # save to cloud\n if opt.bucket:\n os.system('gsutil cp %s %s gs://%s' % (fresults, wdir + flast, opt.bucket))\n\n plot_results() # save as results.png\n print('%g epochs completed in %.3f hours.\\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))\n dist.destroy_process_group() if torch.cuda.device_count() > 1 else None\n torch.cuda.empty_cache()\n\n return results\n\n\ndef prebias():\n # trains output bias layers for 1 epoch and creates new backbone\n if opt.prebias:\n a = opt.img_weights # save settings\n opt.img_weights = False # disable settings\n\n train() # transfer-learn yolo biases for 1 epoch\n create_backbone(last) # saved results as backbone.pt\n\n opt.weights = wdir + 'backbone.pt' # assign backbone\n opt.prebias = False # disable prebias\n opt.img_weights = a # reset settings\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=273) # 500200 batches at bs 16, 117263 images = 273 epochs\n parser.add_argument('--batch-size', type=int, default=16) # effective bs = batch_size * accumulate = 16 * 4 = 64\n parser.add_argument('--accumulate', type=int, default=4, help='batches to accumulate before optimizing')\n parser.add_argument('--cfg', type=str, default='cfg/yolov3-tiny-cbam.cfg', help='*.cfg path')\n parser.add_argument('--data', type=str, default='data/dataset1.data', help='*.data path')\n parser.add_argument('--multi-scale', action='store_true', help='adjust (67% - 150%) img_size every 10 batches')\n parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')\n parser.add_argument('--rect', action='store_true', help='rectangular training')\n parser.add_argument('--resume', action='store_true', help='resume training from last.pt')\n parser.add_argument('--transfer', action='store_true', help='transfer learning')\n parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')\n parser.add_argument('--notest', action='store_true', help='only test final epoch')\n parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')\n parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')\n parser.add_argument('--img-weights', action='store_true', help='select training images by weight')\n parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')\n parser.add_argument('--weights', type=str, default='weights/darknet53.conv.74', help='initial weights')\n parser.add_argument('--arc', type=str, default='default', help='yolo architecture') # defaultpw, uCE, uBCE\n parser.add_argument('--prebias', action='store_true', help='transfer-learn yolo biases prior 
to training')\n parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')\n parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1 or cpu)')\n parser.add_argument('--adam', action='store_true', help='use adam optimizer')\n parser.add_argument('--var', type=float, help='debug variable')\n opt = parser.parse_args()\n opt.weights = last if opt.resume else opt.weights\n print(opt)\n device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size)\n if device.type == 'cpu':\n mixed_precision = False\n\n # scale hyp['obj'] by img_size (evolved at 416)\n hyp['obj'] *= opt.img_size / 416.\n\n tb_writer = None\n if not opt.evolve: # Train normally\n try:\n # Start Tensorboard with \"tensorboard --logdir=runs\", view at http://localhost:6006/\n from torch.utils.tensorboard import SummaryWriter\n\n tb_writer = SummaryWriter()\n except:\n pass\n\n prebias() # optional\n train() # train normally\n\n else: # Evolve hyperparameters (optional)\n opt.notest = True # only test final epoch\n opt.nosave = True # only save final checkpoint\n if opt.bucket:\n os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists\n\n for _ in range(1): # generations to evolve\n if os.path.exists('evolve.txt'): # if evolve.txt exists: select best hyps and mutate\n # Select parent(s)\n x = np.loadtxt('evolve.txt', ndmin=2)\n parent = 'weighted' # parent selection method: 'single' or 'weighted'\n if parent == 'single' or len(x) == 1:\n x = x[fitness(x).argmax()]\n elif parent == 'weighted': # weighted combination\n n = min(10, x.shape[0]) # number to merge\n x = x[np.argsort(-fitness(x))][:n] # top n mutations\n w = fitness(x) - fitness(x).min() # weights\n x = (x[:n] * w.reshape(n, 1)).sum(0) / w.sum() # new parent\n for i, k in enumerate(hyp.keys()):\n hyp[k] = x[i + 7]\n\n # Mutate\n np.random.seed(int(time.time()))\n s = [.2, .2, .2, .2, .2, .2, .2, .0, .02, .2, .2, .2, .2, .2, .2, .2, .2, .2] # sigmas\n for i, k in enumerate(hyp.keys()):\n x = (np.random.randn(1) * s[i] + 1) ** 2.0 # plt.hist(x.ravel(), 300)\n hyp[k] *= float(x) # vary by sigmas\n\n # Clip to limits\n keys = ['lr0', 'iou_t', 'momentum', 'weight_decay', 'hsv_s', 'hsv_v', 'translate', 'scale', 'fl_gamma']\n limits = [(1e-5, 1e-2), (0.00, 0.70), (0.60, 0.98), (0, 0.001), (0, .9), (0, .9), (0, .9), (0, .9), (0, 3)]\n for k, v in zip(keys, limits):\n hyp[k] = np.clip(hyp[k], v[0], v[1])\n\n # Train mutation\n prebias()\n results = train()\n\n # Write mutation results\n print_mutation(hyp, results, opt.bucket)\n\n # Plot results\n # plot_evolution_results(hyp)\n"
] | [
[
"torch.optim.Adam",
"torch.distributed.init_process_group",
"torch.utils.tensorboard.SummaryWriter",
"torch.distributed.destroy_process_group",
"torch.optim.SGD"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
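The `MultiStepLR` line in `train()` above drops the learning rate tenfold at 80% and 90% of the scheduled epochs. A minimal sketch of just that schedule, assuming a dummy parameter and reusing the record's `lr0`/`momentum` values:

```python
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler

epochs = 10  # stand-in for opt.epochs
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = optim.SGD(params, lr=0.00579, momentum=0.937, nesterov=True)
scheduler = lr_scheduler.MultiStepLR(
    optimizer, milestones=[round(epochs * x) for x in [0.8, 0.9]], gamma=0.1)

for epoch in range(epochs):
    optimizer.step()   # stands in for one epoch of training
    scheduler.step()
    print(epoch, optimizer.param_groups[0]['lr'])  # LR steps down by 10x at each milestone
```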
littleocub/python_practice | [
"c71b514ea1dcc071e83f279d9a08ec68c863d154"
] | [
"bj_tmp_matplotlib/beijing_2016.py"
] | [
"# beijing_2016\nimport csv\nimport matplotlib.dates\nfrom datetime import datetime\nfrom matplotlib import pyplot as plt\n\n\ndef date_to_list(data_index):\n \"\"\" save date to a list \"\"\"\n results = []\n for row in data:\n results.append(datetime.strptime(row[data_index], '%Y-%m-%d'))\n return results\n\ndef data_to_list(data_index):\n \"\"\" save data to a list \"\"\"\n results = []\n for row in data:\n results.append(int(row[data_index]))\n return results\n\n\nfilename = 'beijing_2016.csv'\nwith open(filename) as bj:\n data = csv.reader(bj)\n header = next(data)\n\n # print(header)\n # print(next(data))\n\n # get the index of data needed\n print('date_akdt', header.index('date_akdt'))\n print('high_temp_f', header.index('high_temp_f'))\n print('low_temp_f', header.index('low_temp_f'))\n\n\n # create a list from the remaining contents in the iterable\n data = list(data)\n\n # save data to list\n high_temp_f_bj = data_to_list(1)\n high_temp_c_bj = [int((x-32)/1.8) for x in high_temp_f_bj]\n\n low_temp_f_bj = data_to_list(3)\n low_temp_c_bj = [int((x-32)/1.8) for x in low_temp_f_bj]\n\n date = date_to_list(0)\n\n\n plt.figure(figsize=(12, 5), dpi=100)\n plt.plot(date, high_temp_c_bj, c='xkcd:orange')\n plt.plot(date, low_temp_c_bj,c='xkcd:azure')\n\n plt.title('Beijing Temperatures (High & Low) - Year 2016', fontsize=22)\n plt.ylabel('Temperature (C)', fontsize=20)\n plt.tick_params(axis='both', labelsize=16)\n plt.fill_between(date, high_temp_c_bj, low_temp_c_bj, facecolor='xkcd:silver', alpha=0.2)\n\n plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter(\"%Y-%m\"))\n plt.gcf().autofmt_xdate()\n plt.margins(x=0,y=0.2)\n\n plt.show()"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.show",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
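The date-axis handling in the script above (a `DateFormatter` on the major ticks plus `autofmt_xdate()` to slant the labels) is independent of the CSV; a minimal sketch with invented dates and temperatures:

```python
from datetime import datetime, timedelta

import matplotlib.dates
from matplotlib import pyplot as plt

# Invented monthly data for illustration; the real script reads beijing_2016.csv
dates = [datetime(2016, 1, 1) + timedelta(days=30 * i) for i in range(12)]
highs = [2, 5, 12, 20, 26, 30, 31, 30, 26, 19, 10, 3]

plt.figure(figsize=(12, 5), dpi=100)
plt.plot(dates, highs, c='xkcd:orange')
plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m'))
plt.gcf().autofmt_xdate()  # rotate the date labels so they do not overlap
plt.show()
```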
jpapadakis/gdal | [
"f07aa15fd65af36b04291303cc6834c87f662814"
] | [
"gdal/swig/python/samples/rel.py"
] | [
"#!/usr/bin/env python3\n###############################################################################\n# $Id$\n#\n# Project: GDAL Python samples\n# Purpose: Script to produce a shaded relief image from elevation data\n# Author: Andrey Kiselev, [email protected]\n#\n###############################################################################\n# Copyright (c) 2003, Andrey Kiselev <[email protected]>\n# Copyright (c) 2009, Even Rouault <even dot rouault at spatialys.com>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n###############################################################################\n\nimport math\nimport sys\nimport numpy as np\n\nfrom osgeo import gdal, gdal_array\ngdal.TermProgress = gdal.TermProgress_nocb\n\n\n# =============================================================================\n\n\ndef Usage():\n print('Usage: rel.py -lsrcaz azimuth -lsrcel elevation [-elstep step]')\n print(' [-dx xsize] [-dy ysize] [-b band] [-ot type] infile outfile')\n print('Produce a shaded relief image from elevation data')\n print('')\n print(' -lsrcaz azimuth Azimuth angle of the diffuse light source (0..360 degrees)')\n print(' -lsrcel elevation Elevation angle of the diffuse light source (0..180 degrees)')\n print(' -elstep step Elevation change corresponding to a change of one grey level')\n print(' (default 1)')\n print(' -dx xsize X and Y dimensions (in meters) of one pixel on the ground')\n print(' -dy ysize (taken from the geotransform matrix by default)')\n print(' -r range\t Dynamic range for output image (default 255)')\n print(' -b band\t Select a band number to convert (default 1)')\n print(' -ot type\t Data type of the output dataset')\n print(' (Byte/Int16/UInt16/UInt32/Int32/Float32/Float64/')\n print(' CInt16/CInt32/CFloat32/CFloat64, default is Byte)')\n print(' infile\t Name of the input file')\n print(' outfile\t Name of the output file')\n print('')\n return 1\n\n\ndef ParseType(typ):\n if typ == 'Byte':\n return gdal.GDT_Byte\n if typ == 'Int16':\n return gdal.GDT_Int16\n if typ == 'UInt16':\n return gdal.GDT_UInt16\n if typ == 'Int32':\n return gdal.GDT_Int32\n if typ == 'UInt32':\n return gdal.GDT_UInt32\n if typ == 'Float32':\n return gdal.GDT_Float32\n if typ == 'Float64':\n return gdal.GDT_Float64\n if typ == 'CInt16':\n return gdal.GDT_CInt16\n if typ == 'CInt32':\n return gdal.GDT_CInt32\n if typ == 'CFloat32':\n return gdal.GDT_CFloat32\n if typ == 'CFloat64':\n return gdal.GDT_CFloat64\n return gdal.GDT_Byte\n\n\ndef main(argv):\n infile = 
None\n outfile = None\n iBand = 1\t # The first band will be converted by default\n frmt = 'GTiff'\n typ = gdal.GDT_Byte\n\n lsrcaz = None\n lsrcel = None\n elstep = 1.0\n xsize = None\n ysize = None\n dyn_range = 255.0\n\n # Parse command line arguments.\n i = 1\n while i < len(argv):\n arg = argv[i]\n\n if arg == '-b':\n i += 1\n iBand = int(argv[i])\n\n elif arg == '-ot':\n i += 1\n typ = ParseType(argv[i])\n\n elif arg == '-lsrcaz':\n i += 1\n lsrcaz = float(argv[i])\n\n elif arg == '-lsrcel':\n i += 1\n lsrcel = float(argv[i])\n\n elif arg == '-elstep':\n i += 1\n elstep = float(argv[i])\n\n elif arg == '-dx':\n i += 1\n xsize = float(argv[i])\n\n elif arg == '-dy':\n i += 1\n ysize = float(argv[i])\n\n elif arg == '-r':\n i += 1\n dyn_range = float(argv[i])\n\n elif infile is None:\n infile = arg\n\n elif outfile is None:\n outfile = arg\n\n else:\n return Usage()\n\n i += 1\n\n if infile is None:\n return Usage()\n if outfile is None:\n return Usage()\n if lsrcaz is None:\n return Usage()\n if lsrcel is None:\n return Usage()\n\n # translate angles from degrees to radians\n lsrcaz = lsrcaz / 180.0 * math.pi\n lsrcel = lsrcel / 180.0 * math.pi\n\n lx = -math.sin(lsrcaz) * math.cos(lsrcel)\n ly = math.cos(lsrcaz) * math.cos(lsrcel)\n lz = math.sin(lsrcel)\n lxyz = math.sqrt(lx**2 + ly**2 + lz**2)\n\n indataset = gdal.Open(infile, gdal.GA_ReadOnly)\n if indataset is None:\n print('Cannot open', infile)\n return 2\n\n if indataset.RasterXSize < 3 or indataset.RasterYSize < 3:\n print('Input image is too small to process, minimum size is 3x3')\n return 3\n\n out_driver = gdal.GetDriverByName(frmt)\n outdataset = out_driver.Create(outfile, indataset.RasterXSize, indataset.RasterYSize, indataset.RasterCount, typ)\n outband = outdataset.GetRasterBand(1)\n\n geotransform = indataset.GetGeoTransform()\n projection = indataset.GetProjection()\n\n if xsize is None:\n xsize = abs(geotransform[1])\n if ysize is None:\n ysize = abs(geotransform[5])\n\n inband = indataset.GetRasterBand(iBand)\n if inband is None:\n print('Cannot load band', iBand, 'from the', infile)\n return 2\n\n numtype = gdal_array.GDALTypeCodeTonpTypeCode(typ)\n outline = np.empty((1, inband.XSize), numtype)\n\n prev = inband.ReadAsArray(0, 0, inband.XSize, 1, inband.XSize, 1)[0]\n outband.WriteArray(outline, 0, 0)\n gdal.TermProgress(0.0)\n\n cur = inband.ReadAsArray(0, 1, inband.XSize, 1, inband.XSize, 1)[0]\n outband.WriteArray(outline, 0, inband.YSize - 1)\n gdal.TermProgress(1.0 / inband.YSize)\n\n dx = 2 * xsize\n dy = 2 * ysize\n\n for i in range(1, inband.YSize - 1):\n next_ = inband.ReadAsArray(0, i + 1, inband.XSize, 1, inband.XSize, 1)[0]\n dzx = (cur[0:-2] - cur[2:]) * elstep\n dzy = (prev[1:-1] - next_[1:-1]) * elstep\n nx = -dy * dzx\n ny = dx * dzy\n nz = dx * dy\n nxyz = nx * nx + ny * ny + nz * nz\n nlxyz = nx * lx + ny * ly + nz * lz\n cosine = dyn_range * (nlxyz / (lxyz * np.sqrt(nxyz)))\n cosine = np.clip(cosine, 0.0, dyn_range)\n outline[0, 1:-1] = cosine.astype(numtype)\n outband.WriteArray(outline, 0, i)\n\n prev = cur\n cur = next_\n\n # Display progress report on terminal\n gdal.TermProgress(float(i + 1) / (inband.YSize - 1))\n\n outdataset.SetGeoTransform(geotransform)\n outdataset.SetProjection(projection)\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n\n"
] | [
[
"numpy.sqrt",
"numpy.empty",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
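Per output pixel, the loop in `rel.py` above takes central differences for the surface slope, builds a normal vector from them, and scales the dynamic range by the cosine against the light direction. A numpy-only sketch of that arithmetic on a 3x3 toy grid (azimuth, elevation angle, and elevations are made up; `elstep` is taken as 1, and the light vector is unit length by construction, so the `lxyz` divide is dropped):

```python
import math
import numpy as np

elev = np.array([[0., 1., 2.],
                 [1., 2., 3.],
                 [2., 3., 4.]])   # prev / cur / next scanlines
xsize = ysize = 1.0

az, el = math.radians(315.0), math.radians(45.0)   # example light source
lx = -math.sin(az) * math.cos(el)
ly = math.cos(az) * math.cos(el)
lz = math.sin(el)

dzx = elev[1, :-2] - elev[1, 2:]       # slope along x on the current row
dzy = elev[0, 1:-1] - elev[2, 1:-1]    # slope along y between prev and next rows
nx = -2 * ysize * dzx                  # normal = (-dy*dzx, dx*dzy, dx*dy) with dx=2*xsize, dy=2*ysize
ny = 2 * xsize * dzy
nz = 4 * xsize * ysize

cosine = (nx * lx + ny * ly + nz * lz) / np.sqrt(nx**2 + ny**2 + nz**2)
print(np.clip(255.0 * cosine, 0.0, 255.0))  # grey level for the interior pixel(s)
```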
HPAI-BSC/neural_patterns_abstractions | [
"9829e086abab4e8aef0e7327d442c1e354b976c4"
] | [
"plots_and_analysis/on_step4/interest_weight_analyisis.py"
] | [
"\"\"\"\nIn this file i will put the basic methods to represent the embedding graph.\n\ndef: Interest node: A feature s.t. has majority of images with value 1. \n\nThe main work in this file is: \n- Extract the interest weights (weights that connect two interest nodes) of a given synset. \n- Extract the mixed weights (weights that connect an interest node with a non interest one). \n- Save this weights in general and per layer. \n- Plot an hystogram with the three types of weights. \n\n\"\"\"\nimport numpy as np\nfrom collections import OrderedDict\nfrom matplotlib import pyplot as plt\nfrom time import time\nfrom datetime import timedelta\nimport os\n\nWEIGHTS_PATH = '../data/models/vgg16_imagenet_weights.npy'\nARRAY_PATH = '../data/arrays_maj_ones/'\nPLOT_PATH = '../data/plots/'\n\ntry:\n os.mkdir('../data/weights')\nexcept:\n pass\n\ntry:\n os.mkdir('../data/plots')\nexcept:\n pass\n\n\nconv_layers = OrderedDict(sorted({'conv1_1': (0, 64), 'conv1_2': (64, 128),\n 'conv2_1': (128, 256), 'conv2_2': (256, 384),\n 'conv3_1': (384, 640), 'conv3_2': (640, 896), 'conv3_3': (896, 1152),\n 'conv4_1': (1152, 1664), 'conv4_2': (1664, 2176), 'conv4_3': (2176, 2688),\n 'conv5_1': (2688, 3200), 'conv5_2': (3200, 3712), 'conv5_3': (3712, 4224),\n 'fc6': (4224, 8320), 'fc7': (8320, 12416)\n }.items(), key=lambda t: t[1]))\n\nrelu_layers = {u'vgg16_model/relu4_2': (1664, 2176), u'vgg16_model/relu4_3': (2176, 2688),\n u'vgg16_model/relu4_1': (1152, 1664), u'vgg16_model/relu3_1': (384, 640),\n u'vgg16_model/relu3_3': (896, 1152), u'vgg16_model/relu3_2': (640, 896),\n u'vgg16_model/relu7': (8320, 12416), u'vgg16_model/relu6': (4224, 8320),\n u'vgg16_model/relu2_1': (128, 256), u'vgg16_model/relu2_2': (256, 384),\n u'vgg16_model/relu5_3': (3712, 4224), u'vgg16_model/relu5_2': (3200, 3712),\n u'vgg16_model/relu5_1': (2688, 3200), u'vgg16_model/relu1_2': (64, 128), u'vgg16_model/relu1_1': (0, 64)}\n\n\ndef find_layer_of_feature(feature, layer_notation='conv'):\n \"\"\"\n This function find the layer correspondent to the given feature in a certain embedding.\n It returns the layer name of the feature.\n\n There are two different notations:\n - The used by the FNE: vgg16_model/relux_y\n - The used by the vgg16 weights: 'convx_y\n\n :param feature: a feature : int\n :param embedding: a dictionary with keys: ['embeddings', 'image_paths', 'image_labels', 'feature_scheme']\n :return:\n \"\"\"\n # layers = embedding['feature_scheme'][()] ## Ordered dict.\n if layer_notation == 'relu':\n layers = relu_layers\n\n else:\n layers = conv_layers\n\n layer = None\n for i in layers.values():\n if feature in range(i[0], i[1]):\n layer = i\n # print(feature, i)\n try:\n layer_name = list(layers.keys())[list(layers.values()).index(layer)]\n return layer_name\n except:\n print('Feature not in range')\n return None\n\n\ndef separate_per_layer(feature_array):\n \"\"\"\n It returns an ordered dicctionary that separates the features between the different layers. 
\n :param feature_array:\n :return:\n \"\"\"\n feature_dict = {}\n for f in feature_array:\n layer_name = find_layer_of_feature(f)\n try:\n feature_dict[layer_name] = np.append(feature_dict[layer_name], f)\n except:\n feature_dict[layer_name] = np.array([f])\n # print(feature_dict)\n return OrderedDict(sorted(feature_dict.items(), key=lambda t: t[0]))\n\n\ndef load_weights(path):\n all_weights = np.load(path, encoding='latin1')[()]\n # I take the weights without bias\n weights = {key: all_weights[key][0] for key in all_weights.keys()}\n return weights\n\ndef plot_weights():\n \"\"\"\n Plots an hystogram of all the weights. \n \"\"\"\n weights = load_weights(WEIGHTS_PATH)\n for k in weights.keys():\n print(k, weights[k].min(), weights[k].max())\n plt.figure()\n plt.hist(weights[k].flatten())\n plt.title(k)\n plt.savefig(PLOT_PATH + k + '.png')\n\ndef fne_feature_to_vgg16_block(feature, all_weights=None):\n \"\"\"\n This function returns the block of weights correspondent to each feature of the FNE.\n :param feature:\n :return:\n \"\"\"\n layer_name = find_layer_of_feature(feature)\n layer = conv_layers[layer_name]\n if not all_weights:\n weights_of_layer = load_weights(WEIGHTS_PATH)[layer_name]\n else:\n weights_of_layer = all_weights[layer_name]\n f = feature - layer[0]\n if 'conv' in layer_name:\n weights = weights_of_layer[:, :, :, f]\n else:\n weights = weights_of_layer[:, f]\n return weights\n\n\ndef weigh_distribution(features_array, name='patata'):\n \"\"\"\n This function returns an hystogram of the feature array given. \n \"\"\"\n all_weights = load_weights(WEIGHTS_PATH)\n\n conv_features = [k for k in features_array if k < 4224]\n weights_conv = np.array([])\n weights_conv_mean = np.array([])\n for f in conv_features:\n block = fne_feature_to_vgg16_block(f, all_weights)\n weights_conv = np.append(weights_conv, block.flatten())\n weights_conv_mean = np.append(weights_conv_mean, np.mean(block.flatten()))\n\n if len(weights_conv) > 0:\n plt.figure()\n plt.hist(weights_conv, bins=10, range=(-.05, 0.05))\n plt.savefig(PLOT_PATH + name + '_conv.png')\n\n fc_features = [k for k in features_array if k >= 4224]\n weights_fc = np.array([])\n\n for f in fc_features:\n weights_fc = np.append(weights_fc, fne_feature_to_vgg16_block(f, all_weights).flatten())\n if len(weights_fc) > 0:\n plt.figure()\n plt.hist(weights_fc, bins=10) # , range=(-.05, 0.05))\n plt.savefig(PLOT_PATH + name + '_fc.png')\n\n\ndef extract_weights_of_interest(all_interest_nodes, name='patata'):\n \"\"\"\n This function extracts the connexion between nodes with income connection such that have value one on\n their corresponding fne value.\n :param all_interest_nodes:\n :return:\n \"\"\"\n weights = np.array([])\n weights_conv = np.array([])\n weights_fc = np.array([])\n layers = list(all_interest_nodes.keys())\n all_weights = load_weights(WEIGHTS_PATH)\n\n per_layer = {}\n\n for l in range(len(layers)):\n layer = layers[l]\n if layer == 'fc7':\n break\n next_layer = layers[l + 1]\n local_weights = []\n if 'conv' in next_layer:\n for feature2 in all_interest_nodes[next_layer]:\n w = fne_feature_to_vgg16_block(feature2, all_weights)\n for feature1 in all_interest_nodes[layer]:\n local_weights = np.append(local_weights, w[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights = np.append(weights, w[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights_conv = np.append(weights_conv, w[:, :, feature1 - conv_layers[layer][0]].flatten())\n if next_layer == 'fc6':\n for feature2 in all_interest_nodes[next_layer]:\n w 
= fne_feature_to_vgg16_block(feature2, all_weights)\n wa = np.reshape(w, (7, 7, 512))\n for feature1 in all_interest_nodes[layer]:\n local_weights = np.append(local_weights, wa[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights = np.append(weights, wa[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights_fc = np.append(weights_fc, wa[:, :, feature1 - conv_layers[layer][0]].flatten())\n if next_layer == 'fc7':\n for feature2 in all_interest_nodes[next_layer]:\n w = fne_feature_to_vgg16_block(feature2, all_weights)\n for feature1 in all_interest_nodes[layer]:\n local_weights = np.append(local_weights, w[feature1 - conv_layers[layer][0]].flatten())\n weights = np.append(weights, w[feature1 - conv_layers[layer][0]])\n weights_fc = np.append(weights_fc, w[feature1 - conv_layers[layer][0]])\n per_layer[layer] = local_weights\n np.savez('../data/weights/weights_interest_' + name + '.npz', w=weights, wconv=weights_conv, wfc=weights_fc,\n wpl=per_layer)\n return weights, weights_conv, weights_fc, per_layer\n\n\ndef extract_weights_of_interest_with_normal_origin(all_interest_nodes, name='patata_origin'):\n \"\"\"\n The main idea of this code is extract the connexion between neurons with income connection such that have value one on their corresponding fne value.\n :param feture_dict:\n :return:\n \"\"\"\n weights = np.array([])\n weights_conv = np.array([])\n weights_fc = np.array([])\n layers = list(all_interest_nodes.keys())\n all_weights = load_weights(WEIGHTS_PATH)\n\n per_layer = {}\n\n for l in range(len(layers)):\n layer = layers[l]\n\n if layer == 'fc7':\n break\n\n local_weights = []\n\n next_layer = layers[l + 1]\n\n all_nodes_origin = np.array(range(conv_layers[layer][0], conv_layers[layer][1]))\n all_nodes_destination = np.array(range(conv_layers[next_layer][0], conv_layers[next_layer][1]))\n\n interest_nodes_origin = all_interest_nodes[layer]\n interest_nodes_destination = all_interest_nodes[next_layer]\n normal_nodes_origin = np.setdiff1d(all_nodes_origin, interest_nodes_origin)\n normal_nodes_destination = np.setdiff1d(all_nodes_destination, interest_nodes_destination)\n\n interest_nodes = interest_nodes_destination\n normal_nodes = normal_nodes_origin\n\n if 'conv' in next_layer:\n for feature2 in interest_nodes:\n w = fne_feature_to_vgg16_block(feature2, all_weights)\n for feature1 in normal_nodes:\n local_weights = np.append(local_weights, w[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights = np.append(weights, w[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights_conv = np.append(weights_conv, w[:, :, feature1 - conv_layers[layer][0]].flatten())\n\n if next_layer == 'fc6':\n for feature2 in interest_nodes:\n w = fne_feature_to_vgg16_block(feature2, all_weights)\n wa = np.reshape(w, (7, 7, 512))\n for feature1 in normal_nodes:\n local_weights = np.append(local_weights, wa[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights = np.append(weights, wa[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights_fc = np.append(weights_fc, wa[:, :, feature1 - conv_layers[layer][0]].flatten())\n\n if next_layer == 'fc7':\n for feature2 in interest_nodes:\n w = fne_feature_to_vgg16_block(feature2, all_weights)\n for feature1 in normal_nodes:\n local_weights = np.append(local_weights, w[feature1 - conv_layers[layer][0]].flatten())\n weights = np.append(weights, w[feature1 - conv_layers[layer][0]])\n weights_fc = np.append(weights_fc, w[feature1 - conv_layers[layer][0]])\n\n per_layer[layer] = local_weights\n\n 
np.savez('../data/weights/weights_origin_' + name + '.npz', w=weights, wconv=weights_conv, wfc=weights_fc,\n wpl=per_layer)\n return weights, weights_conv, weights_fc, per_layer\n\n\ndef extract_weights_of_interest_with_normal_destination(all_interest_nodes, name='patata_destiny'):\n \"\"\"\n The main idea of this code is extract the connexion between neurons with income connection such that have value one on\n their corresponding fne value is one.\n :param feture_dict:\n :return:\n \"\"\"\n weights = np.array([])\n weights_conv = np.array([])\n weights_fc = np.array([])\n layers = list(all_interest_nodes.keys())\n all_weights = load_weights(WEIGHTS_PATH)\n\n per_layer = {}\n\n for l in range(len(layers)):\n layer = layers[l]\n if layer == 'fc7':\n break\n\n next_layer = layers[l + 1]\n\n all_nodes_origin = np.array(range(conv_layers[layer][0], conv_layers[layer][1]))\n all_nodes_destination = np.array(range(conv_layers[next_layer][0], conv_layers[next_layer][1]))\n\n interest_nodes_origin = all_interest_nodes[layer]\n interest_nodes_destination = all_interest_nodes[next_layer]\n normal_nodes_origin = np.setdiff1d(all_nodes_origin, interest_nodes_origin)\n normal_nodes_destination = np.setdiff1d(all_nodes_destination, interest_nodes_destination)\n\n interest_nodes = interest_nodes_origin\n normal_nodes = normal_nodes_destination\n\n local_weights = []\n if 'conv' in next_layer:\n for feature2 in normal_nodes:\n w = fne_feature_to_vgg16_block(feature2, all_weights)\n for feature1 in interest_nodes:\n local_weights = np.append(local_weights, w[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights = np.append(weights, w[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights_conv = np.append(weights_conv, w[:, :, feature1 - conv_layers[layer][0]].flatten())\n\n if next_layer == 'fc6':\n for feature2 in normal_nodes:\n w = fne_feature_to_vgg16_block(feature2, all_weights)\n wa = np.reshape(w, (7, 7, 512))\n for feature1 in interest_nodes:\n local_weights = np.append(local_weights, wa[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights = np.append(weights, wa[:, :, feature1 - conv_layers[layer][0]].flatten())\n weights_fc = np.append(weights_fc, wa[:, :, feature1 - conv_layers[layer][0]].flatten())\n\n if next_layer == 'fc7':\n for feature2 in normal_nodes:\n w = fne_feature_to_vgg16_block(feature2, all_weights)\n for feature1 in interest_nodes:\n local_weights = np.append(local_weights, w[feature1 - conv_layers[layer][0]].flatten())\n weights = np.append(weights, w[feature1 - conv_layers[layer][0]])\n weights_fc = np.append(weights_fc, w[feature1 - conv_layers[layer][0]])\n per_layer[layer] = local_weights\n\n np.savez('../data/weights/weights_destiny_' + name + '.npz', w=weights, wconv=weights_conv, wfc=weights_fc,\n wpl=per_layer)\n return weights, weights_conv, weights_fc, per_layer\n\n\n\ndef plot_hist(a, name):\n plt.figure()\n plt.hist(a, histtype='step', density=True, bins=100)\n plt.savefig(PLOT_PATH + name + '.png')\n\n\ndef plot_three(interest, origin, destination, name):\n plt.figure()\n plt.hist(interest, histtype='step', density=True, bins=100, label='interest')\n plt.hist(origin, histtype='step', density=True, bins=100, label='origin')\n plt.hist(destination, histtype='step', density=True, bins=100, label='destination')\n plt.legend(loc='upper right')\n plt.savefig(PLOT_PATH + name + '.png')\n\n\ndef plot_nice_three(interest, origin, destination, name):\n plt.style.use('seaborn-deep')\n plt.figure()\n plt.hist([interest, origin, destination], histtype='step', 
density=True, bins=100,\n label=['interest', 'origin', 'destination'])\n plt.legend(loc='upper right')\n plt.savefig(PLOT_PATH + name + '-nicer.png')\n\n\ndef plot_weights_of_interest(all_interest_nodes, name):\n \"\"\"\n If the weights are generated, this function loads them, else it calculates and saves them. Then it plots all the histograms comparing the three types of weights. \n \"\"\"\n \n try:\n weights = np.load('../data/weights/weights_interest_' + name + '.npz')\n w1 = weights['w']\n w1_c = weights['wconv']\n w1_fc = weights['wfc']\n w1pl = weights['wpl']\n except:\n w1, w1_c, w1_fc, w1pl = extract_weights_of_interest(all_interest_nodes, name)\n print('interest calculated')\n\n try:\n weights = np.load('../data/weights/weights_origin_' + name + '.npz')\n w2 = weights['w']\n w2_c = weights['wconv']\n w2_fc = weights['wfc']\n w2pl = weights['wpl']\n except:\n w2, w2_c, w2_fc, w2pl = extract_weights_of_interest_with_normal_origin(all_interest_nodes, name)\n print('origin calculated')\n\n try:\n weights = np.load('../data/weights/weights_destiny_' + name + '.npz')\n w3 = weights['w']\n w3_c = weights['wconv']\n w3_fc = weights['wfc']\n w3pl = weights['wpl']\n except:\n w3, w3_c, w3_fc, w3pl = extract_weights_of_interest_with_normal_destination(all_interest_nodes, name)\n print('destiny calculated')\n\n plot_three(w1, w2, w3, name)\n plot_three(w1_c, w2_c, w3_c, name + '_conv')\n plot_three(w1_fc, w2_fc, w3_fc, name + '_fc')\n\n print(w1pl.item())\n for layer in w1pl.item().keys():\n plot_three(w1pl.item()[layer], w2pl.item()[layer], w3pl.item()[layer], name + layer)\n\n print('all ploted')\n\n\n\ndef main():\n hunting_dog = np.load(ARRAY_PATH + 'hunting_dog_pos_features_maj_ones.npz')['pos_features']\n\n ss = hunting_dog\n\n ss_dictionary = separate_per_layer(ss)\n\n plot_weights_of_interest(ss_dictionary, 'hunting_dog')\n\n\n\nif __name__ == \"__main__\":\n init = time()\n main()\n print('time:', timedelta(seconds=time() - init))\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.savez",
"matplotlib.pyplot.title",
"matplotlib.pyplot.style.use",
"numpy.reshape",
"matplotlib.pyplot.savefig",
"numpy.setdiff1d",
"numpy.append",
"numpy.load",
"numpy.array",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
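A round-trip detail behind the `.item()` calls and `[()]` indexing in the record above: a Python dict passed to `np.savez` is stored as a 0-d object array, so it must be unwrapped on load (and on NumPy >= 1.16.3 the load also needs `allow_pickle=True`; the plain `np.load` calls in the record assume an older default). A minimal sketch:

```python
import numpy as np

per_layer = {'conv1_1': np.array([0.1, -0.2]), 'fc6': np.array([0.3])}
np.savez('demo_weights.npz', w=np.array([1.0, 2.0]), wpl=per_layer)

data = np.load('demo_weights.npz', allow_pickle=True)
print(data['w'])           # plain array: loads directly
print(data['wpl'].item())  # the dict comes back as a 0-d object array; .item() unwraps it
```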
handsomezebra/ToD-BERT | [
"c4fddd7471758aa93e327f0bdd2326bcf6c9a559"
] | [
"utils/loss_function/masked_cross_entropy.py"
] | [
"import torch\nfrom torch.nn import functional\nfrom torch.autograd import Variable\nfrom utils.config import *\nimport torch.nn as nn\nimport numpy as np\n\ndef sequence_mask(sequence_length, max_len=None):\n if max_len is None:\n max_len = sequence_length.data.max()\n batch_size = sequence_length.size(0)\n seq_range = torch.arange(0, max_len).long()\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n seq_range_expand = Variable(seq_range_expand)\n if sequence_length.is_cuda:\n seq_range_expand = seq_range_expand.cuda()\n seq_length_expand = (sequence_length.unsqueeze(1)\n .expand_as(seq_range_expand))\n return seq_range_expand < seq_length_expand\n\ndef cross_entropy(logits, target):\n batch_size = logits.size(0)\n log_probs_flat = functional.log_softmax(logits)\n losses_flat = -torch.gather(log_probs_flat, dim=1, index=target)\n loss = losses_flat.sum() / batch_size\n return loss\n\ndef masked_cross_entropy(logits, target, length):\n \"\"\"\n Args:\n logits: A Variable containing a FloatTensor of size\n (batch, max_len, num_classes) which contains the\n unnormalized probability for each class.\n target: A Variable containing a LongTensor of size\n (batch, max_len) which contains the index of the true\n class for each corresponding step.\n length: A Variable containing a LongTensor of size (batch,)\n which contains the length of each data in a batch.\n\n Returns:\n loss: An average loss value masked by the length.\n \"\"\"\n if USE_CUDA:\n length = Variable(torch.LongTensor(length)).cuda()\n else:\n length = Variable(torch.LongTensor(length)) \n\n # logits_flat: (batch * max_len, num_classes)\n logits_flat = logits.view(-1, logits.size(-1)) ## -1 means infered from other dimentions\n # log_probs_flat: (batch * max_len, num_classes)\n log_probs_flat = functional.log_softmax(logits_flat, dim=1)\n # target_flat: (batch * max_len, 1)\n target_flat = target.view(-1, 1)\n # losses_flat: (batch * max_len, 1)\n losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)\n # losses: (batch, max_len)\n losses = losses_flat.view(*target.size())\n # mask: (batch, max_len)\n mask = sequence_mask(sequence_length=length, max_len=target.size(1)) \n losses = losses * mask.float()\n loss = losses.sum() / length.float().sum()\n return loss\n\ndef masked_binary_cross_entropy(logits, target, length):\n '''\n logits: (batch, max_len, num_class)\n target: (batch, max_len, num_class)\n '''\n if USE_CUDA:\n length = Variable(torch.LongTensor(length)).cuda()\n else:\n length = Variable(torch.LongTensor(length)) \n bce_criterion = nn.BCEWithLogitsLoss()\n loss = 0\n for bi in range(logits.size(0)):\n for i in range(logits.size(1)):\n if i < length[bi]:\n loss += bce_criterion(logits[bi][i], target[bi][i])\n loss = loss / length.float().sum()\n return loss\n\n\ndef masked_cross_entropy_(logits, target, length, take_log=False):\n if USE_CUDA:\n length = Variable(torch.LongTensor(length)).cuda()\n else:\n length = Variable(torch.LongTensor(length)) \n\n # logits_flat: (batch * max_len, num_classes)\n logits_flat = logits.view(-1, logits.size(-1)) ## -1 means infered from other dimentions\n if take_log:\n logits_flat = torch.log(logits_flat)\n # target_flat: (batch * max_len, 1)\n target_flat = target.view(-1, 1)\n # losses_flat: (batch * max_len, 1)\n losses_flat = -torch.gather(logits_flat, dim=1, index=target_flat)\n # losses: (batch, max_len)\n losses = losses_flat.view(*target.size())\n # mask: (batch, max_len)\n mask = sequence_mask(sequence_length=length, max_len=target.size(1)) 
\n losses = losses * mask.float()\n loss = losses.sum() / length.float().sum()\n return loss\n\ndef masked_coverage_loss(coverage, attention, length):\n if USE_CUDA:\n length = Variable(torch.LongTensor(length)).cuda()\n else:\n length = Variable(torch.LongTensor(length)) \n mask = sequence_mask(sequence_length=length) \n min_ = torch.min(coverage, attention)\n mask = mask.unsqueeze(2).expand_as(min_)\n min_ = min_ * mask.float()\n loss = min_.sum() / (len(length)*1.0)\n return loss\n\ndef masked_cross_entropy_for_slot(logits, target, mask, use_softmax=True):\n # print(\"logits\", logits)\n # print(\"target\", target)\n logits_flat = logits.view(-1, logits.size(-1)) ## -1 means infered from other dimentions\n # print(logits_flat.size())\n if use_softmax:\n log_probs_flat = functional.log_softmax(logits_flat, dim=1)\n else:\n log_probs_flat = logits_flat #torch.log(logits_flat)\n # print(\"log_probs_flat\", log_probs_flat)\n target_flat = target.view(-1, 1)\n # print(\"target_flat\", target_flat)\n losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)\n losses = losses_flat.view(*target.size()) # b * |s|\n losses = losses * mask.float()\n loss = losses.sum() / (losses.size(0)*losses.size(1))\n # print(\"loss inside\", loss)\n return loss\n\ndef masked_cross_entropy_for_value(logits, target, mask):\n # logits: b * |s| * m * |v|\n # target: b * |s| * m\n # mask: b * |s|\n logits_flat = logits.view(-1, logits.size(-1)) ## -1 means infered from other dimentions\n # print(logits_flat.size())\n log_probs_flat = torch.log(logits_flat)\n # print(\"log_probs_flat\", log_probs_flat)\n target_flat = target.view(-1, 1)\n # print(\"target_flat\", target_flat)\n losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)\n losses = losses_flat.view(*target.size()) # b * |s| * m\n loss = masking(losses, mask)\n return loss\n\ndef masking(losses, mask):\n mask_ = []\n batch_size = mask.size(0)\n max_len = losses.size(2)\n for si in range(mask.size(1)):\n seq_range = torch.arange(0, max_len).long()\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n if mask[:,si].is_cuda:\n seq_range_expand = seq_range_expand.cuda()\n seq_length_expand = mask[:, si].unsqueeze(1).expand_as(seq_range_expand)\n mask_.append( (seq_range_expand < seq_length_expand) )\n mask_ = torch.stack(mask_)\n mask_ = mask_.transpose(0, 1)\n if losses.is_cuda:\n mask_ = mask_.cuda()\n losses = losses * mask_.float()\n loss = losses.sum() / (mask_.sum().float())\n return loss\n\n\n\n"
] | [
[
"torch.LongTensor",
"torch.nn.functional.log_softmax",
"torch.min",
"torch.gather",
"torch.nn.BCEWithLogitsLoss",
"torch.log",
"torch.arange",
"torch.stack",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
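`sequence_mask` plus the elementwise multiply in `masked_cross_entropy` above amount to averaging the per-step losses over real tokens only. A minimal sketch with a hand-made loss matrix (values invented) showing that padded positions drop out:

```python
import torch

losses = torch.tensor([[0.5, 0.2, 0.3, 9.9],
                       [0.7, 9.9, 9.9, 9.9]])  # 9.9 marks padded steps
lengths = torch.tensor([3, 1])                 # true lengths of the two sequences

seq_range = torch.arange(losses.size(1)).unsqueeze(0)  # (1, max_len)
mask = seq_range < lengths.unsqueeze(1)                # (batch, max_len), True on real tokens

loss = (losses * mask.float()).sum() / lengths.float().sum()
print(loss)  # (0.5 + 0.2 + 0.3 + 0.7) / 4 = 0.425; the 9.9s never contribute
```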
lukeberry99/pycryptobot | [
"8e064acf0917e23b9683fd90c370174fa69794fe"
] | [
"pycryptobot.py"
] | [
"\"\"\"Python Crypto Bot consuming Coinbase Pro or Binance APIs\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport logging, os, random, sched, sys, time\n\nfrom models.PyCryptoBot import PyCryptoBot\nfrom models.AppState import AppState\nfrom models.Trading import TechnicalAnalysis\nfrom models.TradingAccount import TradingAccount\nfrom models.Telegram import Telegram\nfrom views.TradingGraphs import TradingGraphs\n\n# production: disable traceback\n#sys.tracebacklimit = 0\n\napp = PyCryptoBot()\nstate = AppState()\n\ns = sched.scheduler(time.time, time.sleep)\n\nconfig = {}\naccount = None\nif app.getLastAction() != None:\n state.last_action = app.getLastAction()\n\n account = TradingAccount(app)\n orders = account.getOrders(app.getMarket(), '', 'done')\n if len(orders) > 0:\n df = orders[orders.action == 'buy']\n df = df[-1:]\n\n if str(df.action.values[0]) == 'buy':\n state.last_buy_amount = float(df[df.action == 'buy']['size'])\n state.last_buy_price = float(df[df.action == 'buy']['price'])\n\n# if live trading is enabled\nelif app.isLive() == 1:\n # connectivity check\n if app.getTime() is None:\n raise ConnectionError('Unable to start the bot as your connection to the exchange is down. Please check your Internet connectivity!')\n\n account = TradingAccount(app)\n\n if account.getBalance(app.getBaseCurrency()) < account.getBalance(app.getQuoteCurrency()):\n state.last_action = 'SELL'\n elif account.getBalance(app.getBaseCurrency()) > account.getBalance(app.getQuoteCurrency()):\n state.last_action = 'BUY'\n\n if app.getExchange() == 'binance':\n if state.last_action == 'SELL' and account.getBalance(app.getQuoteCurrency()) < 0.001:\n raise Exception('Insufficient available funds to place sell order: ' + str(account.getBalance(app.getQuoteCurrency())) + ' < 0.1 ' + app.getQuoteCurrency() + \"\\nNote: A manual limit order places a hold on available funds.\")\n elif state.last_action == 'BUY' and account.getBalance(app.getBaseCurrency()) < 0.001:\n raise Exception('Insufficient available funds to place buy order: ' + str(account.getBalance(app.getBaseCurrency())) + ' < 0.1 ' + app.getBaseCurrency() + \"\\nNote: A manual limit order places a hold on available funds.\")\n \n elif app.getExchange() == 'coinbasepro':\n if state.last_action == 'SELL' and account.getBalance(app.getQuoteCurrency()) < 50:\n raise Exception('Insufficient available funds to place buy order: ' + str(account.getBalance(app.getQuoteCurrency())) + ' < 50 ' + app.getQuoteCurrency() + \"\\nNote: A manual limit order places a hold on available funds.\")\n elif state.last_action == 'BUY' and account.getBalance(app.getBaseCurrency()) < 0.001:\n raise Exception('Insufficient available funds to place sell order: ' + str(account.getBalance(app.getBaseCurrency())) + ' < 0.1 ' + app.getBaseCurrency() + \"\\nNote: A manual limit order places a hold on available funds.\")\n\n orders = account.getOrders(app.getMarket(), '', 'done')\n if len(orders) > 0:\n df = orders[-1:]\n\n if str(df.action.values[0]) == 'buy':\n state.last_action = 'BUY'\n state.last_buy_amount = float(df[df.action == 'buy']['size'])\n state.last_buy_price = float(df[df.action == 'buy']['price'])\n state.last_buy_value = float(df[df.action == 'buy']['value'])\n else:\n state.last_action = 'SELL'\n state.last_buy_price = 0.0\n\ndef executeJob(sc, app=PyCryptoBot(), state=AppState(), trading_data=pd.DataFrame()):\n \"\"\"Trading bot job which runs at a scheduled interval\"\"\"\n\n # connectivity check (only when 
running live)\n if app.isLive() and app.getTime() is None:\n print ('Your connection to the exchange has gone down, will retry in 1 minute!')\n \n # poll every 5 minute\n list(map(s.cancel, s.queue))\n s.enter(300, 1, executeJob, (sc, app, state))\n return\n\n # increment state.iterations\n state.iterations = state.iterations + 1\n\n if app.isSimulation() == 0:\n # retrieve the app.getMarket() data\n trading_data = app.getHistoricalData(app.getMarket(), app.getGranularity())\n else:\n if len(trading_data) == 0:\n return None\n\n # analyse the market data\n trading_dataCopy = trading_data.copy()\n ta = TechnicalAnalysis(trading_dataCopy)\n ta.addAll()\n df = ta.getDataFrame()\n\n if app.isSimulation() == 1:\n # with a simulation df_last will iterate through data\n df_last = df.iloc[state.iterations-1:state.iterations]\n else:\n # df_last contains the most recent entry\n df_last = df.tail(1)\n \n if len(df_last.index.format()) > 0:\n current_df_index = str(df_last.index.format()[0])\n else:\n current_df_index = state.last_df_index\n\n if app.getSmartSwitch() == 1 and app.getExchange() == 'binance' and app.getGranularity() == '1h' and app.is1hEMA1226Bull() is True and app.is6hEMA1226Bull() is True:\n print (\"*** smart switch from granularity '1h' (1 hour) to '15m' (15 min) ***\")\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + \" smart switch from granularity '1h' (1 hour) to '15m' (15 min)\")\n\n app.setGranularity('15m')\n list(map(s.cancel, s.queue))\n s.enter(5, 1, executeJob, (sc, app, state))\n\n elif app.getSmartSwitch() == 1 and app.getExchange() == 'coinbasepro' and app.getGranularity() == 3600 and app.is1hEMA1226Bull() is True and app.is6hEMA1226Bull() is True:\n print ('*** smart switch from granularity 3600 (1 hour) to 900 (15 min) ***')\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + \" smart switch from granularity 3600 (1 hour) to 900 (15 min)\")\n\n app.setGranularity(900)\n list(map(s.cancel, s.queue))\n s.enter(5, 1, executeJob, (sc, app, state))\n\n if app.getSmartSwitch() == 1 and app.getExchange() == 'binance' and app.getGranularity() == '15m' and app.is1hEMA1226Bull() is False and app.is6hEMA1226Bull() is False:\n print (\"*** smart switch from granularity '15m' (15 min) to '1h' (1 hour) ***\")\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + \" smart switch from granularity '15m' (15 min) to '1h' (1 hour)\")\n\n app.setGranularity('1h')\n list(map(s.cancel, s.queue))\n s.enter(5, 1, executeJob, (sc, app, state))\n\n elif app.getSmartSwitch() == 1 and app.getExchange() == 'coinbasepro' and app.getGranularity() == 900 and app.is1hEMA1226Bull() is False and app.is6hEMA1226Bull() is False:\n print (\"*** smart switch from granularity 900 (15 min) to 3600 (1 hour) ***\")\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + \" smart switch from granularity 900 (15 min) to 3600 (1 hour)\")\n\n app.setGranularity(3600)\n list(map(s.cancel, s.queue))\n s.enter(5, 1, executeJob, (sc, app, state))\n\n if app.getExchange() == 'binance' and 
str(app.getGranularity()) == '1d':\n if len(df) < 250:\n # data frame should have 250 rows, if not retry\n print('error: data frame length is < 250 (' + str(len(df)) + ')')\n logging.error('error: data frame length is < 250 (' + str(len(df)) + ')')\n list(map(s.cancel, s.queue))\n s.enter(300, 1, executeJob, (sc, app, state))\n else:\n if len(df) < 300:\n if app.isSimulation() == 0:\n # data frame should have 300 rows, if not retry\n print('error: data frame length is < 300 (' + str(len(df)) + ')')\n logging.error('error: data frame length is < 300 (' + str(len(df)) + ')')\n list(map(s.cancel, s.queue))\n s.enter(300, 1, executeJob, (sc, app, state))\n \n if len(df_last) > 0:\n now = datetime.today().strftime('%Y-%m-%d %H:%M:%S')\n\n if app.isSimulation() == 0:\n ticker = app.getTicker(app.getMarket())\n now = ticker[0] \n price = ticker[1]\n if price < df_last['low'].values[0] or price == 0:\n price = float(df_last['close'].values[0])\n else:\n price = float(df_last['close'].values[0])\n\n if price < 0.0001:\n raise Exception(app.getMarket() + ' is unsuitable for trading, quote price is less than 0.0001!')\n\n # technical indicators\n ema12gtema26 = bool(df_last['ema12gtema26'].values[0])\n ema12gtema26co = bool(df_last['ema12gtema26co'].values[0])\n goldencross = bool(df_last['goldencross'].values[0])\n macdgtsignal = bool(df_last['macdgtsignal'].values[0])\n macdgtsignalco = bool(df_last['macdgtsignalco'].values[0])\n ema12ltema26 = bool(df_last['ema12ltema26'].values[0])\n ema12ltema26co = bool(df_last['ema12ltema26co'].values[0])\n macdltsignal = bool(df_last['macdltsignal'].values[0])\n macdltsignalco = bool(df_last['macdltsignalco'].values[0])\n obv = float(df_last['obv'].values[0])\n obv_pc = float(df_last['obv_pc'].values[0])\n elder_ray_buy = bool(df_last['eri_buy'].values[0])\n elder_ray_sell = bool(df_last['eri_sell'].values[0])\n\n # if simulation interations < 200 set goldencross to true\n if app.isSimulation() == 1 and state.iterations < 200:\n goldencross = True\n\n # candlestick detection\n hammer = bool(df_last['hammer'].values[0])\n inverted_hammer = bool(df_last['inverted_hammer'].values[0])\n hanging_man = bool(df_last['hanging_man'].values[0])\n shooting_star = bool(df_last['shooting_star'].values[0])\n three_white_soldiers = bool(df_last['three_white_soldiers'].values[0])\n three_black_crows = bool(df_last['three_black_crows'].values[0])\n morning_star = bool(df_last['morning_star'].values[0])\n evening_star = bool(df_last['evening_star'].values[0])\n three_line_strike = bool(df_last['three_line_strike'].values[0])\n abandoned_baby = bool(df_last['abandoned_baby'].values[0])\n morning_doji_star = bool(df_last['morning_doji_star'].values[0])\n evening_doji_star = bool(df_last['evening_doji_star'].values[0])\n two_black_gapping = bool(df_last['two_black_gapping'].values[0])\n\n # criteria for a buy signal\n if ema12gtema26co is True \\\n and (macdgtsignal is True or app.disableBuyMACD()) \\\n and (goldencross is True or app.disableBullOnly()) \\\n and (obv_pc > -5 or app.disableBuyOBV()) \\\n and (elder_ray_buy is True or app.disableBuyElderRay()) \\\n and state.last_action != 'BUY':\n state.action = 'BUY'\n \n elif ema12gtema26 is True \\\n and macdgtsignalco is True \\\n and (goldencross is True or app.disableBullOnly()) \\\n and (obv_pc > -5 or app.disableBuyOBV()) \\\n and (elder_ray_buy is True or app.disableBuyElderRay()) \\\n and state.last_action != 'BUY':\n state.action = 'BUY'\n\n # criteria for a sell signal\n elif ema12ltema26co is True \\\n and 
(macdltsignal is True or app.disableBuyMACD()) \\\n and state.last_action not in ['', 'SELL']:\n state.action = 'SELL'\n # anything other than a buy or sell, just wait\n else:\n state.action = 'WAIT'\n\n # if disabled, do not buy within 3% of the dataframe close high\n if state.action == 'BUY' and app.disableBuyNearHigh() and (price > (df['close'].max() * 0.97)):\n state.action = 'WAIT'\n\n log_text = now + ' | ' + app.getMarket() + ' | ' + str(app.getGranularity()) + ' | Ignoring Buy Signal (price ' + str(price) + ' within 3% of high ' + str(df['close'].max()) + ')'\n print (log_text, \"\\n\")\n logging.warning(log_text)\n\n immediate_action = False\n\n if state.last_buy_amount > 0 and state.last_buy_price > 0 and price > 0 and state.last_action == 'BUY':\n # update last buy high\n if price > state.last_buy_high:\n state.last_buy_high = price\n\n if state.last_buy_high > 1:\n change_pcnt_high = ((price / state.last_buy_high) - 1) * 100\n else:\n change_pcnt_high = 0\n\n # buy and sell calculations\n buy_percent = app.getBuyPercent()\n buy_amount_quote = (buy_percent / 100) * state.last_buy_amount\n buy_fee = buy_amount_quote * app.getTakerFee()\n buy_filled = buy_amount_quote - buy_fee\n buy_amount_base = buy_filled / state.last_buy_price\n\n #print ('last_buy_price:', state.last_buy_price)\n #print ('buy_buy_percent:', buy_percent)\n #print ('buy_amount_quote:', buy_amount_quote)\n #print ('buy_fee:', buy_fee)\n #print ('buy_filled:', buy_filled)\n #print ('buy_amount_base:', buy_amount_base)\n\n sell_amount_quote = price * buy_amount_base\n sell_fee = round(sell_amount_quote * app.getTakerFee(), 2)\n sell_filled = sell_amount_quote - sell_fee\n\n #print ('price', price)\n #print ('sell_amount_quote:', sell_amount_quote)\n #print ('sell_fee:', sell_fee)\n #print ('sell_filled:', sell_filled)\n\n margin = (((sell_filled - state.last_buy_amount) / state.last_buy_amount) * 100)\n\n #print ('margin:', margin)\n\n # loss failsafe sell at fibonacci band\n if app.disableFailsafeFibonacciLow() is False and app.allowSellAtLoss() and app.sellLowerPcnt() is None and state.fib_low > 0 and state.fib_low >= float(price):\n state.action = 'SELL'\n state.last_action = 'BUY'\n immediate_action = True\n log_text = '! Loss Failsafe Triggered (Fibonacci Band: ' + str(state.fib_low) + ')'\n print (log_text, \"\\n\")\n logging.warning(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n # loss failsafe sell at trailing_stop_loss\n if app.allowSellAtLoss() and app.trailingStopLoss() != None and change_pcnt_high < app.trailingStopLoss():\n state.action = 'SELL'\n state.last_action = 'BUY'\n immediate_action = True\n log_text = '! Trailing Stop Loss Triggered (< ' + str(app.trailingStopLoss()) + '%)'\n print (log_text, \"\\n\")\n logging.warning(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n # loss failsafe sell at sell_lower_pcnt\n elif app.disableFailsafeLowerPcnt() is False and app.allowSellAtLoss() and app.sellLowerPcnt() != None and margin < app.sellLowerPcnt():\n state.action = 'SELL'\n state.last_action = 'BUY'\n immediate_action = True\n log_text = '! 
Loss Failsafe Triggered (< ' + str(app.sellLowerPcnt()) + '%)'\n print (log_text, \"\\n\")\n logging.warning(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n # profit bank at sell_upper_pcnt\n if app.disableProfitbankUpperPcnt() is False and app.sellUpperPcnt() != None and margin > app.sellUpperPcnt():\n state.action = 'SELL'\n state.last_action = 'BUY'\n immediate_action = True\n log_text = '! Profit Bank Triggered (> ' + str(app.sellUpperPcnt()) + '%)'\n print (log_text, \"\\n\")\n logging.warning(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n # profit bank when strong reversal detected\n if app.disableProfitbankReversal() is False and margin > 3 and obv_pc < 0 and macdltsignal is True:\n state.action = 'SELL'\n state.last_action = 'BUY'\n immediate_action = True\n log_text = '! Profit Bank Triggered (Strong Reversal Detected)'\n print (log_text, \"\\n\")\n logging.warning(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n # configuration specifies to not sell at a loss\n if state.action == 'SELL' and not app.allowSellAtLoss() and margin <= 0:\n state.action = 'WAIT'\n state.last_action = 'BUY'\n immediate_action = False\n log_text = '! Ignore Sell Signal (No Sell At Loss)'\n print (log_text, \"\\n\")\n logging.warning(log_text)\n\n # profit bank when strong reversal detected\n if app.sellAtResistance() is True and margin > 1 and price > 0 and price != ta.getTradeExit(price):\n state.action = 'SELL'\n state.last_action = 'BUY'\n immediate_action = True\n log_text = '! 
Profit Bank Triggered (Selling At Resistance)'\n print (log_text, \"\\n\")\n logging.warning(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled() and not (not app.allowSellAtLoss() and margin <= 0):\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n bullbeartext = ''\n if app.disableBullOnly() is True or (df_last['sma50'].values[0] == df_last['sma200'].values[0]):\n bullbeartext = ''\n elif goldencross is True:\n bullbeartext = ' (BULL)'\n elif goldencross is False:\n bullbeartext = ' (BEAR)'\n\n # polling is every 5 minutes (even for hourly intervals), but only process once per interval\n if (immediate_action is True or state.last_df_index != current_df_index):\n precision = 2\n\n if (price < 0.01):\n precision = 8\n\n price_text = 'Close: ' + str(app.truncate(price, precision))\n ema_text = app.compare(df_last['ema12'].values[0], df_last['ema26'].values[0], 'EMA12/26', precision)\n\n macd_text = ''\n if app.disableBuyMACD() is False:\n macd_text = app.compare(df_last['macd'].values[0], df_last['signal'].values[0], 'MACD', precision)\n\n obv_text = ''\n if app.disableBuyOBV() is False:\n obv_text = 'OBV: ' + str(app.truncate(df_last['obv'].values[0], 4)) + ' (' + str(app.truncate(df_last['obv_pc'].values[0], 2)) + '%)'\n\n state.eri_text = ''\n if app.disableBuyElderRay() is False:\n if elder_ray_buy is True:\n state.eri_text = 'ERI: buy | '\n elif elder_ray_sell is True:\n state.eri_text = 'ERI: sell | '\n else:\n state.eri_text = 'ERI: | '\n\n if hammer is True:\n log_text = '* Candlestick Detected: Hammer (\"Weak - Reversal - Bullish Signal - Up\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n if shooting_star is True:\n log_text = '* Candlestick Detected: Shooting Star (\"Weak - Reversal - Bearish Pattern - Down\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n if hanging_man is True:\n log_text = '* Candlestick Detected: Hanging Man (\"Weak - Continuation - Bearish Pattern - Down\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n if inverted_hammer is True:\n log_text = '* Candlestick Detected: Inverted Hammer (\"Weak - Continuation - Bullish Pattern - Up\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n \n if three_white_soldiers is True:\n log_text = '*** Candlestick Detected: Three White Soldiers (\"Strong - Reversal - Bullish Pattern - Up\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n if three_black_crows is True:\n log_text = '* Candlestick Detected: Three Black Crows (\"Strong - Reversal - Bearish Pattern - Down\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n if morning_star is True:\n log_text = '*** Candlestick Detected: Morning Star (\"Strong - Reversal - Bullish Pattern - Up\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n 
telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n if evening_star is True:\n log_text = '*** Candlestick Detected: Evening Star (\"Strong - Reversal - Bearish Pattern - Down\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n if three_line_strike is True:\n log_text = '** Candlestick Detected: Three Line Strike (\"Reliable - Reversal - Bullish Pattern - Up\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n if abandoned_baby is True:\n log_text = '** Candlestick Detected: Abandoned Baby (\"Reliable - Reversal - Bullish Pattern - Up\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n if morning_doji_star is True:\n log_text = '** Candlestick Detected: Morning Doji Star (\"Reliable - Reversal - Bullish Pattern - Up\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n if evening_doji_star is True:\n log_text = '** Candlestick Detected: Evening Doji Star (\"Reliable - Reversal - Bearish Pattern - Down\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n if two_black_gapping is True:\n log_text = '*** Candlestick Detected: Two Black Gapping (\"Reliable - Reversal - Bearish Pattern - Down\")'\n print (log_text, \"\\n\")\n logging.debug(log_text)\n\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') ' + log_text)\n\n ema_co_prefix = ''\n ema_co_suffix = ''\n if ema12gtema26co is True:\n ema_co_prefix = '*^ '\n ema_co_suffix = ' ^*'\n elif ema12ltema26co is True:\n ema_co_prefix = '*v '\n ema_co_suffix = ' v*' \n elif ema12gtema26 is True:\n ema_co_prefix = '^ '\n ema_co_suffix = ' ^'\n elif ema12ltema26 is True:\n ema_co_prefix = 'v '\n ema_co_suffix = ' v'\n\n macd_co_prefix = ''\n macd_co_suffix = ''\n if app.disableBuyMACD() is False:\n if macdgtsignalco is True:\n macd_co_prefix = '*^ '\n macd_co_suffix = ' ^* | '\n elif macdltsignalco is True:\n macd_co_prefix = '*v '\n macd_co_suffix = ' v* | '\n elif macdgtsignal is True:\n macd_co_prefix = '^ '\n macd_co_suffix = ' ^ | '\n elif macdltsignal is True:\n macd_co_prefix = 'v '\n macd_co_suffix = ' v | '\n\n obv_prefix = ''\n obv_suffix = ''\n if app.disableBuyOBV() is False:\n if float(obv_pc) > 0:\n obv_prefix = '^ '\n obv_suffix = ' ^ | '\n elif 
float(obv_pc) < 0:\n obv_prefix = 'v '\n obv_suffix = ' v | '\n\n if app.isVerbose() == 0:\n if state.last_action != '':\n output_text = current_df_index + ' | ' + app.getMarket() + bullbeartext + ' | ' + str(app.getGranularity()) + ' | ' + price_text + ' | ' + ema_co_prefix + ema_text + ema_co_suffix + ' | ' + macd_co_prefix + macd_text + macd_co_suffix + obv_prefix + obv_text + obv_suffix + state.eri_text + state.action + ' | Last Action: ' + state.last_action\n else:\n output_text = current_df_index + ' | ' + app.getMarket() + bullbeartext + ' | ' + str(app.getGranularity()) + ' | ' + price_text + ' | ' + ema_co_prefix + ema_text + ema_co_suffix + ' | ' + macd_co_prefix + macd_text + macd_co_suffix + obv_prefix + obv_text + obv_suffix + state.eri_text + state.action + ' '\n\n if state.last_action == 'BUY':\n if state.last_buy_amount > 0:\n margin_text = str(app.truncate(margin, 2)) + '%'\n else:\n margin_text = '0%'\n\n output_text += ' | ' + margin_text + ' (delta: ' + str(round(price - state.last_buy_price, 2)) + ')'\n\n logging.debug(output_text)\n print (output_text)\n \n if state.last_action == 'BUY':\n # display support, resistance and fibonacci levels\n logging.debug(output_text)\n print (ta.printSupportResistanceFibonacciLevels(price))\n\n else:\n logging.debug('-- Iteration: ' + str(state.iterations) + ' --' + bullbeartext)\n\n if state.last_action == 'BUY':\n if state.last_buy_amount > 0:\n margin_text = str(app.truncate(margin, 2)) + '%'\n else:\n margin_text = '0%'\n\n logging.debug('-- Margin: ' + margin_text + ' --') \n \n logging.debug('price: ' + str(app.truncate(price, precision)))\n logging.debug('ema12: ' + str(app.truncate(float(df_last['ema12'].values[0]), precision)))\n logging.debug('ema26: ' + str(app.truncate(float(df_last['ema26'].values[0]), precision)))\n logging.debug('ema12gtema26co: ' + str(ema12gtema26co))\n logging.debug('ema12gtema26: ' + str(ema12gtema26))\n logging.debug('ema12ltema26co: ' + str(ema12ltema26co))\n logging.debug('ema12ltema26: ' + str(ema12ltema26))\n logging.debug('sma50: ' + str(app.truncate(float(df_last['sma50'].values[0]), precision)))\n logging.debug('sma200: ' + str(app.truncate(float(df_last['sma200'].values[0]), precision)))\n logging.debug('macd: ' + str(app.truncate(float(df_last['macd'].values[0]), precision)))\n logging.debug('signal: ' + str(app.truncate(float(df_last['signal'].values[0]), precision)))\n logging.debug('macdgtsignal: ' + str(macdgtsignal))\n logging.debug('macdltsignal: ' + str(macdltsignal))\n logging.debug('obv: ' + str(obv))\n logging.debug('obv_pc: ' + str(obv_pc))\n logging.debug('action: ' + state.action)\n\n # informational output on the most recent entry \n print('')\n print('================================================================================')\n txt = ' Iteration : ' + str(state.iterations) + bullbeartext\n print('|', txt, (' ' * (75 - len(txt))), '|')\n txt = ' Timestamp : ' + str(df_last.index.format()[0])\n print('|', txt, (' ' * (75 - len(txt))), '|')\n print('--------------------------------------------------------------------------------')\n txt = ' Close : ' + str(app.truncate(price, precision))\n print('|', txt, (' ' * (75 - len(txt))), '|')\n txt = ' EMA12 : ' + str(app.truncate(float(df_last['ema12'].values[0]), precision))\n print('|', txt, (' ' * (75 - len(txt))), '|')\n txt = ' EMA26 : ' + str(app.truncate(float(df_last['ema26'].values[0]), precision))\n print('|', txt, (' ' * (75 - len(txt))), '|') \n txt = ' Crossing Above : ' + str(ema12gtema26co)\n print('|', txt, (' 
' * (75 - len(txt))), '|')\n txt = ' Currently Above : ' + str(ema12gtema26)\n print('|', txt, (' ' * (75 - len(txt))), '|')\n txt = ' Crossing Below : ' + str(ema12ltema26co)\n print('|', txt, (' ' * (75 - len(txt))), '|')\n txt = ' Currently Below : ' + str(ema12ltema26)\n print('|', txt, (' ' * (75 - len(txt))), '|')\n\n if (ema12gtema26 is True and ema12gtema26co is True):\n txt = ' Condition : EMA12 is currently crossing above EMA26'\n elif (ema12gtema26 is True and ema12gtema26co is False):\n txt = ' Condition : EMA12 is currently above EMA26 and has crossed over'\n elif (ema12ltema26 is True and ema12ltema26co is True):\n txt = ' Condition : EMA12 is currently crossing below EMA26'\n elif (ema12ltema26 is True and ema12ltema26co is False):\n txt = ' Condition : EMA12 is currently below EMA26 and has crossed over'\n else:\n txt = ' Condition : -'\n print('|', txt, (' ' * (75 - len(txt))), '|')\n\n txt = ' SMA20 : ' + str(app.truncate(float(df_last['sma20'].values[0]), precision))\n print('|', txt, (' ' * (75 - len(txt))), '|')\n txt = ' SMA200 : ' + str(app.truncate(float(df_last['sma200'].values[0]), precision))\n print('|', txt, (' ' * (75 - len(txt))), '|')\n\n print('--------------------------------------------------------------------------------')\n txt = ' MACD : ' + str(app.truncate(float(df_last['macd'].values[0]), precision))\n print('|', txt, (' ' * (75 - len(txt))), '|')\n txt = ' Signal : ' + str(app.truncate(float(df_last['signal'].values[0]), precision))\n print('|', txt, (' ' * (75 - len(txt))), '|')\n txt = ' Currently Above : ' + str(macdgtsignal)\n print('|', txt, (' ' * (75 - len(txt))), '|')\n txt = ' Currently Below : ' + str(macdltsignal)\n print('|', txt, (' ' * (75 - len(txt))), '|')\n\n if (macdgtsignal is True and macdgtsignalco is True):\n txt = ' Condition : MACD is currently crossing above Signal'\n elif (macdgtsignal is True and macdgtsignalco is False):\n txt = ' Condition : MACD is currently above Signal and has crossed over'\n elif (macdltsignal is True and macdltsignalco is True):\n txt = ' Condition : MACD is currently crossing below Signal'\n elif (macdltsignal is True and macdltsignalco is False):\n txt = ' Condition : MACD is currently below Signal and has crossed over'\n else:\n txt = ' Condition : -'\n print('|', txt, (' ' * (75 - len(txt))), '|')\n\n print('--------------------------------------------------------------------------------')\n txt = ' Action : ' + state.action\n print('|', txt, (' ' * (75 - len(txt))), '|')\n print('================================================================================')\n if state.last_action == 'BUY':\n txt = ' Margin : ' + margin_text\n print('|', txt, (' ' * (75 - len(txt))), '|')\n print('================================================================================')\n\n # if a buy signal\n if state.action == 'BUY': \n state.last_buy_price = price\n state.last_buy_high = state.last_buy_price\n\n state.buy_count = state.buy_count + 1\n fee = float(price) * app.getTakerFee()\n price_incl_fees = float(price) + fee\n state.buy_sum = state.buy_sum + price_incl_fees\n\n # if live\n if app.isLive() == 1:\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') BUY at ' + price_text)\n\n if app.isVerbose() == 0:\n logging.info(current_df_index + ' | ' + app.getMarket() + ' ' + str(app.getGranularity()) + ' | ' + price_text + ' | BUY')\n print 
(\"\\n\", current_df_index, '|', app.getMarket(), str(app.getGranularity()), '|', price_text, '| BUY', \"\\n\") \n else:\n print('--------------------------------------------------------------------------------')\n print('| *** Executing LIVE Buy Order *** |')\n print('--------------------------------------------------------------------------------')\n \n # display balances\n print (app.getBaseCurrency(), 'balance before order:', account.getBalance(app.getBaseCurrency()))\n print (app.getQuoteCurrency(), 'balance before order:', account.getBalance(app.getQuoteCurrency()))\n\n # execute a live market buy\n state.last_buy_amount = float(account.getBalance(app.getQuoteCurrency()))\n resp = app.marketBuy(app.getMarket(), state.last_buy_amount, app.getBuyPercent())\n logging.info(resp)\n\n # display balances\n print (app.getBaseCurrency(), 'balance after order:', account.getBalance(app.getBaseCurrency()))\n print (app.getQuoteCurrency(), 'balance after order:', account.getBalance(app.getQuoteCurrency()))\n\n # if not live\n else:\n # TODO: calculate buy amount from dummy account\n state.last_buy_amount = 1000\n\n state.last_buy_price = price\n\n if app.isVerbose() == 0:\n logging.info(current_df_index + ' | ' + app.getMarket() + ' ' + str(app.getGranularity()) + ' | ' + price_text + ' | BUY')\n print (\"\\n\", current_df_index, '|', app.getMarket(), str(app.getGranularity()), '|', price_text, '| BUY')\n\n bands = ta.getFibonacciRetracementLevels(float(price)) \n print (' Fibonacci Retracement Levels:', str(bands))\n ta.printSupportResistanceLevel(float(price))\n\n if len(bands) >= 1 and len(bands) <= 2:\n if len(bands) == 1:\n first_key = list(bands.keys())[0]\n if first_key == 'ratio1':\n state.fib_low = 0\n state.fib_high = bands[first_key]\n if first_key == 'ratio1_618':\n state.fib_low = bands[first_key]\n state.fib_high = bands[first_key] * 2\n else:\n state.fib_low = bands[first_key]\n\n elif len(bands) == 2:\n first_key = list(bands.keys())[0]\n second_key = list(bands.keys())[1]\n state.fib_low = bands[first_key] \n state.fib_high = bands[second_key]\n \n else:\n print('--------------------------------------------------------------------------------')\n print('| *** Executing TEST Buy Order *** |')\n print('--------------------------------------------------------------------------------')\n\n if app.shouldSaveGraphs() == 1:\n tradinggraphs = TradingGraphs(ta)\n ts = datetime.now().timestamp()\n filename = app.getMarket() + '_' + str(app.getGranularity()) + '_buy_' + str(ts) + '.png'\n tradinggraphs.renderEMAandMACD(len(trading_data), 'graphs/' + filename, True)\n\n # if a sell signal\n elif state.action == 'SELL':\n state.sell_count = state.sell_count + 1\n fee = float(price) * app.getTakerFee()\n price_incl_fees = float(price) - fee\n state.sell_sum = state.sell_sum + price_incl_fees\n\n # if live\n if app.isLive() == 1:\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send(app.getMarket() + ' (' + str(app.getGranularity()) + ') SELL at ' + price_text + ' (margin: ' + margin_text + ', (delta: ' + str(round(price - state.last_buy_price, 2)) + ')')\n\n if app.isVerbose() == 0:\n logging.info(current_df_index + ' | ' + app.getMarket() + ' ' + str(app.getGranularity()) + ' | ' + price_text + ' | SELL')\n print (\"\\n\", current_df_index, '|', app.getMarket(), str(app.getGranularity()), '|', price_text, '| SELL')\n\n bands = ta.getFibonacciRetracementLevels(float(price)) \n print (' 
Fibonacci Retracement Levels:', str(bands), \"\\n\") \n\n if len(bands) >= 1 and len(bands) <= 2:\n if len(bands) == 1:\n first_key = list(bands.keys())[0]\n if first_key == 'ratio1':\n state.fib_low = 0\n state.fib_high = bands[first_key]\n if first_key == 'ratio1_618':\n state.fib_low = bands[first_key]\n state.fib_high = bands[first_key] * 2\n else:\n state.fib_low = bands[first_key]\n\n elif len(bands) == 2:\n first_key = list(bands.keys())[0]\n second_key = list(bands.keys())[1]\n state.fib_low = bands[first_key] \n state.fib_high = bands[second_key]\n\n else:\n print('--------------------------------------------------------------------------------')\n print('| *** Executing LIVE Sell Order *** |')\n print('--------------------------------------------------------------------------------')\n\n # display balances\n print (app.getBaseCurrency(), 'balance before order:', account.getBalance(app.getBaseCurrency()))\n print (app.getQuoteCurrency(), 'balance before order:', account.getBalance(app.getQuoteCurrency()))\n\n # execute a live market sell\n resp = app.marketSell(app.getMarket(), float(account.getBalance(app.getBaseCurrency())), app.getSellPercent())\n logging.info(resp)\n\n # display balances\n print (app.getBaseCurrency(), 'balance after order:', account.getBalance(app.getBaseCurrency()))\n print (app.getQuoteCurrency(), 'balance after order:', account.getBalance(app.getQuoteCurrency()))\n\n # if not live\n else:\n if app.isVerbose() == 0:\n buy_sell_diff = round(np.subtract(price, state.last_buy_price), precision)\n\n sell_percent = app.getSellPercent()\n sell_amount_quote = (sell_percent / 100) * (buy_amount_base * price)\n sell_fee = sell_amount_quote * app.getTakerFee()\n sell_filled = sell_amount_quote - sell_fee\n\n #print ('sell_percent:', sell_percent)\n #print ('sell_amount_quote:', sell_amount_quote)\n #print ('sell_fee:', sell_fee)\n #print ('sell_filled:', sell_filled)\n #print ('sell_amount_base:', sell_amount_base)\n\n margin = (((sell_filled - buy_amount_quote) / buy_amount_quote) * 100)\n\n #print ('margin:', margin)\n\n if price > 0:\n margin_text = str(app.truncate(margin, 2)) + '%'\n else:\n margin_text = '0%'\n\n logging.info(current_df_index + ' | ' + app.getMarket() + ' ' + str(app.getGranularity()) + ' | SELL | ' + str(price) + ' | BUY | ' + str(state.last_buy_price) + ' | DIFF | ' + str(buy_sell_diff) + ' | MARGIN NO FEES | ' + margin_text + ' | MARGIN FEES | ' + str(sell_fee))\n print (\"\\n\", current_df_index, '|', app.getMarket(), str(app.getGranularity()), '| SELL |', str(price), '| BUY |', str(state.last_buy_price), '| DIFF |', str(buy_sell_diff) , '| MARGIN NO FEES |', margin_text, '| MARGIN FEES |', str(round(sell_fee, 2)), \"\\n\") \n\n else:\n print('--------------------------------------------------------------------------------')\n print('| *** Executing TEST Sell Order *** |')\n print('--------------------------------------------------------------------------------')\n\n if app.shouldSaveGraphs() == 1:\n tradinggraphs = TradingGraphs(ta)\n ts = datetime.now().timestamp()\n filename = app.getMarket() + '_' + str(app.getGranularity()) + '_sell_' + str(ts) + '.png'\n tradinggraphs.renderEMAandMACD(len(trading_data), 'graphs/' + filename, True)\n\n # reset values after buy\n state.last_buy_price = 0\n state.last_buy_amount = 0\n state.last_buy_price = 0\n state.last_buy_high = 0\n\n # last significant action\n if state.action in [ 'BUY', 'SELL' ]:\n state.last_action = state.action\n \n state.last_df_index = str(df_last.index.format()[0])\n\n if 
state.iterations == len(df):\n print (\"\\nSimulation Summary\\n\")\n\n if state.buy_count > state.sell_count and app.allowSellAtLoss() == 1:\n fee = price * app.getTakerFee()\n last_price_minus_fees = price - fee\n state.sell_sum = state.sell_sum + last_price_minus_fees\n state.sell_count = state.sell_count + 1\n\n elif state.buy_count > state.sell_count and app.allowSellAtLoss() == 0:\n print (' Note : \"sell at loss\" is disabled and you have an open trade, if the margin')\n print (' result below is negative it will assume you sold at the end of the')\n print (' simulation which may not be ideal. Try setting --sellatloss 1', \"\\n\")\n\n print (' Buy Count :', state.buy_count)\n print (' Sell Count :', state.sell_count, \"\\n\")\n\n if state.sell_count > 0:\n print (' Margin :', str(app.truncate((((state.sell_sum - state.buy_sum) /state.sell_sum) * 100), 2)) + '%', \"\\n\")\n\n print (' ** non-live simulation, assuming highest fees', \"\\n\")\n\n else:\n print (now, '|', app.getMarket() + bullbeartext, '|', str(app.getGranularity()), '| Current Price:', price)\n\n # decrement ignored iteration\n state.iterations = state.iterations - 1\n\n # if live\n if not app.disableTracker() and app.isLive() == 1:\n # update order tracker csv\n if app.getExchange() == 'binance':\n account.saveTrackerCSV(app.getMarket())\n elif app.getExchange() == 'coinbasepro':\n account.saveTrackerCSV()\n\n if app.isSimulation() == 1:\n if state.iterations < 300:\n if app.simuluationSpeed() in [ 'fast', 'fast-sample' ]:\n # fast processing\n executeJob(sc, app, state, trading_data)\n else:\n # slow processing\n list(map(s.cancel, s.queue))\n s.enter(1, 1, executeJob, (sc, app, state, trading_data))\n\n else:\n # poll every 2 minutes\n list(map(s.cancel, s.queue))\n s.enter(120, 1, executeJob, (sc, app, state))\n\ndef main(init=True):\n try:\n # initialise logging\n logging.basicConfig(filename=app.getLogFile(), format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filemode='a', level=logging.DEBUG)\n\n if init:\n # telegram\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n if app.getExchange() == 'coinbasepro':\n telegram.send('Starting Coinbase Pro bot for ' + app.getMarket() + ' using granularity ' + str(app.getGranularity()))\n elif app.getExchange() == 'binance':\n telegram.send('Starting Binance bot for ' + app.getMarket() + ' using granularity ' + str(app.getGranularity()))\n\n if init or app.isSimulation() == 1:\n # initialise and start application\n trading_data = app.startApp(account, state.last_action, banner=init)\n\n # run the first job immediately after starting\n if app.isSimulation() == 1:\n executeJob(s, app, state, trading_data)\n else:\n executeJob(s, app, state)\n\n s.run()\n\n # catches a keyboard break of app, exits gracefully\n except KeyboardInterrupt:\n print(datetime.now(), 'closed')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n except(BaseException, Exception) as e:\n # catch all not managed exceptions and send a Telegram message if configured\n telegram = None\n\n if not app.disableTelegram() and app.isTelegramEnabled():\n telegram = Telegram(app.getTelegramToken(), app.getTelegramClientId())\n telegram.send('Bot for ' + app.getMarket() + ' got an exception: ' + repr(e))\n\n print(repr(e))\n\n # Wait 1 second and try to relaunch application\n time.sleep(1)\n print('Restarting application after exception...')\n\n if telegram:\n telegram.send('Auto restarting bot for 
' + app.getMarket() + ' after exception')\n\n main(init=False)\n\nmain()"
] | [
[
"numpy.subtract",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
JAOP1/pyristic | [
"ae3c1151640d05e144535c6c95c286c5c6a9a817"
] | [
"pyristic/heuristic/EvolutionStrategy_search.py"
] | [
"from pyristic.utils.operators import selection, mutation, crossover\nfrom pyristic.utils.helpers import function_type, ContinuosFixer, EvolutionStrategyConfig\nfrom tqdm import tqdm\nimport numpy as np\n\n__all__= ['EvolutionStrategy']\n\n\nclass EvolutionStrategy:\n def __init__(self, function: function_type,\\\n decision_variables:int,\\\n constraints:list=[],\\\n bounds: list=[],\\\n config = EvolutionStrategyConfig()):\n \"\"\"\n ------------------------------------------------------\n Description:\n Initializing every variable necessary to the search.\n Arguments: \n - Function: Objective function to minimize.\n - Constraints: Constraints to be a feasible solution.\n - Bounds: bound for every variable, this should be a matrix 2 x N\n where N is the variables number. The first element is lowert limit \n and another one is the upper limit. \n ------------------------------------------------------ \n \"\"\"\n\n self.f = function\n self.Constraints = constraints\n self.Bounds = bounds\n self.Decision_variables = decision_variables\n \n #Configuration.\n self._mutation_operator = config.mutation_op if config.mutation_op != None else mutation.sigma_mutator()\n self._crossover_operator = config.cross_op if config.cross_op != None else crossover.discrete_crossover()\n self._survivor_selector = config.survivor_selector if config.survivor_selector != None else selection.merge_selector()\n self._fixer = config.fixer if config.fixer != None else ContinuosFixer(self.Bounds)\n self._adaptive_crossover_operator = config.adaptive_crossover_op if config.adaptive_crossover_op != None else crossover.intermediate_crossover()\n self._adaptive_mutation_operator = config.adaptive_mutation_op if config.adaptive_mutation_op != None else mutation.mult_sigma_adaptive_mutator(self.Decision_variables)\n\n #Search information.\n self.logger = {}\n self.logger['best_individual'] = None\n self.logger['best_f'] = None\n self.logger['current_iter'] = None\n self.logger['total_iter'] = None\n\n def __str__(self):\n printable = \"Evolution Strategy search: \\n F_a(X) = {} \\n X = {} \\n \".format(self.logger['best_f'], self.logger['best_individual'])\n first = True\n \n for i in range(len(self.Constraints)):\n if self.Constraints[i].__doc__ != None:\n \n if first:\n first = False\n printable += \"Constraints: \\n \"\n \n self.Constraints[i](self.logger['best_individual'])\n printable += \"{} \\n\".format( self.Constraints[i].__doc__)\n \n return printable\n\n def optimize(self, generations: int ,\\\n population_size: int,\\\n offspring_size: int,\\\n eps_sigma: float=0.001,\\\n verbose=True,\\\n **kwargs) -> None:\n \"\"\"\n ------------------------------------------------------\n Description:\n The main function to find the best solution using tabu search.\n Arguments:\n - generations: integer that represent the total iterations.\n - population_size: the population that \n ------------------------------------------------------\n \"\"\"\n #Reset global solution.\n self.logger['best_individual'] = None\n self.logger['best_f'] = None\n self.logger['current_iter'] = 0\n self.logger['total_iter'] = generations\n self.logger['parent_population_size'] = population_size\n self.logger['offspring_population_size'] = offspring_size \n #offspring_size//=2\n self.logger['parent_population_x']= self.initialize_population(**kwargs)\n self.logger['parent_population_sigma'] = self.initialize_step_weights(eps_sigma,**kwargs)\n self.logger['parent_population_f'] = np.apply_along_axis( self.f ,\\\n 1,\\\n 
self.logger['parent_population_x']) \n\n try:\n for g in tqdm(range(generations), disable = not verbose):\n \n\n #Crossover.\n first_parent_indices, second_parent_indices = self.get_pairs(**kwargs)\n self.logger['offspring_population_x'] = self.crossover_operator( first_parent_indices,\\\n second_parent_indices,\\\n **kwargs)\n\n self.logger['offspring_population_sigma'] = self.adaptive_crossover( first_parent_indices,\\\n second_parent_indices,\\\n **kwargs)\n #mutate.\n self.logger['offspring_population_sigma'] = self.adaptive_mutation(**kwargs)\n self.logger['offspring_population_x'] = self.mutation_operator(**kwargs)\n \n #Fixing solutions and getting aptitude.\n f_offspring = []\n for i in range(len(self.logger['offspring_population_x'])):\n if self.is_invalid(self.logger['offspring_population_x'][i]):\n self.logger['offspring_population_x'][i] = self.fixer(i)\n f_offspring.append(self.f(self.logger['offspring_population_x'][i]))\n\n self.logger['offspring_population_f'] = np.array(f_offspring)\n \n next_generation = self.survivor_selection(**kwargs)\n self.logger['parent_population_x'] = next_generation['parent_population_x']\n self.logger['parent_population_sigma'] = next_generation['parent_population_sigma']\n self.logger['parent_population_f'] = next_generation['parent_population_f']\n self.logger['current_iter'] += 1\n\n except KeyboardInterrupt:\n print(\"Interrupted, saving best solution found so far.\")\n \n ind = np.argmin(self.logger['parent_population_f'])\n\n self.logger['best_individual'] = self.logger['parent_population_x'][ind]\n self.logger['best_f'] = self.logger['parent_population_f'][ind]\n\n def initialize_step_weights(self, eps_sigma:float, **kwargs) -> np.ndarray :\n steps = np.random.uniform(0,1, size=(self.logger['parent_population_size'], self._adaptive_mutation_operator.length))\n return np.maximum(steps,eps_sigma)\n\n def initialize_population(self, **kwargs) -> np.ndarray:\n \"\"\"\n ------------------------------------------------------\n Description:\n How to distribute the population. 
This function creates a\n            population using a uniform distribution.\n            This should return a matrix of size (M x N)\n            where M is the population size and N is the number of \n            variables.\n        Arguments:\n            - size_: integer n, where n is the number of decision variables of the problem.\n        ------------------------------------------------------\n        \"\"\"\n        return np.random.uniform( self.Bounds[0], self.Bounds[1], size=(self.logger['parent_population_size'],self.Decision_variables))\n\n    def fixer(self, ind:int) -> np.ndarray:\n        \"\"\"\n        ------------------------------------------------------\n        Description:\n            Function which helps to move a solution into a valid region.\n        Arguments:\n            - ind: index of individual.\n        ------------------------------------------------------\n        \"\"\"\n        return self._fixer(self.logger['offspring_population_x'], ind)\n    \n    def crossover_operator(self, parent_ind1: np.ndarray,\\\n                          parent_ind2: np.ndarray,\\\n                          **kwargs) -> np.ndarray:\n        return self._crossover_operator(self.logger['parent_population_x'],\\\n                                        parent_ind1,\\\n                                        parent_ind2)\n\n    def adaptive_crossover(self, parent_ind1: np.ndarray,\\\n                          parent_ind2: np.ndarray,\\\n                          **kwargs) -> np.ndarray:\n        return self._adaptive_crossover_operator( self.logger['parent_population_sigma'],\\\n                                                  parent_ind1,\\\n                                                  parent_ind2)\n\n    def mutation_operator(self, **kwargs) -> np.ndarray:\n        \"\"\"\n        ------------------------------------------------------\n        Description:\n            The current population is updated by a specific change\n            using adaptive control. \n            This function should mutate the population.\n        ------------------------------------------------------\n        \"\"\"\n        return self._mutation_operator(self.logger['offspring_population_x'],\\\n                                       self.logger['offspring_population_sigma'])\n\n    def adaptive_mutation(self, **kwargs) -> np.ndarray:\n        return self._adaptive_mutation_operator(self.logger['offspring_population_sigma'])\n\n    def survivor_selection(self,**kwargs) -> dict:\n        individuals = {}\n        individuals['parent_population_x'] = [self.logger['parent_population_x'],\\\n                                              self.logger['offspring_population_x']]\n        individuals['parent_population_sigma'] = [self.logger['parent_population_sigma'],\\\n                                                  self.logger['offspring_population_sigma']]\n\n        return self._survivor_selector( self.logger['parent_population_f'],\\\n                                        self.logger['offspring_population_f'],\\\n                                        individuals)\n    #-----------------------------------------------------\n    #Private functions.\n    #-----------------------------------------------------\n    def is_invalid(self, x : np.ndarray) -> bool:\n        \"\"\"\n        ------------------------------------------------------\n        Description:\n            Check if the current solution is invalid. \n        ------------------------------------------------------\n        \"\"\"\n        for constraint in self.Constraints:\n            if not constraint(x):\n                return True\n        return False\n    \n    def get_pairs(self, **kwargs ):\n        parent_ind1 = np.random.randint(self.logger['parent_population_size'], size=(self.logger['offspring_population_size'],))\n        parent_ind2 = np.random.randint(self.logger['parent_population_size'], size=(self.logger['offspring_population_size'],))\n        \n        return parent_ind1,parent_ind2\n"
] | [
[
"numpy.maximum",
"numpy.apply_along_axis",
"numpy.argmin",
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HowardHu97/ZOOpt | [
"01568e8e6b0e65ac310d362af2da5245ac375e53"
] | [
"example/simple_functions/simple_function.py"
] | [
"\"\"\"\nObjective functions can be implemented in this file.\n\nAuthor:\n Yu-Ren Liu\n\"\"\"\n\nfrom random import Random\nfrom zoopt.dimension import Dimension\nimport numpy as np\n\n\nclass SetCover:\n \"\"\"\n set cover problem for discrete optimization\n this problem has some extra initialization tasks, thus we define this problem as a class\n \"\"\"\n __weight = None\n __subset = None\n\n def __init__(self):\n self.__weight = [0.8356, 0.5495, 0.4444, 0.7269, 0.9960, 0.6633, 0.5062, 0.8429, 0.1293, 0.7355,\n 0.7979, 0.2814, 0.7962, 0.1754, 0.0267, 0.9862, 0.1786, 0.5884, 0.6289, 0.3008]\n self.__subset = []\n self.__subset.append([0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0])\n self.__subset.append([0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0])\n self.__subset.append([1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0])\n self.__subset.append([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0])\n self.__subset.append([1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1])\n self.__subset.append([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0])\n self.__subset.append([0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0])\n self.__subset.append([0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0])\n self.__subset.append([0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0])\n self.__subset.append([0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1])\n self.__subset.append([0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0])\n self.__subset.append([0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1])\n self.__subset.append([1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1])\n self.__subset.append([1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1])\n self.__subset.append([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])\n self.__subset.append([1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0])\n self.__subset.append([1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1])\n self.__subset.append([0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1])\n self.__subset.append([0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0])\n self.__subset.append([0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1])\n\n def fx(self, solution):\n \"\"\"\n Objective function.\n\n :param solution: a Solution object\n :return: the value of f(x)\n \"\"\"\n x = solution.get_x()\n allweight = 0\n countw = 0\n for i in range(len(self.__weight)):\n allweight += self.__weight[i]\n\n dims = []\n for i in range(len(self.__subset[0])):\n dims.append(False)\n\n for i in range(len(self.__subset)):\n if x[i] == 1:\n countw += self.__weight[i]\n for j in range(len(self.__subset[i])):\n if self.__subset[i][j] == 1:\n dims[j] = True\n full = True\n for i in range(len(dims)):\n if dims[i] is False:\n full = False\n\n if full is False:\n countw += 
allweight\n\n        return countw\n\n    @property\n    def dim(self):\n        \"\"\"\n        Dimension of set cover problem.\n        :return: Dimension instance\n        \"\"\"\n        dim_size = 20\n        dim_regs = [[0, 1]] * dim_size\n        dim_tys = [False] * dim_size\n        return Dimension(dim_size, dim_regs, dim_tys)\n\n\ndef sphere(solution):\n    \"\"\"\n    Sphere function for continuous optimization\n    \"\"\"\n    x = solution.get_x()\n    value = sum([(i-0.2)*(i-0.2) for i in x])\n    return value\n\n\ndef sphere_mixed(solution):\n    \"\"\"\n    Sphere function for mixed optimization\n    \"\"\"\n    x = solution.get_x()\n    value = sum([i*i for i in x])\n    return value\n\n\ndef sphere_discrete_order(solution):\n    \"\"\"\n    Sphere function for discrete optimization with ordered values\n    \"\"\"\n    a = 0\n    rd = Random()\n    x = solution.get_x()\n    value = sum([(i-2)*(i-2) for i in x])\n    return value\n\n\ndef ackley(solution):\n    \"\"\"\n    Ackley function for continuous optimization\n    \"\"\"\n    x = solution.get_x()\n    bias = 0.2\n    ave_seq = sum([(i - bias) * (i - bias) for i in x]) / len(x)\n    ave_cos = sum([np.cos(2.0*np.pi*(i-bias)) for i in x]) / len(x)\n    value = -20 * np.exp(-0.2 * np.sqrt(ave_seq)) - np.exp(ave_cos) + 20.0 + np.e\n    return value\n\n\ndef ackley_noise_creator(mu, sigma):\n    \"\"\"\n    Ackley function under noise\n    \"\"\"\n    return lambda solution: ackley(solution) + np.random.normal(mu, sigma, 1)\n\n\n\n\n\n\n"
] | [
[
"numpy.random.normal",
"numpy.exp",
"numpy.cos",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
muhammadtarek/excelify | [
"9f9a5b367df5a2714bd64ffa7c0e83a3a721b405"
] | [
"api/Code/ModelVerifier.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom keras.utils import np_utils\nfrom keras.models import load_model\n\nfrom api.Code.ModelProcessing import ModelProcessing\n\n\nclass ModelVerifier(ModelProcessing):\n def __init__(self):\n super(ModelVerifier,self).__init__()\n self.test = pd.read_csv(self.test_path)\n self.mappings = self.load_char_mappings(self.mappings_path)\n self.X_test, self.y_test, self.test_true_classes = self.preprocess_data(self.test)\n self.model = load_model(\"../Classification Model/model.h5\")\n\n\n\n def plot_images(self,images, cls_true, cls_pred=None):\n assert len(images) == len(cls_true) == 16\n\n # Create figure with 3x3 sub-plots.\n fig, axes = plt.subplots(4, 4)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n # Plot image.\n ax.imshow(images[i].reshape(self.img_size, self.img_size), cmap='binary')\n\n # Show true and predicted classes.\n if cls_pred is None:\n xlabel = \"True: {0}\".format(cls_true[i])\n else:\n xlabel = \"True: {0}, Pred: {1}\".format(cls_true[i], cls_pred[i])\n\n # Show the classes as the label on the x-axis.\n ax.set_xlabel(xlabel)\n\n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n\n plt.show()\n\n def plot_example_errors(self,cls_pred):\n\n # cls_pred is an array of the predicted class-number for\n # all images in the test-set.\n\n # Boolean array whether the predicted class is incorrect.\n incorrect = (cls_pred != self.test_true_classes)\n\n # Get the images from the test-set that have been\n # incorrectly classified.\n images = self.X_test[incorrect]\n\n # Get the predicted classes for those images.\n cls_pred = cls_pred[incorrect]\n\n # Get the true classes for those images.\n cls_true = self.test_true_classes[incorrect]\n\n true_classes = []\n for i in cls_true:\n true_classes.append(chr(self.mappings.get(i)))\n\n pred_classes = []\n for i in cls_pred:\n pred_classes.append(chr(self.mappings.get(i)))\n\n # Plot the first 9 images.\n self.plot_images(images=images[0:16],\n cls_true=true_classes[0:16],\n cls_pred=pred_classes[0:16])\n\n def load_char_mappings(self,mapping_path):\n \"\"\"\n load EMNIST character mappings. This maps a label to the correspondent byte value of the given character\n return: the dictionary of label mappings\n \"\"\"\n mappings = {}\n with open(mapping_path) as f:\n for line in f:\n (key, val) = line.split()\n mappings[int(key)] = int(val)\n\n return mappings\n\n def plot_examples(self):\n # plotting examples\n images = self.X_test[0:16]\n labels = self.test_true_classes[0:16]\n cls_true = []\n for i in labels:\n cls_true.append(chr(self.mappings.get(i)))\n\n y_pred = self.model.predict(images)\n y_pred_cls = np.argmax(y_pred, axis=1)\n cls_pred = []\n for i in y_pred_cls:\n cls_pred.append(chr(self.mappings.get(i)))\n self.plot_images(images=images, cls_true=cls_true, cls_pred=cls_pred)\n\n def plot_error(self):\n # plotting error examples\n y_pred = self.model.predict(self.X_test)\n cls_pred = np.argmax(y_pred, axis=1)\n self.plot_example_errors(cls_pred=cls_pred)\n\n def verify(self):\n # If you want to plot some examples\n self.plot_examples()\n\n # If you want to plot some missclassified examples\n self.plot_error()"
] | [
[
"pandas.read_csv",
"numpy.argmax",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
skn123/psoCNN | [
"a29a0424dd3d38f297ac95bcf2b30ea990b3e051"
] | [
"psoCNN.py"
] | [
"import keras\r\nfrom keras.datasets import mnist\r\nfrom keras.datasets import fashion_mnist\r\nfrom keras.datasets import cifar10\r\nimport keras.backend\r\n\r\nfrom population import Population\r\n\r\nimport numpy as np\r\n\r\nfrom copy import deepcopy\r\n\r\nclass psoCNN:\r\n def __init__(self, dataset, n_iter, pop_size, batch_size, epochs, min_layer, max_layer, \\\r\n conv_prob, pool_prob, fc_prob, max_conv_kernel, max_out_ch, max_fc_neurons, dropout_rate):\r\n \r\n self.pop_size = pop_size\r\n self.n_iter = n_iter\r\n self.epochs = epochs\r\n\r\n self.batch_size = batch_size\r\n self.gBest_acc = np.zeros(n_iter)\r\n self.gBest_test_acc = np.zeros(n_iter)\r\n\r\n if dataset == \"mnist\":\r\n input_width = 28\r\n input_height = 28\r\n input_channels = 1\r\n output_dim = 10\r\n\r\n (self.x_train, self.y_train), (self.x_test, self.y_test) = mnist.load_data()\r\n \r\n if dataset == \"fashion-mnist\":\r\n input_width = 28\r\n input_height = 28\r\n input_channels = 1\r\n output_dim = 10\r\n\r\n (self.x_train, self.y_train), (self.x_test, self.y_test) = fashion_mnist.load_data()\r\n\r\n self.x_train = self.x_train.astype('float32')\r\n self.x_test = self.x_test.astype('float32')\r\n self.x_train /= 255\r\n self.x_test /= 255\r\n\r\n if dataset == \"mnist-background-images\":\r\n input_width = 28\r\n input_height = 28\r\n input_channels = 1\r\n output_dim = 10\r\n\r\n train = np.loadtxt(\"./datasets/mnist-background-images/mnist_background_images_train.amat\")\r\n test = np.loadtxt(\"./datasets/mnist-background-images/mnist_background_images_test.amat\")\r\n\r\n self.x_train = train[:, :-1]\r\n self.x_test = test[:, :-1]\r\n\r\n # Reshape images to 28x28\r\n self.x_train = np.reshape(self.x_train, (-1, 28, 28))\r\n self.x_test = np.reshape(self.x_test, (-1, 28, 28))\r\n\r\n self.y_train = train[:, -1]\r\n self.y_test = test[:, -1]\r\n\r\n if dataset == \"mnist-rotated-digits\":\r\n input_width = 28\r\n input_height = 28\r\n input_channels = 1\r\n output_dim = 10\r\n\r\n train = np.loadtxt(\"./datasets/mnist-rotated-digits/mnist_all_rotation_normalized_float_train_valid.amat\")\r\n test = np.loadtxt(\"./datasets/mnist-rotated-digits/mnist_all_rotation_normalized_float_test.amat\")\r\n\r\n self.x_train = train[:, :-1]\r\n self.x_test = test[:, :-1]\r\n\r\n # Reshape images to 28x28\r\n self.x_train = np.reshape(self.x_train, (-1, 28, 28))\r\n self.x_test = np.reshape(self.x_test, (-1, 28, 28))\r\n\r\n self.y_train = train[:, -1]\r\n self.y_test = test[:, -1]\r\n\r\n if dataset == \"mnist-random-background\":\r\n input_width = 28\r\n input_height = 28\r\n input_channels = 1\r\n output_dim = 10\r\n\r\n train = np.loadtxt(\"./datasets/mnist-random-background/mnist_background_random_train.amat\")\r\n test = np.loadtxt(\"./datasets/mnist-random-background/mnist_background_random_test.amat\")\r\n\r\n self.x_train = train[:, :-1]\r\n self.x_test = test[:, :-1]\r\n\r\n # Reshape images to 28x28\r\n self.x_train = np.reshape(self.x_train, (-1, 28, 28))\r\n self.x_test = np.reshape(self.x_test, (-1, 28, 28))\r\n\r\n self.y_train = train[:, -1]\r\n self.y_test = test[:, -1]\r\n\r\n if dataset == \"mnist-rotated-with-background\":\r\n input_width = 28\r\n input_height = 28\r\n input_channels = 1\r\n output_dim = 10\r\n\r\n train = np.loadtxt(\"./datasets/mnist-rotated-with-background/mnist_all_background_images_rotation_normalized_train_valid.amat\")\r\n test = np.loadtxt(\"./datasets/mnist-rotated-with-background/mnist_all_background_images_rotation_normalized_test.amat\")\r\n\r\n self.x_train = train[:, 
:-1]\r\n self.x_test = test[:, :-1]\r\n\r\n # Reshape images to 28x28\r\n self.x_train = np.reshape(self.x_train, (-1, 28, 28))\r\n self.x_test = np.reshape(self.x_test, (-1, 28, 28))\r\n\r\n self.y_train = train[:, -1]\r\n self.y_test = test[:, -1]\r\n\r\n if dataset == \"rectangles\":\r\n input_width = 28\r\n input_height = 28\r\n input_channels = 1\r\n output_dim = 2\r\n\r\n train = np.loadtxt(\"./datasets/rectangles/rectangles_train.amat\")\r\n test = np.loadtxt(\"./datasets/rectangles/rectangles_test.amat\")\r\n\r\n self.x_train = train[:, :-1]\r\n self.x_test = test[:, :-1]\r\n\r\n # Reshape images to 28x28\r\n self.x_train = np.reshape(self.x_train, (-1, 28, 28))\r\n self.x_test = np.reshape(self.x_test, (-1, 28, 28))\r\n\r\n self.y_train = train[:, -1]\r\n self.y_test = test[:, -1]\r\n\r\n if dataset == \"rectangles-images\":\r\n input_width = 28\r\n input_height = 28\r\n input_channels = 1\r\n output_dim = 2\r\n\r\n train = np.loadtxt(\"./datasets/rectangles-images/rectangles_im_train.amat\")\r\n test = np.loadtxt(\"./datasets/rectangles-images/rectangles_im_test.amat\")\r\n\r\n self.x_train = train[:, :-1]\r\n self.x_test = test[:, :-1]\r\n\r\n # Reshape images to 28x28\r\n self.x_train = np.reshape(self.x_train, (-1, 28, 28))\r\n self.x_test = np.reshape(self.x_test, (-1, 28, 28))\r\n\r\n self.y_train = train[:, -1]\r\n self.y_test = test[:, -1]\r\n\r\n if dataset == \"convex\":\r\n input_width = 28\r\n input_height = 28\r\n input_channels = 1\r\n output_dim = 2\r\n\r\n train = np.loadtxt(\"./datasets/convex/convex_train.amat\")\r\n test = np.loadtxt(\"./datasets/convex/convex_test.amat\")\r\n\r\n self.x_train = train[:, :-1]\r\n self.x_test = test[:, :-1]\r\n\r\n # Reshape images to 28x28\r\n self.x_train = np.reshape(self.x_train, (-1, 28, 28))\r\n self.x_test = np.reshape(self.x_test, (-1, 28, 28))\r\n\r\n self.y_train = train[:, -1]\r\n self.y_test = test[:, -1]\r\n\r\n self.x_train = self.x_train.reshape(self.x_train.shape[0], self.x_train.shape[1], self.x_train.shape[2], input_channels)\r\n self.x_test = self.x_test.reshape(self.x_test.shape[0], self.x_test.shape[1], self.x_test.shape[2], input_channels)\r\n\r\n self.y_train = keras.utils.to_categorical(self.y_train, output_dim)\r\n self.y_test = keras.utils.to_categorical(self.y_test, output_dim)\r\n\r\n print(\"Initializing population...\")\r\n self.population = Population(pop_size, min_layer, max_layer, input_width, input_height, input_channels, conv_prob, pool_prob, fc_prob, max_conv_kernel, max_out_ch, max_fc_neurons, output_dim)\r\n \r\n print(\"Verifying accuracy of the current gBest...\")\r\n print(self.population.particle[0])\r\n self.gBest = deepcopy(self.population.particle[0])\r\n self.gBest.model_compile(dropout_rate)\r\n hist = self.gBest.model_fit(self.x_train, self.y_train, batch_size=batch_size, epochs=epochs)\r\n test_metrics = self.gBest.model.evaluate(x=self.x_test, y=self.y_test, batch_size=batch_size)\r\n self.gBest.model_delete()\r\n \r\n self.gBest_acc[0] = hist.history['accuracy'][-1]\r\n self.gBest_test_acc[0] = test_metrics[1]\r\n \r\n self.population.particle[0].acc = hist.history['accuracy'][-1]\r\n self.population.particle[0].pBest.acc = hist.history['accuracy'][-1]\r\n\r\n print(\"Current gBest acc: \" + str(self.gBest_acc[0]) + \"\\n\")\r\n print(\"Current gBest test acc: \" + str(self.gBest_test_acc[0]) + \"\\n\")\r\n\r\n print(\"Looking for a new gBest in the population...\")\r\n for i in range(1, self.pop_size):\r\n print('Initialization - Particle: ' + str(i+1))\r\n 
print(self.population.particle[i])\r\n\r\n            self.population.particle[i].model_compile(dropout_rate)\r\n            hist = self.population.particle[i].model_fit(self.x_train, self.y_train, batch_size=batch_size, epochs=epochs)\r\n            self.population.particle[i].model_delete()\r\n            \r\n            self.population.particle[i].acc = hist.history['accuracy'][-1]\r\n            self.population.particle[i].pBest.acc = hist.history['accuracy'][-1]\r\n\r\n            if self.population.particle[i].pBest.acc >= self.gBest_acc[0]:\r\n                print(\"Found a new gBest.\")\r\n                self.gBest = deepcopy(self.population.particle[i])\r\n                self.gBest_acc[0] = self.population.particle[i].pBest.acc\r\n                print(\"New gBest acc: \" + str(self.gBest_acc[0]))\r\n                \r\n                self.gBest.model_compile(dropout_rate)\r\n                test_metrics = self.gBest.model.evaluate(x=self.x_test, y=self.y_test, batch_size=batch_size)\r\n                self.gBest_test_acc[0] = test_metrics[1]\r\n                print(\"New gBest test acc: \" + str(self.gBest_test_acc[0]))\r\n                \r\n                self.gBest.model_delete()\r\n\r\n\r\n    def fit(self, Cg, dropout_rate):\r\n        for i in range(1, self.n_iter): \r\n            gBest_acc = self.gBest_acc[i-1]\r\n            gBest_test_acc = self.gBest_test_acc[i-1]\r\n\r\n            for j in range(self.pop_size):\r\n                print('Iteration: ' + str(i) + ' - Particle: ' + str(j+1))\r\n\r\n                # Update particle velocity\r\n                self.population.particle[j].velocity(self.gBest.layers, Cg)\r\n\r\n                # Update particle architecture\r\n                self.population.particle[j].update()\r\n\r\n                print('Particle NEW architecture: ')\r\n                print(self.population.particle[j])\r\n\r\n                # Compute the acc in the updated particle\r\n                self.population.particle[j].model_compile(dropout_rate)\r\n                hist = self.population.particle[j].model_fit(self.x_train, self.y_train, batch_size=self.batch_size, epochs=self.epochs)\r\n                self.population.particle[j].model_delete()\r\n\r\n                self.population.particle[j].acc = hist.history['accuracy'][-1]\r\n                \r\n                f_test = self.population.particle[j].acc\r\n                pBest_acc = self.population.particle[j].pBest.acc\r\n\r\n                if f_test >= pBest_acc:\r\n                    print(\"Found a new pBest.\")\r\n                    print(\"Current acc: \" + str(f_test))\r\n                    print(\"Past pBest acc: \" + str(pBest_acc))\r\n                    pBest_acc = f_test\r\n                    self.population.particle[j].pBest = deepcopy(self.population.particle[j])\r\n\r\n                    if pBest_acc >= gBest_acc:\r\n                        print(\"Found a new gBest.\")\r\n                        gBest_acc = pBest_acc\r\n                        self.gBest = deepcopy(self.population.particle[j])\r\n                        \r\n                        self.gBest.model_compile(dropout_rate)\r\n                        hist = self.gBest.model_fit(self.x_train, self.y_train, batch_size=self.batch_size, epochs=self.epochs)\r\n                        test_metrics = self.gBest.model.evaluate(x=self.x_test, y=self.y_test, batch_size=self.batch_size)\r\n                        self.gBest.model_delete()\r\n                        gBest_test_acc = test_metrics[1]\r\n\r\n            \r\n            self.gBest_acc[i] = gBest_acc\r\n            self.gBest_test_acc[i] = gBest_test_acc\r\n\r\n            print(\"Current gBest acc: \" + str(self.gBest_acc[i]))\r\n            print(\"Current gBest test acc: \" + str(self.gBest_test_acc[i]))\r\n\r\n    def fit_gBest(self, batch_size, epochs, dropout_rate):\r\n        print(\"\\nFurther training gBest model...\")\r\n        self.gBest.model_compile(dropout_rate)\r\n\r\n        trainable_count = 0\r\n        for i in range(len(self.gBest.model.trainable_weights)):\r\n            trainable_count += keras.backend.count_params(self.gBest.model.trainable_weights[i])\r\n        \r\n        print(\"gBest's number of trainable parameters: \" + str(trainable_count))\r\n        self.gBest.model_fit_complete(self.x_train, self.y_train, batch_size=batch_size, epochs=epochs)\r\n\r\n        return trainable_count\r\n    \r\n    def evaluate_gBest(self, batch_size):\r\n        print(\"\\nEvaluating gBest model on the 
test set...\")\r\n \r\n metrics = self.gBest.model.evaluate(x=self.x_test, y=self.y_test, batch_size=batch_size)\r\n\r\n print(\"\\ngBest model loss in the test set: \" + str(metrics[0]) + \" - Test set accuracy: \" + str(metrics[1]))\r\n return metrics\r\n"
] | [
[
"numpy.reshape",
"numpy.zeros",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
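Editor's note on the psoCNN record above: its fit() loop applies a two-level best-keeping rule that is easy to get wrong, so here is a minimal, generic sketch of just that rule (not code from the repo; `particle`, `pbest`, and `acc` are hypothetical stand-ins). A particle's personal best (pBest) is refreshed first, and the global best (gBest) can only improve through a personal best:

from copy import deepcopy

def update_bests(particle, gbest, gbest_acc):
    # pBest update: keep the better of current fitness and past personal best.
    if particle.acc >= particle.pbest.acc:
        particle.pbest = deepcopy(particle)
        # gBest update: promote the new pBest only if it beats the global best.
        if particle.pbest.acc >= gbest_acc:
            gbest = deepcopy(particle)
            gbest_acc = particle.pbest.acc
    return gbest, gbest_acc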
lyqun/FPConv | [
"9fc3a71258550101bec671330c5e97b45725291c"
] | [
"datasets/s3dis_dataset.py"
] | [
"import os\nimport numpy as np\nimport sys\nfrom torch.utils.data import Dataset\n\n\nclass S3DIS(Dataset):\n def __init__(self, split='train', data_root='trainval_fullarea', num_point=4096, test_area=5, block_size=1.0, sample_rate=1.0, transform=None, if_normal=True):\n super().__init__()\n print('Initiating DataLoader....')\n self.if_normal = if_normal\n self.num_point = num_point\n self.block_size = block_size\n self.transform = transform\n rooms = sorted(os.listdir(data_root))\n rooms = [room for room in rooms if 'Area_' in room]\n if split == 'train':\n rooms_split = [\n room for room in rooms if not 'Area_{}'.format(test_area) in room]\n else:\n rooms_split = [\n room for room in rooms if 'Area_{}'.format(test_area) in room]\n self.room_points, self.room_labels = [], []\n self.room_coord_min, self.room_coord_max = [], []\n num_point_all = []\n for room_name in rooms_split:\n room_path = os.path.join(data_root, room_name)\n room_data = np.load(room_path) # xyzrgbl, N*7\n # xyzrgb, N*6; l, N\n points, labels = room_data[:, 0:6], room_data[:, 6]\n points[:, 0:3] -= np.amin(points, axis=0)[0:3]\n\n coord_min, coord_max = np.amin(points, axis=0)[\n :3], np.amax(points, axis=0)[:3]\n\n self.room_points.append(points), self.room_labels.append(labels)\n self.room_coord_min.append(\n coord_min), self.room_coord_max.append(coord_max)\n num_point_all.append(labels.size)\n\n # Generate label weights\n self.labelweights = self.__gen_labelweights(self.room_labels)\n\n sample_prob = num_point_all / np.sum(num_point_all)\n num_iter = int(np.sum(num_point_all) * sample_rate / num_point)\n room_idxs = []\n for index in range(len(rooms_split)):\n room_idxs.extend(\n [index] * int(round(sample_prob[index] * num_iter)))\n self.room_idxs = np.array(room_idxs)\n np.random.seed(123)\n np.random.shuffle(self.room_idxs)\n\n print('Num of labels: ', len(self.room_labels))\n print(\"Totally {} samples in {} set.\".format(len(self.room_idxs), split))\n\n def __gen_labelweights(self, labels):\n labelweights = np.zeros(13)\n for seg in labels:\n tmp, _ = np.histogram(seg, range(14))\n labelweights += tmp\n labelweights = labelweights.astype(np.float32)\n labelweights = labelweights / np.sum(labelweights)\n # self.labelweights = 1/np.log(1.2+labelweights)\n return np.power(np.amax(labelweights) / labelweights, 1 / 3.0)\n\n def __getitem__(self, idx):\n room_idx = self.room_idxs[idx]\n points = self.room_points[room_idx] # N * 6\n labels = self.room_labels[room_idx] # N\n N_points = points.shape[0]\n\n while True:\n center = points[np.random.choice(N_points)][:3]\n block_min = center - [self.block_size /\n 2.0, self.block_size / 2.0, 0]\n block_max = center + [self.block_size /\n 2.0, self.block_size / 2.0, 0]\n point_idxs = np.where((points[:, 0] >= block_min[0]) & (points[:, 0] <= block_max[0]) & (\n points[:, 1] >= block_min[1]) & (points[:, 1] <= block_max[1]))[0]\n if point_idxs.size > 1024:\n break\n\n if point_idxs.size >= self.num_point:\n selected_point_idxs = np.random.choice(\n point_idxs, self.num_point, replace=False)\n else:\n selected_point_idxs = np.random.choice(\n point_idxs, self.num_point, replace=True)\n\n # normalize\n selected_points = points[selected_point_idxs, :] # num_point * 6\n selected_points[:, 0] = selected_points[:, 0] - center[0]\n selected_points[:, 1] = selected_points[:, 1] - center[1]\n selected_points[:, 3:6] /= 255.0\n if self.if_normal:\n current_points = np.zeros((self.num_point, 9)) # num_point * 9\n current_points[:, 6] = selected_points[:, 0] / \\\n 
self.room_coord_max[room_idx][0]\n current_points[:, 7] = selected_points[:, 1] / \\\n self.room_coord_max[room_idx][1]\n current_points[:, 8] = selected_points[:, 2] / \\\n self.room_coord_max[room_idx][2]\n current_points[:, 0:6] = selected_points\n else:\n current_points = selected_points\n current_labels = labels[selected_point_idxs]\n if self.transform is not None:\n current_points, current_labels = self.transform(\n current_points, current_labels)\n\n sampleweights = self.labelweights[current_labels.astype(np.uint8)]\n return current_points, current_labels, sampleweights\n\n def __len__(self):\n return len(self.room_idxs)\n\n\nif __name__ == '__main__':\n data_root = '/home/zizheng/data/s3dis/stanford_indoor3d_all_classes'\n num_point, test_area, block_size, sample_rate = 4096, 5, 1.0, 0.01\n\n import psutil\n print(\"Before loading, the memory usage is \", psutil.virtual_memory())\n point_data = S3DIS(split='train', data_root=data_root, num_point=num_point,\n test_area=test_area, block_size=block_size, sample_rate=sample_rate, transform=None)\n print('point data size:', point_data.__len__())\n print('point data 0 shape:', point_data.__getitem__(0)[0].shape)\n print('point label 0 shape:', point_data.__getitem__(0)[1].shape)\n import torch\n import time\n import random\n manual_seed = 123\n random.seed(manual_seed)\n np.random.seed(manual_seed)\n torch.manual_seed(manual_seed)\n torch.cuda.manual_seed_all(manual_seed)\n\n print(\"After loading, the memory usage is \", psutil.virtual_memory())\n\n def worker_init_fn(worker_id):\n random.seed(manual_seed + worker_id)\n train_loader = torch.utils.data.DataLoader(\n point_data, batch_size=32, shuffle=True, num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)\n for idx in range(4):\n end = time.time()\n for i, (points, target, weight) in enumerate(train_loader):\n print('time: {}/{}--{}'.format(i + 1,\n len(train_loader), time.time() - end))\n print('Size of points: ', points.size())\n points_np = points.cpu().data.numpy()\n points_np_block1 = points_np[0, ...]\n minp = points_np_block1[:, 0].min()\n maxp = points_np_block1[:, 0].max()\n print('weight is ', weight)\n print('Min in x is {}, Max in x is {}'.format(minp, maxp))\n print('Min in y is {}, Max in y is {}'.format(\n points_np_block1[:, 1].min(), points_np_block1[:, 1].max()))\n\n print(\"In loop, the memory usage is \", psutil.virtual_memory())\n sys.exit(0)\n"
] | [
[
"numpy.amax",
"numpy.random.seed",
"numpy.random.choice",
"numpy.amin",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"numpy.random.shuffle",
"numpy.where",
"torch.cuda.manual_seed_all",
"numpy.load",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
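Editor's note on the S3DIS record above: the __gen_labelweights helper implements inverse-frequency class weighting. A standalone sketch of the same computation, assuming every class index occurs at least once (toy input below is hypothetical, not data from the repo):

import numpy as np

def gen_labelweights(room_labels, num_classes=13):
    counts = np.zeros(num_classes)
    for seg in room_labels:
        tmp, _ = np.histogram(seg, bins=range(num_classes + 1))
        counts += tmp
    freq = counts.astype(np.float32) / np.sum(counts)
    # Cube root keeps rare-class weights large but bounded.
    return np.power(np.amax(freq) / freq, 1 / 3.0)

# Example: class 0 is three times as frequent, so classes 1 and 2 get weight ~1.44.
print(gen_labelweights([np.array([0, 0, 0, 1, 2])], num_classes=3))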
labelshift/labelshift | [
"d5d6a06ef435a7fca96be7bbef415e52fb5235b4"
] | [
"labelshift/algorithms/expectation_maximization.py"
] | [
"\"\"\"Expectation Maximization algorithm.\"\"\"\nimport warnings\nfrom typing import Optional\nimport numpy as np\nfrom numpy.typing import ArrayLike\n\nimport labelshift.probability as prob\nimport labelshift.recalibrate as recalib\n\n\ndef expectation_maximization(\n predictions: ArrayLike,\n training_prevalences: ArrayLike,\n *,\n initial_prevalences: Optional[ArrayLike] = None,\n max_steps: int = 10000,\n atol: float = 0.01,\n) -> np.ndarray:\n \"\"\"Expectation maximization algorithm, as described in\n\n M. Saerens et al., Adjusting the outputs of a classifier to\n new a priori probabilities: A simple procedure.\n Neur. Comput.14, 1 (2002), 21--41.\n\n Args:\n predictions: test set probability predictions. Shape (n_samples, n_classes).\n prevalences: prevalences in the training data set.\n Shape (n_classes,), (n_classes, 1) or (1, n_classes). Will be normalized.\n initial_prevalences: starting prevalences for optimization.\n If not provided, the training prevalences are used.\n Shape (n_classes,), (n_classes, 1) or (1, n_classes). Will be normalized.\n max_steps: maximal number of iteration steps\n atol: desired accuracy (for early stopping)\n\n Returns:\n test set prevalences, shape (n_classes,).\n \"\"\"\n predictions = np.asarray(predictions, dtype=float)\n\n training_prevalences: np.ndarray = prob.normalize_prevalences(\n training_prevalences\n ) # Shape (1, n_classes)\n\n # Set the initial estimate for the test set prevalences\n if initial_prevalences is not None:\n test_prevalences = prob.normalize_prevalences(initial_prevalences)\n else:\n test_prevalences = training_prevalences.copy()\n\n # Iteratively improve the estimate of the test set prevalences\n for _ in range(max_steps):\n old_prevalences = test_prevalences.copy()\n\n new_predictions: np.ndarray = recalib.recalibrate(\n predictions, training=training_prevalences, test=test_prevalences\n )\n test_prevalences: np.ndarray = np.sum(new_predictions, axis=0).reshape(\n 1, -1\n ) / len(new_predictions)\n\n # Check if converged\n if np.allclose(old_prevalences, test_prevalences, atol=0.01, rtol=0):\n break\n\n warnings.warn(\n RuntimeWarning(f\"Required accuracy not reached in {max_steps} steps.\")\n )\n return test_prevalences.ravel()\n"
] | [
[
"numpy.asarray",
"numpy.sum",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
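Editor's note on the expectation_maximization record above: each iteration is a Bayes-rule reweighting of the classifier outputs followed by an average. A worked sketch of one such step with the recalibration written out inline (the repo delegates this step to labelshift.recalibrate; `em_step` is a hypothetical name):

import numpy as np

def em_step(predictions, train_prev, test_prev):
    # predictions: (n_samples, n_classes); prevalences: (n_classes,)
    reweighted = predictions * (test_prev / train_prev)  # scale by prior ratio
    reweighted /= reweighted.sum(axis=1, keepdims=True)  # renormalize each row
    return reweighted.mean(axis=0)                       # updated prevalence estimate

preds = np.array([[0.9, 0.1], [0.6, 0.4], [0.2, 0.8]])
print(em_step(preds, np.array([0.5, 0.5]), np.array([0.5, 0.5])))  # ~[0.567, 0.433]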
liyang-huang/PSMNet | [
"0449dc290324ed1cd05e6b4668c56d81c800ec77"
] | [
"Test_img.py"
] | [
"from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport skimage\nimport skimage.io\nimport skimage.transform\nimport numpy as np\nimport time\nimport math\nfrom utils import preprocess \nfrom models import *\nimport cv2\nimport re\nimport sys\n\n# 2012 data /media/jiaren/ImageNet/data_scene_flow_2012/testing/\n\nparser = argparse.ArgumentParser(description='PSMNet')\nparser.add_argument('--KITTI', default='2015',\n help='KITTI version')\nparser.add_argument('--datapath', default='/media/jiaren/ImageNet/data_scene_flow_2015/testing/',\n help='select model')\nparser.add_argument('--loadmodel', default='./trained/pretrained_model_KITTI2015.tar',\n help='loading model')\nparser.add_argument('--leftimg', default= None,\n help='load model')\nparser.add_argument('--rightimg', default= None,\n help='load model') \nparser.add_argument('--isgray', default= False,\n help='load model') \nparser.add_argument('--model', default='stackhourglass',\n help='select model')\nparser.add_argument('--maxdisp', type=int, default=192,\n help='maxium disparity')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n#print(os.environ[\"CUDA_VISIBLE_DEVICES\"])\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n#print(os.environ[\"CUDA_VISIBLE_DEVICES\"])\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n#test_left_img, test_right_img = DA.dataloader(args.datapath)\n\nif args.model == 'stackhourglass':\n model = stackhourglass(args.maxdisp)\nelif args.model == 'basic':\n model = basic(args.maxdisp)\nelse:\n print('no model')\n\nmodel = nn.DataParallel(model, device_ids=[0])\nmodel.cuda()\n\n\nif args.loadmodel is not None:\n print('load PSMNet')\n print(args.loadmodel)\n state_dict = torch.load(args.loadmodel)\n model.load_state_dict(state_dict['state_dict'])\n\nprint('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))\n\ndef test(imgL,imgR):\n model.eval()\n\n if args.cuda:\n imgL = torch.FloatTensor(imgL).cuda()\n imgR = torch.FloatTensor(imgR).cuda() \n\n imgL, imgR= Variable(imgL), Variable(imgR)\n\n print('imgL=',imgL)\n print(imgL.shape)\n print(imgL.dtype)\n with torch.no_grad():\n disp = model(imgL,imgR)\n\n print('out=',disp)\n print(disp.shape)\n print(disp.dtype)\n disp = torch.squeeze(disp)\n pred_disp = disp.data.cpu().numpy()\n\n return pred_disp\n\n\ndef main():\n processed = preprocess.get_transform(augment=False)\n print(type(args.isgray))\n if args.isgray:\n imgL_o = cv2.cvtColor(cv2.imread(args.leftimg,0), cv2.COLOR_GRAY2RGB)\n imgR_o = cv2.cvtColor(cv2.imread(args.rightimg,0), cv2.COLOR_GRAY2RGB)\n print('liyang gray test~')\n else:\n print('false~')\n imgL_o = (skimage.io.imread(args.leftimg).astype('float32'))\n imgR_o = (skimage.io.imread(args.rightimg).astype('float32'))\n \n imgL = processed(imgL_o).numpy()\n imgR = processed(imgR_o).numpy()\n #print('imgL=',imgL)\n #print(imgL.shape)\n imgL = np.reshape(imgL,[1,3,imgL.shape[1],imgL.shape[2]])\n imgR = np.reshape(imgR,[1,3,imgR.shape[1],imgR.shape[2]])\n\n # pad to width and hight to 
16 times\n if imgL.shape[2] % 16 != 0:\n times = imgL.shape[2]//16 \n top_pad = (times+1)*16 -imgL.shape[2]\n else:\n top_pad = 0\n if imgL.shape[3] % 16 != 0:\n times = imgL.shape[3]//16 \n left_pad = (times+1)*16-imgL.shape[3]\n else:\n left_pad = 0 \n imgL = np.lib.pad(imgL,((0,0),(0,0),(top_pad,0),(0,left_pad)),mode='constant',constant_values=0)\n imgR = np.lib.pad(imgR,((0,0),(0,0),(top_pad,0),(0,left_pad)),mode='constant',constant_values=0)\n\n start_time = time.time()\n #print('imgL=',imgL)\n #print(imgL.shape)\n pred_disp = test(imgL,imgR)\n print('time = %.2f' %(time.time() - start_time))\n if top_pad !=0 or left_pad != 0:\n #img = pred_disp[top_pad:,:-left_pad]\n img = pred_disp[top_pad:,:]\n else:\n img = pred_disp\n #print('out',img)\n #max_disp = np.nanmax(img[img != np.inf])\n #min_disp = np.nanmin(img[img != np.inf])\n #print(max_disp)\n #print(min_disp)\n #img = (img - min_disp) / (max_disp - min_disp)\n \n img = (img*256).astype('uint16')\n #img = (img*255).astype('uint8')\n #print(type(img))\n #print(np.shape(pred_disp))\n #print(np.shape(img))\n #print(top_pad)\n #print(left_pad)\n skimage.io.imsave('disparity.png',img)\n \n gt = readPFM('./cv_img/Synthetic/TLD0.pfm')\n #print(type(gt))\n #print(type(img))\n max_gt = np.nanmax(gt[gt != np.inf])\n min_gt = np.nanmin(gt[gt != np.inf])\n #print(max_gt)\n #print(min_gt)\n #print(gt.shape)\n #print(img.shape)\n #print(gt.dtype)\n #print(img.dtype)\n print(gt)\n #print(img)\n \n \n #img = np.concatenate((imgL_o, imgR_o),axis=1)\n #img = cv2.line(img, (0, 240), (1504, 240), (0, 0, 255), 2)\n #img = cv2.line(img, (0, 210), (1504, 210), (0, 0, 255), 2)\n #img = cv2.line(img, (0, 270), (1504, 270), (0, 0, 255), 2)\n #skimage.io.imsave('test.png',img)\n\ndef readPFM(file):\n file = open(file, 'rb')\n\n header = file.readline().rstrip()\n header = header.decode('utf-8')\n if header == 'PF':\n color = True\n elif header == 'Pf':\n color = False\n else:\n raise Exception('Not a PFM file.')\n\n dim_match = re.match(r'^(\\d+)\\s(\\d+)\\s$', file.readline().decode('utf-8'))\n if dim_match:\n width, height = map(int, dim_match.groups())\n else:\n raise Exception('Malformed PFM header.')\n\n scale = float(file.readline().rstrip())\n if scale < 0: # little-endian\n endian = '<'\n scale = -scale\n else:\n endian = '>' # big-endian\n\n data = np.fromfile(file, endian + 'f')\n shape = (height, width, 3) if color else (height, width)\n\n data = np.reshape(data, shape)\n data = np.flipud(data)\n return data\n\ndef cal_avgerr(GT, disp):\n return np.sum(np.multiply(np.abs(GT - disp), GT[GT != np.inf])) / np.sum(GT[GT != np.inf])\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n"
] | [
[
"numpy.lib.pad",
"numpy.nanmax",
"numpy.fromfile",
"numpy.abs",
"torch.cuda.manual_seed",
"torch.load",
"numpy.reshape",
"torch.manual_seed",
"numpy.nanmin",
"numpy.flipud",
"torch.no_grad",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"numpy.sum",
"torch.squeeze",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
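Editor's note on the PSMNet record above: Test_img.py pads inputs so height and width are multiples of 16 before inference and crops the padding off afterwards. A compact standalone sketch of that pad arithmetic (not code from the repo):

import numpy as np

def pad_to_multiple(img, multiple=16):
    # img: (N, C, H, W); pad on top and on the right, as in Test_img.py
    top_pad = (-img.shape[2]) % multiple
    right_pad = (-img.shape[3]) % multiple
    padded = np.lib.pad(img, ((0, 0), (0, 0), (top_pad, 0), (0, right_pad)),
                        mode='constant', constant_values=0)
    return padded, top_pad, right_pad

x = np.ones((1, 3, 375, 1242), dtype='float32')
y, top, right = pad_to_multiple(x)
assert y.shape[2:] == (384, 1248)  # both now divisible by 16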
Ackey-code/3d-artefacts-nca | [
"b13228d5dd30519ad885d2400061be2adf6cfc3c"
] | [
"artefact_nca/base/base_torch_dataset.py"
] | [
"import abc\nfrom typing import Any, Dict\n\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass BaseTorchDataset(Dataset, metaclass=abc.ABCMeta):\n\n _config_group_ = \"trainer/dataset\"\n _config_name_ = \"default\"\n\n def sample(self, batch_size: int) -> Dict[str, Any]:\n \"\"\"Random sample from dataset\n\n Args:\n batch_size (int): batch size to sample\n\n Returns:\n typing.Dict[str, typing.Any]: dict of outputs, ex:\n {\"data\": subset, \"targets\":targets}\n \"\"\"\n max_indices = self.__len__()\n indices = torch.randint(0, max_indices, (batch_size,))\n return self.__getitem__(indices)\n\n def to_device(self, device: torch.device) -> None:\n pass\n"
] | [
[
"torch.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
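Editor's note on the BaseTorchDataset record above: sample() draws a whole tensor of indices with torch.randint and hands it to __getitem__, so a concrete subclass must accept batched (fancy) indexing. A usage sketch, assuming the BaseTorchDataset class from the record is in scope (ToyDataset is hypothetical):

import torch

class ToyDataset(BaseTorchDataset):
    def __init__(self, n=100):
        self.data = torch.randn(n, 3)

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, idx):
        # idx may be an int or a 1-D index tensor coming from sample()
        return {"data": self.data[idx]}

batch = ToyDataset().sample(batch_size=8)  # {"data": tensor of shape (8, 3)}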
Troyliu777/VideoSuperResolution | [
"a844687aa3ee0f4db59e704f707532f2ab6dc1c0"
] | [
"UTest/motion_test.py"
] | [
"\"\"\"\nunit test for VSR.Framework.Motion package\n\"\"\"\n\nfrom VSR.Framework import Motion as M\n\nimport tensorflow as tf\nimport numpy as np\nfrom PIL import Image\n\nTEST_FLO_FILE = './data/flying_chair/0-gt.flo'\nTEST_PNG16_FILE = './data/kitti_car/f_01.png'\n\n\ndef test_open_flo():\n X = M.open_flo(TEST_FLO_FILE)\n assert X.shape == (384, 512, 2)\n\n\ndef test_open_png16():\n X = M.open_png16(TEST_PNG16_FILE)\n assert X.shape == (375, 1242, 3)\n\n\ndef test_grid():\n G = np.meshgrid(range(5), range(5))\n G = np.stack(G, -1)\n G_bar = M._grid(5, 5)[0]\n with tf.Session() as sess:\n G_bar = sess.run(G_bar)\n assert np.all(G == G_bar.transpose([1, 0, 2]))\n\n\ndef test_sample():\n G = np.meshgrid(range(5), range(5))\n G = np.stack(G, -1)\n G = np.expand_dims(G, 0)\n G.transpose([0, 2, 1, 3])\n\n X = np.random.rand(1, 5, 5, 3).astype('float32')\n X_bar = M._sample(X, G[..., 0], G[..., 1])\n\n with tf.Session() as sess:\n X_bar = sess.run(X_bar)\n assert np.all(X == X_bar)\n\n\ndef test_warp_car():\n flow = M.KITTI.open_flow(TEST_PNG16_FILE)\n car = M.open_png16('./data/kitti_car/c_11.png')\n flow = flow.reshape([1, *flow.shape])\n car = car.reshape([1, *car.shape])\n car_bar = M.warp(car, flow[..., 0], flow[..., 1], True)\n with tf.Session() as sess:\n car_bar = sess.run(car_bar)[0]\n car_bar = car_bar.astype('uint8')\n Image.fromarray(car_bar, 'RGB').show()\n\n\ndef test_warp_chair():\n flow = M.open_flo(TEST_FLO_FILE)\n ch0 = np.array(Image.open('./data/flying_chair/img1.png')).astype('float32')\n flow = flow.reshape([1, *flow.shape])\n ch0 = np.expand_dims(ch0, 0)\n ch1 = M.warp(ch0, flow[..., 0], flow[..., 1], True)\n with tf.Session() as sess:\n ch1 = sess.run(ch1)[0]\n ch1 = ch1.astype('uint8')\n Image.fromarray(ch1, 'RGB').show()\n"
] | [
[
"numpy.expand_dims",
"numpy.stack",
"numpy.all",
"numpy.random.rand",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
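Editor's note on the motion_test record above: test_sample checks that resampling an image at an identity grid returns the image unchanged. The same property in plain numpy indexing, without the VSR.Framework.Motion helpers (standalone sketch):

import numpy as np

h, w = 5, 5
gx, gy = np.meshgrid(range(w), range(h))  # gx: column index, gy: row index
img = np.random.rand(h, w, 3).astype('float32')
resampled = img[gy, gx]                   # gather pixels at grid coordinates
assert np.all(img == resampled)           # identity warp leaves the image intact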
Tengelma/CapstonePlayground | [
"03ff9aaa2adf9f6eca5c059082667b1d894ea6bb"
] | [
"server/test_server.py"
] | [
"import os\nimport tempfile\nimport pytest\nimport re\nimport server\nfrom server import get_x, get_y, create_fig, serialize_fig\nimport numpy as np\n\[email protected]\ndef client():\n server.app.config['TESTING'] = True\n with server.app.test_client() as client:\n yield client\n\ndef test_get_x():\n test = np.array([0,1,2,3,4])\n assert np.array_equal(get_x(5), test)\n\ndef test_get_y():\n test_x = np.array([0,1,2,3,4])\n test_y = np.array([5 * x + 3 for x in test_x])\n assert np.array_equal(test_y, get_y(test_x, 5, 3))\n\ndef test_create_fig():\n test_x = np.array([0,1,2,3,4])\n test_y = np.array([5 * x + 3 for x in test_x])\n create_fig(test_x, test_y)\n os.path.exists(\"./temp/temp.png\")\n\ndef test_serialize_fig():\n string = serialize_fig()\n assert is_base_64(string)\n\ndef test_graph(client):\n req = graph(client, 3, 5)\n assert(is_base_64(req.json[\"image\"]))\n\n\ndef graph(client, slope, y_intercept):\n return client.post('/graph', json={\"slope\": slope, \"yIntercept\": y_intercept})\n\ndef is_base_64(s: str):\n pattern = re.compile(\"^b'[a-zA-Z0-9+/]*={0,3}'$\")\n return pattern.match(s)\n\n\n\n\n \n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
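Editor's note on the test_server record above: the is_base_64 regex matches the string repr of a bytes object (leading b' and trailing '), which is what str() of base64-encoded bytes yields in Python 3. A self-contained illustration:

import base64
import re

pattern = re.compile("^b'[a-zA-Z0-9+/]*={0,3}'$")
encoded = str(base64.b64encode(b"hello"))  # "b'aGVsbG8='"
assert pattern.match(encoded)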
swapithorat/obj-det-using-ml | [
"9ac8ece4f67820076b794d577391ec77a92b4ca0"
] | [
"object_detection_app/app.py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport base64\nimport cStringIO\nimport sys\nimport tempfile\n\nMODEL_BASE = '/opt/models/research'\nsys.path.append(MODEL_BASE)\nsys.path.append(MODEL_BASE + '/object_detection')\nsys.path.append(MODEL_BASE + '/slim')\n\nfrom decorator import requires_auth\nfrom flask import Flask\nfrom flask import redirect\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for\nfrom flask_wtf.file import FileField\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport tensorflow as tf\nfrom utils import label_map_util\nfrom werkzeug.datastructures import CombinedMultiDict\nfrom wtforms import Form\nfrom wtforms import ValidationError\n\n\napp = Flask(__name__)\n\n\[email protected]_request\n@requires_auth\ndef before_request():\n pass\n\n\nPATH_TO_CKPT = '/opt/graph_def/frozen_inference_graph.pb'\nPATH_TO_LABELS = MODEL_BASE + '/object_detection/data/mscoco_label_map.pbtxt'\n\ncontent_types = {'jpg': 'image/jpeg',\n 'jpeg': 'image/jpeg',\n 'png': 'image/png'}\nextensions = sorted(content_types.keys())\n\n\ndef is_image():\n def _is_image(form, field):\n if not field.data:\n raise ValidationError()\n elif field.data.filename.split('.')[-1].lower() not in extensions:\n raise ValidationError()\n\n return _is_image\n\n\nclass PhotoForm(Form):\n input_photo = FileField(\n 'File extension should be: %s (case-insensitive)' % ', '.join(extensions),\n validators=[is_image()])\n\n\nclass ObjectDetector(object):\n\n def __init__(self):\n self.detection_graph = self._build_graph()\n self.sess = tf.Session(graph=self.detection_graph)\n\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=90, use_display_name=True)\n self.category_index = label_map_util.create_category_index(categories)\n\n def _build_graph(self):\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n return detection_graph\n\n def _load_image_into_numpy_array(self, image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n def detect(self, image):\n image_np = self._load_image_into_numpy_array(image)\n image_np_expanded = np.expand_dims(image_np, axis=0)\n\n graph = self.detection_graph\n image_tensor = graph.get_tensor_by_name('image_tensor:0')\n boxes = graph.get_tensor_by_name('detection_boxes:0')\n scores = graph.get_tensor_by_name('detection_scores:0')\n classes = graph.get_tensor_by_name('detection_classes:0')\n num_detections = graph.get_tensor_by_name('num_detections:0')\n\n (boxes, scores, classes, num_detections) = self.sess.run(\n [boxes, scores, classes, 
num_detections],\n        feed_dict={image_tensor: image_np_expanded})\n\n    boxes, scores, classes, num_detections = map(\n        np.squeeze, [boxes, scores, classes, num_detections])\n\n    return boxes, scores, classes.astype(int), num_detections\n\n\ndef draw_bounding_box_on_image(image, box, color='red', thickness=4):\n  draw = ImageDraw.Draw(image)\n  im_width, im_height = image.size\n  ymin, xmin, ymax, xmax = box\n  (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n                                ymin * im_height, ymax * im_height)\n  draw.line([(left, top), (left, bottom), (right, bottom),\n             (right, top), (left, top)], width=thickness, fill=color)\n\n\ndef encode_image(image):\n  image_buffer = cStringIO.StringIO()\n  image.save(image_buffer, format='PNG')\n  imgstr = 'data:image/png;base64,{:s}'.format(\n      base64.b64encode(image_buffer.getvalue()))\n  return imgstr\n\n\ndef detect_objects(image_path):\n  image = Image.open(image_path).convert('RGB')\n  boxes, scores, classes, num_detections = client.detect(image)\n  image.thumbnail((480, 480), Image.ANTIALIAS)\n\n  new_images = {}\n  for i in range(num_detections):\n    if scores[i] < 0.7: continue\n    cls = classes[i]\n    if cls not in new_images.keys():\n      new_images[cls] = image.copy()\n    draw_bounding_box_on_image(new_images[cls], boxes[i],\n                               thickness=int(scores[i]*10)-4)\n\n  result = {}\n  result['original'] = encode_image(image.copy())\n\n  for cls, new_image in new_images.iteritems():\n    category = client.category_index[cls]['name']\n    result[category] = encode_image(new_image)\n\n  return result\n\n\n@app.route('/')\ndef upload():\n  photo_form = PhotoForm(request.form)\n  return render_template('upload.html', photo_form=photo_form, result={})\n\n\n@app.route('/post', methods=['GET', 'POST'])\ndef post():\n  form = PhotoForm(CombinedMultiDict((request.files, request.form)))\n  if request.method == 'POST' and form.validate():\n    with tempfile.NamedTemporaryFile() as temp:\n      form.input_photo.data.save(temp)\n      temp.flush()\n      result = detect_objects(temp.name)\n\n    photo_form = PhotoForm(request.form)\n    return render_template('upload.html',\n                           photo_form=photo_form, result=result)\n  else:\n    return redirect(url_for('upload'))\n\n\nclient = ObjectDetector()\n\n\nif __name__ == '__main__':\n  app.run(host='0.0.0.0', port=80, debug=False)\n  \n"
] | [
[
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"tensorflow.Session",
"tensorflow.GraphDef"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
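Editor's note on the object-detection record above: draw_bounding_box_on_image assumes the TF Object Detection API box convention, normalized (ymin, xmin, ymax, xmax) in [0, 1], scaled by the image size before drawing. A minimal standalone sketch of that scaling and drawing step:

from PIL import Image, ImageDraw

img = Image.new('RGB', (200, 100))
ymin, xmin, ymax, xmax = 0.1, 0.2, 0.9, 0.8      # normalized box
w, h = img.size
left, right, top, bottom = xmin * w, xmax * w, ymin * h, ymax * h
ImageDraw.Draw(img).line(
    [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
    width=4, fill='red')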
jopapo/financial-demo | [
"15f72198756edaef4887e97b91d007cc0d120e8a"
] | [
"actions/profile_db.py"
] | [
"import os\nimport sqlalchemy as sa\nfrom sqlalchemy import Column, Integer, String, DateTime, REAL\nfrom sqlalchemy.orm import Session, sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.engine.base import Engine\nfrom typing import Dict, Text, List, Union, Optional\n\nfrom random import choice, randrange, sample, randint\nfrom numpy import arange\nfrom datetime import datetime, timedelta\nimport pytz\n\nutc = pytz.UTC\n\nGENERAL_ACCOUNTS = {\n \"recipient\": [\n \"katy parrow\",\n \"evan oslo\",\n \"william baker\",\n \"karen lancaster\",\n \"kyle gardner\",\n \"john jacob\",\n \"percy donald\",\n \"lisa macintyre\",\n ],\n \"vendor\": [\"target\", \"starbucks\", \"amazon\"],\n \"depositor\": [\"interest\", \"employer\"],\n}\n\nACCOUNT_NUMBER_LENGTH = 12\nCREDIT_CARD_NUMBER_LENGTH = 14\n\n\nBase = declarative_base()\n\n\nclass Account(Base):\n \"\"\"Accounts table.\n `session_id` is only meaningful for accounts generated by conversation sessions,\n when it is equal to `tracker.sender_id`.\n Since `id` autoincrements, it is used to generate unique account numbers by\n adding leading zeros to it.\n \"\"\"\n\n __tablename__ = \"account\"\n id = Column(Integer, primary_key=True)\n session_id = Column(String(255))\n account_holder_name = Column(String(255))\n currency = Column(String(255))\n\n\nclass CreditCard(Base):\n \"\"\"Credit cards table. `account_id` is an `Account.id`\"\"\"\n\n __tablename__ = \"creditcards\"\n id = Column(Integer, primary_key=True)\n credit_card_name = Column(String(255))\n minimum_balance = Column(REAL)\n current_balance = Column(REAL)\n account_id = Column(Integer)\n\n\nclass Transaction(Base):\n \"\"\"Transactions table. `to/from_acount_number` are `Account.id`s with leading zeros\"\"\"\n\n __tablename__ = \"transactions\"\n id = Column(Integer, primary_key=True)\n timestamp = Column(DateTime)\n amount = Column(REAL)\n from_account_number = Column(String(14))\n to_account_number = Column(String(14))\n\n\nclass RecipientRelationship(Base):\n \"\"\"Valid recipients table. `account_id` and `recipient_account_id` are `Account.id`'s\"\"\"\n\n __tablename__ = \"recipient_relationships\"\n id = Column(Integer, primary_key=True)\n account_id = Column(Integer)\n recipient_account_id = Column(Integer)\n recipient_nickname = Column(String(255))\n\n\ndef create_database(database_engine: Engine, database_name: Text):\n \"\"\"Try to connect to the database. 
Create it if it does not exist\"\"\"\n try:\n database_engine.connect()\n except sa.exc.OperationalError:\n default_db_url = f\"sqlite:///{database_name}.db\"\n default_engine = sa.create_engine(default_db_url)\n conn = default_engine.connect()\n conn.execute(\"commit\")\n conn.execute(f\"CREATE DATABASE {database_name}\")\n conn.close()\n\n\nclass ProfileDB:\n def __init__(self, db_engine: Engine):\n self.engine = db_engine\n self.create_tables()\n self.session = self.get_session()\n\n def get_session(self) -> Session:\n return sessionmaker(bind=self.engine, autoflush=True)()\n\n def create_tables(self):\n CreditCard.__table__.create(self.engine, checkfirst=True)\n Transaction.__table__.create(self.engine, checkfirst=True)\n RecipientRelationship.__table__.create(self.engine, checkfirst=True)\n Account.__table__.create(self.engine, checkfirst=True)\n\n def get_account(self, id: int):\n \"\"\"Get an `Account` object based on an `Account.id`\"\"\"\n return self.session.query(Account).filter(Account.id == id).first()\n\n def get_account_from_session_id(self, session_id: Text):\n \"\"\"Get an `Account` object based on a `Account.session_id`\"\"\"\n # if the action server restarts in the middle of a conversation, the db will need to be repopulated outside of an action_session_start\n if not self.check_session_id_exists(session_id):\n self.populate_profile_db(session_id)\n account = (\n self.session.query(Account).filter(Account.session_id == session_id).first()\n )\n return account\n\n @staticmethod\n def get_account_number(account: Union[CreditCard, Account]):\n \"\"\"Get a bank or credit card account number by adding the appropriate number of leading zeros to an `Account.id`\"\"\"\n if type(account) is CreditCard:\n return f\"%0.{CREDIT_CARD_NUMBER_LENGTH}d\" % account.id\n else:\n return f\"%0.{ACCOUNT_NUMBER_LENGTH}d\" % account.id\n\n def get_account_from_number(self, account_number: Text):\n \"\"\"Get a bank or credit card account based on an account number\"\"\"\n if len(account_number) == CREDIT_CARD_NUMBER_LENGTH:\n return (\n self.session.query(CreditCard)\n .filter(CreditCard.id == int(account_number))\n .first()\n )\n else:\n return (\n self.session.query(Account)\n .filter(Account.id == int(account_number))\n .first()\n )\n\n def get_recipient_from_name(self, session_id: Text, recipient_name: Text):\n \"\"\"Get a recipient based on the nickname.\n Take the first one if there are multiple that match.\n \"\"\"\n account = self.get_account_from_session_id(session_id)\n recipient = (\n self.session.query(RecipientRelationship)\n .filter(RecipientRelationship.account_id == account.id)\n .filter(RecipientRelationship.recipient_nickname == recipient_name.lower())\n .first()\n )\n recipient_account = self.get_account(recipient.recipient_account_id)\n return recipient_account\n\n def list_known_recipients(self, session_id: Text):\n \"\"\"List recipient nicknames available to an account holder\"\"\"\n recipients = (\n self.session.query(RecipientRelationship.recipient_nickname)\n .filter(\n RecipientRelationship.account_id\n == self.get_account_from_session_id(session_id).id\n )\n .all()\n )\n return [recipient.recipient_nickname for recipient in recipients]\n\n def check_session_id_exists(self, session_id: Text):\n \"\"\"Check if an account for `session_id` already exists\"\"\"\n return self.session.query(\n self.session.query(Account.session_id)\n .filter(Account.session_id == session_id)\n .exists()\n ).scalar()\n\n def get_account_balance(self, session_id: Text):\n \"\"\"Get the account 
balance for an account\"\"\"\n account_number = self.get_account_number(\n self.get_account_from_session_id(session_id)\n )\n spent = float(\n self.session.query(sa.func.sum(Transaction.amount))\n .filter(Transaction.from_account_number == account_number)\n .all()[0][0]\n )\n earned = float(\n self.session.query(sa.func.sum(Transaction.amount))\n .filter(Transaction.to_account_number == account_number)\n .all()[0][0]\n )\n return earned - spent\n\n def get_currency(self, session_id: Text):\n \"\"\"Get the currency for an account\"\"\"\n return (\n self.session.query(Account.currency)\n .filter(Account.session_id == session_id)\n .first()[0]\n )\n\n def search_transactions(\n self,\n session_id: Text,\n start_time: Optional[datetime] = None,\n end_time: Optional[datetime] = None,\n deposit: bool = False,\n vendor: Optional[Text] = None,\n ):\n \"\"\"Find all transactions for an account between `start_time` and `end_time`.\n Looks for spend transactions by default, set `deposit` to `True` to search earnings.\n Looks for transactions with anybody by default, set `vendor` to search by vendor\n \"\"\"\n account = self.get_account_from_session_id(session_id)\n account_number = self.get_account_number(account)\n if deposit:\n transactions = self.session.query(Transaction).filter(\n Transaction.to_account_number == account_number\n )\n elif vendor:\n to_account = (\n self.session.query(Account.id)\n .filter(Account.session_id.startswith(\"vendor_\"))\n .filter(Account.account_holder_name == vendor.lower())\n .first()\n )\n to_account_number = self.get_account_number(to_account)\n transactions = (\n self.session.query(Transaction)\n .filter(Transaction.from_account_number == account_number)\n .filter(Transaction.to_account_number == to_account_number)\n )\n else:\n transactions = self.session.query(Transaction).filter(\n Transaction.from_account_number == account_number\n )\n if start_time:\n transactions = transactions.filter(Transaction.timestamp >= start_time)\n if end_time:\n transactions = transactions.filter(Transaction.timestamp <= end_time)\n\n return transactions\n\n def list_credit_cards(self, session_id: Text):\n \"\"\"List valid credit cards for an acccount\"\"\"\n account = self.get_account_from_session_id(session_id)\n cards = (\n self.session.query(CreditCard)\n .filter(CreditCard.account_id == account.id)\n .all()\n )\n return [card.credit_card_name for card in cards]\n\n def get_credit_card(self, session_id: Text, credit_card_name: Text):\n \"\"\"Get a `CreditCard` object based on the card's name and the `session_id`\"\"\"\n account = self.get_account_from_session_id(session_id)\n return (\n self.session.query(CreditCard)\n .filter(CreditCard.account_id == account.id)\n .filter(CreditCard.credit_card_name == credit_card_name.lower())\n .first()\n )\n\n def get_credit_card_balance(\n self,\n session_id: Text,\n credit_card_name: Text,\n balance_type: Text = \"current_balance\",\n ):\n \"\"\"Get the balance for a credit card based on its name and the balance type\"\"\"\n balance_type = \"_\".join(balance_type.split())\n card = self.get_credit_card(session_id, credit_card_name)\n return getattr(card, balance_type)\n\n @staticmethod\n def list_balance_types():\n \"\"\"List valid balance types for credit cards\"\"\"\n return [\n \" \".join(name.split(\"_\"))\n for name in CreditCard.__table__.columns.keys()\n if name.endswith(\"balance\")\n ]\n\n def list_vendors(self):\n \"\"\"List valid vendors\"\"\"\n vendors = (\n self.session.query(Account.account_holder_name)\n 
.filter(Account.session_id.startswith(\"vendor_\"))\n .all()\n )\n return [vendor.account_holder_name for vendor in vendors]\n\n def pay_off_credit_card(\n self, session_id: Text, credit_card_name: Text, amount: float\n ):\n \"\"\"Do a transaction to move the specified amount from an account to a credit card\"\"\"\n account = self.get_account_from_session_id(session_id)\n account_number = self.get_account_number(account)\n credit_card = (\n self.session.query(CreditCard)\n .filter(CreditCard.account_id == account.id)\n .filter(CreditCard.credit_card_name == credit_card_name.lower())\n .first()\n )\n self.transact(\n account_number,\n self.get_account_number(credit_card),\n amount,\n )\n credit_card.current_balance -= amount\n if amount < credit_card.minimum_balance:\n credit_card.minimum_balance -= amount\n else:\n credit_card.minimum_balance = 0\n self.session.commit()\n\n def add_session_account(self, session_id: Text, name: Optional[Text] = \"\"):\n \"\"\"Add a new account for a new session_id. Assumes no such account exists yet.\"\"\"\n self.session.add(\n Account(session_id=session_id, account_holder_name=name, currency=\"$\")\n )\n\n def add_credit_cards(self, session_id: Text):\n \"\"\"Populate the creditcard table for a given session_id\"\"\"\n credit_card_names = [\"iron bank\", \"credit all\", \"emblem\", \"justice bank\"]\n credit_cards = [\n CreditCard(\n credit_card_name=cardname,\n minimum_balance=choice([20, 30, 40]),\n current_balance=choice(\n [round(amount, 2) for amount in list(arange(20, 500, 0.01))]\n ),\n account_id=self.get_account_from_session_id(session_id).id,\n )\n for cardname in credit_card_names\n ]\n self.session.add_all(credit_cards)\n\n def check_general_accounts_populated(\n self, general_account_names: Dict[Text, List[Text]]\n ):\n \"\"\"Check whether tables have been populated with global values for vendors, recipients, and depositors\"\"\"\n account_names = set(\n [\n name\n for list_names in general_account_names.values()\n for name in list_names\n ]\n )\n existing_accounts = self.session.query(Account.account_holder_name).filter(\n Account.account_holder_name.in_(account_names)\n )\n existing_account_names = set(\n [account.account_holder_name for account in existing_accounts.all()]\n )\n return account_names == existing_account_names\n\n def add_general_accounts(self, general_account_names: Dict[Text, List[Text]]):\n \"\"\"Populate tables with global values for vendors, recipients, and depositors\"\"\"\n general_accounts = [\n Account(session_id=f\"{prefix}_{id}\", account_holder_name=name)\n for prefix, names in general_account_names.items()\n for id, name in enumerate(names)\n ]\n\n for account in general_accounts:\n self.session.merge(account)\n self.session.commit()\n\n def add_recipients(self, session_id: Text):\n \"\"\"Populate recipients table\"\"\"\n account = self.get_account_from_session_id(session_id)\n recipients = (\n self.session.query(Account.account_holder_name, Account.id)\n .filter(Account.session_id.startswith(\"recipient_\"))\n .all()\n )\n session_recipients = sample(recipients, choice(list(range(3, len(recipients)))))\n relationships = [\n RecipientRelationship(\n account_id=account.id,\n recipient_account_id=recipient.id,\n recipient_nickname=recipient.account_holder_name,\n )\n for recipient in session_recipients\n ]\n self.session.add_all(relationships)\n\n def add_transactions(self, session_id: Text):\n \"\"\"Populate transactions table for a session ID with random transactions\"\"\"\n account_number = 
self.get_account_number(\n self.get_account_from_session_id(session_id)\n )\n vendors = (\n self.session.query(Account)\n .filter(Account.session_id.startswith(\"vendor_\"))\n .all()\n )\n depositors = (\n self.session.query(Account)\n .filter(Account.session_id.startswith(\"depositor_\"))\n .all()\n )\n\n start_date = utc.localize(datetime(2019, 1, 1))\n end_date = utc.localize(datetime.now())\n number_of_days = (end_date - start_date).days\n\n for vendor in vendors:\n rand_spend_amounts = sample(\n [round(amount, 2) for amount in list(arange(5, 50, 0.01))],\n number_of_days // 2,\n )\n\n rand_dates = [\n (start_date + timedelta(days=randrange(number_of_days)))\n for x in range(0, len(rand_spend_amounts))\n ]\n\n spend_transactions = [\n Transaction(\n from_account_number=account_number,\n to_account_number=self.get_account_number(vendor),\n amount=amount,\n timestamp=date,\n )\n for amount, date in zip(rand_spend_amounts, rand_dates)\n ]\n\n self.session.add_all(spend_transactions)\n\n for depositor in depositors:\n if depositor.account_holder_name == \"interest\":\n rand_deposit_amounts = sample(\n [round(amount, 2) for amount in list(arange(5, 20, 0.01))],\n number_of_days // 30,\n )\n else:\n rand_deposit_amounts = sample(\n [round(amount, 2) for amount in list(arange(1000, 2000, 0.01))],\n number_of_days // 14,\n )\n\n rand_dates = [\n (start_date + timedelta(days=randrange(number_of_days)))\n for x in range(0, len(rand_deposit_amounts))\n ]\n\n deposit_transactions = [\n Transaction(\n from_account_number=self.get_account_number(depositor),\n to_account_number=account_number,\n amount=amount,\n timestamp=date,\n )\n for amount, date in zip(rand_deposit_amounts, rand_dates)\n ]\n\n self.session.add_all(deposit_transactions)\n\n def populate_profile_db(self, session_id: Text):\n \"\"\"Initialize the database for a conversation session.\n Will populate all tables with sample values.\n If general accounts have already been populated, it will only\n add account-holder-specific values to tables.\n \"\"\"\n if not self.check_general_accounts_populated(GENERAL_ACCOUNTS):\n self.add_general_accounts(GENERAL_ACCOUNTS)\n if not self.check_session_id_exists(session_id):\n self.add_session_account(session_id)\n self.add_recipients(session_id)\n self.add_transactions(session_id)\n self.add_credit_cards(session_id)\n\n self.session.commit()\n\n def transact(\n self, from_account_number: Text, to_account_number: Text, amount: float\n ):\n \"\"\"Add a transation to the transaction table\"\"\"\n timestamp = datetime.now()\n transaction = Transaction(\n from_account_number=from_account_number,\n to_account_number=to_account_number,\n amount=amount,\n timestamp=timestamp,\n )\n self.session.add(transaction)\n self.session.commit()\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
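Editor's note on the financial-demo record above: ProfileDB.get_account_number turns autoincremented integer ids into fixed-width, zero-padded numbers, and the width alone (14 vs 12 digits) distinguishes credit cards from bank accounts. A quick check of that formatting trick:

ACCOUNT_NUMBER_LENGTH = 12
CREDIT_CARD_NUMBER_LENGTH = 14

assert f"%0.{ACCOUNT_NUMBER_LENGTH}d" % 42 == "000000000042"
assert len(f"%0.{CREDIT_CARD_NUMBER_LENGTH}d" % 42) == CREDIT_CARD_NUMBER_LENGTH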
SheldonTsui/SepStereo_ECCV2020 | [
"1ae44dc6ace71c164fefe0fd430078552e3b3f1f"
] | [
"demo_stereo.py"
] | [
"import os\nimport os.path as osp\nimport sys\nimport pdb\nimport argparse\nimport librosa\nimport numpy as np\nfrom tqdm import tqdm\nimport h5py\nfrom PIL import Image\nimport subprocess\nfrom options.test_options import TestOptions\nimport torchvision.transforms as transforms\nimport torch\nimport torchvision\nfrom data.stereo_dataset import generate_spectrogram\nfrom models.networks import VisualNet, VisualNetDilated, AudioNet, AssoConv, APNet, weights_init \n\ndef audio_normalize(samples, desired_rms = 0.1, eps = 1e-4):\n rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))\n samples = samples * (desired_rms / rms)\n return rms / desired_rms, samples\n\ndef main():\n #load test arguments\n opt = TestOptions().parse()\n opt.device = torch.device(\"cuda\")\n\n ## build network\n # visual net\n original_resnet = torchvision.models.resnet18(pretrained=True)\n if opt.visual_model == 'VisualNet':\n net_visual = VisualNet(original_resnet)\n elif opt.visual_model == 'VisualNetDilated':\n net_visual = VisualNetDilated(original_resnet)\n else:\n raise TypeError(\"please input correct visual model type\")\n\n if len(opt.weights_visual) > 0:\n print('Loading weights for visual stream')\n net_visual.load_state_dict(torch.load(opt.weights_visual), strict=True)\n\n # audio net\n net_audio = AudioNet(\n ngf=opt.unet_ngf,\n input_nc=opt.unet_input_nc,\n output_nc=opt.unet_output_nc,\n )\n net_audio.apply(weights_init)\n if len(opt.weights_audio) > 0:\n print('Loading weights for audio stream')\n net_audio.load_state_dict(torch.load(opt.weights_audio), strict=True)\n\n # fusion net\n if opt.fusion_model == 'none':\n net_fusion = None\n elif opt.fusion_model == 'AssoConv':\n net_fusion = AssoConv()\n elif opt.fusion_model == 'APNet':\n net_fusion = APNet()\n else:\n raise TypeError(\"Please input correct fusion model type\") \n\n if net_fusion is not None and len(opt.weights_fusion) > 0:\n net_fusion.load_state_dict(torch.load(opt.weights_fusion), strict=True)\n\n net_visual.to(opt.device)\n net_audio.to(opt.device)\n net_visual.eval()\n net_audio.eval()\n if net_fusion is not None:\n net_fusion.to(opt.device)\n net_fusion.eval()\n\n test_h5_path = opt.hdf5FolderPath\n print(\"---Testing---: \", test_h5_path)\n testf = h5py.File(test_h5_path, 'r')\n audio_list = testf['audio'][:]\n\n # ensure output dir\n if not osp.exists(opt.output_dir_root):\n os.mkdir(opt.output_dir_root)\n\n for audio_file in tqdm(audio_list):\n audio_file = bytes.decode(audio_file)\n video_path = audio_file.replace('audio_resave', 'frames')[:-4]\n input_audio_path = audio_file\n video_frame_path = video_path\n audio_id = audio_file.split('/')[-1][:-4]\n cur_output_dir_root = os.path.join(opt.output_dir_root, audio_id)\n\n #load the audio to perform separation\n audio, audio_rate = librosa.load(input_audio_path, sr=opt.audio_sampling_rate, mono=False)\n audio_channel1 = audio[0,:]\n audio_channel2 = audio[1,:]\n\n\t#define the transformation to perform on visual frames\n vision_transform_list = [transforms.Resize((224,448)), transforms.ToTensor()]\n vision_transform_list.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))\n vision_transform = transforms.Compose(vision_transform_list)\n\n\t#perform spatialization over the whole audio using a sliding window approach\n overlap_count = np.zeros((audio.shape)) #count the number of times a data point is calculated\n binaural_audio = np.zeros((audio.shape))\n\n\t#perform spatialization over the whole spectrogram in a siliding-window fashion\n 
sliding_window_start = 0\n data = {}\n samples_per_window = int(opt.audio_length * opt.audio_sampling_rate)\n while sliding_window_start + samples_per_window < audio.shape[-1]:\n sliding_window_end = sliding_window_start + samples_per_window\n normalizer, audio_segment = audio_normalize(audio[:,sliding_window_start:sliding_window_end])\n audio_segment_channel1 = audio_segment[0,:]\n audio_segment_channel2 = audio_segment[1,:]\n audio_segment_mix = audio_segment_channel1 + audio_segment_channel2\n\n audio_diff = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension\n audio_mix = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension\n #get the frame index for current window\n frame_index = int(round((((sliding_window_start + samples_per_window / 2.0) / audio.shape[-1]) * opt.input_audio_length + 0.05) * 10 ))\n image = Image.open(os.path.join(video_frame_path, str(frame_index) + '.jpg')).convert('RGB')\n #image = image.transpose(Image.FLIP_LEFT_RIGHT)\n frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension\n # data to device\n audio_diff = audio_diff.to(opt.device)\n audio_mix = audio_mix.to(opt.device)\n frame = frame.to(opt.device)\n\n vfeat = net_visual(frame) \n if net_fusion is not None: \n upfeatures, output = net_audio(audio_diff, audio_mix, vfeat, return_upfeatures=True)\n output.update(net_fusion(audio_mix, vfeat, upfeatures)) \n else:\n output = net_audio(audio_diff, audio_mix, vfeat)\n\n\t #ISTFT to convert back to audio\n if opt.use_fusion_pred:\n pred_left_spec = output['pred_left'][0,:,:,:].data[:].cpu().numpy()\n pred_left_spec = pred_left_spec[0,:,:] + 1j * pred_left_spec[1,:,:]\n reconstructed_signal_left = librosa.istft(pred_left_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)\n pred_right_spec = output['pred_right'][0,:,:,:].data[:].cpu().numpy()\n pred_right_spec = pred_right_spec[0,:,:] + 1j * pred_right_spec[1,:,:]\n reconstructed_signal_right = librosa.istft(pred_right_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)\n else:\n predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()\n reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])\n reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)\n reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2\n reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2\n reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer\n\n binaural_audio[:,sliding_window_start:sliding_window_end] = binaural_audio[:,sliding_window_start:sliding_window_end] + reconstructed_binaural\n overlap_count[:,sliding_window_start:sliding_window_end] = overlap_count[:,sliding_window_start:sliding_window_end] + 1\n sliding_window_start = sliding_window_start + int(opt.hop_size * opt.audio_sampling_rate)\n\n\t#deal with the last segment\n normalizer, audio_segment = audio_normalize(audio[:,-samples_per_window:])\n audio_segment_channel1 = audio_segment[0,:]\n audio_segment_channel2 = audio_segment[1,:]\n audio_diff = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - 
audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension\n audio_mix = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension\n\t#get the frame index for last window\n frame_index = int(round(((opt.input_audio_length - opt.audio_length / 2.0) + 0.05) * 10))\n image = Image.open(os.path.join(video_frame_path, str(frame_index) + '.jpg')).convert('RGB')\n\t#image = image.transpose(Image.FLIP_LEFT_RIGHT)\n frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension\n # data to device\n audio_diff = audio_diff.to(opt.device)\n audio_mix = audio_mix.to(opt.device)\n frame = frame.to(opt.device)\n\n vfeat = net_visual(frame) \n if net_fusion is not None: \n upfeatures, output = net_audio(audio_diff, audio_mix, vfeat, return_upfeatures=True)\n output.update(net_fusion(audio_mix, vfeat, upfeatures)) \n else:\n output = net_audio(audio_diff, audio_mix, vfeat)\n\n\t#ISTFT to convert back to audio\n if opt.use_fusion_pred:\n pred_left_spec = output['pred_left'][0,:,:,:].data[:].cpu().numpy()\n pred_left_spec = pred_left_spec[0,:,:] + 1j * pred_left_spec[1,:,:]\n reconstructed_signal_left = librosa.istft(pred_left_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)\n pred_right_spec = output['pred_right'][0,:,:,:].data[:].cpu().numpy()\n pred_right_spec = pred_right_spec[0,:,:] + 1j * pred_right_spec[1,:,:]\n reconstructed_signal_right = librosa.istft(pred_right_spec, hop_length=160, win_length=400, center=True, length=samples_per_window)\n else:\n predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()\n reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])\n reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)\n reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2\n reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2\n reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer\n\n #add the spatialized audio to reconstructed_binaural\n binaural_audio[:,-samples_per_window:] = binaural_audio[:,-samples_per_window:] + reconstructed_binaural\n overlap_count[:,-samples_per_window:] = overlap_count[:,-samples_per_window:] + 1\n\n\t#divide aggregated predicted audio by their corresponding counts\n predicted_binaural_audio = np.divide(binaural_audio, overlap_count)\n\n\t#check output directory\n if not os.path.isdir(cur_output_dir_root):\n os.mkdir(cur_output_dir_root)\n\n mixed_mono = (audio_channel1 + audio_channel2) / 2\n librosa.output.write_wav(os.path.join(cur_output_dir_root, 'predicted_binaural.wav'), predicted_binaural_audio, opt.audio_sampling_rate)\n librosa.output.write_wav(os.path.join(cur_output_dir_root, 'mixed_mono.wav'), mixed_mono, opt.audio_sampling_rate)\n librosa.output.write_wav(os.path.join(cur_output_dir_root, 'input_binaural.wav'), audio, opt.audio_sampling_rate)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.expand_dims",
"torch.load",
"numpy.mean",
"torch.device",
"numpy.zeros",
"numpy.divide"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
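The record above slides a fixed-length window over the stereo input, RMS-normalizes each segment before the network and un-normalizes the prediction afterwards, then averages the overlapping predictions by dividing the accumulated output by a per-sample overlap counter (with the final tail window handled exactly once). Below is a minimal, self-contained sketch of that overlap-add averaging pattern; overlap_add_average and the identity predict callable are illustrative stand-ins, not the repo's API, and the sketch assumes hop_len <= window_len so every sample is covered at least once. Note also that librosa.output.write_wav, called in the record above, exists only in librosa < 0.8; newer librosa versions delegate writing to the soundfile package.

import numpy as np

def overlap_add_average(audio, window_len, hop_len, predict):
    """audio: (channels, samples); predict maps a (channels, window_len)
    segment to a prediction of the same shape. Assumes hop_len <= window_len."""
    acc = np.zeros_like(audio, dtype=np.float64)    # accumulated predictions
    count = np.zeros_like(audio, dtype=np.float64)  # how many windows hit each sample
    start = 0
    while start + window_len < audio.shape[-1]:     # same loop condition as above
        seg = audio[:, start:start + window_len]
        acc[:, start:start + window_len] += predict(seg)
        count[:, start:start + window_len] += 1
        start += hop_len
    # handle the last segment exactly once, as the script above does
    acc[:, -window_len:] += predict(audio[:, -window_len:])
    count[:, -window_len:] += 1
    return acc / count  # element-wise average over overlapping windows

# toy usage: an identity "model" must reconstruct the input exactly
x = np.random.randn(2, 16000)
y = overlap_add_average(x, window_len=4000, hop_len=1000, predict=lambda s: s)
assert np.allclose(x, y)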
erego/anomalydetection | [
"838a5c39c3350b2f9d7b8988d877372c07d9e56f"
] | [
"pyande/data/draw.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mptches\nimport seaborn as sns\nimport scipy.stats as sts\n\n\ndef draw_correlation_matrix(sigma, data):\n\n # Get correlation matrix and draw it\n corr_matrix = np.corrcoef(sigma)\n features = data.columns.values.tolist()\n\n sns.set(style=\"white\")\n sns.heatmap(corr_matrix, annot=True, fmt=\".2f\", xticklabels=features, yticklabels=features)\n\n plt.xlabel('Correlation Matrix')\n plt.show()\n\n\ndef draw_roc_curve(total_positives, total_negatives, statistical_measures, cost_fp, cost_fn):\n\n total_population = total_positives + total_negatives\n\n # epsilon\n epsilon = statistical_measures[:, 0]\n\n # RATIO\n # True positive ratio\n tp_ratio = statistical_measures[:, 1] / total_population\n\n # False positive ratio\n fp_ratio = statistical_measures[:, 2] / total_population\n\n # False negative ratio\n fn_ratio = statistical_measures[:, 3] / total_population\n\n # True negative ratio\n tn_ratio = statistical_measures[:, 4] / total_population\n\n # RATE\n # True positive rate\n tp_rate = statistical_measures[:, 1] / total_positives\n\n # False positive rate\n fp_rate = statistical_measures[:, 2] / total_negatives\n\n cost = fp_ratio * cost_fp + fn_ratio * cost_fn\n\n plt.figure()\n\n red_patch = mptches.Patch(color='red', label='The false positive ratio')\n blue_patch = mptches.Patch(color='blue', label='The false negative ratio')\n plt.legend(handles=[red_patch, blue_patch])\n\n plt.plot(epsilon, fp_ratio, 'r', epsilon, fn_ratio, 'b', epsilon, cost, 'y')\n\n plt.xlabel('epsilon')\n plt.ylabel('False and True Negative Rate')\n\n x_linear = np.arange(0., 1.1, 0.1)\n\n plt.figure()\n plt.plot(fp_rate, tp_rate, 'b', x_linear, x_linear, 'r--')\n\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic example')\n plt.legend(loc=\"lower right\")\n plt.show()\n\n\ndef draw_histogram_feature(x_data, x_header=None, y_value=None):\n m = len(x_data[0, :])\n for i in range(m):\n feature_value = x_data[:, i]\n\n if y_value is None:\n # the histogram of the data\n plt.hist(feature_value, 100, normed=1, facecolor='g', alpha=0.75)\n\n mean = np.mean(feature_value, axis=0)\n sigma = np.cov(feature_value, rowvar=False)\n\n num_bins = 50\n # the histogram of the data\n plt.hist(feature_value, num_bins, facecolor='green', alpha=0.5)\n plt.xlabel('feature_value ' + str(i))\n plt.show()\n\n n, bins, patches = plt.hist(feature_value, num_bins, normed=1, facecolor='green',\n alpha=0.5)\n # add a 'best fit' line\n y = sts.norm.pdf(bins, mean, sigma)\n plt.plot(bins, y, 'r--')\n plt.show()\n\n else:\n\n plt.hist(feature_value, 50, normed=1, facecolor='g', alpha=0.75)\n n, bins, patches = plt.hist((y_value[:, i]), 50, normed=1, color='r', alpha=0.75)\n\n mean = np.mean(feature_value, axis=0)\n sigma = np.cov(feature_value, rowvar=False)\n\n y = sts.norm.pdf(bins, mean, sigma)\n plt.plot(bins, y, 'r--')\n plt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.patches.Patch",
"matplotlib.pyplot.title",
"scipy.stats.norm.pdf",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"numpy.cov",
"numpy.mean",
"numpy.corrcoef",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
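draw_roc_curve in the record above plots false-positive/false-negative ratios against epsilon and then an ROC curve from precomputed counts in statistical_measures. The sketch below (a hypothetical helper, not part of pyande) shows the threshold sweep behind such a curve: sort by score, take cumulative true/false positive counts at every cut-off, and normalize by the class totals. Separately, hist(..., normed=1), used in draw_histogram_feature above, was removed in matplotlib 3.1; density=True is the current spelling.

import numpy as np
import matplotlib.pyplot as plt

def roc_points(scores, labels):
    """scores: anomaly scores (higher = more anomalous); labels: 1 = anomaly."""
    order = np.argsort(-scores)            # descending score = loosening threshold
    labels = labels[order].astype(float)
    tp = np.cumsum(labels)                 # true positives at each cut-off
    fp = np.cumsum(1.0 - labels)           # false positives at each cut-off
    return fp / max(fp[-1], 1.0), tp / max(tp[-1], 1.0)

# synthetic data: 500 normal points, 50 anomalies with shifted scores
rng = np.random.default_rng(0)
scores = np.concatenate([rng.normal(0, 1, 500), rng.normal(2, 1, 50)])
labels = np.concatenate([np.zeros(500), np.ones(50)])
fpr, tpr = roc_points(scores, labels)
plt.plot(fpr, tpr, 'b', [0, 1], [0, 1], 'r--')  # curve plus chance diagonal
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()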
mr-c/cogent3 | [
"5c4dbd9e3a970004e88fab636ba1dc330280c1a9"
] | [
"src/cogent3/evolve/models.py"
] | [
"#! /usr/bin/env python\n\"\"\"A collection of pre-defined models. These are provided for convenience so that\nusers do not need to keep reconstructing the standard models. We encourage users\nto think about the assumptions in these models and consider if their problem could\nbenefit from a user defined model.\nNote that models that do not traditionally deal with gaps are implemented with\ngap recoding that will convert gaps to Ns, and model gaps set to False.\"\"\"\n\n# this file using functions etc. to allow each model to serve as an example for users\n# wishing to construct their own models\nfrom itertools import permutations\n\n# The models are constructed in a strait forward manner with no attempt to condense\nimport numpy\n\nfrom cogent3 import DNA\nfrom cogent3.evolve import ns_substitution_model, substitution_model\nfrom cogent3.evolve.predicate import MotifChange, omega, replacement\nfrom cogent3.evolve.solved_models import F81, HKY85, TN93\nfrom cogent3.evolve.substitution_model import _SubstitutionModel\nfrom cogent3.util.table import Table\n\n\n__author__ = \"Matthew Wakefield\"\n__copyright__ = \"Copyright 2007-2020, The Cogent Project\"\n__credits__ = [\"Matthew Wakefield\", \"Peter Maxwell\", \"Gavin Huttley\", \"James Kondilios\"]\n__license__ = \"BSD-3\"\n__version__ = \"2020.12.21a\"\n__maintainer__ = \"Matthew Wakefield\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\nnucleotide_models = [\n \"JC69\",\n \"K80\",\n \"F81\",\n \"HKY85\",\n \"TN93\",\n \"GTR\",\n \"ssGN\",\n \"GN\",\n \"BH\",\n \"DT\",\n]\n\ncodon_models = [\n \"CNFGTR\",\n \"CNFHKY\",\n \"MG94HKY\",\n \"MG94GTR\",\n \"GY94\",\n \"Y98\",\n \"H04G\",\n \"H04GK\",\n \"H04GGK\",\n \"GNC\",\n]\n\nprotein_models = [\"DSO78\", \"AH96\", \"AH96_mtmammals\", \"JTT92\", \"WG01\"]\n\nmodels = nucleotide_models + codon_models + protein_models\n\n# Substitution model rate matrix predicates\n_gtr_preds = [MotifChange(x, y) for x, y in [\"AC\", \"AG\", \"AT\", \"CG\", \"CT\"]]\n_kappa = (~MotifChange(\"R\", \"Y\")).aliased(\"kappa\")\n_omega = omega\n_cg = MotifChange(\"CG\").aliased(\"G\")\n_cg_k = (_cg & _kappa).aliased(\"G.K\")\n\n\ndef _make_gn_preds():\n _general_preds = []\n for f, t in permutations(\"ACTG\", 2):\n if f != \"T\" or t != \"G\": # Match GTR's reference cell\n _general_preds.append(MotifChange(f, t, forward_only=True))\n return _general_preds\n\n\n_general_preds = _make_gn_preds()\n\n\ndef _make_symn_preds():\n pair = {\"A\": \"T\", \"T\": \"A\", \"G\": \"C\", \"C\": \"G\"}\n sym_preds = []\n for f, t in \"AG\", \"AT\", \"CG\", \"CT\", \"GT\":\n sym_preds.append(\n MotifChange(f, t, forward_only=True)\n | MotifChange(pair[f], pair[t], forward_only=True)\n )\n return sym_preds\n\n\n_sym_preds = _make_symn_preds()\n\n\ndef BH(optimise_motif_probs=True, **kw):\n \"\"\"Barry and Hartigan Discrete Time substitution model\n\n Barry and Hartigan 1987. 
Biometrics 43: 261–76.\n \"\"\"\n return DT(\n optimise_motif_probs=optimise_motif_probs, motif_length=1, name=\"BH\", **kw\n )\n\n\ndef DT(optimise_motif_probs=True, motif_length=1, **kw):\n \"\"\"\n Discrete Time substitution model (non-stationary, non-reversible).\n motif_length=2 makes this a dinucleotide model, motif_length=3 a\n trinucleotide model.\n \"\"\"\n alpha = DNA.alphabet.get_word_alphabet(motif_length)\n kw[\"optimise_motif_probs\"] = optimise_motif_probs\n kw[\"mprob_model\"] = \"tuple\"\n kw[\"name\"] = kw.get(\"name\", f\"DT-{motif_length}\")\n sm = ns_substitution_model.DiscreteSubstitutionModel(alpha, **kw)\n return sm\n\n\ndef GN(optimise_motif_probs=True, **kw):\n \"\"\"General Markov Nucleotide (non-stationary, non-reversible).\n\n Kaehler, Yap, Zhang, Huttley, 2015, Sys Biol 64 (2): 281–93\"\"\"\n required = dict(\n optimise_motif_probs=optimise_motif_probs, name=\"GN\", predicates=_general_preds\n )\n kwargs = dict(recode_gaps=True, model_gaps=False)\n kwargs.update(kw)\n kwargs.update(required)\n sm = ns_substitution_model.NonReversibleNucleotide(**kwargs)\n return sm\n\n\ndef ssGN(optimise_motif_probs=True, **kw):\n \"\"\"strand-symmetric general Markov nucleotide (non-stationary, non-reversible).\n\n Kaehler, 2017, Journal of Theoretical Biology 420: 144–51\"\"\"\n # note the StrandSymmetric class predefines the predicates and name\n sm = ns_substitution_model.StrandSymmetric(\n optimise_motif_probs=optimise_motif_probs, name=\"ssGN\", **kw\n )\n return sm\n\n\ndef K80(**kw):\n \"\"\"Kimura 1980\"\"\"\n required = dict(equal_motif_probs=True, optimise_motif_probs=False)\n kwargs = {}\n kwargs.update(kw)\n kwargs.update(required)\n return HKY85(**kwargs)\n\n\ndef JC69(**kw):\n \"\"\"Jukes and Cantor's 1969 model\"\"\"\n required = dict(equal_motif_probs=True, optimise_motif_probs=False)\n kwargs = {}\n kwargs.update(kw)\n kwargs.update(required)\n return F81(**kwargs)\n\n\ndef GTR(**kw):\n \"\"\"General Time Reversible nucleotide substitution model.\"\"\"\n required = dict(\n name=\"GTR\", predicates=_gtr_preds, mprob_model=\"conditional\", model_gaps=False\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleNucleotide(**kwargs)\n\n\n# Codon Models\ndef CNFGTR(**kw):\n \"\"\"Conditional nucleotide frequency codon substitution model, GTR variant\n (with params analagous to the nucleotide GTR model).\n\n Yap, Lindsay, Easteal and Huttley, 2010, Mol Biol Evol 27: 726-734\"\"\"\n required = dict(\n name=\"CNFGTR\",\n predicates=_gtr_preds + [_omega],\n mprob_model=\"conditional\",\n model_gaps=False,\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleCodon(**kwargs)\n\n\ndef CNFHKY(**kw):\n \"\"\"Conditional nucleotide frequency codon substitution model, HKY variant\n (with kappa, the ratio of transitions to transversions)\n\n Yap, Lindsay, Easteal and Huttley, 2010, Mol Biol Evol 27: 726-734\"\"\"\n required = dict(\n name=\"CNFHKY\",\n predicates=[_kappa, _omega],\n mprob_model=\"conditional\",\n model_gaps=False,\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleCodon(**kwargs)\n\n\ndef MG94HKY(**kw):\n \"\"\"Muse and Gaut 1994 codon substitution model, HKY variant (with kappa,\n the ratio of transitions to transversions)\n\n Muse and Gaut, 1994, Mol Biol Evol, 11, 715-24\"\"\"\n required 
= dict(\n name=\"MG94HKY\",\n predicates=[_kappa, _omega],\n mprob_model=\"monomer\",\n model_gaps=False,\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleCodon(**kwargs)\n\n\ndef MG94GTR(**kw):\n \"\"\"Muse and Gaut 1994 codon substitution model, GTR variant (with params\n analagous to the nucleotide GTR model)\n\n Muse and Gaut, 1994, Mol Biol Evol, 11, 715-24\"\"\"\n required = dict(\n name=\"MG94GTR\",\n predicates=_gtr_preds + [_omega],\n mprob_model=\"monomer\",\n model_gaps=False,\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleCodon(**kwargs)\n\n\ndef GY94(**kw):\n \"\"\"Goldman and Yang 1994 codon substitution model.\n\n N Goldman and Z Yang, 1994, Mol Biol Evol, 11(5):725-36.\"\"\"\n return Y98(**kw)\n\n\ndef Y98(**kw):\n \"\"\"Yang's 1998 substitution model, a derivative of the GY94.\n\n Z Yang, 1998, Mol Biol Evol, 15(5):568-73\"\"\"\n required = dict(\n name=\"Y98\", predicates=[_kappa, _omega], mprob_model=\"tuple\", model_gaps=False\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleCodon(**kwargs)\n\n\ndef H04G(**kw):\n \"\"\"Huttley 2004 CpG substitution model. Includes a term for substitutions\n to or from CpG's.\n\n GA Huttley, 2004, Mol Biol Evol, 21(9):1760-8\"\"\"\n required = dict(\n name=\"H04G\",\n predicates=[_cg, _kappa, _omega],\n mprob_model=\"tuple\",\n model_gaps=False,\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleCodon(**kwargs)\n\n\ndef H04GK(**kw):\n \"\"\"Huttley 2004 CpG substitution model. Includes a term for transition\n substitutions to or from CpG's.\n\n GA Huttley, 2004, Mol Biol Evol, 21(9):1760-8\"\"\"\n required = dict(\n name=\"H04GK\",\n predicates=[_cg_k, _kappa, _omega],\n mprob_model=\"tuple\",\n model_gaps=False,\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleCodon(**kwargs)\n\n\ndef H04GGK(**kw):\n \"\"\"Huttley 2004 CpG substitution model. 
Includes a general term for\n substitutions to or from CpG's and an adjustment for CpG transitions.\n\n GA Huttley, 2004, Mol Biol Evol, 21(9):1760-8\"\"\"\n required = dict(\n name=\"H04GGK\",\n predicates=[_cg, _cg_k, _kappa, _omega],\n mprob_model=\"tuple\",\n model_gaps=False,\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleCodon(**kwargs)\n\n\ndef GNC(optimise_motif_probs=True, **kw):\n \"\"\"General Nucleotide Codon, a non-reversible codon model.\n\n Kaehler, Yap, Huttley, 2017, Gen Biol Evol 9(1): 134–49\"\"\"\n required = dict(\n name=\"GNC\",\n optimise_motif_probs=optimise_motif_probs,\n predicates=_general_preds + [_omega],\n mprob_model=\"tuple\",\n model_gaps=False,\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return ns_substitution_model.NonReversibleCodon(**kwargs)\n\n\n# Protein Models\n\n# Empirical Protein Models\n\nDSO78_matrix = numpy.array(\n [\n [\n 0.00000000e00,\n 3.60000000e01,\n 1.20000000e02,\n 1.98000000e02,\n 1.80000000e01,\n 2.40000000e02,\n 2.30000000e01,\n 6.50000000e01,\n 2.60000000e01,\n 4.10000000e01,\n 7.20000000e01,\n 9.80000000e01,\n 2.50000000e02,\n 8.90000000e01,\n 2.70000000e01,\n 4.09000000e02,\n 3.71000000e02,\n 2.08000000e02,\n 0.00000000e00,\n 2.40000000e01,\n ],\n [\n 3.60000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 1.10000000e01,\n 2.80000000e01,\n 4.40000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 1.90000000e01,\n 0.00000000e00,\n 2.30000000e01,\n 1.61000000e02,\n 1.60000000e01,\n 4.90000000e01,\n 0.00000000e00,\n 9.60000000e01,\n ],\n [\n 1.20000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 1.15300000e03,\n 0.00000000e00,\n 1.25000000e02,\n 8.60000000e01,\n 2.40000000e01,\n 7.10000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 9.05000000e02,\n 1.30000000e01,\n 1.34000000e02,\n 0.00000000e00,\n 9.50000000e01,\n 6.60000000e01,\n 1.80000000e01,\n 0.00000000e00,\n 0.00000000e00,\n ],\n [\n 1.98000000e02,\n 0.00000000e00,\n 1.15300000e03,\n 0.00000000e00,\n 0.00000000e00,\n 8.10000000e01,\n 4.30000000e01,\n 6.10000000e01,\n 8.30000000e01,\n 1.10000000e01,\n 3.00000000e01,\n 1.48000000e02,\n 5.10000000e01,\n 7.16000000e02,\n 1.00000000e00,\n 7.90000000e01,\n 3.40000000e01,\n 3.70000000e01,\n 0.00000000e00,\n 2.20000000e01,\n ],\n [\n 1.80000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 1.50000000e01,\n 4.80000000e01,\n 1.96000000e02,\n 0.00000000e00,\n 1.57000000e02,\n 9.20000000e01,\n 1.40000000e01,\n 1.10000000e01,\n 0.00000000e00,\n 1.40000000e01,\n 4.60000000e01,\n 1.30000000e01,\n 1.20000000e01,\n 7.60000000e01,\n 6.98000000e02,\n ],\n [\n 2.40000000e02,\n 1.10000000e01,\n 1.25000000e02,\n 8.10000000e01,\n 1.50000000e01,\n 0.00000000e00,\n 1.00000000e01,\n 0.00000000e00,\n 2.70000000e01,\n 7.00000000e00,\n 1.70000000e01,\n 1.39000000e02,\n 3.40000000e01,\n 2.80000000e01,\n 9.00000000e00,\n 2.34000000e02,\n 3.00000000e01,\n 5.40000000e01,\n 0.00000000e00,\n 0.00000000e00,\n ],\n [\n 2.30000000e01,\n 2.80000000e01,\n 8.60000000e01,\n 4.30000000e01,\n 4.80000000e01,\n 1.00000000e01,\n 0.00000000e00,\n 7.00000000e00,\n 2.60000000e01,\n 4.40000000e01,\n 0.00000000e00,\n 5.35000000e02,\n 9.40000000e01,\n 6.06000000e02,\n 2.40000000e02,\n 3.50000000e01,\n 2.20000000e01,\n 4.40000000e01,\n 2.70000000e01,\n 1.27000000e02,\n ],\n [\n 6.50000000e01,\n 4.40000000e01,\n 2.40000000e01,\n 6.10000000e01,\n 
1.96000000e02,\n 0.00000000e00,\n 7.00000000e00,\n 0.00000000e00,\n 4.60000000e01,\n 2.57000000e02,\n 3.36000000e02,\n 7.70000000e01,\n 1.20000000e01,\n 1.80000000e01,\n 6.40000000e01,\n 2.40000000e01,\n 1.92000000e02,\n 8.89000000e02,\n 0.00000000e00,\n 3.70000000e01,\n ],\n [\n 2.60000000e01,\n 0.00000000e00,\n 7.10000000e01,\n 8.30000000e01,\n 0.00000000e00,\n 2.70000000e01,\n 2.60000000e01,\n 4.60000000e01,\n 0.00000000e00,\n 1.80000000e01,\n 2.43000000e02,\n 3.18000000e02,\n 3.30000000e01,\n 1.53000000e02,\n 4.64000000e02,\n 9.60000000e01,\n 1.36000000e02,\n 1.00000000e01,\n 0.00000000e00,\n 1.30000000e01,\n ],\n [\n 4.10000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 1.10000000e01,\n 1.57000000e02,\n 7.00000000e00,\n 4.40000000e01,\n 2.57000000e02,\n 1.80000000e01,\n 0.00000000e00,\n 5.27000000e02,\n 3.40000000e01,\n 3.20000000e01,\n 7.30000000e01,\n 1.50000000e01,\n 1.70000000e01,\n 3.30000000e01,\n 1.75000000e02,\n 4.60000000e01,\n 2.80000000e01,\n ],\n [\n 7.20000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 3.00000000e01,\n 9.20000000e01,\n 1.70000000e01,\n 0.00000000e00,\n 3.36000000e02,\n 2.43000000e02,\n 5.27000000e02,\n 0.00000000e00,\n 1.00000000e00,\n 1.70000000e01,\n 1.14000000e02,\n 9.00000000e01,\n 6.20000000e01,\n 1.04000000e02,\n 2.58000000e02,\n 0.00000000e00,\n 0.00000000e00,\n ],\n [\n 9.80000000e01,\n 0.00000000e00,\n 9.05000000e02,\n 1.48000000e02,\n 1.40000000e01,\n 1.39000000e02,\n 5.35000000e02,\n 7.70000000e01,\n 3.18000000e02,\n 3.40000000e01,\n 1.00000000e00,\n 0.00000000e00,\n 4.20000000e01,\n 1.03000000e02,\n 3.20000000e01,\n 4.95000000e02,\n 2.29000000e02,\n 1.50000000e01,\n 2.30000000e01,\n 9.50000000e01,\n ],\n [\n 2.50000000e02,\n 1.90000000e01,\n 1.30000000e01,\n 5.10000000e01,\n 1.10000000e01,\n 3.40000000e01,\n 9.40000000e01,\n 1.20000000e01,\n 3.30000000e01,\n 3.20000000e01,\n 1.70000000e01,\n 4.20000000e01,\n 0.00000000e00,\n 1.53000000e02,\n 1.03000000e02,\n 2.45000000e02,\n 7.80000000e01,\n 4.80000000e01,\n 0.00000000e00,\n 0.00000000e00,\n ],\n [\n 8.90000000e01,\n 0.00000000e00,\n 1.34000000e02,\n 7.16000000e02,\n 0.00000000e00,\n 2.80000000e01,\n 6.06000000e02,\n 1.80000000e01,\n 1.53000000e02,\n 7.30000000e01,\n 1.14000000e02,\n 1.03000000e02,\n 1.53000000e02,\n 0.00000000e00,\n 2.46000000e02,\n 5.60000000e01,\n 5.30000000e01,\n 3.50000000e01,\n 0.00000000e00,\n 0.00000000e00,\n ],\n [\n 2.70000000e01,\n 2.30000000e01,\n 0.00000000e00,\n 1.00000000e00,\n 1.40000000e01,\n 9.00000000e00,\n 2.40000000e02,\n 6.40000000e01,\n 4.64000000e02,\n 1.50000000e01,\n 9.00000000e01,\n 3.20000000e01,\n 1.03000000e02,\n 2.46000000e02,\n 0.00000000e00,\n 1.54000000e02,\n 2.60000000e01,\n 2.40000000e01,\n 2.01000000e02,\n 8.00000000e00,\n ],\n [\n 4.09000000e02,\n 1.61000000e02,\n 9.50000000e01,\n 7.90000000e01,\n 4.60000000e01,\n 2.34000000e02,\n 3.50000000e01,\n 2.40000000e01,\n 9.60000000e01,\n 1.70000000e01,\n 6.20000000e01,\n 4.95000000e02,\n 2.45000000e02,\n 5.60000000e01,\n 1.54000000e02,\n 0.00000000e00,\n 5.50000000e02,\n 3.00000000e01,\n 7.50000000e01,\n 3.40000000e01,\n ],\n [\n 3.71000000e02,\n 1.60000000e01,\n 6.60000000e01,\n 3.40000000e01,\n 1.30000000e01,\n 3.00000000e01,\n 2.20000000e01,\n 1.92000000e02,\n 1.36000000e02,\n 3.30000000e01,\n 1.04000000e02,\n 2.29000000e02,\n 7.80000000e01,\n 5.30000000e01,\n 2.60000000e01,\n 5.50000000e02,\n 0.00000000e00,\n 1.57000000e02,\n 0.00000000e00,\n 4.20000000e01,\n ],\n [\n 2.08000000e02,\n 4.90000000e01,\n 1.80000000e01,\n 3.70000000e01,\n 1.20000000e01,\n 5.40000000e01,\n 4.40000000e01,\n 
8.89000000e02,\n 1.00000000e01,\n 1.75000000e02,\n 2.58000000e02,\n 1.50000000e01,\n 4.80000000e01,\n 3.50000000e01,\n 2.40000000e01,\n 3.00000000e01,\n 1.57000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 2.80000000e01,\n ],\n [\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 7.60000000e01,\n 0.00000000e00,\n 2.70000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 4.60000000e01,\n 0.00000000e00,\n 2.30000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 2.01000000e02,\n 7.50000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 6.10000000e01,\n ],\n [\n 2.40000000e01,\n 9.60000000e01,\n 0.00000000e00,\n 2.20000000e01,\n 6.98000000e02,\n 0.00000000e00,\n 1.27000000e02,\n 3.70000000e01,\n 1.30000000e01,\n 2.80000000e01,\n 0.00000000e00,\n 9.50000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 8.00000000e00,\n 3.40000000e01,\n 4.20000000e01,\n 2.80000000e01,\n 6.10000000e01,\n 0.00000000e00,\n ],\n ]\n)\n\nDSO78_freqs = {\n \"A\": 0.087126912873087131,\n \"C\": 0.033473966526033475,\n \"E\": 0.04952995047004953,\n \"D\": 0.046871953128046873,\n \"G\": 0.088611911388088618,\n \"F\": 0.039771960228039777,\n \"I\": 0.036885963114036892,\n \"H\": 0.033617966382033626,\n \"K\": 0.08048191951808048,\n \"M\": 0.014752985247014754,\n \"L\": 0.085356914643085369,\n \"N\": 0.040431959568040438,\n \"Q\": 0.038254961745038257,\n \"P\": 0.050679949320050689,\n \"S\": 0.069576930423069588,\n \"R\": 0.040903959096040908,\n \"T\": 0.058541941458058543,\n \"W\": 0.010493989506010494,\n \"V\": 0.064717935282064723,\n \"Y\": 0.029915970084029919,\n}\n\nJTT92_matrix = numpy.array(\n [\n [\n 0.0,\n 56.0,\n 81.0,\n 105.0,\n 15.0,\n 179.0,\n 27.0,\n 36.0,\n 35.0,\n 30.0,\n 54.0,\n 54.0,\n 194.0,\n 57.0,\n 58.0,\n 378.0,\n 475.0,\n 298.0,\n 9.0,\n 11.0,\n ],\n [\n 56.0,\n 0.0,\n 10.0,\n 5.0,\n 78.0,\n 59.0,\n 69.0,\n 17.0,\n 7.0,\n 23.0,\n 31.0,\n 34.0,\n 14.0,\n 9.0,\n 113.0,\n 223.0,\n 42.0,\n 62.0,\n 115.0,\n 209.0,\n ],\n [\n 81.0,\n 10.0,\n 0.0,\n 767.0,\n 4.0,\n 130.0,\n 112.0,\n 11.0,\n 26.0,\n 7.0,\n 15.0,\n 528.0,\n 15.0,\n 49.0,\n 16.0,\n 59.0,\n 38.0,\n 31.0,\n 4.0,\n 46.0,\n ],\n [\n 105.0,\n 5.0,\n 767.0,\n 0.0,\n 5.0,\n 119.0,\n 26.0,\n 12.0,\n 181.0,\n 9.0,\n 18.0,\n 58.0,\n 18.0,\n 323.0,\n 29.0,\n 30.0,\n 32.0,\n 45.0,\n 10.0,\n 7.0,\n ],\n [\n 15.0,\n 78.0,\n 4.0,\n 5.0,\n 0.0,\n 5.0,\n 40.0,\n 89.0,\n 4.0,\n 248.0,\n 43.0,\n 10.0,\n 17.0,\n 4.0,\n 5.0,\n 92.0,\n 12.0,\n 62.0,\n 53.0,\n 536.0,\n ],\n [\n 179.0,\n 59.0,\n 130.0,\n 119.0,\n 5.0,\n 0.0,\n 23.0,\n 6.0,\n 27.0,\n 6.0,\n 14.0,\n 81.0,\n 24.0,\n 26.0,\n 137.0,\n 201.0,\n 33.0,\n 47.0,\n 55.0,\n 8.0,\n ],\n [\n 27.0,\n 69.0,\n 112.0,\n 26.0,\n 40.0,\n 23.0,\n 0.0,\n 16.0,\n 45.0,\n 56.0,\n 33.0,\n 391.0,\n 115.0,\n 597.0,\n 328.0,\n 73.0,\n 46.0,\n 11.0,\n 8.0,\n 573.0,\n ],\n [\n 36.0,\n 17.0,\n 11.0,\n 12.0,\n 89.0,\n 6.0,\n 16.0,\n 0.0,\n 21.0,\n 229.0,\n 479.0,\n 47.0,\n 10.0,\n 9.0,\n 22.0,\n 40.0,\n 245.0,\n 961.0,\n 9.0,\n 32.0,\n ],\n [\n 35.0,\n 7.0,\n 26.0,\n 181.0,\n 4.0,\n 27.0,\n 45.0,\n 21.0,\n 0.0,\n 14.0,\n 65.0,\n 263.0,\n 21.0,\n 292.0,\n 646.0,\n 47.0,\n 103.0,\n 14.0,\n 10.0,\n 8.0,\n ],\n [\n 30.0,\n 23.0,\n 7.0,\n 9.0,\n 248.0,\n 6.0,\n 56.0,\n 229.0,\n 14.0,\n 0.0,\n 388.0,\n 12.0,\n 102.0,\n 72.0,\n 38.0,\n 59.0,\n 25.0,\n 180.0,\n 52.0,\n 24.0,\n ],\n [\n 54.0,\n 31.0,\n 15.0,\n 18.0,\n 43.0,\n 14.0,\n 33.0,\n 479.0,\n 65.0,\n 388.0,\n 0.0,\n 30.0,\n 16.0,\n 43.0,\n 44.0,\n 29.0,\n 226.0,\n 323.0,\n 24.0,\n 18.0,\n ],\n [\n 54.0,\n 34.0,\n 528.0,\n 58.0,\n 10.0,\n 81.0,\n 391.0,\n 47.0,\n 263.0,\n 
12.0,\n 30.0,\n 0.0,\n 15.0,\n 86.0,\n 45.0,\n 503.0,\n 232.0,\n 16.0,\n 8.0,\n 70.0,\n ],\n [\n 194.0,\n 14.0,\n 15.0,\n 18.0,\n 17.0,\n 24.0,\n 115.0,\n 10.0,\n 21.0,\n 102.0,\n 16.0,\n 15.0,\n 0.0,\n 164.0,\n 74.0,\n 285.0,\n 118.0,\n 23.0,\n 6.0,\n 10.0,\n ],\n [\n 57.0,\n 9.0,\n 49.0,\n 323.0,\n 4.0,\n 26.0,\n 597.0,\n 9.0,\n 292.0,\n 72.0,\n 43.0,\n 86.0,\n 164.0,\n 0.0,\n 310.0,\n 53.0,\n 51.0,\n 20.0,\n 18.0,\n 24.0,\n ],\n [\n 58.0,\n 113.0,\n 16.0,\n 29.0,\n 5.0,\n 137.0,\n 328.0,\n 22.0,\n 646.0,\n 38.0,\n 44.0,\n 45.0,\n 74.0,\n 310.0,\n 0.0,\n 101.0,\n 64.0,\n 17.0,\n 126.0,\n 20.0,\n ],\n [\n 378.0,\n 223.0,\n 59.0,\n 30.0,\n 92.0,\n 201.0,\n 73.0,\n 40.0,\n 47.0,\n 59.0,\n 29.0,\n 503.0,\n 285.0,\n 53.0,\n 101.0,\n 0.0,\n 477.0,\n 38.0,\n 35.0,\n 63.0,\n ],\n [\n 475.0,\n 42.0,\n 38.0,\n 32.0,\n 12.0,\n 33.0,\n 46.0,\n 245.0,\n 103.0,\n 25.0,\n 226.0,\n 232.0,\n 118.0,\n 51.0,\n 64.0,\n 477.0,\n 0.0,\n 112.0,\n 12.0,\n 21.0,\n ],\n [\n 298.0,\n 62.0,\n 31.0,\n 45.0,\n 62.0,\n 47.0,\n 11.0,\n 961.0,\n 14.0,\n 180.0,\n 323.0,\n 16.0,\n 23.0,\n 20.0,\n 17.0,\n 38.0,\n 112.0,\n 0.0,\n 25.0,\n 16.0,\n ],\n [\n 9.0,\n 115.0,\n 4.0,\n 10.0,\n 53.0,\n 55.0,\n 8.0,\n 9.0,\n 10.0,\n 52.0,\n 24.0,\n 8.0,\n 6.0,\n 18.0,\n 126.0,\n 35.0,\n 12.0,\n 25.0,\n 0.0,\n 71.0,\n ],\n [\n 11.0,\n 209.0,\n 46.0,\n 7.0,\n 536.0,\n 8.0,\n 573.0,\n 32.0,\n 8.0,\n 24.0,\n 18.0,\n 70.0,\n 10.0,\n 24.0,\n 20.0,\n 63.0,\n 21.0,\n 16.0,\n 71.0,\n 0.0,\n ],\n ]\n)\n\nJTT92_freqs = {\n \"A\": 0.076747923252076758,\n \"C\": 0.019802980197019805,\n \"E\": 0.061829938170061841,\n \"D\": 0.05154394845605155,\n \"G\": 0.073151926848073159,\n \"F\": 0.040125959874040135,\n \"I\": 0.053760946239053767,\n \"H\": 0.022943977056022944,\n \"K\": 0.058675941324058678,\n \"M\": 0.023825976174023829,\n \"L\": 0.091903908096091905,\n \"N\": 0.042644957355042652,\n \"Q\": 0.040751959248040752,\n \"P\": 0.050900949099050907,\n \"S\": 0.068764931235068771,\n \"R\": 0.051690948309051694,\n \"T\": 0.058564941435058568,\n \"W\": 0.014260985739014262,\n \"V\": 0.066004933995066004,\n \"Y\": 0.032101967898032102,\n}\n\nAH96_matrix = numpy.array(\n [\n [\n 0.0,\n 59.93,\n 17.67,\n 9.77,\n 6.37,\n 120.71,\n 13.9,\n 96.49,\n 8.36,\n 25.46,\n 141.88,\n 26.95,\n 54.31,\n 1.9,\n 23.18,\n 387.86,\n 480.72,\n 195.06,\n 1.9,\n 6.48,\n ],\n [\n 59.93,\n 0.0,\n 1.9,\n 1.9,\n 70.8,\n 30.71,\n 141.49,\n 62.73,\n 1.9,\n 25.65,\n 6.18,\n 58.94,\n 31.26,\n 75.24,\n 103.33,\n 277.05,\n 179.97,\n 1.9,\n 33.6,\n 254.77,\n ],\n [\n 17.67,\n 1.9,\n 0.0,\n 583.55,\n 4.98,\n 56.77,\n 113.99,\n 4.34,\n 2.31,\n 1.9,\n 1.9,\n 794.38,\n 13.43,\n 55.28,\n 1.9,\n 69.02,\n 28.01,\n 1.9,\n 19.86,\n 21.21,\n ],\n [\n 9.77,\n 1.9,\n 583.55,\n 0.0,\n 2.67,\n 28.28,\n 49.12,\n 3.31,\n 313.86,\n 1.9,\n 1.9,\n 63.05,\n 12.83,\n 313.56,\n 1.9,\n 54.71,\n 14.82,\n 21.14,\n 1.9,\n 13.12,\n ],\n [\n 6.37,\n 70.8,\n 4.98,\n 2.67,\n 0.0,\n 1.9,\n 48.16,\n 84.67,\n 6.44,\n 216.06,\n 90.82,\n 15.2,\n 17.31,\n 19.11,\n 4.69,\n 64.29,\n 33.85,\n 6.35,\n 7.84,\n 465.58,\n ],\n [\n 120.71,\n 30.71,\n 56.77,\n 28.28,\n 1.9,\n 0.0,\n 1.9,\n 5.98,\n 22.73,\n 2.41,\n 1.9,\n 53.3,\n 1.9,\n 6.75,\n 23.03,\n 125.93,\n 11.17,\n 2.53,\n 10.92,\n 3.21,\n ],\n [\n 13.9,\n 141.49,\n 113.99,\n 49.12,\n 48.16,\n 1.9,\n 0.0,\n 12.26,\n 127.67,\n 11.49,\n 11.97,\n 496.13,\n 60.97,\n 582.4,\n 165.23,\n 77.46,\n 44.78,\n 1.9,\n 7.08,\n 670.14,\n ],\n [\n 96.49,\n 62.73,\n 4.34,\n 3.31,\n 84.67,\n 5.98,\n 12.26,\n 0.0,\n 19.57,\n 329.09,\n 517.98,\n 27.1,\n 20.63,\n 8.34,\n 1.9,\n 47.7,\n 
368.43,\n 1222.94,\n 1.9,\n 25.01,\n ],\n [\n 8.36,\n 1.9,\n 2.31,\n 313.86,\n 6.44,\n 22.73,\n 127.67,\n 19.57,\n 0.0,\n 14.88,\n 91.37,\n 608.7,\n 50.1,\n 465.58,\n 141.4,\n 105.79,\n 136.33,\n 1.9,\n 24.0,\n 51.17,\n ],\n [\n 25.46,\n 25.65,\n 1.9,\n 1.9,\n 216.06,\n 2.41,\n 11.49,\n 329.09,\n 14.88,\n 0.0,\n 537.53,\n 15.16,\n 40.1,\n 39.7,\n 15.58,\n 73.61,\n 126.4,\n 91.67,\n 32.44,\n 44.15,\n ],\n [\n 141.88,\n 6.18,\n 1.9,\n 1.9,\n 90.82,\n 1.9,\n 11.97,\n 517.98,\n 91.37,\n 537.53,\n 0.0,\n 65.41,\n 18.84,\n 47.37,\n 1.9,\n 111.16,\n 528.17,\n 387.54,\n 21.71,\n 39.96,\n ],\n [\n 26.95,\n 58.94,\n 794.38,\n 63.05,\n 15.2,\n 53.3,\n 496.13,\n 27.1,\n 608.7,\n 15.16,\n 65.41,\n 0.0,\n 73.31,\n 173.56,\n 13.24,\n 494.39,\n 238.46,\n 1.9,\n 10.68,\n 191.36,\n ],\n [\n 54.31,\n 31.26,\n 13.43,\n 12.83,\n 17.31,\n 1.9,\n 60.97,\n 20.63,\n 50.1,\n 40.1,\n 18.84,\n 73.31,\n 0.0,\n 137.29,\n 23.64,\n 169.9,\n 128.22,\n 8.23,\n 4.21,\n 16.21,\n ],\n [\n 1.9,\n 75.24,\n 55.28,\n 313.56,\n 19.11,\n 6.75,\n 582.4,\n 8.34,\n 465.58,\n 39.7,\n 47.37,\n 173.56,\n 137.29,\n 0.0,\n 220.99,\n 54.11,\n 94.93,\n 19.0,\n 1.9,\n 38.82,\n ],\n [\n 23.18,\n 103.33,\n 1.9,\n 1.9,\n 4.69,\n 23.03,\n 165.23,\n 1.9,\n 141.4,\n 15.58,\n 1.9,\n 13.24,\n 23.64,\n 220.99,\n 0.0,\n 6.04,\n 2.08,\n 7.64,\n 21.95,\n 1.9,\n ],\n [\n 387.86,\n 277.05,\n 69.02,\n 54.71,\n 64.29,\n 125.93,\n 77.46,\n 47.7,\n 105.79,\n 73.61,\n 111.16,\n 494.39,\n 169.9,\n 54.11,\n 6.04,\n 0.0,\n 597.21,\n 1.9,\n 38.58,\n 64.92,\n ],\n [\n 480.72,\n 179.97,\n 28.01,\n 14.82,\n 33.85,\n 11.17,\n 44.78,\n 368.43,\n 136.33,\n 126.4,\n 528.17,\n 238.46,\n 128.22,\n 94.93,\n 2.08,\n 597.21,\n 0.0,\n 204.54,\n 9.99,\n 38.73,\n ],\n [\n 195.06,\n 1.9,\n 1.9,\n 21.14,\n 6.35,\n 2.53,\n 1.9,\n 1222.94,\n 1.9,\n 91.67,\n 387.54,\n 1.9,\n 8.23,\n 19.0,\n 7.64,\n 1.9,\n 204.54,\n 0.0,\n 5.37,\n 1.9,\n ],\n [\n 1.9,\n 33.6,\n 19.86,\n 1.9,\n 7.84,\n 10.92,\n 7.08,\n 1.9,\n 24.0,\n 32.44,\n 21.71,\n 10.68,\n 4.21,\n 1.9,\n 21.95,\n 38.58,\n 9.99,\n 5.37,\n 0.0,\n 26.25,\n ],\n [\n 6.48,\n 254.77,\n 21.21,\n 13.12,\n 465.58,\n 3.21,\n 670.14,\n 25.01,\n 51.17,\n 44.15,\n 39.96,\n 191.36,\n 16.21,\n 38.82,\n 1.9,\n 64.92,\n 38.73,\n 1.9,\n 26.25,\n 0.0,\n ],\n ]\n)\n\nAH96_freqs = {\n \"A\": 0.071999999999999995,\n \"C\": 0.0060000000000000001,\n \"E\": 0.024,\n \"D\": 0.019,\n \"G\": 0.056000000000000001,\n \"F\": 0.060999999999999999,\n \"I\": 0.087999999999999995,\n \"H\": 0.028000000000000001,\n \"K\": 0.023,\n \"M\": 0.053999999999999999,\n \"L\": 0.16900000000000001,\n \"N\": 0.039,\n \"Q\": 0.025000000000000001,\n \"P\": 0.053999999999999999,\n \"S\": 0.071999999999999995,\n \"R\": 0.019,\n \"T\": 0.085999999999999993,\n \"W\": 0.029000000000000001,\n \"V\": 0.042999999999999997,\n \"Y\": 0.033000000000000002,\n}\n\nAH96_mtmammals_matrix = numpy.array(\n [\n [\n 0.00000000e00,\n 0.00000000e00,\n 1.10000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 7.80000000e01,\n 8.00000000e00,\n 7.50000000e01,\n 0.00000000e00,\n 2.10000000e01,\n 7.60000000e01,\n 2.00000000e00,\n 5.30000000e01,\n 0.00000000e00,\n 3.20000000e01,\n 3.42000000e02,\n 6.81000000e02,\n 3.98000000e02,\n 5.00000000e00,\n 0.00000000e00,\n ],\n [\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 7.00000000e00,\n 0.00000000e00,\n 3.05000000e02,\n 4.10000000e01,\n 0.00000000e00,\n 2.70000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 1.86000000e02,\n 3.47000000e02,\n 1.14000000e02,\n 0.00000000e00,\n 6.50000000e01,\n 5.30000000e02,\n ],\n [\n 
1.10000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 5.69000000e02,\n 5.00000000e00,\n 7.90000000e01,\n 1.10000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 8.64000000e02,\n 2.00000000e00,\n 4.90000000e01,\n 0.00000000e00,\n 1.60000000e01,\n 0.00000000e00,\n 1.00000000e01,\n 0.00000000e00,\n 0.00000000e00,\n ],\n [\n 0.00000000e00,\n 0.00000000e00,\n 5.69000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 2.20000000e01,\n 2.20000000e01,\n 0.00000000e00,\n 2.15000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 2.74000000e02,\n 0.00000000e00,\n 2.10000000e01,\n 4.00000000e00,\n 2.00000000e01,\n 0.00000000e00,\n 0.00000000e00,\n ],\n [\n 0.00000000e00,\n 7.00000000e00,\n 5.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 5.70000000e01,\n 0.00000000e00,\n 2.46000000e02,\n 1.10000000e01,\n 6.00000000e00,\n 1.70000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 9.00000000e01,\n 8.00000000e00,\n 6.00000000e00,\n 0.00000000e00,\n 6.82000000e02,\n ],\n [\n 7.80000000e01,\n 0.00000000e00,\n 7.90000000e01,\n 2.20000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 4.70000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 1.80000000e01,\n 1.12000000e02,\n 0.00000000e00,\n 5.00000000e00,\n 0.00000000e00,\n 1.00000000e00,\n ],\n [\n 8.00000000e00,\n 3.05000000e02,\n 1.10000000e01,\n 2.20000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 2.60000000e01,\n 0.00000000e00,\n 4.58000000e02,\n 5.30000000e01,\n 5.50000000e02,\n 2.32000000e02,\n 2.00000000e01,\n 1.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 1.52500000e03,\n ],\n [\n 7.50000000e01,\n 4.10000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 5.70000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 6.00000000e00,\n 2.32000000e02,\n 3.78000000e02,\n 1.90000000e01,\n 5.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 3.60000000e02,\n 2.22000000e03,\n 0.00000000e00,\n 1.60000000e01,\n ],\n [\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 2.15000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 6.00000000e00,\n 0.00000000e00,\n 4.00000000e00,\n 5.90000000e01,\n 4.08000000e02,\n 1.80000000e01,\n 2.42000000e02,\n 5.00000000e01,\n 6.50000000e01,\n 5.00000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 6.70000000e01,\n ],\n [\n 2.10000000e01,\n 2.70000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 2.46000000e02,\n 0.00000000e00,\n 2.60000000e01,\n 2.32000000e02,\n 4.00000000e00,\n 0.00000000e00,\n 6.09000000e02,\n 0.00000000e00,\n 4.30000000e01,\n 2.00000000e01,\n 6.00000000e00,\n 7.40000000e01,\n 3.40000000e01,\n 1.00000000e02,\n 1.20000000e01,\n 2.50000000e01,\n ],\n [\n 7.60000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 1.10000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 3.78000000e02,\n 5.90000000e01,\n 6.09000000e02,\n 0.00000000e00,\n 2.10000000e01,\n 0.00000000e00,\n 2.20000000e01,\n 0.00000000e00,\n 4.70000000e01,\n 6.91000000e02,\n 8.32000000e02,\n 1.30000000e01,\n 0.00000000e00,\n ],\n [\n 2.00000000e00,\n 0.00000000e00,\n 8.64000000e02,\n 0.00000000e00,\n 6.00000000e00,\n 4.70000000e01,\n 4.58000000e02,\n 1.90000000e01,\n 4.08000000e02,\n 0.00000000e00,\n 2.10000000e01,\n 0.00000000e00,\n 3.30000000e01,\n 8.00000000e00,\n 4.00000000e00,\n 4.46000000e02,\n 1.10000000e02,\n 0.00000000e00,\n 6.00000000e00,\n 1.56000000e02,\n ],\n [\n 5.30000000e01,\n 0.00000000e00,\n 2.00000000e00,\n 
0.00000000e00,\n 1.70000000e01,\n 0.00000000e00,\n 5.30000000e01,\n 5.00000000e00,\n 1.80000000e01,\n 4.30000000e01,\n 0.00000000e00,\n 3.30000000e01,\n 0.00000000e00,\n 5.10000000e01,\n 9.00000000e00,\n 2.02000000e02,\n 7.80000000e01,\n 0.00000000e00,\n 7.00000000e00,\n 8.00000000e00,\n ],\n [\n 0.00000000e00,\n 0.00000000e00,\n 4.90000000e01,\n 2.74000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 5.50000000e02,\n 0.00000000e00,\n 2.42000000e02,\n 2.00000000e01,\n 2.20000000e01,\n 8.00000000e00,\n 5.10000000e01,\n 0.00000000e00,\n 2.46000000e02,\n 3.00000000e01,\n 0.00000000e00,\n 3.30000000e01,\n 0.00000000e00,\n 5.40000000e01,\n ],\n [\n 3.20000000e01,\n 1.86000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 1.80000000e01,\n 2.32000000e02,\n 0.00000000e00,\n 5.00000000e01,\n 6.00000000e00,\n 0.00000000e00,\n 4.00000000e00,\n 9.00000000e00,\n 2.46000000e02,\n 0.00000000e00,\n 3.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 1.60000000e01,\n 0.00000000e00,\n ],\n [\n 3.42000000e02,\n 3.47000000e02,\n 1.60000000e01,\n 2.10000000e01,\n 9.00000000e01,\n 1.12000000e02,\n 2.00000000e01,\n 0.00000000e00,\n 6.50000000e01,\n 7.40000000e01,\n 4.70000000e01,\n 4.46000000e02,\n 2.02000000e02,\n 3.00000000e01,\n 3.00000000e00,\n 0.00000000e00,\n 6.14000000e02,\n 0.00000000e00,\n 1.70000000e01,\n 1.07000000e02,\n ],\n [\n 6.81000000e02,\n 1.14000000e02,\n 0.00000000e00,\n 4.00000000e00,\n 8.00000000e00,\n 0.00000000e00,\n 1.00000000e00,\n 3.60000000e02,\n 5.00000000e01,\n 3.40000000e01,\n 6.91000000e02,\n 1.10000000e02,\n 7.80000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 6.14000000e02,\n 0.00000000e00,\n 2.37000000e02,\n 0.00000000e00,\n 0.00000000e00,\n ],\n [\n 3.98000000e02,\n 0.00000000e00,\n 1.00000000e01,\n 2.00000000e01,\n 6.00000000e00,\n 5.00000000e00,\n 0.00000000e00,\n 2.22000000e03,\n 0.00000000e00,\n 1.00000000e02,\n 8.32000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 3.30000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 2.37000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n ],\n [\n 5.00000000e00,\n 6.50000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 1.20000000e01,\n 1.30000000e01,\n 6.00000000e00,\n 7.00000000e00,\n 0.00000000e00,\n 1.60000000e01,\n 1.70000000e01,\n 0.00000000e00,\n 0.00000000e00,\n 0.00000000e00,\n 1.40000000e01,\n ],\n [\n 0.00000000e00,\n 5.30000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 6.82000000e02,\n 1.00000000e00,\n 1.52500000e03,\n 1.60000000e01,\n 6.70000000e01,\n 2.50000000e01,\n 0.00000000e00,\n 1.56000000e02,\n 8.00000000e00,\n 5.40000000e01,\n 0.00000000e00,\n 1.07000000e02,\n 0.00000000e00,\n 0.00000000e00,\n 1.40000000e01,\n 0.00000000e00,\n ],\n ]\n)\n\nAH96_mtmammals_freqs = {\n \"A\": 0.069199999999999998,\n \"C\": 0.0064999999999999997,\n \"E\": 0.023599999999999999,\n \"D\": 0.018599999999999998,\n \"G\": 0.0557,\n \"F\": 0.061100000000000002,\n \"I\": 0.090499999999999997,\n \"H\": 0.027699999999999999,\n \"K\": 0.022100000000000002,\n \"M\": 0.056099999999999997,\n \"L\": 0.16750000000000001,\n \"N\": 0.040000000000000001,\n \"Q\": 0.023800000000000002,\n \"P\": 0.053600000000000002,\n \"S\": 0.072499999999999995,\n \"R\": 0.0184,\n \"T\": 0.086999999999999994,\n \"W\": 0.0293,\n \"V\": 0.042799999999999998,\n \"Y\": 0.034000000000000002,\n}\n\nWG01_matrix = numpy.array(\n [\n [\n 0.0,\n 1.02704,\n 0.738998,\n 1.58285,\n 0.210494,\n 1.41672,\n 0.316954,\n 0.193335,\n 0.906265,\n 0.397915,\n 0.893496,\n 0.509848,\n 1.43855,\n 0.908598,\n 
0.551571,\n 3.37079,\n 2.12111,\n 2.00601,\n 0.113133,\n 0.240735,\n ],\n [\n 1.02704,\n 0.0,\n 0.0302949,\n 0.021352,\n 0.39802,\n 0.306674,\n 0.248972,\n 0.170135,\n 0.0740339,\n 0.384287,\n 0.390482,\n 0.265256,\n 0.109404,\n 0.0988179,\n 0.528191,\n 1.40766,\n 0.512984,\n 1.00214,\n 0.71707,\n 0.543833,\n ],\n [\n 0.738998,\n 0.0302949,\n 0.0,\n 6.17416,\n 0.0467304,\n 0.865584,\n 0.930676,\n 0.039437,\n 0.479855,\n 0.0848047,\n 0.103754,\n 5.42942,\n 0.423984,\n 0.616783,\n 0.147304,\n 1.07176,\n 0.374866,\n 0.152335,\n 0.129767,\n 0.325711,\n ],\n [\n 1.58285,\n 0.021352,\n 6.17416,\n 0.0,\n 0.0811339,\n 0.567717,\n 0.570025,\n 0.127395,\n 2.58443,\n 0.154263,\n 0.315124,\n 0.947198,\n 0.682355,\n 5.46947,\n 0.439157,\n 0.704939,\n 0.822765,\n 0.588731,\n 0.156557,\n 0.196303,\n ],\n [\n 0.210494,\n 0.39802,\n 0.0467304,\n 0.0811339,\n 0.0,\n 0.049931,\n 0.679371,\n 1.05947,\n 0.088836,\n 2.11517,\n 1.19063,\n 0.0961621,\n 0.161444,\n 0.0999208,\n 0.102711,\n 0.545931,\n 0.171903,\n 0.649892,\n 1.52964,\n 6.45428,\n ],\n [\n 1.41672,\n 0.306674,\n 0.865584,\n 0.567717,\n 0.049931,\n 0.0,\n 0.24941,\n 0.0304501,\n 0.373558,\n 0.0613037,\n 0.1741,\n 1.12556,\n 0.24357,\n 0.330052,\n 0.584665,\n 1.34182,\n 0.225833,\n 0.187247,\n 0.336983,\n 0.103604,\n ],\n [\n 0.316954,\n 0.248972,\n 0.930676,\n 0.570025,\n 0.679371,\n 0.24941,\n 0.0,\n 0.13819,\n 0.890432,\n 0.499462,\n 0.404141,\n 3.95629,\n 0.696198,\n 4.29411,\n 2.13715,\n 0.740169,\n 0.473307,\n 0.118358,\n 0.262569,\n 3.87344,\n ],\n [\n 0.193335,\n 0.170135,\n 0.039437,\n 0.127395,\n 1.05947,\n 0.0304501,\n 0.13819,\n 0.0,\n 0.323832,\n 3.17097,\n 4.25746,\n 0.554236,\n 0.0999288,\n 0.113917,\n 0.186979,\n 0.31944,\n 1.45816,\n 7.8213,\n 0.212483,\n 0.42017,\n ],\n [\n 0.906265,\n 0.0740339,\n 0.479855,\n 2.58443,\n 0.088836,\n 0.373558,\n 0.890432,\n 0.323832,\n 0.0,\n 0.257555,\n 0.934276,\n 3.01201,\n 0.556896,\n 3.8949,\n 5.35142,\n 0.96713,\n 1.38698,\n 0.305434,\n 0.137505,\n 0.133264,\n ],\n [\n 0.397915,\n 0.384287,\n 0.0848047,\n 0.154263,\n 2.11517,\n 0.0613037,\n 0.499462,\n 3.17097,\n 0.257555,\n 0.0,\n 4.85402,\n 0.131528,\n 0.415844,\n 0.869489,\n 0.497671,\n 0.344739,\n 0.326622,\n 1.80034,\n 0.665309,\n 0.398618,\n ],\n [\n 0.893496,\n 0.390482,\n 0.103754,\n 0.315124,\n 1.19063,\n 0.1741,\n 0.404141,\n 4.25746,\n 0.934276,\n 4.85402,\n 0.0,\n 0.198221,\n 0.171329,\n 1.54526,\n 0.683162,\n 0.493905,\n 1.51612,\n 2.05845,\n 0.515706,\n 0.428437,\n ],\n [\n 0.509848,\n 0.265256,\n 5.42942,\n 0.947198,\n 0.0961621,\n 1.12556,\n 3.95629,\n 0.554236,\n 3.01201,\n 0.131528,\n 0.198221,\n 0.0,\n 0.195081,\n 1.54364,\n 0.635346,\n 3.97423,\n 2.03006,\n 0.196246,\n 0.0719167,\n 1.086,\n ],\n [\n 1.43855,\n 0.109404,\n 0.423984,\n 0.682355,\n 0.161444,\n 0.24357,\n 0.696198,\n 0.0999288,\n 0.556896,\n 0.415844,\n 0.171329,\n 0.195081,\n 0.0,\n 0.933372,\n 0.679489,\n 1.61328,\n 0.795384,\n 0.314887,\n 0.139405,\n 0.216046,\n ],\n [\n 0.908598,\n 0.0988179,\n 0.616783,\n 5.46947,\n 0.0999208,\n 0.330052,\n 4.29411,\n 0.113917,\n 3.8949,\n 0.869489,\n 1.54526,\n 1.54364,\n 0.933372,\n 0.0,\n 3.0355,\n 1.02887,\n 0.857928,\n 0.301281,\n 0.215737,\n 0.22771,\n ],\n [\n 0.551571,\n 0.528191,\n 0.147304,\n 0.439157,\n 0.102711,\n 0.584665,\n 2.13715,\n 0.186979,\n 5.35142,\n 0.497671,\n 0.683162,\n 0.635346,\n 0.679489,\n 3.0355,\n 0.0,\n 1.22419,\n 0.554413,\n 0.251849,\n 1.16392,\n 0.381533,\n ],\n [\n 3.37079,\n 1.40766,\n 1.07176,\n 0.704939,\n 0.545931,\n 1.34182,\n 0.740169,\n 0.31944,\n 0.96713,\n 0.344739,\n 0.493905,\n 
3.97423,\n 1.61328,\n 1.02887,\n 1.22419,\n 0.0,\n 4.37802,\n 0.232739,\n 0.523742,\n 0.786993,\n ],\n [\n 2.12111,\n 0.512984,\n 0.374866,\n 0.822765,\n 0.171903,\n 0.225833,\n 0.473307,\n 1.45816,\n 1.38698,\n 0.326622,\n 1.51612,\n 2.03006,\n 0.795384,\n 0.857928,\n 0.554413,\n 4.37802,\n 0.0,\n 1.38823,\n 0.110864,\n 0.291148,\n ],\n [\n 2.00601,\n 1.00214,\n 0.152335,\n 0.588731,\n 0.649892,\n 0.187247,\n 0.118358,\n 7.8213,\n 0.305434,\n 1.80034,\n 2.05845,\n 0.196246,\n 0.314887,\n 0.301281,\n 0.251849,\n 0.232739,\n 1.38823,\n 0.0,\n 0.365369,\n 0.31473,\n ],\n [\n 0.113133,\n 0.71707,\n 0.129767,\n 0.156557,\n 1.52964,\n 0.336983,\n 0.262569,\n 0.212483,\n 0.137505,\n 0.665309,\n 0.515706,\n 0.0719167,\n 0.139405,\n 0.215737,\n 1.16392,\n 0.523742,\n 0.110864,\n 0.365369,\n 0.0,\n 2.48539,\n ],\n [\n 0.240735,\n 0.543833,\n 0.325711,\n 0.196303,\n 6.45428,\n 0.103604,\n 3.87344,\n 0.42017,\n 0.133264,\n 0.398618,\n 0.428437,\n 1.086,\n 0.216046,\n 0.22771,\n 0.381533,\n 0.786993,\n 0.291148,\n 0.31473,\n 2.48539,\n 0.0,\n ],\n ]\n)\n\nWG01_freqs = {\n \"A\": 0.086627908662790867,\n \"C\": 0.019307801930780195,\n \"E\": 0.058058905805890577,\n \"D\": 0.057045105704510574,\n \"G\": 0.083251808325180837,\n \"F\": 0.038431903843190382,\n \"I\": 0.048466004846600491,\n \"H\": 0.024431302443130246,\n \"K\": 0.062028606202860624,\n \"M\": 0.019502701950270197,\n \"L\": 0.086209008620900862,\n \"N\": 0.039089403908940397,\n \"Q\": 0.036728103672810368,\n \"P\": 0.045763104576310464,\n \"S\": 0.069517906951790692,\n \"R\": 0.043972004397200441,\n \"T\": 0.061012706101270617,\n \"W\": 0.014385901438590145,\n \"V\": 0.070895607089560719,\n \"Y\": 0.035274203527420354,\n}\n\n\ndef DSO78(**kw):\n \"\"\"Dayhoff et al 1978 empirical protein model\n Dayhoff, MO, Schwartz RM, and Orcutt, BC. 1978\n A model of evolutionary change in proteins. Pp. 345-352.\n Atlas of protein sequence and structure, Vol 5, Suppl. 3.\n National Biomedical Research Foundation, Washington D. C\n Matrix imported from PAML dayhoff.dat file\"\"\"\n sm = substitution_model.EmpiricalProteinMatrix(\n DSO78_matrix, DSO78_freqs, name=\"DSO78\", **kw\n )\n\n return sm\n\n\ndef JTT92(**kw):\n \"\"\"Jones, Taylor and Thornton 1992 empirical protein model\n Jones DT, Taylor WR, Thornton JM.\n The rapid generation of mutation data matrices from protein sequences.\n Comput Appl Biosci. 1992 Jun;8(3):275-82.\n Matrix imported from PAML jones.dat file\"\"\"\n sm = substitution_model.EmpiricalProteinMatrix(\n JTT92_matrix, JTT92_freqs, name=\"JTT92\", **kw\n )\n\n return sm\n\n\ndef AH96(**kw):\n \"\"\"Adachi and Hasegawa 1996 empirical model for mitochondrial proteins.\n Adachi J, Hasegawa M.\n Model of amino acid substitution in proteins encoded by mitochondrial DNA.\n J Mol Evol. 1996 Apr;42(4):459-68.\n Matrix imported from PAML mtREV24.dat file\"\"\"\n sm = substitution_model.EmpiricalProteinMatrix(\n AH96_matrix, AH96_freqs, name=\"AH96_mtREV24\", **kw\n )\n\n return sm\n\n\ndef get_model(name, **kw):\n \"\"\"returns an instance of the named model\n\n name is case sensitive and must be in the models attribute\"\"\"\n if isinstance(name, _SubstitutionModel):\n # already a substitution model\n return name\n if name not in models:\n msg = 'Unknown model \"%s\". Model names are case sensitive!' 
% name\n raise ValueError(msg)\n\n g = globals()\n return g[name](**kw)\n\n\ndef mtREV(**kw):\n return AH96(**kw)\n\n\ndef AH96_mtmammals(**kw):\n \"\"\"Adachi and Hasegawa 1996 empirical model for mammalian mitochondrial\n proteins.\n Adachi J, Hasegawa M.\n Model of amino acid substitution in proteins encoded by mitochondrial DNA.\n J Mol Evol. 1996 Apr;42(4):459-68.\n Matrix imported from PAML mtmam.dat file\"\"\"\n sm = substitution_model.EmpiricalProteinMatrix(\n AH96_mtmammals_matrix, AH96_mtmammals_freqs, name=\"AH96_mtmammals\", **kw\n )\n\n return sm\n\n\ndef mtmam(**kw):\n return AH96_mtmammals(**kw)\n\n\ndef WG01(**kw):\n \"\"\"Whelan and Goldman 2001 empirical model for globular proteins.\n Whelan S, Goldman N.\n A general empirical model of protein evolution derived from multiple protein\n families using a maximum-likelihood approach.\n Mol Biol Evol. 2001 May;18(5):691-9.\n Matrix imported from PAML wag.dat file\"\"\"\n sm = substitution_model.EmpiricalProteinMatrix(\n WG01_matrix, WG01_freqs, name=\"WG01\", **kw\n )\n\n return sm\n\n\ndef available_models(model_types=None):\n \"\"\"This function returns a cogent3 Table instance with header\n ['Model Type', 'Abbreviation', 'Description'].\"\"\"\n column_headings = [\"Model Type\", \"Abbreviation\", \"Description\"]\n _model_types = {\n \"nucleotide\": nucleotide_models,\n \"codon\": codon_models,\n \"protein\": protein_models,\n }\n if model_types is not None:\n model_types = model_types if not isinstance(model_types, str) else [model_types]\n else:\n model_types = _model_types.keys()\n\n rows = []\n for mod_type in model_types:\n for abbreviation in _model_types[mod_type]:\n if eval(abbreviation).__doc__:\n description = \" \".join(eval(abbreviation).__doc__.split())\n else:\n description = \"\"\n rows.append([mod_type, abbreviation, description])\n\n t = Table(\n header=column_headings,\n data=rows,\n title=\"Specify a model using 'Abbreviation' (case sensitive).\",\n )\n return t\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
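get_model in the record above dispatches on a model name by looking the factory function up in the module's globals(), and available_models fetches each docstring via eval on the abbreviation. A dictionary registry gives the same name-based dispatch without eval; the sketch below is an illustrative alternative under that assumption (register, JC69_like and get_model_by_name are made-up names, not cogent3's API).

_REGISTRY = {}

def register(fn):
    """Decorator: record a model factory under its function name."""
    _REGISTRY[fn.__name__] = fn
    return fn

@register
def JC69_like(**kw):
    """Toy stand-in for a model factory function."""
    return ("JC69_like", kw)

def get_model_by_name(name, **kw):
    if name not in _REGISTRY:
        # mirrors the case-sensitivity error message used above
        raise ValueError(f'Unknown model "{name}". Model names are case sensitive!')
    return _REGISTRY[name](**kw)

print(get_model_by_name("JC69_like", optimise_motif_probs=False))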
jrcai/BagofTricks-LT | [
"d75b195367e3d535d316d134ec4bbef4bb7fcbdd"
] | [
"lib/core/function.py"
] | [
"import _init_paths\r\nfrom core.evaluate import accuracy, AverageMeter, FusionMatrix\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.distributed as dist\r\nimport time\r\nfrom tqdm import tqdm\r\nfrom apex.parallel import DistributedDataParallel as DDP\r\nfrom apex.fp16_utils import *\r\nfrom apex import amp, optimizers\r\nfrom apex.multi_tensor_apply import multi_tensor_applier\r\nimport os\r\n\r\ndef train_model(\r\n trainLoader, model, epoch, epoch_number, optimizer, combiner, criterion, cfg, logger, rank=0, **kwargs\r\n):\r\n if cfg.EVAL_MODE:\r\n model.eval()\r\n else:\r\n model.train()\r\n\r\n trainLoader.dataset.update(epoch)\r\n combiner.update(epoch)\r\n criterion.update(epoch)\r\n\r\n\r\n start_time = time.time()\r\n number_batch = len(trainLoader)\r\n\r\n all_loss = AverageMeter()\r\n acc = AverageMeter()\r\n for i, (image, label, meta) in enumerate(trainLoader):\r\n cnt = label.shape[0]\r\n loss, now_acc = combiner.forward(model, criterion, image, label, meta)\r\n\r\n optimizer.zero_grad()\r\n\r\n with amp.scale_loss(loss, optimizer) as scaled_loss:\r\n scaled_loss.backward()\r\n\r\n optimizer.step()\r\n all_loss.update(loss.data.item(), cnt)\r\n acc.update(now_acc, cnt)\r\n\r\n if i % cfg.SHOW_STEP == 0 and rank == 0:\r\n pbar_str = \"Epoch:{:>3d} Batch:{:>3d}/{} Batch_Loss:{:>5.3f} Batch_Accuracy:{:>5.2f}% \".format(\r\n epoch, i, number_batch, all_loss.val, acc.val * 100\r\n )\r\n logger.info(pbar_str)\r\n end_time = time.time()\r\n pbar_str = \"---Epoch:{:>3d}/{} Avg_Loss:{:>5.3f} Epoch_Accuracy:{:>5.2f}% Epoch_Time:{:>5.2f}min---\".format(\r\n epoch, epoch_number, all_loss.avg, acc.avg * 100, (end_time - start_time) / 60\r\n )\r\n if rank == 0:\r\n logger.info(pbar_str)\r\n return acc.avg, all_loss.avg\r\n\r\ndef reduce_tensor(tensor, world_size):\r\n rt = tensor.clone()\r\n dist.all_reduce(rt, op=dist.reduce_op.SUM)\r\n rt /= world_size\r\n return rt\r\n\r\ndef valid_model(\r\n dataLoader, epoch_number, model, cfg, criterion, logger, device, rank, distributed, **kwargs\r\n):\r\n model.eval()\r\n\r\n with torch.no_grad():\r\n all_loss = AverageMeter()\r\n acc_avg = AverageMeter()\r\n\r\n func = torch.nn.Sigmoid() \\\r\n if cfg.LOSS.LOSS_TYPE in ['FocalLoss', 'ClassBalanceFocal'] else \\\r\n torch.nn.Softmax(dim=1)\r\n\r\n for i, (image, label, meta) in enumerate(dataLoader):\r\n image, label = image.to(device), label.to(device)\r\n\r\n feature = model(image, feature_flag=True)\r\n\r\n output = model(feature, classifier_flag=True, label=label)\r\n loss = criterion(output, label, feature=feature)\r\n score_result = func(output)\r\n\r\n now_result = torch.argmax(score_result, 1)\r\n acc, cnt = accuracy(now_result.cpu().numpy(), label.cpu().numpy())\r\n\r\n if distributed:\r\n world_size = float(os.environ.get(\"WORLD_SIZE\", 1))\r\n reduced_loss = reduce_tensor(loss.data, world_size)\r\n reduced_acc = reduce_tensor(torch.from_numpy(np.array([acc])).cuda(), world_size)\r\n loss = reduced_loss.cpu().data\r\n acc = reduced_acc.cpu().data\r\n\r\n all_loss.update(loss.data.item(), label.shape[0])\r\n if distributed:\r\n acc_avg.update(acc.data.item(), cnt*world_size)\r\n else:\r\n acc_avg.update(acc, cnt)\r\n\r\n pbar_str = \"------- Valid: Epoch:{:>3d} Valid_Loss:{:>5.3f} Valid_Acc:{:>5.2f}%-------\".format(\r\n epoch_number, all_loss.avg, acc_avg.avg * 100\r\n )\r\n if rank == 0:\r\n logger.info(pbar_str)\r\n return acc_avg.avg, all_loss.avg\r\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Sigmoid",
"torch.no_grad",
"numpy.array",
"torch.distributed.all_reduce",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
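reduce_tensor in the record above implements the standard metric-averaging pattern for distributed validation: each rank computes a local scalar, all_reduce sums it across ranks, and the sum is divided by the world size. A standalone sketch of that pattern follows; modern PyTorch spells the op dist.ReduceOp.SUM (dist.reduce_op, used above, is a deprecated alias), and the guard below is an added convenience so the sketch also runs in a single process with no process group initialized.

import torch
import torch.distributed as dist

def average_across_ranks(value: torch.Tensor) -> torch.Tensor:
    """Return the mean of `value` over all ranks, or `value` itself
    when running without a distributed process group."""
    if dist.is_available() and dist.is_initialized():
        rt = value.clone()                       # avoid mutating the caller's tensor
        dist.all_reduce(rt, op=dist.ReduceOp.SUM)
        return rt / dist.get_world_size()
    return value  # single-process fallback

print(average_across_ranks(torch.tensor(0.75)))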
Muhammad4hmed/Amazon-Web-Services-Hackathon | [
"8dda149af1c7f9ccc18706477b7363ed8e08f3e5"
] | [
"chatscript.py"
] | [
"import nltk\nfrom nltk.stem.lancaster import LancasterStemmer\nstemmer=LancasterStemmer()\nimport pickle\nimport numpy\nimport tflearn\nimport tensorflow\nimport random\nimport json\nimport pyttsx3\n\ndef tts(txt):\n\tengine=pyttsx3.init()\n\tengine.say(txt)\n\tengine.runAndWait()\n\n\ntry:\n\twith open('data.pickle','rb') as f:\n\t\twords,labels,training,output=pickle.load(f)\nexcept:\n\twith open('intents.json') as file:\n\t\tdata=json.load(file)\n\n\twords=[]\n\tlabels=[]\n\tdocs_x=[]\n\tdocs_class=[]\n\tfor intent in data['intents']:\n\t\tfor pattern in intent['patterns']:\n\t\t\tword_single=nltk.word_tokenize(pattern)\n\t\t\twords.extend(word_single)\n\t\t\tdocs_x.append(word_single)\n\t\t\tdocs_class.append(intent['tag'])\n\t\tif intent['tag'] not in labels:\n\t\t\tlabels.append(intent['tag']) \n\n\twords=[stemmer.stem(w.lower()) for w in words if w not in '?']\n\twords=sorted(list(set(words)))\n\n\tlabels=sorted(labels)\n\n\ttraining=[]\n\toutput=[]\n\tout_empty=[0 for _ in range(len(labels))]\n\n\tfor x,doc in enumerate(docs_x):\n\t\tbag=[]\n\n\t\tword_single=[stemmer.stem(w) for w in doc]\n\n\t\tfor w in words:\n\n\t\t\tif w in word_single:\n\t\t\t\tbag.append(1)\n\t\t\telse:\n\t\t\t\tbag.append(0)\n\t\toutput_row=out_empty[:]\n\t\toutput_row[labels.index(docs_class[x])]=1\n\n\t\ttraining.append(bag)\n\t\toutput.append(output_row)\n\ttraining=numpy.array(training)\n\toutput=numpy.array(output)\n\n\twith open('data.pickle','wb') as f:\n\t\tpickle.dump((words,labels,training,output),f)\n\n\ntry:\n\ttensorflow.reset_default_graph()\n\tnet=tflearn.input_data(shape=[None,len(training[0])])\n\tnet=tflearn.fully_connected(net,8)\n\tnet=tflearn.fully_connected(net,16)\n\tnet=tflearn.fully_connected(net,8)\n\tnet=tflearn.fully_connected(net,len(output[0]),activation='softmax')\n\tnet=tflearn.regression(net)\n\n\tmodel=tflearn.DNN(net,tensorboard_dir='log')\n\tmodel.load('my_model.tflearn')\n\nexcept:\n\ttensorflow.reset_default_graph()\n\tnet=tflearn.input_data(shape=[None,len(training[0])])\n\tnet=tflearn.fully_connected(net,8)\n\tnet=tflearn.fully_connected(net,16)\n\tnet=tflearn.fully_connected(net,8)\n\tnet=tflearn.fully_connected(net,len(output[0]),activation='softmax')\n\tnet=tflearn.regression(net)\n\n\tmodel=tflearn.DNN(net,tensorboard_dir='log')\n\n\tmodel.fit(training,output,n_epoch=1500,batch_size=8,show_metric=True)\n\tmodel.save('my_model.tflearn')\n\ndef bag_of_words(s,words):\n\tbag=[0 for _ in range(len(words))]\n\ts_words=nltk.word_tokenize(s)\n\ts_words=[stemmer.stem(word.lower()) for word in s_words]\n\n\tfor x in s_words:\n\t\tfor i,w in enumerate(words):\n\t\t\tif w==x:\n\t\t\t\tbag[i]=1\n\treturn numpy.array(bag)\n\ndef chat(inp):\n\timport random\n\t#print('Enter a robotics related query!(type quit to stop)')\n\twhile True:\n\t\tif inp.lower()=='quit':\n\t\t\tbreak\n\t\tresults=model.predict([bag_of_words(inp,words)])[0]\n\t\t'''with open('chatbot.pb','wb') as f:\n\t\t\t\t\t\t\t\t\tpickle.dump(model,f)'''\n\t\tresults_idx=numpy.argmax(results)\n\t\tidx=random.randint(0, 1)\n\n\t\tif results[results_idx]>0.3:\n\n\t\t\ttag=labels[results_idx]\n\t\t\twith open('intents.json') as file:\n\t\t\t\tdata=json.load(file)\n\n\t\t\t\t\n\t\t\tfor trunc in data['intents']:\n\t\t\t\tif 
trunc['tag']==tag:\n\t\t\t\t\tresponses=trunc['responses']\n\t\t\t\t\tprefix=trunc['response_pre_string']\n\t\t\t\t\tsuffix=trunc['response_post_string']\n\t\t\treturn random.choice(responses)\n\t\telse:\n\t\t\treturn 'If you still feel that your query was unaddressed feel free to mail us at support@toll_automator.in'\n\nprint(chat(\"I can't sign in\"))"
] | [
[
"numpy.array",
"tensorflow.reset_default_graph",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
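A minimal, self-contained sketch of the bag-of-words encoding the chatbot above relies on. The vocabulary below is hypothetical, and stemming is skipped so the snippet runs without any NLTK downloads:

import numpy

def bag_of_words_demo(sentence, vocabulary):
    # Mark which vocabulary entries occur in the whitespace-tokenized sentence.
    tokens = [w.lower() for w in sentence.split()]
    return numpy.array([1 if w in tokens else 0 for w in vocabulary])

vocab = ["sign", "in", "password", "reset"]         # hypothetical vocabulary
print(bag_of_words_demo("I can't sign in", vocab))  # -> [1 1 0 0]

The model's softmax output is then argmax-ed over the intent tags built from the same intents.json file.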
raulf2012/PROJ_IrOx_OER | [
"56883d6f5b62e67703fe40899e2e68b3f5de143b"
] | [
"dft_workflow/dft_scripts/slab_dft.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"Run VASP job on slab.\n\nAuthor(s): Michal Badich, Raul A. Flores\n\"\"\"\n\n# | - Import Modules\nimport os\nprint(os.getcwd())\nimport sys\n\nimport json\nimport subprocess\nimport time\nt0 = time.time()\n\nimport numpy as np\n\nimport ase.calculators.vasp as vasp_calculator\nfrom ase import io\n\n# My Modules\nfrom ase_modules.ase_methods import clean_up_dft, get_slab_kpts\n#__|\n\n# | - Script Inputs\neasy_params = False\n\n# set_magmoms_i = False\n#__|\n\n#| - Misc setup\ndirectory = \"out_data\"\nif not os.path.exists(directory):\n os.makedirs(directory)\n#__|\n\n# | - Read Atoms Object\nif os.path.isfile(\"init.traj\"):\n atoms = io.read('init.traj')\n#__|\n\n#| - Setting magmoms\nfrom dft_workflow_methods import set_magmoms\n\nset_magmoms(\n atoms=atoms,\n mode=None, # \"set_magmoms_M_O\", \"set_magmoms_random\"\n set_from_file_if_avail=True)\n\n# #########################################################\ninit_magmoms = atoms.get_initial_magnetic_moments()\ndata_path = os.path.join(\"out_data/init_magmoms.json\")\nwith open(data_path, \"w\") as fle:\n json.dump(list(init_magmoms), fle, indent=2)\n# #########################################################\n#__|\n\n# | - Calculator\n\n# ########################################################\ndata_root_path = os.path.join(\n os.environ[\"PROJ_irox_oer\"],\n \"dft_workflow/dft_scripts/out_data\")\n\ndata_path = os.path.join(data_root_path, \"dft_calc_settings.json\")\nwith open(data_path, \"r\") as fle:\n dft_calc_settings = json.load(fle)\n\nif easy_params:\n easy_data_path = os.path.join(\n data_root_path, \"easy_dft_calc_settings.json\")\n with open(easy_data_path, \"r\") as fle:\n easy_dft_calc_settings = json.load(fle)\n\n# ########################################################\nkpoints = get_slab_kpts(atoms)\n\nlocal_dft_settings = dict(\n kpts=kpoints,\n )\ndft_calc_settings.update(local_dft_settings)\n\n# ########################################################\n# Reading VASP parameters from file and merging with params in script\nfrom ase_modules.dft_params import VASP_Params\nVP = VASP_Params(load_defaults=False)\nVP.load_params()\ndft_calc_settings.update(VP.params)\n\n\n# ########################################################\nif easy_params:\n dft_calc_settings.update(easy_dft_calc_settings)\n\n\n# ########################################################\ncalc = vasp_calculator.Vasp(**dft_calc_settings)\n\n\n# ########################################################\n# Writing all calculator parameters to json\nwith open(\"out_data/calc_params.json\", \"w\") as outfile:\n json.dump(calc.todict(), outfile, indent=2)\n\n#__|\n\n\n# Setting magmoms to 0 if not running spin-polarized calculation\nif dft_calc_settings.get(\"ispin\", None) == 1:\n print(30 * \"*\")\n print(\"Setting magnetic moments to 0 since ispin=1\")\n print(30 * \"*\")\n atoms.set_initial_magnetic_moments(magmoms=None)\n # #####################################################\n init_magmoms = atoms.get_initial_magnetic_moments()\n data_path = os.path.join(\"out_data/init_magmoms.json\")\n with open(data_path, \"w\") as fle:\n json.dump(list(init_magmoms), fle, indent=2)\n # #####################################################\n\n\natoms.set_calculator(calc)\n\n#| - Copy over WAVECAR file if available\ncwd = os.getcwd()\nrev_num_i = cwd.split(\"/\")[-1]\nassert rev_num_i[0] == \"_\", \"ijdfisifjisd\"\nrev_num_i = int(rev_num_i[1:])\n\nispin = dft_calc_settings.get(\"ispin\", None)\n\nfrom pathlib import Path\nfrom 
vasp.vasp_methods import read_incar\n\nif rev_num_i > 1:\n    rev_num_im1 = rev_num_i - 1\n    prev_rev_dir = \"../_\" + str(rev_num_im1).zfill(2)\n\n\n    my_file = Path(prev_rev_dir)\n    if my_file.is_dir():\n        # directory exists\n\n        prev_rev_files_list = os.listdir(prev_rev_dir)\n\n        # Checking spin of previous calculation\n        path_i = os.path.join(prev_rev_dir, \"INCAR\")\n        my_file = Path(path_i)\n        prev_ispin = 1\n        if my_file.is_file():\n            prev_incar_dict = read_incar(prev_rev_dir, verbose=False)\n            prev_ispin = prev_incar_dict.get(\"ISPIN\", None)\n\n        if ispin == 2 and prev_ispin == 1:\n            print(\"Job changed spin, so not copying WAVECAR\")\n        else:\n            if \"WAVECAR\" in prev_rev_files_list:\n                wavecar_dir = os.path.join(prev_rev_dir, \"WAVECAR\")\n                from shutil import copyfile\n                copyfile(wavecar_dir, \"./WAVECAR\")\n#__|\n\n#| - Writing pre-DFT objects to file\nout_data = dict(\n    atoms=atoms,\n    dft_calc_settings=dft_calc_settings,\n    VP=VP,\n    calc=calc,\n    local_dft_settings=local_dft_settings,\n    t0=t0,\n    )\n\n# Pickling data ###########################################\nimport os; import pickle\ndirectory = \"out_data\"\nif not os.path.exists(directory): os.makedirs(directory)\nwith open(os.path.join(directory, \"pre_out_data.pickle\"), \"wb\") as fle:\n    pickle.dump(out_data, fle)\n# #########################################################\n#__|\n\n\ncompenv = os.environ[\"COMPENV\"]\nif compenv not in [\"nersc\", \"slac\", \"sherlock\", \"slac/sdf_cluster\", ]:\n    print(\"Not in an environment set up for DFT calculations\")\nelse:\n    atoms.get_potential_energy()\n\n    #| - Writing files\n\n    #| - TEST new write\n    from ase.io.trajectory import Trajectory\n    import subprocess\n\n    traj2 = Trajectory('final_with_calculator.traj', 'w')\n    traj2.write(atoms)\n    subprocess.call('ase convert -f final_with_calculator.traj final_with_calculator.json', shell=True)\n    #__|\n\n    io.write(\"out.cif\", atoms)\n    io.write(\"out.traj\", atoms)\n\n    try:\n        traj = io.read(\"OUTCAR\", index=\":\")\n        io.write(\"final_images.traj\", traj)\n    except Exception:\n        print(\"Couldn't read/write final OUTCAR traj object\")\n\n    tf = time.time()\n    out_data = dict(\n        atoms=atoms,\n        dft_calc_settings=dft_calc_settings,\n        VP=VP,\n        calc=calc,\n        local_dft_settings=local_dft_settings,\n        run_time=np.abs(tf - t0),\n        t0=t0,\n        tf=tf,\n        )\n\n    # Pickling data #######################################\n    import os; import pickle\n    directory = \"out_data\"\n    if not os.path.exists(directory): os.makedirs(directory)\n    with open(os.path.join(directory, \"out_data.pickle\"), \"wb\") as fle:\n        pickle.dump(out_data, fle)\n    # #####################################################\n\n    #__|\n\n    clean_up_dft()\n"
] | [
[
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
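The WAVECAR-reuse logic in the script above assumes a _01, _02, ... revision-folder layout. A small sketch isolating just that convention; the path passed in is a made-up example, not part of the repository:

from pathlib import Path

def previous_revision_dir(cwd: str):
    # Revision folders are named _01, _02, ...; revision N may reuse files
    # (e.g. WAVECAR) from revision N-1 when that folder exists.
    rev = cwd.rstrip("/").split("/")[-1]
    if not rev.startswith("_"):
        raise ValueError("expected a revision folder like _02")
    num = int(rev[1:])
    if num <= 1:
        return None
    prev = Path("..") / ("_" + str(num - 1).zfill(2))
    return prev if prev.is_dir() else None

print(previous_revision_dir("/scratch/slab_job/_02"))  # ../_01, or None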
skmendez/LightShow | [
"d47989da39c45ad34933dbb37ed7d85986997d21"
] | [
"gen_demo.py"
] | [
"import ffmpeg\nimport numpy as np\nfrom videoprocess import process_video\nfrom youtube_dl import YoutubeDL\nimport os\n\nclass MyLogger(object):\n def debug(self, msg):\n pass\n\n def warning(self, msg):\n pass\n\n def error(self, msg):\n print(msg)\n\nydl_opts = {\n 'format': \"18\",\n 'postprocessors': [{'key': 'FFmpegVideoConvertor', 'preferedformat': 'mp4'}],\n 'outtmpl': '%(title)s.%(ext)s',\n \"logger\": MyLogger()}\n\ndef gen_video(url, song_db = None, song_lock = None):\n with YoutubeDL(ydl_opts) as ydl:\n title = ydl.extract_info(url)[\"title\"]\n fname = \"{}.mp4\".format(title)\n\n if not os.path.exists(fname):\n raise ValueError(\"File not generated\")\n\n demo = process_video(fname, 25)\n\n out, _ = (\n ffmpeg\n .input(fname)\n .output('pipe:', ac=1, ar=48000, format='wav')\n .run(capture_stdout=True)\n )\n wav = (\n np\n .frombuffer(out, np.int16)\n )\n demo.wav = wav\n demo.to_file(\"{}.npz\".format(title))\n os.remove(fname)\n\n if song_db is not None:\n with song_lock:\n if \"-\" in title:\n artist, song = title.split(\"-\")\n song = song.strip()\n artist = artist.strip()\n else:\n song = title.strip()\n artist = None\n song_db[(artist, song)] = \"{}.npz\".format(title)\n"
] | [
[
"numpy.frombuffer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
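The decode-to-numpy pattern in gen_video generalizes to any media file. A minimal sketch, assuming ffmpeg-python is installed and that "song.mp4" (a placeholder name) exists:

import ffmpeg
import numpy as np

# Ask ffmpeg for mono (ac=1), 48 kHz (ar=48000) WAV on stdout, then view the
# returned bytes as 16-bit PCM samples, mirroring gen_video above.
out, _ = (
    ffmpeg
    .input("song.mp4")
    .output("pipe:", ac=1, ar=48000, format="wav")
    .run(capture_stdout=True)
)
samples = np.frombuffer(out, np.int16)
print(len(samples) / 48000, "seconds of audio")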
hugofloresgarcia/torchopenl3 | [
"4fecb2dae04b52b58b3eb700cb1b6db4037e31f6"
] | [
"tests/test_model.py"
] | [
"import pytest\nimport numpy as np\nimport torch\nimport torchopenl3\n\n###############################################################################\n# Test model\n###############################################################################\n\[email protected](\"model\", [m for m in torchopenl3.all_models()])\ndef test_model_output_shape(model):\n audio = np.random.rand(1, 48000)\n audio = torch.from_numpy(audio).unsqueeze(0).float()\n\n expected_shape = (1, model.embedding_size)\n print(model.content_type)\n\n assert model(audio).shape == expected_shape"
] | [
[
"torch.from_numpy",
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
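The same parametrized shape-check pattern, written against a stand-in model so it runs without torchopenl3 installed; DummyModel and its embedding_size are purely illustrative:

import numpy as np
import pytest
import torch

class DummyModel(torch.nn.Module):
    embedding_size = 512

    def forward(self, x):
        # x arrives as (batch, channels, samples); emit one embedding per item.
        return torch.zeros(x.shape[0], self.embedding_size)

@pytest.mark.parametrize("model", [DummyModel()])
def test_output_shape(model):
    audio = torch.from_numpy(np.random.rand(1, 48000)).unsqueeze(0).float()
    assert model(audio).shape == (1, model.embedding_size)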
zoeqwq/gs-quant | [
"2ee3ffe5ed918845f5f56f006755efd04774abd6"
] | [
"gs_quant/timeseries/measures_reports.py"
] | [
"\"\"\"\nCopyright 2020 Goldman Sachs.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n\"\"\"\nfrom typing import Optional\n\nimport pandas as pd\nfrom pydash import decapitalize\n\nfrom gs_quant.api.gs.data import QueryType\nfrom gs_quant.data.core import DataContext\nfrom gs_quant.entities.entity import EntityType\nfrom gs_quant.errors import MqValueError, MqError\nfrom gs_quant.markets.factor import Factor\nfrom gs_quant.markets.report import FactorRiskReport, PerformanceReport\nfrom gs_quant.models.risk_model import ReturnFormat\nfrom gs_quant.target.portfolios import RiskAumSource\nfrom gs_quant.timeseries import plot_measure_entity\nfrom gs_quant.timeseries.measures import _extract_series_from_df\nfrom gs_quant.api.gs.portfolios import GsPortfolioApi\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.FACTOR_EXPOSURE])\ndef factor_exposure(report_id: str, factor_name: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Factor exposure data associated with a factor in a factor risk report\n\n :param report_id: factor risk report id\n :param factor_name: factor name\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: Timeseries of factor exposure for requested factor\n \"\"\"\n return _get_factor_data(report_id, factor_name, QueryType.FACTOR_EXPOSURE)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.FACTOR_PNL])\ndef factor_pnl(report_id: str, factor_name: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Factor PnL data associated with a factor in a factor risk report\n\n :param report_id: factor risk report id\n :param factor_name: factor name\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: Timeseries of factor pnl for requested factor\n \"\"\"\n return _get_factor_data(report_id, factor_name, QueryType.FACTOR_PNL)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.FACTOR_PROPORTION_OF_RISK])\ndef factor_proportion_of_risk(report_id: str, factor_name: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Factor proportion of risk data associated with a factor in a factor risk report\n\n :param report_id: factor risk report id\n :param factor_name: factor name\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: Timeseries of factor proportion of risk for requested factor\n \"\"\"\n return _get_factor_data(report_id, factor_name, QueryType.FACTOR_PROPORTION_OF_RISK)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.DAILY_RISK])\ndef daily_risk(report_id: str, factor_name: str = 'Total', *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> 
pd.Series:\n    \"\"\"\n    Daily risk data associated with a factor in a factor risk report\n\n    :param report_id: factor risk report id\n    :param factor_name: factor name (must be \"Factor\", \"Specific\", or \"Total\")\n    :param source: name of function caller\n    :param real_time: whether to retrieve intraday data instead of EOD\n    :param request_id: server request id\n    :return: Timeseries of daily risk for requested factor\n    \"\"\"\n    return _get_factor_data(report_id, factor_name, QueryType.DAILY_RISK)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.ANNUAL_RISK])\ndef annual_risk(report_id: str, factor_name: str = 'Total', *, source: str = None,\n                real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n    \"\"\"\n    Annual risk data associated with a factor in a factor risk report\n\n    :param report_id: factor risk report id\n    :param factor_name: factor name (must be \"Factor\", \"Specific\", or \"Total\")\n    :param source: name of function caller\n    :param real_time: whether to retrieve intraday data instead of EOD\n    :param request_id: server request id\n    :return: Timeseries of annual risk for requested factor\n    \"\"\"\n    return _get_factor_data(report_id, factor_name, QueryType.ANNUAL_RISK)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.PNL])\ndef normalized_performance(report_id: str, aum_source: str = None, *, source: str = None,\n                           real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n    \"\"\"\n    Returns the Normalized Performance of a performance report based on AUM source\n    :param report_id: id of performance report\n    :param aum_source: source to normalize pnl from, default is the aum source on your portfolio,\n    if no aum source is set on your portfolio the default is gross\n    :param source: name of function caller\n    :param real_time: whether to retrieve intraday data instead of EOD\n    :param request_id: server request id\n    :return: portfolio normalized performance\n\n    **Usage**\n\n    Returns the normalized performance of the portfolio based on AUM source.\n\n    If aum_source is \"Custom AUM\", we read AUM from the custom AUM uploaded to the portfolio and\n    normalize performance based on that exposure.\n\n    If aum_source is one of RiskAumSource.Long, RiskAumSource.Short, RiskAumSource.Net or\n    RiskAumSource.Gross, we take these exposures from the exposures calculated from daily positions.\n\n    :math:`NP_t = 1 + \\\\sum_{s \\\\leq t} PNL_s / (AUM_s - cPNL_{s-1})`, and on dates where\n    :math:`AUM_t - cPNL_{t-1} < 0` the series is inverted to :math:`1 / NP_t`\n\n    This takes into account varying AUM and adjusts for exposure change due to PNL,\n\n    where :math:`cPNL_{t-1}` is the performance report's cumulative PnL at date t-1,\n    :math:`PNL_t` is the performance report's PnL at date t,\n    and :math:`AUM_t` is the portfolio exposure on date t\n    \"\"\"\n    start_date = DataContext.current.start_time\n    end_date = DataContext.current.end_time\n\n    ppa_report = PerformanceReport.get(report_id)\n    if not aum_source:\n        port = GsPortfolioApi.get_portfolio(ppa_report.position_source_id)\n        aum_source = port.aum_source if port.aum_source else RiskAumSource.Net\n    else:\n        aum_source = RiskAumSource(aum_source)\n\n    aum_col_name = aum_source.value.lower()\n    aum_col_name = f'{aum_col_name}Exposure' if aum_col_name != 'custom aum' else 'aum'\n    measures = [aum_col_name, 'pnl'] if aum_source != RiskAumSource.Custom_AUM else ['pnl']\n    data = ppa_report.get_many_measures(measures, start_date, end_date)\n    data.loc[0, 'pnl'] = 0\n    data['cumulativePnlT-1'] = data['pnl'].cumsum(axis=0)\n    data = 
pd.DataFrame.from_records(data).set_index(['date'])\n if aum_source == RiskAumSource.Custom_AUM:\n custom_aum = pd.DataFrame(GsPortfolioApi.get_custom_aum(ppa_report.position_source_id, start_date, end_date))\n if custom_aum.empty:\n raise MqError(f'No custom AUM for portfolio {ppa_report.position_source_id} between dates {start_date},'\n f' {end_date}')\n custom_aum = pd.DataFrame.from_records(custom_aum).set_index(['date'])\n data = data.join(custom_aum.loc[:, aum_col_name], how='inner')\n if aum_source == RiskAumSource.Short:\n data[f'{aum_col_name}'] = -1 * data[f'{aum_col_name}']\n data['normalizedExposure'] = data[f'{aum_col_name}'] - data['cumulativePnlT-1']\n data['pnlOverNormalizedExposure'] = data['pnl'] / data['normalizedExposure']\n data['normalizedPerformance'] = data['pnlOverNormalizedExposure'].cumsum(axis=0) + 1\n data.loc[data.normalizedExposure < 0, 'normalizedPerformance'] = 1 / data.loc[:, 'normalizedPerformance']\n return pd.Series(data['normalizedPerformance'], name=\"normalizedPerformance\").dropna()\n\n\ndef _get_factor_data(report_id: str, factor_name: str, query_type: QueryType) -> pd.Series:\n # Check params\n report = FactorRiskReport.get(report_id)\n risk_model_id = report.get_risk_model_id()\n if factor_name not in ['Factor', 'Specific', 'Total']:\n if query_type in [QueryType.DAILY_RISK, QueryType.ANNUAL_RISK]:\n raise MqValueError('Please pick a factor name from the following: [\"Total\", \"Factor\", \"Specific\"]')\n factor = Factor.get(risk_model_id, factor_name)\n factor_name = factor.name\n\n # Extract relevant data for each date\n col_name = query_type.value.replace(' ', '')\n col_name = decapitalize(col_name)\n data_type = decapitalize(col_name[6:]) if col_name.startswith('factor') else col_name\n\n factor_data = report.get_results(\n factors=[factor_name],\n start_date=DataContext.current.start_time,\n end_date=DataContext.current.end_time,\n return_format=ReturnFormat.JSON\n )\n factor_exposures = [{'date': data['date'], col_name: data[data_type]} for data in factor_data\n if data.get(data_type)]\n\n # Create and return timeseries\n df = pd.DataFrame(factor_exposures)\n if not df.empty:\n df.set_index('date', inplace=True)\n df.index = pd.to_datetime(df.index)\n return _extract_series_from_df(df, query_type)\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.to_datetime",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
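A toy run of the normalized-performance arithmetic in normalized_performance, using invented pnl/exposure numbers rather than real portfolio data:

import pandas as pd

data = pd.DataFrame({
    "pnl":         [0.0, 10.0, -5.0],
    "netExposure": [1000.0, 1010.0, 1005.0],
})
# Mirror the function body: cumulative pnl, exposure net of it, then the
# cumulative pnl-over-exposure ratio plus one.
data["cumulativePnlT-1"] = data["pnl"].cumsum()
data["normalizedExposure"] = data["netExposure"] - data["cumulativePnlT-1"]
data["normalizedPerformance"] = (data["pnl"] / data["normalizedExposure"]).cumsum() + 1
print(data["normalizedPerformance"].tolist())  # -> [1.0, 1.01, 1.005]

Each day's pnl is divided by that day's exposure net of previously accumulated pnl, so performance is neither overstated after gains nor understated after losses.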
kevinbdsouza/Hi-C-LSTM | [
"c8e5d23b7e69878b8b5d7c537a3f33b3cdf9907d"
] | [
"src/analyses/classification/run_class.py"
] | [
"from analyses.classification.rna_seq import GeneExp\nfrom training.config import Config\nimport pandas as pd\nimport time\nfrom torch.utils.tensorboard import SummaryWriter\nfrom analyses.classification.downstream_helper import DownstreamHelper\nfrom analyses.classification.fires import Fires\nfrom analyses.classification.rep_timing import Rep_timing\nfrom analyses.classification.pe_interactions import PeInteractions\nfrom analyses.classification.loops import Loops\nfrom analyses.classification.domains import Domains\nfrom analyses.classification.subcompartments import Subcompartments\nfrom analyses.feature_attribution.tf import TFChip\nfrom analyses.classification.model_class import MultiClass\nfrom training.model import SeqLSTM\nimport torch\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nCUDA_LAUNCH_BLOCKING = 1\n\n\nclass DownstreamTasks:\n \"\"\"\n Class to run all downstream classifi experiments using XGBoost.\n Compute mAP, Accuaracy, PR curves, AuROC, and F-score.\n \"\"\"\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.saved_model_dir = cfg.model_dir\n self.calculate_map = True\n self.downstream_helper_ob = DownstreamHelper(cfg)\n self.df_columns = [str(i) for i in range(0, 16)] + [\"pos\"]\n self.class_columns = [str(i) for i in range(0, 10)]\n self.iter = 0\n\n def get_zero_one(self, window_labels, chr, zero_target, mode, combined):\n window_labels = window_labels.drop_duplicates(keep='first').reset_index(drop=True)\n\n if zero_target:\n if mode == \"ends\":\n col_list = ['start', 'end']\n else:\n col_list = ['pos']\n zero_pos_frame = self.downstream_helper_ob.get_zero_pos(window_labels, col_list, chr)\n zero_pos_frame = self.downstream_helper_ob.add_cum_pos(zero_pos_frame, chr, mode=\"pos\")\n\n window_labels = self.downstream_helper_ob.add_cum_pos(window_labels, chr, mode=mode)\n if mode == \"ends\":\n window_labels = self.downstream_helper_ob.get_pos_data(window_labels, chr)\n else:\n zero_pos_frame = None\n if not combined:\n window_labels = self.downstream_helper_ob.add_cum_pos(window_labels, chr, mode=mode)\n\n zero_pos_frame = window_labels.loc[window_labels[\"target\"] == 0]\n window_labels = window_labels.loc[window_labels[\"target\"] == 1]\n\n if mode == \"ends\":\n zero_pos_frame = self.downstream_helper_ob.get_pos_data(zero_pos_frame, chr)\n window_labels = self.downstream_helper_ob.get_pos_data(window_labels, chr)\n\n return window_labels, zero_pos_frame\n\n def run_gene_expression(self, chr):\n \"\"\"\n run_gene_expression(chr, embed_rows) -> float, float, float, float\n Gets gene expression data for given cell type and chromosome.\n Runs xgboost using representations from chosen method and celltype. Or runs baseline.\n Returns classifi metrics.\n Args:\n chr (int): chromosome to run classifi on.\n embed_rows (DataFrame): Dataframe with representations and positions.\n \"\"\"\n\n rna_seq_ob = GeneExp(self.cfg, chr)\n rna_seq_ob.get_rna_seq()\n rna_seq_chr = rna_seq_ob.filter_rna_seq()\n return rna_seq_chr\n\n def run_rep_timing(self, chr):\n \"\"\"\n run_rep_timing(chr, embed_rows) -> float, float, float, float\n Gets replication timing data for given cell type and chromosome.\n Runs xgboost using representations from chosen method and celltype. 
Or runs baseline.\n Returns classifi metrics.\n Args:\n chr (int): chromosome to run classifi on.\n embed_rows (DataFrame): Dataframe with representations and positions.\n \"\"\"\n\n rep_ob = Rep_timing(self.cfg, chr)\n rep_chr = rep_ob.get_rep_data()\n return rep_chr\n\n def run_enhancers(self, chr):\n \"\"\"\n run_enhancers(chr, embed_rows) -> float, float, float, float\n Gets Enhancer data for given cell type and chromosome.\n Runs xgboost using representations from chosen method and celltype. Or runs baseline.\n Returns classifi metrics.\n Args:\n chr (int): chromosome to run classifi on.\n embed_rows (DataFrame): Dataframe with representations and positions.\n \"\"\"\n\n pe_ob = PeInteractions(cfg, chr)\n pe_chr = pe_ob.get_pe_data()\n pe_chr = pe_ob.filter_pe_data(pe_chr)\n return pe_chr\n\n def run_tss(self, chr):\n \"\"\"\n run_tss(chr, embed_rows) -> float, float, float, float\n Gets TSS data for given cell type and chromosome.\n Runs xgboost using representations from chosen method and celltype. Or runs baseline.\n Returns classifi metrics.\n Args:\n chr (int): chromosome to run classifi on.\n embed_rows (DataFrame): Dataframe with representations and positions.\n \"\"\"\n\n pe_ob = PeInteractions(cfg, chr)\n pe_chr = pe_ob.get_pe_data()\n pe_chr = pe_ob.filter_pe_data(pe_chr)\n return pe_chr\n\n def run_pe(self, chr):\n \"\"\"\n run_pe(chr, embed_rows) -> float, float, float, float\n Gets PE interaction data for given cell type and chromosome.\n Runs xgboost using representations from chosen method and celltype. Or runs baseline.\n Returns classifi metrics.\n Args:\n chr (int): chromosome to run classifi on.\n embed_rows (DataFrame): Dataframe with representations and positions.\n \"\"\"\n\n pe_ob = PeInteractions(cfg, chr)\n pe_chr = pe_ob.get_pe_data()\n pe_chr = pe_ob.filter_pe_data(pe_chr)\n return pe_chr\n\n def run_fires(self, chr):\n \"\"\"\n run_fires(chr, embed_rows) -> float, float, float, float\n Gets FIRE data for given cell type and chromosome.\n Runs xgboost using representations from chosen method and celltype. Or runs baseline.\n Returns classifi metrics.\n Args:\n chr (int): chromosome to run classifi on.\n embed_rows (DataFrame): Dataframe with representations and positions.\n \"\"\"\n\n fire_ob = Fires(cfg, chr, mode=\"classifi\")\n fire_ob.get_fire_data()\n fire_chr = fire_ob.filter_fire_data()\n return fire_chr\n\n def run_domains(self, chr):\n \"\"\"\n run_domains(chr, embed_rows) -> float, float, float, float\n Gets TAD, subTAD, and boundaries data for given cell type and chromosome.\n Runs xgboost using representations from chosen method and celltype. Or runs baseline.\n Returns classifi metrics.\n Args:\n chr (int): chromosome to run classifi on.\n embed_rows (DataFrame): Dataframe with representations and positions.\n \"\"\"\n domain_ob = Domains(cfg, chr, mode=\"class\")\n\n \"get data and run xgboost according to type of domain element\"\n if self.cfg.class_element == \"TADs\":\n data = domain_ob.get_tad_data()\n elif self.cfg.class_element == \"subTADs\":\n data = domain_ob.get_tad_data()\n elif self.cfg.class_element == \"TADBs\":\n tf_ob = TFChip(cfg, chr)\n data = domain_ob.get_tad_boundaries(tf_ob, ctcf=\"all\")\n elif self.cfg.class_element == \"subTADBs\":\n data = domain_ob.get_subtad_boundaries()\n\n return data\n\n def run_loops(self, chr):\n \"\"\"\n run_loops(chr, embed_rows) -> float, float, float, float\n Gets loop data for given cell type and chromosome.\n Runs xgboost using representations from chosen method and celltype. 
Or runs baseline.\n Returns classifi metrics.\n Args:\n chr (int): chromosome to run classifi on.\n embed_rows (DataFrame): Dataframe with representations and positions.\n \"\"\"\n\n loop_ob = Loops(cfg, chr, mode=\"classifi\")\n loop_chr = loop_ob.get_loop_data()\n return loop_chr\n\n def run_sub_compartments(self, chr):\n \"\"\"\n run_sub_compartments(chr, embed_rows) -> float, float, float, float\n Gets subcompartment data for given cell type and chromosome.\n Runs xgboost using representations from chosen method and celltype. Or runs baseline.\n Returns classifi metrics.\n Args:\n chr (int): chromosome to run classifi on.\n embed_rows (DataFrame): Dataframe with representations and positions.\n \"\"\"\n\n sc_ob = Subcompartments(cfg, chr)\n sc_chr = sc_ob.get_sc_data()\n return sc_chr\n\n def return_data_for_element(self, element, chr):\n self.cfg.class_element = element\n\n if element == \"Gene Expression\":\n data = self.run_gene_expression(chr)\n model_base = \"gene\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=False, mode=\"ends\", combined=False)\n elif element == \"Replication Timing\":\n data = self.run_rep_timing(chr)\n model_base = \"rep\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=False, mode=\"ends\", combined=False)\n elif element == \"Enhancers\":\n data = self.run_enhancers(chr)\n model_base = \"en\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=True, mode=\"ends\", combined=False)\n elif element == \"TSS\":\n data = self.run_tss(chr)\n model_base = \"tss\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=True, mode=\"ends\", combined=False)\n elif element == \"PE-Interactions\":\n data = self.run_pe(chr)\n model_base = \"pei\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=True, mode=\"ends\", combined=False)\n elif element == \"FIREs\":\n data = self.run_fires(chr)\n model_base = \"fire\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=False, mode=\"ends\", combined=False)\n elif element == \"TADs\":\n data = self.run_domains(chr)\n model_base = \"tad\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=True, mode=\"ends\", combined=False)\n elif element == \"subTADs\":\n data = self.run_domains(chr)\n model_base = \"subtad\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=True, mode=\"ends\", combined=False)\n elif element == \"Loop Domains\":\n data = self.run_loops(chr)\n model_base = \"loop\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=True, mode=\"ends\", combined=False)\n elif element == \"TADBs\":\n data = self.run_domains(chr)\n model_base = \"tadb\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=True, mode=\"pos\", combined=False)\n elif element == \"subTADBs\":\n data = self.run_domains(chr)\n model_base = \"subtadb\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=True, mode=\"pos\", combined=False)\n elif element == \"Subcompartments\":\n data = self.run_sub_compartments(chr)\n model_base = \"subc\"\n one_data, zero_data = self.get_zero_one(data, chr, zero_target=False, mode=\"ends\", combined=False)\n\n return one_data, zero_data, model_base\n\n def run_multi_label(self, cfg):\n \"\"\"\n run_multi_label(cfg) -> No return object\n Runs xgboost for the given model and all classifi elements.\n Uses provide configuration. 
Saves classification metrics in a CSV file.\n        Args:\n            cfg (Config): Model whose representations need to be used to run xgboost.\n        \"\"\"\n\n        self.cfg.class_model_mode = \"test\"\n\n        for chr in cfg.chr_train_list:\n            print(\"Chromosome: {}\".format(chr))\n\n            self.cfg.model_name = \"class_chr\" + str(chr)\n            self.cfg.chr = chr\n\n            # load class model\n            model = MultiClass(self.cfg, device).to(device)\n            model.load_weights()\n\n            # load embed model\n            embed_model = SeqLSTM(self.cfg, device).to(device)\n            embed_model.load_weights()\n\n            # initialize optimizer\n            optimizer = model.compile_optimizer()\n\n            # set up Tensorboard logging\n            timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n            writer = SummaryWriter('./tensorboard_logs/' + self.cfg.model_name + timestr)\n\n            try:\n                main_data = pd.read_csv(cfg.output_directory + \"element_data_chr%s.csv\" % (chr))\n            except FileNotFoundError:\n                main_data = pd.DataFrame()\n            for i, element in enumerate(cfg.class_elements_list):\n                print(\"Data for: %s\" % (element))\n                element_data, _, _ = self.return_data_for_element(element, chr)\n                element_data[self.class_columns] = 0\n                element_data[self.cfg.class_dict[element]] = 1\n                element_data = element_data[self.class_columns + [\"pos\"]]\n\n                if i == 0:\n                    main_data = pd.concat([main_data, element_data])\n                else:\n                    common_pos = element_data[element_data[\"pos\"].isin(main_data[\"pos\"])]\n                    diff_pos = element_data[~element_data[\"pos\"].isin(main_data[\"pos\"])]\n\n                    common_pos = common_pos.drop_duplicates(keep='first').reset_index(drop=True)\n                    main_data.loc[main_data[\"pos\"].isin(common_pos[\"pos\"]), self.cfg.class_dict[element]] = 1\n\n                    main_data = pd.concat([main_data, diff_pos])\n                    main_data = main_data.drop_duplicates(keep='first').reset_index(drop=True)\n\n            main_data = main_data.sample(frac=1)\n\n            if self.cfg.class_model_mode == \"train\":\n                train_iter = 0\n                for epoch in range(self.cfg.num_epochs):\n                    train_iter, model, loss = model.train_model_multi(embed_model, epoch, optimizer, writer, main_data,\n                                                                      train_iter, self.cfg)\n\n                print('Final loss for chromosome %s : %s' % (chr, loss))\n                main_data.to_csv(cfg.output_directory + \"element_data_chr%s.csv\" % (chr))\n            else:\n                mean_ap, _ = model.test_model_multi(main_data, self.cfg, embed_model)\n\n                print('Mean mAP for chromosome %s : %s' % (chr, mean_ap))\n\n\nif __name__ == '__main__':\n    \"\"\"\n    Script to run xgboost for representations from class_method specified in config. One of hiclstm, sniper, and sca.\n    Baselines like pca_baseline, subc_baseline can be specified in class_experiment in config.\n    If class_experiment is not specified as baseline, assumes the experiment is based on one of the methods and\n    sets class_experiment as class_method.\n    All experiments except for Subcompartments are binary. Subcompartments is multiclass.\n    To change the cell type, specify the cell in config.\n    The appropriate model and elements for the cell type will be loaded.\n    \"\"\"\n\n    cfg = Config()\n    cell = cfg.cell\n\n    # downstream data object\n    downstream_ob = DownstreamTasks(cfg)\n\n    # run multilabel classification\n    downstream_ob.run_multi_label(cfg)\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
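The per-element merge in run_multi_label reduces to OR-ing one 0/1 label column per element type at each genomic position. A toy version with made-up positions and three label columns:

import pandas as pd

class_columns = ["0", "1", "2"]
class_dict = {"Gene Expression": "0", "Enhancers": "1", "TADs": "2"}

main_data = pd.DataFrame(columns=["pos"] + class_columns)
for element, positions in [("Gene Expression", [5, 7]), ("Enhancers", [7, 9])]:
    frame = pd.DataFrame({"pos": positions})
    for c in class_columns:
        frame[c] = 0
    frame[class_dict[element]] = 1
    # Positions already seen get the new label OR-ed in; new positions append.
    seen = frame["pos"].isin(main_data["pos"])
    main_data.loc[main_data["pos"].isin(frame.loc[seen, "pos"]), class_dict[element]] = 1
    main_data = pd.concat([main_data, frame[~seen]], ignore_index=True)

print(main_data)  # position 7 ends up carrying both labels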
sirno/pypetting | [
"c0c7b7a53ef10eeb89e6a8ee46a6bbfa3752fd29"
] | [
"pypetting/liha.py"
] | [
"\"\"\"Pipetting functions.\"\"\"\n\nimport numpy as np\n\nfrom numpy.typing import ArrayLike\n\nfrom .base import GridSite, Labware\nfrom .labware import labwares\nfrom .utils import bin_to_dec, volumes_to_string\n\n__all__ = [\n \"aspirate\",\n \"dispense\",\n \"mix\",\n \"wash\",\n \"move_liha\",\n]\n\n\ndef aspirate(\n grid_site: GridSite,\n column: int,\n column_mask: ArrayLike,\n volumes: ArrayLike | int | float,\n liquid_class: str,\n spacing: int = 1,\n labware: Labware | str = \"greiner96\",\n):\n \"\"\"Advanced aspirate command.\n\n Parameters\n ----------\n grid_site : GridSite\n column : int\n column_mask : array_like\n Boolean mask to indicate which wells to aspirate from\n volumes : array_like | int | float\n Array showing which volumes each tip aspirates; a number is given, all\n tips will aspirate that volume.\n spacing : int, optional\n Spacing of the tips with respect to the column mask\n labware : Labware | str, optional\n Labware specification to be used. (Default: Greiner 96 Well Plate)\n\n \"\"\"\n\n return _liha_command(\n cmd=\"Aspirate\",\n grid_site=grid_site,\n column=column,\n column_mask=column_mask,\n volumes=volumes,\n liquid_class=liquid_class,\n spacing=spacing,\n labware=labware,\n )\n\n\ndef dispense(\n grid_site: GridSite,\n column: int,\n column_mask: ArrayLike,\n volumes: ArrayLike | int | float,\n liquid_class: str,\n spacing: int = 1,\n labware: Labware | str = \"greiner96\",\n):\n \"\"\"Advanced dispense command.\"\"\"\n\n return _liha_command(\n cmd=\"Dispense\",\n grid_site=grid_site,\n column=column,\n column_mask=column_mask,\n volumes=volumes,\n liquid_class=liquid_class,\n spacing=spacing,\n labware=labware,\n )\n\n\ndef mix(\n grid_site: GridSite,\n column: int,\n column_mask: ArrayLike,\n volumes: ArrayLike | int | float,\n liquid_class: str,\n spacing: int = 1,\n labware: Labware | str = \"greiner96\",\n):\n \"\"\"Advanced mix command.\"\"\"\n\n return _liha_command(\n cmd=\"Mix\",\n grid_site=grid_site,\n column=column,\n column_mask=column_mask,\n volumes=volumes,\n liquid_class=liquid_class,\n spacing=spacing,\n labware=labware,\n )\n\n\ndef wash(waste_volume: int | float, cleaner_volume: int | float, station: int):\n \"\"\"Wash tips command.\"\"\"\n return (\n \"B;Wash(\"\n \"255,\"\n f\"{station},1,{station},0,\"\n f'\"{waste_volume}\",'\n f\"500,\"\n f'\"{cleaner_volume}\",'\n \"0,10,70,30,0,0,1000,0);\"\n ).encode()\n\n\ndef move_liha(\n grid_site: GridSite,\n column: int,\n positions: ArrayLike = None,\n spacing: int = 1,\n labware: Labware | str = \"greiner96\",\n local: bool = False,\n z_pos: int = 0,\n speed: int = 10,\n):\n \"\"\"Move LiHa to grid site.\"\"\"\n\n if isinstance(labware, str):\n labware = labwares[labware]\n\n if positions is None:\n positions = np.array([True] * 8)\n\n if column not in range(1, labware.cols + 1):\n raise IndexError(f\"Plate column out of bounds: {column=}\")\n\n command = (\n (\n \"B;MoveLiha(\"\n \"255,\"\n f\"{grid_site.grid},\"\n f\"{grid_site.site},\"\n f'{spacing},\"'\n ).encode()\n + _well_select(positions, column, labware.rows, labware.cols)\n + (f'\",{local:#d},{z_pos},0,{speed},0,0);').encode()\n )\n\n return command\n\n\ndef _encode_well_select(column_mask, offset):\n encoder = np.zeros(offset + len(column_mask), dtype=np.int8)\n for i, volume in enumerate(column_mask):\n encoder[offset + i] = 1 if volume > 0 else 0\n splits = [encoder[(7 * i) : (7 * (i + 1))] for i in range((len(encoder) + 6) // 7)]\n return [2 ** np.arange(len(split)) * split for split in splits]\n\n\ndef 
_well_select(column_mask, column, nrows, ncols, **kwargs):\n \"\"\"Generate well select string for volumes.\"\"\"\n well = (column - 1) * nrows\n encoders = _encode_well_select(column_mask, well % 7)\n n_chars = (nrows * ncols + 6) // 7\n sequence = [\n f\"{ncols:02x}\".encode(),\n f\"{nrows:02x}\".encode(),\n b\"0\" * int(well / 7),\n *[\n int(sum(encoder) + 48).to_bytes(1, \"little\") if sum(encoder) > 0 else b\"0\"\n for encoder in encoders\n ],\n b\"0\" * (n_chars - len(encoders) - int(well / 7)),\n ]\n return b\"\".join(sequence)\n\n\ndef _liha_command(\n cmd: str,\n grid_site: GridSite,\n column: int,\n column_mask: ArrayLike,\n volumes: ArrayLike | int | float,\n liquid_class: str,\n spacing: int = 1,\n labware: Labware | str = \"greiner96\",\n):\n if isinstance(volumes, int | float):\n volumes = np.array(column_mask * volumes)\n\n if isinstance(labware, str):\n labware = labwares[labware]\n\n if column not in range(1, labware.cols + 1):\n raise IndexError(f\"Plate column out of bounds: {column=}\")\n\n command = (\n (\n f\"B;{cmd}(\"\n f\"{bin_to_dec(volumes)},\"\n f'\"{liquid_class}\",'\n f\"{volumes_to_string(volumes)},\"\n f\"{grid_site.grid},\"\n f\"{grid_site.site},\"\n f'{spacing},\"'\n ).encode()\n + _well_select(column_mask, column, labware.rows, labware.cols)\n + b'\",0,0);'\n )\n return command\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
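The _encode_well_select packing above stores wells seven to a chunk; in the final command string each chunk value v is emitted as the byte chr(v + 48), with '0' standing in for an empty chunk. A standalone sketch of the bit packing for a single 8-well column:

import numpy as np

def encode_column(mask, offset=0):
    # One bit per selected well, packed little-endian, seven bits per value.
    bits = np.zeros(offset + len(mask), dtype=np.int8)
    bits[offset:] = [1 if v > 0 else 0 for v in mask]
    chunks = [bits[7 * i: 7 * (i + 1)] for i in range((len(bits) + 6) // 7)]
    return [int(np.sum(2 ** np.arange(len(c)) * c)) for c in chunks]

print(encode_column([1, 1, 0, 0, 1, 1, 1, 1]))  # -> [115, 1]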
SpangleLabs/advent-of-code | [
"9692f533c30f22e59f7055ca12253128ca8dac0a"
] | [
"2021/19_matrices.py"
] | [
"from abc import ABC, abstractmethod\nimport dataclasses\nimport datetime\nimport itertools\nfrom functools import cached_property\nfrom typing import List, Tuple, Set, Optional\nimport numpy\n\nfrom utils.coords3d import Coords3D\nfrom utils.input import load_input\n\n\nclass Transformation3D(ABC):\n\n @property\n @abstractmethod\n def matrix(self) -> numpy.array:\n raise NotImplementedError\n\n @abstractmethod\n def apply_transformation(self, coord: Coords3D) -> Coords3D:\n raise NotImplementedError\n\n @abstractmethod\n def __neg__(self) -> \"Transformation3D\":\n raise NotImplementedError\n\n\[email protected](eq=True, frozen=True)\nclass Flip3D(Transformation3D):\n coord_flip: int\n\n @property\n def matrix(self) -> numpy.array:\n return numpy.array([\n [(2 * (self.coord_flip % 2)) - 1, 0, 0, 0],\n [0, (2 * (self.coord_flip // 2 % 2)) - 1, 0, 0],\n [0, 0, (2 * (self.coord_flip // 4 % 2)) - 1, 0],\n [0, 0, 0, 1]\n ])\n\n def apply_transformation(self, coord: Coords3D) -> Coords3D:\n x_flip = (2 * (self.coord_flip % 2)) - 1\n y_flip = (2 * (self.coord_flip // 2 % 2)) - 1\n z_flip = (2 * (self.coord_flip // 4 % 2)) - 1\n return Coords3D(\n x_flip * coord.x,\n y_flip * coord.y,\n z_flip * coord.z\n )\n\n def __neg__(self) -> \"Flip3D\":\n return Flip3D(\n self.coord_flip\n )\n\n\[email protected](eq=True, frozen=True)\nclass Rotation3D(Transformation3D):\n coord_order: str\n\n @property\n def matrix(self) -> numpy.array:\n char1 = self.coord_order[0]\n char2 = self.coord_order[1]\n char3 = self.coord_order[2]\n return numpy.array([\n [int(\"x\" == char1), int(\"y\" == char1), int(\"z\" == char1), 0],\n [int(\"x\" == char2), int(\"y\" == char2), int(\"z\" == char2), 0],\n [int(\"x\" == char3), int(\"y\" == char3), int(\"z\" == char3), 0],\n [0, 0, 0, 1]\n ])\n\n def apply_transformation(self, coord: Coords3D) -> Coords3D:\n return Coords3D(\n coord.__getattribute__(self.coord_order[0]),\n coord.__getattribute__(self.coord_order[1]),\n coord.__getattribute__(self.coord_order[2])\n )\n\n def __neg__(self) -> \"Rotation3D\":\n new_order = {\n \"xyz\": \"xyz\",\n \"xzy\": \"xzy\",\n \"yxz\": \"yxz\",\n \"yzx\": \"zxy\",\n \"zxy\": \"yzx\",\n \"zyx\": \"zyx\",\n }[self.coord_order]\n return Rotation3D(new_order)\n\n\[email protected](eq=True, frozen=True)\nclass Translate3D(Transformation3D):\n coord: Coords3D\n\n @property\n def matrix(self) -> numpy.array:\n return numpy.array([\n [1, 0, 0, self.coord.x],\n [0, 1, 0, self.coord.y],\n [0, 0, 1, self.coord.z],\n [0, 0, 0, 1]\n ])\n\n def apply_transformation(self, coord: Coords3D) -> Coords3D:\n return coord + self.coord\n\n def __neg__(self) -> \"Translate3D\":\n return Translate3D(-self.coord)\n\n\[email protected](eq=True, frozen=True)\nclass CompoundTransformation:\n transformations: Tuple[Transformation3D, ...]\n\n @cached_property\n def matrix(self) -> numpy.array:\n result = numpy.identity(4)\n for trans in self.transformations[::-1]:\n result = numpy.matmul(result, trans.matrix)\n return result\n\n def apply(self, coords: Coords3D) -> Coords3D:\n coord_array = numpy.array([coords.x, coords.y, coords.z, 1])\n result = numpy.matmul(self.matrix, coord_array)\n return Coords3D(result[0], result[1], result[2])\n\n def __neg__(self) -> \"CompoundTransformation\":\n transformations: List[Transformation3D] = []\n for trans in self.transformations[::-1]:\n transformations.append(-trans)\n return CompoundTransformation(tuple(transformations))\n\n\ndef transform_beacons(beacons: Set[Coords3D], transformation: CompoundTransformation) -> 
Set[Coords3D]:\n    return {\n        transformation.apply(beacon)\n        for beacon in beacons\n    }\n\n\ndef all_transformations() -> List[Tuple[Rotation3D, Flip3D]]:\n    results = []\n    for coord_order in itertools.permutations(\"xyz\", 3):\n        for coord_flip in range(8):\n            rotation = Rotation3D(\"\".join(coord_order))\n            flip = Flip3D(coord_flip)\n            results.append((rotation, flip))\n    return results\n\n\ndef truncate_beacons(beacons: Set[Coords3D], max_distance: int = 1000) -> Set[Coords3D]:\n    return {\n        beacon\n        for beacon in beacons\n        if all([\n            -max_distance <= beacon.x <= max_distance,\n            -max_distance <= beacon.y <= max_distance,\n            -max_distance <= beacon.z <= max_distance\n        ])\n    }\n\n\nclass Scanner:\n    def __init__(self, number: int, beacons: Set[Coords3D], transforms: Optional[List[CompoundTransformation]] = None) -> None:\n        self.number = number\n        self.beacons = beacons\n        self.transforms = transforms\n\n    @classmethod\n    def parse_input(cls, scanner_lines: List[str]) -> \"Scanner\":\n        number = int(scanner_lines[0].strip(\" -\").split()[-1])\n        coords = {\n            Coords3D.from_input_line(line) for line in scanner_lines[1:]\n        }\n        return cls(number, coords)\n\n    def __eq__(self, other: \"Scanner\") -> bool:\n        return self.number == other.number\n\n    def __hash__(self) -> int:\n        return hash((Scanner, self.number))\n\n    def overlap_point(self, other: \"Scanner\") -> Optional[CompoundTransformation]:\n        for my_beacon in self.beacons:\n            for their_beacon in other.beacons:\n                for rotate, flip in all_transformations():\n                    compound_tf = CompoundTransformation(\n                        (\n                            Translate3D(-my_beacon),\n                            rotate,\n                            flip,\n                            Translate3D(their_beacon)\n                        )\n                    )\n                    my_mapped_beacons = transform_beacons(self.beacons, compound_tf)\n\n                    # Twelve shared beacons is the puzzle's threshold for a confirmed overlap.\n                    my_intersect_beacons = my_mapped_beacons.intersection(other.beacons)\n                    if len(my_intersect_beacons) >= 12:\n                        return compound_tf\n\n        return None\n\n\ndef find_overlaps(scanners: List[Scanner]) -> None:\n    unknown_scanners = [scanner for scanner in scanners if scanner.transforms is None]\n    newly_known = {scanner for scanner in scanners if scanner.transforms is not None}\n    while newly_known:\n        print(\"Searching again\")\n        try_next = set()\n        for known_scanner in newly_known:\n            for unknown_scanner in unknown_scanners:\n                if unknown_scanner in try_next:\n                    continue\n                print(f\"Checking {known_scanner.number} against {unknown_scanner.number}\")\n                if transform := known_scanner.overlap_point(unknown_scanner):\n                    unknown_scanner.transforms = known_scanner.transforms + [transform]\n                    print(f\"Scanner {known_scanner.number} and {unknown_scanner.number} overlap!\")\n                    try_next.add(unknown_scanner)\n        newly_known = try_next\n        unknown_scanners = [scanner for scanner in scanners if scanner.transforms is None]\n\n\ndef _test() -> None:\n    coord = Coords3D(123, 456, 789)\n    translate = Translate3D(Coords3D(64, 32, 79))\n    for rotate, flip in all_transformations():\n\n        compound_translate = CompoundTransformation((translate,))\n        compound_rotate = CompoundTransformation((rotate,))\n        compound_flip = CompoundTransformation((flip,))\n        assert translate.apply_transformation(coord) == compound_translate.apply(coord)\n        assert rotate.apply_transformation(coord) == compound_rotate.apply(coord)\n        assert flip.apply_transformation(coord) == compound_flip.apply(coord)\n\n        compound_full = CompoundTransformation((translate, rotate, flip, -translate))\n        coords_full = translate.apply_transformation(coord)\n        coords_full = rotate.apply_transformation(coords_full)\n        coords_full = flip.apply_transformation(coords_full)\n        coords_full = (-translate).apply_transformation(coords_full)\n        assert coords_full == compound_full.apply(coord)\n\n\ndef _main() -> str:\n    my_input = load_input()\n    scanner_inputs = my_input.split(\"\\n\\n\")\n    # Parse scanners\n    scanners = []\n    for scanner_input in scanner_inputs:\n        scanners.append(Scanner.parse_input(scanner_input.split(\"\\n\")))\n    # scanners = scanners[:2]\n    # coord = Coords3D(14, -230, 408)\n    # for tf, coords in all_transformations(set([coord])).items():\n    #     print(coord)\n    #     print(tf.apply(coord))\n    #     print((-tf).apply(tf.apply(coord)))\n    #     assert coord == (-tf).apply(tf.apply(coord))\n    # return\n\n    # Find overlaps\n    scanners[0].transforms = []\n    find_overlaps(scanners)\n    # Gather all coordinates\n    all_coords = scanners[0].beacons\n    for scanner in scanners[1:]:\n        zeroed_coords = scanner.beacons\n        for tf in scanner.transforms[::-1]:\n            zeroed_coords = transform_beacons(zeroed_coords, -tf)\n        all_coords = all_coords.union(zeroed_coords)\n    return str(len(all_coords))\n\n\nif __name__ == \"__main__\":\n    start_time = datetime.datetime.now()\n    print(_main())\n    print(f\"Time taken: {(datetime.datetime.now() - start_time).total_seconds()}s\")\n"
] | [
[
"numpy.array",
"numpy.identity",
"numpy.matmul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
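A hand-checked example of the 4x4 homogeneous-coordinate composition the solution builds on. CompoundTransformation multiplies matrices right-to-left, so "translate then swap x/y" becomes swap @ translate applied to a column vector; the matrices below are invented for illustration:

import numpy

translate = numpy.array([
    [1, 0, 0, 10],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
])
swap_xy = numpy.array([
    [0, 1, 0, 0],
    [1, 0, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
])
point = numpy.array([1, 2, 3, 1])  # (x, y, z, 1)

combined = numpy.matmul(swap_xy, translate)
print(numpy.matmul(combined, point))  # -> [ 2 11  3  1]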
luojy95/FBGEMM | [
"95985145b0060cdd5acd4fa0eb65347b60ef060a"
] | [
"fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py"
] | [
"#!/usr/bin/env python3\n\n# pyre-ignore-all-errors[56]\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport enum\nimport logging\nfrom dataclasses import dataclass\nfrom itertools import accumulate\nfrom math import log2\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport fbgemm_gpu.split_embedding_codegen_lookup_invokers as invokers\nimport torch\nfrom fbgemm_gpu.split_embedding_configs import EmbOptimType as OptimType\nfrom fbgemm_gpu.split_embedding_configs import SparseType\nfrom torch import Tensor, nn\n\nASSOC = 32\n# Maximum number of times prefetch() can be called without\n# a corresponding forward() call\nMAX_PREFETCH_DEPTH = 100\nINT8_EMB_ROW_DIM_OFFSET = 8\n\n\nclass DoesNotHavePrefix(Exception):\n pass\n\n\nclass EmbeddingLocation(enum.IntEnum):\n DEVICE = 0\n MANAGED = 1\n MANAGED_CACHING = 2\n HOST = 3\n\n\nclass ComputeDevice(enum.IntEnum):\n CPU = 0\n CUDA = 1\n\n\nclass CacheAlgorithm(enum.Enum):\n LRU = 0\n LFU = 1\n\n\nclass PoolingMode(enum.IntEnum):\n SUM = 0\n MEAN = 1\n NONE = 2\n\n\n@dataclass\nclass SplitState:\n dev_size: int\n host_size: int\n uvm_size: int\n placements: List[EmbeddingLocation]\n offsets: List[int]\n\n\ndef construct_split_state(\n embedding_specs: List[Tuple[int, int, EmbeddingLocation, ComputeDevice]],\n rowwise: bool,\n cacheable: bool,\n precision: SparseType = SparseType.FP32,\n int8_emb_row_dim_offset: int = INT8_EMB_ROW_DIM_OFFSET,\n) -> SplitState:\n placements = []\n offsets = []\n dev_size = 0\n host_size = 0\n uvm_size = 0\n for (num_embeddings, embedding_dim, location, _) in embedding_specs:\n assert embedding_dim % 4 == 0, f\"{embedding_dim}\"\n if precision == SparseType.INT8:\n embedding_dim += int8_emb_row_dim_offset\n state_size = num_embeddings * embedding_dim if not rowwise else num_embeddings\n if location == EmbeddingLocation.HOST:\n placements.append(EmbeddingLocation.HOST)\n offsets.append(host_size)\n host_size += state_size\n # If table is on device, then opimtizer is on device.\n # If table is managed, then if optimizer state is rowwise, optimizer is on device, otherwise optimizer is managed.\n elif location == EmbeddingLocation.DEVICE or rowwise:\n placements.append(EmbeddingLocation.DEVICE)\n offsets.append(dev_size)\n dev_size += state_size\n else:\n if cacheable and location == EmbeddingLocation.MANAGED_CACHING:\n placements.append(EmbeddingLocation.MANAGED_CACHING)\n else:\n placements.append(EmbeddingLocation.MANAGED)\n offsets.append(uvm_size)\n uvm_size += state_size\n assert len(placements) == len(offsets)\n return SplitState(\n dev_size=dev_size,\n host_size=host_size,\n uvm_size=uvm_size,\n placements=placements,\n offsets=offsets,\n )\n\n\n@dataclass\nclass CacheState:\n # T + 1 elements and cache_hash_size_cumsum[-1] == total_cache_hash_size\n cache_hash_size_cumsum: List[int]\n cache_index_table_map: List[int]\n total_cache_hash_size: int\n\n\ndef construct_cache_state(\n embedding_specs: List[Tuple[int, int, EmbeddingLocation, ComputeDevice]],\n feature_table_map: List[int],\n) -> CacheState:\n _cache_hash_size_cumsum = [0]\n total_cache_hash_size = 0\n for (num_embeddings, _, location, _) in embedding_specs:\n if location == EmbeddingLocation.MANAGED_CACHING:\n total_cache_hash_size += num_embeddings\n _cache_hash_size_cumsum.append(total_cache_hash_size)\n # [T], -1: non-cached table\n cache_hash_size_cumsum = []\n # 
[total_cache_hash_size], linear cache index -> table index\n cache_index_table_map = [-1] * total_cache_hash_size\n for t, t_ in enumerate(feature_table_map):\n for i in range(_cache_hash_size_cumsum[t_], _cache_hash_size_cumsum[t_ + 1]):\n cache_index_table_map[i] = t\n (_, _, location, _) = embedding_specs[t_]\n if location == EmbeddingLocation.MANAGED_CACHING:\n cache_hash_size_cumsum.append(_cache_hash_size_cumsum[t_])\n else:\n cache_hash_size_cumsum.append(-1)\n cache_hash_size_cumsum.append(total_cache_hash_size)\n s = CacheState(\n cache_hash_size_cumsum=cache_hash_size_cumsum,\n cache_index_table_map=cache_index_table_map,\n total_cache_hash_size=total_cache_hash_size,\n )\n return s\n\n\nclass SplitTableBatchedEmbeddingBagsCodegen(nn.Module):\n \"\"\"\n Multiple sparse features can share one embedding table.\n 'feature_table_map' specifies the feature-table mapping.\n T: number of logical tables\n T_: number of physical tables\n T >= T_\n \"\"\"\n\n embedding_specs: List[Tuple[int, int, EmbeddingLocation, ComputeDevice]]\n optimizer_args: invokers.lookup_args.OptimizerArgs\n lxu_cache_locations_list: List[Tensor]\n lxu_cache_locations_empty: Tensor\n timesteps_prefetched: List[int]\n\n def __init__( # noqa C901\n self,\n embedding_specs: List[\n Tuple[int, int, EmbeddingLocation, ComputeDevice]\n ], # tuple of (rows, dims, placements, compute_devices)\n feature_table_map: Optional[List[int]] = None, # [T]\n cache_algorithm: CacheAlgorithm = CacheAlgorithm.LRU,\n cache_load_factor: float = 0.2,\n cache_sets: int = 0,\n cache_reserved_memory: float = 0.0,\n cache_precision: SparseType = SparseType.FP32,\n weights_precision: SparseType = SparseType.FP32,\n enforce_hbm: bool = False, # place all weights/momentums in HBM when using cache\n optimizer: OptimType = OptimType.EXACT_SGD,\n record_cache_metrics: bool = False,\n # General Optimizer args\n stochastic_rounding: bool = False,\n gradient_clipping: bool = False,\n max_gradient: float = 1.0,\n learning_rate: float = 0.01,\n eps: float = 1.0e-8, # used by Adagrad, LAMB, and Adam\n momentum: float = 0.9, # used by LARS-SGD\n weight_decay: float = 0.0, # used by LARS-SGD, LAMB, and ADAM\n eta: float = 0.001, # used by LARS-SGD,\n beta1: float = 0.9, # used by LAMB and ADAM\n beta2: float = 0.999, # used by LAMB and ADAM\n pooling_mode: PoolingMode = PoolingMode.SUM,\n device: Optional[torch.device] = None,\n ) -> None:\n super(SplitTableBatchedEmbeddingBagsCodegen, self).__init__()\n\n self.pooling_mode = pooling_mode\n self.weights_precision = weights_precision\n self.record_cache_metrics = record_cache_metrics\n # NOTE: a placeholder to avoid multi-construction and make TorchScript work!\n self.dummy_tensor: Tensor = torch.zeros(0, device=device)\n\n self.embedding_specs = embedding_specs\n (rows, dims, locations, compute_devices) = zip(*embedding_specs)\n T_ = len(self.embedding_specs)\n assert T_ > 0\n\n assert all(\n cd == compute_devices[0] for cd in compute_devices\n ), \"Heterogenous compute_devices are NOT supported!\"\n self.use_cpu: bool = all(cd == ComputeDevice.CPU for cd in compute_devices)\n assert not self.use_cpu or all(\n loc == EmbeddingLocation.HOST for loc in locations\n ), \"ComputeDevice.CPU is only for EmbeddingLocation.HOST!\"\n\n if device is not None:\n self.current_device: torch.device = device\n else:\n self.current_device: torch.device = (\n torch.device(\"cpu\") if self.use_cpu else torch.cuda.current_device()\n )\n\n # add placeholder require_grad param tensor to enable autograd with int8 weights\n 
self.placeholder_autograd_tensor = nn.Parameter(\n torch.zeros(0, device=self.current_device, dtype=torch.float)\n )\n\n self.int8_emb_row_dim_offset: int = INT8_EMB_ROW_DIM_OFFSET\n\n self.feature_table_map: List[int] = (\n feature_table_map if feature_table_map is not None else list(range(T_))\n )\n T = len(self.feature_table_map)\n assert T_ <= T\n table_has_feature = [False] * T_\n for t in self.feature_table_map:\n table_has_feature[t] = True\n assert all(table_has_feature), \"Each table must have at least one feature!\"\n\n D_offsets = [dims[t] for t in self.feature_table_map]\n D_offsets = [0] + list(accumulate(D_offsets))\n self.total_D: int = D_offsets[-1]\n self.max_D: int = max(dims)\n cached_dims = [\n embedding_spec[1]\n for embedding_spec in embedding_specs\n if embedding_spec[2] == EmbeddingLocation.MANAGED_CACHING\n ]\n self.max_D_cache: int = max(cached_dims) if len(cached_dims) > 0 else 0\n\n self.register_buffer(\n \"D_offsets\",\n torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),\n )\n\n hash_size_cumsum = [0] + list(accumulate(rows))\n self.total_hash_size_bits = int(log2(float(hash_size_cumsum[-1])) + 1)\n # The last element is to easily access # of rows of each table by\n # hash_size_cumsum[t + 1] - hash_size_cumsum[t]\n hash_size_cumsum = [hash_size_cumsum[t] for t in self.feature_table_map] + [\n hash_size_cumsum[-1]\n ]\n self.register_buffer(\n \"hash_size_cumsum\",\n torch.tensor(\n hash_size_cumsum, device=self.current_device, dtype=torch.int64\n ),\n )\n weight_split = construct_split_state(\n embedding_specs,\n rowwise=False,\n cacheable=True,\n precision=weights_precision,\n )\n table_embedding_dtype = torch.float32\n if weights_precision == SparseType.FP16:\n table_embedding_dtype = torch.float16\n elif weights_precision == SparseType.INT8:\n table_embedding_dtype = torch.uint8\n\n self._apply_split(\n weight_split,\n prefix=\"weights\",\n dtype=table_embedding_dtype,\n enforce_hbm=enforce_hbm,\n )\n\n if self.use_cpu:\n # Construct optimizer states\n assert optimizer in (\n OptimType.EXACT_ADAGRAD,\n OptimType.EXACT_ROWWISE_ADAGRAD,\n OptimType.EXACT_SGD,\n OptimType.ROWWISE_ADAGRAD,\n OptimType.SGD,\n ), f\"Optimizer {optimizer} is not supported in cpu mode.\"\n else:\n assert optimizer in (\n OptimType.ADAM,\n OptimType.EXACT_ADAGRAD,\n OptimType.EXACT_ROWWISE_ADAGRAD,\n OptimType.EXACT_SGD,\n OptimType.LAMB,\n OptimType.LARS_SGD,\n OptimType.PARTIAL_ROWWISE_ADAM,\n OptimType.PARTIAL_ROWWISE_LAMB,\n OptimType.SGD,\n ), f\"Optimizer {optimizer} is not supported.\"\n\n self.stochastic_rounding = stochastic_rounding\n self.optimizer = optimizer\n\n self.optimizer_args = invokers.lookup_args.OptimizerArgs(\n stochastic_rounding=stochastic_rounding,\n gradient_clipping=gradient_clipping,\n max_gradient=max_gradient,\n learning_rate=learning_rate,\n eps=eps,\n beta1=beta1,\n beta2=beta2,\n weight_decay=weight_decay,\n eta=eta,\n momentum=momentum,\n )\n\n if optimizer in (\n OptimType.SGD,\n OptimType.EXACT_SGD,\n ):\n # NOTE: make TorchScript work!\n self.register_buffer(\n \"momentum1_dev\", torch.tensor([0], dtype=torch.int64), persistent=False\n )\n self.register_buffer(\n \"momentum1_host\", torch.tensor([0], dtype=torch.int64), persistent=False\n )\n self.register_buffer(\n \"momentum1_uvm\", torch.tensor([0], dtype=torch.int64), persistent=False\n )\n self.register_buffer(\n \"momentum1_placements\",\n torch.tensor([0], dtype=torch.int64),\n persistent=False,\n )\n self.register_buffer(\n \"momentum1_offsets\",\n torch.tensor([0], 
dtype=torch.int64),\n persistent=False,\n )\n else:\n self._apply_split(\n construct_split_state(\n embedding_specs,\n rowwise=optimizer\n in [OptimType.EXACT_ROWWISE_ADAGRAD, OptimType.ROWWISE_ADAGRAD],\n cacheable=False,\n ),\n prefix=\"momentum1\",\n dtype=torch.float32,\n enforce_hbm=enforce_hbm,\n )\n if optimizer in (\n OptimType.ADAM,\n OptimType.PARTIAL_ROWWISE_ADAM,\n OptimType.LAMB,\n OptimType.PARTIAL_ROWWISE_LAMB,\n ):\n self._apply_split(\n construct_split_state(\n embedding_specs,\n rowwise=optimizer\n in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.PARTIAL_ROWWISE_LAMB),\n cacheable=False,\n ),\n prefix=\"momentum2\",\n dtype=torch.float32,\n )\n self.register_buffer(\"iter\", torch.zeros(1, dtype=torch.int64, device=self.current_device))\n else:\n # NOTE: make TorchScript work!\n self.register_buffer(\n \"momentum2_dev\", torch.zeros(1, dtype=torch.int64, device=self.current_device), persistent=False\n )\n self.register_buffer(\n \"momentum2_host\", torch.zeros(1, dtype=torch.int64, device=self.current_device), persistent=False\n )\n self.register_buffer(\n \"momentum2_uvm\", torch.zeros(1, dtype=torch.int64, device=self.current_device), persistent=False\n )\n self.register_buffer(\n \"momentum2_placements\",\n torch.zeros(1, dtype=torch.int64, device=self.current_device),\n persistent=False,\n )\n self.register_buffer(\n \"momentum2_offsets\",\n torch.zeros(1, dtype=torch.int64, device=self.current_device),\n persistent=False,\n )\n self.register_buffer(\n \"iter\", torch.zeros(1, dtype=torch.int64, device=self.current_device), persistent=False\n )\n\n cache_state = construct_cache_state(embedding_specs, self.feature_table_map)\n if cache_precision == SparseType.FP32:\n cache_embedding_dtype = torch.float32\n elif cache_precision == SparseType.FP16:\n cache_embedding_dtype = torch.float16\n else:\n raise AssertionError(f\"cache_precision {cache_precision} not supported!\")\n\n self._apply_cache_state(\n cache_state,\n cache_algorithm,\n cache_load_factor,\n cache_sets,\n cache_reserved_memory,\n dtype=cache_embedding_dtype,\n )\n\n logging.debug(\n f\"Using fused {optimizer} with optimizer_args={self.optimizer_args}\"\n )\n\n self.step = 0\n\n def get_states(self, prefix: str) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n if not hasattr(self, f\"{prefix}_physical_placements\"):\n raise DoesNotHavePrefix()\n dev_param = getattr(self, f\"{prefix}_dev\")\n host_param = getattr(self, f\"{prefix}_host\")\n uvm_param = getattr(self, f\"{prefix}_uvm\")\n placements = getattr(self, f\"{prefix}_physical_placements\")\n offsets = getattr(self, f\"{prefix}_physical_offsets\")\n return (\n dev_param,\n host_param,\n uvm_param,\n torch.tensor(placements, dtype=torch.int32),\n torch.tensor(offsets, dtype=torch.int64),\n )\n\n def get_all_states(self) -> List[Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]]:\n all_states = []\n for prefix in [\"weights\", \"momentum1\", \"momentum2\"]:\n try:\n all_states.append(self.get_states(prefix))\n except DoesNotHavePrefix:\n pass\n return all_states\n\n @torch.jit.export\n def get_cache_miss_counter(self) -> Tensor:\n # cache_miss_counter contains two items:\n # The first one is cache_miss_forward_count which records the total number of forwards which has at least one cache miss\n # The second one is the unique_cache_miss_count which records to total number of unique (dedup) cache misses\n\n # pyre-fixme[7]: Expected `Tensor` but got `typing.Union[Tensor,\n # nn.Module]`.\n return self.cache_miss_counter\n\n def forward(\n self,\n indices: 
Tensor,\n offsets: Tensor,\n per_sample_weights: Optional[Tensor] = None,\n feature_requires_grad: Optional[Tensor] = None,\n ) -> Tensor:\n (indices, offsets) = indices.long(), offsets.long()\n self.step += 1\n\n if len(self.timesteps_prefetched) == 0:\n self.prefetch(indices, offsets)\n\n self.timesteps_prefetched.pop(0)\n lxu_cache_locations = (\n self.lxu_cache_locations_empty\n if len(self.lxu_cache_locations_list) == 0\n else self.lxu_cache_locations_list.pop(0)\n )\n common_args = invokers.lookup_args.CommonArgs(\n placeholder_autograd_tensor=self.placeholder_autograd_tensor,\n # pyre-fixme[6]: Expected `Tensor` for 2nd param but got `Union[Tensor,\n # nn.Module]`.\n dev_weights=self.weights_dev,\n # pyre-fixme[6]: Expected `Tensor` for 3rd param but got `Union[Tensor,\n # nn.Module]`.\n host_weights=self.weights_host,\n # pyre-fixme[6]: Expected `Tensor` for 4th param but got `Union[Tensor,\n # nn.Module]`.\n uvm_weights=self.weights_uvm,\n # pyre-fixme[6]: Expected `Tensor` for 5th param but got `Union[Tensor,\n # nn.Module]`.\n lxu_cache_weights=self.lxu_cache_weights,\n # pyre-fixme[6]: Expected `Tensor` for 6th param but got `Union[Tensor,\n # nn.Module]`.\n weights_placements=self.weights_placements,\n # pyre-fixme[6]: Expected `Tensor` for 7th param but got `Union[Tensor,\n # nn.Module]`.\n weights_offsets=self.weights_offsets,\n # pyre-fixme[6]: Expected `Tensor` for 8th param but got `Union[Tensor,\n # nn.Module]`.\n D_offsets=self.D_offsets,\n total_D=self.total_D,\n max_D=self.max_D,\n # pyre-fixme[6]: Expected `Tensor` for 11th param but got `Union[Tensor,\n # nn.Module]`.\n hash_size_cumsum=self.hash_size_cumsum,\n total_hash_size_bits=self.total_hash_size_bits,\n indices=indices,\n offsets=offsets,\n pooling_mode=self.pooling_mode,\n indice_weights=per_sample_weights,\n feature_requires_grad=feature_requires_grad,\n lxu_cache_locations=lxu_cache_locations,\n )\n\n if self.optimizer == OptimType.EXACT_SGD:\n return invokers.lookup_sgd.invoke(common_args, self.optimizer_args)\n elif self.optimizer == OptimType.SGD:\n assert self.use_cpu, \"Approx SGD is only supported in CPU mode\"\n return invokers.lookup_approx_sgd.invoke(common_args, self.optimizer_args)\n\n momentum1 = invokers.lookup_args.Momentum(\n # pyre-fixme[6]: Expected `Tensor` for 1st param but got `Union[Tensor,\n # nn.Module]`.\n dev=self.momentum1_dev,\n # pyre-fixme[6]: Expected `Tensor` for 2nd param but got `Union[Tensor,\n # nn.Module]`.\n host=self.momentum1_host,\n # pyre-fixme[6]: Expected `Tensor` for 3rd param but got `Union[Tensor,\n # nn.Module]`.\n uvm=self.momentum1_uvm,\n # pyre-fixme[6]: Expected `Tensor` for 4th param but got `Union[Tensor,\n # nn.Module]`.\n offsets=self.momentum1_offsets,\n # pyre-fixme[6]: Expected `Tensor` for 5th param but got `Union[Tensor,\n # nn.Module]`.\n placements=self.momentum1_placements,\n )\n\n if self.optimizer == OptimType.LARS_SGD:\n return invokers.lookup_lars_sgd.invoke(\n common_args, self.optimizer_args, momentum1\n )\n if self.optimizer == OptimType.EXACT_ADAGRAD:\n return invokers.lookup_adagrad.invoke(\n common_args, self.optimizer_args, momentum1\n )\n if self.optimizer == OptimType.EXACT_ROWWISE_ADAGRAD:\n return invokers.lookup_rowwise_adagrad.invoke(\n common_args, self.optimizer_args, momentum1\n )\n if self.optimizer == OptimType.ROWWISE_ADAGRAD:\n assert self.use_cpu, \"Approx rowwise AdaGrad is only supported in CPU mode\"\n return invokers.lookup_approx_rowwise_adagrad.invoke(\n common_args, self.optimizer_args, momentum1\n )\n\n 
momentum2 = invokers.lookup_args.Momentum(\n # pyre-fixme[6]: Expected `Tensor` for 1st param but got `Union[Tensor,\n # nn.Module]`.\n dev=self.momentum2_dev,\n # pyre-fixme[6]: Expected `Tensor` for 2nd param but got `Union[Tensor,\n # nn.Module]`.\n host=self.momentum2_host,\n # pyre-fixme[6]: Expected `Tensor` for 3rd param but got `Union[Tensor,\n # nn.Module]`.\n uvm=self.momentum2_uvm,\n # pyre-fixme[6]: Expected `Tensor` for 4th param but got `Union[Tensor,\n # nn.Module]`.\n offsets=self.momentum2_offsets,\n # pyre-fixme[6]: Expected `Tensor` for 5th param but got `Union[Tensor,\n # nn.Module]`.\n placements=self.momentum2_placements,\n )\n # Ensure iter is always on CPU so the increment doesn't synchronize.\n if self.iter.is_cuda:\n # pyre-fixme[16]: `SplitTableBatchedEmbeddingBagsCodegen` has no\n # attribute `iter`.\n self.iter = self.iter.cpu()\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self, Tensor),\n # Named(item, typing.Any)], typing.Any], Tensor], Tensor, nn.Module]` is not a\n # function.\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.__setitem__)[[Named(self, Tensor),\n # Named(item, typing.Any), Named(other, typing.Any)], None], Tensor], Tensor,\n # nn.Module]` is not a function.\n self.iter[0] += 1\n\n if self.optimizer == OptimType.ADAM:\n return invokers.lookup_adam.invoke(\n common_args,\n self.optimizer_args,\n momentum1,\n momentum2,\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.item)[[Named(self,\n # Tensor)], typing.Union[float, int]], Tensor], Tensor, nn.Module]` is\n # not a function.\n self.iter.item(),\n )\n if self.optimizer == OptimType.PARTIAL_ROWWISE_ADAM:\n return invokers.lookup_partial_rowwise_adam.invoke(\n common_args,\n self.optimizer_args,\n momentum1,\n momentum2,\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.item)[[Named(self,\n # Tensor)], typing.Union[float, int]], Tensor], Tensor, nn.Module]` is\n # not a function.\n self.iter.item(),\n )\n if self.optimizer == OptimType.LAMB:\n return invokers.lookup_lamb.invoke(\n common_args,\n self.optimizer_args,\n momentum1,\n momentum2,\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.item)[[Named(self,\n # Tensor)], typing.Union[float, int]], Tensor], Tensor, nn.Module]` is\n # not a function.\n self.iter.item(),\n )\n if self.optimizer == OptimType.PARTIAL_ROWWISE_LAMB:\n return invokers.lookup_partial_rowwise_lamb.invoke(\n common_args,\n self.optimizer_args,\n momentum1,\n momentum2,\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.item)[[Named(self,\n # Tensor)], typing.Union[float, int]], Tensor], Tensor, nn.Module]` is\n # not a function.\n self.iter.item(),\n )\n\n raise ValueError(f\"Invalid OptimType: {self.optimizer}\")\n\n def prefetch(self, indices: Tensor, offsets: Tensor) -> None:\n self.timestep += 1\n self.timesteps_prefetched.append(self.timestep)\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.numel)[[Named(self, Tensor)],\n # int], Tensor], Tensor, nn.Module]` is not a function.\n if not self.lxu_cache_weights.numel():\n return\n\n (indices, offsets) = indices.long(), offsets.long()\n linear_cache_indices = torch.ops.fb.linearize_cache_indices(\n self.cache_hash_size_cumsum,\n indices,\n offsets,\n )\n\n if self.record_cache_metrics:\n lxu_cache_locations = torch.ops.fb.lxu_cache_lookup(\n linear_cache_indices,\n self.lxu_cache_state,\n )\n self._update_cache_miss_counter(lxu_cache_locations, linear_cache_indices)\n\n if 
self.cache_algorithm == CacheAlgorithm.LRU:\n torch.ops.fb.lru_cache_populate(\n self.weights_uvm,\n self.cache_hash_size_cumsum,\n self.total_cache_hash_size,\n self.cache_index_table_map,\n self.weights_offsets,\n self.D_offsets,\n linear_cache_indices,\n self.lxu_cache_state,\n self.lxu_cache_weights,\n self.timestep,\n self.lxu_state,\n self.stochastic_rounding,\n )\n elif self.cache_algorithm == CacheAlgorithm.LFU:\n torch.ops.fb.lfu_cache_populate(\n self.weights_uvm,\n self.cache_hash_size_cumsum,\n self.total_cache_hash_size,\n self.cache_index_table_map,\n self.weights_offsets,\n self.D_offsets,\n linear_cache_indices,\n self.lxu_cache_state,\n self.lxu_cache_weights,\n self.lxu_state,\n self.stochastic_rounding,\n )\n\n assert (\n len(self.lxu_cache_locations_list) < self.max_prefetch_depth\n ), f\"self.lxu_cache_locations_list has grown to size: {len(self.lxu_cache_locations_list)}, this exceeds the maximum: {self.max_prefetch_depth}. This probably indicates an error in logic where prefetch() is being called more frequently than forward()\"\n self.lxu_cache_locations_list.append(torch.ops.fb.lxu_cache_lookup(\n linear_cache_indices,\n self.lxu_cache_state,\n )\n )\n\n def _update_cache_miss_counter(self, lxu_cache_locations: Tensor, linear_cache_indices: Tensor) -> None:\n CACHE_MISS = -1\n CACHE_HIT = -2\n\n cache_missed_locations = torch.where(lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT)\n unique_ids_list = torch.unique(cache_missed_locations)\n unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)\n\n miss_count = torch.sum(unique_ids_count_list)\n\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self,\n # Tensor), Named(item, typing.Any)], typing.Any], Tensor], Tensor,\n # nn.Module]` is not a function.\n self.cache_miss_counter[0] += (miss_count > 0).to(torch.int64)\n\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self,\n # Tensor), Named(item, typing.Any)], typing.Any], Tensor], Tensor,\n # nn.Module]` is not a function.\n self.cache_miss_counter[1] += miss_count\n\n\n def init_embedding_weights_uniform(self, min_val: float, max_val: float) -> None:\n splits = self.split_embedding_weights()\n if self.weights_precision == SparseType.INT8:\n # TODO: add in-place FloatToFused8BitRowwiseQuantized conversion\n for emb in splits:\n assert (\n len(emb.shape) == 2\n ), \"Int8 embedding only supported for 2D weight tensors.\"\n shape = [emb.shape[0], emb.shape[1] - self.int8_emb_row_dim_offset]\n tmp_emb = torch.zeros(shape, device=self.current_device)\n tmp_emb.uniform_(min_val, max_val)\n tmp_emb_i8 = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(tmp_emb)\n emb.data.copy_(tmp_emb_i8)\n else:\n for param in splits:\n param.uniform_(min_val, max_val)\n\n @torch.jit.export\n def split_embedding_weights(self) -> List[Tensor]:\n \"\"\"\n Returns a list of weights, split by table\n \"\"\"\n splits = []\n for t, (rows, dim, _, _) in enumerate(self.embedding_specs):\n if self.weights_precision == SparseType.INT8:\n dim += self.int8_emb_row_dim_offset\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self,\n # Tensor), Named(item, typing.Any)], typing.Any], Tensor], Tensor,\n # nn.Module]` is not a function.\n placement = self.weights_physical_placements[t]\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self,\n # Tensor), Named(item, typing.Any)], typing.Any], Tensor], Tensor,\n # nn.Module]` is 
not a function.\n offset = self.weights_physical_offsets[t]\n if placement == EmbeddingLocation.DEVICE.value:\n weights = self.weights_dev\n elif placement == EmbeddingLocation.HOST.value:\n weights = self.weights_host\n else:\n weights = self.weights_uvm\n splits.append(\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.detach)[[Named(self,\n # Tensor)], Tensor], Tensor], Tensor, nn.Module]` is not a function.\n weights.detach()[offset : offset + rows * dim].view(rows, dim)\n )\n return splits\n\n @torch.jit.ignore\n def get_optimizer_buffer(self, state: str) -> torch.Tensor:\n for name, buffer in self.named_buffers():\n if name == state:\n return buffer\n return torch.tensor(0)\n\n @torch.jit.export\n def get_optimizer_state(self) -> List[Dict[str, torch.Tensor]]:\n r\"\"\"\n Get the optimizer state dict that matches the OSS Pytorch optims\n TODO: populate the supported list of optimizers\n \"\"\"\n if (\n self.optimizer == OptimType.EXACT_ROWWISE_ADAGRAD\n or self.optimizer == OptimType.ROWWISE_ADAGRAD\n ):\n list_of_state_dict = [\n {\"sum\": _sum[0]} for _sum in self.split_optimizer_states()\n ]\n else:\n raise NotImplementedError(\n f\"Getting optimizer state {self.optimizer} is not implmeneted\"\n )\n\n return list_of_state_dict\n\n @torch.jit.ignore\n def split_optimizer_states(self) -> List[Tuple[torch.Tensor]]:\n \"\"\"\n Returns a list of states, split by table\n \"\"\"\n\n def get_optimizer_states(\n state_dev: Tensor,\n state_host: Tensor,\n state_uvm: Tensor,\n state_offsets: Tensor,\n state_placements: Tensor,\n rowwise: bool,\n ) -> List[torch.Tensor]:\n splits = []\n for t, (rows, dim, _, _) in enumerate(self.embedding_specs):\n offset = state_offsets[t]\n placement = state_placements[t]\n if placement == EmbeddingLocation.DEVICE:\n state = state_dev\n elif placement == EmbeddingLocation.HOST:\n state = state_host\n else:\n state = state_uvm\n if not rowwise:\n splits.append(\n state.detach()[offset : offset + rows * dim].view(rows, dim)\n )\n else:\n splits.append(state.detach()[offset : offset + rows].view(rows))\n return splits\n\n states: List[List[torch.Tensor]] = []\n if self.optimizer not in (\n OptimType.SGD,\n OptimType.EXACT_SGD,\n ):\n states.append(\n get_optimizer_states(\n # pyre-fixme[6]: Expected `Tensor` for 1st param but got\n # `Union[Tensor, nn.Module]`.\n self.momentum1_dev,\n # pyre-fixme[6]: Expected `Tensor` for 2nd param but got\n # `Union[Tensor, nn.Module]`.\n self.momentum1_host,\n # pyre-fixme[6]: Expected `Tensor` for 3rd param but got\n # `Union[Tensor, nn.Module]`.\n self.momentum1_uvm,\n # pyre-fixme[6]: Expected `Tensor` for 4th param but got\n # `Union[Tensor, nn.Module]`.\n self.momentum1_physical_offsets,\n # pyre-fixme[6]: Expected `Tensor` for 5th param but got\n # `Union[Tensor, nn.Module]`.\n self.momentum1_physical_placements,\n rowwise=self.optimizer\n in [OptimType.EXACT_ROWWISE_ADAGRAD, OptimType.ROWWISE_ADAGRAD],\n )\n )\n if self.optimizer in (\n OptimType.ADAM,\n OptimType.PARTIAL_ROWWISE_ADAM,\n OptimType.LAMB,\n OptimType.PARTIAL_ROWWISE_LAMB,\n ):\n states.append(\n get_optimizer_states(\n # pyre-fixme[6]: Expected `Tensor` for 1st param but got\n # `Union[Tensor, nn.Module]`.\n self.momentum2_dev,\n # pyre-fixme[6]: Expected `Tensor` for 2nd param but got\n # `Union[Tensor, nn.Module]`.\n self.momentum2_host,\n # pyre-fixme[6]: Expected `Tensor` for 3rd param but got\n # `Union[Tensor, nn.Module]`.\n self.momentum2_uvm,\n # pyre-fixme[6]: Expected `Tensor` for 4th param but got\n # `Union[Tensor, 
nn.Module]`.\n self.momentum2_physical_offsets,\n # pyre-fixme[6]: Expected `Tensor` for 5th param but got\n # `Union[Tensor, nn.Module]`.\n self.momentum2_physical_placements,\n rowwise=self.optimizer\n in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.PARTIAL_ROWWISE_LAMB),\n )\n )\n return list(zip(*states))\n\n @torch.jit.export\n def set_learning_rate(self, lr: float) -> None:\n \"\"\"\n Sets the learning rate.\n \"\"\"\n self._set_learning_rate(lr)\n\n @torch.jit.ignore\n def _set_learning_rate(self, lr: float) -> float:\n \"\"\"\n Helper function to script `set_learning_rate`.\n Note that returning None does not work.\n \"\"\"\n self.optimizer_args = self.optimizer_args._replace(learning_rate=lr)\n return 0.0\n\n @torch.jit.export\n def flush(self) -> None:\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.numel)[[Named(self, Tensor)],\n # int], Tensor], Tensor, nn.Module]` is not a function.\n if not self.lxu_cache_weights.numel():\n return\n torch.ops.fb.lxu_cache_flush(\n self.weights_uvm,\n self.cache_hash_size_cumsum,\n self.cache_index_table_map,\n self.weights_offsets,\n self.D_offsets,\n self.total_D,\n self.lxu_cache_state,\n self.lxu_cache_weights,\n self.stochastic_rounding,\n )\n\n def _apply_split(\n self,\n split: SplitState,\n prefix: str,\n dtype: torch.dtype,\n enforce_hbm: bool = False,\n ) -> None:\n setattr(self, f\"{prefix}_physical_placements\", split.placements)\n setattr(self, f\"{prefix}_physical_offsets\", split.offsets)\n\n offsets = [split.offsets[t] for t in self.feature_table_map]\n placements = [split.placements[t] for t in self.feature_table_map]\n self.register_buffer(\n f\"{prefix}_offsets\",\n torch.tensor(offsets, device=self.current_device, dtype=torch.int64),\n )\n self.register_buffer(\n f\"{prefix}_placements\",\n torch.tensor(placements, device=self.current_device, dtype=torch.int32),\n )\n if split.dev_size > 0:\n self.register_buffer(\n f\"{prefix}_dev\",\n torch.zeros(split.dev_size, device=self.current_device, dtype=dtype),\n )\n else:\n self.register_buffer(\n f\"{prefix}_dev\",\n torch.empty(0, device=self.current_device, dtype=dtype),\n )\n if split.host_size > 0:\n if dtype == torch.uint8:\n self.register_buffer(\n f\"{prefix}_host\",\n torch.zeros(\n split.host_size, device=self.current_device, dtype=dtype\n ),\n )\n else:\n setattr(\n self,\n f\"{prefix}_host\",\n nn.Parameter(\n torch.zeros(\n split.host_size, device=self.current_device, dtype=dtype\n )\n ),\n )\n else:\n self.register_buffer(\n f\"{prefix}_host\",\n torch.empty(0, device=self.current_device, dtype=dtype),\n )\n if split.uvm_size > 0:\n assert not self.use_cpu\n if enforce_hbm:\n self.register_buffer(\n f\"{prefix}_uvm\",\n torch.zeros(\n split.uvm_size, device=self.current_device, dtype=dtype\n ),\n )\n else:\n self.register_buffer(\n f\"{prefix}_uvm\",\n torch.zeros(\n split.uvm_size,\n out=torch.ops.fb.new_managed_tensor(\n torch.zeros(1, device=self.current_device, dtype=dtype),\n [split.uvm_size],\n ),\n ),\n )\n else:\n self.register_buffer(\n f\"{prefix}_uvm\",\n torch.empty(0, device=self.current_device, dtype=dtype),\n )\n\n def _apply_cache_state(\n self,\n cache_state: CacheState,\n cache_algorithm: CacheAlgorithm,\n cache_load_factor: float,\n cache_sets: int,\n cache_reserved_memory: float,\n dtype: torch.dtype,\n ) -> None:\n self.cache_algorithm = cache_algorithm\n self.timestep = 1\n self.timesteps_prefetched = []\n\n self.max_prefetch_depth = MAX_PREFETCH_DEPTH\n self.lxu_cache_locations_list = []\n self.lxu_cache_locations_empty = 
torch.empty(\n 0, device=self.current_device, dtype=torch.int32\n ).fill_(-1)\n\n # NOTE: no cache for CPU mode!\n if cache_state.total_cache_hash_size == 0 or self.use_cpu:\n self.register_buffer(\n \"lxu_cache_weights\",\n torch.zeros(0, 0, device=self.current_device, dtype=dtype),\n )\n # NOTE: make TorchScript work!\n self.register_buffer(\n \"cache_hash_size_cumsum\",\n torch.zeros(1, dtype=torch.int64, device=self.current_device),\n persistent=False,\n )\n self.register_buffer(\n \"total_cache_hash_size\",\n torch.zeros(1, dtype=torch.int64, device=self.current_device),\n persistent=False,\n )\n self.register_buffer(\n \"cache_index_table_map\",\n torch.zeros(1, dtype=torch.int64, device=self.current_device),\n persistent=False,\n )\n self.register_buffer(\n \"lxu_cache_state\",\n torch.zeros(1, dtype=torch.int64, device=self.current_device),\n persistent=False,\n )\n self.register_buffer(\n \"lxu_state\",\n torch.zeros(1, dtype=torch.int64, device=self.current_device),\n persistent=False,\n )\n self.register_buffer(\n \"cache_miss_counter\",\n torch.tensor([0, 0], dtype=torch.int64),\n persistent=False,\n )\n return\n\n assert cache_load_factor > 0\n element_size = 2 if dtype == torch.float16 else 4\n if cache_sets <= 0:\n total_memory = torch.cuda.get_device_properties(\n self.current_device\n ).total_memory\n free_memory = (\n total_memory\n - torch.cuda.memory_reserved(self.current_device)\n - int(cache_reserved_memory)\n )\n assert free_memory > 0\n cache_sets = (\n int(cache_state.total_cache_hash_size * cache_load_factor) + ASSOC - 1\n ) // ASSOC\n cache_size = cache_sets * ASSOC * element_size * self.max_D_cache\n if cache_size > free_memory:\n cache_sets = (\n int(1.0 * free_memory / self.max_D_cache / element_size) + ASSOC - 1\n ) // ASSOC\n cache_load_factor = (\n 1.0 * cache_sets * ASSOC / int(cache_state.total_cache_hash_size)\n )\n assert cache_sets > 0\n if cache_algorithm == CacheAlgorithm.LFU:\n assert cache_sets < 2 ** 24 - 1\n cache_size = cache_sets * 32 * element_size * self.max_D_cache\n logging.info(\n f\"Using on-device cache with admission algorithm \"\n f\"{cache_algorithm}, {cache_sets} sets, \"\n f\"load_factor: {cache_load_factor : .3f}, \"\n f\"{cache_size / 1024.0 / 1024.0 / 1024.0 : .2f}GB\"\n )\n\n self.total_cache_hash_size = cache_state.total_cache_hash_size\n self.register_buffer(\n \"cache_hash_size_cumsum\",\n torch.tensor(\n cache_state.cache_hash_size_cumsum,\n device=self.current_device,\n dtype=torch.int64,\n ),\n )\n self.register_buffer(\n \"cache_index_table_map\",\n torch.tensor(\n cache_state.cache_index_table_map,\n device=self.current_device,\n dtype=torch.int32,\n ),\n )\n self.register_buffer(\n \"lxu_cache_state\",\n torch.zeros(\n cache_sets, ASSOC, device=self.current_device, dtype=torch.int64\n ).fill_(-1),\n )\n self.register_buffer(\n \"lxu_cache_weights\",\n torch.zeros(\n cache_sets * ASSOC,\n self.max_D_cache,\n device=self.current_device,\n dtype=dtype,\n ),\n )\n self.register_buffer(\n \"lxu_state\",\n # pyre-fixme[28]: Unexpected keyword argument `size`.\n torch.zeros(\n size=(self.total_cache_hash_size + 1,)\n if cache_algorithm == CacheAlgorithm.LFU\n else (cache_sets, ASSOC),\n device=self.current_device,\n dtype=torch.int64,\n ),\n )\n self.register_buffer(\n \"cache_miss_counter\",\n torch.tensor([0, 0], device=self.current_device, dtype=torch.int64),\n )\n if cache_algorithm not in (CacheAlgorithm.LFU, CacheAlgorithm.LRU):\n raise ValueError(\n f\"cache_algorithm must be {CacheAlgorithm.LRU} \"\n f\"or 
{CacheAlgorithm.LFU}\"\n )\n\n def reset_cache_states(self) -> None:\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.numel)[[Named(self, Tensor)],\n # int], Tensor], Tensor, nn.Module]` is not a function.\n if not self.lxu_cache_weights.numel():\n return\n self.lxu_cache_state.fill_(-1)\n self.lxu_state.fill_(0)\n self.timestep = 1\n\n\n# pyre-fixme[13]: Attribute `D_offsets` is never initialized.\n# pyre-fixme[13]: Attribute `hash_size_cumsum` is never initialized.\n# pyre-fixme[13]: Attribute `weights_offsets` is never initialized.\nclass DenseTableBatchedEmbeddingBagsCodegen(nn.Module):\n \"\"\"\n Table-batched version of nn.EmbeddingBag(sparse=False)\n \"\"\"\n\n weights: Tensor\n weights_offsets: Tensor\n D_offsets: Tensor\n total_D: int\n max_D: int\n hash_size_cumsum: Tensor\n total_hash_size_bits: int\n embedding_specs: List[Tuple[int, int]]\n\n def __init__(\n self,\n embedding_specs: List[Tuple[int, int]], # tuple of (rows, dims)\n feature_table_map: Optional[List[int]] = None, # [T]\n pooling_mode: PoolingMode = PoolingMode.SUM,\n use_cpu: bool = False,\n ) -> None: # noqa C901 # tuple of (rows, dims,)\n super(DenseTableBatchedEmbeddingBagsCodegen, self).__init__()\n\n self.pooling_mode = pooling_mode\n\n self.use_cpu = use_cpu\n self.current_device: torch.device = (\n torch.device(\"cpu\") if self.use_cpu else torch.cuda.current_device()\n )\n\n self.embedding_specs = embedding_specs\n (rows, dims) = zip(*embedding_specs)\n T_ = len(self.embedding_specs)\n assert T_ > 0\n\n feature_table_map = (\n feature_table_map if feature_table_map is not None else list(range(T_))\n )\n T = len(feature_table_map)\n assert T_ <= T\n D_offsets = [dims[t] for t in feature_table_map]\n D_offsets = [0] + list(accumulate(D_offsets))\n self.total_D = D_offsets[-1]\n self.max_D = max(dims)\n self.register_buffer(\n \"D_offsets\",\n torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),\n )\n assert self.D_offsets.numel() == T + 1\n\n hash_size_cumsum = [0] + list(accumulate(rows))\n self.total_hash_size_bits = int(log2(float(hash_size_cumsum[-1])) + 1)\n # The last element is to easily access # of rows of each table by\n # hash_size_cumsum[t + 1] - hash_size_cumsum[t]\n hash_size_cumsum = [hash_size_cumsum[t] for t in feature_table_map] + [\n hash_size_cumsum[-1]\n ]\n self.register_buffer(\n \"hash_size_cumsum\",\n torch.tensor(\n hash_size_cumsum, device=self.current_device, dtype=torch.int64\n ),\n )\n weights_offsets = [0] + list(\n accumulate([row * dim for (row, dim) in embedding_specs])\n )\n self.weights = nn.Parameter(\n torch.randn(\n weights_offsets[-1],\n device=self.current_device,\n )\n )\n for feature in range(T):\n t = feature_table_map[feature]\n row, dim = embedding_specs[t]\n if (\n self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()\n != row * dim\n ):\n logging.info(\n f\"row {row} dim {dim} feature {feature} t {t} {self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()}\"\n )\n assert (\n self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()\n == row * dim\n )\n assert self.hash_size_cumsum[feature] == sum(\n row for (row, _) in embedding_specs[:t]\n )\n\n self.weights_physical_offsets: List[int] = weights_offsets\n weights_offsets = [weights_offsets[t] for t in feature_table_map]\n self.register_buffer(\n \"weights_offsets\",\n torch.tensor(\n weights_offsets, device=self.current_device, dtype=torch.int64\n ),\n )\n\n def forward(\n self,\n indices: Tensor,\n offsets: Tensor,\n per_sample_weights: 
Optional[Tensor] = None,\n feature_requires_grad: Optional[Tensor] = None,\n ) -> Tensor:\n (indices, offsets) = indices.long(), offsets.long()\n return torch.ops.fb.dense_embedding_codegen_lookup_function(\n dev_weights=self.weights,\n weights_offsets=self.weights_offsets,\n D_offsets=self.D_offsets,\n total_D=self.total_D,\n max_D=self.max_D,\n hash_size_cumsum=self.hash_size_cumsum,\n total_hash_size_bits=self.total_hash_size_bits,\n indices=indices,\n offsets=offsets,\n pooling_mode=self.pooling_mode,\n indice_weights=per_sample_weights,\n feature_requires_grad=feature_requires_grad,\n )\n\n @torch.jit.export\n def split_embedding_weights(self) -> List[Tensor]:\n \"\"\"\n Returns a list of weights, split by table\n \"\"\"\n splits = []\n for t, (rows, dim) in enumerate(self.embedding_specs):\n offset = self.weights_physical_offsets[t]\n splits.append(\n self.weights.detach()[offset : offset + rows * dim].view(rows, dim)\n )\n return splits\n\n def init_embedding_weights_uniform(self, min_val: float, max_val: float) -> None:\n splits = self.split_embedding_weights()\n for param in splits:\n param.uniform_(min_val, max_val)\n\n\nclass SequenceEmbeddingCodegen(SplitTableBatchedEmbeddingBagsCodegen):\n \"\"\"\n This class wraps around SplitTableBatchedEmbeddingBagsCodegen to get\n sequence embedding op: nn.EmbeddingBag(sparse=True)\n \"\"\"\n\n def __init__(\n self,\n **kwargs: Any,\n ) -> None:\n # assert T == 1\n assert \"embedding_specs\" in kwargs\n assert len(kwargs[\"embedding_specs\"]) == 1\n super(SequenceEmbeddingCodegen, self).__init__(\n **kwargs,\n )\n\n # @torch.jit.ignore\n def forward(\n self,\n indices: Tensor,\n offsets: Optional[Tensor] = None,\n per_sample_weights: Optional[Tensor] = None,\n feature_requires_grad: Optional[Tensor] = None,\n ) -> Tensor:\n offsets = torch.arange(\n 0,\n indices.numel() + 1,\n device=indices.device,\n dtype=torch.int64,\n )\n return super(SequenceEmbeddingCodegen, self).forward(\n indices,\n offsets,\n per_sample_weights,\n feature_requires_grad,\n )\n\n\nclass DenseSequenceEmbeddingCodegen(DenseTableBatchedEmbeddingBagsCodegen):\n \"\"\"\n This class wraps around DenseTableBatchedEmbeddingBagsCodegen to get\n sequence embedding op, nn.EmbeddingBag(sparse=False)\n \"\"\"\n\n def __init__(\n self,\n **kwargs: Any,\n ) -> None:\n # assert T == 1\n assert \"embedding_specs\" in kwargs\n assert len(kwargs[\"embedding_specs\"]) == 1\n super(DenseSequenceEmbeddingCodegen, self).__init__(\n **kwargs,\n )\n\n # @torch.jit.ignore\n def forward(\n self,\n indices: Tensor,\n offsets: Optional[Tensor] = None,\n per_sample_weights: Optional[Tensor] = None,\n feature_requires_grad: Optional[Tensor] = None,\n ) -> Tensor:\n offsets = torch.arange(\n 0,\n indices.numel() + 1,\n device=indices.device,\n dtype=torch.int64,\n )\n return super(DenseSequenceEmbeddingCodegen, self).forward(\n indices,\n offsets,\n per_sample_weights,\n feature_requires_grad,\n )\n\ndef round_up(a: int, b: int) -> int:\n return int((a + b - 1) // b) * b\n\n\ndef rounded_row_size_in_bytes(dim: int, weight_ty: SparseType) -> int:\n r = unpadded_row_size_in_bytes(dim, weight_ty)\n # align each row to 16-byte boundaries.\n return round_up(r, 16)\n\ndef unpadded_row_size_in_bytes(dim: int, weight_ty: SparseType) -> int:\n r = {\n SparseType.FP16.value: dim * 2,\n SparseType.INT8.value: dim + 4,\n SparseType.INT4.value: dim // 2 + 4,\n SparseType.INT2.value: dim // 4 + 4,\n }[weight_ty.value]\n return r\n\n\nclass IntNBitTableBatchedEmbeddingBagsCodegen(nn.Module):\n \"\"\"\n 
Table-batched version of nn.EmbeddingBag(sparse=False)\n \"\"\"\n\n def __init__(\n self,\n embedding_specs: List[\n Tuple[int, int, SparseType]\n ], # tuple of (rows, dims, SparseType)\n feature_table_map: Optional[List[int]] = None, # [T]\n index_remapping: Optional[List[Tensor]] = None,\n pooling_mode: PoolingMode = PoolingMode.SUM,\n use_cpu: bool = False,\n ) -> None: # noqa C901 # tuple of (rows, dims,)\n super(IntNBitTableBatchedEmbeddingBagsCodegen, self).__init__()\n import numpy as np\n\n self.use_cpu = use_cpu\n self.current_device: torch.device = (\n torch.device(\"cpu\") if self.use_cpu else torch.cuda.current_device()\n )\n\n self.pooling_mode = pooling_mode\n\n self.embedding_specs = embedding_specs\n # (rows, dims, weights_tys, ) = zip(*embedding_specs)\n # Pyre workaround\n rows: List[int] = [e[0] for e in embedding_specs]\n dims: List[int] = [e[1] for e in embedding_specs]\n weights_tys: List[SparseType] = [e[2] for e in embedding_specs]\n\n T_ = len(self.embedding_specs)\n\n assert T_ > 0\n for (dim, weight_ty) in zip(dims, weights_tys):\n assert dim % weight_ty.align_size() == 0\n\n feature_table_map = (\n feature_table_map if feature_table_map is not None else list(range(T_))\n )\n T = len(feature_table_map)\n assert T_ <= T\n D_offsets = [dims[t] for t in feature_table_map]\n D_offsets = [0] + np.cumsum(D_offsets).tolist()\n self.total_D: int = D_offsets[-1]\n\n def max_ty_D(ty: SparseType) -> int:\n return max(\n [\n dim\n for dim, weight_ty in zip(dims, weights_tys)\n if weight_ty == ty\n ],\n default=0,\n )\n self.max_int2_D: int = max_ty_D(SparseType.INT2)\n self.max_int4_D: int = max_ty_D(SparseType.INT4)\n self.max_int8_D: int = max_ty_D(SparseType.INT8)\n self.max_float16_D: int = max_ty_D(SparseType.FP16)\n\n\n self.register_buffer(\n \"D_offsets\",\n torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),\n )\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.numel)[[Named(self, Tensor)],\n # int], Tensor], Tensor, nn.Module]` is not a function.\n assert self.D_offsets.numel() == T + 1\n\n def align_to_cacheline(a: int) -> int:\n # align each table to 128b cache line boundary.\n return round_up(a, 128)\n\n weights_offsets = [0] + np.cumsum(\n [align_to_cacheline(row * rounded_row_size_in_bytes(dim, weight_ty)) for (row, dim, weight_ty) in embedding_specs]\n ).tolist()\n self.register_buffer(\n \"weights\",\n torch.randint(\n 0,\n 255,\n size=(weights_offsets[-1],),\n dtype=torch.uint8,\n device=self.current_device,\n ),\n )\n\n for feature in range(T):\n t = feature_table_map[feature]\n row, dim, weight_ty = embedding_specs[t]\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self,\n # Tensor), Named(item, typing.Any)], typing.Any], Tensor], Tensor,\n # nn.Module]` is not a function.\n assert self.weights[\n weights_offsets[t] : weights_offsets[t + 1]\n ].numel() == align_to_cacheline(row * rounded_row_size_in_bytes(dim, weight_ty))\n\n weights_offsets = [weights_offsets[t] for t in feature_table_map]\n weights_tys_int = [weights_tys[t].as_int() for t in feature_table_map]\n\n self.register_buffer(\n \"weights_offsets\",\n torch.tensor(weights_offsets, device=self.current_device, dtype=torch.int64),\n )\n self.register_buffer(\n \"weights_tys\",\n torch.tensor(weights_tys_int, device=self.current_device, dtype=torch.uint8),\n )\n\n if index_remapping:\n # pyre-fixme[4]: Attribute must be annotated.\n self.index_remapping_hash_table_cpu = torch.classes.fb.PrunedMapCPU()\n\n if not 
self.use_cpu:\n # TODO: tune this?\n LOAD_FACTOR = 0.5\n capacity = round_up(int(sum(rows) * 1.0 / LOAD_FACTOR), 32)\n hash_table = torch.empty(\n (capacity, 3),\n dtype=torch.int32,\n )\n hash_table[:, :] = -1\n # TODO: handle feature remapping!!! Are these physical or virtual tables?\n feature_rows = [rows[t] for t in feature_table_map]\n assert len(feature_rows) == len(index_remapping)\n for row, index_map in zip(feature_rows, index_remapping):\n assert row == index_map.numel()\n indices = torch.cat(index_remapping, dim=0).int()\n dense_indices = torch.cat(\n [torch.arange(row) for row in feature_rows], dim=0\n ).int()\n offsets = torch.tensor([0] + np.cumsum(feature_rows).tolist()).int()\n torch.ops.fb.pruned_hashmap_insert(\n indices, dense_indices, offsets, hash_table, T\n )\n self.register_buffer(\n \"index_remapping_hash_table_gpu\", hash_table.to(self.current_device)\n )\n else:\n # TODO: handle feature remapping!!! Are these physical or virtual tables?\n feature_rows = [rows[t] for t in feature_table_map]\n assert len(feature_rows) == len(index_remapping)\n for row, index_map in zip(feature_rows, index_remapping):\n assert row == index_map.numel()\n indices = torch.cat(index_remapping, dim=0).int()\n dense_indices = torch.cat(\n [torch.arange(row) for row in feature_rows], dim=0\n ).int()\n offsets = torch.tensor([0] + np.cumsum(feature_rows).tolist()).int()\n self.index_remapping_hash_table_cpu.insert(\n indices, dense_indices, offsets, T\n )\n self.register_buffer(\"index_remapping_hash_table_gpu\", torch.empty(0))\n\n else:\n # pyre-fixme[4]: Attribute must be annotated.\n self.index_remapping_hash_table_gpu = None\n self.index_remapping_hash_table_cpu = None\n\n def forward(\n self,\n indices: Tensor,\n offsets: Tensor,\n per_sample_weights: Optional[Tensor] = None,\n feature_requires_grad: Optional[Tensor] = None,\n ) -> Tensor:\n if self.index_remapping_hash_table_cpu is not None:\n if not self.use_cpu:\n # Convert from raw indices to pruned indices\n indices = torch.ops.fb.pruned_hashmap_lookup(\n indices,\n offsets,\n self.index_remapping_hash_table_gpu,\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.numel)[[Named(self,\n # Tensor)], int], Tensor], Tensor, nn.Module]` is not a function.\n self.D_offsets.numel() - 1,\n )\n else:\n indices = self.index_remapping_hash_table_cpu.lookup(\n indices,\n offsets,\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.numel)[[Named(self,\n # Tensor)], int], Tensor], Tensor, nn.Module]` is not a function.\n self.D_offsets.numel() - 1,\n )\n return torch.ops.fb.int_nbit_split_embedding_codegen_lookup_function(\n dev_weights=self.weights,\n weights_offsets=self.weights_offsets,\n weights_tys=self.weights_tys,\n D_offsets=self.D_offsets,\n total_D=self.total_D,\n max_int2_D=self.max_int2_D,\n max_int4_D=self.max_int4_D,\n max_int8_D=self.max_int8_D,\n max_float16_D=self.max_float16_D,\n indices=indices,\n offsets=offsets,\n pooling_mode=self.pooling_mode,\n indice_weights=per_sample_weights,\n )\n\n @torch.jit.export\n def split_embedding_weights(self) -> List[Tuple[Tensor, Optional[Tensor]]]:\n \"\"\"\n Returns a list of weights, split by table\n \"\"\"\n splits: List[Tuple[Tensor, Optional[Tensor]]] = []\n for t, (rows, dim, weight_ty) in enumerate(self.embedding_specs):\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self,\n # Tensor), Named(item, typing.Any)], typing.Any], Tensor], Tensor,\n # nn.Module]` is not a function.\n offset = self.weights_offsets[t]\n # 
pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.detach)[[Named(self, Tensor)],\n # Tensor], Tensor], Tensor, nn.Module]` is not a function.\n weights_shifts = self.weights.detach()[\n offset : offset + rows * rounded_row_size_in_bytes(dim, weight_ty)\n ].view(rows, rounded_row_size_in_bytes(dim, weight_ty))\n # remove the padding at the end of each row.\n weights_shifts = weights_shifts[:, :unpadded_row_size_in_bytes(dim, weight_ty)]\n if weight_ty == SparseType.INT8 or weight_ty == SparseType.INT4 or weight_ty == SparseType.INT2:\n splits.append(\n (\n weights_shifts[:, 4:],\n weights_shifts[:, :4],\n )\n )\n else:\n assert weight_ty == SparseType.FP16\n splits.append(\n (\n weights_shifts,\n None,\n )\n )\n\n return splits\n"
] | [
[
"torch.cuda.get_device_properties",
"torch.randint",
"torch.zeros",
"torch.cat",
"torch.sum",
"numpy.cumsum",
"torch.ops.fb.dense_embedding_codegen_lookup_function",
"torch.unique",
"torch.where",
"torch.device",
"torch.ops.fb.lru_cache_populate",
"torch.cuda.memory_reserved",
"torch.randn",
"torch.tensor",
"torch.arange",
"torch.ops.fb.lfu_cache_populate",
"torch.ops.fb.lxu_cache_flush",
"torch.classes.fb.PrunedMapCPU",
"torch.empty",
"torch.cuda.current_device",
"torch.ops.fb.linearize_cache_indices",
"torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized",
"torch.ops.fb.int_nbit_split_embedding_codegen_lookup_function",
"torch.ops.fb.pruned_hashmap_insert",
"torch.ops.fb.lxu_cache_lookup"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
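The FBGEMM file in the row above sizes each quantized embedding row with unpadded_row_size_in_bytes / rounded_row_size_in_bytes: payload bytes per row depend on the SparseType, INT8/INT4/INT2 rows carry 4 extra bytes of quantization parameters, and every row is padded to a 16-byte boundary. A minimal standalone sketch of that arithmetic (byte counts copied from the source; the string-keyed SparseType stand-in is an assumption made for self-containment):

def round_up(a: int, b: int) -> int:
    return (a + b - 1) // b * b

def unpadded_row_bytes(dim: int, weight_ty: str) -> int:
    # per-row byte counts as in the embedded source: FP16 stores dim * 2
    # bytes; the int types store the packed payload plus a 4-byte header
    return {
        "FP16": dim * 2,
        "INT8": dim + 4,
        "INT4": dim // 2 + 4,
        "INT2": dim // 4 + 4,
    }[weight_ty]

def rounded_row_bytes(dim: int, weight_ty: str) -> int:
    # align each row to a 16-byte boundary, as in the source
    return round_up(unpadded_row_bytes(dim, weight_ty), 16)

# e.g. a 128-dim INT4 row: 128 // 2 + 4 = 68 bytes, padded to 80 bytes
assert unpadded_row_bytes(128, "INT4") == 68
assert rounded_row_bytes(128, "INT4") == 80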
navjothbn/new_spatial | [
"4113c29fe81facbf79ccb4f160cc4a252aa6f745"
] | [
"apps/xy.py"
] | [
"import leafmap.foliumap as leafmap\nimport pandas as pd\nimport streamlit as st\n\n\ndef app():\n\n st.title(\"Add Points from XY\")\n\n sample_url = \"https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/world_cities.csv\"\n url = st.text_input(\"Enter URL:\", sample_url)\n m = leafmap.Map(locate_control=True, plugin_LatLngPopup=False)\n\n if url:\n\n try:\n df = pd.read_csv(url)\n\n columns = df.columns.values.tolist()\n row1_col1, row1_col2, row1_col3, row1_col4, row1_col5 = st.columns(\n [1, 1, 3, 1, 1]\n )\n\n lon_index = 0\n lat_index = 0\n\n for col in columns:\n if col.lower() in [\"lon\", \"longitude\", \"long\", \"lng\"]:\n lon_index = columns.index(col)\n elif col.lower() in [\"lat\", \"latitude\"]:\n lat_index = columns.index(col)\n\n with row1_col1:\n x = st.selectbox(\"Select longitude column\", columns, lon_index)\n\n with row1_col2:\n y = st.selectbox(\"Select latitude column\", columns, lat_index)\n\n with row1_col3:\n popups = st.multiselect(\"Select popup columns\", columns, columns)\n\n with row1_col4:\n heatmap = st.checkbox(\"Add heatmap\")\n\n if heatmap:\n with row1_col5:\n if \"pop_max\" in columns:\n index = columns.index(\"pop_max\")\n else:\n index = 0\n heatmap_col = st.selectbox(\"Select heatmap column\", columns, index)\n try:\n m.add_heatmap(df, y, x, heatmap_col)\n except:\n st.error(\"Please select a numeric column\")\n\n try:\n m.add_points_from_xy(df, x, y, popups)\n except:\n st.error(\"Please select a numeric column\")\n\n except Exception as e:\n st.error(e)\n\n m.to_streamlit()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
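The Streamlit app in the row above guesses which CSV columns hold coordinates purely by name before plotting them. The same heuristic as a standalone helper (logic copied from the source; the function name is hypothetical):

def guess_xy_columns(columns):
    # default to the first column when no alias matches, as the app does
    lon_index = lat_index = 0
    for col in columns:
        if col.lower() in ["lon", "longitude", "long", "lng"]:
            lon_index = columns.index(col)
        elif col.lower() in ["lat", "latitude"]:
            lat_index = columns.index(col)
    return lon_index, lat_index

print(guess_xy_columns(["name", "latitude", "longitude"]))  # (2, 1)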
mleszczy/emmental | [
"879902626ed9e97f43fa42fe471275cbfad52f90"
] | [
"tests/schedulers/test_round_robin_scheduler.py"
] | [
"\"\"\"Emmental round robin scheduler unit tests.\"\"\"\nimport logging\n\nimport numpy as np\nimport torch\n\nimport emmental\nfrom emmental.data import EmmentalDataLoader, EmmentalDataset\nfrom emmental.schedulers.round_robin_scheduler import RoundRobinScheduler\nfrom emmental.utils.utils import set_random_seed\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_round_robin_scheduler(caplog):\n \"\"\"Unit test of round robin scheduler.\"\"\"\n caplog.set_level(logging.INFO)\n\n emmental.Meta.init()\n\n # Set random seed seed\n set_random_seed(2)\n\n task1 = \"task1\"\n x1 = np.random.rand(20, 2)\n y1 = torch.from_numpy(np.random.rand(20))\n\n task2 = \"task2\"\n x2 = np.random.rand(30, 3)\n y2 = torch.from_numpy(np.random.rand(30))\n\n dataloaders = [\n EmmentalDataLoader(\n task_to_label_dict={task_name: \"label\"},\n dataset=EmmentalDataset(\n name=task_name, X_dict={\"feature\": x}, Y_dict={\"label\": y}\n ),\n split=\"train\",\n batch_size=10,\n shuffle=True,\n )\n for task_name, x, y in [(task1, x1, y1), (task2, x2, y2)]\n ]\n\n scheduler = RoundRobinScheduler()\n\n assert scheduler.get_num_batches(dataloaders) == 5\n\n batch_task_names = [\n batch_data[-2] for batch_data in scheduler.get_batches(dataloaders)\n ]\n\n assert batch_task_names == [task2, task1, task2, task2, task1]\n\n scheduler = RoundRobinScheduler(fillup=True)\n\n assert scheduler.get_num_batches(dataloaders) == 6\n\n batch_task_names = [\n batch_data[-2] for batch_data in scheduler.get_batches(dataloaders)\n ]\n\n assert batch_task_names == [task2, task1, task2, task2, task1, task1]\n"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
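The unit test in the row above pins down the scheduler's batch counts: two dataloaders of 20 and 30 examples with batch_size 10 yield 5 batches, and 6 with fillup=True. A back-of-the-envelope check of those numbers, assuming (an inference from the asserts, not taken from the Emmental source) that the scheduler draws ceil(n / batch_size) batches per dataloader and that fillup pads every dataloader to the longest one:

import math

sizes, batch_size = [20, 30], 10
per_loader = [math.ceil(n / batch_size) for n in sizes]  # [2, 3]

assert sum(per_loader) == 5                # fillup=False -> 5 batches
assert len(sizes) * max(per_loader) == 6   # fillup=True  -> 6 batches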
uhseon/Deep-Repulsive-Clustering-of-Ordered-Data-Based-on-Order-Identity-Decomposition | [
"436aee7ade293a0a404b2e1a2965571864efb676"
] | [
"test/test_morph_kCH_attr.py"
] | [
"from datetime import datetime\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport pickle as pkl\nfrom sklearn.preprocessing import normalize\nimport tensorflow as tf\nimport tensorflow.keras as keras\n\n\nsys.path.append('..')\nfrom configs.cfg_morph_estimation_kCH_setting_A import ConfigMorphV0 as Config\nfrom networks import model_comparator\nfrom utils.comparison_utils import compute_ternary_order_fixed_ref\nfrom utils.comparison_utils import compute_ternary_order_fixed_base\nfrom utils.comparison_utils import one_step_voting_ternary_log, soft_voting_ternary_log\nfrom utils.utils import save_or_load_feature_v2\nfrom utils.utils import load_images\nfrom utils.utils import write_log\n\n\ndef clustering_imgs_save(memberships, filelists, analysis_path):\n K = max(memberships) + 1\n if not os.path.exists(f'{analysis_path}/K_{K}'): os.mkdir(f'{analysis_path}/K_{K}')\n for k in range(K):\n if not os.path.exists(f'{analysis_path}/K_{K}/{k}'): os.mkdir(f'{analysis_path}/K_{K}/{k}')\n\n for idx, (i_cluster, filename) in enumerate(zip(memberships, filelists)):\n if idx % 500 == 0:\n print(f'{idx} saved / {memberships.shape[0]}')\n os.system(f'cp {filename} {analysis_path}/K_{K}/{i_cluster}')\n\n\ndef main():\n # --- select GPU to use\n GPU = '1'\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = GPU # specify which GPU(s) to be used\n print(f'USE GPU {GPU}')\n gpus = tf.config.experimental.list_physical_devices('GPU')\n for gpu in gpus:\n print('gpu', gpu)\n tf.config.experimental.set_memory_growth(gpu, True)\n print('memory growth:', tf.config.experimental.get_memory_growth(gpu))\n\n # --- load configs\n config = Config()\n experiment_name = 'vgg16mdba_t0.1_2CH_Setting_A_20200915-210722' ### <--- EDIT HERE \n ckpt_name = 'ckpt_0.8740808963775635' ### <--- EDIT HERE \n ref_file = 'ref_by_attr.npy'\n\n # --- generate log files\n log_path = f'../results/results_morph/{experiment_name}'\n ckpt_path = f'{log_path}/checkpoints'\n ckpt_to_load = f'{ckpt_path}/{ckpt_name}'\n\n batch_size_for_comp = 1024\n # ref_filelist = ['ref_list_ep170_by_acc_N3_L66_T3_cluster_id0.npy',\n # 'ref_list_ep170_by_acc_N3_L66_T3_cluster_id1.npy',\n # ]\n batch_size_for_comp = 66 * 3\n\n # --- create or open log file\n log_file_path = f'{log_path}/test_log_{ckpt_name}.txt'\n if os.path.exists(log_file_path):\n log_file = open(log_file_path, 'a')\n else:\n log_file = open(log_file_path, 'w')\n\n # --- record the config of experiment\n config_dict = vars(Config)\n write_log(log_file, '*' * 100)\n for key in config_dict.keys():\n if not key.startswith('_'):\n write_log(log_file, f'{key} : {config_dict[key]}')\n write_log(log_file, '*' * 100 + '\\n')\n #\n # for ref_file in ref_filelist:\n # write_log(log_file, ref_file)\n\n # --- load data\n train_data = pd.read_csv(config.train_list, sep=\",\")\n test_data = pd.read_csv(config.test_list, sep=\",\")\n\n train_filelist = np.array(\n [f'{config.img_folder}/{train_data[\"database\"][idx]}/{train_data[\"filename\"][idx]}' for idx in\n range(len(train_data))])\n train_labels = np.array(train_data['age'])\n train_genders = np.array(train_data['gender'])\n train_race = np.array(train_data['race'])\n\n test_filelist = np.array(\n [f'{config.img_folder}/{test_data[\"database\"][idx]}/{test_data[\"filename\"][idx]}' for idx in\n range(len(test_data))])\n test_labels = np.array(test_data['age'])\n test_race = np.array(test_data['race'])\n\n # --- load clustering info\n print(f'{config.dataset} dataset is successfully 
loaded!')\n\n # --- make network model\n model = model_comparator.vgg_mdba(config)\n model.summary()\n write_log(log_file, f'Model name: {model.name}')\n try:\n model.load_weights(ckpt_to_load)\n write_log(log_file, f'Parameters are loaded from {ckpt_to_load}')\n except:\n write_log(log_file, f'Network are initialized with IMAGENET feature')\n\n # extract encoder part\n feature_extractor = model.get_layer('feature_extractor')\n feature_extractor = keras.Model(inputs=feature_extractor.input, outputs=feature_extractor.output)\n\n comp_input = keras.Input(512 * 2)\n l1 = model.get_layer('dense_1')\n l2 = model.get_layer('batch_normalization_1')\n l3 = model.get_layer('activation_1')\n l4 = model.get_layer('dense_2')\n l5 = model.get_layer('batch_normalization_2')\n l6 = model.get_layer('activation_2')\n l7 = model.get_layer('dense_3')\n\n x = l1(comp_input)\n x = l2(x)\n x = l3(x)\n x = l4(x)\n x = l5(x)\n x = l6(x)\n output = l7(x)\n\n comparator = keras.Model(comp_input, output, name='comparator')\n\n train_features = save_or_load_feature_v2(f'{log_path}/train_features_{ckpt_name}', train_filelist,512, feature_extractor, config)\n test_features = save_or_load_feature_v2(f'{log_path}/test_features_{ckpt_name}', test_filelist,512, feature_extractor, config)\n\n #\n total_preds = []\n total_soft_preds = []\n total_gt = []\n total_comparison_acc = []\n ref_idxs_list = np.load(f'{log_path}/{ref_file}', allow_pickle=True)\n\n # --- infer age\n for base_idx in range(config.num_test):\n ref_idxs = ref_idxs_list[base_idx]\n ref_features = train_features[ref_idxs]\n ref_labels = train_labels[ref_idxs]\n num_ref = len(ref_labels)\n batch_base = np.zeros((batch_size_for_comp, 512), dtype=np.float32)\n batch_base[:, ...] = test_features[base_idx]\n order_list = []\n age_list = []\n prob_list = []\n\n for batch_idx in range(np.ceil(num_ref / batch_size_for_comp).astype(np.int32)):\n start_idx = batch_size_for_comp * batch_idx\n end_idx = min(start_idx + batch_size_for_comp, num_ref)\n batch_ref = ref_features[start_idx:end_idx, ...]\n batch_label = ref_labels[start_idx:end_idx]\n\n # 1. infer the ordering relation\n if end_idx - start_idx < batch_size_for_comp:\n batch_base = np.zeros_like(batch_ref)\n batch_base[:, ...] = test_features[base_idx]\n\n batch_pair = tf.concat((batch_base, batch_ref), axis=-1)\n preds = comparator(batch_pair, training=False)\n preds = tf.nn.softmax(preds, axis=-1)\n order_pred = np.argmax(preds, axis=-1)\n prob_list.append(preds.numpy())\n order_list.append(order_pred)\n age_list.append(batch_label)\n order_list = np.concatenate(order_list, axis=0)\n age_list = np.concatenate(age_list, axis=0)\n prob_list = np.concatenate(prob_list, axis=0)\n\n # 2. hard voting\n gt_order_list = compute_ternary_order_fixed_base(np.log(test_labels[base_idx]), np.log(age_list), config.tau)\n comparison_acc = np.sum(gt_order_list == order_list) / num_ref\n\n pred_score, voting_result = one_step_voting_ternary_log(order_list, np.log(age_list), config.tau,\n config.age_levels)\n\n # 3. 
soft voting\n soft_pred, _, _ = soft_voting_ternary_log(prob_list, np.log(age_list), config.tau, config.age_levels)\n\n total_preds.append(pred_score)\n total_soft_preds.append(soft_pred)\n total_gt.append(test_labels[base_idx])\n total_comparison_acc.append(comparison_acc)\n print(f'infer the score: {base_idx} / {config.num_test}')\n\n total_preds = np.array(total_preds)\n total_soft_preds = np.array(total_soft_preds)\n total_gt = np.array(total_gt)\n total_comparison_acc = np.array(total_comparison_acc)\n total_MAE = np.abs(total_preds - total_gt)\n total_MAE_soft = np.abs(total_soft_preds - total_gt)\n\n # --- measure MAE\n MAE_metric = keras.metrics.MeanAbsoluteError()\n MAE_metric(total_gt, total_preds)\n\n # --- measure CS (MAE <= 5)\n n_correct_CS = np.sum(total_MAE <= 5)\n CS = n_correct_CS / len(test_labels)\n\n n_correct_CS_soft = np.sum(total_MAE_soft <= 5)\n CS_soft = n_correct_CS_soft / len(test_labels)\n\n write_log(log_file, '\\n+ ------------------------------------------------------------ +')\n write_log(log_file, '| TEST |') # MAE: 4.23 CS: 73.2%\n write_log(log_file, '| ============================================================ |')\n write_log(log_file, '| MAE | CS (%) | Comparison Acc. (%) |')\n write_log(log_file, '+ ------------------------------------------------------------ +')\n write_log(log_file, f'| {MAE_metric.result():.3f} | {CS * 100:.3f} | {np.mean(total_comparison_acc) * 100:.3f} |')\n write_log(log_file, f'| {np.mean(total_MAE_soft):.3f} | {CS_soft * 100:.3f} | {np.mean(total_comparison_acc) * 100:.3f} |')\n\n write_log(log_file, '+ ------------------------------------------------------------ +')\n\n\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"tensorflow.concat",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.mean",
"pandas.read_csv",
"tensorflow.keras.Input",
"tensorflow.config.experimental.set_memory_growth",
"numpy.ceil",
"numpy.argmax",
"numpy.load",
"numpy.zeros",
"numpy.log",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.keras.Model",
"numpy.array",
"numpy.sum",
"tensorflow.nn.softmax",
"numpy.abs",
"tensorflow.config.experimental.get_memory_growth",
"tensorflow.keras.metrics.MeanAbsoluteError"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
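The test script in the row above compares a test face against reference faces with compute_ternary_order_fixed_base(np.log(age_base), np.log(ages_ref), tau), i.e. a three-way order on log ages with threshold tau. That helper is not included in the row, so the sketch below is an assumption about its convention (including the 0/1/2 label encoding), following the usual ternary rule in order-learning work:

import numpy as np

def ternary_order_fixed_base(log_base: float, log_refs: np.ndarray, tau: float) -> np.ndarray:
    # assumed convention: compare the base against each reference on the
    # log-age scale, treating differences within tau as "similar"
    diff = log_base - log_refs
    order = np.full(log_refs.shape, 1, dtype=np.int64)  # 1: similar ages
    order[diff > tau] = 0    # 0: base older than the reference
    order[diff < -tau] = 2   # 2: base younger than the reference
    return order

# with tau = 0.1, ages more than ~10% apart on the log scale get ordered
print(ternary_order_fixed_base(np.log(30.0), np.log(np.array([20.0, 30.0, 45.0])), 0.1))  # [0 1 2]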
wbbhcb/Firm-Characteristics-and-Chinese-Stock-Market | [
"5d2d4858b9e2292987eb38f660fbc9457c0d9595"
] | [
"fc.py"
] | [
"\r\n\"\"\"\r\n由于并未达到原文中的数值,所以之后会重新检查,看看算式能否进一步提升,因此,并未写出很集成的模块。\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport statsmodels.api as sm\r\nfrom scipy import stats\r\n\r\nfrom factor_test_monthly import compute_num_months, compute_factor_return_series, compute_return_T_test, compute_5_factor_model\r\n\r\nfrom fm import process_bar\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\ndef forecast_combination(X, y):\r\n fc_params = []\r\n for i in range(X.shape[1]):\r\n if i == 0: # 对于常数项\r\n # result = sm.OLS(y, X[:, i]).fit()\r\n # fc_params.append(result.params[0])\r\n fc_params.append(stats.linregress(y, X[:, 1])[1])\r\n # fc_params.append(stats.linregress(X[:, 1], y)[1])\r\n else:\r\n fc_params.append(stats.linregress(y, X[:, i])[0])\r\n # fc_params.append(stats.linregress(X[:, i], y)[0])\r\n return fc_params\r\n\r\nif __name__ == \"__main__\":\r\n data = pd.read_csv('./data.csv')\r\n begin_month = 200203\r\n time_length = 190\r\n\r\n months = compute_num_months(begin_month, time_length)\r\n\r\n # 转为按时间排序\r\n data = data.sort_values(by = \"TRDMNT\")\r\n data = data.reset_index(drop = True)\r\n\r\n # 然后对于每一个时间节点,对于75个因子计算一次,得到其参数,\r\n for i in range(time_length):\r\n month = months[i]\r\n data_atmonth = data[data.TRDMNT == month]\r\n # 线性回归\r\n X = data_atmonth.iloc[:, 18:92].values\r\n # X = data_atmonth.iloc[:, 92:166].values\r\n X = np.column_stack((np.ones(X.shape[0]), X)) #先加上常数看看\r\n # y = data_atmonth.retx.values\r\n y = data_atmonth.reta.values\r\n fc_point = forecast_combination(X, y)\r\n if i == 0:\r\n fc_matrix = fc_point\r\n else:\r\n fc_matrix = np.vstack((fc_matrix, fc_point)) # 该矩阵和时间的对应关系为: 时间对应的那一行用到了下一个月的回报,\r\n # 所以应该移动\r\n T = 12\r\n dates = data.TRDMNT.tolist()\r\n data_matrix = data.iloc[:, 18:92].values\r\n fc_data = []\r\n for i in range(len(data)):\r\n date = dates[i]\r\n if date >= months[T]: # 因为需要前T个时间点的系数进行计算\r\n now_pos = int((date - 200200)/100) * 12 + date%100- 3\r\n fc_params = np.sum(fc_matrix[now_pos - T:now_pos, :], axis=0) / T\r\n fc_point = np.sum(fc_params * np.array([1] + list(data_matrix[i, :])))\r\n # fc_point = np.sum(np.array(list(fc_params[0, i] for i in range(74))) * np.array(data.iloc[i, 18:92].tolist()))\r\n # fc_point = np.sum(np.array(list(fc_params[0, i] for i in range(74))) * np.array(data.iloc[i, 92:166].tolist()))\r\n fc_data.append(fc_point)\r\n else:\r\n fc_data.append(0)\r\n process_bar(i, len(data))\r\n\r\n data[\"fc\"] = fc_data\r\n\r\n new_panel = data.loc[:, ['stkid', 'TRDMNT', 'retx', 'fc']]\r\n # new_panel.to_csv('./fc.csv', mode='w', header=True)\r\n\r\n FACTOR = 'fc'\r\n begin_month = 200303 # 200203\r\n time_length = 178 # 从200306 到 201612\r\n\r\n months = compute_num_months(begin_month, time_length)\r\n # 计算该因子对应的多空组合回报率表格\r\n result = compute_factor_return_series(new_panel, FACTOR, begin_month, time_length)\r\n\r\n print(\"Factor Name:\", FACTOR)\r\n the_return, t, Minus = compute_return_T_test(result)\r\n\r\n the_return2, t2 = compute_5_factor_model(Minus, months)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"pandas.read_csv",
"numpy.ones",
"scipy.stats.linregress",
"numpy.sum",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
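fc.py in the row above implements a forecast-combination estimator: each month it fits one univariate regression per firm characteristic, then predicts next-month returns with coefficients averaged over the previous T = 12 months. A compact sketch of that idea on synthetic data (it uses the conventional linregress(x, y) argument order, whereas the embedded code passes the arguments reversed; the intercept handling is simplified away):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
T, n_stocks, n_factors = 12, 50, 3
X = rng.normal(size=(T, n_stocks, n_factors))   # factor exposures per month
y = rng.normal(size=(T, n_stocks))              # next-month returns

# one univariate slope per factor per month
slopes = np.array(
    [[stats.linregress(X[t, :, j], y[t]).slope for j in range(n_factors)] for t in range(T)]
)
avg_slope = slopes.mean(axis=0)  # forecast combination: average over the T-month window

x_new = rng.normal(size=n_factors)   # one stock's current exposures
print(float(avg_slope @ x_new))      # combined return forecast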
fengrussell/tensor2tensor-hvd | [
"d24de4756da1d990863d78086aa6eadf95960f10"
] | [
"tensor2tensor/data_generators/imagenet.py"
] | [
"# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"ImageNet.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nfrom tensor2tensor.data_generators import image_utils\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\n# Derived from ImageNet data\nMEAN_RGB = [0.485, 0.456, 0.406]\nSTDDEV_RGB = [0.229, 0.224, 0.225]\n\n\ndef imagenet_preprocess_example(example, mode, resize_size=None):\n \"\"\"Preprocessing used for Imagenet and similar problems.\"\"\"\n resize_size = resize_size or [299, 299]\n assert resize_size[0] == resize_size[1]\n\n image = example[\"inputs\"]\n if mode == tf.estimator.ModeKeys.TRAIN:\n image = preprocess_for_train(image, image_size=resize_size[0])\n else:\n image = preprocess_for_eval(image, image_size=resize_size[0])\n\n example[\"inputs\"] = image\n return example\n\n\[email protected]_problem\nclass ImageImagenet(image_utils.Image2ClassProblem):\n \"\"\"Imagenet.\"\"\"\n\n @property\n def is_small(self):\n return False\n\n @property\n def num_classes(self):\n return 1000\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n # TODO(lukaszkaiser): find a better way than printing this.\n print(\"To generate the ImageNet dataset in the proper format, follow \"\n \"instructions at https://github.com/tensorflow/models/blob/master\"\n \"/inception/README.md#getting-started\")\n\n def preprocess_example(self, example, mode, _):\n return imagenet_preprocess_example(example, mode)\n\n\nclass ImageImagenetRescaled(ImageImagenet):\n \"\"\"Imagenet rescaled to rescale_size.\"\"\"\n\n @property\n def rescale_size(self):\n # return [224, 224]\n raise NotImplementedError()\n\n def dataset_filename(self):\n return \"image_imagenet\" # Reuse Imagenet data.\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n tf.logging.warning(\n \"Generate data for rescaled ImageNet problems with image_imagenet\")\n\n def preprocess_example(self, example, mode, _):\n return imagenet_preprocess_example(\n example, mode, resize_size=self.rescale_size)\n\n\[email protected]_problem\nclass ImageImagenet224(ImageImagenetRescaled):\n \"\"\"Imagenet rescaled to 224x224.\"\"\"\n\n @property\n def rescale_size(self):\n return [224, 224]\n\n\[email protected]_problem\nclass ImageImagenet32(ImageImagenetRescaled):\n \"\"\"Imagenet rescaled to 32x32.\"\"\"\n\n @property\n def rescale_size(self):\n return [32, 32]\n\n @property\n def is_small(self):\n return True # Modalities like for CIFAR.\n\n def preprocess_example(self, example, mode, _):\n # Just resize with area.\n if self._was_reversed:\n example[\"inputs\"] = tf.to_int64(\n tf.image.resize_images(example[\"inputs\"], self.rescale_size,\n tf.image.ResizeMethod.AREA))\n else:\n example = imagenet_preprocess_example(example, mode)\n example[\"inputs\"] = tf.to_int64(\n tf.image.resize_images(example[\"inputs\"], self.rescale_size))\n return 
example\n\n\[email protected]_problem\nclass ImageImagenet64(ImageImagenet32):\n \"\"\"Imagenet rescaled to 64x64.\"\"\"\n\n @property\n def rescale_size(self):\n return [64, 64]\n\n\[email protected]_problem\nclass Img2imgImagenet(image_utils.ImageProblem):\n \"\"\"Imagenet rescaled to 8x8 for input and 32x32 for output.\"\"\"\n\n def dataset_filename(self):\n return \"image_imagenet\" # Reuse Imagenet data.\n\n def preprocess_example(self, example, unused_mode, unused_hparams):\n\n inputs = example[\"inputs\"]\n # For Img2Img resize input and output images as desired.\n example[\"inputs\"] = image_utils.resize_by_area(inputs, 8)\n example[\"targets\"] = image_utils.resize_by_area(inputs, 32)\n return example\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n tf.logging.warning(\"Generate data for img2img_imagenet with image_imagenet\")\n\n def hparams(self, defaults, unused_model_hparams):\n p = defaults\n p.input_modality = {\"inputs\": (\"image:identity\", 256)}\n p.target_modality = (\"image:identity\", 256)\n p.batch_size_multiplier = 256\n p.max_expected_batch_size_per_shard = 4\n p.input_space_id = 1\n p.target_space_id = 1\n\n\n# The following preprocessing functions were taken from\n# cloud_tpu/models/resnet/resnet_preprocessing.py\n# ==============================================================================\ndef _crop(image, offset_height, offset_width, crop_height, crop_width):\n \"\"\"Crops the given image using the provided offsets and sizes.\n\n Note that the method doesn't assume we know the input image size but it does\n assume we know the input image rank.\n\n Args:\n image: `Tensor` image of shape [height, width, channels].\n offset_height: `Tensor` indicating the height offset.\n offset_width: `Tensor` indicating the width offset.\n crop_height: the height of the cropped image.\n crop_width: the width of the cropped image.\n\n Returns:\n the cropped (and resized) image.\n\n Raises:\n InvalidArgumentError: if the rank is not 3 or if the image dimensions are\n less than the crop size.\n \"\"\"\n original_shape = tf.shape(image)\n\n rank_assertion = tf.Assert(\n tf.equal(tf.rank(image), 3), [\"Rank of image must be equal to 3.\"])\n with tf.control_dependencies([rank_assertion]):\n cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])\n\n size_assertion = tf.Assert(\n tf.logical_and(\n tf.greater_equal(original_shape[0], crop_height),\n tf.greater_equal(original_shape[1], crop_width)),\n [\"Crop size greater than the image size.\"])\n\n offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))\n\n # Use tf.slice instead of crop_to_bounding box as it accepts tensors to\n # define the crop size.\n with tf.control_dependencies([size_assertion]):\n image = tf.slice(image, offsets, cropped_shape)\n return tf.reshape(image, cropped_shape)\n\n\ndef distorted_bounding_box_crop(image,\n bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0),\n max_attempts=100,\n scope=None):\n \"\"\"Generates cropped_image using a one of the bboxes randomly distorted.\n\n See `tf.image.sample_distorted_bounding_box` for more documentation.\n\n Args:\n image: `Tensor` of image (it will be converted to floats in [0, 1]).\n bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`\n where each coordinate is [0, 1) and the coordinates are arranged\n as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole\n image.\n min_object_covered: An optional `float`. Defaults to `0.1`. 
The cropped\n area of the image must contain at least this fraction of any bounding\n box supplied.\n aspect_ratio_range: An optional list of `float`s. The cropped area of the\n image must have an aspect ratio = width / height within this range.\n area_range: An optional list of `float`s. The cropped area of the image\n must contain a fraction of the supplied image within in this range.\n max_attempts: An optional `int`. Number of attempts at generating a cropped\n region of the image of the specified constraints. After `max_attempts`\n failures, return the entire image.\n scope: Optional `str` for name scope.\n Returns:\n (cropped image `Tensor`, distorted bbox `Tensor`).\n \"\"\"\n with tf.name_scope(scope, \"distorted_bounding_box_crop\", [image, bbox]):\n # Each bounding box has shape [1, num_boxes, box coords] and\n # the coordinates are ordered [ymin, xmin, ymax, xmax].\n\n # A large fraction of image datasets contain a human-annotated bounding\n # box delineating the region of the image containing the object of interest.\n # We choose to create a new bounding box for the object which is a randomly\n # distorted version of the human-annotated bounding box that obeys an\n # allowed range of aspect ratios, sizes and overlap with the human-annotated\n # bounding box. If no box is supplied, then we assume the bounding box is\n # the entire image.\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bbox,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box\n\n # Crop the image to the specified bounding box.\n cropped_image = tf.slice(image, bbox_begin, bbox_size)\n return cropped_image, distort_bbox\n\n\ndef _random_crop(image, size):\n \"\"\"Make a random crop of (`size` x `size`).\"\"\"\n bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])\n random_image, bbox = distorted_bounding_box_crop(\n image,\n bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(3. / 4, 4. 
/ 3.),\n area_range=(0.08, 1.0),\n max_attempts=1,\n scope=None)\n bad = _at_least_x_are_true(tf.shape(image), tf.shape(random_image), 3)\n\n image = tf.cond(\n bad, lambda: _center_crop(_do_scale(image, size), size),\n lambda: tf.image.resize_bicubic([random_image], [size, size])[0])\n return image\n\n\ndef _flip(image):\n \"\"\"Random horizontal image flip.\"\"\"\n image = tf.image.random_flip_left_right(image)\n return image\n\n\ndef _at_least_x_are_true(a, b, x):\n \"\"\"At least `x` of `a` and `b` `Tensors` are true.\"\"\"\n match = tf.equal(a, b)\n match = tf.cast(match, tf.int32)\n return tf.greater_equal(tf.reduce_sum(match), x)\n\n\ndef _do_scale(image, size):\n \"\"\"Rescale the image by scaling the smaller spatial dimension to `size`.\"\"\"\n shape = tf.cast(tf.shape(image), tf.float32)\n w_greater = tf.greater(shape[0], shape[1])\n shape = tf.cond(w_greater,\n lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32),\n lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32))\n\n return tf.image.resize_bicubic([image], shape)[0]\n\n\ndef _center_crop(image, size):\n \"\"\"Crops to center of image with specified `size`.\"\"\"\n image_height = tf.shape(image)[0]\n image_width = tf.shape(image)[1]\n\n offset_height = ((image_height - size) + 1) / 2\n offset_width = ((image_width - size) + 1) / 2\n image = _crop(image, offset_height, offset_width, size, size)\n return image\n\n\ndef _normalize(image):\n \"\"\"Normalize the image to zero mean and unit variance.\"\"\"\n offset = tf.constant(MEAN_RGB, shape=[1, 1, 3])\n image -= offset\n\n scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3])\n image /= scale\n return image\n\n\ndef preprocess_for_train(image, image_size=224):\n \"\"\"Preprocesses the given image for evaluation.\n\n Args:\n image: `Tensor` representing an image of arbitrary size.\n image_size: int, how large the output image should be.\n\n Returns:\n A preprocessed image `Tensor`.\n \"\"\"\n image = _random_crop(image, image_size)\n image = _normalize(image)\n image = _flip(image)\n image = tf.reshape(image, [image_size, image_size, 3])\n return image\n\n\ndef preprocess_for_eval(image, image_size=224):\n \"\"\"Preprocesses the given image for evaluation.\n\n Args:\n image: `Tensor` representing an image of arbitrary size.\n image_size: int, how large the output image should be.\n\n Returns:\n A preprocessed image `Tensor`.\n \"\"\"\n image = _do_scale(image, image_size + 32)\n image = _normalize(image)\n image = _center_crop(image, image_size)\n image = tf.reshape(image, [image_size, image_size, 3])\n return image\n\n\n# ==============================================================================\n"
] | [
[
"tensorflow.logging.warning",
"tensorflow.constant",
"tensorflow.image.random_flip_left_right",
"tensorflow.control_dependencies",
"tensorflow.shape",
"tensorflow.greater",
"tensorflow.stack",
"tensorflow.slice",
"tensorflow.equal",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.reduce_sum",
"tensorflow.image.resize_images",
"tensorflow.image.resize_bicubic",
"tensorflow.name_scope",
"tensorflow.rank",
"tensorflow.greater_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
samf1986/matching | [
"c1ed91127ef73e22702c66d88a4c53464625b7a8"
] | [
"docs/tutorials/hospital_resident/data.py"
] | [
"\"\"\" A script to create the dummy data used in `main.ipynb`. \"\"\"\n\nfrom collections import defaultdict\n\nimport numpy as np\nimport yaml\n\nNUM_RESIDENTS = 200\nCAPACITY = 30\nSEED = 0\n\nresident_names = [f\"{i:03d}\" for i in range(NUM_RESIDENTS)]\nhospital_names = [\n \"Dewi Sant\",\n \"Prince Charles\",\n \"Prince of Wales\",\n \"Royal Glamorgan\",\n \"Royal Gwent\",\n \"St. David\",\n \"University\",\n]\n\n\ndef create_resident_to_preferences_map():\n \"\"\"Create a map from resident names to an ordered subset of the hospital\n names.\"\"\"\n\n resident_to_preference_size = {\n resident: np.random.randint(1, len(hospital_names) + 1)\n for resident in resident_names\n }\n\n resident_to_preference_idxs = {\n resident: np.random.choice(\n len(hospital_names), size=size, replace=False\n )\n for resident, size in resident_to_preference_size.items()\n }\n\n resident_to_preferences = {\n resident: np.array(hospital_names)[idxs].tolist()\n for resident, idxs in resident_to_preference_idxs.items()\n }\n\n return resident_to_preferences\n\n\ndef create_hospital_to_preferences_map(resident_to_preferences):\n \"\"\"Create a map from hospital names to a permutation of all those residents\n who ranked them.\"\"\"\n\n hospital_to_residents = defaultdict(set)\n for resident, hospitals in resident_to_preferences.items():\n for hospital in hospitals:\n hospital_to_residents[hospital].add(resident)\n\n hospital_to_preferences = {\n hospital: np.random.permutation(list(residents)).tolist()\n for hospital, residents in hospital_to_residents.items()\n }\n\n return hospital_to_preferences\n\n\ndef create_hospital_to_capacity_map():\n \"\"\" Create a map from hospital names to their capacity. \"\"\"\n\n hospital_to_capacity = {hospital: CAPACITY for hospital in hospital_names}\n\n return hospital_to_capacity\n\n\ndef save_dictionaries_to_yaml(\n resident_preferences, hospital_preferences, capacities\n):\n\n for dictionary, name in zip(\n (resident_preferences, hospital_preferences, capacities),\n (\"residents\", \"hospitals\", \"capacities\"),\n ):\n with open(f\"{name}.yml\", \"w\") as f:\n yaml.dump(dictionary, f, indent=4)\n\n\ndef main():\n \"\"\" Create the required maps to form the players, and then save them. \"\"\"\n\n np.random.seed(SEED)\n print(\"Seed set:\", SEED)\n\n resident_preferences = create_resident_to_preferences_map()\n hospital_preferences = create_hospital_to_preferences_map(\n resident_preferences\n )\n capacities = create_hospital_to_capacity_map()\n print(\"Player dictionaries created...\")\n\n save_dictionaries_to_yaml(\n resident_preferences, hospital_preferences, capacities\n )\n print(\"Dictionaries saved.\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andreamad8/EntNet | [
"b8398db96d8167d7db6855bf960d59b9afc38548"
] | [
"CNN/src/memories/DMC_simple.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nimport numpy as np\nimport tensorflow as tf\nimport functools\n\n\ndef prelu_func(features, initializer=None, scope=None):\n \"\"\"\n Implementation of [Parametric ReLU](https://arxiv.org/abs/1502.01852) borrowed from Keras.\n \"\"\"\n with tf.variable_scope(scope, 'PReLU', initializer=initializer):\n alpha = tf.get_variable('alpha', features.get_shape().as_list()[1:])\n pos = tf.nn.relu(features)\n neg = alpha * (features - tf.abs(features)) * 0.5\n return pos + neg\nprelu = functools.partial(prelu_func, initializer=tf.constant_initializer(1.0))\n\nclass DynamicMemoryCell(tf.contrib.rnn.RNNCell):\n \"\"\"\n Implementation of a dynamic memory cell as a gated recurrent network.\n The cell's hidden state is divided into blocks and each block's weights are tied.\n \"\"\"\n\n def __init__(self, num_blocks, num_units_per_block, keys, query_embedding,\n activation = prelu,\n initializer=tf.random_normal_initializer(stddev=0.1)):\n self._num_blocks = num_blocks # M\n self._num_units_per_block = num_units_per_block # d\n self._keys = keys\n self._initializer = initializer\n self._activation = activation\n self._q = query_embedding\n\n\n @property\n def state_size(self):\n \"Return the total state size of the cell, across all blocks.\"\n return self._num_blocks * self._num_units_per_block\n\n @property\n def output_size(self):\n \"Return the total output size of the cell, across all blocks.\"\n return self._num_blocks * self._num_units_per_block\n\n def zero_state(self, batch_size, dtype):\n \"\"\"\n We initialize the memory to the key values.\n \"\"\"\n zero_state = tf.concat([tf.expand_dims(key, 0) for key in self._keys], 1)\n zero_state_batch = tf.tile(zero_state, tf.stack([batch_size, 1]))\n return zero_state_batch\n\n def get_gate(self, state_j, key_j, inputs):\n \"\"\"\n Implements the gate (scalar for each block). Equation 2:\n\n g_j <- \\sigma(s_t^T h_j + s_t^T w_j + s_t^T q)\n \"\"\"\n a = tf.reduce_sum(inputs * state_j, axis=1)\n b = tf.reduce_sum(inputs * tf.expand_dims(key_j, 0), axis=1)\n # c = tf.reduce_sum(inputs * tf.squeeze(self._q), axis=1)\n return tf.nn.sigmoid(a + b)\n\n def get_candidate(self, state_j, key_j, inputs, U, V, W):\n \"\"\"\n Represents the new memory candidate that will be weighted by the\n gate value and combined with the existing memory. 
Equation 3:\n\n h_j^~ <- \\phi(U h_j + V w_j + W s_t)\n \"\"\"\n key_V = tf.matmul(tf.expand_dims(key_j, 0), V)\n state_U = tf.matmul(state_j, U)\n inputs_W = tf.matmul(inputs, W)\n return state_U + key_V + inputs_W\n\n def __call__(self, inputs, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__, initializer=self._initializer):\n # Split the hidden state into blocks (each U, V, W are shared across blocks).\n\n # U = tf.get_variable('U', [self._num_units_per_block, self._num_units_per_block],\n # initializer= tf.constant_initializer(np.identity(self._num_units_per_block)),\n # trainable = False)\n # W = tf.get_variable('W', [self._num_units_per_block, self._num_units_per_block],\n # initializer = tf.constant_initializer(np.zeros(self._num_units_per_block, self._num_units_per_block)),\n # trainable = False)\n # V = tf.get_variable('V', [self._num_units_per_block, self._num_units_per_block],\n # initializer = tf.constant_initializer(np.zeros(self._num_units_per_block, self._num_units_per_block)),\n # trainable = False)\n\n # b = tf.get_variable('biasU',[self._num_units_per_block])\n # self._q = tf.Print(self._q, [self._q],summarize=10)\n # TODO: layer norm?\n\n\n state = tf.split(state, self._num_blocks, 1)\n next_states = []\n for j, state_j in enumerate(state): # Hidden State (j)\n key_j = self._keys[j]\n gate_j = self.get_gate(state_j, key_j, inputs)\n candidate_j = inputs\n\n # Equation 4: h_j <- h_j + g_j * h_j^~\n # Perform an update of the hidden state (memory).\n state_j_next = state_j + tf.expand_dims(gate_j, -1) * candidate_j\n\n # # Forget previous memories by normalization.\n # state_j_next = tf.nn.l2_normalize(state_j_next, -1) # TODO: Is epsilon necessary?\n\n\n next_states.append(state_j_next)\n state_next = tf.concat(next_states, 1)\n return state_next, state_next\n"
] | [
[
"tensorflow.nn.relu",
"tensorflow.matmul",
"tensorflow.nn.sigmoid",
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.expand_dims",
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"tensorflow.split",
"tensorflow.random_normal_initializer",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
abhiramr/mlflow | [
"2bbdc20f2d90d551fb7d40f982f2f799da9feca8"
] | [
"examples/pytorch/CaptumExample/Titanic_Captum_Interpret.py"
] | [
"\"\"\"\nGetting started with Captum - Titanic Data Analysis\n\"\"\"\n# Initial imports\nimport numpy as np\nimport torch\nfrom captum.attr import IntegratedGradients\nfrom captum.attr import LayerConductance\nfrom captum.attr import NeuronConductance\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom scipy import stats\nimport mlflow\nfrom prettytable import PrettyTable\nfrom sklearn.model_selection import train_test_split\nimport os\nfrom argparse import ArgumentParser\nimport torch.nn as nn\n\n\ndef get_titanic():\n \"\"\"\n we now preprocess the data by converting some categorical features such as\n gender, location of embarcation, and passenger class into one-hot encodings\n We also remove some features that are more difficult to analyze\n After processing, the features we have are:\n Age: Passenger Age\n Sibsp: Number of Siblings / Spouses Aboard\n Parch: Number of Parents / Children Aboard\n Fare: Fare Amount Paid in British Pounds\n Female: Binary variable indicating whether passenger is female\n Male: Binary variable indicating whether passenger is male\n EmbarkC : Binary var indicating whether passenger embarked @ Cherbourg\n EmbarkQ : Binary var indicating whether passenger embarked @ Queenstown\n EmbarkS : Binary var indicating whether passenger embarked @ Southampton\n Class1 : Binary var indicating whether passenger was in first class\n Class2 : Binary var indicating whether passenger was in second class\n Class3 : Binary var indicating whether passenger was in third class\n url = \"https://biostat.app.vumc.org/wiki/pub/Main/DataSets/titanic3.csv\"\n \"\"\"\n url = \"https://biostat.app.vumc.org/wiki/pub/Main/DataSets/titanic3.csv\"\n titanic_data = pd.read_csv(url)\n titanic_data = pd.concat(\n [\n titanic_data,\n pd.get_dummies(titanic_data[\"sex\"]),\n pd.get_dummies(titanic_data[\"embarked\"], prefix=\"embark\"),\n pd.get_dummies(titanic_data[\"pclass\"], prefix=\"class\"),\n ],\n axis=1,\n )\n\n titanic_data[\"age\"] = titanic_data[\"age\"].fillna(titanic_data[\"age\"].mean())\n titanic_data[\"fare\"] = titanic_data[\"fare\"].fillna(titanic_data[\"fare\"].mean())\n titanic_data = titanic_data.drop(\n [\n \"name\",\n \"ticket\",\n \"cabin\",\n \"boat\",\n \"body\",\n \"home.dest\",\n \"sex\",\n \"embarked\",\n \"pclass\",\n ],\n axis=1,\n )\n return titanic_data\n\n\ntorch.manual_seed(1) # Set seed for reproducibility.\n\n\nclass TitanicSimpleNNModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = nn.Linear(12, 12)\n self.sigmoid1 = nn.Sigmoid()\n self.linear2 = nn.Linear(12, 8)\n self.sigmoid2 = nn.Sigmoid()\n self.linear3 = nn.Linear(8, 2)\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, x):\n lin1_out = self.linear1(x)\n sigmoid_out1 = self.sigmoid1(lin1_out)\n sigmoid_out2 = self.sigmoid2(self.linear2(sigmoid_out1))\n return self.softmax(self.linear3(sigmoid_out2))\n\n\ndef prepare():\n RANDOM_SEED = 42\n titanic_data = get_titanic()\n labels = titanic_data[\"survived\"].to_numpy()\n titanic_data = titanic_data.drop([\"survived\"], axis=1)\n feature_names = list(titanic_data.columns)\n data = titanic_data.to_numpy()\n # Separate training and test sets using\n train_features, test_features, train_labels, test_labels = train_test_split(\n data, labels, test_size=0.3, random_state=RANDOM_SEED, stratify=labels\n )\n return (train_features, train_labels, test_features, test_labels, feature_names)\n\n\ndef count_model_parameters(model):\n table = PrettyTable([\"Modules\", \"Parameters\"])\n total_params = 0\n for name, parameter in 
model.named_parameters():\n\n if not parameter.requires_grad:\n continue\n param = parameter.nonzero(as_tuple=False).size(0)\n table.add_row([name, param])\n total_params += param\n\n return table, total_params\n\n\ndef visualize_importances(\n feature_names,\n importances,\n title=\"Average Feature Importances\",\n plot=True,\n axis_title=\"Features\",\n):\n print(title)\n feature_imp = PrettyTable([\"feature_name\", \"importances\"])\n feature_imp_dict = {}\n for i in range(len(feature_names)):\n print(feature_names[i], \": \", \"%.3f\" % (importances[i]))\n feature_imp.add_row([feature_names[i], importances[i]])\n feature_imp_dict[str(feature_names[i])] = importances[i]\n x_pos = np.arange(len(feature_names))\n if plot:\n fig, ax = plt.subplots(figsize=(12, 6))\n ax.bar(x_pos, importances, align=\"center\")\n ax.set(title=title, xlabel=axis_title)\n ax.set_xticks(x_pos)\n ax.set_xticklabels(feature_names, rotation=\"vertical\")\n mlflow.log_figure(fig, title + \".png\")\n return feature_imp, feature_imp_dict\n\n\ndef train(USE_PRETRAINED_MODEL=False):\n net = TitanicSimpleNNModel()\n train_features, train_labels, test_features, test_labels, feature_names = prepare()\n USE_PRETRAINED_MODEL = dict_args[\"use_pretrained_model\"]\n if USE_PRETRAINED_MODEL:\n net.load_state_dict(torch.load(\"models/titanic_state_dict.pt\"))\n net.eval()\n print(\"Model Loaded!\")\n else:\n criterion = nn.CrossEntropyLoss()\n num_epochs = dict_args[\"max_epochs\"]\n mlflow.log_param(\"epochs\", num_epochs)\n mlflow.log_param(\"lr\", dict_args[\"lr\"])\n\n optimizer = torch.optim.Adam(net.parameters(), lr=dict_args[\"lr\"])\n input_tensor = torch.from_numpy(train_features).type(torch.FloatTensor)\n label_tensor = torch.from_numpy(train_labels)\n for epoch in range(num_epochs):\n output = net(input_tensor)\n loss = criterion(output, label_tensor)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if epoch % 50 == 0:\n print(\n \"Epoch {}/{} => Train Loss: {:.2f}\".format(epoch + 1, num_epochs, loss.item())\n )\n mlflow.log_metric(\n \"Epoch {} Loss\".format(str(epoch + 1)),\n float(loss.item()),\n step=epoch,\n )\n if not os.path.isdir(\"models\"):\n os.makedirs(\"models\")\n torch.save(net.state_dict(), \"models/titanic_state_dict.pt\")\n summary, _ = count_model_parameters(net)\n mlflow.log_text(str(summary), \"model_summary.txt\")\n return (\n net,\n train_features,\n train_labels,\n test_features,\n test_labels,\n feature_names,\n )\n\n\ndef compute_accuracy(net, features, labels, title=None):\n input_tensor = torch.from_numpy(features).type(torch.FloatTensor)\n out_probs = net(input_tensor).detach().numpy()\n out_classes = np.argmax(out_probs, axis=1)\n mlflow.log_metric(title, float(sum(out_classes == labels) / len(labels)))\n print(title, sum(out_classes == labels) / len(labels))\n return input_tensor\n\n\ndef feature_conductance(net, test_input_tensor):\n \"\"\"\n The method takes tensor(s) of input examples (matching the forward function of the model),\n and returns the input attributions for the given input example.\n The returned values of the attribute method are the attributions,\n which match the size of the given inputs, and delta,\n which approximates the error between the approximated integral and true integral.\n This method saves the distribution of avg attributions of the trained features for the given target.\n \"\"\"\n ig = IntegratedGradients(net)\n test_input_tensor.requires_grad_()\n attr, _ = ig.attribute(test_input_tensor, target=1, return_convergence_delta=True)\n attr = 
attr.detach().numpy()\n # To understand these attributions, we can first average them across all the inputs and print and visualize the average attribution for each feature.\n feature_imp, feature_imp_dict = visualize_importances(feature_names, np.mean(attr, axis=0))\n mlflow.log_metrics(feature_imp_dict)\n mlflow.log_text(str(feature_imp), \"feature_imp_summary.txt\")\n fig, (ax1, ax2) = plt.subplots(2, 1)\n fig.tight_layout(pad=3)\n ax1.hist(attr[:, 1], 100)\n ax1.set(title=\"Distribution of Sibsp Attribution Values\")\n\n # we can bucket the examples by the value of the sibsp feature and plot the average attribution for the feature.\n # In the plot below, the size of the dot is proportional to the number of examples with that value.\n\n bin_means, bin_edges, _ = stats.binned_statistic(\n test_features[:, 1], attr[:, 1], statistic=\"mean\", bins=6\n )\n bin_count, _, _ = stats.binned_statistic(\n test_features[:, 1], attr[:, 1], statistic=\"count\", bins=6\n )\n\n bin_width = bin_edges[1] - bin_edges[0]\n bin_centers = bin_edges[1:] - bin_width / 2\n ax2.scatter(bin_centers, bin_means, s=bin_count)\n ax2.set(xlabel=\"Average Sibsp Feature Value\", ylabel=\"Average Attribution\")\n mlflow.log_figure(fig, \"Average_Sibsp_Feature_Value.png\")\n\n\ndef layer_conductance(net, test_input_tensor):\n \"\"\"\n To use Layer Conductance, we create a LayerConductance object passing in the model as well as the module (layer) whose output we would like to understand.\n In this case, we choose net.sigmoid1, the output of the first hidden layer.\n Now obtain the conductance values for all the test examples by calling attribute on the LayerConductance object.\n LayerConductance also requires a target index for networks with multiple outputs, defining the index of the output for which gradients are computed.\n Similar to feature attributions, we provide target = 1, corresponding to survival.\n LayerConductance also utilizes a baseline, but we simply use the default zero baseline as in integrated gradients.\n \"\"\"\n\n cond = LayerConductance(net, net.sigmoid1)\n\n cond_vals = cond.attribute(test_input_tensor, target=1)\n cond_vals = cond_vals.detach().numpy()\n # We can begin by visualizing the average conductance for each neuron.\n neuron_names = [\"neuron \" + str(x) for x in range(12)]\n avg_neuron_imp, neuron_imp_dict = visualize_importances(\n neuron_names,\n np.mean(cond_vals, axis=0),\n title=\"Average Neuron Importances\",\n axis_title=\"Neurons\",\n )\n mlflow.log_metrics(neuron_imp_dict)\n mlflow.log_text(str(avg_neuron_imp), \"neuron_imp_summary.txt\")\n # We can also look at the distribution of each neuron's attributions. 
Below we look at the distributions for neurons 7 and 9,\n # and we can confirm that their attribution distributions are very close to 0, suggesting they are not learning substantial features.\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(9, 6))\n fig.tight_layout(pad=3)\n ax1.hist(cond_vals[:, 9], 100)\n ax1.set(title=\"Neuron 9 Distribution\")\n ax2.hist(cond_vals[:, 7], 100)\n ax2.set(title=\"Neuron 7 Distribution\")\n mlflow.log_figure(fig, \"Neurons_Distribution.png\")\n\n\ndef neuron_conductance(net, test_input_tensor, neuron_selector=None):\n \"\"\"\n We have identified that some of the neurons are not learning important features, while others are.\n Can we now understand what each of these important neurons are looking at in the input?\n For instance, are they identifying different features in the input or similar ones?\n\n To answer these questions, we can apply the third type of attributions available in Captum, **Neuron Attributions**.\n This allows us to understand what parts of the input contribute to activating a particular input neuron. For this example,\n we will apply Neuron Conductance, which divides the neuron's total conductance value into the contribution from each individual input feature.\n\n To use Neuron Conductance, we create a NeuronConductance object, analogously to Conductance,\n passing in the model as well as the module (layer) whose output we would like to understand, in this case, net.sigmoid1, as before.\n \"\"\"\n neuron_selector = 0\n neuron_cond = NeuronConductance(net, net.sigmoid1)\n\n # We can now obtain the neuron conductance values for all the test examples by calling attribute on the NeuronConductance object.\n # Neuron Conductance requires the neuron index in the target layer for which attributions are requested as well as the target index for networks with multiple outputs,\n # similar to layer conductance. 
As before, we provide target = 1, corresponding to survival, and compute neuron conductance for neurons 0 and 10, the significant neurons identified above.\n # The neuron index can be provided either as a tuple or as just an integer if the layer output is 1-dimensional.\n\n neuron_cond_vals = neuron_cond.attribute(\n test_input_tensor, neuron_selector=neuron_selector, target=1\n )\n neuron_cond, _ = visualize_importances(\n feature_names,\n neuron_cond_vals.mean(dim=0).detach().numpy(),\n title=\"Average Feature Importances for Neuron {}\".format(neuron_selector),\n )\n mlflow.log_text(\n str(neuron_cond), \"Avg_Feature_Importances_Neuron_\" + str(neuron_selector) + \".txt\"\n )\n\n\nif __name__ == \"__main__\":\n\n parser = ArgumentParser(description=\"Titanic Captum Example\")\n\n parser.add_argument(\n \"--use_pretrained_model\",\n default=False,\n metavar=\"N\",\n help=\"Use pretrained model or train from the scratch\",\n )\n\n parser.add_argument(\n \"--max_epochs\",\n type=int,\n default=100,\n metavar=\"N\",\n help=\"Number of epochs to be used for training\",\n )\n\n parser.add_argument(\n \"--lr\",\n type=float,\n default=0.1,\n metavar=\"LR\",\n help=\"learning rate (default: 0.1)\",\n )\n\n args = parser.parse_args()\n dict_args = vars(args)\n\n with mlflow.start_run(run_name=\"Titanic_Captum_mlflow\"):\n net, train_features, train_labels, test_features, test_labels, feature_names = train()\n\n compute_accuracy(net, train_features, train_labels, title=\"Train Accuracy\")\n test_input_tensor = compute_accuracy(net, test_features, test_labels, title=\"Test Accuracy\")\n feature_conductance(net, test_input_tensor)\n layer_conductance(net, test_input_tensor)\n neuron_conductance(net, test_input_tensor)\n mlflow.log_param(\"Train Size\", len(train_labels))\n mlflow.log_param(\"Test Size\", len(test_labels))\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.load",
"torch.manual_seed",
"matplotlib.pyplot.subplots",
"sklearn.model_selection.train_test_split",
"torch.nn.Sigmoid",
"torch.from_numpy",
"torch.nn.Linear",
"numpy.argmax",
"numpy.mean",
"scipy.stats.binned_statistic",
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
airium/pytorch-lightning | [
"c7292b4db40cbd94196999dd93903ff4907ad40b"
] | [
"tests/base/deterministic_model.py"
] | [
"import torch\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\n\n\nclass DeterministicModel(LightningModule):\n\n def __init__(self, weights=None):\n super().__init__()\n\n self.training_step_called = False\n self.training_step_end_called = False\n self.training_epoch_end_called = False\n\n if weights is None:\n weights = torch.tensor([\n [4, 3, 5],\n [10, 11, 13]\n ]).float()\n self.l1 = torch.nn.Parameter(weights, requires_grad=True)\n\n def forward(self, x):\n return self.l1.mm(x.float().t())\n\n def step(self, batch, batch_idx):\n x = batch\n y_hat = self(x)\n\n assert torch.all(y_hat[0, :] == 15.0)\n assert torch.all(y_hat[1, :] == 42.0)\n out = y_hat.sum()\n assert out == (42.0 * 3) + (15.0 * 3)\n\n return out\n\n def assert_graph_count(self, result, count=1):\n counts = self.count_num_graphs(result)\n assert counts == count\n\n def count_num_graphs(self, result, num_graphs=0):\n for k, v in result.items():\n if isinstance(v, torch.Tensor) and v.grad_fn is not None:\n num_graphs += 1\n if isinstance(v, dict):\n num_graphs += self.count_num_graphs(v)\n\n return num_graphs\n\n # --------------------------\n # dictionary returns\n # --------------------------\n def training_step_dict_return(self, batch, batch_idx):\n acc = self.step(batch, batch_idx)\n\n logs = {'log_acc1': torch.tensor(12).type_as(acc), 'log_acc2': torch.tensor(7).type_as(acc)}\n pbar = {'pbar_acc1': torch.tensor(17).type_as(acc), 'pbar_acc2': torch.tensor(19).type_as(acc)}\n\n self.training_step_called = True\n return {'loss': acc, 'log': logs, 'progress_bar': pbar}\n\n def training_step_for_step_end_dict(self, batch, batch_idx):\n \"\"\"sends outputs to training_batch_end\"\"\"\n acc = self.step(batch, batch_idx)\n\n logs = {'log_acc1': torch.tensor(12).type_as(acc), 'log_acc2': torch.tensor(7).type_as(acc)}\n pbar = {'pbar_acc1': torch.tensor(17).type_as(acc), 'pbar_acc2': torch.tensor(19).type_as(acc)}\n\n self.training_step_called = True\n result = {'loss': acc}\n result.update(logs)\n result.update(pbar)\n return result\n\n def training_step_end_dict(self, output):\n self.training_step_end_called = True\n\n # make sure loss has the grad\n assert 'loss' in output\n assert output['loss'].grad_fn is not None\n\n # make sure nothing else has grads\n assert self.count_num_graphs(output) == 1\n\n # make sure the other keys are there\n assert 'log_acc1' in output\n assert 'log_acc2' in output\n assert 'pbar_acc1' in output\n assert 'pbar_acc2' in output\n\n logs = {'log_acc1': output['log_acc1'], 'log_acc2': output['log_acc2']}\n pbar = {'pbar_acc1': output['pbar_acc1'], 'pbar_acc2': output['pbar_acc2']}\n\n acc = output['loss']\n return {'loss': acc, 'log': logs, 'progress_bar': pbar}\n\n def training_epoch_end_dict(self, outputs):\n self.training_epoch_end_called = True\n\n if self.use_dp or self.use_ddp2:\n pass\n else:\n # only saw 4 batches\n assert len(outputs) == 4\n for batch_out in outputs:\n assert len(batch_out.keys()) == 5\n keys = ['batch_loss', 'pbar_on_batch_end', 'log_metrics', 'callback_metrics']\n for key in keys:\n assert key in batch_out\n\n prototype_loss = outputs[0]['batch_loss']\n logs = {'epoch_end_log_1': torch.tensor(178).type_as(prototype_loss)}\n pbar = {'epoch_end_pbar_1': torch.tensor(234).type_as(prototype_loss)}\n\n return {'log': logs, 'progress_bar': pbar}\n\n def validation_step_dict_return(self, batch, batch_idx):\n acc = self.step(batch, batch_idx)\n\n logs = {'log_acc1': 
torch.tensor(12).type_as(acc), 'log_acc2': torch.tensor(7).type_as(acc)}\n pbar = {'pbar_acc1': torch.tensor(17).type_as(acc), 'pbar_acc2': torch.tensor(19).type_as(acc)}\n return {'val_loss': acc, 'log': logs, 'progress_bar': pbar}\n\n def train_dataloader(self):\n return DataLoader(DummyDataset(), batch_size=3, shuffle=False)\n\n def val_dataloader(self):\n return DataLoader(DummyDataset(), batch_size=3, shuffle=False)\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=0)\n\n def backward(self, trainer, loss, optimizer, optimizer_idx):\n assert loss == 171.0\n loss.backward()\n\n\nclass DummyDataset(Dataset):\n\n def __len__(self):\n return 12\n\n def __getitem__(self, idx):\n return np.array([0.5, 1.0, 2.0])\n"
] | [
[
"torch.all",
"numpy.array",
"torch.nn.Parameter",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mljar/automlbenchmark | [
"d72dbc5b9cc14e0b02179348be7d5c01d4c13da2"
] | [
"frameworks/shared/caller.py"
] | [
"import io\nimport logging\nimport os\nimport re\nfrom typing import Union\nimport uuid\n\nimport numpy as np\n\nfrom amlb.benchmark import TaskConfig\nfrom amlb.data import Dataset\nfrom amlb.resources import config as rconfig\nfrom amlb.results import NoResultError, save_predictions\nfrom amlb.utils import Namespace as ns, Timer, TmpDir, dir_of, run_cmd, json_dumps, json_loads\n\nlog = logging.getLogger(__name__)\n\nvector_keys = re.compile(\"^y(_.+)?$\")\n\n\ndef run_in_venv(caller_file, script_file: str, *args,\n input_data: Union[dict, ns], dataset: Dataset, config: TaskConfig,\n process_results=None,\n python_exec=None):\n\n here = dir_of(caller_file)\n venv_bin_path = os.path.join(here, 'venv', 'bin')\n if python_exec is None: # use local virtual env by default\n python_exec = os.path.join(venv_bin_path, 'python -W ignore')\n script_path = os.path.join(here, script_file)\n cmd = f\"{python_exec} {script_path}\"\n\n input_data = ns.from_dict(input_data)\n with TmpDir() as tmpdir:\n\n def make_path(k, v, parents=None):\n if isinstance(v, np.ndarray):\n path = os.path.join(tmpdir, '.'.join(parents+[k, 'npy']))\n if vector_keys.match(k):\n v = v.reshape(-1, 1)\n np.save(path, v, allow_pickle=True)\n return k, path\n return k, v\n\n ds = ns.walk(input_data, make_path)\n dataset.release()\n\n config.result_token = str(uuid.uuid1())\n config.result_dir = tmpdir\n\n params = json_dumps(dict(dataset=ds, config=config), style='compact')\n with Timer() as proc_timer:\n output, err = run_cmd(cmd, *args,\n _input_str_=params,\n _live_output_=True,\n _error_level_=logging.DEBUG,\n _env_=dict(\n PATH=os.pathsep.join([\n venv_bin_path,\n os.environ['PATH']\n ]),\n PYTHONPATH=os.pathsep.join([\n rconfig().root_dir,\n ]),\n AMLB_PATH=os.path.join(rconfig().root_dir, \"amlb\")\n ),\n )\n\n out = io.StringIO(output)\n res = ns()\n for line in out:\n li = line.rstrip()\n if li == config.result_token:\n res = json_loads(out.readline(), as_namespace=True)\n break\n\n if res.error_message is not None:\n raise NoResultError(res.error_message)\n\n for name in ['predictions', 'truth', 'probabilities']:\n res[name] = np.load(res[name], allow_pickle=True) if res[name] is not None else None\n\n log.debug(\"Result from subprocess:\\n%s\", res)\n if callable(process_results):\n res = process_results(res)\n\n if res.output_file:\n save_predictions(dataset=dataset,\n output_file=res.output_file,\n predictions=res.predictions.reshape(-1) if res.predictions is not None else None,\n truth=res.truth.reshape(-1) if res.truth is not None else None,\n probabilities=res.probabilities,\n probabilities_labels=res.probabilities_labels,\n target_is_encoded=res.target_is_encoded)\n\n return dict(\n models_count=res.models_count if res.models_count is not None else 1,\n training_duration=res.training_duration if res.training_duration is not None else proc_timer.duration,\n predict_duration=res.predict_duration,\n **res.others.__dict__\n )\n"
] | [
[
"numpy.load",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
karelvaculik/altair-flask-demo | [
"04c54c057720b44b8ba8c78f9b86221d399889ea"
] | [
"altair_flask_demo/main.py"
] | [
"from flask import Flask\nfrom flask import render_template\nfrom flask import request\n\nimport altair as alt\nimport numpy as np\nimport pandas as pd\n\napp = Flask(__name__)\n\n\[email protected](\"/\")\ndef index():\n return render_template(f\"index.html\")\n\n\[email protected](\"/build_plot\", methods=[\"POST\"])\ndef build_plot():\n n = request.form.get(\"n_points\", 200, type=int)\n max_bins = request.form.get(\"max_bins\", 30, type=int)\n\n data = pd.DataFrame({'x': np.random.normal(size=n)})\n fig = alt.Chart(data).mark_bar().encode(\n alt.X(\"x:Q\", bin=alt.Bin(maxbins=max_bins)),\n y='count()',\n )\n\n return fig.to_json()\n"
] | [
[
"numpy.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chrieke/rio-tiler | [
"ccf6aa002fc0c3525c9889cffbb9ca26689ba0b9"
] | [
"tests/test_mosaic.py"
] | [
"\"\"\"tests ard_tiler.mosaic.\"\"\"\n\nimport os\nfrom typing import Tuple\nfrom unittest.mock import patch\n\nimport numpy\nimport pytest\nimport rasterio\nfrom rasterio.warp import transform_bounds\n\nfrom rio_tiler import mosaic\nfrom rio_tiler.constants import WEB_MERCATOR_TMS, WGS84_CRS\nfrom rio_tiler.errors import EmptyMosaicError, InvalidMosaicMethod, TileOutsideBounds\nfrom rio_tiler.io import COGReader, STACReader\nfrom rio_tiler.models import ImageData\nfrom rio_tiler.mosaic.methods import defaults\n\nasset1 = os.path.join(os.path.dirname(__file__), \"fixtures\", \"mosaic_cog1.tif\")\nasset2 = os.path.join(os.path.dirname(__file__), \"fixtures\", \"mosaic_cog2.tif\")\nassets = [asset1, asset2]\nassets_order = [asset2, asset1]\n\nstac_asset = os.path.join(os.path.dirname(__file__), \"fixtures\", \"stac.json\")\n\n# Full covered tile\nx = 150\ny = 182\nz = 9\n\n# Partially covered tile\nxp = 150\nyp = 180\nzp = 9\n\n# Outside tile\nxo = 200\nyo = 180\nzo = 9\n\n\ndef _read_tile(src_path: str, *args, **kwargs) -> ImageData:\n \"\"\"Read tile from an asset\"\"\"\n with COGReader(src_path) as cog:\n return cog.tile(*args, **kwargs)\n\n\ndef _read_part(src_path: str, *args, **kwargs) -> ImageData:\n \"\"\"Read part from an asset\"\"\"\n with COGReader(src_path) as cog:\n return cog.part(*args, **kwargs)\n\n\ndef _read_preview(\n src_path: str, *args, **kwargs\n) -> Tuple[numpy.ndarray, numpy.ndarray]:\n \"\"\"Read preview from an asset\"\"\"\n with COGReader(src_path) as cog:\n data, mask = cog.preview(*args, **kwargs)\n return data, mask\n\n\ndef test_mosaic_tiler():\n \"\"\"Test mosaic tiler.\"\"\"\n # test with default and full covered tile and default options\n (t, m), assets_used = mosaic.mosaic_reader(assets, _read_tile, x, y, z)\n assert t.shape == (3, 256, 256)\n assert m.shape == (256, 256)\n assert m.all()\n assert t[0][-1][-1] == 8682\n\n # Test last pixel selection\n assetsr = list(reversed(assets))\n (t, m), _ = mosaic.mosaic_reader(assetsr, _read_tile, x, y, z)\n assert t.shape == (3, 256, 256)\n assert m.shape == (256, 256)\n assert m.all()\n assert t[0][-1][-1] == 8057\n\n (t, m), _ = mosaic.mosaic_reader(assets, _read_tile, x, y, z, indexes=1)\n assert t.shape == (1, 256, 256)\n assert m.shape == (256, 256)\n assert t.all()\n assert m.all()\n assert t[0][-1][-1] == 8682\n\n # Test darkest pixel selection\n (t, m), assets_used = mosaic.mosaic_reader(\n assets, _read_tile, x, y, z, pixel_selection=defaults.LowestMethod()\n )\n assert len(assets_used) == 2\n assert m.all()\n assert t[0][-1][-1] == 8057\n\n (to, mo), _ = mosaic.mosaic_reader(\n assets_order, _read_tile, x, y, z, pixel_selection=defaults.LowestMethod()\n )\n numpy.testing.assert_array_equal(t[0, m], to[0, mo])\n\n # Test brightest pixel selection\n (t, m), _ = mosaic.mosaic_reader(\n assets, _read_tile, x, y, z, pixel_selection=defaults.HighestMethod()\n )\n assert m.all()\n assert t[0][-1][-1] == 8682\n\n (to, mo), _ = mosaic.mosaic_reader(\n assets_order, _read_tile, x, y, z, pixel_selection=defaults.HighestMethod()\n )\n numpy.testing.assert_array_equal(to, t)\n numpy.testing.assert_array_equal(mo, m)\n\n # test with default and partially covered tile\n (t, m), _ = mosaic.mosaic_reader(\n assets, _read_tile, xp, yp, zp, pixel_selection=defaults.HighestMethod()\n )\n assert t.any()\n assert not m.all()\n\n # test when tiler raise errors (outside bounds)\n with pytest.raises(EmptyMosaicError):\n mosaic.mosaic_reader(assets, _read_tile, 150, 300, 9)\n\n # Test mean pixel selection\n (t, m), _ = 
mosaic.mosaic_reader(\n assets, _read_tile, x, y, z, pixel_selection=defaults.MeanMethod()\n )\n assert t.shape == (3, 256, 256)\n assert m.shape == (256, 256)\n assert m.all()\n assert t[0][-1][-1] == 8369\n\n # Test mean pixel selection\n (t, m), _ = mosaic.mosaic_reader(\n assets,\n _read_tile,\n x,\n y,\n z,\n pixel_selection=defaults.MeanMethod(enforce_data_type=False),\n )\n assert m.all()\n assert t[0][-1][-1] == 8369.5\n\n # Test median pixel selection\n (t, m), _ = mosaic.mosaic_reader(\n assets, _read_tile, x, y, z, pixel_selection=defaults.MedianMethod()\n )\n assert t.shape == (3, 256, 256)\n assert m.shape == (256, 256)\n assert m.all()\n assert t[0][-1][-1] == 8369\n\n # Test median pixel selection\n (t, m), _ = mosaic.mosaic_reader(\n assets,\n _read_tile,\n x,\n y,\n z,\n pixel_selection=defaults.MedianMethod(enforce_data_type=False),\n )\n assert m.all()\n assert t[0][-1][-1] == 8369.5\n\n (t, m), _ = mosaic.mosaic_reader(\n assets_order,\n _read_tile,\n x,\n y,\n z,\n pixel_selection=defaults.LastBandHigh(),\n indexes=(1, 2, 3, 1),\n )\n assert t.shape == (3, 256, 256)\n assert m.shape == (256, 256)\n assert m.all()\n assert t[0][-1][-1] == 8682\n\n (t, m), _ = mosaic.mosaic_reader(\n assets_order,\n _read_tile,\n x,\n y,\n z,\n pixel_selection=defaults.LastBandLow(),\n indexes=(1, 2, 3, 1),\n )\n assert t.shape == (3, 256, 256)\n assert m.shape == (256, 256)\n assert m.all()\n assert t[0][-1][-1] == 8057\n\n # Test pixel selection as _class_, not instance of class\n (t, m), assets_used = mosaic.mosaic_reader(\n assets, _read_tile, x, y, z, pixel_selection=defaults.FirstMethod\n )\n assert t.shape == (3, 256, 256)\n assert m.shape == (256, 256)\n assert m.all()\n assert t[0][-1][-1] == 8682\n\n # Test invalid Pixel Selection class\n with pytest.raises(InvalidMosaicMethod):\n\n class aClass(object):\n pass\n\n mosaic.mosaic_reader(assets, _read_tile, x, y, z, pixel_selection=aClass())\n\n # test with preview\n # NOTE: We need to fix the output width and height because each preview could have different size\n # Also because the 2 assets cover different bbox, getting the preview merged together doesn't make real sense\n (t, m), _ = mosaic.mosaic_reader(assets, _read_preview, width=256, height=256)\n assert t.shape == (3, 256, 256)\n assert m.shape == (256, 256)\n\n\ndef mock_rasterio_open(asset):\n \"\"\"Mock rasterio Open.\"\"\"\n assert asset.startswith(\"http://somewhere-over-the-rainbow.io\")\n asset = asset.replace(\n \"http://somewhere-over-the-rainbow.io\",\n os.path.join(os.path.dirname(__file__), \"fixtures\"),\n )\n return rasterio.open(asset)\n\n\n@patch(\"rio_tiler.io.cogeo.rasterio\")\ndef test_stac_mosaic_tiler(rio):\n \"\"\"Test mosaic tiler with STACReader.\"\"\"\n rio.open = mock_rasterio_open\n\n def _reader(src_path: str, *args, **kwargs) -> ImageData:\n \"\"\"Read tile from an asset\"\"\"\n with STACReader(src_path) as stac:\n return stac.tile(*args, **kwargs)\n\n (data, mask), assets_used = mosaic.mosaic_reader(\n [stac_asset], _reader, 71, 102, 8, assets=\"green\", threads=0,\n )\n assert assets_used == [stac_asset]\n assert data.shape == (1, 256, 256)\n assert mask.shape == (256, 256)\n\n\ndef test_mosaic_tiler_Stdev():\n \"\"\"Test Stdev mosaic methods.\"\"\"\n tile1, _ = _read_tile(assets[0], x, y, z)\n tile2, _ = _read_tile(assets[1], x, y, z)\n\n (t, m), _ = mosaic.mosaic_reader(\n assets, _read_tile, x, y, z, pixel_selection=defaults.StdevMethod()\n )\n assert t.shape == (3, 256, 256)\n assert m.shape == (256, 256)\n assert m.all()\n assert 
t[0][-1][-1] == numpy.std([tile1[0][-1][-1], tile2[0][-1][-1]])\n assert t[1][-1][-1] == numpy.std([tile1[1][-1][-1], tile2[1][-1][-1]])\n assert t[2][-1][-1] == numpy.std([tile1[2][-1][-1], tile2[2][-1][-1]])\n\n\ndef test_threads():\n \"\"\"Test mosaic tiler.\"\"\"\n assets = [asset2, asset1, asset1, asset2, asset1, asset2]\n\n # TileOutSide bounds should be ignored but no tile is created\n with pytest.raises(EmptyMosaicError):\n mosaic.mosaic_reader(assets, _read_tile, xo, yo, zo, threads=2)\n\n # TileOutSide bounds should be ignored but no tile is created\n with pytest.raises(EmptyMosaicError):\n mosaic.mosaic_reader(assets, _read_tile, xo, yo, zo, threads=0)\n\n # Only cover asset1\n xpp = 147\n ypp = 180\n zpp = 9\n\n with pytest.raises(TileOutsideBounds):\n mosaic.mosaic_reader(\n assets,\n _read_tile,\n xpp,\n ypp,\n zpp,\n pixel_selection=defaults.MedianMethod,\n allowed_exceptions=None,\n )\n\n # Partial tile, some assets should Error with TileOutside bounds\n (tnothread, _), a = mosaic.mosaic_reader(\n assets,\n _read_tile,\n xpp,\n ypp,\n zpp,\n threads=0,\n pixel_selection=defaults.MedianMethod,\n )\n assert len(a) == 3\n assert tnothread.shape\n\n # Partial tile, some assets should Error with TileOutside bounds\n (tnothread, _), a = mosaic.mosaic_reader(\n assets,\n _read_tile,\n xpp,\n ypp,\n zpp,\n threads=1,\n pixel_selection=defaults.MedianMethod,\n )\n assert len(a) == 3\n assert tnothread.shape\n\n (tnothread, _), _ = mosaic.mosaic_reader(\n assets,\n _read_tile,\n xpp,\n ypp,\n zpp,\n threads=0,\n pixel_selection=defaults.MedianMethod,\n )\n (tmulti_threads, _), _ = mosaic.mosaic_reader(\n assets,\n _read_tile,\n xpp,\n ypp,\n zpp,\n threads=3,\n pixel_selection=defaults.MedianMethod,\n )\n numpy.testing.assert_array_equal(tnothread, tmulti_threads)\n\n (t, _), _ = mosaic.mosaic_reader(\n assets, _read_tile, x, y, z, threads=0, chunk_size=2\n )\n assert t.shape == (3, 256, 256)\n (t, _), _ = mosaic.mosaic_reader(\n assets, _read_tile, x, y, z, threads=2, chunk_size=4\n )\n assert t.shape == (3, 256, 256)\n\n\ndef test_mosaic_tiler_with_imageDataClass():\n \"\"\"Test mosaic tiler.\"\"\"\n img, _ = mosaic.mosaic_reader(assets, _read_tile, x, y, z)\n assert img.data.shape == (3, 256, 256)\n assert img.mask.shape == (256, 256)\n assert img.mask.all()\n assert img.data[0][-1][-1] == 8682\n assert len(img.assets) == 1\n\n assert img.crs == WEB_MERCATOR_TMS.crs\n assert img.bounds == WEB_MERCATOR_TMS.xy_bounds(x, y, z)\n\n img, assets_used = mosaic.mosaic_reader(\n assets, _read_tile, x, y, z, pixel_selection=defaults.LowestMethod()\n )\n assert assets_used == img.assets == assets\n assert img.crs == WEB_MERCATOR_TMS.crs\n assert img.bounds == WEB_MERCATOR_TMS.xy_bounds(x, y, z)\n\n img, assets_used = mosaic.mosaic_reader(\n assets,\n _read_preview,\n width=256,\n height=256,\n pixel_selection=defaults.LowestMethod(),\n )\n assert img.data.shape == (3, 256, 256)\n assert img.mask.shape == (256, 256)\n assert assets_used == img.assets == assets\n assert not img.crs\n assert not img.bounds\n\n bbox = [-75.98703377413767, 44.93504283293786, -71.337604723999, 47.09685599202324]\n with COGReader(assets[0]) as cog:\n crs1 = cog.dataset.crs\n\n with COGReader(assets[0]) as cog:\n crs2 = cog.dataset.crs\n\n img, assets_used = mosaic.mosaic_reader(\n assets, _read_part, bbox=bbox, dst_crs=crs1, bounds_crs=WGS84_CRS, max_size=1024\n )\n assert img.data.shape == (3, 690, 1024)\n assert img.mask.shape == (690, 1024)\n assert img.mask.any()\n assert assets_used == img.assets == assets\n 
assert img.crs == crs1 == crs2\n assert not img.bounds == bbox\n bbox_in_crs = transform_bounds(WGS84_CRS, crs1, *bbox, densify_pts=21)\n assert img.bounds == bbox_in_crs\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.std"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AlparslanErol/Course_Related | [
"9a59eb2857c525769b046b7b2a7706ec4a1cdba8"
] | [
"Nonparametric_Regression/regressogram.py"
] | [
"# IMPORT LIBRARIES\n# =============================================================================\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport warnings\n# =============================================================================\n\n\n# VERSION CHECK\n# =============================================================================\npd.set_option('display.float_format', lambda x: '%.3f' % x)\nsns.set(style='white', context='notebook', palette='deep')\nwarnings.filterwarnings('ignore')\nsns.set_style('white')\n# =============================================================================\n\n\n# SET UP DATA\ndf = pd.read_csv(\"./hw04_data_set.csv\")\n\n\n# SPLIT DATA TRAIN AND TEST\n# =============================================================================\nnum_train = 150\nnum_test = 122\ntrain = df[0:num_train]\ntest = df[num_train:]\n# =============================================================================\n\n\n# SORTING BY ERUPTIONS\n# =============================================================================\ntrain = train.sort_values(by = [\"eruptions\"])\ntrain = train.reset_index(drop=True)\ntest = test.sort_values(by = [\"eruptions\"])\ntest = test.reset_index(drop=True)\ntrain_np_erup = np.array(train[\"eruptions\"])\ntrain_np_wait = np.array(train[\"waiting\"])\ntest_np_erup = np.array(test[\"eruptions\"])\ntest_np_wait = np.array(test[\"waiting\"])\n# =============================================================================\n\n\n# CONFIGS\n# =============================================================================\nwidht_param = 0.37\norigin_param = 1.5\nnum_bins = 10\nend_point = (origin_param) + (widht_param*num_bins)\nbins = np.linspace(origin_param, end_point, num_bins+1)\n# =============================================================================\n\n\n# FUNCTIONS\n# =============================================================================\n#RETURN NUMBER OF INPUTS IN EACH BIN\ndef bin_(data):\n temp = 0\n binss = np.zeros(num_bins)\n for index, value in enumerate(data[\"eruptions\"]):\n if not((value > (origin_param + (temp * widht_param))) & (value <= (origin_param + ((temp + 1) * widht_param)))):\n temp = temp + 1\n binss[temp] = binss[temp] + 1\n else:\n binss[temp] = binss[temp] + 1\n \n return binss\n\n\n#RETURN MIDDLE VALUE OF EACH BIN\ndef binn_mid(data):\n output = np.ceil(bin_(data)/2)\n return output\n\n\n# DIGITIZE DATA WTIH BIN NUMBERS\ndef split_bin(data):\n output = np.zeros(len(data))\n number = 1\n temp = 0\n for index, value in enumerate(data[\"waiting\"]):\n output[index] = number\n temp = temp + 1\n if temp == bin_(data)[number - 1]:\n number = number + 1\n temp = 0\n if number == (num_bins + 1):\n break\n return output\n\n\n# RETURN MEAN OF EACH BIN\ndef bin_means(data):\n output = [data.waiting[split_bin(data) == i].mean() for i in range(1, len(bins))]\n return output\n\n\n# ALGORITHM FOR REGRESSOGRAM\ndef regressogram(data):\n output = np.zeros(len(data))\n number = 1\n temp = 0\n for index, value in enumerate(data[\"waiting\"]):\n output[index] = bin_means(data)[number-1]\n temp = temp + 1\n if temp == bin_(data)[number - 1]:\n number = number + 1\n temp = 0\n if number == (num_bins + 1):\n break\n return output\n\n\n# FUNCTION FOR EVALUATION\ndef evaluate(data):\n # ROOT MEAN SQUARE ERROR RMSE CALCULATION \n differences = regressogram(test) - data.waiting \n differences_squared = differences ** 2 \n mean_of_differences_squared = differences_squared.mean() \n 
rmse_val = np.sqrt(mean_of_differences_squared) \n rmse_val = float(\"{0:.4f}\".format(rmse_val))\n # OUTPUT\n print(\"Regressogram => RMSE is \", rmse_val, \" when h is \", widht_param)\n# =============================================================================\n\n\nif __name__ == '__main__':\n \n reg = regressogram(train)\n # PLOTTING\n # =============================================================================\n a = plt.scatter(train[\"eruptions\"], train[\"waiting\"], edgecolors='b')\n b = plt.scatter(test[\"eruptions\"], test[\"waiting\"], color='r')\n plt.plot(train[\"eruptions\"],reg, linewidth = 3, color = 'k')\n plt.legend((a,b),\n ('train', 'test'),\n scatterpoints=1,\n loc='upper left',\n ncol=3,\n fontsize=10)\n plt.xlabel('Eruption time (min)')\n plt.ylabel('Waiting time to next eruption (min)')\n plt.title('h = 0.37')\n plt.show()\n # =============================================================================\n \n # EVALUATION WITH TEST DATA\n # ============================================================================= \n evaluate(test)\n # ============================================================================="
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"pandas.set_option",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Palzer/pytorch-lightning | [
"886702a1af442f33625693a9ba33c669f9fe9535",
"4018237c309b7d9d6978da73132003615341e04a"
] | [
"tests/trainer/test_dataloaders.py",
"pytorch_lightning/metrics/functional/ssim.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport platform\nfrom distutils.version import LooseVersion\nfrom unittest import mock\nfrom unittest.mock import patch\n\nimport pytest\nimport torch\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.dataset import IterableDataset, Subset\nfrom torch.utils.data.distributed import DistributedSampler\n\nimport tests.base.develop_pipelines as tpipes\nfrom pytorch_lightning import Trainer, Callback\nfrom pytorch_lightning.utilities.data import has_iterable_dataset, has_len\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.base import EvalModelTemplate\n\n\ndef test_fit_train_loader_only(tmpdir):\n\n model = EvalModelTemplate()\n train_dataloader = model.train_dataloader()\n\n model.train_dataloader = None\n model.val_dataloader = None\n model.test_dataloader = None\n\n model.validation_step = None\n model.validation_epoch_end = None\n\n model.test_step = None\n model.test_epoch_end = None\n\n trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)\n trainer.fit(model, train_dataloader=train_dataloader)\n\n\ndef test_fit_val_loader_only(tmpdir):\n\n model = EvalModelTemplate()\n train_dataloader = model.train_dataloader()\n val_dataloader = model.val_dataloader()\n\n model.train_dataloader = None\n model.val_dataloader = None\n model.test_dataloader = None\n\n model.test_step = None\n model.test_epoch_end = None\n\n trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)\n trainer.fit(model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader)\n\n\[email protected](\"dataloader_options\", [\n dict(val_check_interval=10000),\n])\ndef test_dataloader_config_errors_runtime(tmpdir, dataloader_options):\n model = EvalModelTemplate()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n **dataloader_options,\n )\n with pytest.raises(ValueError):\n # fit model\n trainer.fit(model)\n\n\[email protected](\"dataloader_options\", [\n dict(limit_train_batches=-0.1),\n dict(limit_train_batches=1.2),\n dict(limit_val_batches=-0.1),\n dict(limit_val_batches=1.2),\n dict(limit_test_batches=-0.1),\n dict(limit_test_batches=1.2),\n dict(val_check_interval=-0.1),\n dict(val_check_interval=1.2),\n dict(overfit_batches=-0.1),\n dict(overfit_batches=1.2),\n])\ndef test_dataloader_config_errors_init(tmpdir, dataloader_options):\n with pytest.raises(MisconfigurationException, match='passed invalid value'):\n Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n **dataloader_options,\n )\n\n\ndef test_multiple_val_dataloader(tmpdir):\n \"\"\"Verify multiple val_dataloader.\"\"\"\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__multiple\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n 
limit_train_batches=1.0,\n )\n result = trainer.fit(model)\n\n # verify training completed\n assert result == 1\n\n # verify there are 2 val loaders\n assert len(trainer.val_dataloaders) == 2, \\\n 'Multiple val_dataloaders not initiated properly'\n\n # make sure predictions are good for each val set\n for dataloader in trainer.val_dataloaders:\n tpipes.run_prediction(dataloader, trainer.model)\n\n\[email protected]('ckpt_path', [None, 'best', 'specific'])\ndef test_multiple_test_dataloader(tmpdir, ckpt_path):\n \"\"\"Verify multiple test_dataloader.\"\"\"\n\n model_template = EvalModelTemplate()\n\n class MultipleTestDataloaderModel(EvalModelTemplate):\n def test_dataloader(self):\n return model_template.test_dataloader__multiple()\n\n def test_step(self, batch, batch_idx, *args, **kwargs):\n return model_template.test_step__multiple_dataloaders(batch, batch_idx, *args, **kwargs)\n\n model = MultipleTestDataloaderModel()\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n trainer.fit(model)\n if ckpt_path == 'specific':\n ckpt_path = trainer.checkpoint_callback.best_model_path\n trainer.test(ckpt_path=ckpt_path)\n\n # verify there are 2 test loaders\n assert len(trainer.test_dataloaders) == 2, \\\n 'Multiple test_dataloaders not initiated properly'\n\n # make sure predictions are good for each test set\n for dataloader in trainer.test_dataloaders:\n tpipes.run_prediction(dataloader, trainer.model)\n\n # run the test method\n trainer.test(ckpt_path=ckpt_path)\n\n\ndef test_train_dataloader_passed_to_fit(tmpdir):\n \"\"\"Verify that train dataloader can be passed to fit \"\"\"\n\n # only train passed to fit\n model = EvalModelTemplate()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True))\n result = trainer.fit(model, **fit_options)\n\n assert result == 1\n\n\ndef test_train_val_dataloaders_passed_to_fit(tmpdir):\n \"\"\" Verify that train & val dataloader can be passed to fit \"\"\"\n\n # train, val passed to fit\n model = EvalModelTemplate()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True),\n val_dataloaders=model.dataloader(train=False))\n\n result = trainer.fit(model, **fit_options)\n assert result == 1\n assert len(trainer.val_dataloaders) == 1, \\\n f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n\n\[email protected]('ckpt_path', [None, 'best', 'specific'])\ndef test_all_dataloaders_passed_to_fit(tmpdir, ckpt_path):\n \"\"\"Verify train, val & test dataloader(s) can be passed to fit and test method\"\"\"\n\n model = EvalModelTemplate()\n\n # train, val and test passed to fit\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True),\n val_dataloaders=model.dataloader(train=False))\n result = trainer.fit(model, **fit_options)\n\n if ckpt_path == 'specific':\n ckpt_path = trainer.checkpoint_callback.best_model_path\n test_options = dict(test_dataloaders=model.dataloader(train=False),\n ckpt_path=ckpt_path)\n trainer.test(**test_options)\n\n assert result == 1\n assert len(trainer.val_dataloaders) == 1, \\\n f'val_dataloaders` not initiated properly, got 
{trainer.val_dataloaders}'\n assert len(trainer.test_dataloaders) == 1, \\\n f'test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'\n\n\[email protected]('ckpt_path', [None, 'best', 'specific'])\ndef test_multiple_dataloaders_passed_to_fit(tmpdir, ckpt_path):\n \"\"\"Verify that multiple val & test dataloaders can be passed to fit.\"\"\"\n\n model = EvalModelTemplate()\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n model.test_step = model.test_step__multiple_dataloaders\n\n # train, multiple val and multiple test passed to fit\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True),\n val_dataloaders=[model.dataloader(train=False),\n model.dataloader(train=False)])\n trainer.fit(model, **fit_options)\n if ckpt_path == 'specific':\n ckpt_path = trainer.checkpoint_callback.best_model_path\n test_options = dict(test_dataloaders=[model.dataloader(train=False),\n model.dataloader(train=False)],\n ckpt_path=ckpt_path)\n trainer.test(**test_options)\n\n assert len(trainer.val_dataloaders) == 2, \\\n f'Multiple `val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n assert len(trainer.test_dataloaders) == 2, \\\n f'Multiple `test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'\n\n\[email protected](['limit_train_batches', 'limit_val_batches', 'limit_test_batches'], [\n pytest.param(0.0, 0.0, 0.0),\n pytest.param(1.0, 1.0, 1.0),\n])\ndef test_inf_dataloaders_with_limit_percent_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):\n \"\"\"Verify inf train, val & test dataloaders (e.g. IterableDataset) passed with batch limit in percent\"\"\"\n model = EvalModelTemplate()\n model.train_dataloader = model.train_dataloader__infinite\n model.val_dataloader = model.val_dataloader__infinite\n model.test_dataloader = model.test_dataloader__infinite\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=limit_train_batches,\n limit_val_batches=limit_val_batches,\n limit_test_batches=limit_test_batches,\n )\n\n results = trainer.fit(model)\n assert results == 1\n assert trainer.num_training_batches == (0 if limit_train_batches == 0.0 else float('inf'))\n assert trainer.num_val_batches[0] == (0 if limit_val_batches == 0.0 else float('inf'))\n\n trainer.test(ckpt_path=None)\n assert trainer.num_test_batches[0] == (0 if limit_test_batches == 0.0 else float('inf'))\n\n\[email protected](['limit_train_batches', 'limit_val_batches', 'limit_test_batches'], [\n pytest.param(0, 0, 0),\n pytest.param(10, 10, 10),\n])\ndef test_inf_dataloaders_with_limit_num_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):\n \"\"\"Verify inf train, val & test dataloaders (e.g. 
IterableDataset) passed with batch limit as number\"\"\"\n model = EvalModelTemplate()\n model.train_dataloader = model.train_dataloader__infinite\n model.val_dataloader = model.val_dataloader__infinite\n model.test_dataloader = model.test_dataloader__infinite\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=limit_train_batches,\n limit_val_batches=limit_val_batches,\n limit_test_batches=limit_test_batches,\n )\n\n results = trainer.fit(model)\n assert results\n assert trainer.num_training_batches == limit_train_batches\n assert trainer.num_val_batches[0] == limit_val_batches\n\n trainer.test(ckpt_path=None)\n assert trainer.num_test_batches[0] == limit_test_batches\n\n\[email protected](\n ['limit_train_batches', 'limit_val_batches', 'limit_test_batches'],\n [\n pytest.param(0.0, 0.0, 0.0),\n pytest.param(0, 0, 0.5),\n pytest.param(1.0, 1.0, 1.0),\n pytest.param(0.2, 0.4, 0.4),\n ]\n)\ndef test_dataloaders_with_limit_percent_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):\n \"\"\"Verify num_batches for train, val & test dataloaders passed with batch limit in percent\"\"\"\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__multiple_mixed_length\n model.test_dataloader = model.test_dataloader__multiple_mixed_length\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n model.test_step = model.test_step__multiple_dataloaders\n model.test_epoch_end = model.test_epoch_end__multiple_dataloaders\n\n # train, multiple val and multiple test passed with percent_check\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=limit_train_batches,\n limit_val_batches=limit_val_batches,\n limit_test_batches=limit_test_batches,\n )\n trainer.fit(model)\n expected_train_batches = int(len(trainer.train_dataloader) * limit_train_batches)\n expected_val_batches = [\n int(len(dataloader) * limit_val_batches) for dataloader in trainer.val_dataloaders\n ]\n assert trainer.num_training_batches == expected_train_batches\n assert trainer.num_val_batches == expected_val_batches\n\n trainer.test(ckpt_path=None)\n expected_test_batches = [\n int(len(dataloader) * limit_test_batches) for dataloader in trainer.test_dataloaders\n ]\n assert trainer.num_test_batches == expected_test_batches\n\n\[email protected](\n ['limit_train_batches', 'limit_val_batches', 'limit_test_batches'],\n [\n pytest.param(0, 0, 0),\n pytest.param(1, 2, 3),\n pytest.param(1, 2, 1e50),\n ]\n)\[email protected](os.environ, {\"PL_DEV_DEBUG\": \"1\"})\ndef test_dataloaders_with_limit_num_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):\n \"\"\"Verify num_batches for train, val & test dataloaders passed with batch limit as number\"\"\"\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__multiple_mixed_length\n model.test_dataloader = model.test_dataloader__multiple_mixed_length\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n model.test_step = model.test_step__multiple_dataloaders\n model.test_epoch_end = model.test_epoch_end__multiple_dataloaders\n\n # train, multiple val and multiple test passed with percent_check\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=limit_train_batches,\n limit_val_batches=limit_val_batches,\n 
limit_test_batches=limit_test_batches,\n )\n trainer.fit(model)\n\n # -------------------------------------------\n # MAKE SURE THE TRAINER SET THE CORRECT VALUES\n # -------------------------------------------\n assert trainer.num_training_batches == limit_train_batches\n assert trainer.num_val_batches == [limit_val_batches] * len(trainer.val_dataloaders)\n trainer.test(ckpt_path=None)\n\n # when the limit is greater than the number of test batches it should be the num in loaders\n test_dataloader_lengths = [len(x) for x in model.test_dataloader()]\n if limit_test_batches > 1e10:\n assert trainer.num_test_batches == test_dataloader_lengths\n else:\n assert trainer.num_test_batches == [limit_test_batches] * len(trainer.test_dataloaders)\n\n # -------------------------------------------\n # make sure we actually saw the expected num of batches\n # -------------------------------------------\n num_val_dataloaders = len(model.val_dataloader())\n num_test_dataloaders = len(model.test_dataloader())\n if limit_train_batches > 0:\n\n # make sure val batches are as expected\n assert len(trainer.dev_debugger.num_seen_val_check_batches) == num_val_dataloaders\n for dataloader_idx, num_batches in trainer.dev_debugger.num_seen_val_check_batches.items():\n assert num_batches == limit_val_batches\n\n # make sure test batches are as expected\n assert len(trainer.dev_debugger.num_seen_test_check_batches) == num_test_dataloaders\n for dataloader_idx, num_batches in trainer.dev_debugger.num_seen_test_check_batches.items():\n if limit_test_batches > 1e10:\n assert num_batches == test_dataloader_lengths[dataloader_idx]\n else:\n assert num_batches == limit_test_batches\n\n\[email protected](os.environ, {\"PL_DEV_DEBUG\": \"1\"})\ndef test_dataloaders_with_fast_dev_run(tmpdir):\n \"\"\"Verify num_batches for train, val & test dataloaders passed with fast_dev_run = True\"\"\"\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__multiple_mixed_length\n model.test_dataloader = model.test_dataloader__multiple_mixed_length\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n model.test_step = model.test_step__multiple_dataloaders\n model.test_epoch_end = model.test_epoch_end__multiple_dataloaders\n\n # train, multiple val and multiple test dataloaders passed with fast_dev_run = True\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=2,\n fast_dev_run=True,\n )\n assert trainer.max_epochs == 1\n assert trainer.num_sanity_val_steps == 0\n\n trainer.fit(model)\n assert not trainer.disable_validation\n assert trainer.num_training_batches == 1\n assert trainer.num_val_batches == [1] * len(trainer.val_dataloaders)\n\n trainer.test(ckpt_path=None)\n assert trainer.num_test_batches == [1] * len(trainer.test_dataloaders)\n\n # verify sanity check batches match as expected\n num_val_dataloaders = len(model.val_dataloader())\n assert trainer.dev_debugger.num_seen_sanity_check_batches == trainer.num_sanity_val_steps * num_val_dataloaders\n\n\[email protected]('ckpt_path', [None, 'best', 'specific'])\ndef test_mixing_of_dataloader_options(tmpdir, ckpt_path):\n \"\"\"Verify that dataloaders can be passed to fit\"\"\"\n\n model = EvalModelTemplate()\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n results = trainer.fit(model, 
val_dataloaders=model.dataloader(train=False))\n    assert results\n\n    # fit model\n    trainer = Trainer(**trainer_options)\n    results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))\n    assert results\n    if ckpt_path == 'specific':\n        ckpt_path = trainer.checkpoint_callback.best_model_path\n    trainer.test(test_dataloaders=model.dataloader(train=False), ckpt_path=ckpt_path)\n\n    assert len(trainer.val_dataloaders) == 1, \\\n        f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n    assert len(trainer.test_dataloaders) == 1, \\\n        f'`test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'\n\n\ndef test_train_inf_dataloader_error(tmpdir):\n    \"\"\"Test inf train data loader (e.g. IterableDataset)\"\"\"\n    model = EvalModelTemplate()\n    model.train_dataloader = model.train_dataloader__infinite\n\n    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_check_interval=0.5)\n\n    with pytest.raises(MisconfigurationException, match='using an IterableDataset'):\n        trainer.fit(model)\n\n\ndef test_val_inf_dataloader_error(tmpdir):\n    \"\"\"Test inf val data loader (e.g. IterableDataset)\"\"\"\n    model = EvalModelTemplate()\n    model.val_dataloader = model.val_dataloader__infinite\n\n    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.5)\n\n    with pytest.raises(MisconfigurationException, match='using an IterableDataset'):\n        trainer.fit(model)\n\n\ndef test_test_inf_dataloader_error(tmpdir):\n    \"\"\"Test inf test data loader (e.g. IterableDataset)\"\"\"\n    model = EvalModelTemplate()\n    model.test_dataloader = model.test_dataloader__infinite\n\n    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_test_batches=0.5)\n\n    with pytest.raises(MisconfigurationException, match='using an IterableDataset'):\n        trainer.test(model)\n\n\[email protected]('check_interval', [50, 1.0])\ndef test_inf_train_dataloader(tmpdir, check_interval):\n    \"\"\"Test inf train data loader (e.g. IterableDataset)\"\"\"\n\n    model = EvalModelTemplate()\n    model.train_dataloader = model.train_dataloader__infinite\n\n    trainer = Trainer(\n        default_root_dir=tmpdir,\n        max_epochs=1,\n        val_check_interval=check_interval,\n    )\n    result = trainer.fit(model)\n    # verify training completed\n    assert result == 1\n\n\[email protected]('check_interval', [1.0])\ndef test_inf_val_dataloader(tmpdir, check_interval):\n    \"\"\"Test inf val data loader (e.g. 
IterableDataset)\"\"\"\n\n    model = EvalModelTemplate()\n    model.val_dataloader = model.val_dataloader__infinite\n\n    # logger file to get meta\n    trainer = Trainer(\n        default_root_dir=tmpdir,\n        max_epochs=1,\n        val_check_interval=check_interval,\n    )\n    result = trainer.fit(model)\n\n    # verify training completed\n    assert result == 1\n\n\ndef test_error_on_zero_len_dataloader(tmpdir):\n    \"\"\" Test that error is raised if a zero-length dataloader is defined \"\"\"\n\n    model = EvalModelTemplate()\n    model.train_dataloader = model.train_dataloader__zero_length\n\n    # fit model\n    with pytest.raises(ValueError):\n        trainer = Trainer(\n            default_root_dir=tmpdir,\n            max_epochs=1,\n            limit_train_batches=0.1,\n            limit_val_batches=0.1,\n            limit_test_batches=0.1,\n        )\n        trainer.fit(model)\n\n\[email protected](platform.system() == 'Windows', reason='Does not apply to Windows platform.')\[email protected]('ckpt_path', [None, 'best', 'specific'])\n@patch('pytorch_lightning.trainer.data_loading.multiprocessing.cpu_count', return_value=4)\ndef test_warning_with_few_workers(mock, tmpdir, ckpt_path):\n    \"\"\" Test that a warning is raised if a dataloader with too few workers is used \"\"\"\n\n    model = EvalModelTemplate()\n\n    # logger file to get meta\n    train_dl = model.dataloader(train=True)\n    train_dl.num_workers = 0\n\n    val_dl = model.dataloader(train=False)\n    val_dl.num_workers = 0\n\n    test_dl = model.dataloader(train=False)\n    test_dl.num_workers = 0\n\n    fit_options = dict(train_dataloader=train_dl,\n                       val_dataloaders=val_dl)\n    trainer = Trainer(\n        default_root_dir=tmpdir,\n        max_epochs=1,\n        limit_val_batches=0.1,\n        limit_train_batches=0.2,\n    )\n\n    # fit model\n    with pytest.warns(\n        UserWarning, match='The dataloader, train dataloader, does not have many workers which may be a bottleneck.'\n    ):\n        trainer.fit(model, **fit_options)\n\n    with pytest.warns(\n        UserWarning, match='The dataloader, val dataloader 0, does not have many workers which may be a bottleneck.'\n    ):\n        trainer.fit(model, **fit_options)\n\n    if ckpt_path == 'specific':\n        ckpt_path = trainer.checkpoint_callback.best_model_path\n    test_options = dict(test_dataloaders=test_dl, ckpt_path=ckpt_path)\n    with pytest.warns(\n        UserWarning, match='The dataloader, test dataloader 0, does not have many workers which may be a bottleneck.'\n    ):\n        trainer.test(**test_options)\n\n\[email protected](\n    LooseVersion(torch.__version__) < LooseVersion(\"1.4.0\"),\n    reason=\"IterableDataset with __len__ before 1.4 raises\",\n)\ndef test_warning_with_iterable_dataset_and_len(tmpdir):\n    \"\"\" Tests that a warning message is shown when an IterableDataset defines `__len__`. 
\"\"\"\n model = EvalModelTemplate()\n original_dataset = model.train_dataloader().dataset\n\n class IterableWithLen(IterableDataset):\n\n def __iter__(self):\n return iter(original_dataset)\n\n def __len__(self):\n return len(original_dataset)\n\n dataloader = DataLoader(IterableWithLen(), batch_size=16)\n assert has_len(dataloader)\n assert has_iterable_dataset(dataloader)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_steps=3,\n )\n with pytest.warns(UserWarning, match='Your `IterableDataset` has `__len__` defined.'):\n trainer.fit(model, train_dataloader=dataloader, val_dataloaders=[dataloader])\n with pytest.warns(UserWarning, match='Your `IterableDataset` has `__len__` defined.'):\n trainer.test(model, test_dataloaders=[dataloader])\n\n\[email protected](torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')\ndef test_dataloader_reinit_for_subclass(tmpdir):\n\n class CustomDataLoader(torch.utils.data.DataLoader):\n def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,\n batch_sampler=None, num_workers=0, collate_fn=None,\n pin_memory=False, drop_last=False, timeout=0,\n worker_init_fn=None, dummy_kwarg=None, **kwargs):\n super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n num_workers, collate_fn, pin_memory, drop_last, timeout,\n worker_init_fn)\n\n self.dummy_kwarg = dummy_kwarg\n\n trainer = Trainer(\n gpus=[0, 1],\n num_nodes=1,\n distributed_backend='ddp_spawn',\n default_root_dir=tmpdir,\n )\n\n class CustomDummyObj:\n sampler = None\n\n result = trainer.auto_add_sampler(CustomDummyObj(), shuffle=True)\n assert isinstance(result, CustomDummyObj), \"Wrongly reinstantiated data loader\"\n\n dataset = list(range(1000))\n result = trainer.auto_add_sampler(CustomDataLoader(dataset), shuffle=True)\n assert isinstance(result, torch.utils.data.DataLoader)\n assert isinstance(result, CustomDataLoader)\n assert hasattr(result, 'dummy_kwarg')\n\n # Shuffled DataLoader should also work\n result = trainer.auto_add_sampler(CustomDataLoader(list(range(1000)), shuffle=True), shuffle=True)\n assert isinstance(result, torch.utils.data.DataLoader)\n assert isinstance(result, CustomDataLoader)\n assert hasattr(result, 'dummy_kwarg')\n\n class CustomSampler(torch.utils.data.Sampler):\n pass\n\n # Should raise an error if existing sampler is being replaced\n with pytest.raises(MisconfigurationException, match='DistributedSampler'):\n trainer.auto_add_sampler(\n CustomDataLoader(list(range(1000)), sampler=CustomSampler(list(range(1000)))), shuffle=True)\n\n\nclass DistribSamplerCallback(Callback):\n\n def on_train_start(self, trainer, pl_module):\n train_sampler = trainer.train_dataloader.sampler\n assert isinstance(train_sampler, DistributedSampler)\n assert train_sampler.shuffle\n\n def on_validation_start(self, trainer, pl_module):\n val_sampler = trainer.val_dataloaders[0].sampler\n assert isinstance(val_sampler, DistributedSampler)\n assert not val_sampler.shuffle\n\n def on_test_start(self, trainer, pl_module):\n test_sampler = trainer.test_dataloaders[0].sampler\n assert isinstance(test_sampler, DistributedSampler)\n assert not test_sampler.shuffle\n\n\[email protected](platform.system() == 'Windows', reason='Does not apply to Windows platform.')\[email protected](torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')\ndef test_dataloader_distributed_sampler(tmpdir):\n \"\"\" Test DistributedSampler and it's arguments for DDP backend \"\"\"\n\n model = EvalModelTemplate()\n trainer = Trainer(\n gpus=[0, 1],\n 
num_nodes=1,\n distributed_backend='ddp_spawn',\n default_root_dir=tmpdir,\n max_steps=1,\n callbacks=[DistribSamplerCallback()]\n )\n trainer.fit(model)\n trainer.test(ckpt_path=None)\n\n\nclass ModelWithDataLoaderDistributedSampler(EvalModelTemplate):\n\n def train_dataloader(self):\n dataloader = super().train_dataloader()\n dist_sampler = DistributedSampler(dataloader.dataset, shuffle=True)\n return DataLoader(\n dataloader.dataset,\n batch_size=self.batch_size,\n drop_last=False,\n sampler=dist_sampler,\n shuffle=False\n )\n\n\[email protected](platform.system() == 'Windows', reason='Does not apply to Windows platform.')\[email protected](torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')\ndef test_dataloader_distributed_sampler_already_attached(tmpdir):\n \"\"\" Test DistributedSampler and it's arguments for DDP backend when DistSampler already included on dataloader \"\"\"\n\n model = ModelWithDataLoaderDistributedSampler()\n trainer = Trainer(\n gpus=[0, 1],\n num_nodes=1,\n distributed_backend='ddp_spawn',\n default_root_dir=tmpdir,\n max_steps=100,\n callbacks=[DistribSamplerCallback()],\n replace_sampler_ddp=True,\n )\n result = trainer.fit(model)\n assert result == 1, \"DDP Training failed\"\n\n\[email protected](torch.cuda.device_count() < 3, reason='Test requires multiple GPUs')\ndef test_batch_size_smaller_than_num_gpus(tmpdir):\n # we need at least 3 gpus for this test\n num_gpus = 3\n batch_size = 3\n\n class CurrentTestModel(EvalModelTemplate):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # batch norm doesn't work with batch size 1, we replace it\n self.c_d1_bn = torch.nn.ReLU()\n\n def training_step(self, *args, **kwargs):\n output = super().training_step(*args, **kwargs)\n loss = output['loss']\n # we make sure to add some metrics to the output dict,\n # this is essential for this test\n output['progress_bar'] = {'train_loss': loss}\n return output\n\n def train_dataloader(self):\n dataloader = super().train_dataloader()\n # construct a dataset with a size that is not divisible by num_gpus\n # therefore the last batch will have a size < num_gpus\n size = num_gpus * batch_size + (num_gpus - 1)\n dataset = Subset(dataloader.dataset, range(size))\n dataloader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n drop_last=False,\n )\n return dataloader\n\n hparams = EvalModelTemplate.get_default_hparams()\n hparams['batch_size'] = batch_size\n model = CurrentTestModel(**hparams)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=0.1,\n limit_val_batches=0,\n gpus=num_gpus,\n )\n\n # we expect the reduction for the metrics also to happen on the last batch\n # where we will get fewer metrics than gpus\n result = trainer.fit(model)\n assert 1 == result\n\n\[email protected]('check_interval', [1.0])\ndef test_val_dataloader_not_implemented_error(tmpdir, check_interval):\n \"\"\"Test not_implemented_error data loader (e.g. IterableDataset)\"\"\"\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__not_implemented_error\n\n # logger file to get meta\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_steps=5,\n max_epochs=1,\n val_check_interval=check_interval,\n )\n result = trainer.fit(model)\n # verify training completed\n assert result == 1\n\n\[email protected]('check_interval', [50, 1.0])\ndef test_train_dataloader_not_implemented_error(tmpdir, check_interval):\n \"\"\"Test not_implemented_error train data loader (e.g. 
IterableDataset)\"\"\"\n\n    model = EvalModelTemplate()\n    model.train_dataloader = model.train_dataloader__not_implemented_error\n    model.val_dataloader = model.val_dataloader__not_implemented_error\n\n    trainer = Trainer(\n        default_root_dir=tmpdir,\n        max_steps=5,\n        max_epochs=1,\n        val_check_interval=check_interval\n    )\n    result = trainer.fit(model)\n    # verify training completed\n    assert result == 1\n\n\ndef test_train_dataloader_not_implemented_error_failed(tmpdir):\n    \"\"\"Test not_implemented_error train data loader (e.g. IterableDataset)\"\"\"\n    model = EvalModelTemplate()\n    model.train_dataloader = model.train_dataloader__not_implemented_error\n\n    trainer = Trainer(default_root_dir=tmpdir, max_steps=5, max_epochs=1, val_check_interval=0.5)\n\n    with pytest.raises(MisconfigurationException, match='using an IterableDataset'):\n        trainer.fit(model)\n\n\ndef test_val_dataloader_not_implemented_error_failed(tmpdir):\n    \"\"\"Test not_implemented_error val data loader (e.g. IterableDataset)\"\"\"\n    model = EvalModelTemplate()\n    model.val_dataloader = model.val_dataloader__not_implemented_error\n\n    trainer = Trainer(default_root_dir=tmpdir, max_steps=5, max_epochs=1, limit_val_batches=0.5)\n\n    with pytest.raises(MisconfigurationException, match='using an IterableDataset'):\n        trainer.fit(model)\n\n\ndef test_test_dataloader_not_implemented_error_failed(tmpdir):\n    \"\"\"Test not_implemented_error test data loader (e.g. IterableDataset)\"\"\"\n    model = EvalModelTemplate()\n    model.test_dataloader = model.test_dataloader__not_implemented_error\n\n    trainer = Trainer(default_root_dir=tmpdir, max_steps=5, max_epochs=1, limit_test_batches=0.5)\n\n    with pytest.raises(MisconfigurationException, match='using an IterableDataset'):\n        trainer.test(model)\n\n\[email protected](os.environ, {\"PL_DEV_DEBUG\": \"1\"})\ndef test_dataloaders_load_only_once(tmpdir):\n\n    model = EvalModelTemplate()\n\n    # logger file to get meta\n    trainer = Trainer(\n        default_root_dir=tmpdir,\n        limit_train_batches=0.3,\n        limit_val_batches=0.3,\n        max_epochs=3,\n    )\n    result = trainer.fit(model)\n\n    assert len(trainer.dev_debugger.val_dataloader_calls) == 1\n    assert len(trainer.dev_debugger.test_dataloader_calls) == 0\n    assert len(trainer.dev_debugger.train_dataloader_calls) == 1\n\n    # verify the sequence\n    calls = trainer.dev_debugger.dataloader_sequence_calls\n    expected_sequence = [\n        'val_dataloader',\n        'train_dataloader',\n    ]\n    for call, expected in zip(calls, expected_sequence):\n        assert call['name'] == expected\n\n\[email protected](os.environ, {\"PL_DEV_DEBUG\": \"1\"})\ndef test_dataloaders_load_only_once_val_interval(tmpdir):\n\n    model = EvalModelTemplate()\n\n    # logger file to get meta\n    trainer = Trainer(\n        default_root_dir=tmpdir,\n        limit_train_batches=10,\n        limit_val_batches=10,\n        val_check_interval=0.3,\n        reload_dataloaders_every_epoch=True,\n        max_epochs=3,\n    )\n    result = trainer.fit(model)\n\n    trainer.test()\n\n    assert len(trainer.dev_debugger.val_dataloader_calls) == 10\n    assert len(trainer.dev_debugger.test_dataloader_calls) == 1\n    assert len(trainer.dev_debugger.train_dataloader_calls) == 3\n\n    # verify the sequence\n    calls = trainer.dev_debugger.dataloader_sequence_calls\n    expected_sequence = [\n        'val_dataloader',\n        'train_dataloader',\n        'val_dataloader',\n        'val_dataloader',\n        'val_dataloader',\n        'train_dataloader',\n        'val_dataloader',\n        'val_dataloader',\n        'val_dataloader',\n        'train_dataloader',\n        'val_dataloader',\n        'val_dataloader',\n        'val_dataloader',\n        'test_dataloader'\n    ]\n    for call, expected in zip(calls, 
expected_sequence):\n assert call['name'] == expected\n\n\[email protected](os.environ, {\"PL_DEV_DEBUG\": \"1\"})\ndef test_dataloaders_load_only_once_no_sanity_check(tmpdir):\n\n model = EvalModelTemplate()\n\n # logger file to get meta\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=0.3,\n limit_val_batches=0.3,\n num_sanity_val_steps=0,\n max_epochs=3,\n )\n result = trainer.fit(model)\n\n assert len(trainer.dev_debugger.val_dataloader_calls) == 1\n assert len(trainer.dev_debugger.test_dataloader_calls) == 0\n assert len(trainer.dev_debugger.train_dataloader_calls) == 1\n\n # verify the sequence\n calls = trainer.dev_debugger.dataloader_sequence_calls\n expected_sequence = [\n 'train_dataloader',\n 'val_dataloader',\n ]\n for call, expected in zip(calls, expected_sequence):\n assert call['name'] == expected\n\n\[email protected](os.environ, {\"PL_DEV_DEBUG\": \"1\"})\ndef test_dataloaders_load_every_epoch(tmpdir):\n\n model = EvalModelTemplate()\n\n # logger file to get meta\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=0.3,\n limit_val_batches=0.3,\n reload_dataloaders_every_epoch=True,\n max_epochs=3,\n )\n result = trainer.fit(model)\n\n trainer.test()\n\n assert len(trainer.dev_debugger.val_dataloader_calls) == 4\n assert len(trainer.dev_debugger.train_dataloader_calls) == 3\n assert len(trainer.dev_debugger.test_dataloader_calls) == 1\n\n # verify the sequence\n calls = trainer.dev_debugger.dataloader_sequence_calls\n expected_sequence = [\n 'val_dataloader',\n 'train_dataloader',\n 'val_dataloader',\n 'train_dataloader',\n 'val_dataloader',\n 'train_dataloader',\n 'val_dataloader',\n 'test_dataloader'\n ]\n for call, expected in zip(calls, expected_sequence):\n assert call['name'] == expected\n\n\[email protected](os.environ, {\"PL_DEV_DEBUG\": \"1\"})\ndef test_dataloaders_load_every_epoch_no_sanity_check(tmpdir):\n\n model = EvalModelTemplate()\n\n # logger file to get meta\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=0.3,\n limit_val_batches=0.3,\n num_sanity_val_steps=0,\n reload_dataloaders_every_epoch=True,\n max_epochs=3,\n )\n result = trainer.fit(model)\n\n trainer.test()\n\n assert len(trainer.dev_debugger.val_dataloader_calls) == 3\n assert len(trainer.dev_debugger.train_dataloader_calls) == 3\n assert len(trainer.dev_debugger.test_dataloader_calls) == 1\n\n # verify the sequence\n calls = trainer.dev_debugger.dataloader_sequence_calls\n expected_sequence = [\n 'train_dataloader',\n 'val_dataloader',\n 'train_dataloader',\n 'val_dataloader',\n 'train_dataloader',\n 'val_dataloader',\n 'test_dataloader'\n ]\n for call, expected in zip(calls, expected_sequence):\n assert call['name'] == expected\n\n\[email protected](os.environ, {\"PL_DEV_DEBUG\": \"1\"})\ndef test_dataloaders_load_only_once_passed_loaders(tmpdir):\n\n model = EvalModelTemplate()\n train_loader = model.train_dataloader()\n model.train_dataloader = None\n val_loader = model.val_dataloader()\n model.val_dataloader = None\n test_loader = model.test_dataloader()\n model.test_dataloader = None\n\n # logger file to get meta\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=0.3,\n limit_val_batches=0.3,\n max_epochs=3,\n )\n result = trainer.fit(model, train_loader, val_loader)\n\n trainer.test(test_dataloaders=test_loader)\n\n assert len(trainer.dev_debugger.val_dataloader_calls) == 1\n assert len(trainer.dev_debugger.test_dataloader_calls) == 1\n assert len(trainer.dev_debugger.train_dataloader_calls) == 1\n\n # verify 
the sequence\n calls = trainer.dev_debugger.dataloader_sequence_calls\n expected_sequence = [\n 'val_dataloader',\n 'train_dataloader',\n ]\n for call, expected in zip(calls, expected_sequence):\n assert call['name'] == expected\n",
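The test file above repeatedly exercises one pattern: building the dataloaders outside the `LightningModule` and handing them directly to `Trainer.fit`/`Trainer.test`, which overrides any `*_dataloader()` hooks on the model. A minimal sketch of that usage, assuming the same era of the pytorch-lightning API these tests target (signatures changed in later releases); `LitModel` is a hypothetical stand-in for `EvalModelTemplate`:

```python
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader, TensorDataset


class LitModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(10, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return {'loss': torch.nn.functional.mse_loss(self.layer(x), y)}

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)


dataset = TensorDataset(torch.randn(64, 10), torch.randn(64, 1))
train_loader = DataLoader(dataset, batch_size=8)

# Passing the loader to fit() is what tests like
# test_train_dataloader_passed_to_fit assert works.
trainer = pl.Trainer(max_epochs=1, limit_train_batches=0.2)
trainer.fit(LitModel(), train_dataloader=train_loader)
```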
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional, Sequence, Tuple\n\nimport torch\nfrom pytorch_lightning.metrics.functional.reduction import reduce\nfrom pytorch_lightning.metrics.utils import _check_same_shape\nfrom torch.nn import functional as F\n\n\ndef _gaussian_kernel(channel, kernel_size, sigma, device):\n def _gaussian(kernel_size, sigma, device):\n gauss = torch.arange(\n start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=torch.float32, device=device\n )\n gauss = torch.exp(-gauss.pow(2) / (2 * pow(sigma, 2)))\n return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)\n\n gaussian_kernel_x = _gaussian(kernel_size[0], sigma[0], device)\n gaussian_kernel_y = _gaussian(kernel_size[1], sigma[1], device)\n kernel = torch.matmul(gaussian_kernel_x.t(), gaussian_kernel_y)\n\n return kernel.expand(channel, 1, kernel_size[0], kernel_size[1])\n\n\ndef _ssim_update(\n preds: torch.Tensor,\n target: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n if preds.dtype != target.dtype:\n raise TypeError(\n \"Expected `preds` and `target` to have the same data type.\"\n f\" Got pred: {preds.dtype} and target: {target.dtype}.\"\n )\n _check_same_shape(preds, target)\n if len(preds.shape) != 4:\n raise ValueError(\n \"Expected `preds` and `target` to have BxCxHxW shape.\"\n f\" Got pred: {preds.shape} and target: {target.shape}.\"\n )\n return preds, target\n\n\ndef _ssim_compute(\n preds: torch.Tensor,\n target: torch.Tensor,\n kernel_size: Sequence[int] = (11, 11),\n sigma: Sequence[float] = (1.5, 1.5),\n reduction: str = \"elementwise_mean\",\n data_range: Optional[float] = None,\n k1: float = 0.01,\n k2: float = 0.03,\n):\n if len(kernel_size) != 2 or len(sigma) != 2:\n raise ValueError(\n \"Expected `kernel_size` and `sigma` to have the length of two.\"\n f\" Got kernel_size: {len(kernel_size)} and sigma: {len(sigma)}.\"\n )\n\n if any(x % 2 == 0 or x <= 0 for x in kernel_size):\n raise ValueError(f\"Expected `kernel_size` to have odd positive number. Got {kernel_size}.\")\n\n if any(y <= 0 for y in sigma):\n raise ValueError(f\"Expected `sigma` to have positive number. 
Got {sigma}.\")\n\n    if data_range is None:\n        data_range = max(preds.max() - preds.min(), target.max() - target.min())\n\n    c1 = pow(k1 * data_range, 2)\n    c2 = pow(k2 * data_range, 2)\n    device = preds.device\n\n    channel = preds.size(1)\n    kernel = _gaussian_kernel(channel, kernel_size, sigma, device)\n\n    input_list = torch.cat([preds, target, preds * preds, target * target, preds * target])  # (5 * B, C, H, W)\n    outputs = F.conv2d(input_list, kernel, groups=channel)\n    output_list = [outputs[x * preds.size(0): (x + 1) * preds.size(0)] for x in range(len(outputs))]\n\n    mu_pred_sq = output_list[0].pow(2)\n    mu_target_sq = output_list[1].pow(2)\n    mu_pred_target = output_list[0] * output_list[1]\n\n    sigma_pred_sq = output_list[2] - mu_pred_sq\n    sigma_target_sq = output_list[3] - mu_target_sq\n    sigma_pred_target = output_list[4] - mu_pred_target\n\n    upper = 2 * sigma_pred_target + c2\n    lower = sigma_pred_sq + sigma_target_sq + c2\n\n    ssim_idx = ((2 * mu_pred_target + c1) * upper) / ((mu_pred_sq + mu_target_sq + c1) * lower)\n\n    return reduce(ssim_idx, reduction)\n\n\ndef ssim(\n    preds: torch.Tensor,\n    target: torch.Tensor,\n    kernel_size: Sequence[int] = (11, 11),\n    sigma: Sequence[float] = (1.5, 1.5),\n    reduction: str = \"elementwise_mean\",\n    data_range: Optional[float] = None,\n    k1: float = 0.01,\n    k2: float = 0.03,\n) -> torch.Tensor:\n    \"\"\"\n    Computes Structural Similarity Index Measure\n\n    Args:\n        preds: estimated image\n        target: ground truth image\n        kernel_size: size of the gaussian kernel (default: (11, 11))\n        sigma: Standard deviation of the gaussian kernel (default: (1.5, 1.5))\n        reduction: a method to reduce metric score over labels.\n\n            - ``'elementwise_mean'``: takes the mean (default)\n            - ``'sum'``: takes the sum\n            - ``'none'``: no reduction will be applied\n\n        data_range: Range of the image. If ``None``, it is determined from the image (max - min)\n        k1: Parameter of SSIM. Default: 0.01\n        k2: Parameter of SSIM. Default: 0.03\n\n    Return:\n        Tensor with SSIM score\n\n    Example:\n        >>> preds = torch.rand([16, 1, 16, 16])\n        >>> target = preds * 0.75\n        >>> ssim(preds, target)\n        tensor(0.9219)\n    \"\"\"\n    preds, target = _ssim_update(preds, target)\n    return _ssim_compute(preds, target, kernel_size, sigma, reduction, data_range, k1, k2)\n"
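A quick sanity check of the `ssim` function above (a sketch; it assumes this module's `ssim` is in scope). With identical inputs, the mean and covariance terms in `ssim_idx` cancel pairwise, so the score is 1 up to floating point error, and fixing `data_range=1.0` pins the stability constants at c1 = (0.01 * 1.0)**2 = 1e-4 and c2 = (0.03 * 1.0)**2 = 9e-4 regardless of the batch statistics:

```python
import torch

preds = torch.rand(4, 3, 32, 32)

# Identical images: mu_pred == mu_target and the covariance terms match,
# so ssim_idx reduces to 1 everywhere (up to floating point error).
assert torch.allclose(ssim(preds, preds.clone()), torch.tensor(1.0), atol=1e-4)

# An explicit data_range decouples c1/c2 from the min/max of this batch.
score = ssim(preds, (preds * 0.9).clamp(0, 1), data_range=1.0)
print(score)  # scalar tensor, since reduction defaults to 'elementwise_mean'
```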
] | [
[
"torch.nn.ReLU",
"torch.cuda.device_count",
"torch.utils.data.dataloader.DataLoader",
"torch.utils.data.distributed.DistributedSampler"
],
[
"torch.nn.functional.conv2d",
"torch.arange",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shaandesai1/VIGN_ | [
"7687f1ecd1912a033136b75563868ce749c76336"
] | [
"models_long.py"
] | [
"\"\"\"\nAuthor: ****\ncode to build graph based models for VIGN\n\nSome aspects adopted from: https://github.com/steindoringi/Variational_Integrator_Networks/blob/master/models.py\n\"\"\"\nfrom graph_nets import modules\nfrom graph_nets import utils_tf\nimport sonnet as snt\nimport tensorflow as tf\nfrom utils import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\nimport tensorflow.keras as tfk\nfrom tensorflow_probability import distributions as tfd\nimport os\n\n\ndef create_loss_ops(true, predicted):\n    \"\"\"MSE loss\"\"\"\n    loss_ops = tf.reduce_mean((true - predicted) ** 2)\n    return loss_ops\n\n\ndef log_likelihood_y(y, y_rec, log_noise_var):\n    \"\"\" noise loss\"\"\"\n    noise_var = tf.nn.softplus(log_noise_var) * tf.ones_like(y_rec)\n    py = tfd.Normal(y_rec, noise_var)\n    log_py = py.log_prob(y)\n    log_py = tf.reduce_sum(log_py, [0])\n    log_lik = tf.reduce_mean(log_py)\n    return log_lik\n\n\ndef choose_integrator(method):\n    \"\"\"\n    returns an integrator (from utils) for the graph models (dgn/hogn/pgn)\n    args:\n        method (str): one of 'rk1'-'rk4' (Runge-Kutta) or 'vi1'-'vi4' (variational integrators)\n    \"\"\"\n    if method == 'rk1':\n        return rk1\n\n    elif method == 'rk2':\n        return rk2\n\n    elif method == 'rk3':\n        return rk3\n\n    elif method == 'rk4':\n        return rk4\n\n    elif method == 'vi1':\n        return vi1\n\n    elif method == 'vi2':\n        return vi2\n\n    elif method == 'vi3':\n        return vi3\n\n    elif method == 'vi4':\n        return vi4\n\n\ndef choose_integrator_nongraph(method):\n    \"\"\"\n    returns an integrator (from utils) for the non-graph models (dn/hnn/pnn)\n    args:\n        method (str): one of 'rk1'-'rk4' (Runge-Kutta) or 'vi1'-'vi4' (variational integrators)\n    \"\"\"\n    if method == 'rk1':\n        return rk1ng\n    elif method == 'rk2':\n        return rk2ng\n    elif method == 'rk3':\n        return rk3ng\n    elif method == 'rk4':\n        return rk4ng\n    elif method == 'vi1':\n        return vi1ng\n    elif method == 'vi2':\n        return vi2ng\n    elif method == 'vi3':\n        return vi3ng\n    elif method == 'vi4':\n        return vi4ng\n\n\nclass nongraph_model(object):\n\n    def __init__(self, sess, deriv_method, num_nodes, BS, integ_meth, expt_name, lr,\n                 noisy, spatial_dim, dt):\n        \"\"\"\n        Builds a tensorflow classic non-graph model object\n        Args:\n            sess (tf.session): instantiated session\n            deriv_method (str): one of 'dn', 'hnn' or 'pnn'\n            num_nodes (int): number of particles\n            BS (int): batch size\n            integ_meth (str): integrator name, one of 'rk1'-'rk4' or 'vi1'-'vi4'\n            expt_name (str): identifier for specific experiment\n            lr (float): learning rate\n            noisy (bool): flag for noisy data\n            spatial_dim (int): the dimension of state vector for 1 particle (e.g. 
2, [q,qdot] in spring system)\n dt (float): sampling rate\n \"\"\"\n self.sess = sess\n self.deriv_method = deriv_method\n self.num_nodes = num_nodes\n self.BS = BS\n self.BS_test = 1\n self.integ_method = integ_meth\n self.expt_name = expt_name\n self.lr = lr\n self.spatial_dim = spatial_dim\n self.dt = dt\n self.is_noisy = noisy\n self.log_noise_var = None\n if self.num_nodes == 1:\n self.activate_sub = False\n else:\n self.activate_sub = True\n self.output_plots = False\n self.M = tf.transpose(self.permutation_tensor(self.spatial_dim * self.num_nodes))\n self._build_net()\n\n def _build_net(self):\n \"\"\"\n initializes all tf placeholders/networks/losses\n \"\"\"\n\n if self.is_noisy:\n self.log_noise_var = tf.Variable([0.], dtype=tfk.backend.floatx())\n\n self.nonlin = tf.nn.tanh\n\n self.h1 = snt.Linear(output_size=256, use_bias=True, name='h1')\n self.h2 = snt.Linear(output_size=256, use_bias=True, name='h2')\n\n if self.deriv_method == 'dn':\n self.h3 = snt.Linear(output_size=self.spatial_dim * self.num_nodes, use_bias=False, name='h3')\n else:\n self.h3 = snt.Linear(output_size=1, use_bias=False, name='h3')\n\n self.mlp = snt.Sequential([\n self.h1,\n self.nonlin,\n self.h2,\n self.nonlin,\n self.h3\n ])\n\n self.input_ph = tf.compat.v1.placeholder(tf.float32, shape=[1, self.spatial_dim * self.num_nodes])\n self.test_ph = tf.compat.v1.placeholder(tf.float32, shape=[1, self.spatial_dim * self.num_nodes])\n self.ground_truth_ph = tf.compat.v1.placeholder(tf.float32, shape=[None, self.spatial_dim * self.num_nodes])\n\n integ = choose_integrator_nongraph(self.integ_method)\n\n if self.deriv_method == 'dn':\n self.next_step = self.future_pred(integ, self.deriv_fun_dn, self.input_ph, self.dt)\n self.test_next_step = integ(self.deriv_fun_dn, self.test_ph, self.dt)\n\n elif self.deriv_method == 'hnn':\n self.next_step = self.future_pred(integ, self.deriv_fun_hnn, self.input_ph, self.dt)\n self.test_next_step = integ(self.deriv_fun_hnn, self.test_ph, self.dt)\n\n elif self.deriv_method == 'pnn':\n self.next_step = self.future_pred(integ, self.deriv_fun_pnn, self.input_ph, self.dt)\n self.test_next_step = integ(self.deriv_fun_pnn, self.test_ph, self.dt)\n\n else:\n raise ValueError(\"the derivative generator is incorrect, should be dn,hnn or pn\")\n\n if self.is_noisy:\n self.loss_op_tr = -log_likelihood_y(self.next_step, self.ground_truth_ph, self.log_noise_var)\n else:\n self.loss_op_tr = self.create_loss_ops(self.next_step, self.ground_truth_ph)\n\n global_step = tf.Variable(0, trainable=False)\n rate = tf.compat.v1.train.exponential_decay(self.lr, global_step, 1000, 0.5, staircase=False)\n # tf.train.AdamW()\n optimizer = tf.train.AdamOptimizer(rate)\n self.step_op = optimizer.minimize(self.loss_op_tr, global_step=global_step)\n\n def future_pred(self, integ, deriv_fun, state_init, dt):\n accum = []\n\n q_init = state_init\n\n accum.append(q_init)\n\n for _ in range(self.BS):\n xtp1 = integ(deriv_fun, accum[-1], dt)\n\n accum.append(xtp1)\n\n yhat = tf.concat(accum, 0)\n return yhat[1:]\n\n def create_loss_ops(self, true, predicted):\n \"\"\"MSE loss\"\"\"\n loss_ops = tf.reduce_mean((true - predicted) ** 2)\n return loss_ops\n\n def deriv_fun_dn(self, xt):\n output_nodes = self.mlp(xt)\n return output_nodes\n\n def deriv_fun_hnn(self, xt):\n with tf.GradientTape() as g:\n g.watch(xt)\n output_nodes = self.mlp(xt)\n dH = g.gradient(output_nodes, xt)\n return tf.concat(\n [dH[:, int(self.spatial_dim * self.num_nodes / 2):], -dH[:, :int(self.spatial_dim * self.num_nodes / 2)]],\n 1) # 
tf.matmul(dH, self.M)\n\n    def deriv_fun_pnn(self, xt):\n        qvals = xt[:, :int(self.spatial_dim * self.num_nodes / 2)]\n        pvals = xt[:, int(self.spatial_dim * self.num_nodes / 2):]\n        with tf.GradientTape() as g:\n            g.watch(qvals)\n            output_nodes = self.mlp(qvals)\n        dH = g.gradient(output_nodes, qvals)\n        return tf.concat([pvals, -dH], 1)\n\n    def permutation_tensor(self, n):\n        M = None\n        M = tf.eye(n)\n        M = tf.concat([M[n // 2:], -M[:n // 2]], 0)\n        return M\n\n    def train_step(self, input_batch, true_batch):\n\n        train_feed = {self.input_ph: input_batch,\n                      self.ground_truth_ph: true_batch,\n                      }\n        train_ops = [self.loss_op_tr, self.next_step, self.step_op]\n        loss, next_pred, _ = self.sess.run(train_ops, feed_dict=train_feed)\n        return loss, next_pred\n\n    def valid_step(self, input_batch, true_batch):\n\n        train_feed = {self.input_ph: input_batch,\n                      self.ground_truth_ph: true_batch,\n                      }\n        train_ops = [self.loss_op_tr, self.next_step]\n        loss, next_pred = self.sess.run(train_ops, feed_dict=train_feed)\n\n        return loss, next_pred\n\n    def test_step(self, input_batch, true_batch, steps):\n        # figures relegated to jupyter notebook infengine\n        stored_states = [input_batch.astype(np.float32)]\n        for i in range(steps):\n            test_feed = {self.test_ph: stored_states[-1],\n                         }\n            test_ops = [self.test_next_step]\n\n            yhat = self.sess.run(test_ops, feed_dict=test_feed)\n            stored_states.append(yhat[0])\n\n        preds = tf.concat(stored_states, 0).eval(session=self.sess)\n\n        error = mean_squared_error(preds[1:, :], true_batch[:, :])\n\n        return error, preds[1:, :]\n\n\nclass graph_model(object):\n    \"\"\"\n    Builds a tensorflow graph model object\n    Args:\n        sess (tf.session): instantiated session\n        deriv_method (str): one of 'dgn', 'hogn' or 'pgn'\n        num_nodes (int): number of particles\n        BS (int): batch size\n        integ_meth (str): integrator name, one of 'rk1'-'rk4' or 'vi1'-'vi4'\n        expt_name (str): identifier for specific experiment\n        lr (float): learning rate\n        noisy (bool): flag for noisy data\n        spatial_dim (int): the dimension of state vector for 1 particle (e.g. 
2, [q,qdot] in spring system)\n        dt (float): sampling rate\n        eflag (bool): whether to use extra input in building graph (default=True)\n    \"\"\"\n\n    def __init__(self, sess, deriv_method, num_nodes, BS, integ_meth, expt_name, lr,\n                 noisy, spatial_dim, dt, eflag=True):\n\n        self.sess = sess\n        self.deriv_method = deriv_method\n        self.num_nodes = num_nodes\n        self.BS = BS\n        self.BS_test = 1\n        self.integ_method = integ_meth\n        self.expt_name = expt_name\n        self.lr = lr\n        self.spatial_dim = spatial_dim\n        self.dt = dt\n        self.eflag = eflag  # flag for extra node features (ks, ms), see base_graph\n        self.is_noisy = noisy\n        self.log_noise_var = None\n        if self.num_nodes == 1:\n            self.activate_sub = False\n        else:\n            self.activate_sub = False\n        self.output_plots = False\n        self._build_net()\n\n    def _build_net(self):\n        \"\"\"\n        initializes all tf placeholders/graph networks/losses\n        \"\"\"\n\n        if self.is_noisy:\n            self.log_noise_var = tf.Variable([0.], dtype=tfk.backend.floatx())\n\n        self.out_to_global = snt.Linear(output_size=1, use_bias=False, name='out_to_global')\n        self.out_to_node = snt.Linear(output_size=self.spatial_dim, use_bias=True, name='out_to_node')\n\n        self.graph_network = modules.GraphNetwork(\n            edge_model_fn=lambda: snt.nets.MLP([32, 32], activation=tf.nn.softplus, activate_final=True),\n            node_model_fn=lambda: snt.nets.MLP([32, 32], activation=tf.nn.softplus, activate_final=True),\n            global_model_fn=lambda: snt.nets.MLP([32, 32], activation=tf.nn.softplus, activate_final=True),\n        )\n\n        self.base_graph_tr = tf.compat.v1.placeholder(tf.float32,\n                                                      shape=[self.num_nodes * self.BS, self.spatial_dim])\n        self.ks_ph = tf.compat.v1.placeholder(tf.float32, shape=[self.BS, self.num_nodes])\n        self.ms_ph = tf.compat.v1.placeholder(tf.float32, shape=[self.BS, self.num_nodes])\n\n        self.true_dq_ph = tf.compat.v1.placeholder(tf.float32, shape=[None, self.spatial_dim])\n\n        self.test_graph_ph = tf.compat.v1.placeholder(tf.float32,\n                                                      shape=[self.num_nodes * self.BS_test, self.spatial_dim])\n        self.test_ks_ph = tf.compat.v1.placeholder(tf.float32, shape=[self.BS_test, self.num_nodes])\n        self.test_ms_ph = tf.compat.v1.placeholder(tf.float32, shape=[self.BS_test, self.num_nodes])\n\n        integ = choose_integrator(self.integ_method)\n\n        if self.deriv_method == 'dgn':\n            self.next_step = self.future_pred(integ, self.deriv_fun_dgn, self.base_graph_tr, self.ks_ph, self.ms_ph,\n                                              self.dt, self.BS,\n                                              self.num_nodes)\n            self.test_next_step = integ(self.deriv_fun_dgn, self.test_graph_ph, self.test_ks_ph, self.test_ms_ph,\n                                        self.dt, 1, self.num_nodes)\n        elif self.deriv_method == 'hogn':\n            self.next_step = self.future_pred(integ, self.deriv_fun_hogn, self.base_graph_tr, self.ks_ph, self.ms_ph,\n                                              self.dt, self.BS,\n                                              self.num_nodes)\n            self.test_next_step = integ(self.deriv_fun_hogn, self.test_graph_ph, self.test_ks_ph, self.test_ms_ph,\n                                        self.dt, 1, self.num_nodes)\n        elif self.deriv_method == 'pgn':\n            self.next_step = self.future_pred(integ, self.deriv_fun_pgn, self.base_graph_tr, self.ks_ph, self.ms_ph,\n                                              self.dt, self.BS,\n                                              self.num_nodes)\n            self.test_next_step = integ(self.deriv_fun_pgn, self.test_graph_ph, self.test_ks_ph, self.test_ms_ph,\n                                        self.dt, 1, self.num_nodes)\n        else:\n            raise ValueError(\"the derivative generator is incorrect, should be dgn,hogn or pgn\")\n\n        if self.is_noisy:\n            self.loss_op_tr = -log_likelihood_y(self.next_step, self.true_dq_ph, self.log_noise_var)\n        else:\n            self.loss_op_tr = self.create_loss_ops(self.next_step, self.true_dq_ph)\n\n        global_step = tf.Variable(0, trainable=False)\n        rate = self.lr  # tf.compat.v1.train.exponential_decay(self.lr, global_step, 10000, 0.5, 
staircase=False)\n optimizer = tf.train.AdamOptimizer(rate)\n self.step_op = optimizer.minimize(self.loss_op_tr, global_step=global_step)\n\n def future_pred(self, integ, deriv_fun, state_init, ks, ms, dt, bs, num_nodes):\n \"\"\"\n only used with long range rollout (i.e. neuralODE without adjoint method) - future step predictions\n \"\"\"\n accum = []\n\n q_init = state_init[:num_nodes]\n ks_init = ks[0]\n ms_init = ms[0]\n accum.append(q_init)\n\n for _ in range(bs):\n xtp1 = integ(deriv_fun, accum[-1], ks_init, ms_init, dt, 1, num_nodes)\n accum.append(xtp1)\n\n yhat = tf.concat(accum, 0)\n return yhat[num_nodes:]\n\n def create_loss_ops(self, true, predicted):\n \"\"\"MSE loss\"\"\"\n loss_ops = tf.reduce_mean((true - predicted) ** 2)\n return loss_ops\n\n def base_graph(self, input_features, ks, ms, num_nodes):\n \"\"\"builds graph for every group of particles\"\"\"\n # Node features for graph 0.\n if self.eflag:\n nodes_0 = tf.concat([input_features, tf.reshape(ms, [num_nodes, 1]), tf.reshape(ks, [num_nodes, 1])], 1)\n else:\n nodes_0 = input_features\n\n senders_0 = []\n receivers_0 = []\n # edges_0 = []\n an = np.arange(0, num_nodes, 1)\n for i in range(len(an)):\n for j in range(i + 1, len(an)):\n senders_0.append(i)\n senders_0.append(j)\n receivers_0.append(j)\n receivers_0.append(i)\n\n data_dict_0 = {\n \"nodes\": nodes_0,\n \"senders\": senders_0,\n \"receivers\": receivers_0\n }\n\n return data_dict_0\n\n def deriv_fun_dgn(self, xt, ks, ms, bs, n_nodes):\n if self.activate_sub == True:\n sub_vecs = self.sub_mean(xt, bs)\n else:\n sub_vecs = xt\n\n input_vec = tf.concat(sub_vecs, 0)\n vec2g = [self.base_graph(input_vec[n_nodes * i:n_nodes * (i + 1)], ks[i], ms[i], n_nodes) for i in range(bs)]\n vec2g = utils_tf.data_dicts_to_graphs_tuple(vec2g)\n vec2g = utils_tf.set_zero_global_features(vec2g, 1)\n vec2g = utils_tf.set_zero_edge_features(vec2g, 1)\n output_graphs = self.graph_network(vec2g)\n new_node_vals = self.out_to_node(output_graphs.nodes)\n return new_node_vals\n\n def deriv_fun_hogn(self, xt, ks, ms, bs, n_nodes):\n if self.activate_sub == True:\n sub_vecs = self.sub_mean(xt, bs)\n else:\n sub_vecs = xt\n\n input_vec = tf.concat(sub_vecs, 0)\n with tf.GradientTape() as g:\n g.watch(input_vec)\n vec2g = [self.base_graph(input_vec[n_nodes * i:n_nodes * (i + 1)], ks[i], ms[i], n_nodes) for i in\n range(bs)]\n vec2g = utils_tf.data_dicts_to_graphs_tuple(vec2g)\n vec2g = utils_tf.set_zero_global_features(vec2g, 1)\n vec2g = utils_tf.set_zero_edge_features(vec2g, 1)\n output_graphs = self.graph_network(vec2g)\n global_vals = self.out_to_global(output_graphs.globals)\n dUdq = g.gradient(global_vals, input_vec)\n\n dqdt = dUdq[:, int(self.spatial_dim / 2):]\n dpdt = -dUdq[:, :int(self.spatial_dim / 2)]\n dHdin = tf.concat([dqdt, dpdt], 1)\n return dHdin\n\n def sub_mean(self, xt, bs):\n init_x = xt[:, :int(self.spatial_dim / 2)]\n # means = tf.reduce_mean(init_x, 0)\n # new_means = tf.transpose(tf.reshape(tf.repeat(means, init_x.shape[0]), (int(self.spatial_dim / 2), -1)))\n # return tf.concat([init_x - new_means, xt[:, int(self.spatial_dim / 2):]], 1)\n init_x = tf.reshape(init_x, (bs, self.num_nodes, int(self.spatial_dim / 2)))\n means = tf.reshape(tf.reduce_mean(init_x, 1), (-1, int(self.spatial_dim / 2)))\n new_means = tf.repeat(means, self.num_nodes, 0)\n # print(new_means.shape)\n return tf.concat([xt[:, :int(self.spatial_dim / 2)] - new_means, xt[:, int(self.spatial_dim / 2):]], 1)\n\n def deriv_fun_pgn(self, xt, ks, ms, bs, n_nodes):\n if self.activate_sub == True:\n 
sub_vecs = self.sub_mean(xt, bs)\n else:\n sub_vecs = xt\n\n input_vec = tf.concat(sub_vecs, 0)\n q = input_vec[:, :int(self.spatial_dim / 2)]\n p = input_vec[:, int(self.spatial_dim / 2):]\n\n with tf.GradientTape() as g:\n g.watch(q)\n # if bs == 1:\n # vec2g = [self.base_graph(q[n_nodes * i:n_nodes * (i + 1)], ks, ms, n_nodes) for i in range(bs)]\n # else:\n vec2g = [self.base_graph(q[n_nodes * i:n_nodes * (i + 1)], ks[i], ms[i], n_nodes) for i in\n range(bs)]\n\n vec2g = utils_tf.data_dicts_to_graphs_tuple(vec2g)\n vec2g = utils_tf.set_zero_global_features(vec2g, 1)\n vec2g = utils_tf.set_zero_edge_features(vec2g, 1)\n output_graphs = self.graph_network(vec2g)\n global_vals = self.out_to_global(output_graphs.globals)\n\n dUdq = g.gradient(global_vals, q)\n\n return tf.concat([p, -dUdq], 1)\n\n def train_step(self, input_batch, true_batch, ks, mass):\n\n train_feed = {self.base_graph_tr: input_batch,\n self.true_dq_ph: true_batch,\n self.ks_ph: ks,\n self.ms_ph: mass}\n train_ops = [self.loss_op_tr, self.next_step, self.step_op]\n loss, next_pred, _ = self.sess.run(train_ops, feed_dict=train_feed)\n\n return loss, next_pred\n\n def valid_step(self, input_batch, true_batch, ks, mass):\n BS = int(len(input_batch) / self.num_nodes)\n integ = choose_integrator(self.integ_method)\n base_graph_tr = tf.compat.v1.placeholder(tf.float32, shape=[self.num_nodes * BS, self.spatial_dim])\n ks_ph = tf.compat.v1.placeholder(tf.float32, shape=[BS, self.num_nodes])\n ms_ph = tf.compat.v1.placeholder(tf.float32, shape=[BS, self.num_nodes])\n true_dq_ph = tf.compat.v1.placeholder(tf.float32, shape=[None, self.spatial_dim])\n\n if self.deriv_method == 'dgn':\n next_step = integ(self.deriv_fun_dgn, base_graph_tr, ks_ph, ms_ph, self.dt, BS,\n self.num_nodes)\n elif self.deriv_method == 'hogn':\n next_step = integ(self.deriv_fun_hogn, base_graph_tr, ks_ph, ms_ph, self.dt, BS,\n self.num_nodes)\n elif self.deriv_method == 'pgn':\n next_step = integ(self.deriv_fun_pgn, base_graph_tr, ks_ph, ms_ph, self.dt, BS,\n self.num_nodes)\n loss_op_tr = self.create_loss_ops(next_step, true_dq_ph)\n train_feed = {base_graph_tr: input_batch,\n true_dq_ph: true_batch,\n ks_ph: ks,\n ms_ph: mass}\n train_ops = [loss_op_tr, next_step]\n loss, next_pred = self.sess.run(train_ops, feed_dict=train_feed)\n\n return loss, next_pred\n\n def test_step(self, input_batch, true_batch, ks, mass, steps):\n # figures relegated to jupyter notebook infengine\n stored_states = [input_batch.astype(np.float32)]\n for i in range(steps):\n test_feed = {self.test_graph_ph: stored_states[-1],\n self.test_ks_ph: ks,\n self.test_ms_ph: mass}\n test_ops = [self.test_next_step]\n\n yhat = self.sess.run(test_ops, feed_dict=test_feed)\n stored_states.append(yhat[0])\n\n preds = tf.concat(stored_states, 0).eval(session=self.sess)\n\n error = mean_squared_error(preds[self.num_nodes:, :], true_batch[:, :])\n\n if self.output_plots is True:\n data_dir = 'data/plots/' + self.expt_name + '/' + str(self.lr) + '/' + str(self.integ_method) + '/'\n\n if not os.path.exists(data_dir):\n print('non existent')\n os.makedirs(data_dir)\n\n plt.figure(figsize=(15, 10))\n nv = preds[:, :2]\n gt = true_batch[:, :2]\n plt.scatter(nv[:, 0], nv[:, 1], label=self.deriv_method, c='blue')\n plt.scatter(gt[:, 0], gt[:, 1], label='gt', c='black', alpha=0.5)\n plt.legend()\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title(self.deriv_method + str(self.lr) + ' graphic space evolution')\n plt.savefig(data_dir + 'graphic' + self.deriv_method)\n\n plt.figure(figsize=(15, 10))\n nv = 
preds[:, :2]\n gt = true_batch[:, :2]\n plt.scatter(nv[::5, 0], nv[::5, 1], label=self.deriv_method, c='blue')\n plt.scatter(gt[::5, 0], gt[::5, 1], label='gt', c='black', alpha=0.5)\n plt.legend()\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title(self.deriv_method + str(self.lr) + ' graphic space evolution')\n plt.savefig(data_dir + 'graphic' + self.deriv_method + 'onetraj')\n\n return error, preds[self.num_nodes:]\n"
] | [
[
"matplotlib.pyplot.legend",
"tensorflow.keras.backend.floatx",
"tensorflow.concat",
"tensorflow.reduce_sum",
"sklearn.metrics.mean_squared_error",
"tensorflow.train.AdamOptimizer",
"tensorflow.Variable",
"numpy.arange",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"tensorflow.compat.v1.train.exponential_decay",
"matplotlib.pyplot.ylabel",
"tensorflow.GradientTape",
"matplotlib.pyplot.scatter",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.eye",
"tensorflow.repeat",
"tensorflow.compat.v1.placeholder",
"matplotlib.pyplot.xlabel",
"tensorflow.nn.softplus"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
junbinhuang/mofem | [
"6ed2ea5b7a8fbb1f8f0954636f6326c706da302c"
] | [
"linearSolvers/traditionalElement.py"
] | [
"import numpy as np \nimport scipy\n# from sksparse.cholmod import cholesky # It works (from Terminal).\nfrom scipy.sparse.linalg import spsolve\nimport time\nimport sys, os\nsys.path.append(os.path.dirname(sys.path[0]))\nfrom elementLibrary import stiffnessMatrix, shapeFunction\nfrom otherFunctions import numericalIntegration\nfrom linearSolvers import AMORE\nfrom meshTools import toDC\n\ndef lowerOrderFE(inputData):\n \"\"\"The solver for lower order finite elements (4-node quads & 3-node triangles).\"\"\"\n startTime=time.time()\n\n parameters=inputData[0]\n\n # Material matrices.\n materialList=inputData[6]\n\n materialMatrix=[None]*len(materialList)\n\n for i in range(len(materialList)):\n materialMatrix[i]=twoDMaterialMatrix(materialList[i],parameters[1])\n \n # Assemble stiffness matrix.\n coordinates=inputData[5]\n meshes=inputData[3]\n materialMeshList=inputData[4]\n\n Iglo=[]\n Jglo=[]\n Vglo=[]\n\n for i in range(len(meshes[0])):\n coord=AMORE.getCoord(coordinates,meshes[0][i])\n\n if len(meshes[0][i])==3:\n Kloc=stiffnessMatrix.triFE(coord,materialMatrix[materialMeshList[0][i]])\n elif len(meshes[0][i])==4:\n Kloc=stiffnessMatrix.quadFE(coord,materialMatrix[materialMeshList[0][i]])\n else: raise ValueError(\"Wrong element numbering!\")\n \n Iloc,Jloc,Vloc=stiffnessMatrix.sparsifyElementMatrix(Kloc,meshes[0][i])\n Iglo.extend(Iloc)\n Jglo.extend(Jloc)\n Vglo.extend(Vloc)\n\n Iglo=np.array(Iglo,dtype=int)\n Jglo=np.array(Jglo,dtype=int)\n Vglo=np.array(Vglo,dtype='d')\n\n Kglo=scipy.sparse.coo_matrix((Vglo,(Iglo,Jglo)),shape=(2*len(coordinates),2*len(coordinates))).tocsr()\n \n print(\"Assembling stiffness matrix costs %s seconds.\"%(time.time()-startTime))\n startTime=time.time()\n\n # Force term.\n indForce=inputData[-2]\n if indForce[0]: # Body force is imposed.\n pass\n\n forceList=inputData[-1]\n\n nInt=2\n if indForce[1]: nInt+=3 # Customized boundary force.\n\n pos,wei=numericalIntegration.gaussQuad(nInt)\n fglo=np.zeros((2*len(coordinates),1))\n\n for i in range(len(forceList)//2):\n node1=forceList[2*i][0]\n node2=forceList[2*i+1][0]\n length=lenEdge(coordinates[node1],coordinates[node2])\n\n force1=np.array([forceList[2*i][1:3]]).transpose()\n force2=np.array([forceList[2*i+1][1:3]]).transpose()\n\n floc=np.zeros((4,1))\n\n for j in range(nInt):\n Nmat=shapeFunction.oneDLinear(pos[j])\n force=Nmat[0,0]*force1+Nmat[0,2]*force2\n\n floc+=0.5*wei[j]*length*np.matmul(Nmat.transpose(),force)\n\n fglo[2*node1:2*node1+2,0]+=floc[0:2,0]\n fglo[2*node2:2*node2+2,0]+=floc[2:4,0]\n \n print(\"Calculating force term costs %s seconds.\"%(time.time()-startTime))\n startTime=time.time()\n \n # Impose constraints.\n fixList=np.zeros((2*len(coordinates),1))\n fixIndexList=np.zeros((2*len(coordinates),1),dtype=int)\n\n constraintList=inputData[-3]\n # Very important!!! 
Sort the constraints!!!\n constraintList.sort(key=lambda item:item[0])\n\n for i in constraintList:\n if i[1]: \n fixList[2*i[0]]=i[3]\n fixIndexList[2*i[0]]=1\n if i[2]: \n fixList[2*i[0]+1]=i[4]\n fixIndexList[2*i[0]+1]=1\n\n # Solve.\n fglo-=(Kglo.dot(fixList))\n\n Kglo_complete=Kglo.copy()\n Kglo=Kglo.tolil()\n\n count=0\n for i in constraintList:\n if i[1]:\n delete_row_lil(Kglo,2*i[0]-count)\n fglo=np.delete(fglo,2*i[0]-count)\n count+=1\n if i[2]:\n delete_row_lil(Kglo,2*i[0]+1-count)\n fglo=np.delete(fglo,2*i[0]+1-count)\n count+=1\n\n Kglo=Kglo.transpose()\n\n count=0\n for i in constraintList:\n if i[1]:\n delete_row_lil(Kglo,2*i[0]-count)\n count+=1\n if i[2]:\n delete_row_lil(Kglo,2*i[0]+1-count)\n count+=1\n\n print(\"Imposing constraints costs %s seconds.\"%(time.time()-startTime))\n startTime=time.time()\n \n Kglo=Kglo.tocsc()\n print(\"Number of non-zero sparse matrix entries = %s.\"%Kglo.count_nonzero())\n\n # factor=cholesky(Kglo)\n # disp=factor(fglo)\n disp=spsolve(Kglo,fglo)\n\n print(\"Solving the linear system costs %s seconds.\"%(time.time()-startTime))\n\n # The complete displacement solution:\n displacement=np.zeros((2*len(coordinates),1))\n count=0\n for i in range(2*len(coordinates)):\n if fixIndexList[i]: \n displacement[i]=fixList[i]\n count+=1\n else: \n displacement[i]=disp[i-count]\n\n energy=0.5*displacement.transpose()@Kglo_complete@displacement\n\n return displacement,energy\n\ndef ICMFE(inputData):\n \"\"\"The solver for (4-node) ICM finite elements. Warning: The code is only for squares. \n For general quadrilaterals, the formulation needs to be modified to pass patch tests.\"\"\"\n startTime=time.time()\n\n parameters=inputData[0]\n\n # Material matrices.\n materialList=inputData[6]\n\n materialMatrix=[None]*len(materialList)\n\n for i in range(len(materialList)):\n materialMatrix[i]=twoDMaterialMatrix(materialList[i],parameters[1])\n \n # Assemble stiffness matrix.\n coordinates=inputData[5]\n meshes=inputData[3]\n materialMeshList=inputData[4]\n\n Iglo=[]\n Jglo=[]\n Vglo=[]\n\n for i in range(len(meshes[0])):\n coord=AMORE.getCoord(coordinates,meshes[0][i])\n\n if len(meshes[0][i])==4:\n Kloc,_,_=stiffnessMatrix.ICMFE(coord,materialMatrix[materialMeshList[0][i]])\n else: raise ValueError(\"Wrong element numbering!\")\n \n Iloc,Jloc,Vloc=stiffnessMatrix.sparsifyElementMatrix(Kloc,meshes[0][i])\n Iglo.extend(Iloc)\n Jglo.extend(Jloc)\n Vglo.extend(Vloc)\n\n Iglo=np.array(Iglo,dtype=int)\n Jglo=np.array(Jglo,dtype=int)\n Vglo=np.array(Vglo,dtype='d')\n\n Kglo=scipy.sparse.coo_matrix((Vglo,(Iglo,Jglo)),shape=(2*len(coordinates),2*len(coordinates))).tocsr()\n \n print(\"Assembling stiffness matrix costs %s seconds.\"%(time.time()-startTime))\n startTime=time.time()\n\n # Force term.\n indForce=inputData[-2]\n if indForce[0]: # Body force is imposed.\n pass\n\n forceList=inputData[-1]\n\n nInt=2\n if indForce[1]: nInt+=3 # Customized boundary force.\n\n pos,wei=numericalIntegration.gaussQuad(nInt)\n fglo=np.zeros((2*len(coordinates),1))\n\n for i in range(len(forceList)//2):\n node1=forceList[2*i][0]\n node2=forceList[2*i+1][0]\n length=lenEdge(coordinates[node1],coordinates[node2])\n\n force1=np.array([forceList[2*i][1:3]]).transpose()\n force2=np.array([forceList[2*i+1][1:3]]).transpose()\n\n floc=np.zeros((4,1))\n\n for j in range(nInt):\n Nmat=shapeFunction.oneDLinear(pos[j])\n force=Nmat[0,0]*force1+Nmat[0,2]*force2\n\n floc+=0.5*wei[j]*length*np.matmul(Nmat.transpose(),force)\n\n fglo[2*node1:2*node1+2,0]+=floc[0:2,0]\n 
fglo[2*node2:2*node2+2,0]+=floc[2:4,0]\n \n print(\"Calculating force term costs %s seconds.\"%(time.time()-startTime))\n startTime=time.time()\n \n # Impose constraints.\n fixList=np.zeros((2*len(coordinates),1))\n fixIndexList=np.zeros((2*len(coordinates),1),dtype=int)\n\n constraintList=inputData[-3]\n # Very important!!! Sort the constraints!!!\n constraintList.sort(key=lambda item:item[0])\n\n for i in constraintList:\n if i[1]: \n fixList[2*i[0]]=i[3]\n fixIndexList[2*i[0]]=1\n if i[2]: \n fixList[2*i[0]+1]=i[4]\n fixIndexList[2*i[0]+1]=1\n\n # Solve.\n fglo-=(Kglo.dot(fixList))\n\n Kglo_complete=Kglo.copy()\n Kglo=Kglo.tolil()\n\n count=0\n for i in constraintList:\n if i[1]:\n delete_row_lil(Kglo,2*i[0]-count)\n fglo=np.delete(fglo,2*i[0]-count)\n count+=1\n if i[2]:\n delete_row_lil(Kglo,2*i[0]+1-count)\n fglo=np.delete(fglo,2*i[0]+1-count)\n count+=1\n\n Kglo=Kglo.transpose()\n\n count=0\n for i in constraintList:\n if i[1]:\n delete_row_lil(Kglo,2*i[0]-count)\n count+=1\n if i[2]:\n delete_row_lil(Kglo,2*i[0]+1-count)\n count+=1\n\n print(\"Imposing constraints costs %s seconds.\"%(time.time()-startTime))\n startTime=time.time()\n \n Kglo=Kglo.tocsc()\n print(\"Number of non-zero sparse matrix entries = %s.\"%Kglo.count_nonzero())\n\n # factor=cholesky(Kglo)\n # disp=factor(fglo)\n disp=spsolve(Kglo,fglo)\n\n print(\"Solving the linear system costs %s seconds.\"%(time.time()-startTime))\n\n # The complete displacement solution:\n displacement=np.zeros((2*len(coordinates),1))\n count=0\n for i in range(2*len(coordinates)):\n if fixIndexList[i]: \n displacement[i]=fixList[i]\n count+=1\n else: \n displacement[i]=disp[i-count]\n\n energy=0.5*displacement.transpose()@Kglo_complete@displacement\n\n return displacement,energy\n\ndef quadraticFE(inputData):\n \"\"\"The solver for second order finite elements (9-node quads & 6-node triangles).\"\"\"\n startTime=time.time()\n\n parameters=inputData[0]\n\n # Material matrices.\n materialList=inputData[6]\n\n materialMatrix=[None]*len(materialList)\n\n for i in range(len(materialList)):\n materialMatrix[i]=twoDMaterialMatrix(materialList[i],parameters[1])\n \n # Assemble stiffness matrix.\n coordinates=inputData[5]\n meshes=inputData[3]\n materialMeshList=inputData[4]\n\n nodeElements=toDC.nodeElementList(coordinates,meshes)\n\n Iglo=[]\n Jglo=[]\n Vglo=[]\n\n for i in range(len(meshes[0])):\n coord=AMORE.getCoord(coordinates,meshes[0][i])\n\n if len(meshes[0][i])==6:\n Kloc=stiffnessMatrix.triQuadFE(coord,materialMatrix[materialMeshList[0][i]])\n elif len(meshes[0][i])==9:\n Kloc=stiffnessMatrix.quadQuadFE(coord,materialMatrix[materialMeshList[0][i]])\n else: raise ValueError(\"Wrong element numbering!\")\n \n Iloc,Jloc,Vloc=stiffnessMatrix.sparsifyElementMatrix(Kloc,meshes[0][i])\n Iglo.extend(Iloc)\n Jglo.extend(Jloc)\n Vglo.extend(Vloc)\n\n Iglo=np.array(Iglo,dtype=int)\n Jglo=np.array(Jglo,dtype=int)\n Vglo=np.array(Vglo,dtype='d')\n\n Kglo=scipy.sparse.coo_matrix((Vglo,(Iglo,Jglo)),shape=(2*len(coordinates),2*len(coordinates))).tocsr()\n \n print(\"Assembling stiffness matrix costs %s seconds.\"%(time.time()-startTime))\n startTime=time.time()\n\n # Force term.\n indForce=inputData[-2]\n if indForce[0]: # Body force is imposed.\n pass\n\n forceList=inputData[-1]\n\n nInt=3\n if indForce[1]: nInt+=2 # Customized boundary force.\n\n pos,wei=numericalIntegration.gaussQuad(nInt)\n fglo=np.zeros((2*len(coordinates),1))\n\n for i in range(len(forceList)//2):\n node1=forceList[2*i][0]\n node2=forceList[2*i+1][0]\n # 
length=lenEdge(coordinates[node1],coordinates[node2])\n\n # Find the element.\n elementPosition=(set(nodeElements[node1]) & set(nodeElements[node2])).pop()\n numbering=meshes[elementPosition[0]][elementPosition[1]]\n node3=findMidNode(numbering,node1,node2)\n\n force1=np.array([forceList[2*i][1:3]]).transpose()\n force2=np.array([forceList[2*i+1][1:3]]).transpose()\n\n floc=np.zeros((6,1))\n coord=np.array([coordinates[node1],coordinates[node2],[0.0,0.0]])\n if coordinates[node3]: coord[2,:]=np.array(coordinates[node3])\n else: coord[2,:]=0.5*(coord[0,:]+coord[1,:])\n\n for j in range(nInt):\n # Only support linear force distribution.\n # Otherwise, use customized boundary force.\n Nmat=shapeFunction.oneDLinear(pos[j])\n force=Nmat[0,0]*force1+Nmat[0,2]*force2\n\n quadNmat,Jacobian=shapeFunction.oneDQuadratic(pos[j],coord)\n\n floc+=wei[j]*Jacobian*np.matmul(quadNmat.transpose(),force)\n\n fglo[2*node1:2*node1+2,0]+=floc[0:2,0]\n fglo[2*node2:2*node2+2,0]+=floc[2:4,0]\n fglo[2*node3:2*node3+2,0]+=floc[4:6,0]\n \n print(\"Calculating force term costs %s seconds.\"%(time.time()-startTime))\n startTime=time.time()\n \n # Impose constraints.\n fixList=np.zeros((2*len(coordinates),1))\n fixIndexList=np.zeros((2*len(coordinates),1),dtype=int)\n\n constraintList=inputData[-3]\n # Very important!!! Sort the constraints!!!\n constraintList.sort(key=lambda item:item[0])\n \n for i in constraintList:\n if i[1]: \n fixList[2*i[0]]=i[3]\n fixIndexList[2*i[0]]=1\n if i[2]: \n fixList[2*i[0]+1]=i[4]\n fixIndexList[2*i[0]+1]=1\n\n # Solve.\n fglo-=(Kglo.dot(fixList))\n\n Kglo_complete=Kglo.copy()\n Kglo=Kglo.tolil()\n\n count=0\n for i in constraintList:\n if i[1]:\n delete_row_lil(Kglo,2*i[0]-count)\n fglo=np.delete(fglo,2*i[0]-count)\n count+=1\n if i[2]:\n delete_row_lil(Kglo,2*i[0]+1-count)\n fglo=np.delete(fglo,2*i[0]+1-count)\n count+=1\n\n Kglo=Kglo.transpose()\n\n count=0\n for i in constraintList:\n if i[1]:\n delete_row_lil(Kglo,2*i[0]-count)\n count+=1\n if i[2]:\n delete_row_lil(Kglo,2*i[0]+1-count)\n count+=1\n\n print(\"Imposing constraints costs %s seconds.\"%(time.time()-startTime))\n startTime=time.time()\n \n Kglo=Kglo.tocsc()\n\n # factor=cholesky(Kglo)\n # disp=factor(fglo)\n disp=spsolve(Kglo,fglo)\n\n print(\"Solving the linear system costs %s seconds.\"%(time.time()-startTime))\n\n # The complete displacement solution:\n displacement=np.zeros((2*len(coordinates),1))\n count=0\n for i in range(2*len(coordinates)):\n if fixIndexList[i]: \n displacement[i]=fixList[i]\n count+=1\n else: \n displacement[i]=disp[i-count]\n\n energy=0.5*displacement.transpose()@Kglo_complete@displacement\n\n return displacement,energy\n\ndef findMidNode(numbering,node1,node2):\n if len(numbering)==6:\n temp=[1,2,0]\n for i in range(3):\n if (numbering[i]==node1 and numbering[temp[i]]==node2) or \\\n (numbering[i]==node2 and numbering[temp[i]]==node1):\n return numbering[i+3]\n elif len(numbering)==9:\n temp=[1,2,3,0]\n for i in range(4):\n if (numbering[i]==node1 and numbering[temp[i]]==node2) or \\\n (numbering[i]==node2 and numbering[temp[i]]==node1):\n return numbering[i+4]\n\n return None\n\ndef twoDMaterialMatrix(material,problemType):\n \"\"\"Input:\n \n material: [E,nu];\n \n problemType: 1 -- Plane stress; 2 -- Plane strain.\"\"\"\n\n if problemType==1: # Plane stress\n Dmat=np.array([[material[0]/(1.0-material[1]**2),material[0]*material[1]/(1.0-material[1]**2),0.0],\\\n [material[0]*material[1]/(1.0-material[1]**2),material[0]/(1.0-material[1]**2),0.0],\\\n 
[0.0,0.0,material[0]/2.0/(1.0+material[1])]])\n elif problemType==2: #Plane strain\n cc=material[0]*(1.0-material[1])/(1.0+material[1])/(1.0-2.0*material[1])\n Dmat=np.array([[cc,cc*material[1]/(1.0-material[1]),0.0],\\\n [cc*material[1]/(1.0-material[1]),cc,0.0],\\\n [0.0,0.0,cc*(1.0-2.0*material[1])/(2.0*(1.0-material[1]))]])\n else: raise ValueError(\"No such problem type!\")\n \n return Dmat\n\ndef lenEdge(coord1,coord2):\n return ((coord1[0]-coord2[0])**2+(coord1[1]-coord2[1])**2)**0.5\n\ndef delete_row_lil(matrix,i):\n if not isinstance(matrix,scipy.sparse.lil_matrix):\n raise ValueError(\"The matrix should be in LIL format!\")\n matrix.rows=np.delete(matrix.rows,i)\n matrix.data=np.delete(matrix.data,i)\n matrix._shape=(matrix._shape[0]-1,matrix._shape[1])\n\nif __name__==\"__main__\":\n pass"
] | [
[
"numpy.delete",
"numpy.array",
"numpy.zeros",
"scipy.sparse.linalg.spsolve"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
Nvpiao/PhD-Work | [
"cb257d1e2fe828b837634a9b113df57eb3c44065"
] | [
"30-11-2021/finetune_gpt2/src/gpt2model/dataset.py"
] | [
"import random\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass AmazonDataset(Dataset):\n def __init__(self, data, tokenizer, max_length, special_tokens, randomize=True):\n \"\"\"\n create dataset\n :param data: data frame\n :param tokenizer:\n :param max_length:\n :param special_tokens:\n :param randomize:\n \"\"\"\n self.data = data\n self.data_size = len(data)\n self.tokenizer = tokenizer\n self.special_tokens = special_tokens\n self.randomize = randomize\n self.max_length = max_length\n\n @staticmethod\n def join_keywords(keywords, randomize=True):\n keywords_len = len(keywords)\n if randomize:\n random_size = random.choice(range(keywords_len + 1))\n keywords = keywords[:random_size]\n random.shuffle(keywords)\n return ','.join(keywords)\n\n def __getitem__(self, idx):\n row_obj = self.data.iloc[idx]\n product_title = row_obj['PRODUCT_TITLE']\n review_title = row_obj['REVIEW_TITLE']\n review_text = row_obj['REVIEW_TEXT']\n #\n # keywords = review_title.split()\n # keywords = self.join_keywords(keywords, self.randomize)\n\n input = self.special_tokens['bos_token'] + product_title + self.special_tokens['sep_token'] + review_title + \\\n self.special_tokens['sep_token'] + review_text + self.special_tokens['eos_token']\n\n encodings_dict = self.tokenizer(input,\n truncation=True,\n max_length=self.max_length,\n padding='max_length')\n input_ids = encodings_dict['input_ids']\n attention_mask = encodings_dict['attention_mask']\n\n return {'label': torch.tensor(input_ids),\n 'input_ids': torch.tensor(input_ids),\n 'attention_mask': torch.tensor(attention_mask)}\n\n def __len__(self):\n return self.data_size\n\n\ndef get_train_val_dataloader(batch_size, train_set, train_ratio):\n \"\"\"\n split train set into train and validation sets\n :param batch_size:\n :param train_set:\n :param train_ratio\n :return: train\\\\validation datasets and loaders\n \"\"\"\n\n train_size = int(train_ratio * len(train_set))\n val_size = len(train_set) - train_size\n train_dataset, val_dataset = torch.utils.data.random_split(train_set, [train_size, val_size])\n\n train_loader = DataLoader(train_dataset,\n batch_size=batch_size,\n shuffle=True)\n\n val_loader = DataLoader(val_dataset,\n batch_size=len(val_dataset),\n shuffle=False, )\n\n return train_loader, val_loader, train_dataset, val_dataset\n"
] | [
[
"torch.utils.data.random_split",
"torch.utils.data.DataLoader",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
komour/diploma | [
"545af6481f488cfeea9d543fb5fcde66c9c35664"
] | [
"gradcam/utils.py"
] | [
"import cv2\nimport torch\n\nlayer_finders = {}\n\n\ndef register_layer_finder(model_type):\n def register(func):\n layer_finders[model_type] = func\n return func\n\n return register\n\n\ndef visualize_cam(mask, img, alpha=1.0):\n \"\"\"Make heatmap from mask and synthesize GradCAM result image using heatmap and img.\n Args:\n mask (torch.tensor): mask shape of (1, 1, H, W) and each element has value in range [0, 1]\n img (torch.tensor): img shape of (1, 3, H, W) and each pixel value is in range [0, 1]\n\n Return:\n heatmap (torch.tensor): heatmap img shape of (3, H, W)\n result (torch.tensor): synthesized GradCAM result of same shape with heatmap.\n \"\"\"\n heatmap = (255 * mask.squeeze()).type(torch.uint8).cpu().numpy()\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = torch.from_numpy(heatmap).permute(2, 0, 1).float().div(255)\n b, g, r = heatmap.split(1)\n heatmap = torch.cat([r, g, b]) * alpha\n\n result = heatmap+img.cpu()\n # result = heatmap + img\n result = result.div(result.max()).squeeze()\n\n return heatmap, result\n\n\n@register_layer_finder('resnet')\ndef find_resnet_layer(arch, target_layer_name):\n \"\"\"Find resnet layer to calculate GradCAM and GradCAM++\n\n Args:\n arch: default torchvision densenet models\n target_layer_name (str): the name of layer with its hierarchical information. please refer to usages below.\n target_layer_name = 'conv1'\n target_layer_name = 'layer1'\n target_layer_name = 'layer1_basicblock0'\n target_layer_name = 'layer1_basicblock0_relu'\n target_layer_name = 'layer1_bottleneck0'\n target_layer_name = 'layer1_bottleneck0_conv1'\n target_layer_name = 'layer1_bottleneck0_downsample'\n target_layer_name = 'layer1_bottleneck0_downsample_0'\n target_layer_name = 'avgpool'\n target_layer_name = 'fc'\n\n Return:\n target_layer: found layer. this layer will be hooked to get forward/backward pass information.\n \"\"\"\n if 'layer' in target_layer_name:\n hierarchy = target_layer_name.split('_')\n layer_num = int(hierarchy[0].lstrip('layer'))\n if layer_num == 1:\n target_layer = arch.layer1\n elif layer_num == 2:\n target_layer = arch.layer2\n elif layer_num == 3:\n target_layer = arch.layer3\n elif layer_num == 4:\n target_layer = arch.layer4\n else:\n raise ValueError('unknown layer : {}'.format(target_layer_name))\n\n if len(hierarchy) >= 2:\n bottleneck_num = int(hierarchy[1].lower().lstrip('bottleneck').lstrip('basicblock'))\n target_layer = target_layer[bottleneck_num]\n\n if len(hierarchy) >= 3:\n target_layer = target_layer._modules[hierarchy[2]]\n\n if len(hierarchy) == 4:\n target_layer = target_layer._modules[hierarchy[3]]\n\n else:\n target_layer = arch._modules[target_layer_name]\n\n return target_layer\n\n\n@register_layer_finder('densenet')\ndef find_densenet_layer(arch, target_layer_name):\n \"\"\"Find densenet layer to calculate GradCAM and GradCAM++\n\n Args:\n arch: default torchvision densenet models\n target_layer_name (str): the name of layer with its hierarchical information. please refer to usages below.\n target_layer_name = 'features'\n target_layer_name = 'features_transition1'\n target_layer_name = 'features_transition1_norm'\n target_layer_name = 'features_denseblock2_denselayer12'\n target_layer_name = 'features_denseblock2_denselayer12_norm1'\n target_layer_name = 'features_denseblock2_denselayer12_norm1'\n target_layer_name = 'classifier'\n\n Return:\n target_layer: found layer. 
this layer will be hooked to get forward/backward pass information.\n \"\"\"\n\n hierarchy = target_layer_name.split('_')\n target_layer = arch._modules[hierarchy[0]]\n\n if len(hierarchy) >= 2:\n target_layer = target_layer._modules[hierarchy[1]]\n\n if len(hierarchy) >= 3:\n target_layer = target_layer._modules[hierarchy[2]]\n\n if len(hierarchy) == 4:\n target_layer = target_layer._modules[hierarchy[3]]\n\n return target_layer\n\n\n@register_layer_finder('vgg')\ndef find_vgg_layer(arch, target_layer_name):\n \"\"\"Find vgg layer to calculate GradCAM and GradCAM++\n\n Args:\n arch: default torchvision densenet models\n target_layer_name (str): the name of layer with its hierarchical information. please refer to usages below.\n target_layer_name = 'features'\n target_layer_name = 'features_42'\n target_layer_name = 'classifier'\n target_layer_name = 'classifier_0'\n\n Return:\n target_layer: found layer. this layer will be hooked to get forward/backward pass information.\n \"\"\"\n hierarchy = target_layer_name.split('_')\n\n if len(hierarchy) >= 1:\n target_layer = arch.features\n\n if len(hierarchy) == 2:\n target_layer = target_layer[int(hierarchy[1])]\n\n return target_layer\n\n\n@register_layer_finder('alexnet')\ndef find_alexnet_layer(arch, target_layer_name):\n \"\"\"Find alexnet layer to calculate GradCAM and GradCAM++\n\n Args:\n arch: default torchvision densenet models\n target_layer_name (str): the name of layer with its hierarchical information. please refer to usages below.\n target_layer_name = 'features'\n target_layer_name = 'features_0'\n target_layer_name = 'classifier'\n target_layer_name = 'classifier_0'\n\n Return:\n target_layer: found layer. this layer will be hooked to get forward/backward pass information.\n \"\"\"\n hierarchy = target_layer_name.split('_')\n\n if len(hierarchy) >= 1:\n target_layer = arch.features\n\n if len(hierarchy) == 2:\n target_layer = target_layer[int(hierarchy[1])]\n\n return target_layer\n\n\n@register_layer_finder('squeezenet')\ndef find_squeezenet_layer(arch, target_layer_name):\n \"\"\"Find squeezenet layer to calculate GradCAM and GradCAM++\n\n Args:\n arch: default torchvision densenet models\n target_layer_name (str): the name of layer with its hierarchical information. please refer to usages below.\n target_layer_name = 'features_12'\n target_layer_name = 'features_12_expand3x3'\n target_layer_name = 'features_12_expand3x3_activation'\n\n Return:\n target_layer: found layer. 
this layer will be hooked to get forward/backward pass information.\n \"\"\"\n hierarchy = target_layer_name.split('_')\n target_layer = arch._modules[hierarchy[0]]\n\n if len(hierarchy) >= 2:\n target_layer = target_layer._modules[hierarchy[1]]\n\n if len(hierarchy) == 3:\n target_layer = target_layer._modules[hierarchy[2]]\n\n elif len(hierarchy) == 4:\n target_layer = target_layer._modules[hierarchy[2] + '_' + hierarchy[3]]\n\n return target_layer\n\n\ndef denormalize(tensor, mean, std):\n if not tensor.ndimension() == 4:\n raise TypeError('tensor should be 4D')\n\n mean = torch.FloatTensor(mean).view(1, 3, 1, 1).expand_as(tensor).to(tensor.device)\n std = torch.FloatTensor(std).view(1, 3, 1, 1).expand_as(tensor).to(tensor.device)\n\n return tensor.mul(std).add(mean)\n\n\ndef normalize(tensor, mean, std):\n if not tensor.ndimension() == 4:\n raise TypeError('tensor should be 4D')\n\n mean = torch.FloatTensor(mean).view(1, 3, 1, 1).expand_as(tensor).to(tensor.device)\n std = torch.FloatTensor(std).view(1, 3, 1, 1).expand_as(tensor).to(tensor.device)\n\n return tensor.sub(mean).div(std)\n\n\nclass Normalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n return self.do(tensor)\n\n def do(self, tensor):\n return normalize(tensor, self.mean, self.std)\n\n def undo(self, tensor):\n return denormalize(tensor, self.mean, self.std)\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n"
] | [
[
"torch.FloatTensor",
"torch.from_numpy",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mfranco/pymir | [
"f6d86bfdec942156ae95984f1ef8182d8983181f"
] | [
"code/python/pymir/dsp/waveform.py"
] | [
"import matplotlib\nmatplotlib.use('Agg')\nimport librosa\nfrom librosa.core import load\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef main():\n y, sr = load('/data/audio/autum_leaves_take1.wav', duration=10)\n plt.figure()\n plt.subplot(1, 1, 1)\n librosa.display.waveplot(y, sr=sr)\n plt.title('Monophonic')\n plt.savefig('wave.png')\n \n D = librosa.logamplitude(np.abs(librosa.stft(y))**2, ref_power=np.max)\n plt.subplot(1, 2, 1)\n librosa.display.specshow(D, y_axis='linear')\n plt.colorbar(format='%+2.0f dB')\n plt.title('Linear-frequency power spectrogram')\n plt.savefig('spectrogram.png')\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |